diff --git a/README.md b/README.md index 869613966..7af21ef39 100644 --- a/README.md +++ b/README.md @@ -116,7 +116,7 @@ You are welcome to make contributions to PolarDB-X. We appreciate all the contri ## Community You can join these groups and chats to discuss and ask PolarDB-X related questions: - DingTalk Group: [32432897](https://h5.dingtalk.com/circle/healthCheckin.html?dtaction=os&corpId=dingc5456617ca6ab502e1cc01e222598659&1b3d4=1ec1b&cbdbhh=qwertyuiop#/) - ![DingTalk Group](docs/images/dingtalk_group.png) + ![DingTalk Group](docs/images/dingtalk_group.jpg) - WeChat Group: 阿里云 PolarDB-X 开源交流群 (Contact group manager to get into wechat group. Managers' ID: oldbread3, hustfxj, agapple0002) ![WeChat Manager 1](docs/images/wechat_manager_a.jpg) ![WeChat Manager 2](docs/images/wechat_manager_b.jpg) ![WeChat Manager 3](docs/images/wechat_manager_c.jpg) diff --git a/codestyle/format_lab.sh b/codestyle/format_lab.sh index 69f03c20a..5c4da0c82 100644 --- a/codestyle/format_lab.sh +++ b/codestyle/format_lab.sh @@ -2,7 +2,7 @@ baseDir=`pwd`/ commit=`git merge-base origin/polardbx_opensource HEAD` -files=`git diff --name-only ${commit} | grep '.java' | grep -v 'polardbx-calcite' | grep -v 'polardbx-rpc/src/main/java/com/mysql/cj'| grep -v 'com/alibaba/polardbx/rpc/cdc'| xargs -I {} echo ${baseDir}{}` +files=`git diff --name-only ${commit} | grep '.java' | grep -v 'polardbx-calcite' | grep -v 'polardbx-orc' | grep -v 'polardbx-rpc/src/main/java/com/mysql/cj'| grep -v 'com/alibaba/polardbx/rpc/cdc'| xargs -I {} echo ${baseDir}{}` count=0 batchFile='' for file in $files; do diff --git a/docs/en/quickstart-development.md b/docs/en/quickstart-development.md index eac67558c..9598a9d48 100644 --- a/docs/en/quickstart-development.md +++ b/docs/en/quickstart-development.md @@ -80,12 +80,13 @@ make install ```shell # install JDK 1.8 and Maven 3 -# enter the polardbx-sql directory -cd polardbx-sql/ # make sure polardbx-rpc (polardbx-glue) initialized git submodule update --init +# enter the polardbx-sql directory +cd polardbx-sql/ + # compile&install mvn install -D maven.test.skip=true -D env=release diff --git a/docs/images/dingtalk_group.jpg b/docs/images/dingtalk_group.jpg new file mode 100644 index 000000000..b29ce25af Binary files /dev/null and b/docs/images/dingtalk_group.jpg differ diff --git a/docs/images/dingtalk_group.png b/docs/images/dingtalk_group.png deleted file mode 100644 index 6e78285b6..000000000 Binary files a/docs/images/dingtalk_group.png and /dev/null differ diff --git a/docs/zh_CN/quickstart-development.md b/docs/zh_CN/quickstart-development.md index 82d45c545..72172e38c 100644 --- a/docs/zh_CN/quickstart-development.md +++ b/docs/zh_CN/quickstart-development.md @@ -126,7 +126,7 @@ mkdir -p /u01/my3306/{data,log,run,tmp,mysql} - metadb user:以下采用`my_polarx` - metadb database:创建metadb库,以下采用 `polardbx_meta_db_polardbx` -- 密码加密key(dnPasswordKey):以下采用 `asdf1234ghjk5678` (如果要修改加密key, 新key的长度要求为16位) +- 密码加密key(dnPasswordKey):以下采用 `asdf1234ghjk5678` - PolarDB-X默认用户名:默认为 `polarx_root` - PolarDB-X默认用户密码:默认为 `123456`,可通过 `-S` 参数修改 @@ -199,7 +199,7 @@ mysql -h127.1 -P8527 -upolardbx_root - metadb database:和启动PolarDB-X时设置的值保持一致,以下采用 `polardbx_meta_db_polardbx` - metadb password:和启动PolarDB-X时设置的值保持一致,需使用密文,以下采用`HMqvkvXZtT7XedA6t2IWY8+D7fJWIJir/mIY1Nf1b58=` - metadb port:和启动MySQL时设置的值保持一致,以下采用 `4886` -- 密码加密key(dnPasswordKey):和启动PolarDB-X时设置的值保持一致,以下采用 `asdf1234ghjk5678` (如果要修改加密key, 新key的长度要求为16位) +- 密码加密key(dnPasswordKey):和启动PolarDB-X时设置的值保持一致,以下采用 `asdf1234ghjk5678` - 
PolarDB-X用户名:和启动PolarDB-X时设置的值保持一致,以下采用默认值 `polardbx_root` - PolarDB-X用户密码:和启动PolarDB-X时设置的值保持一致,需使用密文,以下采用默认值`H1AzXc2NmCs61dNjH5nMvA==` - PolarDB-X端口:和启动PolarDB-X时设置的值保持一致,以下采用默认值 `8527` diff --git a/polardbx-calcite/pom.xml b/polardbx-calcite/pom.xml index 0cd4b4f38..dd15cd601 100644 --- a/polardbx-calcite/pom.xml +++ b/polardbx-calcite/pom.xml @@ -20,13 +20,13 @@ limitations under the License. polardbx com.alibaba.polardbx - 5.4.18-SNAPSHOT + ${revision} polardbx-calcite - jar - 5.4.18-SNAPSHOT + jar + ${calcite.version} + ${project.artifactId} module for polardbx ${project.version} Core Calcite APIs and engine. - ${project.artifactId} module for polardbx ${project.version} ${project.basedir}/.. ${maven.build.timestamp} @@ -57,10 +57,12 @@ limitations under the License. ${commons-dbcp.version} - - - org.apache.commons - commons-lang3 + org.apache.commons + commons-lang3 + + + commons-lang + commons-lang com.fasterxml.jackson.core @@ -195,6 +197,12 @@ limitations under the License. polardbx-rule ${parent.version} + + org.mockito + mockito-inline + ${mockito.verison} + test + diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/jdbc/JavaTypeFactoryImpl.java b/polardbx-calcite/src/main/java/org/apache/calcite/jdbc/JavaTypeFactoryImpl.java index bad2c0f46..00c4b28af 100644 --- a/polardbx-calcite/src/main/java/org/apache/calcite/jdbc/JavaTypeFactoryImpl.java +++ b/polardbx-calcite/src/main/java/org/apache/calcite/jdbc/JavaTypeFactoryImpl.java @@ -75,7 +75,7 @@ public JavaTypeFactoryImpl(RelDataTypeSystem typeSystem) { public RelDataType createStructType(Class type) { final List list = new ArrayList<>(); - for (Field field : type.getFields()) { + for (Field field : type.getDeclaredFields()) { if (!Modifier.isStatic(field.getModifiers())) { // FIXME: watch out for recursion final Type fieldType = fieldType(field); diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/plan/RelOptCluster.java b/polardbx-calcite/src/main/java/org/apache/calcite/plan/RelOptCluster.java index 8d75d9d3a..dc513ce2b 100644 --- a/polardbx-calcite/src/main/java/org/apache/calcite/plan/RelOptCluster.java +++ b/polardbx-calcite/src/main/java/org/apache/calcite/plan/RelOptCluster.java @@ -19,6 +19,7 @@ import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.core.CorrelationId; import org.apache.calcite.rel.metadata.DefaultRelMetadataProvider; +import org.apache.calcite.rel.metadata.JaninoRelMetadataProvider; import org.apache.calcite.rel.metadata.MetadataFactory; import org.apache.calcite.rel.metadata.MetadataFactoryImpl; import org.apache.calcite.rel.metadata.RelMetadataProvider; @@ -150,19 +151,26 @@ public MetadataFactory getMetadataFactory() { * method, then use {@link RelOptRuleCall#getMetadataQuery()} instead. 
*/ public RelMetadataQuery getMetadataQuery() { if (mq == null) { - mq = RelMetadataQuery.instance(); + mq = buildMetaQuery(); } RelMetadataQuery local = mq; if (local == null) { // maybe some concurrent thread call invalidateMetadataQuery // cache RelMetadataQuery on the stack to in case of returning null - local = RelMetadataQuery.instance(); + local = buildMetaQuery(); mq = local; } return local; } + private RelMetadataQuery buildMetaQuery() { + RelMetadataProvider provider = getMetadataProvider(); + // provider in cluster cannot be a JaninoRelMetadataProvider, + // for it doesn't support RelMetadataProvider.apply method + return RelMetadataQuery.instance(JaninoRelMetadataProvider.of(provider)); + } + /** * Should be called whenever the current {@link RelMetadataQuery} becomes * invalid. Typically invoked from {@link RelOptRuleCall#transformTo}. diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/plan/RelOptUtil.java b/polardbx-calcite/src/main/java/org/apache/calcite/plan/RelOptUtil.java index 6d5b52c8a..9aa33c713 100644 --- a/polardbx-calcite/src/main/java/org/apache/calcite/plan/RelOptUtil.java +++ b/polardbx-calcite/src/main/java/org/apache/calcite/plan/RelOptUtil.java @@ -96,12 +96,14 @@ import org.apache.calcite.runtime.CalciteContextException; import org.apache.calcite.runtime.PredicateImpl; import org.apache.calcite.schema.ModifiableView; +import org.apache.calcite.sql.SqlBasicCall; import org.apache.calcite.sql.SqlExplainFormat; import org.apache.calcite.sql.SqlExplainLevel; import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlLiteral; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.SqlOperator; +import org.apache.calcite.sql.SqlSelect; import org.apache.calcite.sql.fun.SqlStdOperatorTable; import org.apache.calcite.sql.type.MultisetSqlType; import org.apache.calcite.sql.type.SqlTypeName; @@ -235,7 +237,7 @@ public static Set findTables(RelNode rel) { */ public static List findAllTables(RelNode rel) { final Multimap, RelNode> nodes = - RelMetadataQuery.instance().getNodeTypes(rel); + rel.getCluster().getMetadataQuery().getNodeTypes(rel); final List usedTables = new ArrayList<>(); for (Entry, Collection> e : nodes.asMap().entrySet()) { if (TableScan.class.isAssignableFrom(e.getKey())) { @@ -259,7 +261,7 @@ public static Set findCorrelates(RelNode rel) { */ public static List findAllCorrelate(RelNode rel) { final Multimap, RelNode> nodes = - RelMetadataQuery.instance().getNodeTypes(rel); + rel.getCluster().getMetadataQuery().getNodeTypes(rel); final List usedTables = new ArrayList<>(); for (Entry, Collection> e : nodes.asMap().entrySet()) { if (Correlate.class.isAssignableFrom(e.getKey())) { @@ -1925,6 +1927,11 @@ public static boolean eq( return litmus.succeed(); } + if (type1.getSqlTypeName() == SqlTypeName.DECIMAL + && type2.getSqlTypeName() == SqlTypeName.DECIMAL) { + return litmus.succeed(); + } + if (type1 != type2) { return litmus.fail("type mismatch:\n{}:\n{}\n{}:\n{}", desc1, type1.getFullTypeString(), @@ -2877,7 +2884,7 @@ public static boolean containsLimit(RelNode ancestor) { try { new RelVisitor() { public void visit(RelNode node, int ordinal, RelNode parent) { - if (node instanceof Sort) { + if (node instanceof Sort&&((Sort)node).withLimit()) { throw Util.FoundOne.NULL; } super.visit(node, ordinal, parent); @@ -4349,6 +4356,26 @@ public boolean test(RelNode relNode) { return traits.getCollation().isTop() && traits.getDistribution().isTop(); } }; + + public static boolean isUnion(SqlNode node) { + return node 
instanceof SqlBasicCall && node.getKind() == SqlKind.UNION; + } + + public static int getColumnCount(SqlNode node) { + if (isUnion(node)) { + final SqlNode[] children = ((SqlBasicCall) node).getOperands(); + for (int i = 0; i < children.length; i++) { + final int columnCount = getColumnCount(children[i]); + if (columnCount > 0) { + return columnCount; + } + } + return -1; + } else if (node instanceof SqlSelect) { + return ((SqlSelect) node).getSelectList().size(); + } + return -1; + } } // End RelOptUtil.java diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/plan/RelTraitSet.java b/polardbx-calcite/src/main/java/org/apache/calcite/plan/RelTraitSet.java index 2457a5875..8ea4eb861 100644 --- a/polardbx-calcite/src/main/java/org/apache/calcite/plan/RelTraitSet.java +++ b/polardbx-calcite/src/main/java/org/apache/calcite/plan/RelTraitSet.java @@ -20,6 +20,9 @@ import org.apache.calcite.rel.RelCollationTraitDef; import org.apache.calcite.rel.RelDistribution; import org.apache.calcite.rel.RelDistributionTraitDef; +import org.apache.calcite.rel.RelPartitionWise; +import org.apache.calcite.rel.RelPartitionWiseTraitDef; +import org.apache.calcite.rel.RelPartitionWises; import org.apache.calcite.util.mapping.Mappings; import com.google.common.collect.ImmutableList; @@ -390,6 +393,10 @@ public RelTraitSet getDefaultSansConvention() { return (@Nullable T) getTrait(RelCollationTraitDef.INSTANCE); } + public @Nullable T getPartitionWise() { + return (@Nullable T) getTrait(RelPartitionWiseTraitDef.INSTANCE); + } + /** * Returns the size of the RelTraitSet. * diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/plan/volcano/VolcanoPlanner.java b/polardbx-calcite/src/main/java/org/apache/calcite/plan/volcano/VolcanoPlanner.java index 32d64460e..469a5b647 100644 --- a/polardbx-calcite/src/main/java/org/apache/calcite/plan/volcano/VolcanoPlanner.java +++ b/polardbx-calcite/src/main/java/org/apache/calcite/plan/volcano/VolcanoPlanner.java @@ -173,6 +173,8 @@ public class VolcanoPlanner extends AbstractRelOptPlanner { boolean enableDerive = true; + boolean enableColumnar = false; + /** * Extra roots for explorations. 
*/ @@ -401,7 +403,7 @@ protected void registerMaterializations() { this.root = null; this.originalRoot = null; if (ruleCounter != null) { - ruleCount = ruleCounter.values().stream().reduce(0L, (a,b) -> a + b).longValue(); + ruleCount = ruleCounter.values().stream().reduce(0L, Long::sum); this.ruleCounter.clear(); } } @@ -1720,7 +1722,7 @@ public RelOptCost getStartUpCost(RelNode rel, RelMetadataQuery mq) { return costFactory.makeInfiniteCost(); } // use new mq to avoid metadata cache - mq = RelMetadataQuery.instance(); + mq = RelMetadataQuery.instance(mq.metadataProvider); // for efficiency, use RelSubsetBest Cost For CumulativeCost mq.setUseRelSubsetBestCostForCumulativeCost(true); RelOptCost cost = mq.getStartUpCost(rel); @@ -1765,4 +1767,12 @@ public boolean isEnableDerive() { public void setEnableDerive(boolean enableDerive) { this.enableDerive = enableDerive; } + + public boolean isEnableColumnar() { + return enableColumnar; + } + + public void setEnableColumnar(boolean enableColumnar) { + this.enableColumnar = enableColumnar; + } } diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/AbstractRelNode.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/AbstractRelNode.java index 29f0db311..44ef93379 100644 --- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/AbstractRelNode.java +++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/AbstractRelNode.java @@ -321,7 +321,6 @@ public RelNode onRegister(RelOptPlanner planner) { r = copy(getTraitSet(), inputs); } r.recomputeDigest(); - assert r.isValid(Litmus.THROW, null); return r; } diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelDistribution.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelDistribution.java index 22b722ee6..997c95265 100644 --- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelDistribution.java +++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelDistribution.java @@ -48,6 +48,10 @@ public interface RelDistribution extends RelMultipleTrait { */ @Nonnull List getKeys(); + boolean isShardWise(); + + @Nonnull Integer getShardCnt(); + RelDistribution apply(Mappings.TargetMapping mapping); /** Type of distribution. */ diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelDistributions.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelDistributions.java index 959303fb1..a905efe9d 100644 --- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelDistributions.java +++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelDistributions.java @@ -73,6 +73,13 @@ public static RelDistribution hash(Collection numbers) { return RelDistributionTraitDef.INSTANCE.canonize(trait); } + public static RelDistribution hashOss(Collection numbers, int shard) { + ImmutableIntList list = ImmutableIntList.copyOf(numbers); + RelDistributionImpl trait = + new RelDistributionImpl(RelDistribution.Type.HASH_DISTRIBUTED, list, shard); + return RelDistributionTraitDef.INSTANCE.canonize(trait); + } + /** Creates a range distribution. 
*/ public static RelDistribution range(Collection numbers) { ImmutableIntList list = ImmutableIntList.copyOf(numbers); @@ -81,8 +88,8 @@ public static RelDistribution range(Collection numbers) { return RelDistributionTraitDef.INSTANCE.canonize(trait); } - public static RelDistribution of(RelDistribution.Type type, ImmutableIntList keys) { - RelDistribution distribution = new RelDistributionImpl(type, keys); + public static RelDistribution of(RelDistribution.Type type, ImmutableIntList keys, Integer shardCnt) { + RelDistribution distribution = new RelDistributionImpl(type, keys, shardCnt); return RelDistributionTraitDef.INSTANCE.canonize(distribution); } @@ -104,9 +111,12 @@ public static class RelDistributionImpl implements RelDistribution { private final Type type; private final ImmutableIntList keys; + private final Integer shardCnt; + public RelDistributionImpl(Type type, ImmutableIntList keys) { this.type = Preconditions.checkNotNull(type); this.keys = ImmutableIntList.copyOf(keys); + this.shardCnt = 0; // assert type != Type.HASH_DISTRIBUTED // || keys.size() < 2 // || Ordering.natural().isOrdered(keys) @@ -116,22 +126,32 @@ public RelDistributionImpl(Type type, ImmutableIntList keys) { || keys.isEmpty(); } + public RelDistributionImpl(Type type, ImmutableIntList keys, Integer shardCnt) { + this.type = Preconditions.checkNotNull(type); + this.keys = ImmutableIntList.copyOf(keys); + this.shardCnt = shardCnt == null ? 0 : shardCnt; + assert type == Type.HASH_DISTRIBUTED + || type == Type.RANDOM_DISTRIBUTED + || keys.isEmpty(); + } + @Override public int hashCode() { - return Objects.hash(type, keys); + return Objects.hash(type, keys, shardCnt); } @Override public boolean equals(Object obj) { return this == obj || obj instanceof RelDistributionImpl && type == ((RelDistributionImpl) obj).type - && keys.equals(((RelDistributionImpl) obj).keys); + && keys.equals(((RelDistributionImpl) obj).keys) + && shardCnt.equals(((RelDistributionImpl) obj).shardCnt); } @Override public String toString() { if (keys.isEmpty()) { return type.shortName; } else { - return type.shortName + keys; + return shardCnt > 0 ? type.shortName + keys + shardCnt : type.shortName + keys ; } } @@ -143,6 +163,14 @@ public RelDistributionImpl(Type type, ImmutableIntList keys) { return keys; } + public boolean isShardWise() { + return shardCnt > 0; + } + + @Nonnull public Integer getShardCnt() { + return shardCnt; + } + public RelDistributionTraitDef getTraitDef() { return RelDistributionTraitDef.INSTANCE; } @@ -158,7 +186,7 @@ public RelDistribution apply(Mappings.TargetMapping mapping) { } List mappedKeys0 = Mappings.apply2((Mapping) mapping, keys); ImmutableIntList mappedKeys = normalizeKeys(mappedKeys0); - return of(type, mappedKeys); + return of(type, mappedKeys, shardCnt); } public boolean satisfies(RelTrait trait) { @@ -172,7 +200,7 @@ public boolean satisfies(RelTrait trait) { case HASH_DISTRIBUTED: // The "leading edge" property of Range does not apply to Hash. // Only Hash[x, y] satisfies Hash[x, y]. 
- return keys.equals(distribution.keys); + return keys.equals(distribution.keys) && shardCnt.equals(distribution.shardCnt); case RANGE_DISTRIBUTED: // Range[x, y] satisfies Range[x, y, z] but not Range[x] return Util.startsWith(distribution.keys, keys); @@ -203,7 +231,11 @@ public void register(RelOptPlanner planner) { if (type == distribution.getType() && (type == Type.HASH_DISTRIBUTED || type == Type.RANGE_DISTRIBUTED)) { - return ORDERING.compare(getKeys(), distribution.getKeys()); + int order = ORDERING.compare(getKeys(), distribution.getKeys()); + if (order == 0) { + return Integer.compare(getShardCnt(), distribution.getShardCnt()); + } + return order; } return type.compareTo(distribution.getType()); diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelInput.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelInput.java index 10732a871..576a9b972 100644 --- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelInput.java +++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelInput.java @@ -104,6 +104,8 @@ public interface RelInput { RelDistribution getDistribution(); + RelPartitionWise getPartitionWise(); + ImmutableList> getTuples(String tag); ImmutableList> getDynamicTuples(String tag); diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelPartitionWise.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelPartitionWise.java new file mode 100644 index 000000000..7653ee577 --- /dev/null +++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelPartitionWise.java @@ -0,0 +1,29 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.calcite.rel; + +import org.apache.calcite.plan.RelMultipleTrait; + +public interface RelPartitionWise extends RelMultipleTrait { + + //~ Methods ---------------------------------------------------------------- + boolean isLocalPartition(); + + boolean isRemotePartition(); + + int getCode(); +} \ No newline at end of file diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelPartitionWiseTraitDef.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelPartitionWiseTraitDef.java new file mode 100644 index 000000000..17133441b --- /dev/null +++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelPartitionWiseTraitDef.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.calcite.rel; + +import org.apache.calcite.plan.RelOptPlanner; +import org.apache.calcite.plan.RelTraitDef; +import org.apache.calcite.plan.RelTraitSet; +import org.apache.calcite.rel.core.Exchange; +import org.apache.calcite.rel.logical.LogicalExchange; + +public class RelPartitionWiseTraitDef extends RelTraitDef { + public static final RelPartitionWiseTraitDef INSTANCE = + new RelPartitionWiseTraitDef(); + + private RelPartitionWiseTraitDef() { + } + + public Class getTraitClass() { + return RelPartitionWise.class; + } + + public String getSimpleName() { + return "part"; + } + + public RelPartitionWise getDefault() { + return RelPartitionWises.ANY; + } + + public RelNode convert(RelOptPlanner planner, RelNode rel, + RelPartitionWise toDistribution, boolean allowInfiniteCostConverters) { + return rel; + } + + public boolean canConvert(RelOptPlanner planner, RelPartitionWise fromTrait, + RelPartitionWise toTrait) { + return false; + } +} + +// End RelDistributionTraitDef.java diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelPartitionWises.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelPartitionWises.java new file mode 100644 index 000000000..e0add7acf --- /dev/null +++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelPartitionWises.java @@ -0,0 +1,117 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.calcite.rel; + +import org.apache.calcite.plan.RelMultipleTrait; +import org.apache.calcite.plan.RelOptPlanner; +import org.apache.calcite.plan.RelTrait; +import org.apache.calcite.plan.RelTraitDef; +import org.jetbrains.annotations.NotNull; + +public class RelPartitionWises { + + public static RelPartitionWise ANY = new RelPartitionWiseImpl(false, false); + + public static RelPartitionWise LOCAL = new RelPartitionWiseImpl(true, false); + + public static RelPartitionWise REMOTE = new RelPartitionWiseImpl(false, true); + + public static RelPartitionWise ALL = new RelPartitionWiseImpl(true, true); + + public static class RelPartitionWiseImpl implements RelPartitionWise { + private static final int localPartitionMask = 1 << 1; + + private static final int remotePartitionMask = 1; + + private final int code; + + public RelPartitionWiseImpl(boolean localPartition, boolean remotePartition) { + this.code = (localPartition ? localPartitionMask : 0) + + (remotePartition ? 
remotePartitionMask : 0); + } + + @Override + public boolean isTop() { + return !(isLocalPartition() || isRemotePartition()); + } + + @Override + public RelTraitDef getTraitDef() { + return RelPartitionWiseTraitDef.INSTANCE; + } + + @Override + public boolean satisfies(RelTrait trait) { + if (!(trait instanceof RelPartitionWise)) { + return false; + } + RelPartitionWise rel = (RelPartitionWise) trait; + return (isLocalPartition() == rel.isLocalPartition()) && (isRemotePartition() == rel.isRemotePartition()); + } + + @Override + public void register(RelOptPlanner planner) { + } + + @Override + public boolean isLocalPartition() { + return (code & localPartitionMask) > 0; + } + + @Override + public boolean isRemotePartition() { + return (code & remotePartitionMask) > 0; + } + + @Override + public int compareTo(@NotNull RelMultipleTrait o) { + RelPartitionWise partitionWise = (RelPartitionWise) o; + return Integer.compare(getCode(), partitionWise.getCode()); + } + + @Override + public int getCode() { + return code; + } + + @Override + public int hashCode() { + return code; + } + + @Override + public boolean equals(Object obj) { + return this == obj + || obj instanceof RelPartitionWises.RelPartitionWiseImpl + && code == ((RelPartitionWises.RelPartitionWiseImpl) obj).code; + } + + @Override + public String toString() { + switch (code) { + case localPartitionMask: + return "[local]"; + case remotePartitionMask: + return "[remote]"; + case localPartitionMask + remotePartitionMask: + return "[local, remote]"; + default: + return "[]"; + } + } + } +} diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelShuttleImpl.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelShuttleImpl.java index 3aa4420ea..43d9e8583 100644 --- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelShuttleImpl.java +++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/RelShuttleImpl.java @@ -17,9 +17,6 @@ package org.apache.calcite.rel; import org.apache.calcite.linq4j.Ord; -import org.apache.calcite.rel.core.Join; -import org.apache.calcite.rel.core.Project; -import org.apache.calcite.rel.core.SemiJoin; import org.apache.calcite.rel.core.TableFunctionScan; import org.apache.calcite.rel.core.TableScan; import org.apache.calcite.rel.logical.LogicalAggregate; @@ -36,7 +33,6 @@ import org.apache.calcite.rel.logical.LogicalSemiJoin; import org.apache.calcite.rel.logical.LogicalSort; import org.apache.calcite.rel.logical.LogicalTableLookup; -import org.apache.calcite.rel.logical.LogicalTableScan; import org.apache.calcite.rel.logical.LogicalUnion; import org.apache.calcite.rel.logical.LogicalValues; import org.apache.calcite.rel.rules.MultiJoin; diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/AggregateCall.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/AggregateCall.java index f05c4af24..8cd8c4f5a 100644 --- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/AggregateCall.java +++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/AggregateCall.java @@ -16,18 +16,18 @@ */ package org.apache.calcite.rel.core; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.externalize.RexExplainVisitor; import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rel.type.RelDataTypeFactory; import org.apache.calcite.sql.SqlAggFunction; import org.apache.calcite.sql.type.SqlTypeUtil; +import 
org.apache.calcite.util.Optionality; import org.apache.calcite.util.mapping.Mapping; import org.apache.calcite.util.mapping.Mappings; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; - import java.util.List; import java.util.Objects; @@ -38,16 +38,16 @@ public class AggregateCall { //~ Instance fields -------------------------------------------------------- - private final SqlAggFunction aggFunction; + protected final SqlAggFunction aggFunction; - private final boolean distinct; - private final boolean approximate; + protected final boolean distinct; + protected final boolean approximate; public final RelDataType type; public final String name; // We considered using ImmutableIntList but we would not save much memory: // since all values are small, ImmutableList uses cached Integer values. - private final ImmutableList argList; + protected final ImmutableList argList; public final int filterArg; //~ Constructors ----------------------------------------------------------- @@ -147,7 +147,9 @@ public static AggregateCall create(SqlAggFunction aggFunction, public static AggregateCall create(SqlAggFunction aggFunction, boolean distinct, boolean approximate, List argList, int filterArg, RelDataType type, String name) { - return new AggregateCall(aggFunction, distinct, approximate, argList, + final boolean distinct2 = distinct + && (aggFunction.getDistinctOptionality() != Optionality.IGNORED); + return new AggregateCall(aggFunction, distinct2, approximate, argList, filterArg, type, name); } @@ -251,6 +253,10 @@ public boolean hasFilter() { return filterArg >= 0; } + public int getFilterArg() { + return filterArg; + } + @Override public boolean equals(Object o) { if (!(o instanceof AggregateCall)) { return false; @@ -292,19 +298,14 @@ public AggregateCall copy(List args, int filterArg) { filterArg, type, name); } - public AggregateCall copy(List args, int filterArg, boolean isDistinct) { - return new AggregateCall(aggFunction, isDistinct, approximate, args, - filterArg, type, name); - } - public AggregateCall copy(List args, int filterArg, boolean isDistinct, String newName) { return new AggregateCall(aggFunction, isDistinct, approximate, args, filterArg, type, newName); } - @Deprecated // to be removed before 2.0 - public AggregateCall copy(List args) { - return copy(args, filterArg); + public AggregateCall withDistinct(boolean distinct) { + return distinct == this.distinct ? 
this + : new AggregateCall(aggFunction, distinct, approximate, argList, filterArg, type, name); } /** diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/DynamicValues.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/DynamicValues.java index 12ce24bbf..b83d0616d 100644 --- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/DynamicValues.java +++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/DynamicValues.java @@ -69,6 +69,11 @@ public static DynamicValues create(RelOptCluster cluster, RelDataType rowType, return new DynamicValues(cluster, cluster.traitSet(), rowType, tuples); } + public static DynamicValues create(RelOptCluster cluster, RelTraitSet traits, RelDataType rowType, + ImmutableList> tuples) { + return new DynamicValues(cluster, traits, rowType, tuples); + } + @Override protected RelDataType deriveRowType() { return rowType; @@ -111,8 +116,8 @@ public RelWriter explainTerms(RelWriter pw) { return super.explainTerms(pw) .itemIf("type", rowType, pw.getDetailLevel() == SqlExplainLevel.DIGEST_ATTRIBUTES) - .itemIf("type", rowType.getFieldList(), pw.nest()) - .itemIf("tuples", tuples, pw.nest()); + .itemIf("type", rowType.getFieldList(), true) + .itemIf("tuples", tuples, true); } @Override diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/Filter.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/Filter.java index 22a74f753..a1c12b16b 100644 --- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/Filter.java +++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/Filter.java @@ -26,7 +26,9 @@ import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.RelWriter; import org.apache.calcite.rel.SingleRel; +import org.apache.calcite.rel.metadata.JaninoRelMetadataProvider; import org.apache.calcite.rel.metadata.RelMdUtil; +import org.apache.calcite.rel.metadata.RelMetadataProvider; import org.apache.calcite.rel.metadata.RelMetadataQuery; import org.apache.calcite.rex.RexChecker; import org.apache.calcite.rex.RexNode; @@ -145,13 +147,21 @@ public RexNode getCondition() { @Deprecated // to be removed before 2.0 public static double estimateFilteredRows(RelNode child, RexProgram program) { - final RelMetadataQuery mq = RelMetadataQuery.instance(); + final RelMetadataQuery mq; + RelMetadataProvider provider = child.getCluster().getMetadataProvider(); + // provider in cluster cannot be a JaninoRelMetadataProvider, + // for it doesn't support RelMetadataProvider.apply method + mq = RelMetadataQuery.instance(JaninoRelMetadataProvider.of(provider)); return RelMdUtil.estimateFilteredRows(child, program, mq); } @Deprecated // to be removed before 2.0 public static double estimateFilteredRows(RelNode child, RexNode condition) { - final RelMetadataQuery mq = RelMetadataQuery.instance(); + final RelMetadataQuery mq; + RelMetadataProvider provider = child.getCluster().getMetadataProvider(); + // provider in cluster cannot be a JaninoRelMetadataProvider, + // for it doesn't support RelMetadataProvider.apply method + mq = RelMetadataQuery.instance(JaninoRelMetadataProvider.of(provider)); return RelMdUtil.estimateFilteredRows(child, condition, mq); } diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/GroupConcatAggregateCall.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/GroupConcatAggregateCall.java index fd1dc39c8..3998fa99c 100644 --- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/GroupConcatAggregateCall.java +++ 
b/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/GroupConcatAggregateCall.java @@ -78,6 +78,25 @@ public GroupConcatAggregateCall copy(List args, int filterArg, List args, int filterArg) { + return new GroupConcatAggregateCall(aggFunction, distinct, approximate, args, + filterArg, type, name, orderList, separator, ascOrDescList); + } + + @Override + public GroupConcatAggregateCall copy(List args, int filterArg, boolean isDistinct, String newName) { + return new GroupConcatAggregateCall(aggFunction, isDistinct, approximate, args, + filterArg, type, newName, orderList, separator, ascOrDescList); + } + + @Override + public AggregateCall withDistinct(boolean distinct) { + return distinct == this.distinct ? this + : new GroupConcatAggregateCall(aggFunction, distinct, approximate, argList, filterArg, type, name, + orderList, separator, ascOrDescList); + } + @Override public AggregateCall adaptTo(RelNode input, List argList, int filterArg, int oldGroupKeyCount, int newGroupKeyCount) { diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/TableModify.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/TableModify.java index 709448dd5..39e4b8ace 100644 --- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/TableModify.java +++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/TableModify.java @@ -747,6 +747,8 @@ public TableInfoNode(SqlNode table, SqlNode tableWithAlias, List re if (table instanceof SqlIdentifier || table instanceof SqlDynamicParam) { // Table this.columnCount = refTables.get(0).getRowType().getFieldCount(); + } else if (RelOptUtil.isUnion(table)) { + this.columnCount = RelOptUtil.getColumnCount(table); } else { // Subquery SqlSelect subquery = (SqlSelect) table; diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/TableScan.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/TableScan.java index 73816e44a..8b65158b3 100644 --- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/TableScan.java +++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/TableScan.java @@ -35,6 +35,7 @@ import org.apache.calcite.rex.RexUtil; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.SqlNodeList; +import org.apache.calcite.sql.SqlOperator; import org.apache.calcite.sql.parser.SqlParserPos; import org.apache.calcite.tools.RelBuilder; import org.apache.calcite.util.ImmutableBitSet; @@ -78,6 +79,11 @@ public abstract class TableScan extends AbstractRelNode { */ protected RexNode flashback; + /** + * 记录AS OF 种类:AS_OF/AS_OF_80/AS_OF_57 + */ + protected SqlOperator flashbackOperator; + /** * This tableName identifier's partitions of mysql partition selection syntax *
@@ -108,11 +114,11 @@ protected TableScan(RelOptCluster cluster, RelTraitSet traitSet,
 
   protected TableScan(RelOptCluster cluster, RelTraitSet traitSet,
                       RelOptTable table, SqlNodeList hints, SqlNode indexNode) {
-    this(cluster, traitSet, table, hints, indexNode, null, null);
+    this(cluster, traitSet, table, hints, indexNode, null, null, null);
   }
 
   protected TableScan(RelOptCluster cluster, RelTraitSet traitSet, RelOptTable table, SqlNodeList hints,
-                      SqlNode indexNode, RexNode flashback, SqlNode partitions) {
+                      SqlNode indexNode, RexNode flashback, SqlOperator flashbackOperator, SqlNode partitions) {
     super(cluster, traitSet);
     this.table = table;
     this.hints = hints;
@@ -121,6 +127,7 @@ protected TableScan(RelOptCluster cluster, RelTraitSet traitSet, RelOptTable tab
       cluster.getPlanner().registerSchema(table.getRelOptSchema());
     }
     this.flashback = flashback;
+    this.flashbackOperator = flashbackOperator;
     this.partitions = partitions;
   }
   /**
@@ -265,6 +272,14 @@ public void setFlashback(RexNode flashback) {
     this.flashback = flashback;
   }
 
+  public SqlOperator getFlashbackOperator() {
+    return flashbackOperator;
+  }
+
+  public void setFlashbackOperator(SqlOperator flashbackOperator) {
+    this.flashbackOperator = flashbackOperator;
+  }
+
   public SqlNode getPartitions() {
     return partitions;
   }
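
Note: the new flashbackOperator field records which AS OF variant (AS_OF, AS_OF_80, or AS_OF_57, per the field comment) produced the flashback expression, so the variant can be reproduced when the scan is rendered back to SQL. A minimal sketch of a hypothetical consumer; describeFlashback is illustrative and not part of this patch:

    import org.apache.calcite.rel.core.TableScan;
    import org.apache.calcite.sql.SqlOperator;

    class FlashbackDescriber {
      // Hypothetical helper: report which AS OF flavor a TableScan carries.
      static String describeFlashback(TableScan scan) {
        if (scan.getFlashback() == null) {
          return "no flashback clause";
        }
        SqlOperator op = scan.getFlashbackOperator();
        // A scan may carry a flashback expression without a recorded operator.
        return op == null ? "AS OF (variant unknown)" : op.getName();
      }
    }
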
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/Window.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/Window.java
index 58f228601..d388fd392 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/Window.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/Window.java
@@ -64,7 +64,6 @@
 public abstract class Window extends SingleRel {
  public final ImmutableList<Group>      groups;
  public final ImmutableList<RexLiteral> constants;
-  private      RelOptCost                fixedCost;
 
   /**
    * Creates a window relational expression.
@@ -185,14 +184,6 @@ public List<RexLiteral> getConstants() {
     return planner.getCostFactory().makeCost(rowsIn, rowsIn * count, 0,0,0);
   }
 
-  public void setFixedCost(RelOptCost relOptCost) {
-    this.fixedCost = relOptCost;
-  }
-
-  public RelOptCost getFixedCost() {
-    return fixedCost;
-  }
-
   /**
    * Group of windowed aggregate calls that have the same window specification.
    *
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/WindowAggregateCall.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/WindowAggregateCall.java
index 2f056fc9d..78456a56b 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/WindowAggregateCall.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/core/WindowAggregateCall.java
@@ -67,6 +67,25 @@ public WindowAggregateCall copy(List<Integer> args, int filterArg,
+    @Override
+    public WindowAggregateCall copy(List<Integer> args, int filterArg) {
+        return new WindowAggregateCall(aggFunction, distinct, approximate, args,
+            filterArg, type, name, constants, offset);
+    }
+
+    @Override
+    public AggregateCall copy(List<Integer> args, int filterArg, boolean isDistinct, String newName) {
+        return new WindowAggregateCall(aggFunction, isDistinct, approximate, args,
+            filterArg, type, newName, constants, offset);
+    }
+
+    @Override
+    public AggregateCall withDistinct(boolean distinct) {
+        return distinct == this.distinct ? this
+            : new WindowAggregateCall(aggFunction, distinct, approximate, argList, filterArg, type, name,
+            constants, offset);
+    }
+
 
     public AggregateCall create(SqlAggFunction aggFunction,
                                boolean distinct, boolean approximate, List<Integer> argList,
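
Note: together with the GroupConcatAggregateCall changes above, every AggregateCall subclass now overrides the copy/withDistinct family so that subclass payload (constants and offset here; order list and separator for GROUP_CONCAT) survives a copy. withDistinct is copy-on-write and returns this when the flag already matches. A minimal usage sketch, assuming call is an existing AggregateCall:

    // Drop DISTINCT without losing subclass-specific state (constants, offset, ...).
    AggregateCall plain = call.withDistinct(false);
    // A matching flag returns the same instance; no allocation on that path.
    assert call.withDistinct(call.isDistinct()) == call;
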
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/AlterInstance.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/AlterInstance.java
new file mode 100644
index 000000000..03ac5c460
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/AlterInstance.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.rel.ddl;
+
+import org.apache.calcite.plan.Convention;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.DDL;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.sql.SqlAlterInstance;
+import org.apache.calcite.sql.SqlNode;
+
+import java.util.List;
+
+/**
+ * Created by zhuqiwei.
+ *
+ * @author zhuqiwei
+ */
+public class AlterInstance extends DDL {
+    public AlterInstance(RelOptCluster cluster, RelTraitSet traitSet, SqlNode sqlNode, RelDataType rowType) {
+        super(cluster, traitSet, null);
+        this.sqlNode = sqlNode;
+    }
+
+    public static AlterInstance create(SqlAlterInstance alterInstance, RelDataType rowType, RelOptCluster cluster) {
+        return new AlterInstance(cluster, cluster.traitSetOf(Convention.NONE), alterInstance, rowType);
+    }
+
+    @Override
+    public AlterInstance copy(RelTraitSet traitSet, List<RelNode> inputs) {
+        assert traitSet.containsIfApplicable(Convention.NONE);
+        return new AlterInstance(this.getCluster(), traitSet, ((AlterInstance) inputs.get(0)).getAst(), rowType);
+    }
+}
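
Note: AlterInstance follows the pattern shared by the DDL relational nodes in this package: create() wraps the parsed statement in a Convention.NONE trait set, and copy() carries the original AST through planner transformations. A minimal sketch of the intended call site, assuming cluster, rowType, and a parsed sqlAlterInstance are in scope:

    // Wrap a parsed ALTER INSTANCE statement as a planner DDL node.
    AlterInstance rel = AlterInstance.create(sqlAlterInstance, rowType, cluster);
    assert rel.getTraitSet().containsIfApplicable(Convention.NONE);
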
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/AlterTableDiscardTableSpace.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/AlterTableDiscardTableSpace.java
new file mode 100644
index 000000000..f54fe7901
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/AlterTableDiscardTableSpace.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.rel.ddl;
+
+import org.apache.calcite.plan.Convention;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.DDL;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.sql.SqlAlterJoinGroup;
+import org.apache.calcite.sql.SqlAlterTableDiscardTableSpace;
+import org.apache.calcite.sql.SqlDdl;
+import org.apache.calcite.sql.SqlIdentifier;
+import org.apache.calcite.sql.SqlNode;
+import org.apache.calcite.sql.parser.SqlParserPos;
+
+import java.util.List;
+
+/**
+ * Created by luoyanxin.
+ *
+ * @author luoyanxin
+ */
+public class AlterTableDiscardTableSpace extends DDL {
+    protected AlterTableDiscardTableSpace(RelOptCluster cluster, RelTraitSet traits,
+                                          SqlDdl ddl, SqlNode tableNameNode) {
+        super(cluster, traits, null);
+        this.sqlNode = ddl;
+        this.setTableName(tableNameNode);
+    }
+
+    public static AlterTableDiscardTableSpace create(SqlAlterTableDiscardTableSpace sqlAlterTableDiscardTableSpace,
+                                                     SqlNode tableNameNode, RelOptCluster cluster) {
+        return new AlterTableDiscardTableSpace(cluster, cluster.traitSetOf(Convention.NONE),
+            sqlAlterTableDiscardTableSpace, tableNameNode);
+    }
+
+    @Override
+    public AlterTableDiscardTableSpace copy(RelTraitSet traitSet, List<RelNode> inputs) {
+        assert traitSet.containsIfApplicable(Convention.NONE);
+        return new AlterTableDiscardTableSpace(this.getCluster(), traitSet,
+            ((AlterTableDiscardTableSpace) inputs.get(0)).getAst(), getTableName());
+    }
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/AlterTableImportTableSpace.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/AlterTableImportTableSpace.java
new file mode 100644
index 000000000..5e2492c48
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/AlterTableImportTableSpace.java
@@ -0,0 +1,56 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.rel.ddl;
+
+import org.apache.calcite.plan.Convention;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.DDL;
+import org.apache.calcite.sql.SqlAlterTableDiscardTableSpace;
+import org.apache.calcite.sql.SqlAlterTableImportTableSpace;
+import org.apache.calcite.sql.SqlDdl;
+import org.apache.calcite.sql.SqlNode;
+
+import java.util.List;
+
+/**
+ * Created by luoyanxin.
+ *
+ * @author luoyanxin
+ */
+public class AlterTableImportTableSpace extends DDL {
+    protected AlterTableImportTableSpace(RelOptCluster cluster, RelTraitSet traits,
+                                         SqlDdl ddl, SqlNode tableNameNode) {
+        super(cluster, traits, null);
+        this.sqlNode = ddl;
+        this.setTableName(tableNameNode);
+    }
+
+    public static AlterTableImportTableSpace create(SqlAlterTableImportTableSpace sqlAlterTableImportTableSpace,
+                                                    SqlNode tableNameNode, RelOptCluster cluster) {
+        return new AlterTableImportTableSpace(cluster, cluster.traitSetOf(Convention.NONE),
+            sqlAlterTableImportTableSpace, tableNameNode);
+    }
+
+    @Override
+    public AlterTableImportTableSpace copy(RelTraitSet traitSet, List<RelNode> inputs) {
+        assert traitSet.containsIfApplicable(Convention.NONE);
+        return new AlterTableImportTableSpace(this.getCluster(), traitSet,
+            ((AlterTableImportTableSpace) inputs.get(0)).getAst(), getTableName());
+    }
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/AlterTableSetTableGroup.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/AlterTableSetTableGroup.java
index f91c0ba3d..93fa770af 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/AlterTableSetTableGroup.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/AlterTableSetTableGroup.java
@@ -16,21 +16,17 @@
 
 package org.apache.calcite.rel.ddl;
 
-import com.alibaba.polardbx.common.exception.NotSupportException;
 import org.apache.calcite.plan.Convention;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelTraitSet;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.DDL;
 import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.sql.SqlDdl;
 import org.apache.calcite.sql.SqlIdentifier;
 import org.apache.calcite.sql.SqlNode;
-import org.apache.calcite.sql.parser.SqlParserPos;
 
 import java.util.List;
-import java.util.Map;
 
 /**
  * Created by luoyanxin.
@@ -41,33 +37,39 @@ public class AlterTableSetTableGroup extends DDL {
     final String tableGroupName;
     final List objectNames;
     final boolean force;
+    final boolean implicit;
 
     protected AlterTableSetTableGroup(RelOptCluster cluster, RelTraitSet traits, SqlDdl ddl,
                                       RelDataType rowType,
                                       List objectNames,
                                       SqlNode tableName,
                                       String tableGroupName,
+                                      boolean implicit,
                                       boolean force) {
         super(cluster, traits, ddl, rowType);
         this.tableGroupName = tableGroupName;
         this.sqlNode = ddl;
         this.objectNames = objectNames;
         this.setTableName(tableName);
+        this.implicit = implicit;
         this.force = force;
     }
 
     public static AlterTableSetTableGroup create(RelOptCluster cluster, RelTraitSet traits, SqlDdl ddl,
-                                                 RelDataType rowType, List objectNames, SqlNode tableName,
-                                                 String tableGroupName, boolean force) {
+                                                 RelDataType rowType, List objectNames,
+                                                 SqlNode tableName,
+                                                 String tableGroupName, boolean implicit, boolean force) {
 
-        return new AlterTableSetTableGroup(cluster, traits, ddl, rowType, objectNames, tableName, tableGroupName, force);
+        return new AlterTableSetTableGroup(cluster, traits, ddl, rowType, objectNames, tableName, tableGroupName,
+            implicit, force);
     }
 
     @Override
     public AlterTableSetTableGroup copy(
        RelTraitSet traitSet, List<RelNode> inputs) {
         assert traitSet.containsIfApplicable(Convention.NONE);
-        return new AlterTableSetTableGroup(this.getCluster(), traitSet, this.ddl, rowType, this.objectNames, getTableName(), tableGroupName, force);
+        return new AlterTableSetTableGroup(this.getCluster(), traitSet, this.ddl, rowType, this.objectNames,
+            getTableName(), tableGroupName, implicit, force);
     }
 
     public String getTableGroupName() {
@@ -81,4 +83,8 @@ public List getObjectNames() {
     public boolean isForce() {
         return force;
     }
+
+    public boolean isImplicit() {
+        return implicit;
+    }
 }
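
Note: the new implicit flag is threaded through both create() and copy(), so it survives planner copies exactly as force does. A hedged sketch of the extended factory call, with all arguments other than the two flags assumed to be in scope:

    AlterTableSetTableGroup rel = AlterTableSetTableGroup.create(
        cluster, traits, ddl, rowType, objectNames, tableName,
        tableGroupName, /* implicit */ true, /* force */ false);
    assert rel.isImplicit() && !rel.isForce();
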
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/ClearFileStorage.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/ClearFileStorage.java
new file mode 100644
index 000000000..a2bd971d9
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/ClearFileStorage.java
@@ -0,0 +1,56 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.rel.ddl;
+
+import org.apache.calcite.plan.Convention;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.DDL;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.sql.SqlDdl;
+import org.apache.calcite.sql.SqlIdentifier;
+import org.apache.calcite.sql.parser.SqlParserPos;
+
+import java.util.List;
+
+public class ClearFileStorage extends DDL {
+    final String fileStorageName;
+
+    protected ClearFileStorage(RelOptCluster cluster, RelTraitSet traits, SqlDdl ddl,
+                               RelDataType rowType, String fileStorageName) {
+        super(cluster, traits, ddl, rowType);
+        this.fileStorageName = fileStorageName;
+        this.sqlNode = ddl;
+        this.setTableName(new SqlIdentifier(fileStorageName, SqlParserPos.ZERO));
+    }
+
+    public static ClearFileStorage create(RelOptCluster cluster, RelTraitSet traits, SqlDdl ddl,
+                                          RelDataType rowType, String fileStorageName) {
+        return new ClearFileStorage(cluster, traits, ddl, rowType, fileStorageName);
+    }
+
+    @Override
+    public ClearFileStorage copy(RelTraitSet traitSet, List<RelNode> inputs) {
+        assert traitSet.containsIfApplicable(Convention.NONE);
+        return new ClearFileStorage(this.getCluster(), traitSet, this.ddl, rowType, fileStorageName);
+    }
+
+    public String getFileStorageName() {
+        return fileStorageName;
+    }
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/ConvertAllSequences.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/ConvertAllSequences.java
new file mode 100644
index 000000000..624e95085
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/ConvertAllSequences.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.rel.ddl;
+
+import org.apache.calcite.plan.Convention;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.DDL;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.sql.SqlConvertAllSequences;
+import org.apache.calcite.sql.SqlNode;
+
+import java.util.List;
+
+/**
+ * Created by zhuqiwei.
+ *
+ * @author zhuqiwei
+ */
+public class ConvertAllSequences extends DDL {
+    public ConvertAllSequences(RelOptCluster cluster, RelTraitSet traitSet, SqlNode sqlNode, RelDataType relDataType) {
+        super(cluster, traitSet, null);
+        this.sqlNode = sqlNode;
+    }
+
+    public static ConvertAllSequences create(SqlConvertAllSequences sqlConvertAllSequences, RelDataType relDataType,
+                                             RelOptCluster cluster) {
+        return new ConvertAllSequences(cluster, cluster.traitSetOf(Convention.NONE), sqlConvertAllSequences,
+            relDataType);
+    }
+
+    @Override
+    public ConvertAllSequences copy(RelTraitSet traitSet, List<RelNode> inputs) {
+        assert traitSet.containsIfApplicable(Convention.NONE);
+        return new ConvertAllSequences(this.getCluster(), traitSet, ((ConvertAllSequences) (inputs.get(0))).getAst(),
+            rowType);
+    }
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/CreateView.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/CreateView.java
new file mode 100644
index 000000000..1c04ec929
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/CreateView.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.rel.ddl;
+
+import org.apache.calcite.plan.Convention;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.DDL;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.sql.SqlCreateView;
+import org.apache.calcite.sql.SqlDdl;
+import org.apache.calcite.sql.SqlIdentifier;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.util.Util;
+
+import java.util.List;
+
+public class CreateView extends DDL {
+
+    protected CreateView(RelOptCluster cluster, RelTraitSet traits, SqlDdl ddl, RelDataType rowType) {
+        super(cluster, traits, ddl, rowType);
+        this.sqlNode = ddl;
+
+        SqlCreateView sqlCreateView = (SqlCreateView) ddl;
+        this.setTableName(sqlCreateView.getName());
+    }
+
+    public static CreateView create(SqlCreateView sqlCreateView, RelDataType rowType, RelOptCluster cluster) {
+        return new CreateView(cluster, cluster.traitSetOf(Convention.NONE), sqlCreateView, rowType);
+    }
+
+    @Override
+    public CreateView copy(RelTraitSet traitSet, List<RelNode> inputs) {
+        assert traitSet.containsIfApplicable(Convention.NONE);
+        return new CreateView(this.getCluster(), traitSet, ((CreateView) inputs.get(0)).getAst(), rowType);
+    }
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/DropView.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/DropView.java
new file mode 100644
index 000000000..2190e4cee
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/DropView.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.rel.ddl;
+
+import org.apache.calcite.plan.Convention;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.DDL;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.sql.SqlCreateView;
+import org.apache.calcite.sql.SqlDdl;
+import org.apache.calcite.sql.SqlDropView;
+
+import java.util.List;
+
+public class DropView extends DDL {
+
+    protected DropView(RelOptCluster cluster, RelTraitSet traits, SqlDdl ddl, RelDataType rowType) {
+        super(cluster, traits, ddl, rowType);
+        this.sqlNode = ddl;
+
+        SqlDropView sqlDropView = (SqlDropView) ddl;
+        this.setTableName(sqlDropView.getName());
+    }
+
+    public static DropView create(SqlDropView sqlDropView, RelDataType rowType, RelOptCluster cluster) {
+        return new DropView(cluster, cluster.traitSetOf(Convention.NONE), sqlDropView, rowType);
+    }
+
+    @Override
+    public DropView copy(RelTraitSet traitSet, List<RelNode> inputs) {
+        assert traitSet.containsIfApplicable(Convention.NONE);
+        return new DropView(this.getCluster(), traitSet, ((DropView) inputs.get(0)).getAst(), rowType);
+    }
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/ImportDatabase.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/ImportDatabase.java
new file mode 100644
index 000000000..513a094b9
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/ImportDatabase.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.rel.ddl;
+
+import org.apache.calcite.plan.Convention;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.DDL;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.sql.SqlImportDatabase;
+import org.apache.calcite.sql.SqlNode;
+
+import java.util.List;
+
+/**
+ * Created by zhuqiwei.
+ *
+ * @author zhuqiwei
+ */
+public class ImportDatabase extends DDL {
+    public ImportDatabase(RelOptCluster cluster, RelTraitSet traitSet, SqlNode sqlNode, RelDataType rowType) {
+        super(cluster, traitSet, null);
+        this.sqlNode = sqlNode;
+    }
+
+    public static ImportDatabase create(SqlImportDatabase sqlImportDatabase, RelDataType rowType, RelOptCluster cluster) {
+        return new ImportDatabase(cluster, cluster.traitSetOf(Convention.NONE), sqlImportDatabase, rowType);
+    }
+
+    @Override
+    public ImportDatabase copy(RelTraitSet traitSet, List<RelNode> inputs) {
+        return new ImportDatabase(this.getCluster(), traitSet, ((ImportDatabase) inputs.get(0)).getAst(), rowType);
+    }
+}
+
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/ImportSequence.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/ImportSequence.java
new file mode 100644
index 000000000..2ef3f7f4e
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/ddl/ImportSequence.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.rel.ddl;
+
+import org.apache.calcite.plan.Convention;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.plan.RelTraitSet;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.DDL;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.sql.SqlImportSequence;
+import org.apache.calcite.sql.SqlNode;
+
+import java.util.List;
+
+/**
+ * Created by zhuqiwei.
+ *
+ * @author zhuqiwei
+ */
+public class ImportSequence extends DDL {
+
+    public ImportSequence(RelOptCluster cluster, RelTraitSet traitSet, SqlNode sqlNode, RelDataType rowType) {
+        super(cluster, traitSet, null);
+        this.sqlNode = sqlNode;
+    }
+
+    public static ImportSequence create(SqlImportSequence sqlImportSequence, RelDataType rowType,
+                                        RelOptCluster cluster) {
+        return new ImportSequence(cluster, cluster.traitSetOf(Convention.NONE), sqlImportSequence, rowType);
+    }
+
+    @Override
+    public ImportSequence copy(RelTraitSet traitSet, List<RelNode> inputs) {
+        return new ImportSequence(this.getCluster(), traitSet, ((ImportSequence) inputs.get(0)).getAst(), rowType);
+    }
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/externalize/RelDrdsJsonWriter.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/externalize/RelDrdsJsonWriter.java
index 05843daa0..a64863272 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/externalize/RelDrdsJsonWriter.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/externalize/RelDrdsJsonWriter.java
@@ -114,6 +114,10 @@ protected void explain_(RelNode rel, List<Pair<String, Object>> values) {
                 node.put("actual_next_time", sketch.getDuration());
                 node.put("actual_worker_time", sketch.getWorkerDuration());
                 node.put("actual_rowcount", sketch.getRowCount());
+                if (sketch.getRuntimeFilteredRowCount() > 0) {
+                    node.put("runtime_filtered_count", sketch.getRuntimeFilteredRowCount());
+                }
+
                 node.put("actual_memory", sketch.getMemory());
 
                 if (sketch.getSpillCnt() > 0) {
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/externalize/RelDrdsWriter.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/externalize/RelDrdsWriter.java
index 1d14cb228..7c595547f 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/externalize/RelDrdsWriter.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/externalize/RelDrdsWriter.java
@@ -190,6 +190,11 @@ protected void explain_(RelNode rel, List> values) {
                 s.append(", actual time = ").append(String.format("%.3f", sketch.getStartupDuration()))
                         .append(" + ").append(String.format("%.3f", sketch.getDuration()));
                 s.append(", actual rowcount = ").append(sketch.getRowCount());
+
+                if (sketch.getRuntimeFilteredRowCount() > 0) {
+                    s.append(", runtime filtered count = ").append(sketch.getRuntimeFilteredRowCount());
+                }
+
                 s.append(", actual memory = ").append(sketch.getMemory());
                 if (sketch.getSpillCnt() > 0) {
                     s.append(", spill count = ").append(sketch.getSpillCnt());
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/externalize/RelJson.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/externalize/RelJson.java
index e862c3252..f8e6cb2b2 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/externalize/RelJson.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/externalize/RelJson.java
@@ -29,6 +29,8 @@
 import org.apache.calcite.rel.RelFieldCollation;
 import org.apache.calcite.rel.RelInput;
 import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.RelPartitionWise;
+import org.apache.calcite.rel.RelPartitionWises;
 import org.apache.calcite.rel.core.AggregateCall;
 import org.apache.calcite.rel.core.CorrelationId;
 import org.apache.calcite.rel.type.RelDataType;
@@ -60,14 +62,14 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 
 /**
  * Utilities for converting {@link org.apache.calcite.rel.RelNode}
  * into JSON format.
  */
 public class RelJson {
-  protected final Map constructorMap =
-      new HashMap();
+  protected static final Map constructorMap = new ConcurrentHashMap<>();
   protected final JsonBuilder jsonBuilder;
 
   public static final List PACKAGES =
@@ -196,6 +198,10 @@ public RelDistribution toDistribution(Map map) {
     return RelDistributions.ANY; // TODO:
   }
 
+  public RelPartitionWise toPartitionWise(Map<String, Object> map) {
+    return RelPartitionWises.ANY;
+  }
+
   public RelDataType toType(RelDataTypeFactory typeFactory, Object o) {
     if (o instanceof List) {
       @SuppressWarnings("unchecked")
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/externalize/RelJsonReader.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/externalize/RelJsonReader.java
index aab3c61d7..81776255f 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/externalize/RelJsonReader.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/externalize/RelJsonReader.java
@@ -31,6 +31,7 @@
 import org.apache.calcite.rel.RelDistribution;
 import org.apache.calcite.rel.RelInput;
 import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.RelPartitionWise;
 import org.apache.calcite.rel.core.AggregateCall;
 import org.apache.calcite.rel.core.CorrelationId;
 import org.apache.calcite.rel.core.Window;
@@ -304,6 +305,10 @@ public RelDistribution getDistribution() {
         return relJson.toDistribution((Map) get("distribution"));
       }
 
+      public RelPartitionWise getPartitionWise() {
+        return relJson.toPartitionWise((Map) get("partitionWise"));
+      }
+
       public ImmutableList<ImmutableList<RexLiteral>> getTuples(String tag) {
         //noinspection unchecked
         final List jsonTuples = (List) get(tag);
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/logical/JoinReorderContext.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/logical/JoinReorderContext.java
index f51eeda95..395c9db9c 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/logical/JoinReorderContext.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/logical/JoinReorderContext.java
@@ -17,19 +17,27 @@
 package org.apache.calcite.rel.logical;
 
 public class JoinReorderContext {
-    /** left deep */
+    /**
+     * left deep
+     */
     private boolean hasCommute = false;
     private boolean hasTopPushThrough = false;
 
-    /** zig-zag */
+    /**
+     * zig-zag
+     */
     private boolean hasCommuteZigZag = false;
 
-    /** bushy */
+    /**
+     * bushy
+     */
     private boolean hasExchange = false;
     private boolean hasRightAssociate = false;
     private boolean hasLeftAssociate = false;
+    private boolean hasSemiFilter = false;
 
-    public JoinReorderContext() {}
+    public JoinReorderContext() {
+    }
 
     void copyFrom(JoinReorderContext joinReorderContext) {
         this.hasCommute = joinReorderContext.hasCommute;
@@ -38,6 +46,7 @@ void copyFrom(JoinReorderContext joinReorderContext) {
         this.hasLeftAssociate = joinReorderContext.hasLeftAssociate;
         this.hasRightAssociate = joinReorderContext.hasRightAssociate;
         this.hasCommuteZigZag = joinReorderContext.hasCommuteZigZag;
+        this.hasSemiFilter = joinReorderContext.hasSemiFilter;
     }
 
     public void clear() {
@@ -47,6 +56,7 @@ public void clear() {
         hasExchange = false;
         hasRightAssociate = false;
         hasLeftAssociate = false;
+        hasSemiFilter = false;
     }
 
     public void avoidParticipateInJoinReorder() {
@@ -56,6 +66,17 @@ public void avoidParticipateInJoinReorder() {
         hasExchange = true;
         hasRightAssociate = true;
         hasLeftAssociate = true;
+        hasSemiFilter = true;
+    }
+
+    public boolean reordered() {
+        return hasCommute
+            || hasTopPushThrough
+            || hasCommuteZigZag
+            || hasExchange
+            || hasRightAssociate
+            || hasLeftAssociate
+            || hasSemiFilter;
     }
 
     public boolean isHasCommute() {
@@ -105,4 +126,12 @@ public boolean isHasCommuteZigZag() {
     public void setHasCommuteZigZag(boolean hasCommuteZigZag) {
         this.hasCommuteZigZag = hasCommuteZigZag;
     }
+
+    public boolean isHasSemiFilter() {
+        return hasSemiFilter;
+    }
+
+    public void setHasSemiFilter(boolean hasSemiFilter) {
+        this.hasSemiFilter = hasSemiFilter;
+    }
 }
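The new `hasSemiFilter` flag joins the existing commute/associate flags, and `reordered()` reports whether any reorder transformation has already fired on a subtree. A small usage sketch built only from the methods this hunk adds (the surrounding rule logic is hypothetical):

```java
import org.apache.calcite.rel.logical.JoinReorderContext;

public class JoinReorderContextSketch {
    public static void main(String[] args) {
        JoinReorderContext ctx = new JoinReorderContext();
        System.out.println(ctx.reordered()); // false: nothing applied yet

        ctx.setHasSemiFilter(true);          // a semi-join filter transform fired
        System.out.println(ctx.reordered()); // true: rules can skip this subtree

        ctx.clear();                         // resets hasSemiFilter with the rest
        System.out.println(ctx.reordered()); // false again
    }
}
```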
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/logical/LogicalSemiJoin.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/logical/LogicalSemiJoin.java
index b2186a42d..ccab1e0a7 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/logical/LogicalSemiJoin.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/logical/LogicalSemiJoin.java
@@ -79,6 +79,11 @@ public class LogicalSemiJoin extends SemiJoin {
     private String subqueryPosition;
     private static final String ERROR_SUBQUERY_MULTI_COLUMNS = " subquery with multi columns transform error";
 
+    /**
+     * use for join reorder
+     */
+    private final JoinReorderContext joinReorderContext = new JoinReorderContext();
+
     //~ Constructors -----------------------------------------------------------
 
     /**
@@ -594,6 +599,7 @@ public LogicalSemiJoin copy(RelTraitSet traitSet, RexNode condition,
         semiJoin.pushDownRelNode = this.pushDownRelNode;
         semiJoin.subqueryPosition = this.subqueryPosition;
         semiJoin.fromSetOp = this.fromSetOp;
+        semiJoin.getJoinReorderContext().copyFrom(this.getJoinReorderContext());
         return semiJoin;
     }
 
@@ -605,6 +611,10 @@ public RelNode accept(RelShuttle shuttle) {
     public boolean isFromSetOp() {
         return fromSetOp;
     }
+
+    public JoinReorderContext getJoinReorderContext() {
+        return joinReorderContext;
+    }
 }
 
 // End SemiJoin.java
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/logical/LogicalTableScan.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/logical/LogicalTableScan.java
index 763733402..52eed454c 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/logical/LogicalTableScan.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/logical/LogicalTableScan.java
@@ -31,6 +31,7 @@
 import org.apache.calcite.schema.Table;
 import org.apache.calcite.sql.SqlNode;
 import org.apache.calcite.sql.SqlNodeList;
+import org.apache.calcite.sql.SqlOperator;
 import org.apache.calcite.sql.parser.SqlParserPos;
 
 import java.util.List;
@@ -81,8 +82,9 @@ public LogicalTableScan(RelOptCluster cluster, RelTraitSet traitSet,
 
   public LogicalTableScan(RelOptCluster cluster, RelTraitSet traitSet,
                           RelOptTable table, SqlNodeList hints, SqlNode indexNode, RexNode flashback,
+                          SqlOperator flashbackOperator,
                           SqlNode partitions) {
-    super(cluster, traitSet, table, hints, indexNode, flashback, partitions);
+    super(cluster, traitSet, table, hints, indexNode, flashback, flashbackOperator, partitions);
   }
 
   @Deprecated // to be removed before 2.0
@@ -110,12 +112,12 @@ public LogicalTableScan(RelInput input) {
    */
   public static LogicalTableScan create(RelOptCluster cluster,
                                         final RelOptTable relOptTable) {
-      return create(cluster, relOptTable, new SqlNodeList(SqlParserPos.ZERO), null, null, null);
+    return create(cluster, relOptTable, new SqlNodeList(SqlParserPos.ZERO), null, null, null, null);
   }
 
   public static LogicalTableScan create(RelOptCluster cluster,
                                         final RelOptTable relOptTable, SqlNodeList hints, SqlNode indexNode,
-                                        RexNode flashback, SqlNode partitions) {
+                                        RexNode flashback, SqlOperator flashbackOperator, SqlNode partitions) {
 
     final Table table = relOptTable.unwrap(Table.class);
     final RelTraitSet traitSet =
@@ -127,7 +129,8 @@ public static LogicalTableScan create(RelOptCluster cluster,
                   }
                   return ImmutableList.of();
                 }).simplify();
-    return new LogicalTableScan(cluster, traitSet, relOptTable, hints, indexNode, flashback, partitions);
+    return new LogicalTableScan(cluster, traitSet, relOptTable, hints, indexNode, flashback, flashbackOperator,
+        partitions);
   }
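Callers of `LogicalTableScan.create` now pass the flashback operator alongside the flashback expression; passing `null` for both keeps the old behavior, as the updated no-arg overload above shows. A hedged sketch of a call site (cluster and table are assumed to come from the caller):

```java
import org.apache.calcite.plan.RelOptCluster;
import org.apache.calcite.plan.RelOptTable;
import org.apache.calcite.rel.logical.LogicalTableScan;
import org.apache.calcite.sql.SqlNodeList;
import org.apache.calcite.sql.parser.SqlParserPos;

final class ScanFactorySketch {
    // Builds a plain scan; all flashback-related arguments stay null.
    static LogicalTableScan plainScan(RelOptCluster cluster, RelOptTable table) {
        return LogicalTableScan.create(cluster, table,
            new SqlNodeList(SqlParserPos.ZERO),
            null,  // indexNode
            null,  // flashback expression
            null,  // flashbackOperator: the new parameter
            null); // partitions
    }
}
```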
 
 
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/logical/LogicalWindow.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/logical/LogicalWindow.java
index 7bc89f470..cb72b09d2 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/logical/LogicalWindow.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/logical/LogicalWindow.java
@@ -93,7 +93,6 @@ public LogicalWindow(RelOptCluster cluster, RelTraitSet traitSet,
       List inputs) {
     LogicalWindow logicalWindow =  new LogicalWindow(getCluster(), traitSet, sole(inputs), constants,
       rowType, groups);
-    logicalWindow.setFixedCost(getFixedCost());
     return logicalWindow;
   }
 
@@ -113,9 +112,6 @@ public static LogicalWindow create(RelTraitSet traitSet, RelNode input,
   }
 
   @Override public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) {
-    if (getFixedCost() != null) {
-      return getFixedCost();
-    }
     // Cost is proportional to the number of rows and the number of
     // components (groups and aggregate functions). There is
     // no I/O cost.
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/JaninoRelMetadataProvider.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/JaninoRelMetadataProvider.java
index ee6bc94da..d8f8f9c1c 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/JaninoRelMetadataProvider.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/JaninoRelMetadataProvider.java
@@ -22,24 +22,40 @@
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.LinkedHashMultimap;
 import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 import com.google.common.collect.Multimap;
 import com.google.common.util.concurrent.UncheckedExecutionException;
-import org.apache.calcite.adapter.enumerable.*;
+import org.apache.calcite.adapter.enumerable.EnumerableAggregate;
+import org.apache.calcite.adapter.enumerable.EnumerableFilter;
+import org.apache.calcite.adapter.enumerable.EnumerableJoin;
+import org.apache.calcite.adapter.enumerable.EnumerableProject;
+import org.apache.calcite.adapter.enumerable.EnumerableTableScan;
 import org.apache.calcite.config.CalciteSystemProperty;
 import org.apache.calcite.interpreter.JaninoRexCompiler;
 import org.apache.calcite.linq4j.Ord;
-import org.apache.calcite.linq4j.tree.ClassDeclaration;
-import org.apache.calcite.linq4j.tree.MemberDeclaration;
 import org.apache.calcite.linq4j.tree.Primitive;
 import org.apache.calcite.plan.hep.HepRelVertex;
 import org.apache.calcite.plan.volcano.AbstractConverter;
 import org.apache.calcite.plan.volcano.RelSubset;
-import org.apache.calcite.prepare.CalcitePrepareImpl;
 import org.apache.calcite.rel.AbstractRelNode;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.convert.ConverterImpl;
-import org.apache.calcite.rel.core.DynamicValues;
-import org.apache.calcite.rel.logical.*;
+import org.apache.calcite.rel.logical.LogicalAggregate;
+import org.apache.calcite.rel.logical.LogicalCalc;
+import org.apache.calcite.rel.logical.LogicalCorrelate;
+import org.apache.calcite.rel.logical.LogicalExchange;
+import org.apache.calcite.rel.logical.LogicalFilter;
+import org.apache.calcite.rel.logical.LogicalIntersect;
+import org.apache.calcite.rel.logical.LogicalJoin;
+import org.apache.calcite.rel.logical.LogicalMinus;
+import org.apache.calcite.rel.logical.LogicalProject;
+import org.apache.calcite.rel.logical.LogicalSort;
+import org.apache.calcite.rel.logical.LogicalTableFunctionScan;
+import org.apache.calcite.rel.logical.LogicalTableModify;
+import org.apache.calcite.rel.logical.LogicalTableScan;
+import org.apache.calcite.rel.logical.LogicalUnion;
+import org.apache.calcite.rel.logical.LogicalValues;
+import org.apache.calcite.rel.logical.LogicalWindow;
 import org.apache.calcite.rel.stream.LogicalChi;
 import org.apache.calcite.rel.stream.LogicalDelta;
 import org.apache.calcite.rex.RexNode;
@@ -49,18 +65,20 @@
 import org.checkerframework.checker.nullness.qual.Nullable;
 import org.codehaus.commons.compiler.CompileException;
 import org.codehaus.commons.compiler.CompilerFactoryFactory;
-import org.codehaus.commons.compiler.IClassBodyEvaluator;
 import org.codehaus.commons.compiler.ICompilerFactory;
 import org.codehaus.commons.compiler.ISimpleCompiler;
 
-import javax.annotation.Nonnull;
 import java.io.IOException;
-import java.io.StringReader;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
-import java.lang.reflect.Type;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.CopyOnWriteArraySet;
 import java.util.concurrent.ExecutionException;
 
@@ -249,7 +267,7 @@ private static  MetadataHandler load3(
       }
       safeArgList(buff, method.e)
           .append(");\n")
-          .append("    final Object v = mq.map.get(r, key);\n")
+          .append("    final Object v = mq.getCache(r, key);\n")
           .append("    if (v != null) {\n")
           .append("      if (v == ")
           .append(NullSentinel.class.getName())
@@ -267,9 +285,9 @@ private static  MetadataHandler load3(
           .append(method.e.getReturnType().getName())
           .append(") v;\n")
           .append("    }\n")
-          .append("    mq.map.put(r, key,")
+          .append("    mq.cache(r, key,")
           .append(NullSentinel.class.getName())
-          .append(".ACTIVE);\n")
+          .append(".ACTIVE, 0);\n")
           .append("    try {\n")
           .append("      final ")
           .append(method.e.getReturnType().getName())
@@ -278,14 +296,14 @@ private static  MetadataHandler load3(
           .append("_(r, mq");
       argList(buff, method.e)
           .append(");\n")
-          .append("      mq.map.put(r, key, ")
+          .append("      mq.cache(r, key, ")
           .append(NullSentinel.class.getName())
-          .append(".mask(x));\n")
+          .append(".mask(x), 0);\n")
           .append("      return x;\n")
           .append("    } catch (")
           .append(Exception.class.getName())
           .append(" e) {\n")
-          .append("      mq.map.row(r).clear();\n")
+          .append("      mq.clearCache(r);\n")
           .append("      throw e;\n")
           .append("    }\n")
           .append("  }\n")
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/ReflectiveRelMetadataProvider.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/ReflectiveRelMetadataProvider.java
index 0f3fbde6a..ab6767fed 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/ReflectiveRelMetadataProvider.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/ReflectiveRelMetadataProvider.java
@@ -31,7 +31,6 @@
 import com.google.common.collect.Multimap;
 import org.checkerframework.checker.nullness.qual.Nullable;
 
-import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Modifier;
@@ -182,7 +181,8 @@ private static RelMetadataProvider reflectiveSource(
                   }
                   key1 = FlatLists.copyOf(args2);
                 }
-                if (mq.map.put(rel, key1, NullSentinel.INSTANCE) != null) {
+                Object o = mq.cache(rel, key1, NullSentinel.INSTANCE, 1);
+                if (o != null) {
                   throw new CyclicMetadataException();
                 }
                 try {
@@ -192,7 +192,7 @@ private static RelMetadataProvider reflectiveSource(
                   Util.throwIfUnchecked(e.getCause());
                   throw new RuntimeException(e.getCause());
                 } finally {
-                  mq.map.remove(rel, key1);
+                  mq.clearCache(rel, key1);
                 }
               });
       methodsMap.put(key, function);
@@ -321,7 +321,7 @@ Method find(final Class relNodeClass, Method method) {
         }
         r = r.getSuperclass();
         if (r == null || !RelNode.class.isAssignableFrom(r)) {
-          throw new IllegalArgumentException("No handler for method [" + method
+           throw new IllegalArgumentException("No handler for method [" + method
               + "] applied to argument of type [" + relNodeClass
               + "]; we recommend you create a catch-all (RelNode) handler");
         }
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/RelMdPopulationSize.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/RelMdPopulationSize.java
index a20fc453a..3ad2ea887 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/RelMdPopulationSize.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/RelMdPopulationSize.java
@@ -57,7 +57,7 @@ public MetadataDef getDef() {
 
   public Double getPopulationSize(Filter rel, RelMetadataQuery mq,
       ImmutableBitSet groupKey) {
-    return mq.getPopulationSize(rel.getInput(), groupKey);
+    return RelMdUtil.numDistinctVals(mq.getPopulationSize(rel.getInput(), groupKey), mq.getRowCount(rel));
   }
 
   public Double getPopulationSize(Correlate rel, RelMetadataQuery mq,
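The old behavior returned the child's population unchanged, which can exceed the filter's own row count. `RelMdUtil.numDistinctVals` caps it by the number of surviving rows; a small numeric sketch with invented values:

```java
import org.apache.calcite.rel.metadata.RelMdUtil;

public class PopulationCapSketch {
    public static void main(String[] args) {
        Double childPopulation = 10_000.0; // distinct values in the filter input
        Double filteredRows = 150.0;       // estimated rows surviving the filter
        // Expected distinct values when drawing 150 rows from a domain of
        // 10,000: strictly less than both inputs.
        System.out.println(RelMdUtil.numDistinctVals(childPopulation, filteredRows));
    }
}
```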
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/RelMdSize.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/RelMdSize.java
index c0ba1d1ac..b64cf2dfe 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/RelMdSize.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/RelMdSize.java
@@ -298,6 +298,7 @@ public Double averageTypeValueSize(RelDataType type) {
     case REAL:
     case DECIMAL:
     case DATE:
+    case DATETIME:
     case TIME:
     case TIME_WITH_LOCAL_TIME_ZONE:
     case INTERVAL_YEAR:
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/RelMdUtil.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/RelMdUtil.java
index c26b44740..aee47c0d1 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/RelMdUtil.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/RelMdUtil.java
@@ -416,6 +416,15 @@ public static double guessSelectivity(
         RexNode predicate,
         boolean artificialOnly) {
         double sel = 1.0;
+        if (predicate instanceof RexLiteral) {
+            if (((RexLiteral)predicate).isAlwaysTrueIntOrBoolean()) {
+                return sel;
+            }
+            if (((RexLiteral)predicate).isAlwaysFalseIntOrBoolean()) {
+                return 0.0;
+            }
+        }
+
         if ((predicate == null) || predicate.isAlwaysTrue()) {
             return sel;
         }
@@ -438,6 +447,8 @@ public static double guessSelectivity(
                 sel *= .15;
             } else if (pred.isA(SqlKind.COMPARISON)) {
                 sel *= .5;
+            } else if (pred.isA(SqlKind.LIKE)) {
+                sel *= .05;
             } else {
                 sel *= .25;
             }
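A hand-computed illustration of the two new branches; the multipliers are taken from the hunks above and the predicate shapes are invented. An integer-literal predicate now short-circuits like MySQL's `WHERE 1` / `WHERE 0`, and a `LIKE` conjunct contributes a 5% guess instead of the 25% default:

```java
public class GuessSelectivitySketch {
    public static void main(String[] args) {
        // WHERE 1  -> always-true int literal  -> selectivity 1.0
        // WHERE 0  -> always-false int literal -> selectivity 0.0
        // WHERE name LIKE 'abc%' AND age > 18:
        double sel = 1.0;
        sel *= .05; // LIKE conjunct (new branch)
        sel *= .5;  // plain comparison conjunct
        System.out.printf("guessed selectivity = %.4f%n", sel); // 0.0250
    }
}
```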
@@ -571,6 +582,23 @@ public static void setAggChildKeys(
         }
     }
 
+    public static ImmutableBitSet keyThroughChildKeys(
+        ImmutableBitSet currentGroupKey,
+        ImmutableBitSet childGroupKey) {
+        final int childGroupCount = childGroupKey.cardinality();
+        for (int bit : currentGroupKey) {
+            if (bit >= childGroupCount) {
+                return null;
+            }
+        }
+
+        ImmutableBitSet.Builder result = ImmutableBitSet.builder();
+        for (int bit : currentGroupKey) {
+            result.set(childGroupKey.nth(bit));
+        }
+        return result.build();
+    }
+
     /**
      * Forms two bitmaps by splitting the columns in a bitmap according to
      * whether or not the column references the child input or is an expression
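`keyThroughChildKeys` maps a grouping key expressed in positions of a child key back to the child's actual column indexes, returning null when a position falls outside the child key. The logic is inlined below so the sketch runs standalone; the input bit sets are invented:

```java
import org.apache.calcite.util.ImmutableBitSet;

public class KeyThroughChildKeysSketch {
    public static void main(String[] args) {
        ImmutableBitSet childKey = ImmutableBitSet.of(2, 5, 7); // child columns
        ImmutableBitSet current = ImmutableBitSet.of(0, 2);     // positions within childKey
        System.out.println(map(current, childKey));             // {2, 7}
        System.out.println(map(ImmutableBitSet.of(3), childKey)); // null: out of range
    }

    // Same logic as the method added to RelMdUtil above.
    static ImmutableBitSet map(ImmutableBitSet currentGroupKey, ImmutableBitSet childGroupKey) {
        final int childGroupCount = childGroupKey.cardinality();
        for (int bit : currentGroupKey) {
            if (bit >= childGroupCount) {
                return null;
            }
        }
        ImmutableBitSet.Builder result = ImmutableBitSet.builder();
        for (int bit : currentGroupKey) {
            result.set(childGroupKey.nth(bit));
        }
        return result.build();
    }
}
```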
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/RelMetadataQuery.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/RelMetadataQuery.java
index 575710e1f..24fc2f2ea 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/RelMetadataQuery.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/metadata/RelMetadataQuery.java
@@ -16,8 +16,13 @@
  */
 package org.apache.calcite.rel.metadata;
 
-import com.google.common.collect.HashBasedTable;
-import com.google.common.collect.Table;
+import com.alibaba.polardbx.common.jdbc.Parameters;
+import com.alibaba.polardbx.common.properties.DynamicConfig;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Multimap;
 import org.apache.calcite.plan.RelOptCost;
 import org.apache.calcite.plan.RelOptPredicateList;
 import org.apache.calcite.plan.RelOptTable;
@@ -30,18 +35,12 @@
 import org.apache.calcite.rex.RexTableInputRef.RelTableRef;
 import org.apache.calcite.sql.SqlExplainLevel;
 import org.apache.calcite.util.ImmutableBitSet;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Multimap;
 import org.checkerframework.checker.nullness.qual.Nullable;
 
 import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -83,7 +82,9 @@
  */
 public class RelMetadataQuery {
   /** Set of active metadata queries, and cache of previous results. */
-  public final Table<RelNode, List, Object> map = HashBasedTable.create();
+  public final Map<RelNode, Map<List, Object>> map = Maps.newConcurrentMap();
+
+  public final Map<Long, Map<RelNode, Map<List, Object>>> threadMap = Maps.newConcurrentMap();
 
   public final JaninoRelMetadataProvider metadataProvider;
 
@@ -132,6 +133,9 @@ protected JaninoRelMetadataProvider initialValue() {
         }
       };
 
+  public static final ThreadLocal<Parameters> THREAD_PARAMETERS =
+      ThreadLocal.withInitial(() -> null);
+
   protected RelMetadataQuery(JaninoRelMetadataProvider metadataProvider,
       RelMetadataQuery prototype) {
     this.metadataProvider = Preconditions.checkNotNull(metadataProvider);
@@ -191,6 +195,14 @@ public static RelMetadataQuery instance() {
     return new RelMetadataQuery(THREAD_PROVIDERS.get(), EMPTY);
   }
 
+  /**
+   * @param metadataProvider target provider being used to build a meta query
+   * @return An instance of RelMetadataQuery
+   */
+  public static RelMetadataQuery instance(JaninoRelMetadataProvider metadataProvider) {
+    return new RelMetadataQuery(metadataProvider, EMPTY);
+  }
+
   /** Creates and initializes the instance that will serve as a prototype for
    * all other instances. */
   private RelMetadataQuery(boolean dummy) {
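The new overload lets callers build a metadata query on top of whatever provider a cluster carries, wrapping it for the Janino machinery; this is the same pattern the `AbstractMaterializedViewRule` change later in this diff adopts. A sketch (the helper is hypothetical):

```java
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.metadata.JaninoRelMetadataProvider;
import org.apache.calcite.rel.metadata.RelMetadataProvider;
import org.apache.calcite.rel.metadata.RelMetadataQuery;

final class MetadataQuerySketch {
    static RelMetadataQuery forNode(RelNode node) {
        RelMetadataProvider provider = node.getCluster().getMetadataProvider();
        // Wrap the plain provider so the Janino-backed query can dispatch on it.
        return RelMetadataQuery.instance(JaninoRelMetadataProvider.of(provider));
    }
}
```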
@@ -1122,20 +1134,130 @@ public void setIgnoreProjectDeriveOriginColumn(boolean ignoreProjectDeriveOrigin
     }
   }
 
-  /**
-   * Removes cached metadata values for specified RelNode.
-   *
-   * @param rel RelNode whose cached metadata should be removed
-   * @return true if cache for the provided RelNode was not empty
-   */
-  public boolean clearCache(RelNode rel) {
-    Map row = map.row(rel);
-    if (row.isEmpty()) {
-      return false;
+    /**
+     * Removes cached metadata values for specified RelNode.
+     *
+     * @param rel RelNode whose cached metadata should be removed
+     * @return true if cache for the provided RelNode was not empty
+     */
+    public boolean clearCache(RelNode rel) {
+        Map<List, Object> row = map.remove(rel);
+        Map<List, Object> row1 = null;
+        Long threadId = Thread.currentThread().getId();
+        Map<RelNode, Map<List, Object>> m = threadMap.get(threadId);
+        if (m != null) {
+            row1 = m.remove(rel);
+        }
+        return row != null || row1 != null;
     }
 
-    row.clear();
-    return true;
+    public void clearThreadCache() {
+        Long threadId = Thread.currentThread().getId();
+        threadMap.remove(threadId);
+    }
+
+  public void clearCache(RelNode rel, List key1) {
+    Map<List, Object> m = map.get(rel);
+    if (m != null) {
+      m.remove(key1);
+    }
+    Long threadId = Thread.currentThread().getId();
+    Map<RelNode, Map<List, Object>> m1 = threadMap.get(threadId);
+    if (m1 != null) {
+      Map<List, Object> m2 = m1.get(rel);
+      if (m2 != null) {
+        m2.remove(key1);
+      }
+    }
+  }
+
+  public Object cache(RelNode rel, List key, Object value, int classIndex) {
+    Object val = null;
+    if (DynamicConfig.getInstance().isEnableMQCacheByThread()) {
+      Class declaringClass = null;
+      boolean forceCacheByThread = false;
+      if (value == NullSentinel.ACTIVE) {
+        // NullSentinel.ACTIVE marks a cycle-detection probe in progress, so it
+        // must be cached per thread to avoid a spurious CyclicMetadataException.
+        forceCacheByThread = true;
+      } else if (key.get(classIndex) instanceof MetadataDef) {
+        // key.get(classIndex) is a MetadataDef, meaning this cache request came
+        // from JaninoRelMetadataProvider. Whether it should be cached per thread
+        // depends on the declaring metadata class.
+        declaringClass = ((MetadataDef) key.get(classIndex)).metadataClass;
+      } else if (key.get(classIndex) instanceof Method) {
+        // key.get(classIndex) is a Method, meaning this cache request came from
+        // ReflectiveRelMetadataProvider, which only uses the cache for cycle
+        // detection, so it must also be cached per thread.
+        forceCacheByThread = true;
+      } else {
+        throw new RuntimeException("not supported cache key:" + key.get(classIndex));
+      }
+
+      // Cycle-detection probes always stay thread-local; metadata whose value
+      // depends on query parameters must not be shared across threads either.
+      if (forceCacheByThread || isDeclaringClassShouldBeCached(declaringClass)) {
+        Long threadId = Thread.currentThread().getId();
+        Map<RelNode, Map<List, Object>> relNodeMap = threadMap.get(threadId);
+
+        if (relNodeMap == null) {
+          relNodeMap = Maps.newHashMap();
+          threadMap.put(threadId, relNodeMap);
+        }
+        Map<List, Object> subMap = relNodeMap.get(rel);
+        if (subMap == null) {
+          subMap = Maps.newHashMap();
+          relNodeMap.put(rel, subMap);
+        } else {
+          val = subMap.get(key);
+        }
+        subMap.put(key, value);
+        return val;
+      }
+    }
+    Map<List, Object> m = map.get(rel);
+    if (m == null) {
+      m = Maps.newConcurrentMap();
+      map.put(rel, m);
+    } else {
+      val = m.get(key);
+    }
+    m.put(key, value);
+    return val;
+  }
+
+  public static boolean isDeclaringClassShouldBeCached(Class declaringClass) {
+    return BuiltInMetadata.RowCount.class.equals(declaringClass)
+        || BuiltInMetadata.MaxRowCount.class.equals(declaringClass)
+        || BuiltInMetadata.MinRowCount.class.equals(declaringClass)
+        || BuiltInMetadata.PercentageOriginalRows.class.equals(declaringClass)
+        || BuiltInMetadata.StartUpCost.class.equals(declaringClass)
+        || BuiltInMetadata.NonCumulativeCost.class.equals(declaringClass)
+        || BuiltInMetadata.CumulativeCost.class.equals(declaringClass)
+        || BuiltInMetadata.Selectivity.class.equals(declaringClass)
+        || BuiltInMetadata.PopulationSize.class.equals(declaringClass)
+        || BuiltInMetadata.LowerBoundCost.class.equals(declaringClass)
+        || BuiltInMetadata.Memory.class.equals(declaringClass)
+        || BuiltInMetadata.DistinctRowCount.class.equals(declaringClass)
+        || BuiltInMetadata.Size.class.equals(declaringClass);
+  }
+
+  public Object getCache(RelNode rel, List key) {
+    Map<List, Object> row = map.get(rel);
+    if (row != null && row.containsKey(key)) {
+      return row.get(key);
+    }
+    Map<RelNode, Map<List, Object>> relMap = threadMap.get(Thread.currentThread().getId());
+    if (relMap != null && relMap.containsKey(rel)) {
+      Map<List, Object> subMap = relMap.get(rel);
+      if (subMap != null) {
+        return subMap.get(key);
+      }
+    }
+    return null;
+  }
 }
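To summarize the layout these changes introduce: a shared concurrent map holds metadata that is safe to reuse across threads, while cycle-detection markers and parameter-sensitive results live in a per-thread overlay. A standalone illustration of the two-level structure; this is not the real class, and key types are simplified:

```java
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class TwoLevelCacheSketch {
    // Shared across threads: node -> (key -> value).
    static final Map<Object, Map<List<Object>, Object>> shared = new ConcurrentHashMap<>();
    // Per-thread overlay: threadId -> node -> (key -> value).
    static final Map<Long, Map<Object, Map<List<Object>, Object>>> perThread = new ConcurrentHashMap<>();

    static Object put(Object rel, List<Object> key, Object value, boolean threadLocal) {
        Map<Object, Map<List<Object>, Object>> target = threadLocal
            ? perThread.computeIfAbsent(Thread.currentThread().getId(), t -> new ConcurrentHashMap<>())
            : shared;
        // Returns the previous value, which the real cache() uses to detect
        // re-entrant (cyclic) metadata requests.
        return target.computeIfAbsent(rel, r -> new ConcurrentHashMap<>()).put(key, value);
    }
}
```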
 
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/rel2sql/SqlImplementor.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/rel2sql/SqlImplementor.java
index 076e55f15..b46e8b9f9 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/rel2sql/SqlImplementor.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/rel2sql/SqlImplementor.java
@@ -1441,7 +1441,8 @@ public SqlNode asFrom() {
           final SqlIdentifier identifier = (SqlIdentifier) node;
           final SqlIdentifier newIdentifier =
               new SqlIdentifier(((SqlIdentifier) node).names, ((SqlIdentifier) node).getCollation(),
-                  node.getParserPosition(), null, null, identifier.partitions, identifier.flashback);
+                  node.getParserPosition(), null, null, identifier.partitions, identifier.flashback,
+                  identifier.flashbackOperator);
           final SqlIdentifier alias =
               new SqlIdentifier(ImmutableList.of(neededAlias), null, POS, null, identifier.indexNode);
 
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/AbstractMaterializedViewRule.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/AbstractMaterializedViewRule.java
index 3d997cbd6..4307e646a 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/AbstractMaterializedViewRule.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/AbstractMaterializedViewRule.java
@@ -36,6 +36,8 @@
 import org.apache.calcite.rel.core.Project;
 import org.apache.calcite.rel.core.RelFactories;
 import org.apache.calcite.rel.core.TableScan;
+import org.apache.calcite.rel.metadata.JaninoRelMetadataProvider;
+import org.apache.calcite.rel.metadata.RelMetadataProvider;
 import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeField;
@@ -175,7 +177,11 @@ protected AbstractMaterializedViewRule(RelOptRuleOperand operand,
    */
   protected void perform(RelOptRuleCall call, Project topProject, RelNode node) {
     final RexBuilder rexBuilder = node.getCluster().getRexBuilder();
-    final RelMetadataQuery mq = RelMetadataQuery.instance();
+    final RelMetadataQuery mq;
+    RelMetadataProvider provider = node.getCluster().getMetadataProvider();
+    // The cluster's provider may not be a JaninoRelMetadataProvider, which does
+    // not support the RelMetadataProvider.apply method, so wrap it first.
+    mq = RelMetadataQuery.instance(JaninoRelMetadataProvider.of(provider));
     final RelOptPlanner planner = call.getPlanner();
     final RexExecutor executor =
         Util.first(planner.getExecutor(), RexUtil.EXECUTOR);
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/CalcRelSplitter.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/CalcRelSplitter.java
index f3b03a9e9..2677755ad 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/CalcRelSplitter.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/CalcRelSplitter.java
@@ -401,6 +401,19 @@ private int chooseLevels(
         levelCount++;
       }
     }
+
+    // For dynamic rex nodes (parameter constants), pull the expression up to the
+    // highest level so no intermediate Project has to compute it early.
+    int maxLevel = Integer.MIN_VALUE;
+    for (int i = 0; i < exprLevels.length; i++) {
+      maxLevel = Math.max(maxLevel, exprLevels[i]);
+    }
+    for (int i = 0; i < exprs.length; i++) {
+      RexNode rexNode = exprs[i];
+      if (rexNode instanceof RexDynamicParam) {
+        exprLevels[i] = maxLevel;
+      }
+    }
+
     return levelCount;
   }
 
@@ -941,6 +954,12 @@ public Void visitLocalRef(RexLocalRef localRef) {
       return null;
     }
 
+    @Override
+    public Void visitDynamicParam(RexDynamicParam dynamicParam) {
+      level = 0;
+      return null;
+    }
+
     /**
      * Returns the highest level of any of the inputs of an expression.
      */
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/ProjectJoinTransposeRule.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/ProjectJoinTransposeRule.java
index fc1618db9..5fc093419 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/ProjectJoinTransposeRule.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/ProjectJoinTransposeRule.java
@@ -45,6 +45,7 @@ public class ProjectJoinTransposeRule extends RelOptRule {
           PushProjector.ExprCondition.TRUE,
           RelFactories.LOGICAL_BUILDER);
 
+
   //~ Instance fields --------------------------------------------------------
 
   /**
@@ -70,6 +71,15 @@ public ProjectJoinTransposeRule(
     this.preserveExprCondition = preserveExprCondition;
   }
 
+  public ProjectJoinTransposeRule(int inputRefThreshold) {
+    super(
+        operand(Project.class,
+            operand(Join.class, any())),
+        RelFactories.LOGICAL_BUILDER, null);
+
+    this.preserveExprCondition = new PushProjector.InputRefExprCondition(inputRefThreshold);
+  }
+
   //~ Methods ----------------------------------------------------------------
 
   // implement RelOptRule
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/ProjectMergeRule.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/ProjectMergeRule.java
index 2f5573877..b69246d2b 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/ProjectMergeRule.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/ProjectMergeRule.java
@@ -45,12 +45,16 @@ public class ProjectMergeRule extends RelOptRule {
     public static final ProjectMergeRule INSTANCE =
         new ProjectMergeRule(true, RelFactories.LOGICAL_BUILDER);
 
+    public static final ProjectMergeRule INSTANCE_WONT_IGNORE_REX_SUBQUERY =
+        new ProjectMergeRule(true, false, RelFactories.LOGICAL_BUILDER);
+
     //~ Instance fields --------------------------------------------------------
 
     /**
      * Whether to always merge projects.
      */
     private final boolean force;
+    private boolean ignoreProjectSubquery = true;
 
     //~ Constructors -----------------------------------------------------------
 
@@ -68,6 +72,16 @@ public ProjectMergeRule(boolean force, RelBuilderFactory relBuilderFactory) {
         this.force = force;
     }
 
+    public ProjectMergeRule(boolean force, boolean ignoreProjectSubquery, RelBuilderFactory relBuilderFactory) {
+        super(
+            operand(Project.class,
+                operand(Project.class, any())),
+            relBuilderFactory,
+            "ProjectMergeRule" + (force ? ":force_mode" : ""));
+        this.force = force;
+        this.ignoreProjectSubquery = ignoreProjectSubquery;
+    }
+
     @Deprecated // to be removed before 2.0
     public ProjectMergeRule(boolean force, ProjectFactory projectFactory) {
         this(force, RelBuilder.proto(projectFactory));
@@ -128,6 +142,14 @@ public void onMatch(RelOptRuleCall call) {
             }
         }
 
+        if (!ignoreProjectSubquery) {
+            for (RexNode rexNode : topProject.getProjects()) {
+                if (RexUtil.hasSubQuery(rexNode)) {
+                    return;
+                }
+            }
+        }
+
         Set corList = Sets.newHashSet();
         if (topProject.getVariablesSet() != null) {
             corList.addAll(topProject.getVariablesSet());
@@ -139,6 +161,7 @@ public void onMatch(RelOptRuleCall call) {
         if (corList.size() > 1) {
             return;
         }
+
         // replace the two projects with a combined projection
         relBuilder.push(bottomProject.getInput());
         relBuilder.project(newProjects, topProject.getRowType().getFieldNames(), corList);
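A usage sketch for the new rule instance: registering it instead of `INSTANCE` keeps a `RexSubQuery` in the top Project from being duplicated into several merged expressions. The HEP program below is illustrative, not part of this patch:

```java
import org.apache.calcite.plan.hep.HepPlanner;
import org.apache.calcite.plan.hep.HepProgramBuilder;
import org.apache.calcite.rel.rules.ProjectMergeRule;

final class MergeRuleChoiceSketch {
    // Merges stacked Projects, but bails out when the top Project
    // contains a subquery expression.
    static HepPlanner subqueryAwarePlanner() {
        return new HepPlanner(new HepProgramBuilder()
            .addRuleInstance(ProjectMergeRule.INSTANCE_WONT_IGNORE_REX_SUBQUERY)
            .build());
    }
}
```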
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/ProjectWindowTransposeRule.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/ProjectWindowTransposeRule.java
index acb909638..f015d85e0 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/ProjectWindowTransposeRule.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/ProjectWindowTransposeRule.java
@@ -93,6 +93,15 @@ public ProjectWindowTransposeRule(RelBuilderFactory relBuilderFactory) {
       builder.add(relDataTypeField);
     }
 
+    // If no input column is referenced, keep the first column so the project
+    // pushed below the window is not empty.
+    if (beReferred.isEmpty()) {
+      if (windowInputColumn > 1) {
+        final RelDataTypeField relDataTypeField = rowTypeWindowInput.get(0);
+        exps.add(new RexInputRef(0, relDataTypeField.getType()));
+        builder.add(relDataTypeField);
+      }
+    }
+
     final LogicalProject projectBelowWindow =
         new LogicalProject(cluster, window.getTraitSet(),
             window.getInput(), exps, builder.build());
@@ -241,7 +250,7 @@ private ImmutableBitSet findReference(final LogicalProject project,
   private int getAdjustedIndex(final int initIndex,
       final ImmutableBitSet beReferred, final int windowInputColumn) {
     if (initIndex >= windowInputColumn) {
-      return beReferred.cardinality() + (initIndex - windowInputColumn);
+      return Math.max(beReferred.cardinality(), 1) + (initIndex - windowInputColumn);
     } else {
       return beReferred.get(0, initIndex).cardinality();
     }
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/PushProjector.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/PushProjector.java
index 02c9f7029..d98b5ee43 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/PushProjector.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/rules/PushProjector.java
@@ -21,7 +21,6 @@
 import org.apache.calcite.plan.Strong;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.Join;
-import org.apache.calcite.rel.core.JoinRelType;
 import org.apache.calcite.rel.core.Project;
 import org.apache.calcite.rel.core.SemiJoin;
 import org.apache.calcite.rel.core.SetOp;
@@ -33,10 +32,7 @@
 import org.apache.calcite.rex.RexUtil;
 import org.apache.calcite.rex.RexVisitorImpl;
 import org.apache.calcite.runtime.PredicateImpl;
-import org.apache.calcite.sql.SqlBasicCall;
 import org.apache.calcite.sql.SqlOperator;
-import org.apache.calcite.sql.fun.SqlCastFunction;
-import org.apache.calcite.sql.fun.SqlRowOperator;
 import org.apache.calcite.tools.RelBuilder;
 import org.apache.calcite.util.BitSets;
 import org.apache.calcite.util.ImmutableBitSet;
@@ -47,11 +43,11 @@
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
+import org.jetbrains.annotations.Nullable;
 
 import java.util.ArrayList;
 import java.util.BitSet;
 import java.util.List;
-import java.util.Optional;
 import java.util.Set;
 
 /**
@@ -886,6 +882,26 @@ public boolean test(RexNode expr) {
           && operatorSet.contains(((RexCall) expr).getOperator());
     }
   }
+
+  public static class InputRefExprCondition extends ExprConditionImpl {
+
+    private final int inputRefThreshold;
+
+    public InputRefExprCondition(int inputRefThreshold) {
+      this.inputRefThreshold = inputRefThreshold;
+    }
+
+    @Override
+    public boolean test(@Nullable RexNode rexNode) {
+      if (rexNode instanceof RexInputRef) {
+        return true;
+      } else if (rexNode instanceof RexCall) {
+        return RexUtil.getInputRefCount(rexNode) >= inputRefThreshold;
+      }
+      return false;
+    }
+  }
+
 }
 
 // End PushProjector.java
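Putting the two pieces together: the threshold constructor of `ProjectJoinTransposeRule` plugs `InputRefExprCondition` in as the preserve condition, so bare input refs and any call touching at least the threshold of distinct input columns are preserved above the join rather than pushed below it. A sketch with an assumed threshold of 2 (the planner setup is illustrative):

```java
import org.apache.calcite.plan.hep.HepPlanner;
import org.apache.calcite.plan.hep.HepProgramBuilder;
import org.apache.calcite.rel.rules.ProjectJoinTransposeRule;

final class ProjectPushdownSketch {
    static HepPlanner planner() {
        // Expressions referencing >= 2 distinct input columns match the
        // preserve condition and stay above the join.
        return new HepPlanner(new HepProgramBuilder()
            .addRuleInstance(new ProjectJoinTransposeRule(2))
            .build());
    }
}
```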
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/type/RelDataTypeFactory.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/type/RelDataTypeFactory.java
index 4e754f61d..f5121832d 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/type/RelDataTypeFactory.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/type/RelDataTypeFactory.java
@@ -266,11 +266,16 @@ RelDataType createEnumSqlType(
       SqlTypeName typeName,
       List values);
 
+  RelDataType createSetSqlType(
+      SqlTypeName typeName,
+      int precision,
+      List setValues);
+
   /**
    * Creates a SQL interval type.
    *
    * @param intervalQualifier contains information if it is a year-month or a
-   *                          day-time interval along with precision information
+   * day-time interval along with precision information
    * @return canonical type descriptor
    */
   RelDataType createSqlIntervalType(
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rel/type/RelDataTypeSystemImpl.java b/polardbx-calcite/src/main/java/org/apache/calcite/rel/type/RelDataTypeSystemImpl.java
index 79c35ed6d..a928ee067 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rel/type/RelDataTypeSystemImpl.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rel/type/RelDataTypeSystemImpl.java
@@ -127,6 +127,9 @@ public int getMaxScale(SqlTypeName typeName) {
       // SqlTypeName.getDefaultPrecision), but it should be 6
       // (microseconds) per SQL99 part 2 section 6.1 syntax rule 30.
       return 0;
+    case BIT:
+    case BIG_BIT:
+      return 1;
     default:
       return -1;
     }
@@ -163,6 +166,9 @@ public int getMaxScale(SqlTypeName typeName) {
     case INTERVAL_MINUTE_SECOND:
     case INTERVAL_SECOND:
       return SqlTypeName.MAX_INTERVAL_START_PRECISION;
+    case BIT:
+    case BIG_BIT:
+      return 64;
     default:
       return getDefaultPrecision(typeName);
     }
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rex/RexLiteral.java b/polardbx-calcite/src/main/java/org/apache/calcite/rex/RexLiteral.java
index a8fb1a571..bffceabb3 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/rex/RexLiteral.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/rex/RexLiteral.java
@@ -56,6 +56,8 @@
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 
+import static org.apache.calcite.sql.type.SqlTypeName.INT_TYPES;
+
 /**
  * Constant value in a row-expression.
  * 

@@ -942,6 +944,20 @@ public boolean isAlwaysFalse() { return !booleanValue(this); } + public boolean isAlwaysTrueIntOrBoolean() { + if (INT_TYPES.contains(typeName)) { + return longValue(this) != 0; + } + return isAlwaysTrue(); + } + + public boolean isAlwaysFalseIntOrBoolean() { + if (INT_TYPES.contains(typeName)) { + return longValue(this) == 0; + } + return isAlwaysFalse(); + } + public boolean equals(Object obj) { return (obj instanceof RexLiteral) && equals(((RexLiteral) obj).value, value) && equals(((RexLiteral) obj).type, type); diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/rex/RexUtil.java b/polardbx-calcite/src/main/java/org/apache/calcite/rex/RexUtil.java index 4910c6dbf..643d6f7a5 100644 --- a/polardbx-calcite/src/main/java/org/apache/calcite/rex/RexUtil.java +++ b/polardbx-calcite/src/main/java/org/apache/calcite/rex/RexUtil.java @@ -777,6 +777,12 @@ public static boolean isConstant(RexNode node) { return node.accept(ConstantFinder.INSTANCE); } + public static int getInputRefCount(RexNode node) { + RexInputRefVisitor visitor = new RexInputRefVisitor(); + node.accept(visitor); + return visitor.getInputRefCount(); + } + /** * Returns whether a given expression is deterministic. @@ -3393,6 +3399,33 @@ public static RexNode split(RexNode rex, ImmutableBitSet retain){ } } + public static class RexInputRefVisitor extends RexVisitorImpl { + + private int inputRefCount; + private BitSet bitSet; + + protected RexInputRefVisitor() { + super(true); + this.inputRefCount = 0; + this.bitSet = new BitSet(8); + } + + public Void visitInputRef(RexInputRef inputRef) { + final int ref = inputRef.getIndex(); + if (bitSet.get(ref)) { + // Remove duplicates input refs. + return null; + } + bitSet.set(ref); + inputRefCount++; + return null; + } + + public int getInputRefCount() { + return inputRefCount; + } + }; + /** * Returns whether a given tree contains any un-pushable function * diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/runtime/CalciteResource.java b/polardbx-calcite/src/main/java/org/apache/calcite/runtime/CalciteResource.java index 558b914fb..ffa642059 100644 --- a/polardbx-calcite/src/main/java/org/apache/calcite/runtime/CalciteResource.java +++ b/polardbx-calcite/src/main/java/org/apache/calcite/runtime/CalciteResource.java @@ -746,9 +746,21 @@ public interface CalciteResource { @BaseMessage("View ''{0}'' not found") ExInst viewNotFound(String name); + @BaseMessage("Index ''{0}'' not found") + ExInst indexNotFound(String name); + @BaseMessage("Global Secondary Index ''{0}'' already exists") ExInst gsiExists(String name); + @BaseMessage("Clustered Columnar Index ''{0}'' already exists") + ExInst cciExists(String name); + + @BaseMessage("Do not support create more than one Clustered Columnar Index on table ''{0}''") + ExInst cciMoreThanOne(String name); + + @BaseMessage("Do not support create Clustered Columnar Index on table without primary key") + ExInst createCciOnTableWithoutPk(); + @BaseMessage("Duplicate column name ''{0}''") ExInst duplicateColumnNameInTable(String a0); @@ -775,7 +787,7 @@ public interface CalciteResource { @BaseMessage("Recursive CTE not support {0}") ExInst validatorRecursiveCTENotSupport(String a0); - + @BaseMessage("Cannot UPDATE generated column ''{0}''") ExInst updateAlwaysGenerated(String a0); } diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAddIndex.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAddIndex.java index e967c4625..c8ec5e066 100644 --- 
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAddIndex.java
@@ -78,6 +78,10 @@ public boolean isClusteredIndex() {
     return indexDef.isClustered();
   }
 
+  public boolean isColumnarIndex() {
+    return indexDef.isColumnar();
+  }
+
   @Override
   public boolean supportFileStorage() {
     return true;
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAggFunction.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAggFunction.java
index 6c1f6481a..c78b4372c 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAggFunction.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAggFunction.java
@@ -24,7 +24,9 @@
 import org.apache.calcite.sql.type.SqlReturnTypeInference;
 import org.apache.calcite.sql.validate.SqlValidator;
 import org.apache.calcite.sql.validate.SqlValidatorScope;
+import org.apache.calcite.util.Optionality;
 
+import javax.annotation.Nonnull;
 import java.util.List;
 
 /**
@@ -131,6 +133,23 @@ public RelDataType getReturnType(RelDataTypeFactory typeFactory) {
   public boolean allowsFilter() {
     return true;
   }
+
+  /** Returns whether this aggregate function allows the {@code DISTINCT}
+   * keyword.
+   *
+   * <p>The default implementation returns {@link Optionality#OPTIONAL},
+   * which is appropriate for most aggregate functions, including {@code SUM}
+   * and {@code COUNT}.
+   *
+   * <p>Some aggregate functions, for example {@code MIN}, produce the same
+   * result with or without {@code DISTINCT}, and therefore return
+   * {@link Optionality#IGNORED} to indicate this. For such functions,
+   * Calcite will probably remove {@code DISTINCT} while optimizing the query. */
+  public @Nonnull
+  Optionality getDistinctOptionality() {
+    return Optionality.OPTIONAL;
+  }
 }
 
 // End SqlAggFunction.java
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterInstance.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterInstance.java
new file mode 100644
index 000000000..4f3735094
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterInstance.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+import java.util.List;
+
+/**
+ * Created by zhuqiwei.
+ *
+ * @author zhuqiwei
+ */
+public class SqlAlterInstance extends SqlDdl {
+    private static final SqlSpecialOperator OPERATOR = new SqlAlterInstanceOperator();
+    private List optitionList;
+
+    public SqlAlterInstance(SqlParserPos pos, List optitionList) {
+        super(OPERATOR, pos);
+        this.optitionList = optitionList;
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword("ALTER INSTANCE");
+
+        for (SqlSetOption sqlSetOption : optitionList) {
+            sqlSetOption.unparse(writer, leftPrec, rightPrec);
+        }
+    }
+
+    public List getOptitionList() {
+        return optitionList;
+    }
+
+    public void setOptitionList(List optitionList) {
+        this.optitionList = optitionList;
+    }
+
+    @Override
+    public List getOperandList() {
+        return ImmutableList.of();
+    }
+
+    public static class SqlAlterInstanceOperator extends SqlSpecialOperator {
+        public SqlAlterInstanceOperator() {
+            super("ALTER_INSTANCE", SqlKind.ALTER_INSTANCE);
+        }
+
+        @Override
+        public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
+            final RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            final RelDataType columnType = typeFactory.createSqlType(SqlTypeName.CHAR);
+
+            return typeFactory.createStructType(
+                ImmutableList.of((RelDataTypeField) new RelDataTypeFieldImpl("ALTER_INSTANCE_RESULT", 0,
+                    columnType)));
+        }
+    }
+
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTable.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTable.java
index 5ae47f27a..197e3129e 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTable.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTable.java
@@ -43,6 +43,7 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.TreeMap;
 
 /**
  * DESCRIPTION
@@ -81,6 +82,9 @@ public static enum ColumnOpt {
 
     private Map> partRexInfoCtxByLevel;
 
+    private String targetImplicitTableGroupName;
+    private Map indexTableGroupMap = new TreeMap<>();
+
     /**
      * Creates a SqlCreateIndex.
      */
@@ -147,12 +151,12 @@ public void unparse(SqlWriter writer, int leftPrec, int rightPrec, boolean withO
             name.unparse(writer, leftPrec, rightPrec);
         }
 
+        SqlUtil.wrapSqlNodeList(alters).unparse(writer, 0, 0);
+
         if (null != tableOptions) {
             tableOptions.unparse(writer, leftPrec, rightPrec);
         }
 
-        SqlUtil.wrapSqlNodeList(alters).unparse(writer, 0, 0);
-
         writer.endList(frame);
     }
 
@@ -217,8 +221,9 @@ private String prepare() {
         final int leftPrec = getOperator().getLeftPrec();
         final int rightPrec = getOperator().getRightPrec();
         alters.clear();
-        alters.add(new SqlAlterTableDropIndex(dropForeignKey.getOriginTableName(), dropForeignKey.getIndexName(),
-            dropForeignKey.getSourceSql(), SqlParserPos.ZERO));
+        alters.add(
+            SqlDdlNodes.alterTableDropIndex(dropForeignKey.getOriginTableName(), dropForeignKey.getIndexName(),
+                dropForeignKey.getSourceSql(), SqlParserPos.ZERO));
         unparse(writer, leftPrec, rightPrec, true);
         sqlForExecute = writer.toSqlString().getSql();
         alters.clear();
@@ -351,6 +356,11 @@ public boolean createGsi() {
         return addIndex() && ((SqlAddIndex) alters.get(0)).indexDef.isGlobal();
     }
 
+    @Override
+    public boolean createCci() {
+        return addIndex() && ((SqlAddIndex) alters.get(0)).indexDef.isColumnar();
+    }
+
     public boolean isAllocateLocalPartition() {
         return alters != null && alters.size() == 1 && alters.get(0) instanceof SqlAlterTableAllocateLocalPartition;
     }
@@ -539,4 +549,20 @@ public void setFromAlterIndexPartition(boolean fromAlterIndexPartition) {
     public void setAlterIndexName(SqlNode alterIndexName) {
         this.alterIndexName = alterIndexName;
     }
+
+    public String getTargetImplicitTableGroupName() {
+        return targetImplicitTableGroupName;
+    }
+
+    public void setTargetImplicitTableGroupName(String targetImplicitTableGroupName) {
+        this.targetImplicitTableGroupName = targetImplicitTableGroupName;
+    }
+
+    public Map getIndexTableGroupMap() {
+        return indexTableGroupMap;
+    }
+
+    public void addIndexTableGroup(String index, String tableGroupName) {
+        this.indexTableGroupMap.put(index, tableGroupName);
+    }
 }
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTableDiscardTableSpace.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTableDiscardTableSpace.java
new file mode 100644
index 000000000..7e5a14016
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTableDiscardTableSpace.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+import java.util.List;
+
+/**
+ * Created by luoyanxin.
+ *
+ * @author luoyanxin
+ */
+public class SqlAlterTableDiscardTableSpace extends SqlDdl {
+    /**
+     * Creates a SqlDdl.
+     */
+    private static final SqlOperator OPERATOR = new SqlAlterTableDiscardTableSpaceOperator();
+    private SqlIdentifier tableName;
+    private final String sourceSql;
+
+    public SqlAlterTableDiscardTableSpace(SqlParserPos pos, SqlIdentifier tableName,
+                                          String sourceSql) {
+        super(OPERATOR, pos);
+        this.tableName = tableName;
+        this.sourceSql = sourceSql;
+    }
+
+    @Override
+    public List getOperandList() {
+        return ImmutableList.of();
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    @Override
+    public String toString() {
+        return sourceSql;
+    }
+
+    public String getSourceSql() {
+        return sourceSql;
+    }
+
+    public SqlIdentifier getTableName() {
+        return tableName;
+    }
+
+    @Override
+    public SqlNode getTargetTable() {
+        return tableName;
+    }
+
+    public static class SqlAlterTableDiscardTableSpaceOperator extends SqlSpecialOperator {
+
+        public SqlAlterTableDiscardTableSpaceOperator() {
+            super("ALTER_TABLE_DISCARD_TABLESPACE", SqlKind.ALTER_TABLE_DISCARD_TABLESPACE);
+        }
+
+        @Override
+        public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
+            final RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            final RelDataType columnType = typeFactory.createSqlType(SqlTypeName.CHAR);
+
+            return typeFactory.createStructType(
+                ImmutableList.of((RelDataTypeField) new RelDataTypeFieldImpl("ALTER_TABLE_DISCARD_TABLESPACE_RESULT",
+                    0,
+                    columnType)));
+        }
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.print(toString());
+    }
+
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTableDropIndex.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTableDropIndex.java
index eb8a2d4bf..03bcd953c 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTableDropIndex.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTableDropIndex.java
@@ -16,14 +16,13 @@
 
 package org.apache.calcite.sql;
 
+import org.apache.calcite.sql.parser.SqlParserPos;
+
 import java.util.Arrays;
 import java.util.List;
 
-import org.apache.calcite.sql.parser.SqlParserPos;
-
 /**
  * @author chenmo.cm
- * @date 2019/1/23 12:07 AM
  */
 public class SqlAlterTableDropIndex extends SqlAlterSpecification {
     private static final SqlOperator OPERATOR =
@@ -31,22 +30,21 @@ public class SqlAlterTableDropIndex extends SqlAlterSpecification {
 
     /**
      * Creates a SqlAlterTableDropIndex.
-     *
-     * @param indexName
-     * @param tableName
-     * @param pos
      */
-    public SqlAlterTableDropIndex(SqlIdentifier tableName, SqlIdentifier indexName , String sql, SqlParserPos pos) {
+    public SqlAlterTableDropIndex(SqlIdentifier tableName, SqlIdentifier indexName, SqlIdentifier originIndexName,
+                                  String sql, SqlParserPos pos) {
         super(pos);
         this.tableName = tableName;
         this.originTableName = tableName;
+        this.originIndexName = originIndexName;
         this.indexName = indexName;
         this.sourceSql = sql;
     }
 
-    private SqlNode tableName;
+    final private SqlIdentifier tableName;
     final private SqlIdentifier originTableName;
     final private SqlIdentifier indexName;
+    final private SqlIdentifier originIndexName;
     final private String sourceSql;
 
     @Override
@@ -56,11 +54,7 @@ public SqlOperator getOperator() {
 
     @Override
     public List getOperandList() {
-        return Arrays.asList(tableName,indexName);
-    }
-
-    public void setTargetTable(SqlNode tableName) {
-        this.tableName = tableName;
+        return Arrays.asList(tableName, indexName);
     }
 
     @Override
@@ -80,6 +74,10 @@ public SqlIdentifier getIndexName() {
         return indexName;
     }
 
+    public SqlIdentifier getOriginIndexName() {
+        return originIndexName;
+    }
+
     public String getSourceSql() {
         return sourceSql;
     }
@@ -89,5 +87,11 @@ public SqlIdentifier getOriginTableName() {
     }
 
     @Override
-    public boolean supportFileStorage() { return true;}
+    public boolean supportFileStorage() {
+        return true;
+    }
+
+    public SqlAlterTableDropIndex replaceIndexName(SqlIdentifier newIndexName) {
+        return new SqlAlterTableDropIndex(tableName, newIndexName, originIndexName, sourceSql, pos);
+    }
 }
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTableImportTableSpace.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTableImportTableSpace.java
new file mode 100644
index 000000000..985414b5f
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTableImportTableSpace.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+import java.util.List;
+
+/**
+ * Created by luoyanxin.
+ *
+ * @author luoyanxin
+ */
+public class SqlAlterTableImportTableSpace extends SqlDdl {
+    /**
+     * Creates a SqlDdl.
+     */
+    private static final SqlOperator OPERATOR = new SqlAlterTableImportTableSpaceOperator();
+    private SqlIdentifier tableName;
+    private final String sourceSql;
+
+    public SqlAlterTableImportTableSpace(SqlParserPos pos, SqlIdentifier tableName,
+                                         String sourceSql) {
+        super(OPERATOR, pos);
+        this.tableName = tableName;
+        this.sourceSql = sourceSql;
+    }
+
+    @Override
+    public List getOperandList() {
+        return ImmutableList.of();
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    @Override
+    public String toString() {
+        return sourceSql;
+    }
+
+    public String getSourceSql() {
+        return sourceSql;
+    }
+
+    public SqlIdentifier getTableName() {
+        return tableName;
+    }
+
+    @Override
+    public SqlNode getTargetTable() {
+        return tableName;
+    }
+
+    public static class SqlAlterTableImportTableSpaceOperator extends SqlSpecialOperator {
+
+        public SqlAlterTableImportTableSpaceOperator() {
+            super("ALTER_TABLE_IMPORT_TABLESPACE", SqlKind.ALTER_TABLE_IMPORT_TABLESPACE);
+        }
+
+        @Override
+        public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
+            final RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            final RelDataType columnType = typeFactory.createSqlType(SqlTypeName.CHAR);
+
+            return typeFactory.createStructType(
+                ImmutableList.of((RelDataTypeField) new RelDataTypeFieldImpl("ALTER_TABLE_IMPORT_TABLESPACE_RESULT",
+                    0,
+                    columnType)));
+        }
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.print(toString());
+    }
+
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTableRepartition.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTableRepartition.java
index 390326f4b..17d8e1d13 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTableRepartition.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTableRepartition.java
@@ -70,6 +70,7 @@ static public SqlAlterTableRepartition create(SqlAlterTablePartitionKey sqlAlter
             sqlAlterTablePartitionKey.getAlters(), null, false, null, null);
         sqlAlterPartitionTableRepartition.setBroadcast(sqlAlterTablePartitionKey.isBroadcast());
         sqlAlterPartitionTableRepartition.setSingle(sqlAlterTablePartitionKey.isSingle());
+        sqlAlterPartitionTableRepartition.setTargetImplicitTableGroupName(sqlAlterTablePartitionKey.getTargetImplicitTableGroupName());
 
         return sqlAlterPartitionTableRepartition;
     }
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTableSetTableGroup.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTableSetTableGroup.java
index 9d38c5063..a138c7569 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTableSetTableGroup.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAlterTableSetTableGroup.java
@@ -35,13 +35,16 @@ public class SqlAlterTableSetTableGroup extends SqlCreate {
     final String targetTableGroup;
     final List objectNames;
     final boolean force;
+    final boolean implicit;
 
-    public SqlAlterTableSetTableGroup(List objectNames, SqlIdentifier tableName, String targetTableGroup, String sql, SqlParserPos pos, boolean force){
+    public SqlAlterTableSetTableGroup(List objectNames, SqlIdentifier tableName, String targetTableGroup,
+                                      String sql, SqlParserPos pos, boolean implicit, boolean force) {
         super(OPERATOR, SqlParserPos.ZERO, false, false);
         this.name = tableName;
         this.sourceSql = sql;
         this.targetTableGroup = targetTableGroup;
         this.objectNames = objectNames;
+        this.implicit = implicit;
         this.force = force;
     }
 
@@ -61,6 +64,10 @@ public boolean isForce() {
         return force;
     }
 
+    public boolean isImplicit() {
+        return implicit;
+    }
+
     @Override
     public List getOperandList() {
         return Arrays.asList(name);
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAsOf57Operator.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAsOf57Operator.java
new file mode 100644
index 000000000..0f2737765
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlAsOf57Operator.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import org.apache.calcite.sql.type.InferTypes;
+import org.apache.calcite.sql.type.OperandTypes;
+import org.apache.calcite.sql.type.ReturnTypes;
+import org.apache.calcite.sql.type.SqlOperandTypeChecker;
+import org.apache.calcite.sql.type.SqlOperandTypeInference;
+import org.apache.calcite.sql.type.SqlReturnTypeInference;
+
+public class SqlAsOf57Operator extends SqlSpecialOperator {
+    public SqlAsOf57Operator() {
+        this(
+            "AS OF TSO",
+            SqlKind.AS_OF,
+            20,
+            true,
+            ReturnTypes.ARG0,
+            InferTypes.RETURN_TYPE,
+            OperandTypes.ANY_ANY);
+    }
+
+    protected SqlAsOf57Operator(String name, SqlKind kind, int prec,
+                                boolean leftAssoc, SqlReturnTypeInference returnTypeInference,
+                                SqlOperandTypeInference operandTypeInference,
+                                SqlOperandTypeChecker operandTypeChecker) {
+        super(name, kind, prec, leftAssoc, returnTypeInference,
+            operandTypeInference, operandTypeChecker);
+    }
+
+    public void unparse(
+        SqlWriter writer,
+        SqlCall call,
+        int leftPrec,
+        int rightPrec) {
+        assert call.operandCount() >= 2;
+        final SqlWriter.Frame frame =
+            writer.startList(
+                SqlWriter.FrameTypeEnum.SIMPLE);
+        call.operand(0).unparse(writer, leftPrec, getLeftPrec());
+        final boolean needsSpace = true;
+        writer.setNeedWhitespace(needsSpace);
+        if (writer.getDialect().allowsAsOf()) {
+            writer.sep("AS OF TSO");
+            writer.setNeedWhitespace(needsSpace);
+        }
+        call.operand(1).unparse(writer, getRightPrec(), rightPrec);
+        writer.endList(frame);
+    }
+}
\ No newline at end of file
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCall.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCall.java
index baee8906c..680f90ee5 100755
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCall.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCall.java
@@ -253,6 +253,17 @@ public boolean isCountStar() {
     return false;
   }
 
+  public boolean isCountLiteral() {
+    if (getOperator().isName("COUNT") && operandCount() == 1) {
+      final SqlNode parm = operand(0);
+      if (parm instanceof SqlNumericLiteral) {
+        return true;
+      }
+    }
+
+    return false;
+  }
+
   /**
    * Test to see if it is the function CHECK_SUM(*)
    *
@@ -271,6 +282,24 @@ public boolean isCheckSumStar() {
     return false;
   }
 
+  /**
+   * Test to see if it is the function CHECK_SUM_V2(*)
+   *
+   * @return boolean true if
+   *         function call to CHECK_SUM_V2(*)
+   */
+  public boolean isCheckSumV2Star() {
+    if (getOperator().isName("CHECK_SUM_V2") && operandCount() == 1) {
+      final SqlNode parm = operand(0);
+      if (parm instanceof SqlIdentifier) {
+        SqlIdentifier id = (SqlIdentifier) parm;
+        if (id.isStar() && id.names.size() == 1) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
   public SqlLiteral getFunctionQuantifier() {
     return null;
   }
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCancelReplicaCheck.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCancelReplicaCheck.java
new file mode 100644
index 000000000..feff9ae08
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCancelReplicaCheck.java
@@ -0,0 +1,98 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * @author yudong
+ * @since 2023/11/9 11:10
+ **/
+public class SqlCancelReplicaCheck extends SqlDal {
+
+    private static final SqlSpecialOperator OPERATOR = new SqlCancelReplicaCheckOperator();
+
+    private SqlNode dbName;
+    private SqlNode tableName;
+
+    public SqlCancelReplicaCheck(SqlParserPos pos, SqlNode dbName) {
+        super(pos);
+        this.dbName = dbName;
+    }
+
+    public SqlCancelReplicaCheck(SqlParserPos pos, SqlNode dbName, SqlNode tableName) {
+        super(pos);
+        this.dbName = dbName;
+        this.tableName = tableName;
+    }
+
+    public SqlNode getDbName() {
+        return dbName;
+    }
+
+    public void setDbName(SqlNode dbName) {
+        this.dbName = dbName;
+    }
+
+    public SqlNode getTableName() {
+        return tableName;
+    }
+
+    public void setTableName(SqlNode tableName) {
+        this.tableName = tableName;
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword("CHECK REPLICA TABLE");
+        dbName.unparse(writer, 0, 0);
+        if (tableName != null) {
+            writer.print(".");
+            tableName.unparse(writer, 0, 0);
+        }
+        writer.keyword("CANCEL");
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    public static class SqlCancelReplicaCheckOperator extends SqlSpecialOperator {
+
+        public SqlCancelReplicaCheckOperator() {
+            super("CANCEL_REPLICA_CHECK", SqlKind.CANCEL_REPLICA_CHECK);
+        }
+
+        @Override
+        public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
+            final RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            List columns = new LinkedList<>();
+            columns.add(new RelDataTypeFieldImpl("RESULT", 0, typeFactory.createSqlType(SqlTypeName.INTEGER)));
+            return typeFactory.createStructType(columns);
+        }
+    }
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCheckColumnarIndex.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCheckColumnarIndex.java
new file mode 100644
index 000000000..b6c8273d2
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCheckColumnarIndex.java
@@ -0,0 +1,171 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import com.alibaba.polardbx.common.utils.TStringUtil;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+public class SqlCheckColumnarIndex extends SqlDdl { // Use DDL here to utilize async DDL framework.
+
+    private static final SqlSpecialOperator OPERATOR = new SqlCheckColumnarIndex.SqlCheckColumnarIndexOperator();
+
+    private SqlIdentifier indexName;
+    private SqlIdentifier tableName;
+    private String extraCmd;
+
+    public SqlCheckColumnarIndex(SqlParserPos pos, SqlIdentifier indexName, SqlIdentifier tableName, String extraCmd) {
+        super(OPERATOR, pos);
+        this.name = indexName;
+        this.indexName = indexName;
+        this.tableName = tableName;
+        this.extraCmd = extraCmd;
+    }
+
+    public SqlIdentifier getIndexName() {
+        return indexName;
+    }
+
+    public void setIndexName(SqlIdentifier indexName) {
+        this.indexName = indexName;
+    }
+
+    public SqlIdentifier getTableName() {
+        return tableName;
+    }
+
+    public void setTableName(SqlIdentifier tableName) {
+        this.tableName = tableName;
+    }
+
+    public String getExtraCmd() {
+        return extraCmd;
+    }
+
+    public void setExtraCmd(String extraCmd) {
+        this.extraCmd = extraCmd;
+    }
+
+    public CheckCciExtraCmd getExtraCmdEnum() {
+        return CheckCciExtraCmd.of(this.extraCmd);
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        final SqlWriter.Frame selectFrame = writer.startList(SqlWriter.FrameTypeEnum.SELECT);
+        writer.sep("CHECK COLUMNAR INDEX");
+
+        if (indexName != null) {
+            indexName.unparse(writer, leftPrec, rightPrec);
+        }
+
+        if (tableName != null) {
+            writer.sep("ON");
+            tableName.unparse(writer, leftPrec, rightPrec);
+        }
+
+        if (extraCmd != null) {
+            writer.sep(extraCmd);
+        }
+
+        writer.endList(selectFrame);
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    @Override
+    public List getOperandList() {
+        return Arrays.asList(name);
+    }
+
+    @Override
+    public SqlKind getKind() {
+        return SqlKind.CHECK_COLUMNAR_INDEX;
+    }
+
+    @Override
+    public void validate(SqlValidator validator, SqlValidatorScope scope) {
+    }
+
+    public static class SqlCheckColumnarIndexOperator extends SqlSpecialOperator {
+
+        public SqlCheckColumnarIndexOperator() {
+            super("CHECK_COLUMNAR_INDEX", SqlKind.CHECK_COLUMNAR_INDEX);
+        }
+
+        @Override
+        public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
+            final RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            List columns = new LinkedList<>();
+            columns.add(new RelDataTypeFieldImpl("Table", 0, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("Op", 1, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("Msg_type", 2, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("Msg_text", 3, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+
+            return typeFactory.createStructType(columns);
+        }
+    }
+
+    @Override
+    public SqlNode clone(SqlParserPos pos) {
+        return new SqlCheckColumnarIndex(this.pos, indexName, tableName, extraCmd);
+    }
+
+    public SqlCheckColumnarIndex replaceIndexName(SqlIdentifier newIndexName) {
+        return new SqlCheckColumnarIndex(pos, newIndexName, tableName, extraCmd);
+    }
+
+    public boolean withTableName() {
+        return null != tableName;
+    }
+
+    public enum CheckCciExtraCmd {
+        UNKNOWN, DEFAULT, CHECK, LOCK, CLEAR, SHOW, META;
+        private static final Map VALUE_MAP = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
+
+        static {
+            VALUE_MAP.put("CHECK", CHECK);
+            VALUE_MAP.put("LOCK", LOCK);
+            VALUE_MAP.put("CLEAR", CLEAR);
+            VALUE_MAP.put("SHOW", SHOW);
+            VALUE_MAP.put("META", META);
+        }
+
+        public static CheckCciExtraCmd of(String stringVal) {
+            if (TStringUtil.isBlank(stringVal)) {
+                return DEFAULT;
+            }
+
+            return VALUE_MAP.getOrDefault(stringVal, UNKNOWN);
+        }
+    }
+
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCheckColumnarPartition.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCheckColumnarPartition.java
new file mode 100644
index 000000000..c5d79be6f
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCheckColumnarPartition.java
@@ -0,0 +1,101 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+public class SqlCheckColumnarPartition extends SqlDal {
+
+    private static final SqlSpecialOperator OPERATOR = new SqlCheckColumnarPartitionOperator();
+
+    private SqlNode tableName;
+
+    public SqlCheckColumnarPartition(SqlParserPos pos, SqlNode tableName) {
+        super(pos);
+        this.tableName = tableName;
+    }
+
+    public SqlNode getTableName() {
+        return tableName;
+    }
+
+    public void setTableNames(SqlNode tableName) {
+        this.tableName = tableName;
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        final SqlWriter.Frame selectFrame = writer.startList(SqlWriter.FrameTypeEnum.SELECT);
+        writer.sep("CHECK COLUMNAR PARTITION");
+        tableName.unparse(writer, leftPrec, rightPrec);
+        writer.endList(selectFrame);
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    @Override
+    public SqlKind getKind() {
+        return SqlKind.CHECK_COLUMNAR_PARTITION;
+    }
+
+    @Override
+    public void validate(SqlValidator validator, SqlValidatorScope scope) {
+    }
+
+    public static class SqlCheckColumnarPartitionOperator extends SqlSpecialOperator {
+
+        public SqlCheckColumnarPartitionOperator() {
+            super("CHECK_COLUMNAR_PARTITION", SqlKind.CHECK_COLUMNAR_PARTITION);
+        }
+
+        @Override
+        public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
+            final RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            List columns = new LinkedList<>();
+            columns.add(new RelDataTypeFieldImpl("Logical Table", 0, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("Columnar Index", 1, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("Partition", 2, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(
+                new RelDataTypeFieldImpl("Orc Files", 3, typeFactory.createSqlType(SqlTypeName.INTEGER_UNSIGNED)));
+            columns.add(
+                new RelDataTypeFieldImpl("Orc Rows", 4, typeFactory.createSqlType(SqlTypeName.BIGINT_UNSIGNED)));
+            columns.add(
+                new RelDataTypeFieldImpl("Csv Files", 5, typeFactory.createSqlType(SqlTypeName.INTEGER_UNSIGNED)));
+            columns.add(
+                new RelDataTypeFieldImpl("Csv Rows", 6, typeFactory.createSqlType(SqlTypeName.BIGINT_UNSIGNED)));
+            columns.add(new RelDataTypeFieldImpl("Extra", 7, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            return typeFactory.createStructType(columns);
+        }
+    }
+
+    @Override
+    public SqlNode clone(SqlParserPos pos) {
+        return new SqlCheckColumnarPartition(this.pos, tableName);
+    }
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlClearFileStorage.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlClearFileStorage.java
new file mode 100644
index 000000000..b86bc6620
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlClearFileStorage.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class SqlClearFileStorage extends SqlDdl {
+
+    private static final SqlOperator OPERATOR = new SqlClearFilesStorageOperator();
+
+    /**
+     * Creates a SqlDdl.
+     */
+    public SqlClearFileStorage(SqlIdentifier name) {
+        super(OPERATOR, SqlParserPos.ZERO);
+        this.name = name;
+    }
+
+    @Override
+    public List getOperandList() {
+        return ImmutableList.of(name);
+    }
+
+    public SqlIdentifier getName() {
+        return (SqlIdentifier) name;
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    @Override
+    public SqlKind getKind() {
+        return SqlKind.CLEAR_FILESTORAGE;
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword(getOperator().getName());
+        name.unparse(writer, leftPrec, rightPrec);
+    }
+
+    @Override
+    public void validate(SqlValidator validator, SqlValidatorScope scope) {
+        validator.validateDdl(this, validator.getUnknownType(), scope);
+    }
+
+    public static class SqlClearFilesStorageOperator extends SqlSpecialOperator {
+        public SqlClearFilesStorageOperator() {
+            super("CLEAR FILESTORAGE", SqlKind.CLEAR_FILESTORAGE);
+        }
+
+        @Override
+        public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
+            final RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            List columns = new ArrayList<>();
+            columns.add(new RelDataTypeFieldImpl(
+                "Clear_File_Storage_Count",
+                0,
+                typeFactory.createSqlType(SqlTypeName.INTEGER_UNSIGNED)
+            ));
+            return typeFactory.createStructType(columns);
+        }
+    }
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlColumnDeclaration.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlColumnDeclaration.java
index 7e3f1ecdc..491aad42c 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlColumnDeclaration.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlColumnDeclaration.java
@@ -87,6 +87,8 @@ public class SqlColumnDeclaration extends SqlCall {
      */
     private final ColumnStrategy strategy;
 
+    private String securedWith;
+
     /**
      * <pre>
      * data_type [NOT NULL | NULL] [DEFAULT {literal | (expr)} ]
@@ -389,6 +391,14 @@ public SqlCall getGeneratedAlwaysExpr() {
     public ColumnStrategy getStrategy() {
         return strategy;
     }
+
+    public String getSecuredWith() {
+        return securedWith;
+    }
+
+    public void setSecuredWith(String securedWith) {
+        this.securedWith = securedWith;
+    }
 }
 
 // End SqlColumnDeclaration.java
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlContinueReplicaCheck.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlContinueReplicaCheck.java
new file mode 100644
index 000000000..c8e2cb929
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlContinueReplicaCheck.java
@@ -0,0 +1,97 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * @author yudong
+ * @since 2023/11/9 11:10
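+ *
+ * <p>As {@link #unparse} shows, this node renders as
+ * {@code CHECK REPLICA TABLE db_name[.table_name] CONTINUE}; the db/table
+ * names here are placeholders for whatever identifiers were parsed.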
+ **/
+public class SqlContinueReplicaCheck extends SqlDal {
+    private static final SqlSpecialOperator OPERATOR = new SqlContinueReplicaCheckOperator();
+
+    private SqlNode dbName;
+    private SqlNode tableName;
+
+    public SqlContinueReplicaCheck(SqlParserPos pos, SqlNode dbName) {
+        super(pos);
+        this.dbName = dbName;
+    }
+
+    public SqlContinueReplicaCheck(SqlParserPos pos, SqlNode dbName, SqlNode tableName) {
+        super(pos);
+        this.dbName = dbName;
+        this.tableName = tableName;
+    }
+
+    public SqlNode getDbName() {
+        return dbName;
+    }
+
+    public void setDbName(SqlNode dbName) {
+        this.dbName = dbName;
+    }
+
+    public SqlNode getTableName() {
+        return tableName;
+    }
+
+    public void setTableName(SqlNode tableName) {
+        this.tableName = tableName;
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword("CHECK REPLICA TABLE");
+        dbName.unparse(writer, 0, 0);
+        if (tableName != null) {
+            writer.print(".");
+            tableName.unparse(writer, 0, 0);
+        }
+        writer.keyword("CONTINUE");
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    public static class SqlContinueReplicaCheckOperator extends SqlSpecialOperator {
+
+        public SqlContinueReplicaCheckOperator() {
+            super("CONTINUE_REPLICA_CHECK", SqlKind.CONTINUE_REPLICA_CHECK);
+        }
+
+        @Override
+        public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
+            final RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            List columns = new LinkedList<>();
+            columns.add(new RelDataTypeFieldImpl("RESULT", 0, typeFactory.createSqlType(SqlTypeName.INTEGER)));
+            return typeFactory.createStructType(columns);
+        }
+    }
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlConvertAllSequences.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlConvertAllSequences.java
index a1c107f55..cf363751d 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlConvertAllSequences.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlConvertAllSequences.java
@@ -17,9 +17,13 @@
 package org.apache.calcite.sql;
 
 import com.alibaba.polardbx.common.constants.SequenceAttribute.Type;
+import com.alibaba.polardbx.gms.topology.SystemDbHelper;
 import org.apache.calcite.sql.parser.SqlParserPos;
 
-public class SqlConvertAllSequences extends SqlDal {
+import java.util.Arrays;
+import java.util.List;
+
+public class SqlConvertAllSequences extends SqlDdl {
 
     private static final SqlSpecialOperator OPERATOR =
         new SqlAffectedRowsOperator("CONVERT_ALL_SEQUENCES", SqlKind.CONVERT_ALL_SEQUENCES);
@@ -31,7 +35,7 @@ public class SqlConvertAllSequences extends SqlDal {
 
     public SqlConvertAllSequences(SqlParserPos pos, Type fromType, Type toType, String schemaName,
                                   boolean allSchemata) {
-        super(pos);
+        super(OPERATOR, SqlParserPos.ZERO);
         this.fromType = fromType;
         this.toType = toType;
         this.schemaName = schemaName;
@@ -43,7 +47,7 @@ public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
         final SqlWriter.Frame selectFrame = writer.startList(SqlWriter.FrameTypeEnum.SELECT);
         writer.keyword("CONVERT ALL SEQUENCES FROM");
         writer.print(fromType.name());
-        writer.keyword("TO");
+        writer.keyword(" TO");
         writer.print(toType.name());
         if (!allSchemata) {
             writer.keyword("FOR");
@@ -73,6 +77,11 @@ public SqlOperator getOperator() {
         return OPERATOR;
     }
 
+    @Override
+    public List getOperandList() {
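+    // fromType, toType and schemaName are carried as plain fields (set in the
+    // constructor above) rather than SqlNode operands, so none are exposed here.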
+        return Arrays.asList();
+    }
+
     @Override
     public SqlKind getKind() {
         return SqlKind.CONVERT_ALL_SEQUENCES;
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreate.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreate.java
index 2fa500347..d2325616c 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreate.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreate.java
@@ -88,6 +88,14 @@ public boolean createGsi() {
     return false;
   }
 
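+  /** Whether this statement creates a clustered columnar index (CCI); false by
+   * default, overridden by subclasses that add one (e.g. SqlAlterTable). */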
+  public boolean createCci() {
+    return false;
+  }
+
+  public boolean createGsiOrCci() {
+    return createGsi() || createCci();
+  }
+
 }
 
 // End SqlCreate.java
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateIndex.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateIndex.java
index 85ddab95b..e9fbb4dd4 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateIndex.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateIndex.java
@@ -16,6 +16,8 @@
 
 package org.apache.calcite.sql;
 
+import com.alibaba.polardbx.common.ColumnarTableOptions;
+import com.alibaba.polardbx.common.utils.GeneralUtil;
 import com.alibaba.polardbx.druid.sql.SQLUtils;
 import com.alibaba.polardbx.druid.sql.ast.SQLExpr;
 import com.alibaba.polardbx.druid.sql.ast.SQLStatement;
@@ -24,7 +26,6 @@
 import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlStatement;
 import com.alibaba.polardbx.druid.sql.dialect.mysql.visitor.MySqlOutputVisitor;
 import com.alibaba.polardbx.druid.util.JdbcConstants;
-import com.alibaba.polardbx.common.utils.GeneralUtil;
 import org.apache.calcite.sql.SqlIndexDefinition.SqlIndexResiding;
 import org.apache.calcite.sql.SqlIndexDefinition.SqlIndexType;
 import org.apache.calcite.sql.SqlWriter.Frame;
@@ -34,10 +35,13 @@
 import org.apache.calcite.sql.pretty.SqlPrettyWriter;
 import org.apache.calcite.sql.util.SqlString;
 import org.apache.calcite.util.ImmutableNullableList;
+import org.apache.commons.collections.CollectionUtils;
 
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;
 
@@ -53,6 +57,10 @@ public class SqlCreateIndex extends SqlCreate {
 
     private final SqlIdentifier originTableName;
     private final SqlIdentifier indexName;
+    /**
+     * Index name without suffix
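+     * (i.e. the name as the user specified it, before any generated suffix is appended)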
+     */
+    private final SqlIdentifier originIndexName;
     private final List columns;
     private final SqlIndexConstraintType constraintType;
     private final SqlIndexResiding indexResiding;
@@ -61,29 +69,58 @@ public class SqlCreateIndex extends SqlCreate {
     private final SqlIndexAlgorithmType algorithm;
     private final SqlIndexLockType lock;
     private final List covering;
+    private final List originCovering;
+    private final List clusteredKeys;
     private final SqlNode dbPartitionBy;
     private final SqlNode dbPartitions = null;
     private final SqlNode tbPartitionBy;
     private final SqlNode tbPartitions;
 
     private final SqlNode partitioning;
+    private final SqlNode originPartitioning;
     private final String sourceSql;
+    private final boolean clusteredIndex;
+    private final boolean columnarIndex;
+    private final SqlNode tableGroupName;
+    private final SqlNode engineName;
+    private final List dictColumns;
    //originalSql is exactly what the user input, while sourceSql may be rewritten
     private String originalSql;
     private String primaryTableDefinition;
     private SqlCreateTable primaryTableNode;
-    private final boolean clusteredIndex;
-    private final SqlNode tableGroupName;
+    private final boolean withImplicitTableGroup;
     private boolean visible = true;
 
-    private SqlCreateIndex(SqlParserPos pos, SqlIdentifier indexName, SqlIdentifier table,
-                           List columns, SqlIndexConstraintType constraintType,
-                           SqlIndexResiding indexResiding, SqlIndexType indexType, List options,
-                           SqlIndexAlgorithmType algorithm, SqlIndexLockType lock, List covering,
-                           SqlNode dbPartitionBy, SqlNode tbPartitionBy, SqlNode tbPartitions, SqlNode partitioning,
-                           String sourceSql, boolean clusteredIndex, SqlNode tableGroupName, boolean visible) {
+    private SqlCreateIndex(SqlParserPos pos,
+                           SqlIdentifier indexName,
+                           SqlIdentifier originIndexName,
+                           SqlIdentifier table,
+                           List columns,
+                           SqlIndexConstraintType constraintType,
+                           SqlIndexResiding indexResiding,
+                           SqlIndexType indexType,
+                           List options,
+                           SqlIndexAlgorithmType algorithm,
+                           SqlIndexLockType lock,
+                           List covering,
+                           List originCovering,
+                           SqlNode dbPartitionBy,
+                           SqlNode tbPartitionBy,
+                           SqlNode tbPartitions,
+                           SqlNode partitioning,
+                           SqlNode originPartitioning,
+                           List clusteredKeys,
+                           String sourceSql,
+                           boolean clusteredIndex,
+                           boolean columnarIndex,
+                           SqlNode tableGroupName,
+                           boolean withImplicitTableGroup,
+                           SqlNode engineName,
+                           List dictColumns,
+                           boolean visible) {
         super(OPERATOR, pos, false, false);
         this.indexName = indexName;
+        this.originIndexName = originIndexName;
         this.name = table;
         this.originTableName = table;
         this.columns = columns;
@@ -94,28 +131,60 @@ private SqlCreateIndex(SqlParserPos pos, SqlIdentifier indexName, SqlIdentifier
         this.algorithm = algorithm;
         this.lock = lock;
         this.covering = covering;
+        this.originCovering = originCovering;
         this.dbPartitionBy = dbPartitionBy;
         this.tbPartitionBy = tbPartitionBy;
         this.tbPartitions = tbPartitions;
         this.partitioning = partitioning;
+        this.originPartitioning = originPartitioning;
+        this.clusteredKeys = clusteredKeys;
         this.sourceSql = sourceSql;
         this.clusteredIndex = clusteredIndex;
+        this.columnarIndex = columnarIndex;
         this.tableGroupName = tableGroupName;
+        this.withImplicitTableGroup = withImplicitTableGroup;
+        this.engineName = engineName;
+        this.dictColumns = dictColumns;
         this.visible = visible;
     }
 
-    public SqlCreateIndex(SqlParserPos pos, boolean replace, boolean ifNotExists, SqlIdentifier originTableName,
-                          SqlIdentifier indexName, List columns,
-                          SqlIndexConstraintType constraintType, SqlIndexResiding indexResiding,
-                          SqlIndexType indexType, List options, SqlIndexAlgorithmType algorithm,
-                          SqlIndexLockType lock, List covering, SqlNode dbPartitionBy,
-                          SqlNode tbPartitionBy, SqlNode tbPartitions, SqlNode partitioning,
-                          String sourceSql, String primaryTableDefinition,
-                          SqlCreateTable primaryTableNode, boolean clusteredIndex,
-                          SqlNode tableGroupName, boolean visible) {
+    public SqlCreateIndex(SqlParserPos pos,
+                          boolean replace,
+                          boolean ifNotExists,
+                          SqlNode name,
+                          SqlIdentifier originTableName,
+                          SqlIdentifier indexName,
+                          SqlIdentifier originIndexName,
+                          List columns,
+                          SqlIndexConstraintType constraintType,
+                          SqlIndexResiding indexResiding,
+                          SqlIndexType indexType,
+                          List options,
+                          SqlIndexAlgorithmType algorithm,
+                          SqlIndexLockType lock,
+                          List covering,
+                          List originCovering,
+                          SqlNode dbPartitionBy,
+                          SqlNode tbPartitionBy,
+                          SqlNode tbPartitions,
+                          SqlNode partitioning,
+                          SqlNode originPartitioning,
+                          List clusteredKeys,
+                          String sourceSql,
+                          String primaryTableDefinition,
+                          SqlCreateTable primaryTableNode,
+                          boolean clusteredIndex,
+                          boolean columnarIndex,
+                          SqlNode tableGroupName,
+                          boolean withImplicitTableGroup,
+                          SqlNode engineName,
+                          List dictColumns,
+                          boolean visible) {
         super(OPERATOR, pos, replace, ifNotExists);
+        this.name = name;
         this.originTableName = originTableName;
         this.indexName = indexName;
+        this.originIndexName = originIndexName;
         this.columns = columns;
         this.constraintType = constraintType;
         this.indexResiding = indexResiding;
@@ -124,25 +193,35 @@ public SqlCreateIndex(SqlParserPos pos, boolean replace, boolean ifNotExists, Sq
         this.algorithm = algorithm;
         this.lock = lock;
         this.covering = covering;
+        this.originCovering = originCovering;
         this.dbPartitionBy = dbPartitionBy;
         this.tbPartitionBy = tbPartitionBy;
         this.tbPartitions = tbPartitions;
+        this.clusteredKeys = clusteredKeys;
         this.sourceSql = sourceSql;
         this.primaryTableDefinition = primaryTableDefinition;
         this.primaryTableNode = primaryTableNode;
         this.clusteredIndex = clusteredIndex;
+        this.columnarIndex = columnarIndex;
         this.partitioning = partitioning;
+        this.originPartitioning = originPartitioning;
         this.tableGroupName = tableGroupName;
+        this.withImplicitTableGroup = withImplicitTableGroup;
+        this.engineName = engineName;
+        this.dictColumns = dictColumns;
         this.visible = visible;
     }
 
     public static SqlCreateIndex createLocalIndex(SqlIdentifier indexName, SqlIdentifier tableName,
                                                   List columns,
+                                                  SqlNode tableGroupName,
+                                                  boolean withImplicitTableGroup,
                                                   SqlIndexConstraintType constraintType, boolean explicit,
                                                   SqlIndexType indexType, List options,
                                                   SqlIndexAlgorithmType algorithm, SqlIndexLockType lock, String sql,
                                                   SqlParserPos pos) {
         return new SqlCreateIndex(pos,
+            indexName,
             indexName,
             tableName,
             columns,
@@ -157,8 +236,15 @@ public static SqlCreateIndex createLocalIndex(SqlIdentifier indexName, SqlIdenti
             null,
             null,
             null,
+            null,
+            null,
+            null,
             sql,
             false,
+            false,
+            tableGroupName,
+            withImplicitTableGroup,
+            null,
             null,
             true);
     }
@@ -170,8 +256,10 @@ public static SqlCreateIndex createGlobalIndex(SqlParserPos pos, SqlIdentifier i
                                                    SqlIndexLockType lock, List covering,
                                                    SqlNode dbPartitionBy, SqlNode tbPartitionBy, SqlNode tbPartitions,
                                                    SqlNode partitioning, String sourceSql, SqlNode tableGroupName,
+                                                   boolean withImplicitTableGroup,
                                                    boolean visible) {
         return new SqlCreateIndex(pos,
+            indexName,
             indexName,
             table,
             columns,
@@ -182,13 +270,20 @@ public static SqlCreateIndex createGlobalIndex(SqlParserPos pos, SqlIdentifier i
             algorithm,
             lock,
             covering,
+            covering,
             dbPartitionBy,
             tbPartitionBy,
             tbPartitions,
             partitioning,
+            partitioning,
+            null,
             sourceSql,
             false,
+            false,
             tableGroupName,
+            withImplicitTableGroup,
+            null,
+            null,
             visible);
     }
 
@@ -200,8 +295,53 @@ public static SqlCreateIndex createClusteredIndex(SqlParserPos pos, SqlIdentifie
                                                       SqlNode dbPartitionBy, SqlNode tbPartitionBy,
                                                       SqlNode tbPartitions,
                                                       SqlNode partitioning, String sourceSql,
-                                                      SqlNode tableGroupName, boolean visible) {
+                                                      SqlNode tableGroupName,
+                                                      boolean withImplicitTableGroup, boolean visible) {
+        return new SqlCreateIndex(pos,
+            indexName,
+            indexName,
+            table,
+            columns,
+            constraintType,
+            SqlIndexResiding.GLOBAL,
+            indexType,
+            options,
+            algorithm,
+            lock,
+            covering,
+            covering,
+            dbPartitionBy,
+            tbPartitionBy,
+            tbPartitions,
+            partitioning,
+            partitioning,
+            null,
+            sourceSql,
+            true,
+            false,
+            tableGroupName,
+            withImplicitTableGroup,
+            null,
+            null,
+            visible);
+    }
+
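+    /**
+     * Create the definition of a clustered columnar index (CCI), unparsed as, e.g.
+     * (illustrative): CREATE CLUSTERED COLUMNAR INDEX `idx` (`a`) PARTITION BY ... ENGINE = ...
+     */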
+    public static SqlCreateIndex createColumnarIndex(SqlParserPos pos, SqlIdentifier indexName, SqlIdentifier table,
+                                                     List<SqlIndexColumnName> columns,
+                                                     SqlIndexConstraintType constraintType, SqlIndexType indexType,
+                                                     List<SqlIndexOption> options, SqlIndexAlgorithmType algorithm,
+                                                     SqlIndexLockType lock, List<SqlIndexColumnName> covering,
+                                                     SqlNode dbPartitionBy, SqlNode tbPartitionBy,
+                                                     SqlNode tbPartitions,
+                                                     SqlNode partitioning, List<SqlIndexColumnName> clusteredKeys,
+                                                     String sourceSql,
+                                                     SqlNode tableGroupName,
+                                                     boolean withImplicitTableGroup,
+                                                     SqlNode engineName,
+                                                     List<SqlIndexColumnName> dictKeys,
+                                                     boolean visible) {
         return new SqlCreateIndex(pos,
+            indexName,
             indexName,
             table,
             columns,
@@ -212,16 +352,31 @@ public static SqlCreateIndex createClusteredIndex(SqlParserPos pos, SqlIdentifie
             algorithm,
             lock,
             covering,
+            covering,
             dbPartitionBy,
             tbPartitionBy,
             tbPartitions,
             partitioning,
+            partitioning,
+            clusteredKeys,
             sourceSql,
             true,
+            true,
             tableGroupName,
+            withImplicitTableGroup,
+            engineName,
+            dictKeys,
             visible);
     }
 
+    public SqlNode getEngineName() {
+        return engineName;
+    }
+
+    public List<SqlIndexColumnName> getDictColumns() {
+        return dictColumns;
+    }
+
     @Override
     public List<SqlNode> getOperandList() {
         return ImmutableNullableList.of(name,
@@ -241,7 +396,7 @@ public List getOperandList() {
 
     @Override
     public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
-        unparse(writer, leftPrec, rightPrec, this.indexResiding, false);
+        unparse(writer, leftPrec, rightPrec, this.indexResiding, false, false);
     }
 
     @Override
@@ -253,8 +408,13 @@ public boolean createClusteredIndex() {
         return clusteredIndex;
     }
 
+    @Override
+    public boolean createCci() {
+        return columnarIndex;
+    }
+
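+    /**
+     * Unparse the index definition; with withOriginNames set, emit the user-specified
+     * index name, covering columns and partitioning instead of the rewritten ones
+     * (e.g. the index name with a random suffix appended)
+     */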
     public void unparse(SqlWriter writer, int leftPrec, int rightPrec, SqlIndexResiding indexResiding,
-                        boolean withOriginTableName) {
+                        boolean withOriginTableName, boolean withOriginNames) {
         final boolean isGlobal = SqlUtil.isGlobal(indexResiding);
 
         final SqlWriter.Frame frame = writer.startList(SqlWriter.FrameTypeEnum.SELECT, "CREATE", "");
@@ -263,12 +423,22 @@ public void unparse(SqlWriter writer, int leftPrec, int rightPrec, SqlIndexResid
             SqlUtil.wrapSqlLiteralSymbol(constraintType).unparse(writer, leftPrec, rightPrec);
         }
 
-        if (isGlobal) {
+        if (clusteredIndex) {
+            writer.keyword("CLUSTERED");
+        }
+
+        if (columnarIndex) {
+            writer.keyword("COLUMNAR");
+        } else if (isGlobal) {
             SqlUtil.wrapSqlLiteralSymbol(indexResiding).unparse(writer, leftPrec, rightPrec);
         }
 
         writer.keyword("INDEX");
-        indexName.unparse(writer, leftPrec, rightPrec);
+        if (withOriginNames) {
+            originIndexName.unparse(writer, leftPrec, rightPrec);
+        } else {
+            indexName.unparse(writer, leftPrec, rightPrec);
+        }
 
         if (null != indexType) {
             writer.keyword("USING");
@@ -289,13 +459,25 @@ public void unparse(SqlWriter writer, int leftPrec, int rightPrec, SqlIndexResid
         }
 
         if (isGlobal) {
-            if (null != covering && !covering.isEmpty()) {
+            final List<SqlIndexColumnName> coveringToShow = withOriginNames ? originCovering : covering;
+            if (null != coveringToShow && !coveringToShow.isEmpty()) {
                 writer.keyword("COVERING");
                 final Frame frame2 = writer.startList(FrameTypeEnum.FUN_CALL, "(", ")");
-                SqlUtil.wrapSqlNodeList(covering).commaList(writer);
+                SqlUtil.wrapSqlNodeList(coveringToShow).commaList(writer);
                 writer.endList(frame2);
             }
+        }
 
+        if (columnarIndex) {
+            if (clusteredKeys != null && !clusteredKeys.isEmpty()) {
+                writer.keyword("CLUSTERED KEY");
+                final Frame frame2 = writer.startList(FrameTypeEnum.FUN_CALL, "(", ")");
+                SqlUtil.wrapSqlNodeList(clusteredKeys).commaList(writer);
+                writer.endList(frame2);
+            }
+        }
+
+        if (isGlobal || columnarIndex) {
             final boolean quoteAllIdentifiers = writer.isQuoteAllIdentifiers();
             if (writer instanceof SqlPrettyWriter) {
                 ((SqlPrettyWriter) writer).setQuoteAllIdentifiers(false);
@@ -316,11 +498,23 @@ public void unparse(SqlWriter writer, int leftPrec, int rightPrec, SqlIndexResid
                 tbPartitions.unparse(writer, leftPrec, rightPrec);
             }
 
+            final SqlNode partitioningToShow = withOriginNames ? originPartitioning : partitioning;
+            if (null != partitioningToShow) {
+                writer.print(" ");
+                partitioningToShow.unparse(writer, leftPrec, rightPrec);
+            }
+
             if (writer instanceof SqlPrettyWriter) {
                 ((SqlPrettyWriter) writer).setQuoteAllIdentifiers(quoteAllIdentifiers);
             }
         }
 
+        if (columnarIndex && null != engineName) {
+            writer.keyword("ENGINE");
+            writer.keyword("=");
+            engineName.unparse(writer, leftPrec, rightPrec);
+        }
+
         if (null != options) {
             for (SqlIndexOption option : options) {
                 option.unparse(writer, leftPrec, rightPrec);
@@ -353,6 +547,10 @@ public void setTargetTable(SqlIdentifier sqlIdentifier) {
     }
 
     private String prepare() {
+        return prepare(false);
+    }
+
+    private String prepare(boolean withOriginNames) {
         String sqlForExecute = sourceSql;
         if (SqlUtil.isGlobal(indexResiding)) {
             // generate CREATE INDEX for executing on MySQL
@@ -362,7 +560,7 @@ private String prepare() {
             writer.setIndentation(0);
             final int leftPrec = getOperator().getLeftPrec();
             final int rightPrec = getOperator().getRightPrec();
-            unparse(writer, leftPrec, rightPrec, indexResiding, true);
+            unparse(writer, leftPrec, rightPrec, indexResiding, true, withOriginNames);
             sqlForExecute = writer.toSqlString().getSql();
         }
 
@@ -401,6 +599,10 @@ public SqlString toSqlString(SqlDialect dialect) {
         return new SqlString(dialect, sql);
     }
 
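+    /**
+     * Generate the SQL text of this index; with withOriginNames set, the original
+     * (user-specified) names are used instead of the rewritten ones
+     */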
+    public String toString(boolean withOriginNames) {
+        return prepare(withOriginNames);
+    }
+
     public SqlNode getDbPartitionBy() {
         return dbPartitionBy;
     }
@@ -421,6 +623,10 @@ public SqlIdentifier getIndexName() {
         return indexName;
     }
 
+    public SqlIdentifier getOriginIndexName() {
+        return originIndexName;
+    }
+
     public List<SqlIndexColumnName> getColumns() {
         return columns;
     }
@@ -429,21 +635,16 @@ public List getCovering() {
         return covering;
     }
 
-    public SqlNode getTableGroupName() {
-        return tableGroupName;
-    }
-
-    public static enum SqlIndexConstraintType {
-        UNIQUE, FULLTEXT, SPATIAL;
-
+    public List<SqlIndexColumnName> getOriginCovering() {
+        return originCovering;
     }
 
-    public static enum SqlIndexAlgorithmType {
-        DEFAULT, INPLACE, COPY;
+    public List<SqlIndexColumnName> getClusteredKeys() {
+        return clusteredKeys;
     }
 
-    public static enum SqlIndexLockType {
-        DEFAULT, NONE, SHARED, EXCLUSIVE;
+    public SqlNode getTableGroupName() {
+        return tableGroupName;
     }
 
     public String getPrimaryTableDefinition() {
@@ -506,6 +707,46 @@ public SqlNode getPartitioning() {
         return partitioning;
     }
 
+    public SqlNode getOriginPartitioning() {
+        return originPartitioning;
+    }
+
+    @Override
+    public SqlNode clone(SqlParserPos pos) {
+        return new SqlCreateIndex(pos,
+            replace,
+            ifNotExists,
+            name,
+            originTableName,
+            indexName,
+            originIndexName,
+            columns,
+            constraintType,
+            indexResiding,
+            indexType,
+            options,
+            algorithm,
+            lock,
+            covering,
+            originCovering,
+            dbPartitionBy,
+            tbPartitionBy,
+            tbPartitions,
+            partitioning,
+            originPartitioning,
+            clusteredKeys,
+            sourceSql,
+            primaryTableDefinition,
+            primaryTableNode,
+            clusteredIndex,
+            columnarIndex,
+            tableGroupName,
+            withImplicitTableGroup,
+            engineName,
+            dictColumns,
+            visible);
+    }
+
     public SqlCreateIndex rebuildCovering(Collection<String> coveringColumns) {
         if (GeneralUtil.isEmpty(coveringColumns)) {
             return this;
@@ -518,8 +759,10 @@ public SqlCreateIndex rebuildCovering(Collection coveringColumns) {
         return new SqlCreateIndex(pos,
             replace,
             ifNotExists,
+            name,
             originTableName,
             indexName,
+            originIndexName,
             columns,
             constraintType,
             indexResiding,
@@ -528,21 +771,36 @@ public SqlCreateIndex rebuildCovering(Collection coveringColumns) {
             algorithm,
             lock,
             newCovering,
+            originCovering,
             dbPartitionBy,
             tbPartitionBy,
             tbPartitions,
             partitioning,
+            originPartitioning,
+            clusteredKeys,
             sourceSql,
             primaryTableDefinition,
             primaryTableNode,
             clusteredIndex,
+            columnarIndex,
             tableGroupName,
+            withImplicitTableGroup,
+            engineName,
+            dictColumns,
             this.visible);
     }
 
-    public SqlCreateIndex rebuildToGsi(SqlIdentifier newName, SqlNode dbpartition, boolean clustered) {
+    /**
+     * Rebuild the GSI definition with a new index name and a full partition definition.
+     *
+     * @param newName New index name, with a random suffix appended
+     * @param dbPartition Full partition definition, with DBPARTITION BY appended
+     * @return Copied SqlCreateIndex
+     */
+    public SqlCreateIndex rebuildToGsi(SqlIdentifier newName, SqlNode dbPartition) {
         return new SqlCreateIndex(pos,
             null == newName ? indexName : newName,
+            originIndexName,
             originTableName,
             columns,
             constraintType,
@@ -551,20 +809,35 @@ public SqlCreateIndex rebuildToGsi(SqlIdentifier newName, SqlNode dbpartition, b
             options,
             algorithm,
             lock,
-            clustered ? null : covering,
-            null == dbpartition ? dbPartitionBy : dbpartition,
-            null == dbpartition ? tbPartitionBy : null,
-            null == dbpartition ? tbPartitions : null,
+            covering,
+            originCovering,
+            null == dbPartition ? dbPartitionBy : dbPartition,
+            null == dbPartition ? tbPartitionBy : null,
+            null == dbPartition ? tbPartitions : null,
             partitioning,
+            originPartitioning,
+            columnarIndex ? clusteredKeys : null,
             sourceSql,
-            clustered,
+            clusteredIndex,
+            columnarIndex,
             tableGroupName,
+            withImplicitTableGroup,
+            engineName,
+            dictColumns,
             this.visible);
     }
 
-    public SqlCreateIndex rebuildToGsiNewPartition(SqlIdentifier newName, SqlNode newPartition, boolean clustered) {
+    /**
+     * Rebuild the GSI definition with a new index name and a full partition definition.
+     *
+     * @param newName New index name, with a random suffix appended
+     * @param newPartition Full partition definition, with PARTITION BY/PARTITIONS appended
+     * @return Copied SqlCreateIndex
+     */
+    public SqlCreateIndex rebuildToGsiNewPartition(SqlIdentifier newName, SqlNode newPartition) {
         return new SqlCreateIndex(pos,
             null == newName ? indexName : newName,
+            originIndexName,
             originTableName,
             columns,
             constraintType,
@@ -573,20 +846,28 @@ public SqlCreateIndex rebuildToGsiNewPartition(SqlIdentifier newName, SqlNode ne
             options,
             algorithm,
             lock,
-            clustered ? null : covering,
+            covering,
+            originCovering,
             null == newPartition ? dbPartitionBy : null,
             null == newPartition ? tbPartitionBy : null,
             null == newPartition ? tbPartitions : null,
             null == newPartition ? partitioning : newPartition,
+            originPartitioning,
+            columnarIndex ? clusteredKeys : null,
             sourceSql,
-            clustered,
+            clusteredIndex,
+            columnarIndex,
             tableGroupName,
+            withImplicitTableGroup,
+            engineName,
+            dictColumns,
             this.visible);
     }
 
     public SqlCreateIndex rebuildToExplicitLocal(SqlIdentifier newName, String sql) {
         return new SqlCreateIndex(pos,
             null == newName ? indexName : newName,
+            originIndexName,
             originTableName,
             columns,
             constraintType,
@@ -600,15 +881,23 @@ public SqlCreateIndex rebuildToExplicitLocal(SqlIdentifier newName, String sql)
             null,
             null,
             null,
+            null,
+            null,
+            null,
             null == sql ? sourceSql : sql,
             false,
+            false,
             tableGroupName,
+            withImplicitTableGroup,
+            engineName,
+            dictColumns,
             this.visible);
     }
 
     public SqlCreateIndex replaceTableName(SqlIdentifier newTableName) {
         return new SqlCreateIndex(pos,
             indexName,
+            originIndexName,
             null == newTableName ? originTableName : newTableName,
             columns,
             constraintType,
@@ -618,13 +907,81 @@ public SqlCreateIndex replaceTableName(SqlIdentifier newTableName) {
             algorithm,
             lock,
             covering,
+            originCovering,
+            dbPartitionBy,
+            tbPartitionBy,
+            tbPartitions,
+            partitioning,
+            originPartitioning,
+            clusteredKeys,
+            sourceSql,
+            clusteredIndex,
+            columnarIndex,
+            tableGroupName,
+            withImplicitTableGroup,
+            engineName,
+            dictColumns,
+            this.visible);
+    }
+
+    public SqlCreateIndex replaceIndexName(SqlIdentifier newIndexName) {
+        return new SqlCreateIndex(pos,
+            null == newIndexName ? indexName : newIndexName,
+            originIndexName,
+            originTableName,
+            columns,
+            constraintType,
+            indexResiding,
+            indexType,
+            options,
+            algorithm,
+            lock,
+            covering,
+            originCovering,
             dbPartitionBy,
             tbPartitionBy,
             tbPartitions,
             partitioning,
+            originPartitioning,
+            clusteredKeys,
             sourceSql,
             clusteredIndex,
+            columnarIndex,
             tableGroupName,
+            withImplicitTableGroup,
+            engineName,
+            dictColumns,
             this.visible);
     }
+
+    /**
+     * Build the options of a columnar index, currently the dictionary-column list
+     */
+    public Map<String, String> getColumnarOptions() {
+        Map<String, String> options = new HashMap<>();
+        if (CollectionUtils.isNotEmpty(dictColumns)) {
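+            // e.g. dict columns (a, b) are recorded under ColumnarTableOptions.DICTIONARY_COLUMNS
+            // as the backtick-quoted, comma-separated value "`a`,`b`"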
+            String columns = dictColumns.stream()
+                    .map(sqlIndexColumnName -> SqlIdentifier.surroundWithBacktick(sqlIndexColumnName.getColumnNameStr()))
+                    .collect(Collectors.joining(","));
+            options.put(ColumnarTableOptions.DICTIONARY_COLUMNS, columns);
+        }
+        return options;
+    }
+
+    public enum SqlIndexConstraintType {
+        UNIQUE, FULLTEXT, SPATIAL
+    }
+
+    public enum SqlIndexAlgorithmType {
+        DEFAULT, INPLACE, COPY
+    }
+
+    public enum SqlIndexLockType {
+        DEFAULT, NONE, SHARED, EXCLUSIVE
+    }
+
+    public boolean isWithImplicitTableGroup() {
+        return withImplicitTableGroup;
+    }
 }
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateSecurityEntity.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateSecurityEntity.java
new file mode 100644
index 000000000..b9005127b
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateSecurityEntity.java
@@ -0,0 +1,109 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+/**
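+ * CREATE SECURITY ENTITY statement, unparsed as:
+ * CREATE SECURITY ENTITY entity_type entity_key entity_attr
+ *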
+ * @author pangzhaoxing
+ */
+public class SqlCreateSecurityEntity extends SqlDal {
+    private static final SqlOperator OPERATOR = new SqlCreateSecurityEntity.SqlCreateSecurityEntityOperator();
+
+    private SqlIdentifier entityType;
+
+    private SqlIdentifier entityKey;
+
+    private SqlIdentifier entityAttr;
+
+    public SqlCreateSecurityEntity(SqlParserPos pos, SqlIdentifier entityType, SqlIdentifier entityKey,
+                                   SqlIdentifier entityAttr) {
+        super(pos);
+        this.entityType = entityType;
+        this.entityKey = entityKey;
+        this.entityAttr = entityAttr;
+    }
+
+    @Override
+    public SqlKind getKind() {
+        return SqlKind.CREATE_SECURITY_ENTITY;
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword("CREATE SECURITY ENTITY");
+        entityType.unparse(writer, leftPrec, rightPrec);
+        entityKey.unparse(writer, leftPrec, rightPrec);
+        entityAttr.unparse(writer, leftPrec, rightPrec);
+    }
+
+    public SqlIdentifier getEntityType() {
+        return entityType;
+    }
+
+    public void setEntityType(SqlIdentifier entityType) {
+        this.entityType = entityType;
+    }
+
+    public SqlIdentifier getEntityKey() {
+        return entityKey;
+    }
+
+    public void setEntityKey(SqlIdentifier entityKey) {
+        this.entityKey = entityKey;
+    }
+
+    public SqlIdentifier getEntityAttr() {
+        return entityAttr;
+    }
+
+    public void setEntityAttr(SqlIdentifier entityAttr) {
+        this.entityAttr = entityAttr;
+    }
+
+    public static class SqlCreateSecurityEntityOperator extends SqlSpecialOperator {
+
+        public SqlCreateSecurityEntityOperator() {
+            super("CREATE_SECURITY_ENTITY", SqlKind.CREATE_SECURITY_ENTITY);
+        }
+
+        @Override
+        public RelDataType deriveType(final SqlValidator validator, final SqlValidatorScope scope, final SqlCall call) {
+            RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            RelDataType columnType = typeFactory.createSqlType(SqlTypeName.CHAR);
+
+            return typeFactory.createStructType(
+                ImmutableList.of((RelDataTypeField) new RelDataTypeFieldImpl("CREATE_SECURITY_ENTITY",
+                    0,
+                    columnType)));
+        }
+    }
+
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateSecurityLabel.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateSecurityLabel.java
new file mode 100644
index 000000000..0227bdfc4
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateSecurityLabel.java
@@ -0,0 +1,115 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+/**
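+ * CREATE SECURITY LABEL statement, unparsed as:
+ * CREATE SECURITY LABEL policy_name.label_name 'label_content'
+ *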
+ * @author pangzhaoxing
+ */
+public class SqlCreateSecurityLabel extends SqlDal {
+
+    private static final SqlOperator OPERATOR = new SqlCreateSecurityLabelOperator();
+
+    private SqlIdentifier labelName;
+    private SqlIdentifier policyName;
+    private SqlCharStringLiteral labelContent;
+
+    protected SqlCreateSecurityLabel(SqlParserPos pos) {
+        super(pos);
+    }
+
+    public SqlCreateSecurityLabel(SqlParserPos pos, SqlIdentifier labelName, SqlIdentifier policyName,
+                                  SqlCharStringLiteral labelContent) {
+        super(pos);
+        this.labelName = labelName;
+        this.policyName = policyName;
+        this.labelContent = labelContent;
+    }
+
+    @Override
+    public SqlKind getKind() {
+        return SqlKind.CREATE_SECURITY_LABEL;
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword("CREATE SECURITY LABEL");
+        writer.keyword(policyName.toString() + "." + labelName.toString());
+        labelContent.unparse(writer, leftPrec, rightPrec);
+    }
+
+    public SqlIdentifier getLabelName() {
+        return labelName;
+    }
+
+    public void setLabelName(SqlIdentifier labelName) {
+        this.labelName = labelName;
+    }
+
+    public SqlIdentifier getPolicyName() {
+        return policyName;
+    }
+
+    public void setPolicyName(SqlIdentifier policyName) {
+        this.policyName = policyName;
+    }
+
+    public SqlCharStringLiteral getLabelContent() {
+        return labelContent;
+    }
+
+    public void setLabelContent(SqlCharStringLiteral labelContent) {
+        this.labelContent = labelContent;
+    }
+
+    public static class SqlCreateSecurityLabelOperator extends SqlSpecialOperator {
+
+        public SqlCreateSecurityLabelOperator() {
+            super("CREATE_SECURITY_LABEL", SqlKind.CREATE_SECURITY_LABEL);
+        }
+
+        @Override
+        public RelDataType deriveType(final SqlValidator validator, final SqlValidatorScope scope, final SqlCall call) {
+            RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            RelDataType columnType = typeFactory.createSqlType(SqlTypeName.CHAR);
+
+            return typeFactory.createStructType(
+                ImmutableList.of((RelDataTypeField) new RelDataTypeFieldImpl("CREATE_SECURITY_LABEL",
+                    0,
+                    columnType)));
+        }
+    }
+
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateSecurityLabelComponent.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateSecurityLabelComponent.java
new file mode 100644
index 000000000..0876276a5
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateSecurityLabelComponent.java
@@ -0,0 +1,114 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+/**
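+ * CREATE SECURITY LABEL COMPONENT statement, unparsed as:
+ * CREATE SECURITY LABEL COMPONENT component_name component_type 'component_content'
+ *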
+ * @author pangzhaoxing
+ */
+public class SqlCreateSecurityLabelComponent extends SqlDal {
+
+    private static final SqlOperator OPERATOR =
+        new SqlCreateSecurityLabelComponentOperator();
+
+    private SqlIdentifier componentName;
+    private SqlIdentifier componentType;
+    private SqlCharStringLiteral componentContent;
+
+    public SqlCreateSecurityLabelComponent(SqlParserPos pos, SqlIdentifier componentName, SqlIdentifier componentType,
+                                           SqlCharStringLiteral componentContent) {
+        super(pos);
+        this.componentName = componentName;
+        this.componentType = componentType;
+        this.componentContent = componentContent;
+    }
+
+    protected SqlCreateSecurityLabelComponent(SqlParserPos pos) {
+        super(pos);
+    }
+
+    @Override
+    public SqlKind getKind() {
+        return SqlKind.CREATE_SECURITY_LABEL_COMPONENT;
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword("CREATE SECURITY LABEL COMPONENT");
+        this.componentName.unparse(writer, leftPrec, rightPrec);
+        this.componentType.unparse(writer, leftPrec, rightPrec);
+        this.componentContent.unparse(writer, leftPrec, rightPrec);
+    }
+
+    public static class SqlCreateSecurityLabelComponentOperator extends SqlSpecialOperator {
+
+        public SqlCreateSecurityLabelComponentOperator() {
+            super("CREATE_SECURITY_LABEL_COMPONENT", SqlKind.CREATE_SECURITY_LABEL_COMPONENT);
+        }
+
+        @Override
+        public RelDataType deriveType(final SqlValidator validator, final SqlValidatorScope scope, final SqlCall call) {
+            RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            RelDataType columnType = typeFactory.createSqlType(SqlTypeName.CHAR);
+
+            return typeFactory.createStructType(
+                ImmutableList.of((RelDataTypeField) new RelDataTypeFieldImpl("CREATE_SECURITY_LABEL_COMPONENT",
+                    0,
+                    columnType)));
+        }
+    }
+
+    public SqlIdentifier getComponentName() {
+        return componentName;
+    }
+
+    public void setComponentName(SqlIdentifier componentName) {
+        this.componentName = componentName;
+    }
+
+    public SqlIdentifier getComponentType() {
+        return componentType;
+    }
+
+    public void setComponentType(SqlIdentifier componentType) {
+        this.componentType = componentType;
+    }
+
+    public SqlCharStringLiteral getComponentContent() {
+        return componentContent;
+    }
+
+    public void setComponentContent(SqlCharStringLiteral componentContent) {
+        this.componentContent = componentContent;
+    }
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateSecurityPolicy.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateSecurityPolicy.java
new file mode 100644
index 000000000..94a28ef12
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateSecurityPolicy.java
@@ -0,0 +1,104 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+/**
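+ * CREATE SECURITY POLICY statement, unparsed as:
+ * CREATE SECURITY POLICY policy_name COMPONENTS 'policy_components'
+ *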
+ * @author pangzhaoxing
+ */
+public class SqlCreateSecurityPolicy extends SqlDal {
+
+    private static final SqlOperator OPERATOR =
+        new SqlCreateSecurityPolicyOperator();
+
+    private SqlIdentifier policyName;
+    private SqlCharStringLiteral policyComponents;
+
+    protected SqlCreateSecurityPolicy(SqlParserPos pos) {
+        super(pos);
+    }
+
+    public SqlCreateSecurityPolicy(SqlParserPos pos, SqlIdentifier policyName, SqlCharStringLiteral policyComponents) {
+        super(pos);
+        this.policyName = policyName;
+        this.policyComponents = policyComponents;
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword("CREATE SECURITY POLICY");
+        this.policyName.unparse(writer, leftPrec, rightPrec);
+        writer.keyword("COMPONENTS");
+        this.policyComponents.unparse(writer, leftPrec, rightPrec);
+    }
+
+    @Override
+    public SqlKind getKind() {
+        return SqlKind.CREATE_SECURITY_POLICY;
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    public SqlIdentifier getPolicyName() {
+        return policyName;
+    }
+
+    public void setPolicyName(SqlIdentifier policyName) {
+        this.policyName = policyName;
+    }
+
+    public SqlCharStringLiteral getPolicyComponents() {
+        return policyComponents;
+    }
+
+    public void setPolicyComponents(SqlCharStringLiteral policyComponents) {
+        this.policyComponents = policyComponents;
+    }
+
+    public static class SqlCreateSecurityPolicyOperator extends SqlSpecialOperator {
+
+        public SqlCreateSecurityPolicyOperator() {
+            super("CREATE_SECURITY_POLICY", SqlKind.CREATE_SECURITY_POLICY);
+        }
+
+        @Override
+        public RelDataType deriveType(final SqlValidator validator, final SqlValidatorScope scope, final SqlCall call) {
+            RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            RelDataType columnType = typeFactory.createSqlType(SqlTypeName.CHAR);
+
+            return typeFactory.createStructType(
+                ImmutableList.of((RelDataTypeField) new RelDataTypeFieldImpl("CREATE_SECURITY_POLICY",
+                    0,
+                    columnType)));
+        }
+    }
+
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateTable.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateTable.java
index 6d9b2c594..ca20cf251 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateTable.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateTable.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.calcite.sql;
 
 import com.alibaba.polardbx.common.ArchiveMode;
@@ -39,6 +40,8 @@
 import com.alibaba.polardbx.druid.sql.ast.expr.SQLIntegerExpr;
 import com.alibaba.polardbx.druid.sql.ast.expr.SQLMethodInvokeExpr;
 import com.alibaba.polardbx.druid.sql.ast.expr.SQLPropertyExpr;
+import com.alibaba.polardbx.druid.sql.ast.statement.SQLAssignItem;
+import com.alibaba.polardbx.druid.sql.ast.statement.SQLCharacterDataType;
 import com.alibaba.polardbx.druid.sql.ast.statement.SQLColumnDefinition;
 import com.alibaba.polardbx.druid.sql.ast.statement.SQLColumnPrimaryKey;
 import com.alibaba.polardbx.druid.sql.ast.statement.SQLColumnUniqueKey;
@@ -155,6 +158,7 @@ public class SqlCreateTable extends SqlCreate {
     private List<Pair<SqlIdentifier, SqlIndexDefinition>> globalKeys;
     private List<Pair<SqlIdentifier, SqlIndexDefinition>> globalUniqueKeys;
     private List<Pair<SqlIdentifier, SqlIndexDefinition>> clusteredKeys;
+    private List<Pair<SqlIdentifier, SqlIndexDefinition>> columnarKeys;
     private List<Pair<SqlIdentifier, SqlIndexDefinition>> clusteredUniqueKeys;
     private List<Pair<SqlIdentifier, SqlIndexDefinition>> keys;
     private List<Pair<SqlIdentifier, SqlIndexDefinition>> fullTextKeys;
@@ -165,6 +169,7 @@ public class SqlCreateTable extends SqlCreate {
     private List<String> logicalReferencedTables = null;
     private List<String> physicalReferencedTables = null;
     private List<ForeignKeyData> addedForeignKeys;
+    private List<Boolean> isAddLogicalForeignKeyOnly;
     public boolean pushDownForeignKeys;
 
     /**
@@ -196,13 +201,21 @@ public class SqlCreateTable extends SqlCreate {
     private SqlNode localPartition = null;
 
     private SqlNode tableGroupName = null;
+    private boolean withImplicitTableGroup = false;
     private SqlNode joinGroupName = null;
     private SQLPartitionByRange localPartitionSuffix;
 
     private Engine engine = null;
     private ArchiveMode archiveMode;
+
+    private String loadTableName = null;
+
     private String loadTableSchema = null;
 
+    private List dictColumns = null;
+
+    private String securityPolicy;
+
     private final static int MAX_AUTO_INDEX_LEN = 191;
 
     boolean ignore = false;
@@ -210,6 +223,7 @@ public class SqlCreateTable extends SqlCreate {
     // used for CREATE TABLE ... REPLACE SELECT;
     // different from the base class's replace
     boolean replaceInto = false;
+    protected boolean onlyConvertTableMode = false;
 
     public void setIgnore(boolean ignore) {
         this.ignore = ignore;
@@ -227,6 +241,14 @@ public SqlNode getAsTableName() {
         return asTableName;
     }
 
+    public boolean isOnlyConvertTableMode() {
+        return onlyConvertTableMode;
+    }
+
+    public void setOnlyConvertTableMode(boolean onlyConvertTableMode) {
+        this.onlyConvertTableMode = onlyConvertTableMode;
+    }
+
     public boolean isIgnore() {
         return ignore;
     }
@@ -241,13 +263,22 @@ public String getLoadTableSchema() {
     public void setLoadTableSchema(String loadTableSchema) {
         this.loadTableSchema = loadTableSchema;
     }
-    private String loadTableName = null;
+
     public String getLoadTableName() {
         return loadTableName;
     }
     public void setLoadTableName(String loadTableName) {
         this.loadTableName = loadTableName;
     }
+
+    public List getDictColumns() {
+        return dictColumns;
+    }
+
+    public void setDictColumns(List dictColumns) {
+        this.dictColumns = dictColumns;
+    }
+
     public Engine getEngine() {
         return engine;
     }
@@ -285,6 +316,14 @@ public void setEncryption(String defaultEncryption) {
         this.encryption = defaultEncryption;
     }
 
+    public String getSecurityPolicy() {
+        return securityPolicy;
+    }
+
+    public void setSecurityPolicy(String securityPolicy) {
+        this.securityPolicy = securityPolicy;
+    }
+
     public void setRowFormat(String rf) {
         rowFormat = rf;
     }
@@ -325,6 +364,14 @@ public boolean isSelect() {
         return isSelect;
     }
 
+    public boolean isWithImplicitTableGroup() {
+        return withImplicitTableGroup;
+    }
+
+    public void setWithImplicitTableGroup(boolean withImplicitTableGroup) {
+        this.withImplicitTableGroup = withImplicitTableGroup;
+    }
+
     private static final SqlOperator OPERATOR = new SqlSpecialOperator("CREATE TABLE", SqlKind.CREATE_TABLE);
 
     /**
@@ -368,8 +415,35 @@ public SqlCreateTable(SqlParserPos pos, boolean replace, boolean ifNotExists, Sq
                           List> spatialKeys,
                           List> foreignKeys, List checks,
                           SqlIdentifier primaryKeyConstraint, boolean hasPrimaryKeyConstraint, SqlNode sqlPartition,
-                          SqlNode localPartition, SqlNode tableGroupName, SqlNode joinGroupName,
-                          List addedForeignKeys, String defaultCharset, String defaultCollation) {
+                          SqlNode localPartition,
+                          SqlNode tableGroupName,
+                          SqlNode joinGroupName,
+                          boolean dbPartition,
+                          List<ForeignKeyData> addedForeignKeys,
+                          List<Boolean> isAddLogicalForeignKeyOnly,
+                          ArchiveMode archiveMode,
+                          SqlNode asTableName,
+                          boolean autoSplit,
+                          List<Pair<SqlIdentifier, SqlIndexDefinition>> clusteredKeys,
+                          List<Pair<SqlIdentifier, SqlIndexDefinition>> clusteredUniqueKeys,
+                          List<Pair<SqlIdentifier, SqlIndexDefinition>> columnarKeys,
+                          String comment,
+                          String defaultCharset,
+                          String defaultCollation,
+                          String encryption,
+                          Engine engine,
+                          boolean ignore,
+                          boolean isSelect,
+                          String loadTableName,
+                          String loadTableSchema,
+                          String locality,
+                          List<String> logicalReferencedTables,
+                          List mappingRules,
+                          String originalSql,
+                          List<String> physicalReferencedTables,
+                          boolean pushDownForeignKeys,
+                          boolean replaceInto,
+                          String rowFormat,
+                          boolean uniqueShardingKey) {
         super(OPERATOR, pos, replace, ifNotExists);
         this.name = name;
         this.likeTableName = likeTableName;
@@ -402,9 +476,33 @@ public SqlCreateTable(SqlParserPos pos, boolean replace, boolean ifNotExists, Sq
         this.localPartition = localPartition;
         this.tableGroupName = tableGroupName;
         this.joinGroupName = joinGroupName;
+        this.DbPartition = dbPartition;
         this.addedForeignKeys = addedForeignKeys;
+        this.isAddLogicalForeignKeyOnly = isAddLogicalForeignKeyOnly;
+        this.archiveMode = archiveMode;
+        this.asTableName = asTableName;
+        this.autoSplit = autoSplit;
+        this.clusteredKeys = clusteredKeys;
+        this.clusteredUniqueKeys = clusteredUniqueKeys;
+        this.columnarKeys = columnarKeys;
+        this.comment = comment;
         this.defaultCharset = defaultCharset;
         this.defaultCollation = defaultCollation;
+        this.encryption = encryption;
+        this.engine = engine;
+        this.ignore = ignore;
+        this.isSelect = isSelect;
+        this.loadTableName = loadTableName;
+        this.loadTableSchema = loadTableSchema;
+        this.locality = locality;
+        this.logicalReferencedTables = logicalReferencedTables;
+        this.mappingRules = mappingRules;
+        this.originalSql = originalSql;
+        this.physicalReferencedTables = physicalReferencedTables;
+        this.pushDownForeignKeys = pushDownForeignKeys;
+        this.replaceInto = replaceInto;
+        this.rowFormat = rowFormat;
+        this.uniqueShardingKey = uniqueShardingKey;
     }
 
     public boolean shouldLoad() {
@@ -423,6 +521,8 @@ public boolean shouldLoad() {
                 || trimmedComment.equalsIgnoreCase("load_s3")
                 || trimmedComment.equalsIgnoreCase("load_local_disk")
                 || trimmedComment.equalsIgnoreCase("load_nfs")
+                || trimmedComment.equalsIgnoreCase("load_s3")
+                || trimmedComment.equalsIgnoreCase("load_abs")
                 || trimmedComment.equalsIgnoreCase("load_external_disk");
         }
 
@@ -513,7 +613,7 @@ public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
                         String n = convertName(otherName);
                         writer.keyword(n);
                     } else {
-                        writer.keyword(c.getKey().getLastName());
+                        writer.identifier(c.getKey().getLastName());
                     }
                     c.getValue().unparse(writer, 0, 0);
 //                    throw new TddlRuntimeException(ERR_CREATE_SELECT_WITH_GSI, "create select don't support table with GSI");
@@ -532,7 +632,7 @@ public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
                         String n = convertName(otherName);
                         writer.keyword(n);
                     } else {
-                        writer.keyword(c.getKey().getLastName());
+                        writer.identifier(c.getKey().getLastName());
                     }
                     c.getValue().unparse(writer, 0, 0);
 //                    throw new TddlRuntimeException(ERR_CREATE_SELECT_WITH_GSI, "create select don't support table with GSI");
@@ -544,6 +644,7 @@ public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
                     writer.sep(",");
                     writer.keyword("clustered");
                     writer.keyword("index");
+                    writer.identifier(c.getKey().getLastName());
                     c.getValue().unparse(writer, 0, 0);
                 }
             }
@@ -554,6 +655,18 @@ public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
                     writer.keyword("unique");
                     writer.keyword("clustered");
                     writer.keyword("index");
+                    writer.identifier(c.getKey().getLastName());
+                    c.getValue().unparse(writer, 0, 0);
+                }
+            }
+
+            if (columnarKeys != null) {
+                for (Pair<SqlIdentifier, SqlIndexDefinition> c : columnarKeys) {
+                    writer.sep(",");
+                    writer.keyword("clustered");
+                    writer.keyword("columnar");
+                    writer.keyword("index");
+                    writer.identifier(c.getKey().getLastName());
                     c.getValue().unparse(writer, 0, 0);
                 }
             }
@@ -642,6 +755,22 @@ public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
             writer.keyword("BROADCAST");
         }
 
+        if (tableGroupName != null) {
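+            // a table group bound implicitly is unparsed as WITH TABLEGROUP=... IMPLICIT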
+            if (withImplicitTableGroup) {
+                writer.keyword("WITH TABLEGROUP=");
+                tableGroupName.unparse(writer, 0, 0);
+                writer.keyword("IMPLICIT");
+            } else {
+                writer.keyword("TABLEGROUP=");
+                tableGroupName.unparse(writer, 0, 0);
+            }
+        }
+
+        if (joinGroupName != null) {
+            writer.keyword("JOINGROUP=");
+            joinGroupName.unparse(writer, 0, 0);
+        }
+
         if (engine != null) {
             writer.keyword("ENGINE=");
             writer.keyword(engine.name());
@@ -923,11 +1052,33 @@ public SqlCreateTable clone(SqlParserPos pos) {
             localPartition,
             tableGroupName,
             joinGroupName,
+            DbPartition,
             addedForeignKeys,
+            isAddLogicalForeignKeyOnly,
+            archiveMode,
+            asTableName,
+            autoSplit,
+            clusteredKeys,
+            clusteredUniqueKeys,
+            columnarKeys,
+            comment,
             defaultCharset,
-            defaultCollation);
-        ret.setEngine(engine);
-        ret.setDBPartition(DbPartition);
+            defaultCollation,
+            encryption,
+            engine,
+            ignore,
+            isSelect,
+            loadTableName,
+            loadTableSchema,
+            locality,
+            logicalReferencedTables,
+            mappingRules,
+            originalSql,
+            physicalReferencedTables,
+            pushDownForeignKeys,
+            replaceInto,
+            rowFormat,
+            uniqueShardingKey);
+        ret.setWithImplicitTableGroup(withImplicitTableGroup);
         return ret;
     }
 
@@ -967,7 +1118,7 @@ public MySqlStatement rewriteForGsi() {
                 });
 
             addIndex(shardKeys, stmt, uniqueShardingKey);
-        } else if (sqlPartition != null) {
+        } else if (sqlPartition != null || createCci()) {
             // Patch for implicit pk if needed.
             colDefs.stream()
                 .filter(pair -> pair.left.getLastName().equalsIgnoreCase(IMPLICIT_COL_NAME))
@@ -993,7 +1144,7 @@ public MySqlStatement rewriteForGsi() {
             addLocalPartitionSuffix(stmt);
         }
         stmt.setBroadCast(false);
-        removeSequenceAndGsi(stmt);
+        removePolarDBXExclusiveFeature(stmt);
 
         stmt.setDbPartitionBy(null);
         stmt.setDbPartitions(null);
@@ -1017,60 +1168,40 @@ public MySqlStatement rewrite() {
                     removeFKs.add(sqlTableElement);
                 }
             }
-            for (ForeignKeyData foreignKeyData : getAddedForeignKeys()) {
-                Set foreignKeys = new LinkedHashSet<>(foreignKeyData.columns);
-
-                addForeignKeyIndex(foreignKeys, stmt, uniqueShardingKey, foreignKeyData);
+            for (int i = 0; i < addedForeignKeys.size(); i++) {
+                if (GeneralUtil.isNotEmpty(isAddLogicalForeignKeyOnly) && !isAddLogicalForeignKeyOnly.get(i)) {
+                    Set<String> foreignKeys = new LinkedHashSet<>(addedForeignKeys.get(i).columns);
+                    addForeignKeyIndex(foreignKeys, stmt, uniqueShardingKey, addedForeignKeys.get(i));
+                }
             }
             stmt.getTableElementList().removeAll(removeFKs);
         }
 
         if (dbpartitionBy != null) {
+            // fetch dbShardKeys for drdsTbl
             shardKeys = getShardingKeys(dbpartitionBy, shardKeys);
         }
-
         if (tbpartitionBy != null) {
+            // fetch tbShardKeys for drdsTbl
             getShardingKeys(tbpartitionBy, shardKeys);
         }
 
         if (sqlPartition != null) {
+            // fetch partkeys and subpartKeys for partTbl
             getPartitionKeys(sqlPartition, shardKeys, subPartKeys);
         }
 
         // Remove implicit pk in shard keys, because it must be primary key.
         shardKeys.removeIf(SqlValidatorImpl::isImplicitKey);
 
-        boolean useSubPartBy = false;
-        boolean subPartKeyContainAllPartKeyAsPrefixCols = false;
-        if (subPartKeys != null && subPartKeys.size() > 0) {
-            useSubPartBy = true;
-            List partKeyList = shardKeys.stream().collect(Collectors.toList());
-            List subPartKeyList = subPartKeys.stream().collect(Collectors.toList());
-            subPartKeyContainAllPartKeyAsPrefixCols = checkIfContainPrefixPartCols(subPartKeyList, partKeyList);
-        }
-        if (shardKeys.size() > 0) {
-            if (!(useSubPartBy && subPartKeyContainAllPartKeyAsPrefixCols)) {
-                if (sqlPartition == null) {
-                    addIndex(shardKeys, stmt, uniqueShardingKey);
-                } else {
-                    addCompositeIndex(shardKeys, stmt);
-                }
-            }
-            if (useSubPartBy) {
-                addCompositeIndex(subPartKeys, stmt);
-            }
+        // add local indexes for shard keys
+        addLocalIndexesForShardKeys(stmt, shardKeys, subPartKeys);
 
-//            if (sqlPartition == null || shardKeys.size() == 1) {
-//                addIndex(shardKeys, stmt, uniqueShardingKey);
-//            } else {
-//                // create composite indexes for key/range column/list column partitions
-//                addCompositeIndex(shardKeys, stmt);
-//            }
-        }
         stmt.setBroadCast(false);
         // remove locality on mysql
         stmt.setLocality(null);
-        removeSequenceAndGsi(stmt);
+        removePolarDBXExclusiveFeature(stmt);
+        SqlCreateTable.removeLBACAttr(stmt);
 
         for (Pair<SqlIdentifier, SqlColumnDeclaration> pair : colDefs) {
             String columnName = pair.left.getSimple();
@@ -1097,6 +1228,53 @@ public MySqlStatement rewrite() {
         return stmt;
     }
 
+    /**
+     * Add local indexes for the shard keys and the subpartition shard keys
+     */
+    private void addLocalIndexesForShardKeys(MySqlCreateTableStatement stmt, Set<String> shardKeys,
+                                             Set<String> subPartKeys) {
+        String partStrategy = SqlCreateTable.fetchPartStrategy(sqlPartition, false);
+        String subpartStrategy = SqlCreateTable.fetchPartStrategy(sqlPartition, true);
+        boolean usePartBy = !partStrategy.isEmpty();
+        boolean useSubPartBy = false;
+        boolean subPartKeyContainAllPartKeyAsPrefixCols = false;
+
+        List<String> partKeyList = shardKeys.stream().collect(Collectors.toList());
+        List<String> subPartKeyList = subPartKeys.stream().collect(Collectors.toList());
+        boolean addPartColIndexLater = false;
+        if (subPartKeys != null && subPartKeys.size() > 0) {
+            useSubPartBy = true;
+            subPartKeyContainAllPartKeyAsPrefixCols = checkIfContainPrefixPartCols(subPartKeyList, partKeyList);
+            addPartColIndexLater = needAddPartColLocalIndexLater(partStrategy, subpartStrategy);
+        }
+        if (shardKeys.size() > 0) {
+            if (!(useSubPartBy && subPartKeyContainAllPartKeyAsPrefixCols)) {
+                if (sqlPartition == null) {
+                    /**
+                     * add local index for drds sharding keys
+                     */
+                    addIndex(shardKeys, stmt, uniqueShardingKey);
+                } else {
+                    if (usePartBy && !addPartColIndexLater) {
+                        SqlCreateTable.addCompositeIndexForAutoTbl(null, stmt,
+                            false, ImmutableList.of(), false, partStrategy, partKeyList, false, "");
+                    }
+                }
+            }
+
+            if (useSubPartBy) {
+                SqlCreateTable.addCompositeIndexForAutoTbl(null, stmt,
+                    false, ImmutableList.of(), false, subpartStrategy, subPartKeyList, false, "");
+                if (usePartBy && addPartColIndexLater) {
+                    SqlCreateTable.addCompositeIndexForAutoTbl(null, stmt,
+                        false, ImmutableList.of(), false, partStrategy, partKeyList, false, "");
+                }
+            }
+        }
+    }
+
     public void addLocalPartitionSuffix(MySqlCreateTableStatement stmt) {
         if (stmt == null || localPartitionSuffix == null) {
             return;
@@ -1105,7 +1283,7 @@ public void addLocalPartitionSuffix(MySqlCreateTableStatement stmt) {
             Lists.newArrayList(new SQLCommentHint("!50500 PARTITION BY " + localPartitionSuffix.toString())));
     }
 
-    private static void removeSequenceAndGsi(MySqlCreateTableStatement stmt) {
+    private static void removePolarDBXExclusiveFeature(MySqlCreateTableStatement stmt) {
         final Iterator<SQLTableElement> iterator = stmt.getTableElementList().iterator();
         while (iterator.hasNext()) {
             final SQLTableElement tableElement = iterator.next();
@@ -1117,11 +1295,40 @@ private static void removeSequenceAndGsi(MySqlCreateTableStatement stmt) {
                 sqlColumnDefinition.setUnitCount(null);
                 sqlColumnDefinition.setUnitIndex(null);
             } else if ((tableElement instanceof MySqlTableIndex && (((MySqlTableIndex) tableElement).isGlobal()
-                || ((MySqlTableIndex) tableElement).isClustered()))
+                || ((MySqlTableIndex) tableElement).isClustered() || ((MySqlTableIndex) tableElement).isColumnar()))
                 || (tableElement instanceof MySqlUnique && (((MySqlUnique) tableElement).isGlobal()
                 || ((MySqlUnique) tableElement).isClustered()))) {
                 // remove gsi definition
                 iterator.remove();
+            } else if (tableElement instanceof MySqlTableIndex) {
+                final MySqlTableIndex index = (MySqlTableIndex) tableElement;
+                index.setTableGroup(null);
+                index.setWithImplicitTablegroup(false);
+            } else if (tableElement instanceof MySqlKey) {
+                final MySqlKey index = (MySqlKey) tableElement;
+                index.setTableGroup(null);
+                index.setWithImplicitTablegroup(false);
+            }
+        }
+    }
+
+    private static void removeLBACAttr(final MySqlCreateTableStatement stmt) {
+        Iterator iterator = stmt.getTableElementList().iterator();
+        while (iterator.hasNext()) {
+            SQLTableElement tableElement = iterator.next();
+            // remove the column's security label
+            if (tableElement instanceof SQLColumnDefinition) {
+                SQLColumnDefinition sqlColumnDefinition = (SQLColumnDefinition) tableElement;
+                sqlColumnDefinition.setSecuredWith(null);
+            }
+        }
+        Iterator optionIterator = stmt.getTableOptions().iterator();
+        while (optionIterator.hasNext()){
+            SQLAssignItem item = optionIterator.next();
+            // remove the table's security policy
+            if (item.getTarget() instanceof SQLIdentifierExpr &&
+                "SECURITY POLICY".equalsIgnoreCase(((SQLIdentifierExpr) item.getTarget()).getName())){
+                optionIterator.remove();
             }
         }
     }
@@ -1374,6 +1581,8 @@ public static void addIndex(Map indexColumnDefMap, M
                                 .equalsIgnoreCase(UGSI_PK_INDEX_NAME)) {
                                 it.remove();  //  Need to be replaced with MySqlUnique
                             }
+                        } else if (sqlTableElement instanceof MySqlPrimaryKey) {
+                            needAddIndexColumns = false;
                         } else {
                             needAddIndexColumns = false;
                             ((MySqlKey) sqlTableElement).setIndexType(indexType);
@@ -1693,10 +1902,38 @@ public static Set getPartitionKeys(SqlNode partitionBy,
         return getPartitionKeys(partitionBy, partKeys, subPartKeys, true);
     }
 
+    /**
+     * Check whether the local index on the first-level partition columns must be added
+     * after the index on the subpartition columns
+     */
+    public static boolean needAddPartColLocalIndexLater(
+        String partStrategy,
+        String subPartStrategy
+    ) {
+        boolean useSubPart = subPartStrategy != null && !subPartStrategy.isEmpty();
+        boolean usePart = partStrategy != null && !partStrategy.isEmpty();
+        boolean isPartUsingCoHash = usePart && partStrategy.equalsIgnoreCase("CO_HASH");
+        boolean isSubPartUsingCoHash = useSubPart && subPartStrategy.equalsIgnoreCase("CO_HASH");
+
+        if (!useSubPart) {
+            return false;
+        }
+        // only a CO_HASH first level combined with a non-CO_HASH second level defers the index
+        return isPartUsingCoHash && !isSubPartUsingCoHash;
+    }
+
     /**
      * Check if the target partition columns contain the target prefix part columns
      */
-    public static boolean checkIfContainPrefixPartCols(List targetPartCols, List targetPrefixCols) {
+    public static boolean checkIfContainPrefixPartCols(
+        List targetPartCols,
+        List targetPrefixCols) {
         if (targetPartCols.size() < targetPrefixCols.size()) {
             return false;
         }
@@ -1901,6 +2138,14 @@ public void setClusteredKeys(List> clust
         this.clusteredKeys = clusteredKeys;
     }
 
+    public List> getColumnarKeys() {
+        return columnarKeys;
+    }
+
+    public void setColumnarKeys(List> columnarKeys) {
+        this.columnarKeys = columnarKeys;
+    }
+
     public List> getClusteredUniqueKeys() {
         return clusteredUniqueKeys;
     }
@@ -1967,6 +2212,20 @@ public void setPushDownForeignKeys(boolean pushDownForeignKeys) {
         this.pushDownForeignKeys = pushDownForeignKeys;
     }
 
+    public List getIsAddLogicalForeignKeyOnly() {
+        return isAddLogicalForeignKeyOnly;
+    }
+
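+    /**
+     * Note: despite the "set" prefix, this method appends the given entries to the
+     * existing list instead of replacing it.
+     */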
+    public void setIsAddLogicalForeignKeyOnly(List isAddLogicalForeignKeyOnly) {
+        if (null == this.isAddLogicalForeignKeyOnly) {
+            this.isAddLogicalForeignKeyOnly = new ArrayList<>();
+        }
+
+        if (isAddLogicalForeignKeyOnly != null) {
+            this.isAddLogicalForeignKeyOnly.addAll(isAddLogicalForeignKeyOnly);
+        }
+    }
+
     public List getChecks() {
         return checks;
     }
@@ -2007,6 +2266,11 @@ public boolean createGsi() {
             || GeneralUtil.isNotEmpty(clusteredKeys) || GeneralUtil.isNotEmpty(clusteredUniqueKeys);
     }
 
+    @Override
+    public boolean createCci() {
+        return GeneralUtil.isNotEmpty(columnarKeys);
+    }
+
     public boolean createClusteredIndex() {
         return GeneralUtil.isNotEmpty(clusteredKeys) || GeneralUtil.isNotEmpty(clusteredUniqueKeys);
     }
@@ -2060,6 +2324,136 @@ public static void addCompositeIndex(Set shardKeys, MySqlCreateTableStat
             new ArrayList<>(shardKeys), false, "");
     }
 
+    public static void addCompositeIndexForAutoTbl(Map indexColumnNameMap,
+                                                   MySqlCreateTableStatement stmt,
+                                                   boolean isUniqueIndex,
+                                                   List options,
+                                                   boolean isGsi,
+                                                   String shardKeysPartStrategy,
+                                                   List shardKeys,
+                                                   boolean addFkIndex,
+                                                   String fkIndexName
+    ) {
+        /**
+         * A LinkedHashMap keeps the keys in their insertion order
+         */
+        final Map newIndexColumnNameMap =
+            new LinkedHashMap();
+
+        /**
+         * A TreeSet ordered by CASE_INSENSITIVE_ORDER removes column names duplicated up to case
+         */
+        final Set newIndexColumnNameTreeSet =
+            new TreeSet(CaseInsensitive.CASE_INSENSITIVE_ORDER);
+
+        if (indexColumnNameMap != null && !indexColumnNameMap.isEmpty()) {
+            newIndexColumnNameMap.putAll(indexColumnNameMap);
+            newIndexColumnNameTreeSet.addAll(newIndexColumnNameMap.keySet());
+        }
+
+        if (!isGsi && newIndexColumnNameMap.isEmpty()) {
+            for (String columnName : shardKeys) {
+                if (!newIndexColumnNameTreeSet.contains(columnName)) {
+                    newIndexColumnNameMap.put(columnName,
+                        new SqlIndexColumnName(SqlParserPos.ZERO, new SqlIdentifier(columnName,
+                            SqlParserPos.ZERO), null, null));
+                    newIndexColumnNameTreeSet.add(columnName);
+                }
+            }
+        }
+
+        if (shardKeysPartStrategy.equalsIgnoreCase("co_hash")) {
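+            // CO_HASH: index each shard key separately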
+            for (String shardKey : shardKeys) {
+                List tmpShardKey = new ArrayList<>();
+                tmpShardKey.add(shardKey);
+                addCompositeIndex(newIndexColumnNameMap, stmt, isUniqueIndex, options, isGsi,
+                    tmpShardKey, addFkIndex, fkIndexName);
+            }
+        } else {
+            addCompositeIndex(newIndexColumnNameMap, stmt, isUniqueIndex, options, isGsi,
+                shardKeys, addFkIndex, fkIndexName);
+        }
+    }
+
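+    /**
+     * Derive the partition strategy name (KEY, HASH, CO_HASH, UDF_HASH, RANGE[_COLUMNS]
+     * or LIST[_COLUMNS]) from a SqlPartitionBy node, for either the first-level or the
+     * subpartition level; returns an empty string when no strategy can be determined.
+     */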
+    private static String fetchPartStrategy(SqlNode sqlPartition, boolean isForSubPart) {
+        String partStrategy = "";
+        if (sqlPartition == null) {
+            return partStrategy;
+        }
+
+        SqlPartitionBy partBy = (SqlPartitionBy) sqlPartition;
+        SqlSubPartitionBy subPartBy = partBy.getSubPartitionBy();
+
+        if (isForSubPart) {
+            if (subPartBy == null) {
+                return partStrategy;
+            }
+            if (subPartBy instanceof SqlSubPartitionByHash) {
+                boolean isKey = ((SqlSubPartitionByHash) subPartBy).isKey();
+                if (isKey) {
+                    partStrategy = "KEY";
+                } else {
+                    partStrategy = "HASH";
+                }
+            } else if (subPartBy instanceof SqlSubPartitionByCoHash) {
+                partStrategy = "CO_HASH";
+
+            } else if (subPartBy instanceof SqlSubPartitionByUdfHash) {
+                partStrategy = "UDF_HASH";
+            } else if (subPartBy instanceof SqlSubPartitionByRange) {
+                boolean isColumns = subPartBy.isColumns();
+                if (isColumns) {
+                    partStrategy = "RANGE_COLUMNS";
+                } else {
+                    partStrategy = "RANGE";
+                }
+            } else if (subPartBy instanceof SqlSubPartitionByList) {
+                boolean isColumns = subPartBy.isColumns();
+                if (isColumns) {
+                    partStrategy = "LIST_COLUMNS";
+                } else {
+                    partStrategy = "LIST";
+                }
+            } else {
+                return partStrategy;
+            }
+        } else {
+            if (partBy instanceof SqlPartitionByHash) {
+                boolean isKey = ((SqlPartitionByHash) partBy).isKey();
+                if (isKey) {
+                    partStrategy = "KEY";
+                } else {
+                    partStrategy = "HASH";
+                }
+            } else if (partBy instanceof SqlPartitionByCoHash) {
+                partStrategy = "CO_HASH";
+            } else if (partBy instanceof SqlPartitionByUdfHash) {
+                partStrategy = "UDF_HASH";
+            } else if (partBy instanceof SqlPartitionByRange) {
+                boolean isColumns = ((SqlPartitionByRange) partBy).isColumns();
+                if (isColumns) {
+                    partStrategy = "RANGE_COLUMNS";
+                } else {
+                    partStrategy = "RANGE";
+                }
+            } else if (partBy instanceof SqlPartitionByList) {
+                boolean isColumns = ((SqlPartitionByList) partBy).isColumns();
+                if (isColumns) {
+                    partStrategy = "LIST_COLUMNS";
+                } else {
+                    partStrategy = "LIST";
+                }
+            } else {
+                return partStrategy;
+            }
+        }
+        return partStrategy;
+
+    }
+
     /**
      * only for auto mode, add global index or create table
      *
@@ -2346,8 +2740,7 @@ public static void addCompositeIndex(Map indexColumn
 
             Set orderedIndexColumnNames = new LinkedHashSet<>(shardingKey);
             final String suffix = buildUnifyIndexName(orderedIndexColumnNames, 45);
-            final String indexName = addFkIndex ? (foreignKeyIndexName == null ?
-                buildForeignKeyIndexName(existingIndexNames, suffix) : foreignKeyIndexName) :
+            final String indexName = addFkIndex ? buildForeignKeyName(foreignKeyIndexName, existingIndexNames, suffix) :
                 buildIndexName(existingIndexNames, suffix);
 
             final MySqlTableIndex mySqlTableIndex = new MySqlTableIndex();
@@ -2373,6 +2766,13 @@ public static void addCompositeIndex(Map indexColumn
         }
     }
 
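+    /**
+     * Prefer the user-specified foreign key index name (quoted with backticks); otherwise
+     * derive a fresh name from the column suffix.
+     */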
+    public static String buildForeignKeyName(String foreignKeyIndexName, Set existingIndexNames,
+                                              String suffix) {
+        return foreignKeyIndexName == null ?
+            buildForeignKeyIndexName(existingIndexNames, suffix) :
+            SqlIdentifier.surroundWithBacktick(foreignKeyIndexName);
+    }
+
     private static List preparAutoCompositeIndexs(List shardKeys,
                                                                    Map columnDefMap,
                                                                    int maxLen) {
@@ -2493,6 +2893,18 @@ private static List preparAutoCompositeIndexs(List shar
         }
         return indexColumnInfos;
     }
+
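+    /**
+     * Replace the definition of the columnar index matching the given name
+     * (case-insensitive); all other columnar keys are left untouched.
+     */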
+    public void replaceCciDef(String indexName, SqlIndexDefinition newIndexDef) {
+        this.setColumnarKeys(getColumnarKeys()
+            .stream()
+            .map(p -> {
+                if (TStringUtil.equalsIgnoreCase(p.getKey().getLastName(), indexName)) {
+                    return Pair.of(p.getKey(), newIndexDef);
+                }
+                return p;
+            })
+            .collect(Collectors.toList()));
+    }
 }
 
 class IndexColumnInfo {
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateTableGroup.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateTableGroup.java
index cdb170256..a608531bc 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateTableGroup.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateTableGroup.java
@@ -44,15 +44,17 @@ public class SqlCreateTableGroup extends SqlDdl {
     final String tableGroupName;
     final String schemaName;
     final SqlNode sqlPartition;
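+    /** whether the table group is declared SINGLE, i.e. created without a partition clause */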
+    final boolean single;
 
     public SqlCreateTableGroup(SqlParserPos pos, boolean ifNotExists, String schemaName,
-                               String tableGroupName, String locality, SqlNode sqlPartition) {
+                               String tableGroupName, String locality, SqlNode sqlPartition, boolean single) {
         super(OPERATOR, pos);
         this.ifNotExists = ifNotExists;
         this.schemaName = schemaName;
         this.tableGroupName = tableGroupName;
         this.locality = locality;
         this.sqlPartition = sqlPartition;
+        this.single = single;
     }
 
     @Override
@@ -71,6 +73,8 @@ public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
         if (sqlPartition != null) {
             writer.keyword(" ");
             sqlPartition.unparse(writer, 0, 0);
+        } else if (single) {
+            writer.keyword("SINGLE");
         }
         if (TStringUtil.isNotEmpty(locality)) {
             writer.keyword("LOCALITY = ");
@@ -98,6 +102,10 @@ public SqlNode getSqlPartition() {
         return sqlPartition;
     }
 
+    public boolean isSingle() {
+        return single;
+    }
+
     @Override
     public SqlOperator getOperator() {
         return OPERATOR;
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateView.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateView.java
index 390e28d3f..247c39a40 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateView.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlCreateView.java
@@ -48,10 +48,13 @@ public class SqlCreateView extends SqlCreate {
     private static final SqlOperator OPERATOR =
             new SqlSpecialOperator("CREATE VIEW", SqlKind.CREATE_VIEW);
 
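+    /** distinguishes a statement parsed as ALTER VIEW from a plain CREATE VIEW */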
+    private boolean alter;
+
     /** Creates a SqlCreateView. */
-    public SqlCreateView(SqlParserPos pos, boolean replace, SqlIdentifier name,
+    public SqlCreateView(SqlParserPos pos, boolean replace, boolean alter, SqlIdentifier name,
                   SqlNodeList columnList, SqlNode query) {
         super(OPERATOR, pos, replace, false);
+        this.alter = alter;
         this.name = Preconditions.checkNotNull(name);
         this.columnList = columnList; // may be null
         this.query = Preconditions.checkNotNull(query);
@@ -97,4 +100,8 @@ public SqlNode getQuery() {
     public SqlIdentifier getName() {
         return (SqlIdentifier) name;
     }
+
+    public boolean isAlter() {
+        return alter;
+    }
 }
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDataTypeSpec.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDataTypeSpec.java
index 708586b46..dd258330f 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDataTypeSpec.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDataTypeSpec.java
@@ -25,6 +25,7 @@
 import org.apache.calcite.sql.SqlWriter.FrameTypeEnum;
 import org.apache.calcite.sql.parser.SqlParserPos;
 import org.apache.calcite.sql.type.EnumSqlType;
+import org.apache.calcite.sql.type.SetSqlType;
 import org.apache.calcite.sql.type.SqlTypeFactoryImpl;
 import org.apache.calcite.sql.type.SqlTypeFamily;
 import org.apache.calcite.sql.type.SqlTypeName;
@@ -596,6 +597,11 @@ public RelDataType deriveType(RelDataTypeFactory typeFactory,
             }
         }
 
+        // For the BIT type, use BIG_BIT when length > 1, for compatibility with DataTypeUtil.jdbcTypeToRelDataType
+        if (sqlTypeName == SqlTypeName.BIT && precision > 1) {
+            sqlTypeName = SqlTypeName.BIG_BIT;
+        }
+
         // For time/datetime/timestamp types
         if (SqlTypeFamily.DATETIME.getTypeNames().contains(sqlTypeName) && sqlTypeName != SqlTypeName.DATE) {
             // fix scale and precision of datetime type.
@@ -618,6 +624,15 @@ public RelDataType deriveType(RelDataTypeFactory typeFactory,
             }
             RelDataType newType = new EnumSqlType(typeFactory.getTypeSystem(), SqlTypeName.ENUM, list, null, null);
             type = ((SqlTypeFactoryImpl) typeFactory).canonize(newType);
+        } else if (drdsTypeName == DrdsTypeName.SET) {
+            List list = new ArrayList();
+            for (SqlNode sqlNode : this.collectionVals.getList()) {
+                assert sqlNode instanceof SqlLiteral;
+                final String stringValue = ((SqlLiteral) sqlNode).getStringValue();
+                list.add(stringValue);
+            }
+            RelDataType newType = new SetSqlType(typeFactory.getTypeSystem(), sqlTypeName, list);
+            type = ((SqlTypeFactoryImpl) typeFactory).canonize(newType);
         } else if ((precision >= 0) && (scale >= 0)) {
             assert sqlTypeName.allowsPrecScale(true, true);
             type = typeFactory.createSqlType(sqlTypeName, precision, scale);
@@ -716,7 +731,8 @@ public enum DrdsTypeName {
         MEDIUMTEXT(SqlTypeName.VARCHAR),
         LONGTEXT(SqlTypeName.VARCHAR),
         ENUM(SqlTypeName.ENUM),
-        SET(SqlTypeName.VARCHAR),
+        // CHAR is the type used when building SET from metaDb
+        SET(SqlTypeName.CHAR),
         GEOMETRY(SqlTypeName.GEOMETRY),
         POINT(SqlTypeName.BINARY),
         LINESTRING(SqlTypeName.BINARY),
@@ -735,7 +751,7 @@ public enum DrdsTypeName {
         ;
 
         public static final EnumSet TYPE_WITH_LENGTH =
-            EnumSet.of(TINYINT, SMALLINT, MEDIUMINT, INTEGER, BIGINT, DOUBLE, REAL, FLOAT, DECIMAL);
+            EnumSet.of(TINYINT, SMALLINT, MEDIUMINT, INTEGER, BIGINT, DOUBLE, REAL, FLOAT, DECIMAL, BIT);
 
         public static final EnumSet TYPE_WITH_LENGTH_DECIMALS =
             EnumSet.of(DOUBLE, REAL, FLOAT, DECIMAL);
@@ -807,6 +823,34 @@ public boolean isA(EnumSet enumSet) {
     public boolean isUnsigned() {
         return unsigned;
     }
+
+    public boolean isZerofill() {
+        return zerofill;
+    }
+
+    public boolean isBinary() {
+        return binary;
+    }
+
+    public SqlLiteral getDecimals() {
+        return decimals;
+    }
+
+    public SqlLiteral getCharSet() {
+        return charSet;
+    }
+
+    public SqlLiteral getCollation() {
+        return collation;
+    }
+
+    public SqlNodeList getCollectionVals() {
+        return collectionVals;
+    }
+
+    public SqlLiteral getFsp() {
+        return fsp;
+    }
 }
 
 // End SqlDataTypeSpec.java
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDdlNodes.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDdlNodes.java
index 3099a6d19..54b480981 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDdlNodes.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDdlNodes.java
@@ -45,9 +45,12 @@ private SqlDdlNodes() {
 
     public static SqlCreateDatabase createDatabase(SqlParserPos pos, boolean ifNotExists, SqlIdentifier dbName,
                                                    String charSet, String collate, Boolean encryption, String locality,
-                                                   String partitionMode, Boolean defaultSingle, SqlIdentifier sourceDataBase, boolean like, boolean as, List includeTables, List excludeTables,
+                                                   String partitionMode, Boolean defaultSingle,
+                                                   SqlIdentifier sourceDataBase, boolean like, boolean as,
+                                                   List includeTables, List excludeTables,
                                                    boolean withLock, boolean dryRun, boolean createTables) {
-        return new SqlCreateDatabase(pos, ifNotExists, dbName, charSet, collate, encryption, locality, partitionMode, defaultSingle, sourceDataBase, like, as, includeTables, excludeTables, withLock, dryRun, createTables);
+        return new SqlCreateDatabase(pos, ifNotExists, dbName, charSet, collate, encryption, locality, partitionMode,
+            defaultSingle, sourceDataBase, like, as, includeTables, excludeTables, withLock, dryRun, createTables);
     }
 
     public static SqlDropDatabase dropDatabase(SqlParserPos pos, boolean ifExists, SqlIdentifier dbName) {
@@ -105,7 +108,7 @@ public static SqlCreateTable createTable(SqlParserPos pos, boolean replace,
      */
     public static SqlCreateView createView(SqlParserPos pos, boolean replace,
                                            SqlIdentifier name, SqlNodeList columnList, SqlNode query) {
-        return new SqlCreateView(pos, replace, name, columnList, query);
+        return new SqlCreateView(pos, replace, false, name, columnList, query);
     }
 
     /**
@@ -125,12 +128,12 @@ public static SqlTruncateTable truncateTable(SqlParserPos pos, boolean ifExists,
     }
 
     public static SqlDropIndex dropIndex(SqlIdentifier name, SqlIdentifier tableName, String sql, SqlParserPos pos) {
-        return new SqlDropIndex(name, tableName, sql, pos);
+        return new SqlDropIndex(name, name, tableName, sql, pos);
     }
 
-    public static SqlAlterTableDropIndex alterTabledropIndex(SqlIdentifier tableName, SqlIdentifier indexName,
+    public static SqlAlterTableDropIndex alterTableDropIndex(SqlIdentifier tableName, SqlIdentifier indexName,
                                                              String sql, SqlParserPos pos) {
-        return new SqlAlterTableDropIndex(tableName, indexName, sql, pos);
+        return new SqlAlterTableDropIndex(tableName, indexName, indexName, sql, pos);
     }
 
     public static SqlCreateSequence createSequence(SqlCharStringLiteral name, SqlIdentifier tableName, String sql,
@@ -157,24 +160,28 @@ public static SqlRenameTable renameTable(SqlIdentifier to, SqlIdentifier tableNa
         return new SqlRenameTable(to, tableName, sql, pos);
     }
 
-    public static SqlRenameTables renameTables(List> tableNameList, String sql, SqlParserPos pos) {
+    public static SqlRenameTables renameTables(List> tableNameList, String sql,
+                                               SqlParserPos pos) {
         return new SqlRenameTables(tableNameList, sql, pos);
     }
 
-    public static SqlAlterTable alterTable(List objectNames, SqlIdentifier tableName, Map> columnOpts,
+    public static SqlAlterTable alterTable(List objectNames, SqlIdentifier tableName,
+                                           Map> columnOpts,
                                            String sql, SqlTableOptions tableOptions,
                                            List alters,
                                            SqlParserPos pos) {
         return new SqlAlterTable(objectNames, tableName, columnOpts, sql, tableOptions, alters, false, null, pos);
     }
 
-    public static SqlAlterTable alterTable(List objectNames, SqlIdentifier tableName, Map> columnOpts,
+    public static SqlAlterTable alterTable(List objectNames, SqlIdentifier tableName,
+                                           Map> columnOpts,
                                            String sql, SqlTableOptions tableOptions,
                                            List alters,
                                            boolean fromAlterIndexPartition,
                                            SqlNode alterIndexName,
                                            SqlParserPos pos) {
-        return new SqlAlterTable(objectNames, tableName, columnOpts, sql, tableOptions, alters, fromAlterIndexPartition, alterIndexName, pos);
+        return new SqlAlterTable(objectNames, tableName, columnOpts, sql, tableOptions, alters, fromAlterIndexPartition,
+            alterIndexName, pos);
     }
 
     public static SqlAlterRule alterRule(SqlIdentifier tableName, String sql, SqlParserPos pos) {
@@ -182,8 +189,8 @@ public static SqlAlterRule alterRule(SqlIdentifier tableName, String sql, SqlPar
     }
 
     public static SqlAlterTableSetTableGroup alterTableSetTableGroup(List objectNames, SqlIdentifier tableName, String targetTableGroup,
-                                                                     String sql, SqlParserPos pos, boolean force) {
-        return new SqlAlterTableSetTableGroup(objectNames, tableName, targetTableGroup, sql, pos, force);
+                                                                     String sql, SqlParserPos pos, boolean implicit, boolean force) {
+        return new SqlAlterTableSetTableGroup(objectNames, tableName, targetTableGroup, sql, pos, implicit, force);
     }
 
     /**
@@ -262,6 +269,15 @@ static SqlNode renameColumns(SqlNodeList columnList, SqlNode query) {
             null, null, null);
     }
 
+    public static SqlAlterTableDiscardTableSpace alterTableDiscardTableSpace(SqlIdentifier tableName,
+                                                                             String sourceSql) {
+        return (new SqlAlterTableDiscardTableSpace(SqlParserPos.ZERO, tableName, sourceSql));
+    }
+
+    public static SqlAlterTableImportTableSpace alterTableImportTableSpace(SqlIdentifier tableName,
+                                                                           String sourceSql) {
+        return (new SqlAlterTableImportTableSpace(SqlParserPos.ZERO, tableName, sourceSql));
+    }
 }
 
 // End SqlDdlNodes.java
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDropIndex.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDropIndex.java
index c3f286768..f051c3611 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDropIndex.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDropIndex.java
@@ -45,16 +45,19 @@ public class SqlDropIndex extends SqlCreate {
     /**
      * Creates a SqlCreateIndex.
      */
-    public SqlDropIndex(SqlIdentifier indexName, SqlIdentifier tableName, String sql, SqlParserPos pos) {
+    public SqlDropIndex(SqlIdentifier indexName, SqlIdentifier originIndexName, SqlIdentifier tableName, String sql,
+                        SqlParserPos pos) {
         super(OPERATOR, SqlParserPos.ZERO, false, false);
         this.name = tableName;
         this.indexName = indexName;
+        this.originIndexName = originIndexName;
         this.sourceSql = sql;
         this.originTableName = tableName;
     }
 
     private SqlIdentifier originTableName;
     private SqlIdentifier indexName;
+    private SqlIdentifier originIndexName;
     private String sourceSql;
 
     @Override
@@ -121,11 +124,19 @@ public SqlIdentifier getIndexName() {
         return indexName;
     }
 
+    public SqlIdentifier getOriginIndexName() {
+        return originIndexName;
+    }
+
     public SqlDropIndex replaceTableName(SqlIdentifier newTableName) {
-        return new SqlDropIndex(indexName, null == newTableName ? originTableName : newTableName, sourceSql, pos);
+        return new SqlDropIndex(indexName,
+            originIndexName,
+            null == newTableName ? originTableName : newTableName,
+            sourceSql,
+            pos);
     }
 
     public SqlDropIndex replaceIndexName(SqlIdentifier newIndexName) {
-        return new SqlDropIndex(newIndexName, originTableName, sourceSql, pos);
+        return new SqlDropIndex(newIndexName, originIndexName, originTableName, sourceSql, pos);
     }
 }
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDropSecurityEntity.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDropSecurityEntity.java
new file mode 100644
index 000000000..7b4ce3f7b
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDropSecurityEntity.java
@@ -0,0 +1,96 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+import java.util.List;
+import java.util.StringJoiner;
+
+/**
+ * @author pangzhaoxing
+ */
+public class SqlDropSecurityEntity extends SqlDal {
+
+    private static final SqlOperator OPERATOR = new SqlDropSecurityEntity.SqlDropSecurityEntityOperator();
+
+    private List entityTypes;
+    private List entityKeys;
+
+    public SqlDropSecurityEntity(SqlParserPos pos, List entityTypes, List entityKeys) {
+        super(pos);
+        this.entityTypes = entityTypes;
+        this.entityKeys = entityKeys;
+    }
+
+    @Override
+    public SqlKind getKind() {
+        return SqlKind.DROP_SECURITY_ENTITY;
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    public List getEntityTypes() {
+        return entityTypes;
+    }
+
+    public List getEntityKeys() {
+        return entityKeys;
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword("DROP SECURITY ENTITY");
+        StringJoiner sj = new StringJoiner(",");
+        for (int i = 0; i < entityTypes.size(); i++) {
+            sj.add(entityTypes.get(i).getSimple() + " " + entityKeys.get(i).getSimple());
+        }
+        writer.keyword(sj.toString().toUpperCase());
+    }
+
+    public static class SqlDropSecurityEntityOperator extends SqlSpecialOperator {
+
+        public SqlDropSecurityEntityOperator() {
+            super("DROP_SECURITY_Entity", SqlKind.DROP_SECURITY_ENTITY);
+        }
+
+        @Override
+        public RelDataType deriveType(final SqlValidator validator, final SqlValidatorScope scope, final SqlCall call) {
+            RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            RelDataType columnType = typeFactory.createSqlType(SqlTypeName.CHAR);
+
+            return typeFactory.createStructType(
+                ImmutableList.of((RelDataTypeField) new RelDataTypeFieldImpl("DROP_SECURITY_ENTITY",
+                    0,
+                    columnType)));
+        }
+    }
+
+
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDropSecurityLabel.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDropSecurityLabel.java
new file mode 100644
index 000000000..2b523736c
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDropSecurityLabel.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+import java.util.List;
+
+/**
+ * @author pangzhaoxing
+ */
+public class SqlDropSecurityLabel extends SqlDal {
+
+    private static final SqlOperator OPERATOR = new SqlDropSecurityLabelOperator();
+
+    private List labelNames;
+
+    protected SqlDropSecurityLabel(SqlParserPos pos) {
+        super(pos);
+    }
+
+    public SqlDropSecurityLabel(SqlParserPos pos, List labelNames) {
+        super(pos);
+        this.labelNames = labelNames;
+    }
+
+    @Override
+    public SqlKind getKind() {
+        return SqlKind.DROP_SECURITY_LABEL;
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    public List getLabelNames() {
+        return labelNames;
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword("DROP SECURITY LABEL");
+        labelNames.get(0).unparse(writer, leftPrec, rightPrec);
+        for (int i = 1; i < labelNames.size(); i++) {
+            writer.keyword(",");
+            labelNames.get(i).unparse(writer, leftPrec, rightPrec);
+        }
+    }
+
+    public static class SqlDropSecurityLabelOperator extends SqlSpecialOperator {
+
+        public SqlDropSecurityLabelOperator() {
+            super("DROP_SECURITY_LABEL", SqlKind.DROP_SECURITY_LABEL);
+        }
+
+        @Override
+        public RelDataType deriveType(final SqlValidator validator, final SqlValidatorScope scope, final SqlCall call) {
+            RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            RelDataType columnType = typeFactory.createSqlType(SqlTypeName.CHAR);
+
+            return typeFactory.createStructType(
+                ImmutableList.of((RelDataTypeField) new RelDataTypeFieldImpl("DROP_SECURITY_LABEL",
+                    0,
+                    columnType)));
+        }
+    }
+
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDropSecurityLabelComponent.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDropSecurityLabelComponent.java
new file mode 100644
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDropSecurityLabelComponent.java
+/**
+ * @author pangzhaoxing
+ */
+public class SqlDropSecurityLabelComponent extends SqlDal {
+
+    private static final SqlOperator OPERATOR = new SqlDropSecurityLabelComponentOperator();
+
+    private List componentNames;
+
+    public SqlDropSecurityLabelComponent(SqlParserPos pos, List componentNames) {
+        super(pos);
+        this.componentNames = componentNames;
+    }
+
+    public List getComponentNames() {
+        return componentNames;
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword("DROP SECURITY LABEL COMPONENT");
+        componentNames.get(0).unparse(writer, leftPrec, rightPrec);
+        for (int i = 1; i < componentNames.size(); i++) {
+            writer.keyword(",");
+            componentNames.get(i).unparse(writer, leftPrec, rightPrec);
+        }
+    }
+
+    @Override
+    public SqlKind getKind() {
+        return SqlKind.DROP_SECURITY_LABEL_COMPONENT;
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    public static class SqlDropSecurityLabelComponentOperator extends SqlSpecialOperator {
+
+        public SqlDropSecurityLabelComponentOperator() {
+            super("DROP_SECURITY_LABEL_COMPONENT", SqlKind.DROP_SECURITY_LABEL_COMPONENT);
+        }
+
+        @Override
+        public RelDataType deriveType(final SqlValidator validator, final SqlValidatorScope scope, final SqlCall call) {
+            RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            RelDataType columnType = typeFactory.createSqlType(SqlTypeName.CHAR);
+
+            return typeFactory.createStructType(
+                ImmutableList.of((RelDataTypeField) new RelDataTypeFieldImpl("DROP_SECURITY_LABEL_COMPONENT",
+                    0,
+                    columnType)));
+        }
+    }
+
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDropSecurityPolicy.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDropSecurityPolicy.java
new file mode 100644
index 000000000..e7a7cc1aa
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlDropSecurityPolicy.java
@@ -0,0 +1,92 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+import java.util.List;
+
+/**
+ * @author pangzhaoxing
+ */
+public class SqlDropSecurityPolicy extends SqlDal {
+
+    private static final SqlOperator OPERATOR = new SqlDropSecurityPolicyOperator();
+
+    private List policyNames;
+
+
+    protected SqlDropSecurityPolicy(SqlParserPos pos) {
+        super(pos);
+    }
+
+    public SqlDropSecurityPolicy(SqlParserPos pos, List policyNames) {
+        super(pos);
+        this.policyNames = policyNames;
+    }
+
+    @Override
+    public SqlKind getKind() {
+        return SqlKind.DROP_SECURITY_POLICY;
+    }
+
+    public List getPolicyNames() {
+        return policyNames;
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword("DROP SECURITY POLICY");
+        policyNames.get(0).unparse(writer, leftPrec, rightPrec);
+        for (int i = 1; i < policyNames.size(); i++) {
+            writer.keyword(",");
+            policyNames.get(i).unparse(writer, leftPrec, rightPrec);
+        }
+    }
+
+    public static class SqlDropSecurityPolicyOperator extends SqlSpecialOperator {
+
+        public SqlDropSecurityPolicyOperator() {
+            super("DROP_SECURITY_POLICY", SqlKind.DROP_SECURITY_POLICY);
+        }
+
+        @Override
+        public RelDataType deriveType(final SqlValidator validator, final SqlValidatorScope scope, final SqlCall call) {
+            RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            RelDataType columnType = typeFactory.createSqlType(SqlTypeName.CHAR);
+
+            return typeFactory.createStructType(
+                ImmutableList.of((RelDataTypeField) new RelDataTypeFieldImpl("DROP_SECURITY_POLICY",
+                    0,
+                    columnType)));
+        }
+    }
+
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlFunction.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlFunction.java
index 69af4035c..acb328cf8 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlFunction.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlFunction.java
@@ -93,7 +93,13 @@ public String apply(Integer a0) {
         NON_PUSHDOWN_FUNCTION.add("RELEASE_ALL_LOCKS");
         NON_PUSHDOWN_FUNCTION.add("IS_FREE_LOCK");
         NON_PUSHDOWN_FUNCTION.add("IS_USED_LOCK");
+        NON_PUSHDOWN_FUNCTION.add("HYPERLOGLOG");
         NON_PUSHDOWN_FUNCTION.add("PART_HASH");
+        SqlFunction.NON_PUSHDOWN_FUNCTION.add("LBAC_CHECK");
+        SqlFunction.NON_PUSHDOWN_FUNCTION.add("LBAC_READ");
+        SqlFunction.NON_PUSHDOWN_FUNCTION.add("LBAC_WRITE");
+        SqlFunction.NON_PUSHDOWN_FUNCTION.add("LBAC_WRITE_STRICT_CHECK");
+        SqlFunction.NON_PUSHDOWN_FUNCTION.add("LBAC_USER_WRITE_LABEL");
 
         // Time Function
         DYNAMIC_FUNCTION.add("CURDATE");
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlGrantSecurityLabel.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlGrantSecurityLabel.java
new file mode 100644
index 000000000..a7eb2741f
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlGrantSecurityLabel.java
@@ -0,0 +1,126 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+/**
+ * @author pangzhaoxing
+ */
+public class SqlGrantSecurityLabel extends SqlDal {
+
+    private static final SqlOperator OPERATOR =
+        new SqlGrantSecurityLabelOperator();
+
+    private SqlIdentifier policyName;
+    private SqlIdentifier labelName;
+    private SqlUserName userName;
+    private SqlIdentifier accessType;
+
+    protected SqlGrantSecurityLabel(SqlParserPos pos) {
+        super(pos);
+    }
+
+    public SqlGrantSecurityLabel(SqlParserPos pos, SqlIdentifier policyName, SqlIdentifier labelName,
+                                 SqlUserName userName, SqlIdentifier accessType) {
+        super(pos);
+        this.policyName = policyName;
+        this.labelName = labelName;
+        this.userName = userName;
+        this.accessType = accessType;
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword("GRANT SECURITY LABEL");
+        writer.keyword(policyName.toString() + "." + labelName.toString());
+        writer.keyword("TO USER");
+        this.userName.unparse(writer, leftPrec, rightPrec);
+        writer.keyword("FOR");
+        this.accessType.unparse(writer, leftPrec, rightPrec);
+        writer.keyword("ACCESS");
+    }
+
+    @Override
+    public SqlKind getKind() {
+        return SqlKind.GRANT_SECURITY_LABEL;
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    public SqlIdentifier getPolicyName() {
+        return policyName;
+    }
+
+    public void setPolicyName(SqlIdentifier policyName) {
+        this.policyName = policyName;
+    }
+
+    public SqlIdentifier getLabelName() {
+        return labelName;
+    }
+
+    public void setLabelName(SqlIdentifier labelName) {
+        this.labelName = labelName;
+    }
+
+    public SqlUserName getUserName() {
+        return userName;
+    }
+
+    public void setUserName(SqlUserName userName) {
+        this.userName = userName;
+    }
+
+    public SqlIdentifier getAccessType() {
+        return accessType;
+    }
+
+    public void setAccessType(SqlIdentifier accessType) {
+        this.accessType = accessType;
+    }
+
+    public static class SqlGrantSecurityLabelOperator extends SqlSpecialOperator {
+
+        public SqlGrantSecurityLabelOperator() {
+            super("GRANT_SECURITY_LABEL", SqlKind.GRANT_SECURITY_LABEL);
+        }
+
+        @Override
+        public RelDataType deriveType(final SqlValidator validator, final SqlValidatorScope scope, final SqlCall call) {
+            RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            RelDataType columnType = typeFactory.createSqlType(SqlTypeName.CHAR);
+
+            return typeFactory.createStructType(
+                ImmutableList.of((RelDataTypeField) new RelDataTypeFieldImpl("GRANT_SECURITY_LABEL",
+                    0,
+                    columnType)));
+        }
+    }
+
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlIdentifier.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlIdentifier.java
index 0d3773357..79666c431 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlIdentifier.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlIdentifier.java
@@ -100,6 +100,11 @@ public String apply(String s) {
    */
   public SqlNode flashback;
 
+  /**
+   * Records the AS OF flavor: AS_OF/AS_OF_80/AS_OF_57
+   */
+  public SqlOperator flashbackOperator;
+
   /**
    * This identifier's collation (if any).
    */
@@ -153,7 +158,7 @@ public SqlIdentifier(
 //    for (String name : names) {
 //      assert name != null;
 //    }
-    this(names, collation, pos, componentPositions, null, null, null);
+    this(names, collation, pos, componentPositions, null, null, null, null);
   }
 
   public SqlIdentifier(
@@ -161,14 +166,14 @@ public SqlIdentifier(
       SqlCollation collation,
       SqlParserPos pos,
       List componentPositions, SqlNode indexNode) {
-    this(names, collation, pos, componentPositions, indexNode, null, null);
+    this(names, collation, pos, componentPositions, indexNode, null, null, null);
   }
 
   public SqlIdentifier(
       String name,
       SqlParserPos pos,
       SqlNode indexNode) {
-    this(ImmutableList.of(name), null, pos, null, indexNode, null, null);
+    this(ImmutableList.of(name), null, pos, null, indexNode, null, null, null);
   }
 
   /**
@@ -181,6 +186,15 @@ public SqlIdentifier(
       SqlCollation collation,
       SqlParserPos pos,
       List componentPositions, SqlNode indexNode, SqlNode partitions, SqlNode flashback) {
+    this(ImmutableList.copyOf(names), collation, pos, componentPositions, indexNode, partitions, flashback, null);
+  }
+
+  public SqlIdentifier(
+      List names,
+      SqlCollation collation,
+      SqlParserPos pos,
+      List componentPositions, SqlNode indexNode, SqlNode partitions, SqlNode flashback,
+      SqlOperator flashbackOperator) {
     super(pos);
     this.names = ImmutableList.copyOf(names);
     this.collation = collation;
@@ -192,16 +206,21 @@ public SqlIdentifier(
     this.indexNode = indexNode;
     this.partitions = partitions;
     this.flashback = flashback;
+    this.flashbackOperator = flashbackOperator;
   }
 
-  /** Creates an identifier that is a singleton wildcard star. */
+  /**
+   * Creates an identifier that is a singleton wildcard star.
+   */
   public static SqlIdentifier star(SqlParserPos pos) {
     return star(ImmutableList.of(""), pos, ImmutableList.of(pos));
   }
 
-  /** Creates an identifier that ends in a wildcard star. */
+  /**
+   * Creates an identifier that ends in a wildcard star.
+   */
   public static SqlIdentifier star(List names, SqlParserPos pos,
-      List componentPositions) {
+                                   List componentPositions) {
     return new SqlIdentifier(Lists.transform(names, STAR_TO_EMPTY), null, pos,
         componentPositions);
   }
@@ -213,7 +232,7 @@ public SqlKind getKind() {
   }
 
   @Override public SqlNode clone(SqlParserPos pos) {
-    return new SqlIdentifier(names, collation, pos, componentPositions, indexNode, partitions, flashback);
+    return new SqlIdentifier(names, collation, pos, componentPositions, indexNode, partitions, flashback, flashbackOperator);
   }
 
   public String toStringWithBacktick() {
@@ -498,6 +517,10 @@ public SqlMonotonicity getMonotonicity(SqlValidatorScope scope) {
     final SqlIdentifier fqId = qualified.identifier;
     return qualified.namespace.resolve().getMonotonicity(Util.last(fqId.names));
   }
+
+  public SqlOperator getFlashbackOperator() {
+    return flashbackOperator;
+  }
 }
 
 // End SqlIdentifier.java
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlImportDatabase.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlImportDatabase.java
new file mode 100644
index 000000000..406e526b8
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlImportDatabase.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+import java.util.List;
+
+/**
+ * Created by zhuqiwei.
+ *
+ * @author zhuqiwei
+ */
+public class SqlImportDatabase extends SqlDdl {
+
+    private static final SqlOperator OPERATOR = new SqlImportDatabaseOperator();
+
+    final String dstLogicalDb;
+    final String srcPhyDb;
+
+    final String locality;
+
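+    /** if true, the import proceeds even when the logical database already exists (inferred from the flag name) */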
+    final boolean existStillImport;
+
+    public SqlImportDatabase(SqlParserPos pos, String dstLogicalDb, String srcPhyDb, String locality,
+                             boolean existStillImport) {
+        super(OPERATOR, pos);
+        this.dstLogicalDb = dstLogicalDb;
+        this.srcPhyDb = srcPhyDb;
+        this.locality = locality;
+        this.existStillImport = existStillImport;
+    }
+
+    public SqlImportDatabase(SqlParserPos pos, String dstLogicalDb, String srcPhyDb, String locality) {
+        super(OPERATOR, pos);
+        this.dstLogicalDb = dstLogicalDb;
+        this.srcPhyDb = srcPhyDb;
+        this.locality = locality;
+        this.existStillImport = false;
+    }
+
+    @Override
+    public List getOperandList() {
+        return ImmutableList.of();
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    public String getDstLogicalDb() {
+        return dstLogicalDb;
+    }
+
+    public String getSrcPhyDb() {
+        return srcPhyDb;
+    }
+
+    public String getLocality() {
+        return locality;
+    }
+
+    public static class SqlImportDatabaseOperator extends SqlSpecialOperator {
+        public SqlImportDatabaseOperator() {
+            super("IMPORT_DATABASE", SqlKind.IMPORT_DATABASE);
+        }
+
+        @Override
+        public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
+            final RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            final RelDataType columnType = typeFactory.createSqlType(SqlTypeName.CHAR);
+
+            return typeFactory.createStructType(
+                ImmutableList.of((RelDataTypeField) new RelDataTypeFieldImpl("IMPORT_DATABASE_RESULT",
+                    0,
+                    columnType)));
+        }
+    }
+
+    public boolean isExistStillImport() {
+        return existStillImport;
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword("IMPORT DATABASE ");
+        writer.print(srcPhyDb);
+        writer.keyword(" AS ");
+        writer.print(dstLogicalDb);
+        writer.keyword(" LOCALITY=");
+        writer.print(locality);
+    }
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlImportSequence.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlImportSequence.java
new file mode 100644
index 000000000..6d922afdc
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlImportSequence.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+import java.util.List;
+
+/**
+ * Created by zhuqiwei.
+ *
+ * @author zhuqiwei
+ */
+public class SqlImportSequence extends SqlDdl {
+    private static final SqlOperator OPERATOR = new SqlImportSequenceOperator();
+
+    final String logicalDb;
+
+    public SqlImportSequence(SqlParserPos pos, String logicalDb) {
+        super(OPERATOR, pos);
+        this.logicalDb = logicalDb;
+    }
+
+    @Override
+    public List getOperandList() {
+        return ImmutableList.of();
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    public String getLogicalDb() {
+        return logicalDb;
+    }
+
+    public static class SqlImportSequenceOperator extends SqlSpecialOperator {
+        public SqlImportSequenceOperator() {
+            super("IMPORT_SEQUENCE", SqlKind.IMPORT_SEQUENCE);
+        }
+
+        @Override
+        public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
+            final RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            final RelDataType columnType = typeFactory.createSqlType(SqlTypeName.CHAR);
+
+            return typeFactory.createStructType(
+                ImmutableList.of((RelDataTypeField) new RelDataTypeFieldImpl("IMPORT_SEQUENCE_RESULT",
+                    0,
+                    columnType)));
+        }
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword("IMPORT SEQUENCE FROM ");
+        writer.print(logicalDb);
+    }
+
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlIndexDefinition.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlIndexDefinition.java
index 985323ba7..876bbb312 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlIndexDefinition.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlIndexDefinition.java
@@ -16,8 +16,8 @@
 
 package org.apache.calcite.sql;
 
-import com.google.common.collect.Maps;
 import com.alibaba.polardbx.common.utils.GeneralUtil;
+import com.google.common.collect.Maps;
 import org.apache.calcite.sql.SqlWriter.Frame;
 import org.apache.calcite.sql.SqlWriter.FrameTypeEnum;
 import org.apache.calcite.sql.parser.SqlParserPos;
@@ -41,20 +41,25 @@ public class SqlIndexDefinition extends SqlCall {
      */
     private final boolean hasConstraint;
     private final boolean clusteredIndex;
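+    /** true when this definition is a clustered columnar index (CCI) */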
+    private final boolean columnarIndex;
     private final SqlIdentifier uniqueConstraint;
     private final SqlIndexResiding indexResiding;
     private final String type; // FULLTEXT/PRIMARY/UNIQUE/SPATIAL
     private final SqlIndexType indexType;
     private final SqlIdentifier indexName;
+    private final SqlIdentifier originIndexName;
     private final SqlIdentifier table;
     private final List columns;
     private final List covering;
+    private final List originCovering;
+    private final List clusteredKeys;
     private final SqlNode dbPartitionBy;
     private final SqlNode dbPartitions = null;
     private final SqlNode tbPartitionBy;
     private final SqlNode tbPartitions;
 
-    private SqlNode partitioning;
+    private final SqlNode partitioning;
+    private final SqlNode originPartitioning;
     private final List<SqlIndexOption> options;
     private String primaryTableDefinition;
     private SqlCreateTable primaryTableNode;
@@ -67,13 +72,36 @@ public class SqlIndexDefinition extends SqlCall {
      */
     private boolean visible;
     private final SqlNode tableGroupName;
-
-    public SqlIndexDefinition(SqlParserPos pos, boolean hasConstraint, SqlIdentifier uniqueConstraint,
-                              SqlIndexResiding indexResiding, String type, SqlIndexType indexType,
-                              SqlIdentifier indexName, SqlIdentifier table, List columns,
-                              List covering, SqlNode dbPartitionBy, SqlNode tbPartitionBy,
-                              SqlNode tbPartitions, SqlNode partitioning, List options,
-                              boolean clusteredIndex, SqlNode tableGroupName, boolean visible) {
+    private final SqlNode engineName;
+    private final List dictColumns;
+    private final boolean withImplicitTableGroup;
+
+    public SqlIndexDefinition(SqlParserPos pos,
+                              boolean hasConstraint,
+                              SqlIdentifier uniqueConstraint,
+                              SqlIndexResiding indexResiding,
+                              String type,
+                              SqlIndexType indexType,
+                              SqlIdentifier indexName,
+                              SqlIdentifier originIndexName,
+                              SqlIdentifier table,
+                              List<SqlIndexColumnName> columns,
+                              List<SqlIndexColumnName> covering,
+                              List<SqlIndexColumnName> originCovering,
+                              SqlNode dbPartitionBy,
+                              SqlNode tbPartitionBy,
+                              SqlNode tbPartitions,
+                              SqlNode partitioning,
+                              SqlNode originPartitioning,
+                              List<SqlIndexColumnName> clusteredKeys,
+                              List<SqlIndexOption> options,
+                              boolean clusteredIndex,
+                              boolean columnarIndex,
+                              SqlNode tableGroupName,
+                              SqlNode engineName,
+                              List dictColumns,
+                              boolean withImplicitTableGroup,
+                              boolean visible) {
         super(pos);
         this.hasConstraint = hasConstraint;
         this.uniqueConstraint = uniqueConstraint;
@@ -81,26 +109,55 @@ public SqlIndexDefinition(SqlParserPos pos, boolean hasConstraint, SqlIdentifier
         this.type = type;
         this.indexType = indexType;
         this.indexName = indexName;
+        this.originIndexName = originIndexName;
         this.table = table;
         this.columns = columns;
         this.covering = covering;
+        this.originCovering = originCovering;
         this.dbPartitionBy = dbPartitionBy;
         this.tbPartitionBy = tbPartitionBy;
         this.tbPartitions = tbPartitions;
+        this.clusteredKeys = clusteredKeys;
         this.options = options;
         this.clusteredIndex = clusteredIndex;
+        this.columnarIndex = columnarIndex;
         this.partitioning = partitioning;
+        this.originPartitioning = originPartitioning;
         this.tableGroupName = tableGroupName;
+        this.engineName = engineName;
+        this.dictColumns = dictColumns;
+        this.withImplicitTableGroup = withImplicitTableGroup;
         this.visible = visible;
     }
 
-    public SqlIndexDefinition(SqlParserPos pos, boolean hasConstraint, SqlIdentifier uniqueConstraint,
-                              SqlIndexResiding indexResiding, String type, SqlIndexType indexType,
-                              SqlIdentifier indexName, SqlIdentifier table, List columns,
-                              List covering, SqlNode dbPartitionBy, SqlNode tbPartitionBy,
-                              SqlNode tbPartitions, SqlNode partitioning, List options,
-                              String primaryTableDefinition, SqlCreateTable primaryTableNode, boolean clusteredIndex,
-                              SqlNode tableGroupName, boolean visible) {
+    public SqlIndexDefinition(SqlParserPos pos,
+                              boolean hasConstraint,
+                              SqlIdentifier uniqueConstraint,
+                              SqlIndexResiding indexResiding,
+                              String type,
+                              SqlIndexType indexType,
+                              SqlIdentifier indexName,
+                              SqlIdentifier originIndexName,
+                              SqlIdentifier table,
+                              List<SqlIndexColumnName> columns,
+                              List<SqlIndexColumnName> covering,
+                              List<SqlIndexColumnName> originCovering,
+                              SqlNode dbPartitionBy,
+                              SqlNode tbPartitionBy,
+                              SqlNode tbPartitions,
+                              SqlNode partitioning,
+                              SqlNode originPartitioning,
+                              List<SqlIndexColumnName> clusteredKeys,
+                              List<SqlIndexOption> options,
+                              String primaryTableDefinition,
+                              SqlCreateTable primaryTableNode,
+                              boolean clusteredIndex,
+                              boolean columnarIndex,
+                              SqlNode tableGroupName,
+                              SqlNode engineName,
+                              List dictColumns,
+                              boolean withImplicitTableGroup,
+                              boolean visible) {
         super(pos);
         this.hasConstraint = hasConstraint;
         this.uniqueConstraint = uniqueConstraint;
@@ -108,18 +165,26 @@ public SqlIndexDefinition(SqlParserPos pos, boolean hasConstraint, SqlIdentifier
         this.type = type;
         this.indexType = indexType;
         this.indexName = indexName;
+        this.originIndexName = originIndexName;
         this.table = table;
         this.columns = columns;
         this.covering = covering;
+        this.originCovering = originCovering;
         this.dbPartitionBy = dbPartitionBy;
         this.tbPartitionBy = tbPartitionBy;
         this.tbPartitions = tbPartitions;
+        this.clusteredKeys = clusteredKeys;
         this.options = options;
         this.primaryTableDefinition = primaryTableDefinition;
         this.primaryTableNode = primaryTableNode;
         this.clusteredIndex = clusteredIndex;
+        this.columnarIndex = columnarIndex;
         this.partitioning = partitioning;
+        this.originPartitioning = originPartitioning;
         this.tableGroupName = tableGroupName;
+        this.engineName = engineName;
+        this.dictColumns = dictColumns;
+        this.withImplicitTableGroup = withImplicitTableGroup;
         this.visible = visible;
     }
 
@@ -134,6 +199,40 @@ public static SqlIndexDefinition localIndex(SqlParserPos pos, boolean hasConstra
             type,
             indexType,
             indexName,
+            indexName,
+            table,
+            columns,
+            null,
+            null,
+            null,
+            null,
+            null,
+            null,
+            null,
+            null,
+            options,
+            false,
+            false,
+            null,
+            null,
+            null,
+            false,
+            true);
+    }
+
+    public static SqlIndexDefinition localIndex(SqlParserPos pos, boolean hasConstraint,
+                                                SqlIdentifier uniqueConstraint, boolean explicit, String type,
+                                                SqlIndexType indexType, SqlIdentifier indexName, SqlIdentifier table,
+                                                List<SqlIndexColumnName> columns, List<SqlIndexOption> options,
+                                                SqlNode tableGroupName, boolean withImplicitTableGroup) {
+        return new SqlIndexDefinition(pos,
+            hasConstraint,
+            uniqueConstraint,
+            explicit ? SqlIndexResiding.LOCAL : null,
+            type,
+            indexType,
+            indexName,
+            indexName,
             table,
             columns,
             null,
@@ -141,9 +240,16 @@ public static SqlIndexDefinition localIndex(SqlParserPos pos, boolean hasConstra
             null,
             null,
             null,
+            null,
+            null,
+            null,
             options,
             false,
+            false,
+            tableGroupName,
             null,
+            null,
+            withImplicitTableGroup,
             true);
     }
 
@@ -153,7 +259,8 @@ public static SqlIndexDefinition globalIndex(SqlParserPos pos, boolean hasConstr
                                                 List<SqlIndexColumnName> columns, List<SqlIndexColumnName> covering,
                                                 SqlNode dbPartitionBy, SqlNode tbPartitionBy, SqlNode tbPartitions,
                                                 SqlNode partitioning, List<SqlIndexOption> options,
-                                                 SqlNode tableGroupName, boolean visible) {
+                                                 SqlNode tableGroupName, boolean withImplicitTablegroup,
+                                                 boolean visible) {
         return new SqlIndexDefinition(pos,
             hasConstraint,
             uniqueConstraint,
@@ -161,16 +268,24 @@ public static SqlIndexDefinition globalIndex(SqlParserPos pos, boolean hasConstr
             type,
             indexType,
             indexName,
+            indexName,
             table,
             columns,
             covering,
+            covering,
             dbPartitionBy,
             tbPartitionBy,
             tbPartitions,
             partitioning,
+            partitioning,
+            null,
             options,
             false,
+            false,
             tableGroupName,
+            null,
+            null,
+            withImplicitTablegroup,
             visible);
     }
 
@@ -181,7 +296,45 @@ public static SqlIndexDefinition clusteredIndex(SqlParserPos pos, boolean hasCon
                                                    List<SqlIndexColumnName> covering, SqlNode dbPartitionBy,
                                                    SqlNode tbPartitionBy, SqlNode tbPartitions, SqlNode partitioning,
                                                    List<SqlIndexOption> options, SqlNode tableGroupName,
-                                                    boolean visible) {
+                                                    boolean withImplicitTablegroup, boolean visible) {
+        return new SqlIndexDefinition(pos,
+            hasConstraint,
+            uniqueConstraint,
+            SqlIndexResiding.GLOBAL,
+            type,
+            indexType,
+            indexName,
+            indexName,
+            table,
+            columns,
+            covering,
+            covering,
+            dbPartitionBy,
+            tbPartitionBy,
+            tbPartitions,
+            partitioning,
+            partitioning,
+            null,
+            options,
+            true,
+            false,
+            tableGroupName,
+            null,
+            null,
+            withImplicitTablegroup,
+            visible);
+    }
+
+    public static SqlIndexDefinition columnarIndex(SqlParserPos pos, boolean hasConstraint,
+                                                   SqlIdentifier uniqueConstraint, String type,
+                                                   SqlIndexType indexType, SqlIdentifier indexName,
+                                                   SqlIdentifier table, List<SqlIndexColumnName> columns,
+                                                   List<SqlIndexColumnName> covering, SqlNode dbPartitionBy,
+                                                   SqlNode tbPartitionBy, SqlNode tbPartitions, SqlNode partitioning,
+                                                   List<SqlIndexColumnName> clusteredKeys,
+                                                   List<SqlIndexOption> options, SqlNode tableGroupName,
+                                                   SqlNode engineName, List dictColumns,
+                                                   boolean withImplicitTablegroup, boolean visible) {
         return new SqlIndexDefinition(pos,
             hasConstraint,
             uniqueConstraint,
@@ -189,16 +342,24 @@ public static SqlIndexDefinition clusteredIndex(SqlParserPos pos, boolean hasCon
             type,
             indexType,
             indexName,
+            indexName,
             table,
             columns,
             covering,
+            covering,
             dbPartitionBy,
             tbPartitionBy,
             tbPartitions,
             partitioning,
+            partitioning,
+            clusteredKeys,
             options,
             true,
+            true,
             tableGroupName,
+            engineName,
+            dictColumns,
+            withImplicitTablegroup,
             visible);
     }
 
@@ -224,6 +385,10 @@ public List getOperandList() {
 
     @Override
     public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        unparse(writer, leftPrec, rightPrec, false);
+    }
+
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec, boolean withOriginNames) {
         final boolean isGlobal = SqlUtil.isGlobal(indexResiding);
 
         final SqlWriter.Frame frame = writer.startList(SqlWriter.FrameTypeEnum.SELECT, "", "");
@@ -239,11 +404,12 @@ public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
             writer.endList(frame1);
         }
 
-        if (isGlobal) {
-            if (null != covering && !covering.isEmpty()) {
+        if (isGlobal || columnarIndex) {
+            final List<SqlIndexColumnName> coveringToShow = withOriginNames ? originCovering : covering;
+            if (null != coveringToShow && !coveringToShow.isEmpty()) {
                 writer.keyword("COVERING");
                 final Frame frame2 = writer.startList(FrameTypeEnum.FUN_CALL, "(", ")");
-                SqlUtil.wrapSqlNodeList(covering).commaList(writer);
+                SqlUtil.wrapSqlNodeList(coveringToShow).commaList(writer);
                 writer.endList(frame2);
             }
 
@@ -267,8 +433,9 @@ public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
                 tbPartitions.unparse(writer, leftPrec, rightPrec);
             }
 
-            if (null != partitioning) {
-                 partitioning.unparse(writer, leftPrec, rightPrec);
+            final SqlNode partitioningToShow = withOriginNames ? originPartitioning : partitioning;
+            if (null != partitioningToShow) {
+                partitioningToShow.unparse(writer, leftPrec, rightPrec);
             }
 
             if (writer instanceof SqlPrettyWriter) {
@@ -276,6 +443,12 @@ public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
             }
         }
 
+        if (columnarIndex && null != engineName) {
+            writer.keyword("ENGINE");
+            writer.keyword("=");
+            engineName.unparse(writer, leftPrec, rightPrec);
+        }
+
         if (null != options) {
             for (SqlIndexOption option : options) {
                 option.unparse(writer, leftPrec, rightPrec);
@@ -313,6 +486,10 @@ public boolean isClustered() {
         return clusteredIndex;
     }
 
+    public boolean isColumnar() {
+        return columnarIndex;
+    }
+
     public boolean isHasConstraint() {
         return hasConstraint;
     }
@@ -349,6 +526,10 @@ public List getCovering() {
         return covering;
     }
 
+    public List<SqlIndexColumnName> getClusteredKeys() {
+        return clusteredKeys;
+    }
+
     public SqlNode getDbPartitionBy() {
         return dbPartitionBy;
     }
@@ -438,6 +619,49 @@ public void setPrimaryTableNode(SqlCreateTable primaryTableNode) {
         this.primaryTableNode = primaryTableNode;
     }
 
+    public SqlIdentifier getOriginIndexName() {
+        return originIndexName;
+    }
+
+    public List<SqlIndexColumnName> getOriginCovering() {
+        return originCovering;
+    }
+
+    public SqlNode getOriginPartitioning() {
+        return originPartitioning;
+    }
+
+    public SqlIndexDefinition replacePartitioning(SqlNode newPartition) {
+        return new SqlIndexDefinition(pos,
+            hasConstraint,
+            uniqueConstraint,
+            indexResiding,
+            type,
+            indexType,
+            indexName,
+            originIndexName,
+            table,
+            columns,
+            covering,
+            originCovering,
+            null == newPartition ? dbPartitionBy : null,
+            null == newPartition ? tbPartitionBy : null,
+            null == newPartition ? tbPartitions : null,
+            null == newPartition ? partitioning : newPartition,
+            originPartitioning,
+            columnarIndex ? clusteredKeys : null,
+            options,
+            primaryTableDefinition,
+            primaryTableNode,
+            clusteredIndex,
+            columnarIndex,
+            tableGroupName,
+            engineName,
+            dictColumns,
+            withImplicitTableGroup,
+            visible);
+    }
+
     public SqlIndexDefinition replaceCovering(Collection coveringColumns) {
         if (GeneralUtil.isEmpty(coveringColumns)) {
             return this;
@@ -456,18 +680,26 @@ public SqlIndexDefinition replaceCovering(Collection coveringColumns) {
             type,
             indexType,
             indexName,
+            originIndexName,
             table,
             columns,
             tmpCovering,
+            originCovering,
             dbPartitionBy,
             tbPartitionBy,
             tbPartitions,
             partitioning,
+            originPartitioning,
+            clusteredKeys,
             options,
             primaryTableDefinition,
             primaryTableNode,
             clusteredIndex,
+            columnarIndex,
             tableGroupName,
+            engineName,
+            dictColumns,
+            withImplicitTableGroup,
             this.visible);
     }
 
@@ -503,24 +735,39 @@ public SqlIndexDefinition mergeCovering(Collection coveringColumns) {
                 type,
                 indexType,
                 indexName,
+                originIndexName,
                 table,
                 columns,
                 tmpCovering,
+                originCovering,
                 dbPartitionBy,
                 tbPartitionBy,
                 tbPartitions,
                 partitioning,
+                originPartitioning,
+                clusteredKeys,
                 options,
                 primaryTableDefinition,
                 primaryTableNode,
                 clusteredIndex,
+                columnarIndex,
                 tableGroupName,
+                engineName,
+                dictColumns,
+                withImplicitTableGroup,
                 visible);
         }
 
     }
 
-    public SqlIndexDefinition rebuildToGsi(SqlIdentifier newName, SqlNode dbpartition, boolean clustered) {
+    /**
+     * Rebuild the GSI definition with a new index name and a full partition definition.
+     *
+     * @param newName New index name, with random suffix
+     * @param dbPartition Full partition definition, with DBPARTITION BY appended
+     * @return Copied SqlIndexDefinition
+     */
+    public SqlIndexDefinition rebuildToGsi(SqlIdentifier newName, SqlNode dbPartition) {
         return new SqlIndexDefinition(pos,
             hasConstraint,
             uniqueConstraint,
@@ -528,52 +775,37 @@ public SqlIndexDefinition rebuildToGsi(SqlIdentifier newName, SqlNode dbpartitio
             type,
             indexType,
             null == newName ? indexName : newName,
+            originIndexName,
             table,
             columns,
-            clustered ? null : covering,
-            null == dbpartition ? dbPartitionBy : dbpartition,
-            null == dbpartition ? tbPartitionBy : null,
-            null == dbpartition ? tbPartitions : null,
+            covering,
+            originCovering,
+            null == dbPartition ? dbPartitionBy : dbPartition,
+            null == dbPartition ? tbPartitionBy : null,
+            null == dbPartition ? tbPartitions : null,
             partitioning,
+            originPartitioning,
+            columnarIndex ? clusteredKeys : null,
             options,
             primaryTableDefinition,
             primaryTableNode,
-            clustered,
+            clusteredIndex,
+            columnarIndex,
             tableGroupName,
+            engineName,
+            dictColumns,
+            withImplicitTableGroup,
             visible);
     }
 
-//    public SqlIndexDefinition rebuildToGsiNewPartition(SqlIdentifier newName, SqlNode newPartition, boolean clustered, List newAddedCols) {
-//        List newColInfos = new ArrayList<>();
-//        newColInfos.addAll(columns);
-////        for (int i = 0; i < newAddedCols.size(); i++) {
-////            SqlIdentifier colId = newAddedCols.get(i);
-////            SqlIndexColumnName colName =
-////            new SqlIndexColumnName(SqlParserPos.ZERO, colId, null, null);
-////            newColInfos.add(colName);
-////        }
-//        return new SqlIndexDefinition(pos,
-//            hasConstraint,
-//            uniqueConstraint,
-//            SqlIndexResiding.GLOBAL,
-//            type,
-//            indexType,
-//            null == newName ? indexName : newName,
-//            table,
-//            newColInfos,
-//            clustered ? null : covering,
-//            null == newPartition ? dbPartitionBy : null,
-//            null == newPartition ? tbPartitionBy : null,
-//            null == newPartition ? tbPartitions : null,
-//            null == newPartition ? partitioning : newPartition,
-//            options,
-//            primaryTableDefinition,
-//            primaryTableNode,
-//            clustered,
-//            tableGroupName);
-//    }
-
-    public SqlIndexDefinition rebuildToGsiNewPartition(SqlIdentifier newName, SqlNode newPartition, boolean clustered) {
+    /**
+     * Rebuild the GSI definition with a new index name and a full partition definition.
+     *
+     * @param newName New index name, with random suffix
+     * @param newPartition Full partition definition, with PARTITION BY/PARTITIONS appended
+     * @return Copied SqlIndexDefinition
+     */
+    public SqlIndexDefinition rebuildToGsiNewPartition(SqlIdentifier newName, SqlNode newPartition) {
         return new SqlIndexDefinition(pos,
             hasConstraint,
             uniqueConstraint,
@@ -581,18 +813,26 @@ public SqlIndexDefinition rebuildToGsiNewPartition(SqlIdentifier newName, SqlNod
             type,
             indexType,
             null == newName ? indexName : newName,
+            originIndexName,
             table,
             columns,
-            clustered ? null : covering,
+            covering,
+            originCovering,
             null == newPartition ? dbPartitionBy : null,
             null == newPartition ? tbPartitionBy : null,
             null == newPartition ? tbPartitions : null,
             null == newPartition ? partitioning : newPartition,
+            originPartitioning,
+            columnarIndex ? clusteredKeys : null,
             options,
             primaryTableDefinition,
             primaryTableNode,
-            clustered,
+            clusteredIndex,
+            columnarIndex,
             tableGroupName,
+            engineName,
+            dictColumns,
+            withImplicitTableGroup,
             visible);
     }
 
@@ -604,6 +844,7 @@ public SqlIndexDefinition rebuildToExplicitLocal(SqlIdentifier newName) {
             type,
             indexType,
             null == newName ? indexName : newName,
+            originIndexName,
             table,
             columns,
             null,
@@ -611,11 +852,18 @@ public SqlIndexDefinition rebuildToExplicitLocal(SqlIdentifier newName) {
             null,
             null,
             null,
+            null,
+            null,
+            null,
             options,
             primaryTableDefinition,
             primaryTableNode,
             false,
+            false,
             tableGroupName,
+            engineName,
+            dictColumns,
+            withImplicitTableGroup,
             visible);
     }
 
@@ -623,11 +871,27 @@ public SqlNode getPartitioning() {
         return partitioning;
     }
 
-    public void setPartitioning(SqlNode partitioning) {
-        this.partitioning = partitioning;
-    }
-
     public SqlNode getTableGroupName() {
         return tableGroupName;
     }
+
+    public SqlNode getEngineName() {
+        return engineName;
+    }
+
+    public List getDictColumns() {
+        return dictColumns;
+    }
+
+    public boolean withoutPartitionDef() {
+        return null == partitioning && null == dbPartitionBy;
+    }
+
+    public boolean isPartitionIndex() {
+        return !isSingle() && !isBroadcast();
+    }
+
+    public boolean isWithImplicitTableGroup() {
+        return withImplicitTableGroup;
+    }
 }
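
The new four-argument unparse makes the choice between rewritten and user-supplied names explicit. A hedged caller sketch (the display use case is an assumption):

```java
import org.apache.calcite.sql.SqlIndexDefinition;
import org.apache.calcite.sql.SqlWriter;

// Sketch: withOriginNames == true prints originCovering/originPartitioning,
// false prints the effective (possibly rewritten) covering and partitioning.
class IndexUnparseSketch {
    static void print(SqlIndexDefinition def, SqlWriter writer) {
        boolean showOriginals = def.isColumnar(); // assumption: origin names for display
        def.unparse(writer, 0, 0, showOriginals);
    }
}
```
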
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlKind.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlKind.java
index 2647de28c..30362e943 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlKind.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlKind.java
@@ -1069,6 +1069,12 @@ public enum SqlKind {
      */
     BIT_XOR,
 
+    HYPER_LOGLOG,
+
+    PARTIAL_HYPER_LOGLOG,
+
+    FINAL_HYPER_LOGLOG,
+
     /**
      * TDDL ADD {@code  CHECK_SUM}
      */
@@ -1078,6 +1084,17 @@ public enum SqlKind {
      * TDDL ADD {@code  CHECK_SUM_MERGE}
      */
     CHECK_SUM_MERGE,
+
+    /**
+     * TDDL ADD {@code  CHECK_SUM_V2}
+     */
+    CHECK_SUM_V2,
+
+    /**
+     * TDDL ADD {@code  CHECK_SUM_V2_MERGE}
+     */
+    CHECK_SUM_V2_MERGE,
+
     // Group functions
 
     /**
@@ -1385,6 +1402,8 @@ public enum SqlKind {
      */
     ADD_COLUMN,
 
+    ADD_SECURITY_POLICY,
+
     /**
      * {@code ENABLE KEYS} DDL statement.
      */
@@ -1408,6 +1427,10 @@ public enum SqlKind {
 
     ALTER_DATABASE,
 
+    IMPORT_DATABASE,
+
+    IMPORT_SEQUENCE,
+
     CREATE_JAVA_FUNCTION,
 
     CREATE_CCL_RULE,
@@ -1424,6 +1447,26 @@ public enum SqlKind {
 
     CONTINUE_SCHEDULE,
 
+    CREATE_SECURITY_LABEL,
+
+    DROP_SECURITY_LABEL,
+
+    CREATE_SECURITY_LABEL_COMPONENT,
+
+    DROP_SECURITY_LABEL_COMPONENT,
+
+    CREATE_SECURITY_POLICY,
+
+    DROP_SECURITY_POLICY,
+
+    GRANT_SECURITY_LABEL,
+
+    REVOKE_SECURITY_LABEL,
+
+    CREATE_SECURITY_ENTITY,
+
+    DROP_SECURITY_ENTITY,
+
     /**
      * {@code DROP DATABASE} DDL statement.
      */
@@ -1487,6 +1530,8 @@ public enum SqlKind {
 
     SHOW_LOCALITY_INFO,
 
+    SHOW_PHYSICAL_DDL,
+
     SHOW_CREATE_DATABASE,
 
     SHOW_CREATE_TABLE,
@@ -1537,6 +1582,8 @@ public enum SqlKind {
 
     SHOW_TRACE,
 
+    SHOW_PRUNE_TRACE,
+
     SHOW_SEQUENCES,
 
     SHOW_RULE,
@@ -1569,6 +1616,8 @@ public enum SqlKind {
 
     SHOW_GLOBAL_INDEX,
 
+    SHOW_COLUMNAR_INDEX,
+
     SHOW_GLOBAL_DEADLOCKS,
 
     SHOW_LOCAL_DEADLOCKS,
@@ -1606,7 +1655,9 @@ public enum SqlKind {
     REBALANCE_MASTER,
 
     RESET_MASTER,
+
     SET_CDC_GLOBAL,
+
     FLUSH_LOGS,
 
     START_SLAVE,
@@ -1619,6 +1670,22 @@ public enum SqlKind {
 
     SHOW_SLAVE_STATUS,
 
+    REPLICA_HASH_CHECK,
+
+    START_REPLICA_CHECK,
+
+    PAUSE_REPLICA_CHECK,
+
+    CONTINUE_REPLICA_CHECK,
+
+    CANCEL_REPLICA_CHECK,
+
+    RESET_REPLICA_CHECK,
+
+    SHOW_REPLICA_CHECK_PROGRESS,
+
+    SHOW_REPLICA_CHECK_DIFF,
+
     DESCRIBE_COLUMNS,
 
     LOCK_TABLE,
@@ -1627,6 +1694,10 @@ public enum SqlKind {
 
     CHECK_TABLE,
 
+    CHECK_COLUMNAR_PARTITION,
+
+    CHECK_COLUMNAR_INDEX, // As a special DDL.
+
     CHECK_GLOBAL_INDEX, // As a special DDL.
 
     ANALYZE_TABLE,
@@ -1794,7 +1865,6 @@ public enum SqlKind {
      */
     REBALANCE,
 
-
     CREATE_STORAGE_POOL,
     /**
      * create storage pool
@@ -1827,6 +1897,11 @@ public enum SqlKind {
      */
     DROP_FILESTORAGE,
 
+    /**
+     * file storage management: clear fileStorage, which only clears the metadata of the cold data storage
+     */
+    CLEAR_FILESTORAGE,
+
     /**
      * alter table set tablegroup
      */
@@ -1857,6 +1932,8 @@ public enum SqlKind {
      */
     REPARTITION_LOCAL_PARTITION,
 
+    LOCAL_PARTITION,
+
     CREATE_FUNCTION,
 
     DROP_FUNCTION,
@@ -1891,7 +1968,29 @@ public enum SqlKind {
 
     ALTER_TABLEGROUP_ADD_TABLE,
 
-    DROP_TRIGGER;
+    DROP_TRIGGER,
+
+    CREATE_ROLE,
+
+    DROP_ROLE,
+
+    CREATE_USER,
+
+    DROP_USER,
+
+    GRANT_PRIVILEGE,
+
+    GRANT_ROLE,
+
+    REVOKE_PRIVILEGE,
+
+    REVOKE_ROLE,
+
+    SET_PASSWORD,
+
+    ALTER_TABLE_DISCARD_TABLESPACE,
+    ALTER_TABLE_IMPORT_TABLESPACE,
+    ALTER_INSTANCE;
 
     //~ Static fields/initializers ---------------------------------------------
 
@@ -1975,6 +2074,7 @@ public enum SqlKind {
         SHOW_DDL_RESULTS,
         SHOW_SCHEDULE_RESULTS,
         SHOW_LOCALITY_INFO,
+        SHOW_PHYSICAL_DDL,
         SHOW_GRANTS,
         DESCRIBE_COLUMNS,
         SHOW_AUTHORS,
@@ -1983,6 +2083,7 @@ public enum SqlKind {
         SHOW_PROFILE,
         SHOW_PROFILES,
         SHOW_GLOBAL_INDEX,
+        SHOW_COLUMNAR_INDEX,
         SHOW_METADATA_LOCK,
         SHOW_TRANS,
         SHOW_TRANS_STATS,
@@ -2001,22 +2102,27 @@ public enum SqlKind {
         DESCRIBE_COLUMNS,
         SHOW_PARTITIONS);
     public static final EnumSet<SqlKind> LOGICAL_SHOW_WITH_SCHEMA = EnumSet.of(SHOW_TABLES,
-        SHOW_LOCALITY_INFO
+        SHOW_LOCALITY_INFO, SHOW_PARTITIONS, SHOW_PHYSICAL_DDL
     );
 
     public static final EnumSet<SqlKind> LOGICAL_SHOW_BINLOG =
         EnumSet.of(SHOW_BINARY_LOGS, SHOW_BINLOG_EVENTS, SHOW_MASTER_STATUS, SHOW_BINARY_STREAMS, SHOW_CDC_STORAGE);
 
     public static final EnumSet<SqlKind> LOGICAL_REPLICATION = EnumSet.of(CHANGE_MASTER,
-        START_SLAVE, STOP_SLAVE, CHANGE_REPLICATION_FILTER, SHOW_SLAVE_STATUS, RESET_SLAVE);
+        START_SLAVE, STOP_SLAVE, CHANGE_REPLICATION_FILTER, SHOW_SLAVE_STATUS, RESET_SLAVE, REPLICA_HASH_CHECK,
+        START_REPLICA_CHECK, PAUSE_REPLICA_CHECK, CONTINUE_REPLICA_CHECK, CANCEL_REPLICA_CHECK, RESET_REPLICA_CHECK,
+        SHOW_REPLICA_CHECK_PROGRESS, SHOW_REPLICA_CHECK_DIFF);
 
     public static final EnumSet<SqlKind> LOGICAL_CDC_COMMAND =
-        EnumSet.of(START_MASTER, STOP_MASTER, RESTART_MASTER, REBALANCE_MASTER, RESET_MASTER, SET_CDC_GLOBAL, FLUSH_LOGS);
+        EnumSet.of(START_MASTER, STOP_MASTER, RESTART_MASTER, REBALANCE_MASTER, RESET_MASTER, SET_CDC_GLOBAL,
+            FLUSH_LOGS);
 
     public static final EnumSet<SqlKind> SHOW_QUERY = concat(EnumSet.of(SHOW), LOGICAL_SHOW_QUERY);
 
     public static final EnumSet<SqlKind> TABLE_MAINTENANCE_QUERY = EnumSet.of(CHECK_TABLE,
         CHECK_GLOBAL_INDEX,
+        CHECK_COLUMNAR_INDEX,
+        CHECK_COLUMNAR_PARTITION,
         ANALYZE_TABLE,
         OPTIMIZE_TABLE);
 
@@ -2080,13 +2186,12 @@ public enum SqlKind {
 
     /**
      * aggregate function only used in window
-     * */
+     */
     public static final EnumSet<SqlKind> WINDOW_AGG =
         EnumSet.of(LEAD, LAG, NTH_VALUE, FIRST_VALUE, LAST_VALUE,
             NTILE, ROW_NUMBER, RANK, PERCENT_RANK, DENSE_RANK,
             CUME_DIST);
 
-
     /**
      * Category consisting of all DML operators.
      *
@@ -2138,26 +2243,28 @@ public enum SqlKind {
             CREATE_PROCEDURE, DROP_PROCEDURE, ALTER_PROCEDURE,
             CREATE_INDEX, ALTER_INDEX, DROP_INDEX, ALTER_RENAME_INDEX, RENAME_TABLE,
             SET_OPTION, OTHER_DDL, TRUNCATE_TABLE, RENAME_SEQUENCE, ALTER_RULE, ENABLE_KEYS, CREATE_DATABASE,
-            DROP_DATABASE, ALTER_DATABASE,
-            MOVE_DATABASE, CHECK_GLOBAL_INDEX, ALTER_TABLEGROUP, CREATE_TABLEGROUP,
+            DROP_DATABASE, ALTER_DATABASE, IMPORT_DATABASE, IMPORT_SEQUENCE,
+            MOVE_DATABASE, CHECK_GLOBAL_INDEX, CHECK_COLUMNAR_INDEX, ALTER_TABLEGROUP, CREATE_TABLEGROUP,
             CREATE_JAVA_FUNCTION, DROP_JAVA_FUNCTION,
             CHANGE_CONSENSUS_ROLE, ALTER_SYSTEM_SET_CONFIG, ALTER_TABLE_SET_TABLEGROUP,
-            REFRESH_TOPOLOGY, DROP_TABLEGROUP, ALTER_FILESTORAGE, DROP_FILESTORAGE, CREATE_FILESTORAGE,
+            REFRESH_TOPOLOGY, DROP_TABLEGROUP,
+            ALTER_FILESTORAGE, DROP_FILESTORAGE, CLEAR_FILESTORAGE, CREATE_FILESTORAGE,
             CREATE_JOINGROUP, DROP_JOINGROUP, ALTER_JOINGROUP,
             OPTIMIZE_TABLE, ANALYZE_TABLE,
-            CREATE_STORAGE_POOL, ALTER_STORAGE_POOL, DROP_STORAGE_POOL
+            CREATE_STORAGE_POOL, ALTER_STORAGE_POOL, DROP_STORAGE_POOL, ALTER_INSTANCE
         ));
 
     public static final EnumSet<SqlKind> DDL_SUPPORTED_BY_NEW_ENGINE =
         EnumSet.of(RENAME_TABLE, TRUNCATE_TABLE, DROP_TABLE, CREATE_INDEX, DROP_INDEX, ALTER_TABLE, CREATE_TABLE,
-            ALTER_TABLEGROUP, ALTER_TABLE_SET_TABLEGROUP, REFRESH_TOPOLOGY, CHECK_GLOBAL_INDEX, ALTER_RULE,
-            MOVE_DATABASE, ALTER_FILESTORAGE, DROP_FILESTORAGE, CREATE_FILESTORAGE, CREATE_JOINGROUP, DROP_JOINGROUP,
-            ALTER_JOINGROUP, MERGE_TABLEGROUP,
-            CREATE_JAVA_FUNCTION, DROP_JAVA_FUNCTION,ANALYZE_TABLE,
+            ALTER_TABLEGROUP, ALTER_TABLE_SET_TABLEGROUP, REFRESH_TOPOLOGY, CHECK_GLOBAL_INDEX, CHECK_COLUMNAR_INDEX,
+            ALTER_RULE, MOVE_DATABASE, ALTER_FILESTORAGE, DROP_FILESTORAGE, CLEAR_FILESTORAGE, CREATE_FILESTORAGE,
+            CREATE_JOINGROUP, DROP_JOINGROUP, ALTER_JOINGROUP, MERGE_TABLEGROUP,
+            CREATE_VIEW, DROP_VIEW, CREATE_JAVA_FUNCTION, DROP_JAVA_FUNCTION, ANALYZE_TABLE,
             ALTER_TABLEGROUP_ADD_TABLE, OPTIMIZE_TABLE, DROP_MATERIALIZED_VIEW, PUSH_DOWN_UDF,
             CREATE_FUNCTION,
             DROP_FUNCTION, ALTER_FUNCTION, CREATE_PROCEDURE, DROP_PROCEDURE, ALTER_PROCEDURE, ALTER_DATABASE,
-            CREATE_STORAGE_POOL, ALTER_STORAGE_POOL, DROP_STORAGE_POOL);
+            IMPORT_DATABASE, IMPORT_SEQUENCE,
+            CREATE_STORAGE_POOL, ALTER_STORAGE_POOL, DROP_STORAGE_POOL, ALTER_INSTANCE);
 
     public static final EnumSet<SqlKind> SUPPORT_DDL =
         EnumSet.of(CREATE_TABLE, ALTER_TABLE, DROP_TABLE,
@@ -2170,14 +2277,15 @@ public enum SqlKind {
             CREATE_FUNCTION, DROP_FUNCTION, ALTER_FUNCTION, PUSH_DOWN_UDF,
             CREATE_PROCEDURE, DROP_PROCEDURE, ALTER_PROCEDURE,
             CREATE_INDEX, ALTER_INDEX, DROP_INDEX, ALTER_RENAME_INDEX, RENAME_TABLE, TRUNCATE_TABLE, RENAME_SEQUENCE,
-            CREATE_DATABASE, ALTER_DATABASE,
-            DROP_DATABASE, CHECK_GLOBAL_INDEX, MOVE_DATABASE,
+            CREATE_DATABASE, ALTER_DATABASE, IMPORT_DATABASE, IMPORT_SEQUENCE,
+            DROP_DATABASE, CHECK_GLOBAL_INDEX, CHECK_COLUMNAR_INDEX, MOVE_DATABASE,
             CREATE_JAVA_FUNCTION, DROP_JAVA_FUNCTION,
             CHANGE_CONSENSUS_ROLE, ALTER_SYSTEM_SET_CONFIG,
             ALTER_TABLEGROUP,
             REBALANCE, ALLOCATE_LOCAL_PARTITION, REPARTITION_LOCAL_PARTITION,
             CREATE_JOINGROUP, DROP_JOINGROUP, ALTER_JOINGROUP, MERGE_TABLEGROUP, ALTER_TABLEGROUP_ADD_TABLE,
-            OPTIMIZE_TABLE, ANALYZE_TABLE);
+            OPTIMIZE_TABLE, ANALYZE_TABLE, ALTER_TABLE_DISCARD_TABLESPACE, ALTER_TABLE_IMPORT_TABLESPACE,
+            ALTER_INSTANCE);
 
     public static final EnumSet<SqlKind> SUPPORT_SHADOW_DDL =
         EnumSet.of(CREATE_TABLE, ALTER_TABLE, DROP_TABLE,
@@ -2193,6 +2301,9 @@ public enum SqlKind {
     public static final EnumSet<SqlKind> SUPPORT_SCHEDULE =
         EnumSet.of(CREATE_SCHEDULE, DROP_SCHEDULE, PAUSE_SCHEDULE, CONTINUE_SCHEDULE, FIRE_SCHEDULE);
 
+    public static final EnumSet<SqlKind> SUPPORT_LBAC_SECURITY =
+        EnumSet.of(CREATE_SECURITY_LABEL_COMPONENT, DROP_SECURITY_LABEL_COMPONENT, CREATE_SECURITY_LABEL,
+            DROP_SECURITY_LABEL, CREATE_SECURITY_POLICY, DROP_SECURITY_POLICY, GRANT_SECURITY_LABEL,
+            REVOKE_SECURITY_LABEL, CREATE_SECURITY_ENTITY, DROP_SECURITY_ENTITY);
+
     public static final EnumSet<SqlKind> SUPPORT_ALTER_SYSTEM_DAL =
         EnumSet.of(ALTER_SYSTEM_REFRESH_STORAGE, ALTER_SYSTEM_RELOAD_STORAGE, ALTER_SYSTEM_LEADER);
 
@@ -2209,7 +2320,8 @@ public enum SqlKind {
         SUPPORT_SCHEDULE,
         SUPPORT_ALTER_SYSTEM_DAL,
         LOGICAL_REPLICATION,
-        LOGICAL_CDC_COMMAND);
+        LOGICAL_CDC_COMMAND,
+        SUPPORT_LBAC_SECURITY);
 
     /**
      * Category consisting of query node types.
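
Because the new kinds are wired into the existing EnumSet categories, dispatch remains a membership test. An illustrative sketch (the routing labels are hypothetical):

```java
import org.apache.calcite.sql.SqlKind;

class KindDispatchSketch {
    static String route(SqlKind kind) {
        if (SqlKind.SUPPORT_LBAC_SECURITY.contains(kind)) {
            return "lbac";        // the new security label/policy statements
        }
        if (SqlKind.LOGICAL_REPLICATION.contains(kind)) {
            return "replication"; // now includes the *_REPLICA_CHECK kinds
        }
        return "other";
    }
}
```
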
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlModifyColumn.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlModifyColumn.java
index cbadfd9d1..db3cc985c 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlModifyColumn.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlModifyColumn.java
@@ -76,7 +76,7 @@ public void setTargetTable(SqlNode tableName) {
     public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
         final SqlWriter.Frame frame = writer.startList(SqlWriter.FrameTypeEnum.SELECT, "MODIFY COLUMN", "");
 
-        colName.unparse(writer, leftPrec, rightPrec);
+        //colName.unparse(writer, leftPrec, rightPrec);
         colDef.unparse(writer, leftPrec, rightPrec);
 
         if (first) {
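
The hunk above stops unparsing `colName` separately: `colDef` already renders the column name, so the old code emitted it twice (illustratively, `MODIFY COLUMN c1 c1 BIGINT` instead of `MODIFY COLUMN c1 BIGINT`).
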
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlMoveDatabase.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlMoveDatabase.java
index 2309d33f7..03a8844fa 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlMoveDatabase.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlMoveDatabase.java
@@ -57,7 +57,7 @@ public SqlMoveDatabase(SqlParserPos pos, Map> storageGroups
         this.isCleanUpCommand = isCleanUpCommand;
         if (!isCleanUpCommand) {
             this.currentSourceGroupKey = firstGroup;
-            this.currentTargetGroupKey = GroupInfoUtil.buildScaloutGroupName(firstGroup);
+            this.currentTargetGroupKey = GroupInfoUtil.buildScaleOutGroupName(firstGroup);
         }
     }
 
@@ -75,7 +75,7 @@ public void setCurrentStorageInstId(String currentStorageInstId) {
 
     public void setCleanUpGroups(String groupName) {
         Map> toBeCleanGroups = new HashMap<>();
-        toBeCleanGroups.put(VIRTUAL_STORAGE_ID, ImmutableList.of(GroupInfoUtil.buildScaloutGroupName(groupName)));
+        toBeCleanGroups.put(VIRTUAL_STORAGE_ID, ImmutableList.of(GroupInfoUtil.buildScaleOutGroupName(groupName)));
         this.storageGroups = toBeCleanGroups;
     }
 
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlNode.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlNode.java
index 8aabf8625..333c20e61 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlNode.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlNode.java
@@ -56,6 +56,8 @@ public abstract class SqlNode implements Cloneable {
 
   protected final SqlParserPos pos;
 
+  protected Boolean async = null;
+
   //~ Constructors -----------------------------------------------------------
 
   /**
@@ -338,6 +340,14 @@ public static boolean equalDeep(List operands0,
     }
     return litmus.succeed();
   }
+
+  public Boolean getAsync() {
+    return async;
+  }
+
+  public void setAsync(Boolean async) {
+    this.async = async;
+  }
 }
 
 // End SqlNode.java
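
The new async flag is a deliberate tri-state (null means unspecified). A consumer sketch; the helper name is hypothetical:

```java
import org.apache.calcite.sql.SqlNode;

class AsyncFlagSketch {
    // Only an explicit TRUE opts a statement into asynchronous execution;
    // null (unset) and FALSE are treated as synchronous here.
    static boolean runAsync(SqlNode node) {
        return Boolean.TRUE.equals(node.getAsync());
    }
}
```
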
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlPartition.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlPartition.java
index aecea5263..78e2d5604 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlPartition.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlPartition.java
@@ -16,16 +16,9 @@
 
 package org.apache.calcite.sql;
 
-import com.alibaba.polardbx.common.exception.TddlRuntimeException;
-import com.alibaba.polardbx.common.exception.code.ErrorCode;
-import com.alibaba.polardbx.common.utils.GeneralUtil;
 import com.alibaba.polardbx.common.utils.TStringUtil;
 import com.alibaba.polardbx.druid.util.StringUtils;
-import com.google.common.base.Preconditions;
-import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.sql.parser.SqlParserPos;
-import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.calcite.sql.util.SqlVisitor;
 import org.apache.calcite.sql.validate.SqlValidator;
 import org.apache.calcite.sql.validate.SqlValidatorScope;
@@ -76,18 +69,37 @@ public String toString() {
         StringBuilder sb = new StringBuilder("");
         sb.append("PARTITION ");
         sb.append(name);
-        sb.append(" ");
-        sb.append(values.toString());
+        if (values != null) {
+            sb.append(" ");
+            sb.append(values.toString());
+        }
         if (TStringUtil.isNotEmpty(locality)) {
             sb.append(" LOCALITY=");
             sb.append(TStringUtil.quoteString(locality));
         }
+
+        if (subPartitionCount != null) {
+            sb.append(" ");
+            sb.append("SUBPARTITIONS ");
+            sb.append(subPartitionCount.toString());
+        }
+        if (subPartitions != null && !subPartitions.isEmpty()) {
+            sb.append(" ");
+            sb.append("(");
+            for (int i = 0; i < subPartitions.size(); i++) {
+                if (i > 0) {
+                    sb.append(",");
+                }
+                SqlNode subPart = subPartitions.get(i);
+                sb.append(subPart.toString());
+            }
+            sb.append(")");
+        }
         return sb.toString();
     }
 
     @Override
     public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
-
     }
 
     @Override
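
With the additions above, toString() now renders subpartition metadata as well, along the lines of `PARTITION p0 <bound> SUBPARTITIONS 4` or `PARTITION p1 <bound> (<subpartition>, <subpartition>)`; the bound and subpartition text comes from the nested nodes' own toString() and is shown here schematically.
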
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlPartitionBy.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlPartitionBy.java
index eb898de12..343d1123d 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlPartitionBy.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlPartitionBy.java
@@ -145,7 +145,7 @@ public void validate(SqlValidator validator, SqlValidatorScope scope) {
         }
         int partColCnt = partColTypes.size();
 
-        boolean allowNoPartBndVal = this instanceof SqlPartitionByHash;
+        boolean allowNoPartBndVal = (this instanceof SqlPartitionByHash) || (this instanceof SqlPartitionByCoHash);
         // Validate partitions
         SqlPartitionBy.validatePartitionDefs(validator, scope, this.getPartitions(), partColCnt, -1, allowNoPartBndVal,
             true);
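
With this change, CO_HASH partitioning is validated like HASH partitioning: partition definitions may omit explicit bound values (illustratively, something like `PARTITION BY CO_HASH(c1, c2) PARTITIONS 8`; the exact accepted syntax is defined by the parser, not by this hunk).
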
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlPartitionByCoHash.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlPartitionByCoHash.java
new file mode 100644
index 000000000..730c545c2
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlPartitionByCoHash.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.util.EqualsContext;
+import org.apache.calcite.util.Litmus;
+
+/**
+ * @author chenghui.lch
+ */
+public class SqlPartitionByCoHash extends SqlPartitionBy {
+
+    public SqlPartitionByCoHash(SqlParserPos pos) {
+        super(pos);
+    }
+
+    @Override
+    public boolean equalsDeep(SqlNode node, Litmus litmus, EqualsContext context) {
+        if (!super.equalsDeep(node, litmus, context)) {
+            return false;
+        }
+        return true;
+    }
+}
\ No newline at end of file
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlPauseReplicaCheck.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlPauseReplicaCheck.java
new file mode 100644
index 000000000..522aeb5a2
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlPauseReplicaCheck.java
@@ -0,0 +1,98 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * @author yudong
+ * @since 2023/11/9 11:10
+ **/
+public class SqlPauseReplicaCheck extends SqlDal {
+
+    private static final SqlSpecialOperator OPERATOR = new SqlPauseReplicaCheckOperator();
+
+    private SqlNode dbName;
+    private SqlNode tableName;
+
+    public SqlPauseReplicaCheck(SqlParserPos pos, SqlNode dbName) {
+        super(pos);
+        this.dbName = dbName;
+    }
+
+    public SqlPauseReplicaCheck(SqlParserPos pos, SqlNode dbName, SqlNode tableName) {
+        super(pos);
+        this.dbName = dbName;
+        this.tableName = tableName;
+    }
+
+    public SqlNode getDbName() {
+        return dbName;
+    }
+
+    public void setDbName(SqlNode dbName) {
+        this.dbName = dbName;
+    }
+
+    public SqlNode getTableName() {
+        return tableName;
+    }
+
+    public void setTableName(SqlNode tableName) {
+        this.tableName = tableName;
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword("CHECK REPLICA TABLE");
+        dbName.unparse(writer, 0, 0);
+        if (tableName != null) {
+            writer.print(".");
+            tableName.unparse(writer, 0, 0);
+        }
+        writer.keyword("PAUSE");
+    }
+
+    public static class SqlPauseReplicaCheckOperator extends SqlSpecialOperator {
+
+        public SqlPauseReplicaCheckOperator() {
+            super("PAUSE_REPLICA_CHECK", SqlKind.PAUSE_REPLICA_CHECK);
+        }
+
+        @Override
+        public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
+            final RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            List<RelDataTypeField> columns = new LinkedList<>();
+            columns.add(new RelDataTypeFieldImpl("RESULT", 0, typeFactory.createSqlType(SqlTypeName.INTEGER)));
+            return typeFactory.createStructType(columns);
+        }
+    }
+}
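
A hedged construction sketch for the new statement node (the helper class is illustrative):

```java
import org.apache.calcite.sql.SqlIdentifier;
import org.apache.calcite.sql.SqlPauseReplicaCheck;
import org.apache.calcite.sql.parser.SqlParserPos;

// Illustrative: unparse() on the result renders
//   CHECK REPLICA TABLE <db>.<table> PAUSE
class PauseReplicaCheckSketch {
    static SqlPauseReplicaCheck forTable(String db, String table) {
        return new SqlPauseReplicaCheck(SqlParserPos.ZERO,
            new SqlIdentifier(db, SqlParserPos.ZERO),
            new SqlIdentifier(table, SqlParserPos.ZERO));
    }
}
```
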
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlReplicaHashcheck.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlReplicaHashcheck.java
new file mode 100644
index 000000000..8f2194fc7
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlReplicaHashcheck.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import com.alibaba.polardbx.druid.sql.ast.SQLExpr;
+import lombok.Getter;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * @author yudong
+ * @since 2023/8/22 15:32
+ **/
+@Getter
+public class SqlReplicaHashcheck extends SqlDal {
+    private static final SqlSpecialOperator OPERATOR = new SqlReplicaHashcheckOperator();
+
+    protected SqlKind sqlKind = SqlKind.REPLICA_HASH_CHECK;
+    protected SqlNode from;
+    protected SqlNode where;
+    protected List<SQLExpr> upperBounds;
+    protected List<SQLExpr> lowerBounds;
+
+    public SqlReplicaHashcheck(SqlParserPos pos, SqlNode from, SqlNode where, List<SQLExpr> lowerBounds,
+                               List<SQLExpr> upperBounds) {
+        super(pos);
+        this.from = from;
+        this.where = where;
+        this.upperBounds = upperBounds;
+        this.lowerBounds = lowerBounds;
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    @Override
+    public SqlKind getKind() {
+        return sqlKind;
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword("REPLICA HASHCHECK * FROM");
+        from.unparse(writer, 0, 0);
+        if (where != null) {
+            writer.keyword("WHERE");
+            where.unparse(writer, 0, 0);
+        }
+    }
+
+    public static class SqlReplicaHashcheckOperator extends SqlSpecialOperator {
+        public SqlReplicaHashcheckOperator() {
+            super("SQL_REPLICA_HASH_CHECK", SqlKind.REPLICA_HASH_CHECK);
+        }
+
+        @Override
+        public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
+            final RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            List<RelDataTypeField> columns = new LinkedList<>();
+            columns.add(new RelDataTypeFieldImpl("RESULT", 0, typeFactory.createSqlType(SqlTypeName.INTEGER)));
+            return typeFactory.createStructType(columns);
+        }
+    }
+}
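
Sketch of building the node for a whole-table check; chunked checks would pass druid SQLExpr bound lists instead of nulls (the helper is illustrative):

```java
import org.apache.calcite.sql.SqlIdentifier;
import org.apache.calcite.sql.SqlReplicaHashcheck;
import org.apache.calcite.sql.parser.SqlParserPos;

class ReplicaHashcheckSketch {
    // Null bounds and a null WHERE mean "hash the whole table".
    static SqlReplicaHashcheck wholeTable(String table) {
        SqlIdentifier from = new SqlIdentifier(table, SqlParserPos.ZERO);
        return new SqlReplicaHashcheck(SqlParserPos.ZERO, from, null, null, null);
        // unparse() renders: REPLICA HASHCHECK * FROM <table>
    }
}
```
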
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlResetReplicaCheck.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlResetReplicaCheck.java
new file mode 100644
index 000000000..bd527bb82
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlResetReplicaCheck.java
@@ -0,0 +1,98 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * @author yudong
+ * @since 2023/11/9 11:11
+ **/
+public class SqlResetReplicaCheck extends SqlDal {
+
+    private static final SqlSpecialOperator OPERATOR = new SqlResetReplicaCheckOperator();
+
+    private SqlNode dbName;
+    private SqlNode tableName;
+
+    public SqlResetReplicaCheck(SqlParserPos pos, SqlNode dbName) {
+        super(pos);
+        this.dbName = dbName;
+    }
+
+    public SqlResetReplicaCheck(SqlParserPos pos, SqlNode dbName, SqlNode tableName) {
+        super(pos);
+        this.dbName = dbName;
+        this.tableName = tableName;
+    }
+
+    public SqlNode getDbName() {
+        return dbName;
+    }
+
+    public void setDbName(SqlNode dbName) {
+        this.dbName = dbName;
+    }
+
+    public SqlNode getTableName() {
+        return tableName;
+    }
+
+    public void setTableName(SqlNode tableName) {
+        this.tableName = tableName;
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword("CHECK REPLICA TABLE");
+        dbName.unparse(writer, 0, 0);
+        if (tableName != null) {
+            writer.print(".");
+            tableName.unparse(writer, 0, 0);
+        }
+        writer.keyword("RESET");
+    }
+
+    public static class SqlResetReplicaCheckOperator extends SqlSpecialOperator {
+
+        public SqlResetReplicaCheckOperator() {
+            super("RESET_REPLICA_CHECK", SqlKind.RESET_REPLICA_CHECK);
+        }
+
+        @Override
+        public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
+            final RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            List<RelDataTypeField> columns = new LinkedList<>();
+            columns.add(new RelDataTypeFieldImpl("RESULT", 0, typeFactory.createSqlType(SqlTypeName.INTEGER)));
+            return typeFactory.createStructType(columns);
+        }
+    }
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlRevokeSecurityLabel.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlRevokeSecurityLabel.java
new file mode 100644
index 000000000..edb5e8bdd
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlRevokeSecurityLabel.java
@@ -0,0 +1,105 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+/**
+ * @author pangzhaoxing
+ */
+public class SqlRevokeSecurityLabel extends SqlDal {
+
+    private static final SqlOperator OPERATOR = new SqlRevokeSecurityLabel.SqlRevokeSecurityLabelOperator();
+
+    private SqlIdentifier accessType;
+
+    private SqlIdentifier policyName;
+
+    private SqlUserName userName;
+
+    protected SqlRevokeSecurityLabel(SqlParserPos pos) {
+        super(pos);
+    }
+
+    public SqlRevokeSecurityLabel(SqlParserPos pos, SqlIdentifier accessType, SqlIdentifier policyName,
+                                  SqlUserName userName) {
+        super(pos);
+        this.accessType = accessType;
+        this.policyName = policyName;
+        this.userName = userName;
+    }
+
+    public SqlIdentifier getAccessType() {
+        return accessType;
+    }
+
+    public SqlIdentifier getPolicyName() {
+        return policyName;
+    }
+
+    public SqlUserName getUserName() {
+        return userName;
+    }
+
+    @Override
+    public SqlKind getKind() {
+        return SqlKind.REVOKE_SECURITY_LABEL;
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
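+    // Unparses back to "REVOKE SECURITY LABEL <accessType> ACCESS ON <policy> FROM USER <user>".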
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword("REVOKE SECURITY LABEL");
+        accessType.unparse(writer, leftPrec, rightPrec);
+        writer.keyword("ACCESS");
+        writer.keyword("ON");
+        policyName.unparse(writer, leftPrec, rightPrec);
+        writer.keyword("FROM");
+        writer.keyword("USER");
+        userName.unparse(writer, leftPrec, rightPrec);
+    }
+
+    public static class SqlRevokeSecurityLabelOperator extends SqlSpecialOperator {
+
+        public SqlRevokeSecurityLabelOperator() {
+            super("REVOKE_SECURITY_LABEL", SqlKind.REVOKE_SECURITY_LABEL);
+        }
+
+        @Override
+        public RelDataType deriveType(final SqlValidator validator, final SqlValidatorScope scope, final SqlCall call) {
+            RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            RelDataType columnType = typeFactory.createSqlType(SqlTypeName.CHAR);
+
+            return typeFactory.createStructType(
+                ImmutableList.of((RelDataTypeField) new RelDataTypeFieldImpl("REVOKE_SECURITY_LABEL",
+                    0,
+                    columnType)));
+        }
+    }
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSelect.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSelect.java
index 25d522f64..3e5aa74d8 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSelect.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSelect.java
@@ -31,362 +31,377 @@
  * methods to put somewhere.
  */
 public class SqlSelect extends SqlCall {
-  //~ Static fields/initializers ---------------------------------------------
+    //~ Static fields/initializers ---------------------------------------------
+
+    // constants representing operand positions
+    public static final int FROM_OPERAND = 2;
+    public static final int WHERE_OPERAND = 3;
+    public static final int HAVING_OPERAND = 5;
+
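+    /** Row-lock suffix carried by a SELECT; rendered as FOR UPDATE or LOCK IN SHARE MODE on unparse. */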
+    public enum LockMode {
+        UNDEF, SHARED_LOCK, EXCLUSIVE_LOCK;
+
+        public static LockMode getLockMode(SqlNode sqlNode) {
+            LockMode lockMode = UNDEF;
+            if (sqlNode instanceof TDDLSqlSelect) {
+                lockMode = ((TDDLSqlSelect) sqlNode).getLockMode();
+            }
+            return lockMode;
+        }
+    }
+
+    private LockMode lockMode = LockMode.UNDEF;
+
+    OptimizerHint optimizerHint = new OptimizerHint();
+    SqlNodeList keywordList;
+    SqlNodeList selectList;
+    SqlNode from;
+    SqlNode where;
+    SqlNodeList groupBy;
+    SqlNode having;
+    SqlNodeList windowDecls;
+    SqlNodeList orderBy;
+    SqlNode offset;
+    SqlNode fetch;
+    /**
+     * computedFetch should be a SqlNumericLiteral
+     * while fetch is a SqlBasicCall with SqlDynamicParam
+     */
+    SqlNode computedFetch;
+    SqlMatchRecognize matchRecognize;
+    OutFileParams outFileParams;
+
+    //~ Constructors -----------------------------------------------------------
+
+    public SqlSelect(SqlParserPos pos,
+                     SqlNodeList keywordList,
+                     SqlNodeList selectList,
+                     SqlNode from,
+                     SqlNode where,
+                     SqlNodeList groupBy,
+                     SqlNode having,
+                     SqlNodeList windowDecls,
+                     SqlNodeList orderBy,
+                     SqlNode offset,
+                     SqlNode fetch) {
+        super(pos);
+        this.keywordList = Preconditions.checkNotNull(keywordList != null
+            ? keywordList : new SqlNodeList(pos));
+        this.selectList = selectList;
+        this.from = from;
+        this.where = where;
+        this.groupBy = groupBy;
+        this.having = having;
+        this.windowDecls = Preconditions.checkNotNull(windowDecls != null
+            ? windowDecls : new SqlNodeList(pos));
+        this.orderBy = orderBy;
+        this.offset = offset;
+        this.fetch = fetch;
+    }
+
+    public SqlSelect(SqlParserPos pos,
+                     SqlNodeList keywordList,
+                     SqlNodeList selectList,
+                     SqlNode from,
+                     SqlNode where,
+                     SqlNodeList groupBy,
+                     SqlNode having,
+                     SqlNodeList windowDecls,
+                     SqlNodeList orderBy,
+                     SqlNode offset,
+                     SqlNode fetch,
+                     OutFileParams outFileParams) {
+        super(pos);
+        this.keywordList = Preconditions.checkNotNull(keywordList != null
+            ? keywordList : new SqlNodeList(pos));
+        this.selectList = selectList;
+        this.from = from;
+        this.where = where;
+        this.groupBy = groupBy;
+        this.having = having;
+        this.windowDecls = Preconditions.checkNotNull(windowDecls != null
+            ? windowDecls : new SqlNodeList(pos));
+        this.orderBy = orderBy;
+        this.offset = offset;
+        this.fetch = fetch;
+        this.outFileParams = outFileParams;
+    }
 
-  // constants representing operand positions
-  public static final int FROM_OPERAND = 2;
-  public static final int WHERE_OPERAND = 3;
-  public static final int HAVING_OPERAND = 5;
+    //~ Methods ----------------------------------------------------------------
 
-  public enum LockMode {
-    UNDEF, SHARED_LOCK, EXCLUSIVE_LOCK;
+    public SqlOperator getOperator() {
+        return SqlSelectOperator.INSTANCE;
+    }
+
+    @Override
+    public SqlKind getKind() {
+        return SqlKind.SELECT;
+    }
 
-    public static LockMode getLockMode(SqlNode sqlNOde) {
-        LockMode lockMode = UNDEF;
-        if (sqlNOde instanceof TDDLSqlSelect) {
-            lockMode = ((TDDLSqlSelect) sqlNOde).getLockMode();
+    @Override
+    public List<SqlNode> getOperandList() {
+        return ImmutableNullableList.of(keywordList, selectList, from, where,
+            groupBy, having, windowDecls, orderBy, offset, fetch);
+    }
+
+    /**
+     * Swap the positions of offset and fetch, because parameterization
+     * and physical SQL generation accept different operand orders.
+     */
+    public List<SqlNode> getParameterizableOperandList() {
+        if (isDynamicFetch()) {
+            return ImmutableNullableList.of(keywordList, selectList, from, where,
+                groupBy, having, windowDecls, orderBy, computedFetch, offset);
+        } else {
+            return ImmutableNullableList.of(keywordList, selectList, from, where,
+                groupBy, having, windowDecls, orderBy, fetch, offset);
         }
-        return lockMode;
     }
-  }
-
-  private LockMode lockMode = LockMode.UNDEF;
-
-  OptimizerHint optimizerHint = new OptimizerHint();
-  SqlNodeList keywordList;
-  SqlNodeList selectList;
-  SqlNode from;
-  SqlNode where;
-  SqlNodeList groupBy;
-  SqlNode having;
-  SqlNodeList windowDecls;
-  SqlNodeList orderBy;
-  SqlNode offset;
-  SqlNode fetch;
-  /**
-   * computedFetch should be a SqlNumericLiteral
-   * while fetch is a SqlBasicCall with SqlDynamicParam
-   */
-  SqlNode computedFetch;
-  SqlMatchRecognize matchRecognize;
-  OutFileParams outFileParams;
-
-  //~ Constructors -----------------------------------------------------------
-
-  public SqlSelect(SqlParserPos pos,
-      SqlNodeList keywordList,
-      SqlNodeList selectList,
-      SqlNode from,
-      SqlNode where,
-      SqlNodeList groupBy,
-      SqlNode having,
-      SqlNodeList windowDecls,
-      SqlNodeList orderBy,
-      SqlNode offset,
-      SqlNode fetch) {
-    super(pos);
-    this.keywordList = Preconditions.checkNotNull(keywordList != null
-        ? keywordList : new SqlNodeList(pos));
-    this.selectList = selectList;
-    this.from = from;
-    this.where = where;
-    this.groupBy = groupBy;
-    this.having = having;
-    this.windowDecls = Preconditions.checkNotNull(windowDecls != null
-        ? windowDecls : new SqlNodeList(pos));
-    this.orderBy = orderBy;
-    this.offset = offset;
-    this.fetch = fetch;
-  }
-
-  public SqlSelect(SqlParserPos pos,
-                   SqlNodeList keywordList,
-                   SqlNodeList selectList,
-                   SqlNode from,
-                   SqlNode where,
-                   SqlNodeList groupBy,
-                   SqlNode having,
-                   SqlNodeList windowDecls,
-                   SqlNodeList orderBy,
-                   SqlNode offset,
-                   SqlNode fetch,
-                   OutFileParams outFileParams) {
-    super(pos);
-    this.keywordList = Preconditions.checkNotNull(keywordList != null
-        ? keywordList : new SqlNodeList(pos));
-    this.selectList = selectList;
-    this.from = from;
-    this.where = where;
-    this.groupBy = groupBy;
-    this.having = having;
-    this.windowDecls = Preconditions.checkNotNull(windowDecls != null
-        ? windowDecls : new SqlNodeList(pos));
-    this.orderBy = orderBy;
-    this.offset = offset;
-    this.fetch = fetch;
-    this.outFileParams = outFileParams;
-  }
-
-  //~ Methods ----------------------------------------------------------------
-
-  public SqlOperator getOperator() {
-    return SqlSelectOperator.INSTANCE;
-  }
-
-  @Override public SqlKind getKind() {
-    return SqlKind.SELECT;
-  }
-
-  @Override public List getOperandList() {
-    return ImmutableNullableList.of(keywordList, selectList, from, where,
-        groupBy, having, windowDecls, orderBy, offset, fetch);
-  }
-  /**
-   * switch position of offset and fetch, cause different order is
-   * accepted by parametrize and physical sql generation
-   */
-  public List getParameterizableOperandList() {
-    if (isDynamicFetch()) {
-      return ImmutableNullableList.of(keywordList, selectList, from, where,
-          groupBy, having, windowDecls, orderBy, computedFetch, offset);
-    } else {
-      return ImmutableNullableList.of(keywordList, selectList, from, where,
-          groupBy, having, windowDecls, orderBy, fetch, offset);
-    }
-  }
-
-  @Override public void setOperand(int i, SqlNode operand) {
-    switch (i) {
-    case 0:
-      keywordList = Preconditions.checkNotNull((SqlNodeList) operand);
-      break;
-    case 1:
-      selectList = (SqlNodeList) operand;
-      break;
-    case 2:
-      from = operand;
-      break;
-    case 3:
-      where = operand;
-      break;
-    case 4:
-      groupBy = (SqlNodeList) operand;
-      break;
-    case 5:
-      having = operand;
-      break;
-    case 6:
-      windowDecls = Preconditions.checkNotNull((SqlNodeList) operand);
-      break;
-    case 7:
-      orderBy = (SqlNodeList) operand;
-      break;
-    case 8:
-      offset = operand;
-      break;
-    case 9:
-      fetch = operand;
-      break;
-    default:
-      throw new AssertionError(i);
-    }
-  }
-
-  public final boolean isDistinct() {
-    return getModifierNode(SqlSelectKeyword.DISTINCT) != null;
-  }
-
-  public final SqlNode getModifierNode(SqlSelectKeyword modifier) {
-    for (SqlNode keyword : keywordList) {
-      SqlSelectKeyword keyword2 =
-          ((SqlLiteral) keyword).symbolValue(SqlSelectKeyword.class);
-      if (keyword2 == modifier) {
-        return keyword;
-      }
-    }
-    return null;
-  }
-
-  public final SqlNode getFrom() {
-    return from;
-  }
-
-  public void setFrom(SqlNode from) {
-    this.from = from;
-  }
-
-  public final SqlNodeList getGroup() {
-    return groupBy;
-  }
-
-  public void setGroupBy(SqlNodeList groupBy) {
-    this.groupBy = groupBy;
-  }
-
-  public final SqlNode getHaving() {
-    return having;
-  }
-
-  public void setHaving(SqlNode having) {
-    this.having = having;
-  }
-
-  public final SqlNodeList getSelectList() {
-    return selectList;
-  }
-
-  public void setSelectList(SqlNodeList selectList) {
-    this.selectList = selectList;
-  }
-
-  public final SqlNode getWhere() {
-    return where;
-  }
-
-  public void setWhere(SqlNode whereClause) {
-    this.where = whereClause;
-  }
-
-  @Nonnull public final SqlNodeList getWindowList() {
-    return windowDecls;
-  }
-
-  public final SqlNodeList getOrderList() {
-    return orderBy;
-  }
-
-  public void setOrderBy(SqlNodeList orderBy) {
-    this.orderBy = orderBy;
-  }
-
-  public final SqlNode getOffset() {
-    return offset;
-  }
-
-  public void setOffset(SqlNode offset) {
-    this.offset = offset;
-  }
-
-  public final SqlNode getFetch() {
-    return fetch;
-  }
-
-  public void setFetch(SqlNode fetch) {
-    this.fetch = fetch;
-  }
-
-  /**
-   * computed fetch should be set only once as a DynamicParam
-   * if it is set concurrently, make sure the param index is identical
-   */
-  public void setComputedFetch(SqlNode computedFetch) {
-    assert computedFetch instanceof SqlDynamicParam;
-    if (this.computedFetch != null) {
-      Preconditions.checkArgument(((SqlDynamicParam) this.computedFetch).index ==
-          ((SqlDynamicParam) computedFetch).index, "Computed fetch should be set at the exact same index");
-    } else {
-      this.computedFetch = computedFetch;
-    }
-  }
-
-  public boolean isDynamicFetch() {
-    return fetch != null && fetch.getKind() == SqlKind.PLUS && computedFetch != null;
-  }
-
-  public SqlMatchRecognize getMatchRecognize() {
-    return matchRecognize;
-  }
-
-  public void setMatchRecognize(SqlMatchRecognize matchRecognize) {
-    this.matchRecognize = matchRecognize;
-  }
-
-  public OutFileParams getOutFileParams() {
-    return outFileParams;
-  }
-
-  public void setOutFileParams(OutFileParams outFileParams) {
-    this.outFileParams = outFileParams;
-  }
-
-  public void validate(SqlValidator validator, SqlValidatorScope scope) {
-    validator.validateQuery(this, scope, validator.getUnknownType());
-  }
-
-  // Override SqlCall, to introduce a sub-query frame.
-  @Override public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
-    if (!writer.inQuery()) {
-      // If this SELECT is the topmost item in a sub-query, introduce a new
-      // frame. (The topmost item in the sub-query might be a UNION or
-      // ORDER. In this case, we don't need a wrapper frame.)
-      final SqlWriter.Frame frame =
-          writer.startList(SqlWriter.FrameTypeEnum.SUB_QUERY, "(", ")");
-      getOperator().unparse(writer, this, 0, 0);
-      writer.endList(frame);
-    } else {
-      getOperator().unparse(writer, this, leftPrec, rightPrec);
-    }
-    if (this.lockMode == LockMode.EXCLUSIVE_LOCK) {
-      writer.print("FOR UPDATE");
-    } else if (this.lockMode == LockMode.SHARED_LOCK){
-      writer.print("LOCK IN SHARE MODE");
-    }
-  }
-
-  @Override public SqlNode clone(SqlParserPos pos) {
-    final List operandList = getOperandList();
-    SqlNode sqlNode = getOperator().createCall(getFunctionQuantifier(), pos,
+
+    @Override
+    public void setOperand(int i, SqlNode operand) {
+        switch (i) {
+        case 0:
+            keywordList = Preconditions.checkNotNull((SqlNodeList) operand);
+            break;
+        case 1:
+            selectList = (SqlNodeList) operand;
+            break;
+        case 2:
+            from = operand;
+            break;
+        case 3:
+            where = operand;
+            break;
+        case 4:
+            groupBy = (SqlNodeList) operand;
+            break;
+        case 5:
+            having = operand;
+            break;
+        case 6:
+            windowDecls = Preconditions.checkNotNull((SqlNodeList) operand);
+            break;
+        case 7:
+            orderBy = (SqlNodeList) operand;
+            break;
+        case 8:
+            offset = operand;
+            break;
+        case 9:
+            fetch = operand;
+            break;
+        default:
+            throw new AssertionError(i);
+        }
+    }
+
+    public final boolean isDistinct() {
+        return getModifierNode(SqlSelectKeyword.DISTINCT) != null;
+    }
+
+    public final SqlNode getModifierNode(SqlSelectKeyword modifier) {
+        for (SqlNode keyword : keywordList) {
+            SqlSelectKeyword keyword2 =
+                ((SqlLiteral) keyword).symbolValue(SqlSelectKeyword.class);
+            if (keyword2 == modifier) {
+                return keyword;
+            }
+        }
+        return null;
+    }
+
+    public final SqlNode getFrom() {
+        return from;
+    }
+
+    public void setFrom(SqlNode from) {
+        this.from = from;
+    }
+
+    public final SqlNodeList getGroup() {
+        return groupBy;
+    }
+
+    public void setGroupBy(SqlNodeList groupBy) {
+        this.groupBy = groupBy;
+    }
+
+    public final SqlNode getHaving() {
+        return having;
+    }
+
+    public void setHaving(SqlNode having) {
+        this.having = having;
+    }
+
+    public final SqlNodeList getSelectList() {
+        return selectList;
+    }
+
+    public void setSelectList(SqlNodeList selectList) {
+        this.selectList = selectList;
+    }
+
+    public final SqlNode getWhere() {
+        return where;
+    }
+
+    public void setWhere(SqlNode whereClause) {
+        this.where = whereClause;
+    }
+
+    @Nonnull
+    public final SqlNodeList getWindowList() {
+        return windowDecls;
+    }
+
+    public final SqlNodeList getOrderList() {
+        return orderBy;
+    }
+
+    public void setOrderBy(SqlNodeList orderBy) {
+        this.orderBy = orderBy;
+    }
+
+    public final SqlNode getOffset() {
+        return offset;
+    }
+
+    public void setOffset(SqlNode offset) {
+        this.offset = offset;
+    }
+
+    public final SqlNode getFetch() {
+        return fetch;
+    }
+
+    public void setFetch(SqlNode fetch) {
+        this.fetch = fetch;
+    }
+
+    /**
+     * The computed fetch should be set only once, as a SqlDynamicParam;
+     * if it is set concurrently, make sure the param index is identical.
+     */
+    public void setComputedFetch(SqlNode computedFetch) {
+        assert computedFetch instanceof SqlDynamicParam;
+        if (this.computedFetch != null) {
+            Preconditions.checkArgument(((SqlDynamicParam) this.computedFetch).index ==
+                ((SqlDynamicParam) computedFetch).index, "Computed fetch should be set at the exact same index");
+        } else {
+            this.computedFetch = computedFetch;
+        }
+    }
+
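+    // A PLUS-kind fetch with a precomputed literal marks the LIMIT as dynamically parameterized.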
+    public boolean isDynamicFetch() {
+        return fetch != null && fetch.getKind() == SqlKind.PLUS && computedFetch != null;
+    }
+
+    public SqlMatchRecognize getMatchRecognize() {
+        return matchRecognize;
+    }
+
+    public void setMatchRecognize(SqlMatchRecognize matchRecognize) {
+        this.matchRecognize = matchRecognize;
+    }
+
+    public OutFileParams getOutFileParams() {
+        return outFileParams;
+    }
+
+    public void setOutFileParams(OutFileParams outFileParams) {
+        this.outFileParams = outFileParams;
+    }
+
+    public void validate(SqlValidator validator, SqlValidatorScope scope) {
+        validator.validateQuery(this, scope, validator.getUnknownType());
+    }
+
+    // Override SqlCall, to introduce a sub-query frame.
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        if (!writer.inQuery()) {
+            // If this SELECT is the topmost item in a sub-query, introduce a new
+            // frame. (The topmost item in the sub-query might be a UNION or
+            // ORDER. In this case, we don't need a wrapper frame.)
+            final SqlWriter.Frame frame =
+                writer.startList(SqlWriter.FrameTypeEnum.SUB_QUERY, "(", ")");
+            getOperator().unparse(writer, this, 0, 0);
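+            // Print the lock suffix inside the sub-query frame so it ends up within the parentheses.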
+            if (this.lockMode == LockMode.EXCLUSIVE_LOCK) {
+                writer.print("FOR UPDATE");
+            } else if (this.lockMode == LockMode.SHARED_LOCK) {
+                writer.print("LOCK IN SHARE MODE");
+            }
+            writer.endList(frame);
+        } else {
+            getOperator().unparse(writer, this, leftPrec, rightPrec);
+            if (this.lockMode == LockMode.EXCLUSIVE_LOCK) {
+                writer.print("FOR UPDATE");
+            } else if (this.lockMode == LockMode.SHARED_LOCK) {
+                writer.print("LOCK IN SHARE MODE");
+            }
+        }
+    }
+
+    @Override
+    public SqlNode clone(SqlParserPos pos) {
+        final List<SqlNode> operandList = getOperandList();
+        SqlNode sqlNode = getOperator().createCall(getFunctionQuantifier(), pos,
             operandList.toArray(new SqlNode[operandList.size()]));
-    if(sqlNode instanceof SqlSelect){
-      ((SqlSelect) sqlNode).setLockMode(lockMode);
-      ((SqlSelect) sqlNode).optimizerHint = optimizerHint.clone(SqlParserPos.ZERO);
-    }
-    return sqlNode;
-  }
-
-  public boolean hasOrderBy() {
-    return orderBy != null && orderBy.size() != 0;
-  }
-
-  public boolean hasLimit() {
-    return offset != null || fetch != null;
-  }
-
-  public boolean hasWhere() {
-    return where != null;
-  }
-
-  public boolean isKeywordPresent(SqlSelectKeyword targetKeyWord) {
-    return getModifierNode(targetKeyWord) != null;
-  }
-
-  public boolean withLock() {
-    return lockMode != LockMode.UNDEF;
-  }
-
-  public LockMode getLockMode() {
-    return lockMode;
-  }
-
-  public void setLockMode(LockMode lockMode) {
-    this.lockMode = lockMode;
-  }
-
-  public SqlSelect shallowClone(LockMode lockMode) {
-    SqlSelect sqlSelect =
-        new SqlSelect(this.pos, this.keywordList, this.selectList, this.from, this.where, this.groupBy,
-            this.having, this.windowDecls, this.orderBy, this.offset, this.fetch);
-    sqlSelect.setLockMode(lockMode);
-    return sqlSelect;
-  }
-
-  public OptimizerHint getOptimizerHint() {
-    return optimizerHint;
-  }
-
-  public void setOptimizerHint(OptimizerHint optimizerHint) {
-    this.optimizerHint = optimizerHint;
-  }
+        if (sqlNode instanceof SqlSelect) {
+            ((SqlSelect) sqlNode).setLockMode(lockMode);
+            ((SqlSelect) sqlNode).optimizerHint = optimizerHint.clone(SqlParserPos.ZERO);
+        }
+        return sqlNode;
+    }
+
+    public boolean hasOrderBy() {
+        return orderBy != null && orderBy.size() != 0;
+    }
+
+    public boolean hasLimit() {
+        return offset != null || fetch != null;
+    }
+
+    public boolean hasWhere() {
+        return where != null;
+    }
+
+    public boolean isKeywordPresent(SqlSelectKeyword targetKeyWord) {
+        return getModifierNode(targetKeyWord) != null;
+    }
+
+    public boolean withLock() {
+        return lockMode != LockMode.UNDEF;
+    }
+
+    public LockMode getLockMode() {
+        return lockMode;
+    }
+
+    public void setLockMode(LockMode lockMode) {
+        this.lockMode = lockMode;
+    }
+
+    public SqlSelect shallowClone(LockMode lockMode) {
+        SqlSelect sqlSelect =
+            new SqlSelect(this.pos, this.keywordList, this.selectList, this.from, this.where, this.groupBy,
+                this.having, this.windowDecls, this.orderBy, this.offset, this.fetch);
+        sqlSelect.setLockMode(lockMode);
+        return sqlSelect;
+    }
+
+    public OptimizerHint getOptimizerHint() {
+        return optimizerHint;
+    }
+
+    public void setOptimizerHint(OptimizerHint optimizerHint) {
+        this.optimizerHint = optimizerHint;
+    }
 
 }
 
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSelectOperator.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSelectOperator.java
index 724efb90b..dc4ae01d5 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSelectOperator.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSelectOperator.java
@@ -28,8 +28,6 @@
 import java.util.ArrayList;
 import java.util.List;
 
-import static org.apache.calcite.sql.SqlKind.PLUS;
-
 /**
  * An operator describing a query. (Not a query itself.)
  *
@@ -307,6 +305,10 @@ public boolean needUnparseFrom(SqlNode fromNode, SqlWriter writer) {
             return true;
         }
 
+        if (fromNode instanceof SqlValuesTableSource || firstOperand instanceof SqlValuesTableSource) {
+            return true;
+        }
+
         return false;
     }
 
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowBinaryLogs.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowBinaryLogs.java
index d7f225389..d073e539c 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowBinaryLogs.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowBinaryLogs.java
@@ -16,31 +16,19 @@
 
 package org.apache.calcite.sql;
 
-import com.google.common.collect.Lists;
 import org.apache.calcite.sql.parser.SqlParserPos;
 
-import java.util.ArrayList;
 import java.util.List;
 
-/**
- * @Author ShuGuang
- * @Description
- * @Date 2020/10/20 3:30 下午
- */
 public class SqlShowBinaryLogs extends SqlShow {
     private SqlNode with;
-    private static final List OPERANDS_EMPTY = new ArrayList<>(0);
-    private static final List SPECIAL_IDENTIFIERS = Lists.newArrayList(
-        SqlSpecialIdentifier.BINARY,
-        SqlSpecialIdentifier.LOGS);
+    private boolean full;
 
-    public SqlShowBinaryLogs(SqlParserPos pos, SqlNode with) {
-        super(pos, SPECIAL_IDENTIFIERS, OPERANDS_EMPTY, null, null, null, null);
+    public SqlShowBinaryLogs(SqlParserPos pos, List specialIdentifiers, SqlNode with,
+                             boolean full) {
+        super(pos, specialIdentifiers);
         this.with = with;
-    }
-
-    public static SqlShowBinaryLogs create(SqlParserPos pos, SqlNode with) {
-        return new SqlShowBinaryLogs(pos, with);
+        this.full = full;
     }
 
     @Override
@@ -51,4 +39,8 @@ public SqlKind getShowKind() {
     public SqlNode getWith() {
         return with;
     }
+
+    public boolean isFull() {
+        return full;
+    }
 }
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowBinaryStreams.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowBinaryStreams.java
index ac72fbe66..027fd1d4a 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowBinaryStreams.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowBinaryStreams.java
@@ -22,16 +22,19 @@
 import java.util.List;
 
 public class SqlShowBinaryStreams extends SqlShow {
-    private static final List SPECIAL_IDENTIFIERS = Lists.newArrayList(
-        SqlSpecialIdentifier.BINARY,
-        SqlSpecialIdentifier.STREAMS);
+    private SqlNode with;
 
-    public SqlShowBinaryStreams(SqlParserPos pos) {
-        super(pos, SPECIAL_IDENTIFIERS);
+    public SqlShowBinaryStreams(SqlParserPos pos, List specialIdentifiers, SqlNode with) {
+        super(pos, specialIdentifiers);
+        this.with = with;
     }
 
     @Override
     public SqlKind getShowKind() {
         return SqlKind.SHOW_BINARY_STREAMS;
     }
+
+    public SqlNode getWith() {
+        return with;
+    }
 }
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowBinlogEvents.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowBinlogEvents.java
index f8c311e0a..a550de515 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowBinlogEvents.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowBinlogEvents.java
@@ -21,22 +21,11 @@
 import java.util.List;
 
 /**
- * @Author ShuGuang
- * @Description
- * @Date 2020/11/2 2:33 下午
+ * SHOW BINLOG EVENTS statement.
  */
 public class SqlShowBinlogEvents extends SqlShow {
 
-    private SqlNode with,logName, pos, limit;
-
-    public SqlShowBinlogEvents(SqlParserPos parserPos,
-                               List specialIdentifiers,
-                               List operands, SqlNode logName, SqlNode pos, SqlNode limit) {
-        super(parserPos, specialIdentifiers, operands);
-        this.logName = logName;
-        this.pos = pos;
-        this.limit = limit;
-    }
+    private SqlNode with, logName, pos, limit;
 
     public SqlShowBinlogEvents(SqlParserPos parserPos,
                                List specialIdentifiers,
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowColumnarIndex.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowColumnarIndex.java
new file mode 100644
index 000000000..d3773c99e
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowColumnarIndex.java
@@ -0,0 +1,98 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+import java.util.LinkedList;
+import java.util.List;
+
+public class SqlShowColumnarIndex extends SqlShow {
+
+    private SqlShowGlobalIndexOperator operator;
+
+    private SqlNode table;
+
+    public SqlShowColumnarIndex(SqlParserPos pos, List specialIdentifiers, SqlNode table) {
+        super(pos, specialIdentifiers);
+        this.table = table;
+    }
+
+    public SqlNode getTable() {
+        return table;
+    }
+
+    public void setTable(SqlNode table) {
+        this.table = table;
+    }
+
+    @Override
+    protected boolean showWhere() {
+        return false;
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        if (null == operator) {
+            operator = new SqlShowGlobalIndexOperator(this.table);
+        }
+        return operator;
+    }
+
+    @Override
+    public SqlKind getShowKind() {
+        return SqlKind.SHOW_COLUMNAR_INDEX;
+    }
+
+    public static class SqlShowGlobalIndexOperator extends SqlSpecialOperator {
+
+        private SqlNode table;
+
+        public SqlShowGlobalIndexOperator(SqlNode table) {
+            super("SHOW_COLUMNAR_INDEX", SqlKind.SHOW_COLUMNAR_INDEX);
+            this.table = table;
+        }
+
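+        // Fixed result-set schema of SHOW COLUMNAR INDEX; every column is reported as VARCHAR.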
+        @Override
+        public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
+            final RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            List columns = new LinkedList<>();
+
+            columns.add(new RelDataTypeFieldImpl("SCHEMA", 0, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("TABLE", 1, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("INDEX_NAME", 2, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("CLUSTERED", 3, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("PK_NAMES", 4, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("COVERING_NAMES", 5, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("PARTITION_KEY", 6, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("PARTITION_STRATEGY", 7, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("PARTITION_COUNT", 8, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("SORT_KEY", 9, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("STATUS", 10, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+
+            return typeFactory.createStructType(columns);
+        }
+    }
+
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowMasterStatus.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowMasterStatus.java
index 28432a997..47c5d58db 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowMasterStatus.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowMasterStatus.java
@@ -16,26 +16,19 @@
 
 package org.apache.calcite.sql;
 
-import com.google.common.collect.Lists;
 import org.apache.calcite.sql.parser.SqlParserPos;
 
-import java.util.ArrayList;
 import java.util.List;
 
 public class SqlShowMasterStatus extends SqlShow {
     private SqlNode with;
-    private static final List OPERANDS_EMPTY = new ArrayList<>(0);
-    private static final List SPECIAL_IDENTIFIERS = Lists.newArrayList(
-        SqlSpecialIdentifier.MASTER,
-        SqlSpecialIdentifier.STATUS);
+    private boolean full;
 
-    public SqlShowMasterStatus(SqlParserPos pos) {
-        super(pos, SPECIAL_IDENTIFIERS);
-    }
-
-    public SqlShowMasterStatus(SqlParserPos pos, SqlNode with) {
-        super(pos, SPECIAL_IDENTIFIERS);
+    public SqlShowMasterStatus(SqlParserPos pos, List specialIdentifiers, SqlNode with,
+                               boolean full) {
+        super(pos, specialIdentifiers);
         this.with = with;
+        this.full = full;
     }
 
     @Override
@@ -46,4 +39,8 @@ public SqlKind getShowKind() {
     public SqlNode getWith() {
         return with;
     }
+
+    public boolean isFull() {
+        return full;
+    }
 }
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowPhysicalDdl.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowPhysicalDdl.java
new file mode 100644
index 000000000..e470108a0
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowPhysicalDdl.java
@@ -0,0 +1,112 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import lombok.Getter;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * @author jinkun.taojinkun
+ */
+public class SqlShowPhysicalDdl extends SqlShow {
+
+    private SqlSpecialOperator operator;
+
+    private boolean full;
+
+    @Getter
+    private boolean status;
+
+    private String schema;
+
+    public void setStatus(boolean status) {
+        this.status = status;
+    }
+
+    @Override
+    public SqlKind getShowKind() {
+        return SqlKind.SHOW_PHYSICAL_DDL;
+    }
+
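+    /** Operator defining the result-set schema returned by SHOW PHYSICAL_DDL. */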
+    public static class SqlShowTableInfoOperator extends SqlSpecialOperator {
+        private boolean full;
+
+
+        public SqlShowTableInfoOperator(boolean full) {
+            super("SHOW_PHYSICAL_DDL", SqlKind.SHOW_PHYSICAL_DDL);
+            this.full = full;
+        }
+
+        @Override
+        public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
+            final RelDataTypeFactory typeFactory = validator.getTypeFactory();
+
+            List columns = new LinkedList<>();
+            columns.add(new RelDataTypeFieldImpl("ID", 0, typeFactory.createSqlType(SqlTypeName.INTEGER)));
+            columns.add(new RelDataTypeFieldImpl("GROUP_NAME", 1, typeFactory.createSqlType(SqlTypeName.CHAR)));
+            columns.add(new RelDataTypeFieldImpl("TABLE_NAME", 2, typeFactory.createSqlType(SqlTypeName.CHAR)));
+            columns.add(new RelDataTypeFieldImpl("SIZE_IN_MB", 3, typeFactory.createSqlType(SqlTypeName.DOUBLE)));
+
+            return typeFactory.createStructType(columns);
+        }
+    }
+
+    public SqlShowPhysicalDdl(SqlParserPos pos, List specialIdentifiers,
+                              List operands, SqlNode like, SqlNode where, SqlNode orderBy,
+                              SqlNode limit, String schema, Boolean status) {
+        super(pos, specialIdentifiers, operands, like, where, orderBy, limit,
+            specialIdentifiers.size() + operands.size() - 1);
+        this.status = status;
+        this.schema = schema;
+    }
+
+    public boolean isFull() {
+        return full;
+    }
+
+    public void setFull(boolean full) {
+        this.full = full;
+    }
+
+    public String getSchema() {
+        return schema;
+    }
+
+    @Override
+    protected boolean showWhere() {
+        return false;
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        if (null == operator) {
+            operator = new SqlShowTableInfoOperator(full);
+        }
+        return operator;
+    }
+
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowPruneTrace.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowPruneTrace.java
new file mode 100644
index 000000000..53de1fbd4
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowPruneTrace.java
@@ -0,0 +1,97 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+
+public class SqlShowPruneTrace extends SqlShow {
+
+    private static final SqlSpecialOperator OPERATOR = new SqlShowPruneTraceOperator();
+
+    public SqlShowPruneTrace(SqlParserPos pos, List specialIdentifiers, List operands,
+                             SqlNodeList selectList,
+                             SqlNode like, SqlNode where, SqlNode orderBy, SqlNode limit) {
+        super(pos, specialIdentifiers, operands, selectList, like, where, orderBy, limit, Collections.emptyList(),
+            Collections.emptyList(), Collections.emptyList());
+    }
+
+    @Override
+    protected boolean showWhere() {
+        return false;
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    @Override
+    public SqlKind getShowKind() {
+        return SqlKind.SHOW_PRUNE_TRACE;
+    }
+
+    @Override
+    public boolean canConvertToSelect() {
+        return true;
+    }
+
+    @Override
+    public SqlSelect convertToSelect() {
+        return doConvertToSelect();
+    }
+
+    public static class SqlShowPruneTraceOperator extends SqlSpecialOperator {
+
+        public SqlShowPruneTraceOperator() {
+            super("SHOW_PRUNE_TRACE", SqlKind.SHOW_PRUNE_TRACE);
+        }
+
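+        // Result-set schema of SHOW PRUNE TRACE, covering pruning timings and row-group counts before/after.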
+        @Override
+        public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
+            final RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            List columns = new LinkedList<>();
+            columns.add(new RelDataTypeFieldImpl("ID", 0, typeFactory.createSqlType(SqlTypeName.INTEGER)));
+            columns.add(new RelDataTypeFieldImpl("NODE_IP", 1, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("TIMESTAMP", 2, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("SCAN_ID", 3, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("TABLE_NAME", 4, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("ORC_FILE", 5, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("FILTER", 6, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("INDEX_NAME", 7, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("ROWS", 8, typeFactory.createSqlType(SqlTypeName.BIGINT)));
+            columns.add(new RelDataTypeFieldImpl("STATEMENT", 9, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("ACQUIRE_INDEX_TIME(ms)", 10, typeFactory.createSqlType(SqlTypeName.BIGINT)));
+            columns.add(new RelDataTypeFieldImpl("PRUNE_TIME(ms)", 11, typeFactory.createSqlType(SqlTypeName.BIGINT)));
+            columns.add(new RelDataTypeFieldImpl("RG_NUM_BEFORE", 12, typeFactory.createSqlType(SqlTypeName.BIGINT)));
+            columns.add(new RelDataTypeFieldImpl("RG_NUM_AFTER", 13, typeFactory.createSqlType(SqlTypeName.BIGINT)));
+            columns.add(new RelDataTypeFieldImpl("TRACE_ID", 14, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+
+            return typeFactory.createStructType(columns);
+        }
+    }
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowReplicaCheckDiff.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowReplicaCheckDiff.java
new file mode 100644
index 000000000..36ea09b99
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowReplicaCheckDiff.java
@@ -0,0 +1,99 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * @author yudong
+ * @since 2023/11/9 11:11
+ **/
+public class SqlShowReplicaCheckDiff extends SqlDal {
+
+    private static final SqlSpecialOperator OPERATOR = new SqlShowReplicaCheckDiffOperator();
+
+    private SqlNode dbName;
+    private SqlNode tableName;
+
+    public SqlShowReplicaCheckDiff(SqlParserPos pos, SqlNode dbName) {
+        super(pos);
+        this.dbName = dbName;
+    }
+
+    public SqlShowReplicaCheckDiff(SqlParserPos pos, SqlNode dbName, SqlNode tableName) {
+        super(pos);
+        this.dbName = dbName;
+        this.tableName = tableName;
+    }
+
+    public SqlNode getDbName() {
+        return dbName;
+    }
+
+    public void setDbName(SqlNode dbName) {
+        this.dbName = dbName;
+    }
+
+    public SqlNode getTableName() {
+        return tableName;
+    }
+
+    public void setTableName(SqlNode tableName) {
+        this.tableName = tableName;
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
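+    // Unparses back to "CHECK REPLICA TABLE <db>[.<table>] SHOW DIFF".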
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword("CHECK REPLICA TABLE");
+        dbName.unparse(writer, 0, 0);
+        if (tableName != null) {
+            writer.print(".");
+            tableName.unparse(writer, 0, 0);
+        }
+        writer.keyword("SHOW DIFF");
+    }
+
+    public static class SqlShowReplicaCheckDiffOperator extends SqlSpecialOperator {
+
+        public SqlShowReplicaCheckDiffOperator() {
+            super("SHOW_REPLICA_CHECK_DIFF", SqlKind.SHOW_REPLICA_CHECK_DIFF);
+        }
+
+        @Override
+        public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
+            final RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            List columns = new LinkedList<>();
+            columns.add(new RelDataTypeFieldImpl("RESULT", 0, typeFactory.createSqlType(SqlTypeName.INTEGER)));
+            return typeFactory.createStructType(columns);
+        }
+    }
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowReplicaCheckProgress.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowReplicaCheckProgress.java
new file mode 100644
index 000000000..bfc2f5aba
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowReplicaCheckProgress.java
@@ -0,0 +1,99 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * @author yudong
+ * @since 2023/11/9 11:11
+ **/
+public class SqlShowReplicaCheckProgress extends SqlDal {
+
+    private static final SqlSpecialOperator OPERATOR = new SqlShowReplicaCheckProgressOperator();
+
+    private SqlNode dbName;
+    private SqlNode tableName;
+
+    public SqlShowReplicaCheckProgress(SqlParserPos pos, SqlNode dbName) {
+        super(pos);
+        this.dbName = dbName;
+    }
+
+    public SqlShowReplicaCheckProgress(SqlParserPos pos, SqlNode dbName, SqlNode tableName) {
+        super(pos);
+        this.dbName = dbName;
+        this.tableName = tableName;
+    }
+
+    public SqlNode getDbName() {
+        return dbName;
+    }
+
+    public void setDbName(SqlNode dbName) {
+        this.dbName = dbName;
+    }
+
+    public SqlNode getTableName() {
+        return tableName;
+    }
+
+    public void setTableName(SqlNode tableName) {
+        this.tableName = tableName;
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
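+    // Unparses back to "CHECK REPLICA TABLE <db>[.<table>] SHOW PROGRESS".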
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword("CHECK REPLICA TABLE");
+        dbName.unparse(writer, 0, 0);
+        if (tableName != null) {
+            writer.print(".");
+            tableName.unparse(writer, 0, 0);
+        }
+        writer.keyword("SHOW PROGRESS");
+    }
+
+    public static class SqlShowReplicaCheckProgressOperator extends SqlSpecialOperator {
+
+        public SqlShowReplicaCheckProgressOperator() {
+            super("SHOW_REPLICA_CHECK_PROGRESS", SqlKind.SHOW_REPLICA_CHECK_PROGRESS);
+        }
+
+        @Override
+        public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
+            final RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            List columns = new LinkedList<>();
+            columns.add(new RelDataTypeFieldImpl("RESULT", 0, typeFactory.createSqlType(SqlTypeName.INTEGER)));
+            return typeFactory.createStructType(columns);
+        }
+    }
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowTrans.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowTrans.java
index 51c8d18fc..5eb02a06a 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowTrans.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlShowTrans.java
@@ -30,10 +30,13 @@
 
 public class SqlShowTrans extends SqlShow {
 
-    private static final SqlSpecialOperator OPERATOR = new SqlShowTransOperator();
+    private SqlSpecialOperator operator;
 
-    public SqlShowTrans(SqlParserPos pos) {
+    private boolean columnar;
+
+    public SqlShowTrans(SqlParserPos pos, boolean columnar) {
         super(pos, ImmutableList.of());
+        this.columnar = columnar;
     }
 
     @Override
@@ -43,7 +46,11 @@ protected boolean showWhere() {
 
     @Override
     public SqlOperator getOperator() {
-        return OPERATOR;
+        if (null == operator) {
+            operator = new SqlShowTransOperator(columnar);
+        }
+
+        return operator;
     }
 
     @Override
@@ -51,10 +58,17 @@ public SqlKind getShowKind() {
         return SqlKind.SHOW_TRANS;
     }
 
+    public boolean isColumnar() {
+        return columnar;
+    }
+
     public static class SqlShowTransOperator extends SqlSpecialOperator {
 
-        public SqlShowTransOperator(){
+        private boolean columnar;
+
+        public SqlShowTransOperator(boolean columnar) {
             super("SHOW_TRANS", SqlKind.SHOW_TRANS);
+            this.columnar = columnar;
         }
 
         @Override
@@ -64,8 +78,12 @@ public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, S
             columns.add(new RelDataTypeFieldImpl("TRANS_ID", 0, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
             columns.add(new RelDataTypeFieldImpl("TYPE", 1, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
             columns.add(new RelDataTypeFieldImpl("DURATION_MS", 2, typeFactory.createSqlType(SqlTypeName.BIGINT)));
-            columns.add(new RelDataTypeFieldImpl("STATE", 8, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
-            columns.add(new RelDataTypeFieldImpl("PROCESS_ID", 8, typeFactory.createSqlType(SqlTypeName.BIGINT)));
+            columns.add(new RelDataTypeFieldImpl("STATE", 3, typeFactory.createSqlType(SqlTypeName.VARCHAR)));
+            columns.add(new RelDataTypeFieldImpl("PROCESS_ID", 4, typeFactory.createSqlType(SqlTypeName.BIGINT)));
+            if (columnar) {
+                columns.add(new RelDataTypeFieldImpl("TSO", 5, typeFactory.createSqlType(SqlTypeName.BIGINT)));
+            }
             return typeFactory.createStructType(columns);
         }
     }
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSpecialIdentifier.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSpecialIdentifier.java
index 240ff452c..61c919cc2 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSpecialIdentifier.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSpecialIdentifier.java
@@ -31,7 +31,7 @@ public enum SqlSpecialIdentifier {
     UNCOMMITTED, VARIABLES, VIEW, WARNINGS, SEQUENCES, TOPOLOGY, PARITIONS, BROADCASTS, RULE, TRACE, DATASOURCES,
     CHARSET, PARTITIONS, PREPARE, EXECUTE, DEALLOCATE, DBLOCK, OFF, PHYSICAL_SLOW, STATS, ONLY, STC, HTC, HIS, DS, DDL,
     VERSION, PHYSICAL_PROCESSLIST, DB, OUTLINES, BINARY, LOGS, CHARACTER, SET, FROM, PROCEDURE, FOR, QUERY, LIMIT,
-    OFFSET, CHANGESET, LOCALITY,
+    OFFSET, CHANGESET, LOCALITY, PHYSICAL_DDL,
     RELAYLOG, IN, TABLE, CREATE, DATABASE, IF, NOT, EXISTS, TRIGGER, KEYS, SLOW, RECYCLEBIN, DATABASES, CCL_RULE, INFO,
     CONVERT, WITH, STREAMS, CDC;
     private static final Map specialIdentifiers = new HashMap<>();
@@ -89,6 +89,7 @@ public enum SqlSpecialIdentifier {
         specialIdentifiers.put("IPC", SqlSpecialIdentifier.IPC);
         specialIdentifiers.put("LOCAL", SqlSpecialIdentifier.LOCAL);
         specialIdentifiers.put("LOCALITY", SqlSpecialIdentifier.LOCALITY);
+        specialIdentifiers.put("PHYSICAL_DDL", SqlSpecialIdentifier.PHYSICAL_DDL);
         specialIdentifiers.put("HOSTS", SqlSpecialIdentifier.HOSTS);
         specialIdentifiers.put("INDEXES", SqlSpecialIdentifier.INDEXES);
         specialIdentifiers.put("TRANSACTION", SqlSpecialIdentifier.TRANSACTION);
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlStartReplicaCheck.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlStartReplicaCheck.java
new file mode 100644
index 000000000..d8db2e6d0
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlStartReplicaCheck.java
@@ -0,0 +1,107 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * @author yudong
+ * @since 2023/11/9 11:09
+ **/
+public class SqlStartReplicaCheck extends SqlDal {
+
+    private static final SqlSpecialOperator OPERATOR = new SqlStartReplicaCheckOperator();
+
+    private SqlNode dbName;
+    private SqlNode tableName;
+    private SqlNode channel;
+
+    public SqlStartReplicaCheck(SqlParserPos pos, SqlNode channel, SqlNode dbName) {
+        super(pos);
+        this.dbName = dbName;
+        this.channel = channel;
+    }
+
+    public SqlNode getDbName() {
+        return dbName;
+    }
+
+    public void setDbName(SqlNode dbName) {
+        this.dbName = dbName;
+    }
+
+    public SqlNode getTableName() {
+        return tableName;
+    }
+
+    public void setTableName(SqlNode tableName) {
+        this.tableName = tableName;
+    }
+
+    public SqlNode getChannel() {
+        return channel;
+    }
+
+    public void setChannel(SqlNode channel) {
+        this.channel = channel;
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        writer.keyword("CHECK REPLICA TABLE");
+        dbName.unparse(writer, 0, 0);
+        if (tableName != null) {
+            writer.print(".");
+            tableName.unparse(writer, 0, 0);
+        }
+        if (channel != null) {
+            writer.keyword("FOR CHANNEL");
+            channel.unparse(writer, 0, 0);
+        }
+    }
+
+    public static class SqlStartReplicaCheckOperator extends SqlSpecialOperator {
+
+        public SqlStartReplicaCheckOperator() {
+            super("START_REPLICA_CHECK", SqlKind.START_REPLICA_CHECK);
+        }
+
+        @Override
+        public RelDataType deriveType(SqlValidator validator, SqlValidatorScope scope, SqlCall call) {
+            final RelDataTypeFactory typeFactory = validator.getTypeFactory();
+            List<RelDataTypeFieldImpl> columns = new LinkedList<>();
+            columns.add(new RelDataTypeFieldImpl("RESULT", 0, typeFactory.createSqlType(SqlTypeName.INTEGER)));
+            return typeFactory.createStructType(columns);
+        }
+
+    }
+}
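
A hedged construction sketch of the new DAL node; identifier quoting in the printed SQL depends on the writer's dialect, and this is not compiled against the full module:

    import org.apache.calcite.sql.SqlIdentifier;
    import org.apache.calcite.sql.SqlStartReplicaCheck;
    import org.apache.calcite.sql.parser.SqlParserPos;

    public class StartReplicaCheckSketch {
        public static void main(String[] args) {
            // Constructor takes (pos, channel, dbName); the table name is optional.
            SqlStartReplicaCheck check = new SqlStartReplicaCheck(
                SqlParserPos.ZERO,
                new SqlIdentifier("ch1", SqlParserPos.ZERO),
                new SqlIdentifier("mydb", SqlParserPos.ZERO));
            check.setTableName(new SqlIdentifier("t1", SqlParserPos.ZERO));
            // Unparses roughly as: CHECK REPLICA TABLE mydb.t1 FOR CHANNEL ch1
            System.out.println(check);
        }
    }
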
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSubPartition.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSubPartition.java
index 1adebaab7..9953a844d 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSubPartition.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSubPartition.java
@@ -70,8 +70,10 @@ public String toString() {
         StringBuilder sb = new StringBuilder("");
         sb.append("SUBPARTITION ");
         sb.append(name);
-        sb.append(" ");
-        sb.append(values.toString());
+        if (values != null) {
+            sb.append(" ");
+            sb.append(values.toString());
+        }
         if (TStringUtil.isNotEmpty(locality)) {
             sb.append(" LOCALITY=");
             sb.append(TStringUtil.quoteString(locality));
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSubPartitionBy.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSubPartitionBy.java
index 3b5026490..b95407a67 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSubPartitionBy.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSubPartitionBy.java
@@ -198,7 +198,7 @@ public void validateSubPartitions(SqlValidator validator, SqlValidatorScope scop
         int partColCnt = partColTypes.size();
 
         boolean allowNoPartBndVal = this instanceof SqlSubPartitionByHash;
-        // Validate subpartitions template
+        // Validate subpartition template
         SqlSubPartitionBy.validatePartitionDefs(validator, scope, this.getSubPartitions(), partColCnt, allowNoPartBndVal);
 
         // Validate partitionsCount
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSubPartitionByCoHash.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSubPartitionByCoHash.java
new file mode 100644
index 000000000..db3ab23bc
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlSubPartitionByCoHash.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import org.apache.calcite.sql.parser.SqlParserPos;
+
+/**
+ * @author chenghui.lch
+ */
+public class SqlSubPartitionByCoHash extends SqlSubPartitionBy {
+
+    public SqlSubPartitionByCoHash(SqlParserPos sqlParserPos) {
+        super(sqlParserPos);
+    }
+
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlTableOptions.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlTableOptions.java
index 44a7b9235..c25005ba2 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlTableOptions.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlTableOptions.java
@@ -26,45 +26,44 @@
 
 /**
  * @author chenmo.cm
- * @date 2018/12/13 5:38 PM
  */
 public class SqlTableOptions extends SqlCall {
 
-    private static final SqlOperator OPERATOR                  = new SqlSpecialOperator("TABLE OPTIONS",
-                                                                   SqlKind.TABLE_OPTIONS);
-    private SqlIdentifier            engine;
-    private SqlNumericLiteral        autoIncrement;
-    private SqlCall                  avgRowLength;
-    private SqlIdentifier            charSet;
-    private Boolean                  defaultCharset;
+    private static final SqlOperator OPERATOR = new SqlSpecialOperator("TABLE OPTIONS",
+        SqlKind.TABLE_OPTIONS);
+    private SqlIdentifier engine;
+    private SqlNumericLiteral autoIncrement;
+    private SqlNumericLiteral avgRowLength;
+    private SqlIdentifier charSet;
+    private Boolean defaultCharset;
     /**
      * Here we need to distinguish a COLLATE that carries a charset from one that does not
      */
-    private SqlIdentifier            collateWithCharset;
-    private Boolean                  defaultCollateWithCharset;
-    private SqlIdentifier            collation;
-    private Boolean                  defaultCollate;
-    private SqlLiteral               checkSum;
-    private SqlCharStringLiteral     comment;
-    private SqlCharStringLiteral     connection;
-    private SqlCharStringLiteral     dataDir;
-    private SqlCharStringLiteral     indexDir;
-    private SqlLiteral               delayKeyWrite;
-    private InsertMethod             insertMethod;
-    private SqlCall                  keyBlockSize;
-    private SqlCall                  maxRows;
-    private SqlCall                  minRows;
-    private PackKeys                 packKeys;
-    private SqlCharStringLiteral     password;
-    private RowFormat                rowFormat;
-    private StatsAutoRecalc          statsAutoRecalc;
-    private StatsPersistent          statsPersistent;
-    private SqlCall                  statsSamplePages;
-    private SqlIdentifier            tablespaceName;
-    private TableSpaceStorage        tableSpaceStorage;
-    private List<SqlIdentifier>      union;
-    private Boolean                  broadcast;
-    private SqlIdentifier            algorithm;
+    private SqlIdentifier collateWithCharset;
+    private Boolean defaultCollateWithCharset;
+    private SqlIdentifier collation;
+    private Boolean defaultCollate;
+    private SqlLiteral checkSum;
+    private SqlCharStringLiteral comment;
+    private SqlCharStringLiteral connection;
+    private SqlCharStringLiteral dataDir;
+    private SqlCharStringLiteral indexDir;
+    private SqlLiteral delayKeyWrite;
+    private InsertMethod insertMethod;
+    private SqlNumericLiteral keyBlockSize;
+    private SqlNumericLiteral maxRows;
+    private SqlNumericLiteral minRows;
+    private PackKeys packKeys;
+    private SqlCharStringLiteral password;
+    private RowFormat rowFormat;
+    private StatsAutoRecalc statsAutoRecalc;
+    private StatsPersistent statsPersistent;
+    private SqlCall statsSamplePages;
+    private SqlIdentifier tablespaceName;
+    private TableSpaceStorage tableSpaceStorage;
+    private List<SqlIdentifier> union;
+    private Boolean broadcast;
+    private SqlIdentifier algorithm;
 
     // table_option:
     // ENGINE [=] engine_name
@@ -87,7 +86,7 @@ public class SqlTableOptions extends SqlCall {
     // | ROW_FORMAT [=] {DEFAULT|DYNAMIC|FIXED|COMPRESSED|REDUNDANT|COMPACT}
     // | UNION [=] (tbl_name[,tbl_name]...)
     // | ALGORITHM [=] algorithm_name
-    public SqlTableOptions(SqlParserPos pos){
+    public SqlTableOptions(SqlParserPos pos) {
         super(pos);
     }
 
@@ -133,7 +132,64 @@ public List<SqlNode> getOperandList() {
 
     @Override
     public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
-        super.unparse(writer, leftPrec, rightPrec);
+        writer.print(optionToString());
+        //super.unparse(writer, leftPrec, rightPrec);
+    }
+
+    public String optionToString() {
+        StringBuilder sb = new StringBuilder();
+        appendOption(sb, "ENGINE", engine);
+        appendOption(sb, "AUTO_INCREMENT", autoIncrement);
+        appendOption(sb, "AVG_ROW_LENGTH", avgRowLength);
+        appendOption(sb, "CHARACTER SET", charSet);
+        appendOption(sb, "DEFAULT CHARSET", defaultCharset);
+        appendOption(sb, "COLLATE", collateWithCharset);
+        appendOption(sb, "DEFAULT COLLATE", defaultCollateWithCharset);
+        appendOption(sb, "COLLATION", collation);
+        appendOption(sb, "DEFAULT COLLATE", defaultCollate);
+        appendOption(sb, "CHECKSUM", checkSum);
+        appendOption(sb, "COMMENT", comment, true);
+        appendOption(sb, "CONNECTION", connection, true);
+        appendOption(sb, "DATA DIRECTORY", dataDir, true);
+        appendOption(sb, "INDEX DIRECTORY", indexDir, true);
+        appendOption(sb, "DELAY_KEY_WRITE", delayKeyWrite);
+        appendOption(sb, "INSERT_METHOD", insertMethod);
+        appendOption(sb, "KEY_BLOCK_SIZE", keyBlockSize);
+        appendOption(sb, "MAX_ROWS", maxRows);
+        appendOption(sb, "MIN_ROWS", minRows);
+        appendOption(sb, "PACK_KEYS", packKeys);
+        appendOption(sb, "PASSWORD", password, true);
+        appendOption(sb, "ROW_FORMAT", rowFormat);
+        appendOption(sb, "STATS_AUTO_RECALC", statsAutoRecalc);
+        appendOption(sb, "STATS_PERSISTENT", statsPersistent);
+        appendOption(sb, "STATS_SAMPLE_PAGES", statsSamplePages);
+        appendOption(sb, "TABLESPACE", tablespaceName);
+        appendOption(sb, "STORAGE", tableSpaceStorage);
+        appendOption(sb, "UNION", union);
+        appendOption(sb, "BROADCAST", broadcast);
+        appendOption(sb, "ALGORITHM", algorithm);
+
+        if (sb.length() > 2) {
+            sb.setLength(sb.length() - 2);
+        }
+
+        return sb.toString();
+    }
+
+    private void appendOption(StringBuilder sb, String optionName, Object value) {
+        appendOption(sb, optionName, value, false);
+    }
+
+    private void appendOption(StringBuilder sb, String optionName, Object value, boolean isString) {
+        if (value != null) {
+            sb.append(optionName).append(" = ");
+            if (isString) {
+                sb.append("'").append(value.toString().replace("'", "\\'")).append("'");
+            } else {
+                sb.append(value);
+            }
+            sb.append(", ");
+        }
     }
 
     public SqlIdentifier getEngine() {
@@ -152,11 +208,11 @@ public void setAutoIncrement(SqlNumericLiteral autoIncrement) {
         this.autoIncrement = autoIncrement;
     }
 
-    public SqlCall getAvgRowLength() {
+    public SqlNumericLiteral getAvgRowLength() {
         return avgRowLength;
     }
 
-    public void setAvgRowLength(SqlCall avgRowLength) {
+    public void setAvgRowLength(SqlNumericLiteral avgRowLength) {
         this.avgRowLength = avgRowLength;
     }
 
@@ -248,27 +304,27 @@ public void setInsertMethod(InsertMethod insertMethod) {
         this.insertMethod = insertMethod;
     }
 
-    public SqlCall getKeyBlockSize() {
+    public SqlNumericLiteral getKeyBlockSize() {
         return keyBlockSize;
     }
 
-    public void setKeyBlockSize(SqlCall keyBlockSize) {
+    public void setKeyBlockSize(SqlNumericLiteral keyBlockSize) {
         this.keyBlockSize = keyBlockSize;
     }
 
-    public SqlCall getMaxRows() {
+    public SqlNumericLiteral getMaxRows() {
         return maxRows;
     }
 
-    public void setMaxRows(SqlCall maxRows) {
+    public void setMaxRows(SqlNumericLiteral maxRows) {
         this.maxRows = maxRows;
     }
 
-    public SqlCall getMinRows() {
+    public SqlNumericLiteral getMinRows() {
         return minRows;
     }
 
-    public void setMinRows(SqlCall minRows) {
+    public void setMinRows(SqlNumericLiteral minRows) {
         this.minRows = minRows;
     }
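
The rendering contract of optionToString() above, restated as a standalone runnable sketch: each non-null option prints as NAME = value, string values are single-quoted with embedded quotes escaped, and the trailing ", " is trimmed:

    public class TableOptionRenderSketch {
        public static void main(String[] args) {
            StringBuilder sb = new StringBuilder();
            appendOption(sb, "ENGINE", "InnoDB", false);
            appendOption(sb, "COMMENT", "user's table", true);
            if (sb.length() > 2) {
                sb.setLength(sb.length() - 2); // drop trailing ", "
            }
            System.out.println(sb); // ENGINE = InnoDB, COMMENT = 'user\'s table'
        }

        static void appendOption(StringBuilder sb, String name, Object value, boolean isString) {
            if (value != null) {
                sb.append(name).append(" = ");
                sb.append(isString ? "'" + value.toString().replace("'", "\\'") + "'" : value);
                sb.append(", ");
            }
        }
    }
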
 
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlUtil.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlUtil.java
index f70958174..a2a6af8c2 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlUtil.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlUtil.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.calcite.sql;
 
 import com.alibaba.polardbx.common.charset.CharsetName;
@@ -68,6 +69,7 @@
 import java.util.Optional;
 import java.util.Set;
 
+import static org.apache.calcite.sql.validate.SqlValidatorImpl.isImplicitKey;
 import static org.apache.calcite.util.Static.RESOURCE;
 
 /**
@@ -1187,6 +1189,15 @@ public static boolean hasPrimaryKey(SqlNode node) {
         return false;
     }
 
+    public static boolean hasExplicitPrimaryKey(SqlCreateTable node) {
+        final boolean withPk = SqlUtil.hasPrimaryKey(node);
+        final boolean usingImplicitPk =
+            node.getPrimaryKey() != null
+                && node.getPrimaryKey().getColumns().stream().anyMatch(c -> isImplicitKey(c.getColumnNameStr()));
+
+        return withPk && !usingImplicitPk;
+    }
+
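
A standalone sketch of the predicate hasExplicitPrimaryKey() implements; the implicit key name below is an assumption (PolarDB-X injects a hidden auto-increment key when the user declares no PK), and the real check delegates to SqlValidatorImpl.isImplicitKey:

    import java.util.Arrays;
    import java.util.List;

    public class ExplicitPkSketch {
        // Assumed convention for the hidden auto-increment key name.
        static boolean isImplicitKey(String column) {
            return "_drds_implicit_id_".equalsIgnoreCase(column);
        }

        static boolean hasExplicitPrimaryKey(List<String> pkColumns) {
            boolean withPk = !pkColumns.isEmpty();
            boolean usingImplicitPk = pkColumns.stream().anyMatch(ExplicitPkSketch::isImplicitKey);
            return withPk && !usingImplicitPk;
        }

        public static void main(String[] args) {
            System.out.println(hasExplicitPrimaryKey(Arrays.asList("id")));                 // true
            System.out.println(hasExplicitPrimaryKey(Arrays.asList("_drds_implicit_id_"))); // false
        }
    }
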
     //~ Inner Classes ----------------------------------------------------------
 
     /**
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlValuesTableSource.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlValuesTableSource.java
new file mode 100644
index 000000000..2a5c5d6fe
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/SqlValuesTableSource.java
@@ -0,0 +1,105 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql;
+
+import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+import org.apache.calcite.sql.parser.SqlParserPos;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * @author pangzhaoxing
+ *
+ * for values statement
+ */
+public class SqlValuesTableSource extends SqlCall {
+
+    public static final SqlValuesTableSourceOperator OPERATOR = new SqlValuesTableSourceOperator();
+
+    public static final String VALUES_TABLE_NAME = "values_table";
+
+    public static final String COLUMN_NAME_PREFIX = "column_";
+
+    List<SqlNode> operands;
+
+    public SqlValuesTableSource(SqlParserPos pos, List<SqlNode> operands) {
+        super(pos);
+        this.operands = operands;
+    }
+
+    @Override
+    public SqlKind getKind() {
+        return SqlKind.VALUES;
+    }
+
+    @Override
+    public SqlOperator getOperator() {
+        return OPERATOR;
+    }
+
+    @Override
+    public List<SqlNode> getOperandList() {
+        return operands;
+    }
+
+    @Override
+    public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
+        if (!writer.inQuery()) {
+            final SqlWriter.Frame frame =
+                writer.startList(SqlWriter.FrameTypeEnum.SUB_QUERY, "(", ")");
+            getOperator().unparse(writer, this, 0, 0);
+            writer.endList(frame);
+        } else {
+            getOperator().unparse(writer, this, leftPrec, rightPrec);
+        }
+    }
+
+
+    public static class SqlValuesTableSourceOperator extends SqlSpecialOperator {
+
+        public SqlValuesTableSourceOperator() {
+            super("VALUES_TABLE_SOURCE", SqlKind.VALUES);
+        }
+
+        @Override
+        public void unparse(
+            SqlWriter writer,
+            SqlCall call,
+            int leftPrec,
+            int rightPrec) {
+            final SqlWriter.Frame frame =
+                writer.startList(SqlWriter.FrameTypeEnum.VALUES, "VALUES ", "");
+            for (SqlNode operand : call.getOperandList()) {
+                writer.sep(",");
+                SqlWriter.Frame rowFrame = writer.startList(SqlWriter.FrameTypeEnum.FUN_CALL, "ROW", "");
+                operand.unparse(writer, 0, 0);
+                writer.endList(rowFrame);
+            }
+            writer.endList(frame);
+        }
+
+        @Override
+        public SqlCall createCall(SqlLiteral functionQualifier, SqlParserPos pos, SqlNode... operands) {
+            return new SqlValuesTableSource(pos, Arrays.asList(operands));
+        }
+
+    }
+
+
+}
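
This node models the MySQL 8.0 table value constructor, e.g. VALUES ROW(1, 'a'), ROW(2, 'b'), usable wherever a table source is expected. A small sketch of the implicit naming the two constants above suggest (zero-based numbering is an assumption):

    public class ValuesNamingSketch {
        public static void main(String[] args) {
            String table = "values_table"; // VALUES_TABLE_NAME
            for (int i = 0; i < 3; i++) {
                // COLUMN_NAME_PREFIX + ordinal, e.g. values_table.column_0
                System.out.println(table + ".column_" + i);
            }
        }
    }
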
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlBinaryFunction.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlBinaryFunction.java
new file mode 100644
index 000000000..4357f513d
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlBinaryFunction.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql.fun;
+
+import org.apache.calcite.sql.SqlCall;
+import org.apache.calcite.sql.SqlFunction;
+import org.apache.calcite.sql.SqlFunctionCategory;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlWriter;
+import org.apache.calcite.sql.type.OperandTypes;
+import org.apache.calcite.sql.type.ReturnTypes;
+
+/**
+ * @author wumu
+ */
+public class SqlBinaryFunction extends SqlFunction {
+    public SqlBinaryFunction() {
+        super(
+            "BINARY",
+            SqlKind.OTHER_FUNCTION,
+            ReturnTypes.VARCHAR_BINARY,
+            null,
+            OperandTypes.ANY_ANY_OR_ANY_ANY_ANY,
+            SqlFunctionCategory.STRING);
+    }
+
+    //~ Methods ----------------------------------------------------------------
+
+    @Override
+    public void unparse(
+        SqlWriter writer,
+        SqlCall call,
+        int leftPrec,
+        int rightPrec) {
+        final SqlWriter.Frame frame = writer.startFunCall(getName());
+        call.operand(0).unparse(writer, leftPrec, rightPrec);
+        writer.endFunCall(frame);
+    }
+}
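
A hedged usage sketch; note that unparse() above prints only operand(0), so the call renders as a one-argument BINARY(...) regardless of declared arity:

    import org.apache.calcite.sql.SqlCall;
    import org.apache.calcite.sql.SqlLiteral;
    import org.apache.calcite.sql.fun.SqlBinaryFunction;
    import org.apache.calcite.sql.parser.SqlParserPos;

    public class BinaryFunctionSketch {
        public static void main(String[] args) {
            SqlBinaryFunction binary = new SqlBinaryFunction();
            SqlCall call = binary.createCall(
                SqlParserPos.ZERO,
                SqlLiteral.createCharString("abc", SqlParserPos.ZERO));
            System.out.println(call); // BINARY('abc')
        }
    }
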
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlCheckSumV2Function.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlCheckSumV2Function.java
new file mode 100644
index 000000000..98f7dbcf7
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlCheckSumV2Function.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql.fun;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.sql.SqlAggFunction;
+import org.apache.calcite.sql.SqlCall;
+import org.apache.calcite.sql.SqlFunctionCategory;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlSplittableAggFunction;
+import org.apache.calcite.sql.type.OperandTypes;
+import org.apache.calcite.sql.type.ReturnTypes;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+/**
+ * @author yaozhili
+ */
+public class SqlCheckSumV2Function extends SqlAggFunction {
+    public SqlCheckSumV2Function() {
+        super(
+            "CHECK_SUM_V2",
+            null,
+            SqlKind.CHECK_SUM_V2,
+            ReturnTypes.BIGINT,
+            null,
+            OperandTypes.ONE_OR_MORE,
+            SqlFunctionCategory.NUMERIC,
+            false,
+            false);
+    }
+
+
+    @Override
+    public RelDataType deriveType(
+        SqlValidator validator,
+        SqlValidatorScope scope,
+        SqlCall call) {
+        // Check for CHECK_SUM_V2(*) function.  If it is we don't
+        // want to try and derive the "*"
+        if (call.isCheckSumV2Star()) {
+            return validator.getTypeFactory().createSqlType(
+                SqlTypeName.BIGINT);
+        }
+        return super.deriveType(validator, scope, call);
+    }
+
+    @Override
+    public <T> T unwrap(Class<T> clazz) {
+        if (clazz == SqlSplittableAggFunction.class) {
+            return clazz.cast(SqlSplittableAggFunction.CountSplitter.INSTANCE);
+        }
+        return super.unwrap(clazz);
+    }
+}
+
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlCheckSumV2MergeFunction.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlCheckSumV2MergeFunction.java
new file mode 100644
index 000000000..726ba033f
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlCheckSumV2MergeFunction.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql.fun;
+
+import org.apache.calcite.sql.SqlAggFunction;
+import org.apache.calcite.sql.SqlFunctionCategory;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlSplittableAggFunction;
+import org.apache.calcite.sql.type.OperandTypes;
+import org.apache.calcite.sql.type.ReturnTypes;
+
+/**
+ * @author yaozhili
+ */
+public class SqlCheckSumV2MergeFunction extends SqlAggFunction {
+    public SqlCheckSumV2MergeFunction() {
+        super(
+            "CHECK_SUM_V2_MERGE",
+            null,
+            SqlKind.CHECK_SUM_V2_MERGE,
+            ReturnTypes.BIGINT,
+            null,
+            OperandTypes.ONE_OR_MORE,
+            SqlFunctionCategory.NUMERIC,
+            false,
+            false);
+    }
+
+    @Override public <T> T unwrap(Class<T> clazz) {
+        if (clazz == SqlSplittableAggFunction.class) {
+            return clazz.cast(SqlSplittableAggFunction.CountSplitter.INSTANCE);
+        }
+        return super.unwrap(clazz);
+    }
+}
+
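
CHECK_SUM_V2 and CHECK_SUM_V2_MERGE form a two-phase aggregate: each shard computes a partial checksum and a coordinator merges them. The actual checksum algorithm is not part of this diff; the sketch below only shows the shape of the split and why the combine step must be order-insensitive (here plain addition, an assumption):

    import java.util.Arrays;
    import java.util.List;

    public class TwoPhaseChecksumSketch {
        // Placeholder per-shard digest; the real CHECK_SUM_V2 lives in the executor.
        static long partial(List<String> rows) {
            long acc = 0;
            for (String r : rows) {
                acc += r.hashCode() & 0xffffffffL;
            }
            return acc;
        }

        // Order-insensitive combine, so shard arrival order cannot matter.
        static long merge(List<Long> partials) {
            long acc = 0;
            for (long p : partials) {
                acc += p;
            }
            return acc;
        }

        public static void main(String[] args) {
            long p1 = partial(Arrays.asList("a", "b"));
            long p2 = partial(Arrays.asList("c"));
            System.out.println(merge(Arrays.asList(p1, p2))
                == partial(Arrays.asList("a", "b", "c"))); // true
        }
    }
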
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlCountAggFunction.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlCountAggFunction.java
index 15974b3cf..09e14bc58 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlCountAggFunction.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlCountAggFunction.java
@@ -90,9 +90,9 @@ public RelDataType deriveType(
       SqlValidator validator,
       SqlValidatorScope scope,
       SqlCall call) {
-    // Check for COUNT(*) function.  If it is we don't
+    // Check for COUNT(*) or COUNT(1) function.  If it is we don't
     // want to try and derive the "*"
-    if (call.isCountStar()) {
+    if (call.isCountStar() || call.isCountLiteral()) {
       return validator.getTypeFactory().createSqlType(
           SqlTypeName.BIGINT);
     }
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlFinalHyperloglogFunction.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlFinalHyperloglogFunction.java
new file mode 100644
index 000000000..ce6ccb3e7
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlFinalHyperloglogFunction.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql.fun;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.sql.SqlAggFunction;
+import org.apache.calcite.sql.SqlCall;
+import org.apache.calcite.sql.SqlFunctionCategory;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlSplittableAggFunction;
+import org.apache.calcite.sql.type.OperandTypes;
+import org.apache.calcite.sql.type.ReturnTypes;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+public class SqlFinalHyperloglogFunction extends SqlAggFunction {
+    public SqlFinalHyperloglogFunction() {
+        super(
+            "FINAL_HYPERLOGLOG",
+            null,
+            SqlKind.FINAL_HYPER_LOGLOG,
+            ReturnTypes.BIGINT,
+            null,
+            OperandTypes.ONE_OR_MORE,
+            SqlFunctionCategory.NUMERIC,
+            false,
+            false);
+    }
+
+
+    public RelDataType deriveType(
+        SqlValidator validator,
+        SqlValidatorScope scope,
+        SqlCall call) {
+        return super.deriveType(validator, scope, call);
+    }
+
+    @Override public <T> T unwrap(Class<T> clazz) {
+        if (clazz == SqlSplittableAggFunction.class) {
+            return clazz.cast(SqlSplittableAggFunction.CountSplitter.INSTANCE);
+        }
+        return super.unwrap(clazz);
+    }
+}
+
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlHyperloglogFunction.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlHyperloglogFunction.java
new file mode 100644
index 000000000..455e5c61c
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlHyperloglogFunction.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql.fun;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.sql.SqlAggFunction;
+import org.apache.calcite.sql.SqlCall;
+import org.apache.calcite.sql.SqlFunctionCategory;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlSplittableAggFunction;
+import org.apache.calcite.sql.type.OperandTypes;
+import org.apache.calcite.sql.type.ReturnTypes;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+public class SqlHyperloglogFunction extends SqlAggFunction {
+    public SqlHyperloglogFunction() {
+        super(
+            "HYPERLOGLOG",
+            null,
+            SqlKind.HYPER_LOGLOG,
+            ReturnTypes.BIGINT,
+            null,
+            OperandTypes.ONE_OR_MORE,
+            SqlFunctionCategory.NUMERIC,
+            false,
+            false);
+    }
+
+
+    public RelDataType deriveType(
+        SqlValidator validator,
+        SqlValidatorScope scope,
+        SqlCall call) {
+        return super.deriveType(validator, scope, call);
+    }
+
+    @Override public <T> T unwrap(Class<T> clazz) {
+        if (clazz == SqlSplittableAggFunction.class) {
+            return clazz.cast(SqlSplittableAggFunction.CountSplitter.INSTANCE);
+        }
+        return super.unwrap(clazz);
+    }
+}
+
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlMinMaxAggFunction.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlMinMaxAggFunction.java
index 94f855bec..78010e75d 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlMinMaxAggFunction.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlMinMaxAggFunction.java
@@ -16,6 +16,8 @@
  */
 package org.apache.calcite.sql.fun;
 
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeFactory;
 import org.apache.calcite.sql.SqlAggFunction;
@@ -24,10 +26,8 @@
 import org.apache.calcite.sql.SqlSplittableAggFunction;
 import org.apache.calcite.sql.type.OperandTypes;
 import org.apache.calcite.sql.type.ReturnTypes;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
 import org.apache.calcite.sql.type.SqlReturnTypeInference;
+import org.apache.calcite.util.Optionality;
 
 import java.util.List;
 
@@ -142,6 +142,10 @@ public RelDataType getReturnType(RelDataTypeFactory typeFactory) {
     }
     return super.unwrap(clazz);
   }
+
+  @Override public Optionality getDistinctOptionality() {
+    return Optionality.IGNORED;
+  }
 }
 
 // End SqlMinMaxAggFunction.java
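
Why Optionality.IGNORED is sound here: the DISTINCT qualifier cannot change a MIN or MAX result, so the planner may treat MIN(DISTINCT x) as MIN(x) and drop the distinct step:

    import java.util.Arrays;
    import java.util.List;

    public class MinDistinctSketch {
        public static void main(String[] args) {
            List<Integer> xs = Arrays.asList(3, 1, 3, 2);
            int minAll = xs.stream().mapToInt(Integer::intValue).min().getAsInt();
            int minDistinct = xs.stream().distinct().mapToInt(Integer::intValue).min().getAsInt();
            System.out.println(minAll == minDistinct); // always true, for MAX as well
        }
    }
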
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlPartialHyperloglogFunction.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlPartialHyperloglogFunction.java
new file mode 100644
index 000000000..28db302ce
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlPartialHyperloglogFunction.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql.fun;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.sql.SqlAggFunction;
+import org.apache.calcite.sql.SqlCall;
+import org.apache.calcite.sql.SqlFunctionCategory;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlSplittableAggFunction;
+import org.apache.calcite.sql.type.OperandTypes;
+import org.apache.calcite.sql.type.ReturnTypes;
+import org.apache.calcite.sql.validate.SqlValidator;
+import org.apache.calcite.sql.validate.SqlValidatorScope;
+
+public class SqlPartialHyperloglogFunction extends SqlAggFunction {
+    public SqlPartialHyperloglogFunction() {
+        super(
+            "PARTIAL_HYPERLOGLOG",
+            null,
+            SqlKind.PARTIAL_HYPER_LOGLOG,
+            ReturnTypes.VARCHAR_BINARY,
+            null,
+            OperandTypes.ONE_OR_MORE,
+            SqlFunctionCategory.NUMERIC,
+            false,
+            false);
+    }
+
+
+    public RelDataType deriveType(
+        SqlValidator validator,
+        SqlValidatorScope scope,
+        SqlCall call) {
+        return super.deriveType(validator, scope, call);
+    }
+
+    @Override public <T> T unwrap(Class<T> clazz) {
+        if (clazz == SqlSplittableAggFunction.class) {
+            return clazz.cast(SqlSplittableAggFunction.CountSplitter.INSTANCE);
+        }
+        return super.unwrap(clazz);
+    }
+}
+
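
PARTIAL_HYPERLOGLOG (returning a VARBINARY sketch), FINAL_HYPERLOGLOG (merging sketches into a BIGINT estimate), and the single-phase HYPERLOGLOG together split NDV estimation across shards. A stand-in sketch of that contract, using exact sets in place of real HLL registers:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class NdvSplitSketch {
        // Stand-in for PARTIAL_HYPERLOGLOG: one mergeable summary per shard.
        static Set<String> partialNdv(List<String> rows) {
            return new HashSet<>(rows);
        }

        // Stand-in for FINAL_HYPERLOGLOG: merge summaries, extract the estimate.
        static long finalNdv(List<Set<String>> partials) {
            Set<String> merged = new HashSet<>();
            for (Set<String> p : partials) {
                merged.addAll(p);
            }
            return merged.size();
        }

        public static void main(String[] args) {
            Set<String> s1 = partialNdv(Arrays.asList("a", "b"));
            Set<String> s2 = partialNdv(Arrays.asList("b", "c"));
            System.out.println(finalNdv(Arrays.asList(s1, s2))); // 3
        }
    }
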
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlSingleValueAggFunction.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlSingleValueAggFunction.java
index 6e745df1a..258cd3783 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlSingleValueAggFunction.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlSingleValueAggFunction.java
@@ -16,6 +16,7 @@
  */
 package org.apache.calcite.sql.fun;
 
+import com.google.common.collect.ImmutableList;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeFactory;
 import org.apache.calcite.sql.SqlAggFunction;
@@ -23,8 +24,7 @@
 import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.type.OperandTypes;
 import org.apache.calcite.sql.type.ReturnTypes;
-
-import com.google.common.collect.ImmutableList;
+import org.apache.calcite.util.Optionality;
 
 import java.util.List;
 
@@ -71,6 +71,10 @@ public RelDataType getReturnType(RelDataTypeFactory typeFactory) {
   public RelDataType getType() {
     return type;
   }
+
+  @Override public Optionality getDistinctOptionality() {
+    return Optionality.IGNORED;
+  }
 }
 
 // End SqlSingleValueAggFunction.java
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlStdOperatorTable.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlStdOperatorTable.java
index 9318451d3..bd9d15224 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlStdOperatorTable.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/fun/SqlStdOperatorTable.java
@@ -18,6 +18,7 @@
 
 import com.google.common.collect.ImmutableList;
 import org.apache.calcite.sql.SqlAggFunction;
+import org.apache.calcite.sql.SqlAsOf57Operator;
 import org.apache.calcite.sql.SqlAsOf80Operator;
 import org.apache.calcite.sql.SqlAsOfOperator;
 import org.apache.calcite.sql.SqlAsOperator;
@@ -173,9 +174,20 @@ public class SqlStdOperatorTable extends ReflectiveSqlOperatorTable {
    * with an alias.
    */
   public static final SqlAsOperator AS = new SqlAsOperator();
+  /**
+   * AS OF TIMESTAMP: on 5.7 the physical SQL uses AS OF TIMESTAMP; on 8.0 it must be converted to AS OF GCN
+   */
   public static final SqlAsOfOperator AS_OF = new SqlAsOfOperator();
+  /**
+   * For 8.0: user SQL uses AS OF TSO; the physical SQL uses AS OF GCN
+   */
   public static final SqlAsOf80Operator AS_OF_80 = new SqlAsOf80Operator();
 
+  /**
+   * For 5.7: user SQL uses AS OF TSO; the physical SQL also uses AS OF TSO
+   */
+  public static final SqlAsOf57Operator AS_OF_57 = new SqlAsOf57Operator();
+
   /**
   * ARGUMENT_ASSIGNMENT operator (=>)
    * assigns an argument to a function call to a particular named parameter.
@@ -1109,12 +1121,21 @@ public boolean argumentMustBeScalar(int ordinal) {
   public static final SqlRankFunction RANK =
       new SqlRankFunction(SqlKind.RANK, ReturnTypes.RANK, true);
 
+  public static final SqlHyperloglogFunction HYPERLOGLOG =
+      new SqlHyperloglogFunction();
+
   public static final SqlCheckSumFunction CHECK_SUM =
       new SqlCheckSumFunction();
 
+  public static final SqlCheckSumV2Function CHECK_SUM_V2 =
+      new SqlCheckSumV2Function();
+
   public static final SqlCheckSumMergeFunction CHECK_SUM_MERGE_FUNCTION =
       new SqlCheckSumMergeFunction();
 
+  public static final SqlCheckSumV2MergeFunction CHECK_SUM_MERGE_V2_FUNCTION =
+      new SqlCheckSumV2MergeFunction();
+
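
A toy restatement of the flashback rewrite the three operator comments describe (the real conversion happens during physical SQL generation, not in this operator table):

    public class AsOfRewriteSketch {
        enum Dn { MYSQL57, MYSQL80 }

        static String physicalAsOfTso(Dn version, long tso) {
            return version == Dn.MYSQL80 ? "AS OF GCN " + tso : "AS OF TSO " + tso;
        }

        public static void main(String[] args) {
            System.out.println(physicalAsOfTso(Dn.MYSQL57, 123L)); // AS OF TSO 123
            System.out.println(physicalAsOfTso(Dn.MYSQL80, 123L)); // AS OF GCN 123
        }
    }
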
   /**
    * ROW_NUMBER window function.
    */
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/type/MySQLStandardTypeInference.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/type/MySQLStandardTypeInference.java
index 00809a802..6623e6542 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/type/MySQLStandardTypeInference.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/type/MySQLStandardTypeInference.java
@@ -18,6 +18,7 @@
 
 import com.alibaba.polardbx.common.utils.time.MySQLTimeTypeUtil;
 import com.alibaba.polardbx.common.utils.time.calculator.MySQLIntervalType;
+import com.alibaba.polardbx.common.utils.version.InstanceVersion;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeFactory;
 import org.apache.calcite.sql.SqlCall;
@@ -390,7 +391,8 @@ public RelDataType inferReturnType(SqlOperatorBinding opBinding) {
                     typeFactory.getTypeSystem().getMaxPrecision(SqlTypeName.TIME),
                     scale
                 );
-            } else if (isDatetimeOrTimestamp(operandType1)) {
+            } else if (isDatetimeOrTimestamp(operandType1)
+                || (InstanceVersion.isMYSQL80() && SqlTypeUtil.isDate(operandType1))) {
                 int scale = Math.max(dynamicTemporalScale(operandType1), dynamicTemporalScale(operandType2));
                 return typeFactory.createSqlType(
                     SqlTypeName.DATETIME,
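
The new branch means that under a MySQL 8.0 data node, DATE plus a temporal interval now infers DATETIME (e.g. DATE '2024-01-01' + INTERVAL 1 HOUR), where previously only DATETIME/TIMESTAMP operands took this path. A simplified model of the predicate:

    public class DateIntervalInferenceSketch {
        enum T { DATE, DATETIME, TIMESTAMP, TIME }

        static boolean isDatetimeOrTimestamp(T t) {
            return t == T.DATETIME || t == T.TIMESTAMP;
        }

        static T inferPlusInterval(T operand, boolean mysql80) {
            if (isDatetimeOrTimestamp(operand) || (mysql80 && operand == T.DATE)) {
                return T.DATETIME; // fractional-second scale handling omitted
            }
            return operand; // simplified fallback
        }

        public static void main(String[] args) {
            System.out.println(inferPlusInterval(T.DATE, true));  // DATETIME
            System.out.println(inferPlusInterval(T.DATE, false)); // DATE (simplified)
        }
    }
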
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/type/ReturnTypes.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/type/ReturnTypes.java
index cbdea0af5..2135e025d 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/type/ReturnTypes.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/type/ReturnTypes.java
@@ -31,6 +31,7 @@
 import java.util.Optional;
 import java.util.Set;
 
+import com.alibaba.polardbx.common.datatype.DecimalTypeBase;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableSet;
 import com.alibaba.polardbx.common.charset.CharsetName;
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/type/SetSqlType.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/type/SetSqlType.java
new file mode 100644
index 000000000..c94339bbe
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/type/SetSqlType.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql.type;
+
+import org.apache.calcite.rel.type.RelDataTypeSystem;
+
+import java.util.List;
+
+/**
+ * This class is not fully implemented yet.
+ * It can be used when converting from a JDBC type to a PolarDB-X type, but must not be used directly in the optimizer.
+ */
+public class SetSqlType extends BasicSqlType {
+    private final List<String> setValues;
+
+    public SetSqlType(RelDataTypeSystem typeSystem, SqlTypeName typeName, List<String> setValues) {
+        super(typeSystem, typeName);
+        this.setValues = setValues;
+    }
+
+    public SetSqlType(RelDataTypeSystem typeSystem, SqlTypeName typeName, int precision, List<String> setValues) {
+        super(typeSystem, typeName, precision);
+        this.setValues = setValues;
+    }
+
+    public List<String> getSetValues() {
+        return setValues;
+    }
+
+}
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/type/SqlTypeFactoryImpl.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/type/SqlTypeFactoryImpl.java
index 5678df5a0..0ee71650d 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/type/SqlTypeFactoryImpl.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/type/SqlTypeFactoryImpl.java
@@ -95,6 +95,19 @@ public RelDataType createEnumSqlType(SqlTypeName typeName, List<String> values)
     return canonize(new EnumSqlType(typeSystem, typeName, values, null, null));
   }
 
+  @Override
+  public RelDataType createSetSqlType(SqlTypeName typeName, int precision, List<String> setValues) {
+    final int maxPrecision = typeSystem.getMaxPrecision(typeName);
+    if (maxPrecision >= 0 && precision > maxPrecision) {
+      precision = maxPrecision;
+    }
+    RelDataType newType = precision == RelDataType.PRECISION_NOT_SPECIFIED
+        ? new SetSqlType(typeSystem, typeName, setValues)
+        : new SetSqlType(typeSystem, typeName, precision, setValues);
+    newType = SqlTypeUtil.addCharsetAndCollation(newType, this);
+    return canonize(newType);
+  }
+
   public RelDataType createUnknownType() {
     return canonize(new UnknownSqlType(this));
   }
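
A hedged usage sketch of the new factory hook; the SqlTypeName passed here is only a stand-in, since the JDBC-to-PolarDB-X mapping layer chooses the real one, and per the SetSqlType class comment the result must not feed the optimizer directly:

    import java.util.Arrays;

    import org.apache.calcite.rel.type.RelDataType;
    import org.apache.calcite.rel.type.RelDataTypeSystem;
    import org.apache.calcite.sql.type.SqlTypeFactoryImpl;
    import org.apache.calcite.sql.type.SqlTypeName;

    public class SetTypeSketch {
        public static void main(String[] args) {
            SqlTypeFactoryImpl factory = new SqlTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
            // Precision is clamped to the type system's maximum before canonization.
            RelDataType set = factory.createSetSqlType(
                SqlTypeName.CHAR, 16, Arrays.asList("red", "green", "blue"));
            System.out.println(set.getFullTypeString());
        }
    }
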
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/type/SqlTypeName.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/type/SqlTypeName.java
index e061c2247..8271c5fbf 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/type/SqlTypeName.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/type/SqlTypeName.java
@@ -17,6 +17,7 @@
 package org.apache.calcite.sql.type;
 
 import com.alibaba.polardbx.common.charset.CharsetName;
+import com.alibaba.polardbx.common.datatype.DecimalTypeBase;
 import com.alibaba.polardbx.common.utils.time.MySQLTimeTypeUtil;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
@@ -72,8 +73,6 @@ public enum SqlTypeName {
     REAL(PrecScale.NO_NO, false, Types.REAL, SqlTypeFamily.NUMERIC),
     DOUBLE(PrecScale.NO_NO, false, Types.DOUBLE, SqlTypeFamily.NUMERIC),
 
-    // TODO: remove UNSIGNED/SIGNED
-    // add by xiaoying
     UNSIGNED(PrecScale.NO_NO, false, Types.BIGINT, SqlTypeFamily.NUMERIC),
     SIGNED(PrecScale.NO_NO, false, Types.BIGINT, SqlTypeFamily.NUMERIC),
 
@@ -169,7 +168,16 @@ public enum SqlTypeName {
     public static final int MAX_TIME_FRACTIONAL_SECOND_SCALE = 6;
 
     // Cached map of enum values
-    private static final Map<String, SqlTypeName> VALUES_MAP = Util.enumConstants(SqlTypeName.class);
+    private static final Map<String, SqlTypeName> VALUES_MAP = ImmutableMap.<String, SqlTypeName>builder()
+        .putAll(Util.enumConstants(SqlTypeName.class))
+        // For Alias in MySQL https://dev.mysql.com/doc/refman/5.7/en/cast-functions.html#function_cast
+        // SIGNED [INTEGER]
+        // Produces a signed BIGINT value.
+        // UNSIGNED [INTEGER]
+        // Produces an unsigned BIGINT value.
+        .put("SIGNED INTEGER", SIGNED)
+        .put("UNSIGNED INTEGER", UNSIGNED)
+        .build();
 
     // categorizations used by SqlTypeFamily definitions
 
@@ -396,15 +404,6 @@ public static long getSize() {
      * @return Type name, or null if not found
      */
     public static SqlTypeName get(String name) {
-        if (false) {
-            // The following code works OK, but the spurious exceptions are
-            // annoying.
-            try {
-                return SqlTypeName.valueOf(name);
-            } catch (IllegalArgumentException e) {
-                return null;
-            }
-        }
         return VALUES_MAP.get(name);
     }
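
The effect of the alias entries added to VALUES_MAP above: multi-word CAST target names now resolve instead of returning null:

    import org.apache.calcite.sql.type.SqlTypeName;

    public class CastAliasSketch {
        public static void main(String[] args) {
            System.out.println(SqlTypeName.get("SIGNED INTEGER"));   // SIGNED
            System.out.println(SqlTypeName.get("UNSIGNED INTEGER")); // UNSIGNED
            System.out.println(SqlTypeName.get("BIGINT"));           // BIGINT
        }
    }
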
 
@@ -477,7 +476,7 @@ private static List<SqlTypeName> combine(List<SqlTypeName> list0, List<SqlTypeName> list1)
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/validate/SqlValidator.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/validate/SqlValidator.java
-  void validateGsiName(Set<String> gsiNames, SqlIdentifier currentGsiName);
+  void validateGsiName(Set<String> gsiNames, SqlCreateIndex createIndex);
+
+  void validateGsiName(Set<String> gsiNames, SqlIndexDefinition indexDefinition);
 
   /**
    * Set if implicit type coercion is allowed when the validator does validation.
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java
index 72c1843bf..bc3fcc42f 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql/validate/SqlValidatorImpl.java
@@ -26,9 +26,7 @@
 import com.alibaba.polardbx.common.utils.GeneralUtil;
 import com.alibaba.polardbx.druid.sql.SQLUtils;
 import com.alibaba.polardbx.druid.sql.ast.SQLStatement;
-import com.alibaba.polardbx.druid.sql.ast.expr.SQLCharExpr;
-import com.alibaba.polardbx.druid.sql.ast.expr.SQLListExpr;
-import com.alibaba.polardbx.druid.sql.ast.expr.SQLLiteralExpr;
+import com.alibaba.polardbx.druid.sql.ast.statement.SQLColumnDefinition;
 import com.alibaba.polardbx.druid.sql.ast.statement.SQLTableElement;
 import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.MySqlKey;
 import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlCreateTableStatement;
@@ -143,6 +141,7 @@
 import org.apache.calcite.sql.SqlUpdate;
 import org.apache.calcite.sql.SqlUtil;
 import org.apache.calcite.sql.SqlUtil.SpecialIdentiFiers;
+import org.apache.calcite.sql.SqlValuesTableSource;
 import org.apache.calcite.sql.SqlWindow;
 import org.apache.calcite.sql.SqlWith;
 import org.apache.calcite.sql.SqlWithItem;
@@ -184,6 +183,7 @@
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.IdentityHashMap;
+import java.util.Iterator;
 import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Locale;
@@ -494,6 +494,26 @@ public SqlNode[] expandStarForCheckSum(
         return list.toArray(new SqlNode[list.size()]);
     }
 
+    @Override
+    public SqlNode[] expandStarForCheckSumV2(
+        SqlNode selectItem,
+        SqlSelect select) {
+        final List<SqlNode> list = new ArrayList<>();
+        final RelDataType originalType = getValidatedNodeTypeIfKnown(selectItem);
+        expandSelectItem(
+            selectItem,
+            select,
+            Util.first(originalType, unknownType),
+            list,
+            catalogReader.nameMatcher().isCaseSensitive()
+                ? new LinkedHashSet<>()
+                : new TreeSet<>(String.CASE_INSENSITIVE_ORDER),
+            new ArrayList<>(),
+            false);
+        //getRawSelectScope(select).setExpandedSelectList(list);
+        return list.toArray(new SqlNode[list.size()]);
+    }
+
     // implement SqlValidator
     public void declareCursor(SqlSelect select, SqlValidatorScope parentScope) {
         cursorSet.add(select);
@@ -1046,6 +1066,7 @@ private SqlNode validateScopedExpression(
         }
         if (topNode.getKind() == SqlKind.CREATE_TABLE || topNode.getKind() == SqlKind.DROP_TABLE
             || topNode.getKind() == SqlKind.DROP_VIEW || topNode.getKind() == SqlKind.DROP_FILESTORAGE
+            || topNode.getKind() == SqlKind.CLEAR_FILESTORAGE
             || topNode.getKind() == SqlKind.CREATE_FILESTORAGE
             || topNode.getKind() == SqlKind.DROP_MATERIALIZED_VIEW
             || topNode.getKind() == SqlKind.DROP_VIEW
@@ -1289,8 +1310,7 @@ public static SqlNode assignAutoPartition(final SqlIdentifier firstKey, final String pkDataType) {
     }
 
     private SqlIndexDefinition assignAutoPartitionForGsiIndex(
-        final SqlIndexDefinition index, final List<Pair<SqlIdentifier, SqlColumnDeclaration>> cols,
-        final SqlIdentifier newIndexName, boolean clustered) {
+        final SqlIndexDefinition index, final List<Pair<SqlIdentifier, SqlColumnDeclaration>> cols) {
         // Check we have to change this.
         if (index.isLocal()) {
             return null; // Ignore explicit local index.
@@ -1329,7 +1349,7 @@ private SqlIndexDefinition assignAutoPartitionForGsiIndex(
                 "Index '" + index.getIndexName().getLastName() + "' column '" + firstColName + "' not found."));
 
         final String pkDataType = firstColumn.getValue().getDataType().getTypeName().getLastName().toLowerCase();
-        return index.rebuildToGsi(newIndexName, assignAutoPartition(firstColumn.getKey(), pkDataType), clustered);
+        return index.rebuildToGsi(null, assignAutoPartition(firstColumn.getKey(), pkDataType));
     }
 
     private static boolean checkKeyExistence(SqlCreateTable sqlCreateTable, String keyName) {
@@ -1353,6 +1373,10 @@ private static boolean checkKeyExistence(SqlCreateTable sqlCreateTable, String keyName) {
             .anyMatch(pair -> pair.getKey().getLastName().equalsIgnoreCase(keyName))) {
             return true;
         }
+        if (sqlCreateTable.getColumnarKeys() != null && sqlCreateTable.getColumnarKeys().stream()
+            .anyMatch(pair -> pair.getKey().getLastName().equalsIgnoreCase(keyName))) {
+            return true;
+        }
         if (sqlCreateTable.getClusteredUniqueKeys() != null && sqlCreateTable.getClusteredUniqueKeys().stream()
             .anyMatch(pair -> pair.getKey().getLastName().equalsIgnoreCase(keyName))) {
             return true;
@@ -1450,25 +1474,33 @@ public static SqlNode assignAutoPartitionNewPartition(List<SqlIdentifier> keys,
                                                           List<SqlIdentifier> pkKeys,
                                                           List<String> pkTypeNames,
                                                           long defaultPartitions,
-                                                          boolean force) {
+                                                          boolean force,
+                                                          boolean isColumnar) {
         assert keys.size() == typeNames.size();
+        assert pkKeys.size() == pkTypeNames.size();
+
+        // Columnar index use pk as partition key by default
+        final List<SqlIdentifier> candidates = isColumnar ? pkKeys : keys;
+        final List<String> candidateTypes = isColumnar ? pkTypeNames : typeNames;
+
         Set<String> duplicateChecker = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
         List<SqlIdentifier> validKeys = new ArrayList<>();
         List<String> validTypeNames = new ArrayList<>();
-        for (int i = 0; i < keys.size(); ++i) {
+        for (int i = 0; i < candidates.size(); ++i) {
             // Use as much as possible.
-            if (!supportNewPartition(typeNames.get(i))) {
+            if (!supportNewPartition(candidateTypes.get(i))) {
                 if (0 == i && !force) {
                     // First key must be valid.
                     throw new NotSupportException(
-                        "Key '" + keys.get(i).getLastName() + "' type '" + typeNames.get(i) + "' for auto partition");
+                        "Key '" + candidates.get(i).getLastName() + "' type '" + candidateTypes.get(i)
+                            + "' for auto partition");
                 }
                 // Or just ignore and next.
             } else {
                 // Put valid keys in correct order and remove duplicates.
-                if (duplicateChecker.add(keys.get(i).getLastName())) {
-                    validKeys.add(keys.get(i));
-                    validTypeNames.add(typeNames.get(i));
+                if (duplicateChecker.add(candidates.get(i).getLastName())) {
+                    validKeys.add(candidates.get(i));
+                    validTypeNames.add(candidateTypes.get(i));
                 }
             }
         }
@@ -1492,7 +1524,7 @@ public static SqlNode assignAutoPartitionNewPartition(List<SqlIdentifier> keys,
         }
         for (int i = 0; i < validKeys.size(); i++) {
             SqlIdentifier keyId = validKeys.get(i);
-            String typeName = typeNames.get(i);
+            String typeName = validTypeNames.get(i);
             String keyName = SQLUtils.normalizeNoTrim(keyId.getLastName());
             if (pkKeyInfoMaps.containsKey(keyName)) {
                 continue;
@@ -1514,11 +1546,15 @@ public static SqlNode assignAutoPartitionNewPartition(List keys,
         SqlNode partByAst = null;
         if (!useSubPartByForDateTimeIdx) {
             // Generate the partitioning clause.
-            final SqlPartitionByHash sqlPartitionByHash = new SqlPartitionByHash(true, false, SqlParserPos.ZERO);
-            sqlPartitionByHash
-                .setPartitionsCount(SqlLiteral
-                    .createLiteralForIntTypes(Long.toString(DynamicConfig.getInstance().getAutoPartitionPartitions()),
-                        SqlParserPos.ZERO, SqlTypeName.BIGINT));
+            final SqlPartitionByHash sqlPartitionByHash = new SqlPartitionByHash(!isColumnar, false, SqlParserPos.ZERO);
+            // For cci, generate partition count in {@link PartitionInfoBuilder#buildPartitionInfoByPartDefAst}
+            if (!isColumnar) {
+                // Set partition count for gsi.
+                sqlPartitionByHash
+                    .setPartitionsCount(SqlLiteral
+                        .createLiteralForIntTypes(Long.toString(defaultPartitions),
+                            SqlParserPos.ZERO, SqlTypeName.BIGINT));
+            }
             sqlPartitionByHash.getColumns().addAll(validKeys);
             final StringBuilder builder = new StringBuilder();
             for (int i = 0; i < validKeys.size(); ++i) {
@@ -1527,8 +1563,12 @@ public static SqlNode assignAutoPartitionNewPartition(List keys,
                 }
                 builder.append(SqlIdentifier.surroundWithBacktick(validKeys.get(i).getLastName()));
             }
-            sqlPartitionByHash.setSourceSql("KEY(" + builder + ") PARTITIONS "
-                + defaultPartitions);
+            if (isColumnar) {
+                // For cci, generate partition count in {@link PartitionInfoBuilder#buildPartitionInfoByPartDefAst}
+                sqlPartitionByHash.setSourceSql("DIRECT_HASH(" + builder + ")");
+            } else {
+                sqlPartitionByHash.setSourceSql("KEY(" + builder + ") PARTITIONS " + defaultPartitions);
+            }
             partByAst = sqlPartitionByHash;
         } else {
             // Generate the partitioning clause.
@@ -1550,7 +1590,7 @@ public static SqlNode assignAutoPartitionNewPartition(List<SqlIdentifier> keys,
                 subPartKeyBuilder.append(SqlIdentifier.surroundWithBacktick(subPartKeys.get(i).getLastName()));
             }
 
-            subPartBuilder.append("SUBPARTITION KEY(");
+            subPartBuilder.append("SUBPARTITION BY KEY(");
             subPartBuilder.append(subPartKeyBuilder);
             subPartBuilder.append(") SUBPARTITIONS ");
             subPartBuilder.append(defaultPartitions);
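
For columnar indexes (CCI) the partition key defaults to the primary key and the clause is emitted as DIRECT_HASH(...) with no explicit partition count (that count is decided later in PartitionInfoBuilder#buildPartitionInfoByPartDefAst), while GSIs keep KEY(...) PARTITIONS n. The two source-SQL shapes, restated (column name illustrative):

    public class AutoPartitionClauseSketch {
        static String sourceSql(String cols, boolean isColumnar, long defaultPartitions) {
            return isColumnar
                ? "DIRECT_HASH(" + cols + ")"
                : "KEY(" + cols + ") PARTITIONS " + defaultPartitions;
        }

        public static void main(String[] args) {
            System.out.println(sourceSql("`id`", false, 16)); // KEY(`id`) PARTITIONS 16
            System.out.println(sourceSql("`id`", true, 16));  // DIRECT_HASH(`id`)
        }
    }
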
@@ -1655,10 +1695,8 @@ public static SqlNode assignAutoPartitionNewPartition(List<SqlIdentifier> keys,
 //        return sqlPartitionByHash;
 //    }
 
-    private static SqlIndexDefinition assignAutoPartitionForGsiIndexNewPartition(
-        SqlIndexDefinition index, SqlCreateTable createTable,
-        SqlIdentifier newIndexName, boolean clustered) {
-
+    private static SqlIndexDefinition assignAutoPartitionForGsiIndexNewPartition(SqlIndexDefinition index,
+                                                                                 SqlCreateTable createTable) {
         // Check legacy partition clause.
         if (index.getDbPartitionBy() != null || index.getDbPartitions() != null) {
             throw new NotSupportException(
@@ -1734,13 +1772,18 @@ private static SqlIndexDefinition assignAutoPartitionForGsiIndexNewPartition(
         }
         assert concatKeys.size() == dataTypes.size();
 
-        long defaultPartitions = DynamicConfig.getInstance().getAutoPartitionPartitions();
-//        return index.rebuildToGsiNewPartition(
-//            newIndexName, assignAutoPartitionNewPartition(concatKeys, dataTypes, pks, pkDataTypes, defaultPartitions), clustered, pks);
+        final boolean columnar = index.isColumnar();
+        long defaultPartitions = getRowStoreDefaultPartitions();
         return index.rebuildToGsiNewPartition(
-            newIndexName, assignAutoPartitionNewPartition(concatKeys, dataTypes, pks, pkDataTypes, defaultPartitions,
-                index.isGlobal()),
-            clustered);
+            null,
+            assignAutoPartitionNewPartition(
+                columnar ? pks : concatKeys,
+                columnar ? pkDataTypes : dataTypes,
+                pks,
+                pkDataTypes,
+                defaultPartitions,
+                index.isGlobal(),
+                columnar));
     }
 
     private static class RewriteOps {
@@ -1760,7 +1803,7 @@ private static class RewriteOps {
         for (Pair<SqlIdentifier, SqlIndexDefinition> pair : indexes) {
             // No name change, pure GSI.
             final SqlIndexDefinition changed =
-                assignAutoPartitionForGsiIndexNewPartition(pair.getValue(), createTable, null, false);
+                assignAutoPartitionForGsiIndexNewPartition(pair.getValue(), createTable);
             if (null == changed) {
                 localList.add(pair);
             } else {
@@ -1787,22 +1830,24 @@ private static class RewriteOps {
         for (Pair<SqlIdentifier, SqlIndexDefinition> pair : indexes) {
             // No name change, pure GSI.
             final SqlIndexDefinition changed =
-                assignAutoPartitionForGsiIndexNewPartition(pair.getValue(), createTable, null, false);
+                assignAutoPartitionForGsiIndexNewPartition(pair.getValue(), createTable);
             if (changed != null) {
                 // Update it.
                 gsiList.add(new Pair<>(changed.getIndexName(), changed));
             } else {
                 gsiList.add(pair);
             }
-            // Generate a local index.
-            final SqlIndexDefinition indexDefinition = null == changed ? pair.getValue() : changed;
-            final String newLocalName = AUTO_LOCAL_INDEX_PREFIX + indexDefinition.getIndexName().getLastName();
-            if (checkKeyExistence(createTable, newLocalName)) {
-                continue; // Ignore existing.
+            // Generate a companion local index for GSI only.
+            if (!pair.getValue().isColumnar()) {
+                final SqlIndexDefinition indexDefinition = null == changed ? pair.getValue() : changed;
+                final String newLocalName = AUTO_LOCAL_INDEX_PREFIX + indexDefinition.getIndexName().getLastName();
+                if (checkKeyExistence(createTable, newLocalName)) {
+                    continue; // Ignore existing.
+                }
+                final SqlIdentifier newName = new SqlIdentifier(newLocalName, SqlParserPos.ZERO);
+                localList.add(new Pair<>(newName, pair.getValue().rebuildToExplicitLocal(newName)));
+                ops.addLocalOp.add(new Pair<>(indexDefinition.getIndexName().getLastName(), newLocalName));
             }
-            final SqlIdentifier newName = new SqlIdentifier(newLocalName, SqlParserPos.ZERO);
-            localList.add(new Pair<>(newName, pair.getValue().rebuildToExplicitLocal(newName)));
-            ops.addLocalOp.add(new Pair<>(indexDefinition.getIndexName().getLastName(), newLocalName));
         }
         return new Pair<>(localList, gsiList);
     }
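
A minimal sketch of the naming rule above: every GSI on an auto-partition table gains a companion local index, while a columnar index gets none. The prefix value below is an assumption for illustration; only the constant name AUTO_LOCAL_INDEX_PREFIX appears in the patch.

    final class LocalIndexNaming {
        static final String AUTO_LOCAL_INDEX_PREFIX = "_local_"; // assumed value, for illustration

        // Returns the companion local-index name, or null when the index is columnar.
        static String companionLocalIndex(String indexName, boolean isColumnar) {
            return isColumnar ? null : AUTO_LOCAL_INDEX_PREFIX + indexName;
        }
    }
    // companionLocalIndex("idx_buyer", false) -> "_local_idx_buyer"
    // companionLocalIndex("cci_buyer", true)  -> null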
@@ -1838,10 +1883,10 @@ private static SqlCreateTable rewriteForNewPartition(SqlCreateTable createTable)
             final List<String> pkDataTypes = pks.stream()
                 .map(col -> getColumnDefine(createTable, col.getLastName()).getDataType().getTypeName().getLastName()
                     .toLowerCase()).collect(Collectors.toList());
-            long defaultPartitions = DynamicConfig.getInstance().getAutoPartitionPartitions();
+            long defaultPartitions = getRowStoreDefaultPartitions();
             createTable.setSqlPartition(
                 assignAutoPartitionNewPartition(pks, pkDataTypes, pks, pkDataTypes, defaultPartitions,
-                    createTable.createGsi()));
+                    createTable.createGsi(), false));
         }
 
         RewriteOps ops = new RewriteOps();
@@ -1912,6 +1957,14 @@ private static SqlCreateTable rewriteForNewPartition(SqlCreateTable createTable)
             createTable.getUniqueKeys().addAll(localGsi.left);
         }
 
+        // Assign an auto partition clause to any CCI that does not declare one.
+        if (createTable.getColumnarKeys() != null && !createTable.getColumnarKeys().isEmpty()) {
+            final Pair<List<Pair<SqlIdentifier, SqlIndexDefinition>>, List<Pair<SqlIdentifier, SqlIndexDefinition>>>
+                lsiAndCci = gsiNormalizeNewPartition(createTable.getColumnarKeys(), createTable, ops);
+            createTable.setColumnarKeys(lsiAndCci.right);
+            // Do not add a local index for CCI.
+        }
+
         // Do sql rewrite.
         final List statementList =
             SQLUtils.parseStatementsWithDefaultFeatures(createTable.getSourceSql(), JdbcConstants.MYSQL);
@@ -2162,26 +2215,27 @@ protected SqlNode performUnconditionalRewrites(
             }
 
             if (autoPartitionDatabase) {
-
-                if (((SqlCreateTable) node).isAutoPartition()) {
-                    // Auto partition for new partition database.
-                    final SqlCreateTable createTable = (SqlCreateTable) node;
-
-                    // Dealing normal rewrites.
+                // Auto partition for new partition database.
+                final SqlCreateTable createTable = (SqlCreateTable) node;
+                if (createTable.isAutoPartition()) {
+                    // Deal with the normal rewrites for an auto-partition table.
                     node = rewriteForNewPartition(createTable);
                 } else {
                     if (defaultSingle) {
                         /**
                          * create autodb with default_single=true
-                         */
-
-                        final SqlCreateTable createTable = (SqlCreateTable) node;
-
-                        /**
-                         *  auto rewrite create tbl without partitionby by single locality='balance_single_table=on'
+                         * auto-rewrite: CREATE TABLE without PARTITION BY becomes a single table with locality='balance_single_table=on'
                          */
                         node = rewriteForBalanceSingleTableIfNeed(createTable);
                     }
+
+                    // Rewrite CCIs on a partitioned table:
+                    // assign an auto partition clause to any CCI that does not declare one.
+                    if (createTable.getColumnarKeys() != null && !createTable.getColumnarKeys().isEmpty()) {
+                        createTable.setColumnarKeys(
+                            gsiNormalizeNewPartition(createTable.getColumnarKeys(), createTable, null).getValue());
+                        // Do not add a local index for CCI.
+                    }
                 }
 
             } else if (((SqlCreateTable) node).isAutoPartition()) { // Auto partition.
@@ -2220,7 +2274,7 @@ protected SqlNode performUnconditionalRewrites(
                     for (Pair<SqlIdentifier, SqlIndexDefinition> pair : createTable.getKeys()) {
                         // No name change, pure GSI.
                         final SqlIndexDefinition changed =
-                            assignAutoPartitionForGsiIndex(pair.getValue(), createTable.getColDefs(), null, false);
+                            assignAutoPartitionForGsiIndex(pair.getValue(), createTable.getColDefs());
                         if (null == changed) {
                             tmpList.add(pair);
                         } else {
@@ -2246,7 +2300,7 @@ protected SqlNode performUnconditionalRewrites(
                     for (Pair<SqlIdentifier, SqlIndexDefinition> pair : createTable.getUniqueKeys()) {
                         // No name change, pure GSI.
                         final SqlIndexDefinition changed =
-                            assignAutoPartitionForGsiIndex(pair.getValue(), createTable.getColDefs(), null, false);
+                            assignAutoPartitionForGsiIndex(pair.getValue(), createTable.getColDefs());
                         if (null == changed) {
                             tmpList.add(pair);
                         } else {
@@ -2271,7 +2325,7 @@ protected SqlNode performUnconditionalRewrites(
                     for (Pair<SqlIdentifier, SqlIndexDefinition> pair : createTable.getGlobalKeys()) {
                         // No name change, pure GSI.
                         final SqlIndexDefinition changed =
-                            assignAutoPartitionForGsiIndex(pair.getValue(), createTable.getColDefs(), null, false);
+                            assignAutoPartitionForGsiIndex(pair.getValue(), createTable.getColDefs());
                         if (changed != null) {
                             // Update it.
                             tmpList.add(new Pair<>(pair.getKey(), changed));
@@ -2299,7 +2353,7 @@ protected SqlNode performUnconditionalRewrites(
                     for (Pair<SqlIdentifier, SqlIndexDefinition> pair : createTable.getGlobalUniqueKeys()) {
                         // No name change, pure GSI.
                         final SqlIndexDefinition changed =
-                            assignAutoPartitionForGsiIndex(pair.getValue(), createTable.getColDefs(), null, false);
+                            assignAutoPartitionForGsiIndex(pair.getValue(), createTable.getColDefs());
                         if (changed != null) {
                             // Update it.
                             tmpList.add(new Pair<>(pair.getKey(), changed));
@@ -2328,7 +2382,7 @@ protected SqlNode performUnconditionalRewrites(
                     for (Pair<SqlIdentifier, SqlIndexDefinition> pair : createTable.getClusteredKeys()) {
                         // No name change, clustered.
                         final SqlIndexDefinition changed =
-                            assignAutoPartitionForGsiIndex(pair.getValue(), createTable.getColDefs(), null, true);
+                            assignAutoPartitionForGsiIndex(pair.getValue(), createTable.getColDefs());
                         if (changed != null) {
                             // Update it.
                             tmpList.add(new Pair<>(pair.getKey(), changed));
@@ -2356,7 +2410,7 @@ protected SqlNode performUnconditionalRewrites(
                     for (Pair<SqlIdentifier, SqlIndexDefinition> pair : createTable.getClusteredUniqueKeys()) {
                         // No name change, clustered.
                         final SqlIndexDefinition changed =
-                            assignAutoPartitionForGsiIndex(pair.getValue(), createTable.getColDefs(), null, true);
+                            assignAutoPartitionForGsiIndex(pair.getValue(), createTable.getColDefs());
                         if (changed != null) {
                             // Update it.
                             tmpList.add(new Pair<>(pair.getKey(), changed));
@@ -2439,6 +2493,7 @@ protected SqlNode performUnconditionalRewrites(
         if (node instanceof SqlCreateTable) {
             final SqlCreateTable createTable = (SqlCreateTable) node;
             // Deny GSI for no partitioned table.
+            // CCI, however, is still supported on a non-partitioned table.
             if (createTable.createGsi() &&
                 null == createTable.getDbpartitionBy() && null == createTable.getTbpartitionBy() &&
                 null == createTable.getSqlPartition()) {
@@ -3027,7 +3082,12 @@ RelDataType getTableConstructorRowType(
             final List<String> aliasList = new ArrayList<>();
             final List<RelDataType> typeList = new ArrayList<>();
             for (Ord<SqlNode> column : Ord.zip(rowConstructor.getOperandList())) {
-                final String alias = deriveAlias(column.e, column.i);
+                final String alias;
+                if (values instanceof SqlValuesTableSource) {
+                    alias = SqlValuesTableSource.COLUMN_NAME_PREFIX + column.i;
+                } else {
+                    alias = deriveAlias(column.e, column.i);
+                }
                 aliasList.add(alias);
                 final RelDataType type = deriveType(scope, column.e);
                 typeList.add(type);
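
The alias branch above reduces to the small rule sketched here; the prefix value is an assumption, since the patch only references the constant SqlValuesTableSource.COLUMN_NAME_PREFIX.

    final class ValuesAlias {
        static final String COLUMN_NAME_PREFIX = "column_"; // assumed value, for illustration

        // VALUES table sources get synthetic ordinal names; other rows keep their derived alias.
        static String of(boolean isValuesTableSource, String derivedAlias, int ordinal) {
            return isValuesTableSource ? COLUMN_NAME_PREFIX + ordinal : derivedAlias;
        }
    }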
@@ -4115,7 +4175,7 @@ private void registerQuery(
 //            final RelDataType relDataType = typeFactory.createStructType(relDataTypes, fieldList);
 //            setValidatedNodeType(node, relDataType);
 
-            if (sqlCreateTbNode.createGsi()) {
+            if (sqlCreateTbNode.createGsiOrCci()) {
                 // For checking table existence.
                 IdentifierNamespace namespace =
                     new IdentifierNamespace(this, sqlCreateTbNode.getTargetTable(), sqlCreateTbNode, parentScope);
@@ -4152,6 +4212,14 @@ private void registerQuery(
                         registerNamespace(usingScope, alias, ddlTableNamespace, forceNullable);
                     }
                 }
+
+                if (GeneralUtil.isNotEmpty(node1.getColumnarKeys())) {
+                    for (Pair<SqlIdentifier, SqlIndexDefinition> pair : node1.getColumnarKeys()) {
+                        IdentifierNamespace ddlTableNamespace =
+                            new IdentifierNamespace(this, pair.getKey(), node1, parentScope);
+                        registerNamespace(usingScope, alias, ddlTableNamespace, forceNullable);
+                    }
+                }
             } else if (sqlCreateTbNode.getSqlPartition() != null) {
 
                 final CreateTableScope createTbScope = new CreateTableScope(parentScope, sqlCreateTbNode);
@@ -4285,6 +4353,7 @@ private void registerQuery(
         case IDENTIFIER:
         case ALTER_FILESTORAGE:
         case DROP_FILESTORAGE:
+        case CLEAR_FILESTORAGE:
         case CREATE_FILESTORAGE:
         case CREATE_STORAGE_POOL:
         case ALTER_STORAGE_POOL:
@@ -4577,6 +4646,16 @@ private void registerQuery(
                 new AlterDatabaseNamespace(this, (SqlDdl) node, enclosingNode, parentScope);
             registerNamespace(usingScope, alias, alterDatabaseNamespace, forceNullable);
             break;
+        case IMPORT_DATABASE:
+            final ImportDatabaseNamespace importDatabaseNamespace =
+                new ImportDatabaseNamespace(this, (SqlDdl) node, enclosingNode, parentScope);
+            registerNamespace(usingScope, alias, importDatabaseNamespace, forceNullable);
+            break;
+        case IMPORT_SEQUENCE:
+            final ImportSequenceNamespace importSequenceNamespace =
+                new ImportSequenceNamespace(this, (SqlDdl) node, enclosingNode, parentScope);
+            registerNamespace(usingScope, alias, importSequenceNamespace, forceNullable);
+            break;
         case DROP_DATABASE:
             final DropDatabaseNamespace dropDbNamespace =
                 new DropDatabaseNamespace(this, (SqlDdl) node, enclosingNode, parentScope);
@@ -4626,6 +4705,11 @@ private void registerQuery(
                 new AlterJoinGroupNamespace(this, (SqlDdl) node, enclosingNode, parentScope);
             registerNamespace(usingScope, alias, alterJoinGroupNamespace, forceNullable);
             break;
+        case ALTER_INSTANCE:
+            final AlterInstanceNamespace alterInstanceNamespace =
+                new AlterInstanceNamespace(this, (SqlDdl) node, enclosingNode, parentScope);
+            registerNamespace(usingScope, alias, alterInstanceNamespace, forceNullable);
+            break;
         default:
             if (node.isA(DAL)) {
                 registerDal(parentScope, usingScope, node, enclosingNode, alias, forceNullable);
@@ -5319,10 +5403,10 @@ public void validateDdl(
             validateCreateTable(create);
         }
 
-        if (create.createGsi() && create instanceof SqlCreateTable) {
+        if (create.createGsiOrCci() && create instanceof SqlCreateTable) {
             final SqlCreateTable createTable = (SqlCreateTable) create;
 
-            if (createTable.createGsi() && !createTable.isIfNotExists()) {
+            if (createTable.createGsiOrCci() && !createTable.isIfNotExists()) {
                 // Not allow create table with GSI which have name conflict.
                 SqlValidatorTable table = null;
                 try {
@@ -5359,6 +5443,10 @@ public void validateDdl(
                 createTable.getClusteredUniqueKeys().forEach(s -> gsiNames.add(s.getKey().getLastName()));
             }
 
+            if (GeneralUtil.isNotEmpty(createTable.getColumnarKeys())) {
+                createTable.getColumnarKeys().forEach(s -> gsiNames.add(s.getKey().getLastName()));
+            }
+
             if (GeneralUtil.isNotEmpty(createTable.getKeys())) {
                 validateIndexName(createTable.getKeys(), gsiNames);
             }
@@ -5384,13 +5472,14 @@ public void validateDdl(
             validateGsiColumn(createTable.getGlobalUniqueKeys());
             validateGsiColumn(createTable.getClusteredKeys());
             validateGsiColumn(createTable.getClusteredUniqueKeys());
-        } else if (create.createGsi() && create instanceof SqlAlterTablePartitionKey) {
+            validateGsiColumn(createTable.getColumnarKeys());
+        } else if (create.createGsiOrCci() && create instanceof SqlAlterTablePartitionKey) {
             //do nothing for now
         } else if (create instanceof SqlAlterTableAsOfTimeStamp) {
             //do nothing for now
         } else if (create instanceof SqlAlterTablePurgeBeforeTimeStamp) {
             //do nothing for now
-        } else if (create.createGsi() && create instanceof SqlAlterTable) {
+        } else if (create.createGsiOrCci() && create instanceof SqlAlterTable) {
             final SqlAlterTable alterTable = (SqlAlterTable) create;
             alterTable.getAlters().forEach(alterItem -> {
                 if (alterItem.isA(SqlKind.ALTER_ADD_INDEX)) {
@@ -5405,7 +5494,7 @@ public void validateDdl(
                     });
                 }
             });
-        } else if (create.createGsi() && create instanceof SqlCreateIndex) {
+        } else if (create.createGsiOrCci() && create instanceof SqlCreateIndex) {
             checkDuplicatedIndexColumn((SqlCreateIndex) create);
         } else if (create instanceof SqlCreateTable && ((SqlCreateTable) create).getSqlPartition() != null) {
             SqlPartitionBy partBy = (SqlPartitionBy) ((SqlCreateTable) create).getSqlPartition();
@@ -5517,6 +5606,46 @@ public void validateGsiColumn(List> glob
         }
     }
 
+    public static void validateUnsupportedTypeWithCciWhenModifyColumn(SqlColumnDeclaration columnDeclaration) {
+        final List<String> deniedTypes = Arrays.asList(
+            "text", "binary", "varbinary", "blob", "timestamp", "time", "year", "json", "enum", "set", "point", "geometry");
+        String columnType = columnDeclaration.getDataType().getTypeName().getLastName().toLowerCase();
+        if (deniedTypes.contains(columnType)) {
+            throw new TddlRuntimeException(ErrorCode.ERR_UNSUPPORTED_COLUMN_TYPE_WITH_CCI,
+                "MODIFY/CHANGE COLUMN", columnType);
+        }
+    }
+
+    public static void validateUnsupportedColumnTypeWithCci(MySqlCreateTableStatement stmt, List<String> primaryKeys,
+                                                            List<String> sortKeys, List<String> shardingKeys) {
+        final String[] deniedTypes = {
+            "float", "double", "decimal", "numeric", "json", "enum", "set", "point", "geometry"};
+
+        final Iterator<SQLTableElement> it = stmt.getTableElementList().iterator();
+        while (it.hasNext()) {
+            final SQLTableElement sqlTableElement = it.next();
+            if (sqlTableElement instanceof SQLColumnDefinition) {
+                final SQLColumnDefinition columnDef = (SQLColumnDefinition) sqlTableElement;
+                String columnType = columnDef.getDataType().getName().toLowerCase();
+                for (String deniedType : deniedTypes) {
+                    if (columnType.equals(deniedType)) {
+                        String columnName = SQLUtils.normalizeNoTrim(columnDef.getName().getSimpleName());
+                        if (sortKeys.contains(columnName)) {
+                            throw new TddlRuntimeException(ErrorCode.ERR_UNSUPPORTED_COLUMN_TYPE_WITH_CCI,
+                                "Sort Key '" + columnName + "'", columnType);
+                        } else if (shardingKeys.contains(columnName)) {
+                            throw new TddlRuntimeException(ErrorCode.ERR_UNSUPPORTED_COLUMN_TYPE_WITH_CCI,
+                                "Sharding Key '" + columnName + "'", columnType);
+                        } else if (primaryKeys.contains(columnName)) {
+                            throw new TddlRuntimeException(ErrorCode.ERR_UNSUPPORTED_COLUMN_TYPE_WITH_CCI,
+                                "Primary Key '" + columnName + "'", columnType);
+                        }
+                    }
+                }
+            }
+        }
+    }
+
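
A self-contained sketch of the type gate above, reusing the same denied-type list; class and method names are illustrative, not part of the patch.

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    final class CciTypeGate {
        private static final Set<String> DENIED = new HashSet<>(Arrays.asList(
            "float", "double", "decimal", "numeric", "json", "enum", "set", "point", "geometry"));

        // True when a column of this MySQL type may serve as a CCI primary/sort/sharding key.
        static boolean allowedAsCciKey(String mysqlTypeName) {
            return !DENIED.contains(mysqlTypeName.toLowerCase());
        }
    }
    // allowedAsCciKey("bigint")  -> true
    // allowedAsCciKey("decimal") -> false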
     public void checkDuplicatedIndexColumn(String indexName, SqlIndexDefinition indexDef) {
         checkDuplicatedIndexColumn(indexName, indexDef.getColumns(), indexDef.getCovering(), indexDef);
     }
@@ -5554,8 +5683,7 @@ public void checkDuplicatedIndexColumn(String indexName, List gsiNames, SqlIdentifier currentGsiName) {
+    public void validateGsiName(Set<String> gsiNames, SqlIdentifier currentGsiName, boolean isColumnar) {
         final String gsiName = currentGsiName.getLastName();
 
         SqlValidatorTable table = null;
@@ -5569,12 +5697,26 @@ public void validateGsiName(Set gsiNames, SqlIdentifier currentGsiName)
         }
 
         if (null != table) {
-            throw newValidationError(currentGsiName, RESOURCE.gsiExists(gsiName));
+            if (isColumnar) {
+                throw newValidationError(currentGsiName, RESOURCE.cciExists(gsiName));
+            } else {
+                throw newValidationError(currentGsiName, RESOURCE.gsiExists(gsiName));
+            }
         }
 
         gsiNames.add(gsiName);
     }
 
+    @Override
+    public void validateGsiName(Set<String> gsiNames, SqlIndexDefinition indexDefinition) {
+        validateGsiName(gsiNames, indexDefinition.getIndexName(), indexDefinition.isColumnar());
+    }
+
+    @Override
+    public void validateGsiName(Set<String> gsiNames, SqlCreateIndex createIndex) {
+        validateGsiName(gsiNames, createIndex.getIndexName(), createIndex.createCci());
+    }
+
     public void validateIndexName(List<Pair<SqlIdentifier, SqlIndexDefinition>> keys, Set<String> gsiNames) {
         for (Pair<SqlIdentifier, SqlIndexDefinition> pair : keys) {
             final String indexName = pair.getKey().getLastName();
@@ -6768,6 +6910,10 @@ public void validateInsert(SqlInsert insert) {
                 insert.getTargetColumnList(),
                 false, null);
 
+        // For INSERT INTO t() VALUES(), where neither a target column list nor any
+        // source values are given, rewrite the SqlNode to INSERT INTO t() VALUES(DEFAULT, DEFAULT, ...).
+        checkAndRewriteEmptySource(insert, targetRowType);
+
         final SqlNode source = insert.getSource();
         if (source instanceof SqlSelect) {
             final SqlSelect sqlSelect = (SqlSelect) source;
@@ -6816,6 +6962,29 @@ public void validateInsert(SqlInsert insert) {
         }
     }
 
+    public void checkAndRewriteEmptySource(SqlInsert insert, RelDataType targetRowType) {
+        if (insert.getTargetColumnList() == null) {
+            final SqlNode originSource = insert.getSource();
+            if (originSource instanceof SqlBasicCall && originSource.getKind() == SqlKind.VALUES) {
+                final SqlBasicCall values = (SqlBasicCall) originSource;
+                final SqlBasicCall row = (SqlBasicCall) values.getOperands()[0];
+                // If both the target column list and the source value list are empty,
+                // rewrite the source to VALUES(DEFAULT, DEFAULT, ...).
+                if (row.getOperands() == null || row.getOperands().length == 0) {
+                    final List<String> fieldNames = targetRowType.getFieldNames();
+                    final long fieldCount = fieldNames.stream().filter(f -> !isImplicitKey(f)).count();
+                    final SqlNode[] defaults = new SqlNode[(int) fieldCount];
+                    for (int i = 0; i < defaults.length; i++) {
+                        // Add DEFAULT to VALUES for each target column
+                        defaults[i] =
+                            new SqlBasicCall(SqlStdOperatorTable.DEFAULT, SqlNode.EMPTY_ARRAY, SqlParserPos.ZERO);
+                    }
+                    values.setOperand(0, new SqlBasicCall(row.getOperator(), defaults, row.getParserPosition()));
+                }
+            }
+        }
+    }
+
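
Concretely, for a table t(a INT, b INT) the rewrite above turns INSERT INTO t() VALUES() into INSERT INTO t() VALUES(DEFAULT, DEFAULT); implicit-key columns are skipped when counting. A hedged sketch of that counting step, with illustrative names:

    import java.util.List;
    import java.util.function.Predicate;

    final class DefaultFill {
        // Number of DEFAULT placeholders to synthesize for the empty VALUES row.
        static int defaultCount(List<String> fieldNames, Predicate<String> isImplicitKey) {
            return (int) fieldNames.stream().filter(f -> !isImplicitKey.test(f)).count();
        }
    }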
     /**
      * Validates insert values against the constraint of a modifiable view.
      *
@@ -7572,6 +7741,10 @@ protected boolean checkTargetTableUpdatable(RelOptTable table) {
         return true;
     }
 
+    protected static long getRowStoreDefaultPartitions() {
+        return DynamicConfig.getInstance().getAutoPartitionPartitions(false);
+    }
+
     protected SqlNode getTop() {
         return top;
     }
@@ -8449,6 +8622,72 @@ protected RelDataType validateImpl(RelDataType targetRowType) {
         }
     }
 
+    private static class AlterInstanceNamespace extends AbstractNamespace {
+        private final SqlCall current;
+        private final SqlValidatorScope scope;
+
+        AlterInstanceNamespace(SqlValidatorImpl validator, SqlDdl node,
+                               SqlNode enclosingNode, SqlValidatorScope scope) {
+            super(validator, enclosingNode);
+            this.current = Preconditions.checkNotNull(node);
+            this.scope = scope;
+        }
+
+        @Override
+        public SqlNode getNode() {
+            return current;
+        }
+
+        @Override
+        protected RelDataType validateImpl(RelDataType targetRowType) {
+            return current.getOperator().deriveType(this.validator, this.scope, this.current);
+        }
+    }
+
+    private static class ImportDatabaseNamespace extends AbstractNamespace {
+        private final SqlCall current;
+        private final SqlValidatorScope scope;
+
+        ImportDatabaseNamespace(SqlValidatorImpl validator, SqlDdl node,
+                                SqlNode enclosingNode, SqlValidatorScope scope) {
+            super(validator, enclosingNode);
+            this.current = Preconditions.checkNotNull(node);
+            this.scope = scope;
+        }
+
+        @Override
+        public SqlNode getNode() {
+            return current;
+        }
+
+        @Override
+        protected RelDataType validateImpl(RelDataType targetRowType) {
+            return current.getOperator().deriveType(this.validator, this.scope, this.current);
+        }
+    }
+
+    private static class ImportSequenceNamespace extends AbstractNamespace {
+        private final SqlCall current;
+        private final SqlValidatorScope scope;
+
+        ImportSequenceNamespace(SqlValidatorImpl validator, SqlDdl node,
+                                SqlNode enclosingNode, SqlValidatorScope scope) {
+            super(validator, enclosingNode);
+            this.current = Preconditions.checkNotNull(node);
+            this.scope = scope;
+        }
+
+        @Override
+        public SqlNode getNode() {
+            return current;
+        }
+
+        @Override
+        protected RelDataType validateImpl(RelDataType targetRowType) {
+            return current.getOperator().deriveType(this.validator, this.scope, this.current);
+        }
+    }
+
     /**
      * Namespace for a DropDatabaseStatement statement.
      */
@@ -9037,6 +9276,17 @@ protected SqlNode visitScoped(SqlCall call) {
                 }
             }
 
+            if (call.isCheckSumV2Star()) {
+                SqlValidatorScope scope = getScope();
+                if (scope instanceof SelectScope) {
+                    SqlValidator validator = scope.getValidator();
+                    SqlNode[] expandedSelectList =
+                        validator.expandStarForCheckSumV2(call.operand(0), ((SelectScope) scope).getNode());
+
+                    return new SqlBasicCall(SqlStdOperatorTable.CHECK_SUM_V2, expandedSelectList, SqlParserPos.ZERO);
+                }
+            }
+
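
In effect, a star inside the checksum aggregate is expanded against the enclosing SELECT's row, e.g. CHECK_SUM_V2(*) over t(a, b) becomes CHECK_SUM_V2(a, b). A string-level sketch of that expansion (illustrative only):

    import java.util.Arrays;
    import java.util.List;

    final class CheckSumStar {
        static String expand(List<String> fieldNames) {
            return "CHECK_SUM_V2(" + String.join(", ", fieldNames) + ")";
        }
    }
    // expand(Arrays.asList("a", "b")) -> "CHECK_SUM_V2(a, b)"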
             // Only visits arguments which are expressions. We don't want to
             // qualify non-expressions such as 'x' in 'empno * 5 AS x'.
             ArgHandler argHandler =
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java
index b31f287d0..a4f7a5e07 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql2rel/SqlToRelConverter.java
@@ -48,6 +48,7 @@
 import org.apache.calcite.rel.RelCollation;
 import org.apache.calcite.rel.RelCollationTraitDef;
 import org.apache.calcite.rel.RelCollations;
+import org.apache.calcite.rel.RelDistributions;
 import org.apache.calcite.rel.RelFieldCollation;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.RelRoot;
@@ -80,9 +81,11 @@
 import org.apache.calcite.rel.ddl.AlterFileStorageBackup;
 import org.apache.calcite.rel.ddl.AlterFileStoragePurgeBeforeTimestamp;
 import org.apache.calcite.rel.ddl.AlterFunction;
+import org.apache.calcite.rel.ddl.AlterInstance;
 import org.apache.calcite.rel.ddl.AlterJoinGroup;
 import org.apache.calcite.rel.ddl.AlterProcedure;
 import org.apache.calcite.rel.ddl.AlterRule;
+import org.apache.calcite.rel.ddl.AlterStoragePool;
 import org.apache.calcite.rel.ddl.AlterSystemSetConfig;
 import org.apache.calcite.rel.ddl.AlterTable;
 import org.apache.calcite.rel.ddl.AlterTableGroupAddPartition;
@@ -105,6 +108,8 @@
 import org.apache.calcite.rel.ddl.AlterTableSetTableGroup;
 import org.apache.calcite.rel.ddl.AnalyzeTable;
 import org.apache.calcite.rel.ddl.ChangeConsensusRole;
+import org.apache.calcite.rel.ddl.ClearFileStorage;
+import org.apache.calcite.rel.ddl.ConvertAllSequences;
 import org.apache.calcite.rel.ddl.CreateDatabase;
 import org.apache.calcite.rel.ddl.CreateFileStorage;
 import org.apache.calcite.rel.ddl.CreateFunction;
@@ -116,6 +121,7 @@
 import org.apache.calcite.rel.ddl.CreateStoragePool;
 import org.apache.calcite.rel.ddl.CreateTable;
 import org.apache.calcite.rel.ddl.CreateTableGroup;
+import org.apache.calcite.rel.ddl.CreateView;
 import org.apache.calcite.rel.ddl.DropDatabase;
 import org.apache.calcite.rel.ddl.DropFileStorage;
 import org.apache.calcite.rel.ddl.DropFunction;
@@ -124,9 +130,13 @@
 import org.apache.calcite.rel.ddl.DropJoinGroup;
 import org.apache.calcite.rel.ddl.DropMaterializedView;
 import org.apache.calcite.rel.ddl.DropProcedure;
+import org.apache.calcite.rel.ddl.DropStoragePool;
 import org.apache.calcite.rel.ddl.DropTable;
 import org.apache.calcite.rel.ddl.DropTableGroup;
+import org.apache.calcite.rel.ddl.DropView;
 import org.apache.calcite.rel.ddl.GenericDdl;
+import org.apache.calcite.rel.ddl.ImportDatabase;
+import org.apache.calcite.rel.ddl.ImportSequence;
 import org.apache.calcite.rel.ddl.InspectIndex;
 import org.apache.calcite.rel.ddl.MergeTableGroup;
 import org.apache.calcite.rel.ddl.MoveDatabase;
@@ -167,7 +177,6 @@
 import org.apache.calcite.rel.logical.LogicalTableScan;
 import org.apache.calcite.rel.logical.LogicalUnion;
 import org.apache.calcite.rel.logical.LogicalValues;
-import org.apache.calcite.rel.metadata.JaninoRelMetadataProvider;
 import org.apache.calcite.rel.metadata.RelColumnMapping;
 import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.calcite.rel.stream.Delta;
@@ -205,10 +214,10 @@
 import org.apache.calcite.sql.SemiJoinType;
 import org.apache.calcite.sql.SqlAddIndex;
 import org.apache.calcite.sql.SqlAggFunction;
-import org.apache.calcite.sql.SqlAlter;
 import org.apache.calcite.sql.SqlAlterDatabase;
 import org.apache.calcite.sql.SqlAlterFileStorage;
 import org.apache.calcite.sql.SqlAlterFunction;
+import org.apache.calcite.sql.SqlAlterInstance;
 import org.apache.calcite.sql.SqlAlterJoinGroup;
 import org.apache.calcite.sql.SqlAlterProcedure;
 import org.apache.calcite.sql.SqlAlterRule;
@@ -251,6 +260,9 @@
 import org.apache.calcite.sql.SqlCallBinding;
 import org.apache.calcite.sql.SqlChangeConsensusRole;
 import org.apache.calcite.sql.SqlCharStringLiteral;
+import org.apache.calcite.sql.SqlCheckColumnarIndex;
+import org.apache.calcite.sql.SqlClearFileStorage;
+import org.apache.calcite.sql.SqlConvertAllSequences;
 import org.apache.calcite.sql.SqlCreate;
 import org.apache.calcite.sql.SqlCreateDatabase;
 import org.apache.calcite.sql.SqlCreateFileStorage;
@@ -265,6 +277,7 @@
 import org.apache.calcite.sql.SqlCreateTable;
 import org.apache.calcite.sql.SqlCreateTableGroup;
 import org.apache.calcite.sql.SqlCreateTrigger;
+import org.apache.calcite.sql.SqlCreateView;
 import org.apache.calcite.sql.SqlDal;
 import org.apache.calcite.sql.SqlDataTypeSpec;
 import org.apache.calcite.sql.SqlDdl;
@@ -283,12 +296,15 @@
 import org.apache.calcite.sql.SqlDropTable;
 import org.apache.calcite.sql.SqlDropTableGroup;
 import org.apache.calcite.sql.SqlDropTrigger;
+import org.apache.calcite.sql.SqlDropView;
 import org.apache.calcite.sql.SqlDynamicParam;
 import org.apache.calcite.sql.SqlExplainFormat;
 import org.apache.calcite.sql.SqlExplainLevel;
 import org.apache.calcite.sql.SqlFunction;
 import org.apache.calcite.sql.SqlFunctionCategory;
 import org.apache.calcite.sql.SqlIdentifier;
+import org.apache.calcite.sql.SqlImportDatabase;
+import org.apache.calcite.sql.SqlImportSequence;
 import org.apache.calcite.sql.SqlIndexDefinition;
 import org.apache.calcite.sql.SqlInsert;
 import org.apache.calcite.sql.SqlInspectIndex;
@@ -333,6 +349,7 @@
 import org.apache.calcite.sql.SqlUtil;
 import org.apache.calcite.sql.SqlUtil.SpecialIdentiFiers;
 import org.apache.calcite.sql.SqlValuesOperator;
+import org.apache.calcite.sql.SqlValuesTableSource;
 import org.apache.calcite.sql.SqlWindow;
 import org.apache.calcite.sql.SqlWith;
 import org.apache.calcite.sql.SqlWithItem;
@@ -497,7 +514,7 @@ public SqlToRelConverter(RelOptTable.ViewExpander viewExpander, SqlValidator val
                              Prepare.CatalogReader catalogReader, RelOptPlanner planner, RexBuilder rexBuilder,
                              SqlRexConvertletTable convertletTable) {
         this(viewExpander, validator, catalogReader, RelOptCluster.create(planner, rexBuilder), convertletTable,
-            Config.DEFAULT, Integer.MAX_VALUE);
+            Config.DEFAULT, Integer.MAX_VALUE, false);
     }
 
     @Deprecated // to be removed before 2.0
@@ -510,13 +527,15 @@ public SqlToRelConverter(RelOptTable.ViewExpander viewExpander, SqlValidator val
     public SqlToRelConverter(RelOptTable.ViewExpander viewExpander, SqlValidator validator,
                              Prepare.CatalogReader catalogReader, RelOptCluster cluster,
                              SqlRexConvertletTable convertletTable, Config config) {
-        this(viewExpander, validator, catalogReader, cluster, convertletTable, config, config.getInSubQueryThreshold());
+        this(viewExpander, validator, catalogReader, cluster, convertletTable, config, config.getInSubQueryThreshold(),
+            config.isRemoveDistinct());
     }
 
     /* Creates a converter. */
     public SqlToRelConverter(RelOptTable.ViewExpander viewExpander, SqlValidator validator,
                              Prepare.CatalogReader catalogReader, RelOptCluster cluster,
-                             SqlRexConvertletTable convertletTable, Config config, int inSubQueryThreshold) {
+                             SqlRexConvertletTable convertletTable, Config config, int inSubQueryThreshold,
+                             boolean isRemoveDistinct) {
         this.viewExpander = viewExpander;
         this.opTab = (validator == null) ? SqlStdOperatorTable.instance() : validator.getOperatorTable();
         this.validator = validator;
@@ -527,7 +546,9 @@ public SqlToRelConverter(RelOptTable.ViewExpander viewExpander, SqlValidator val
         this.cluster = Preconditions.checkNotNull(cluster);
         this.exprConverter = new SqlNodeToRexConverterImpl(convertletTable);
         this.explainParamCount = 0;
-        this.config = new ConfigBuilder().withConfig(config).withInSubQueryThreshold(inSubQueryThreshold).build();
+        this.config = new ConfigBuilder().withConfig(config)
+            .withInSubQueryThreshold(inSubQueryThreshold)
+            .withRemoveDistinct(isRemoveDistinct).build();
         this.relBuilder = RelFactories.LOGICAL_BUILDER.create(cluster, null);
     }
 
@@ -741,7 +762,6 @@ public RelRoot convertQuery(SqlNode query, final boolean needsValidation, final
             query = validator.validate(query);
         }
 
-        RelMetadataQuery.THREAD_PROVIDERS.set(JaninoRelMetadataProvider.of(cluster.getMetadataProvider()));
         RelNode result = convertQueryRecursive(query, top, null).rel;
         if (top) {
             if (isStream(query)) {
@@ -2155,7 +2175,7 @@ protected void convertFrom(Blackboard bb, SqlNode from) {
             SqlNode tableName = call.operand(0);
             SqlNode timeStamp = call.operand(1);
             final RexNode timestampExpr = bb.convertExpression(timeStamp);
-            this.hintBlackboard.beginAsOf(timestampExpr);
+            this.hintBlackboard.beginAsOf(Pair.of(timestampExpr, call.getOperator()));
             SqlIdentifier identifier = (SqlIdentifier) tableName;
             convertIdentifier(bb, identifier, null, identifier.indexNode, identifier.partitions);
             this.hintBlackboard.endAsOf();
@@ -2591,10 +2611,11 @@ private void convertIdentifier(Blackboard bb, SqlIdentifier id, SqlNodeList exte
         } else {
             SqlNodeList hint = this.hintBlackboard.currentHints(Util.last(table.getQualifiedName()));
             if (this.hintBlackboard.hasAsOf()) {
-                tableRel = LogicalTableScan.create(cluster, table, hint, indexNode, this.hintBlackboard.peekAsOf(),
+                Pair<RexNode, SqlOperator> asOf = this.hintBlackboard.peekAsOf();
+                tableRel = LogicalTableScan.create(cluster, table, hint, indexNode, asOf.getKey(), asOf.getValue(),
                     partitions);
             } else {
-                tableRel = LogicalTableScan.create(cluster, table, hint, indexNode, null, partitions);
+                tableRel = LogicalTableScan.create(cluster, table, hint, indexNode, null, null, partitions);
 
             }
         }
@@ -3290,7 +3311,61 @@ protected final void createAggImpl(Blackboard bb, final AggConverter aggConverte
      */
     protected RelNode createAggregate(Blackboard bb, ImmutableBitSet groupSet, ImmutableList<ImmutableBitSet> groupSets,
                                       List<AggregateCall> aggCalls) {
-        return LogicalAggregate.create(bb.root, groupSet, groupSets, aggCalls);
+
+        final List<AggregateCall> aggregateCalls = new ArrayList<>();
+        Set<Integer> nullRefs = new HashSet<>();
+        for (AggregateCall aggregateCall : aggCalls) {
+            if (groupSets.size() <= 1 && config.isRemoveDistinct()) {
+                aggregateCall = removeRedundantAggregateDistinct(
+                    aggregateCall, groupSet, bb.root, nullRefs);
+            }
+            aggregateCalls.add(aggregateCall);
+        }
+
+        RelNode relNode = bb.root;
+        if (nullRefs.size() > 0) {
+            final List<RelDataTypeField> fields = relNode.getRowType().getFieldList();
+            final List<RexNode> isNotNullOperands = new ArrayList<>();
+            for (Integer nullRef : nullRefs) {
+                RelDataType dataType = fields.get(nullRef).getType();
+                if (dataType.isNullable()) {
+                    isNotNullOperands.add(relBuilder.isNotNull(rexBuilder.makeInputRef(relNode, nullRef)));
+                }
+            }
+            if (isNotNullOperands.size() > 0) {
+                relNode = LogicalFilter.create(
+                    relNode, RexUtil.composeConjunction(rexBuilder, isNotNullOperands, true));
+            }
+        }
+        return LogicalAggregate.create(relNode, groupSet, groupSets, aggregateCalls);
+    }
+
+    private AggregateCall removeRedundantAggregateDistinct(
+        AggregateCall aggregateCall,
+        ImmutableBitSet groupSet,
+        RelNode relNode,
+        Set<Integer> nullRefs) {
+        final List<Integer> argList = aggregateCall.getArgList();
+        if (aggregateCall.isDistinct()) {
+            final RelMetadataQuery mq = relNode.getCluster().getMetadataQuery();
+            final ImmutableBitSet distinctArg = ImmutableBitSet.builder()
+                .addAll(argList)
+                .build();
+            final ImmutableBitSet columns = groupSet.union(distinctArg);
+            final Boolean alreadyUnique =
+                mq.areColumnsUnique(relNode, columns);
+            if (alreadyUnique != null && alreadyUnique) {
+                // The combined columns are already unique (e.g. a primary key), so DISTINCT is redundant.
+                if (aggregateCall.getAggregation().getKind() == SqlKind.COUNT) {
+                    // Keep DISTINCT for COUNT: dropping it would emit the physical SQL "count(a, b, c)"
+                    // for a pushed-down single table, and MySQL does not support multi-column COUNT.
+                    return aggregateCall;
+                } else {
+                    return aggregateCall.withDistinct(false);
+                }
+            }
+        }
+        return aggregateCall;
     }
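
The decision above boils down to: drop DISTINCT only when the grouped-plus-argument columns are provably unique and the aggregate is not COUNT. A minimal sketch of that predicate, with illustrative names:

    import org.apache.calcite.sql.SqlKind;

    final class DistinctElimination {
        // Mirrors the core test in removeRedundantAggregateDistinct above.
        static boolean shouldDropDistinct(Boolean columnsAlreadyUnique, SqlKind aggKind) {
            return Boolean.TRUE.equals(columnsAlreadyUnique) && aggKind != SqlKind.COUNT;
        }
    }
    // With `id` the primary key of t:
    //   SELECT SUM(DISTINCT id) FROM t   -> DISTINCT dropped
    //   SELECT COUNT(DISTINCT id) FROM t -> DISTINCT kept (avoids multi-column COUNT downstream)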
 
     public RexDynamicParam convertDynamicParam(final SqlDynamicParam dynamicParam) {
@@ -3481,8 +3556,8 @@ protected RelRoot convertQueryRecursive(SqlNode query, boolean top, RelDataType
             return RelRoot.of(convertSequenceDdl((SqlSequence) query), kind);
         case ALTER_RULE:
             return RelRoot.of(convertAlterRule((SqlAlterRule) query), kind);
-        case CREATE_VIEW:
-        case DROP_VIEW:
+        case CHECK_COLUMNAR_INDEX:
+            return RelRoot.of(convertCheckColumnarIndex((SqlCheckColumnarIndex) query), kind);
         case CHECK_GLOBAL_INDEX:
         case REBALANCE:
             return RelRoot.of(convertGenericDdl((SqlDdl) query), kind);
@@ -3508,6 +3583,10 @@ protected RelRoot convertQueryRecursive(SqlNode query, boolean top, RelDataType
             return RelRoot.of(convertAlterDatabase((SqlAlterDatabase) query), kind);
         case DROP_DATABASE:
             return RelRoot.of(convertDropDatabase((SqlDropDatabase) query), kind);
+        case CREATE_VIEW:
+            return RelRoot.of(convertCreateView((SqlCreateView) query), kind);
+        case DROP_VIEW:
+            return RelRoot.of(convertDropView((SqlDropView) query), kind);
         case CREATE_JAVA_FUNCTION:
             return RelRoot.of(convertCreateJavaFunction((SqlCreateJavaFunction) query), kind);
         case DROP_JAVA_FUNCTION:
@@ -3550,6 +3629,8 @@ protected RelRoot convertQueryRecursive(SqlNode query, boolean top, RelDataType
             return RelRoot.of(convertUnArchive((SqlUnArchive) query), kind);
         case DROP_FILESTORAGE:
             return RelRoot.of(convertDropFileStorage((SqlDropFileStorage) query), kind);
+        case CLEAR_FILESTORAGE:
+            return RelRoot.of(convertClearFileStorage((SqlClearFileStorage) query), kind);
         case CREATE_FILESTORAGE:
             return RelRoot.of(convertCreateFileStorage((SqlCreateFileStorage) query), kind);
         case CREATE_JOINGROUP:
@@ -3566,10 +3647,18 @@ protected RelRoot convertQueryRecursive(SqlNode query, boolean top, RelDataType
             }
         case INSPECT_INDEX:
             return RelRoot.of(convertInspectIndex((SqlInspectIndex) query), kind);
+        case IMPORT_DATABASE:
+            return RelRoot.of(convertImportDatabase((SqlImportDatabase) query), kind);
+        case IMPORT_SEQUENCE:
+            return RelRoot.of(convertImportSequence((SqlImportSequence) query), kind);
         case ANALYZE_TABLE:
             if (query instanceof SqlAnalyzeTableDdl) {
                 return RelRoot.of(convertAnalyzeTable((SqlAnalyzeTableDdl) query), kind);
             }
+        case CONVERT_ALL_SEQUENCES:
+            return RelRoot.of(convertConvertAllSequence((SqlConvertAllSequences) query), kind);
+        case ALTER_INSTANCE:
+            return RelRoot.of(convertAlterInstance((SqlAlterInstance) query), kind);
         default:
             if (kind.belongsTo(SqlKind.DAL)) {
                 return RelRoot.of(convertDal((SqlDal) query), kind);
@@ -3615,38 +3704,40 @@ private RelNode convertCreateIndex(SqlCreateIndex query) {
         return CreateIndex.create(getCluster(), query, query.getOperandList().get(0), rexNodesForPartition);
     }
 
-    private RelNode convertCreateStoragePool(SqlCreate query){
-        if(query instanceof SqlCreateStoragePool) {
-            SqlNode storagePool = ((SqlCreateStoragePool)query).getStoragePool();
+    private RelNode convertCreateStoragePool(SqlCreate query) {
+        if (query instanceof SqlCreateStoragePool) {
+            SqlNode storagePool = ((SqlCreateStoragePool) query).getStoragePool();
             SqlNode dnList = ((SqlCreateStoragePool) query).getDnList();
-            SqlNode tableName = ((SqlCreateStoragePool)query).getName();
-            SqlNode undeletableDn = ((SqlCreateStoragePool)query).getUndeletableDn();
+            SqlNode tableName = ((SqlCreateStoragePool) query).getName();
+            SqlNode undeletableDn = ((SqlCreateStoragePool) query).getUndeletableDn();
             return CreateStoragePool.create(getCluster(), query, tableName, storagePool, dnList, undeletableDn);
-        }else{
+        } else {
             throw new AssertionError("not suitable create storage pool statement");
         }
     }
 
-    private RelNode convertDropStoragePool(SqlCreate query){
-        if(query instanceof SqlDropStoragePool) {
-            SqlNode storagePool = ((SqlDropStoragePool)query).getStoragePool();
-            SqlNode tableName = ((SqlDropStoragePool)query).getName();
+    private RelNode convertDropStoragePool(SqlCreate query) {
+        if (query instanceof SqlDropStoragePool) {
+            SqlNode storagePool = ((SqlDropStoragePool) query).getStoragePool();
+            SqlNode tableName = ((SqlDropStoragePool) query).getName();
             return DropStoragePool.create(getCluster(), query, tableName, storagePool);
-        }else{
+        } else {
             throw new AssertionError("not suitable drop storage pool statement");
         }
     }
-    private RelNode convertAlterStoragePool(SqlCreate query){
-        if(query instanceof SqlAlterStoragePool) {
-            SqlNode storagePool = ((SqlAlterStoragePool)query).getStoragePool();
+
+    private RelNode convertAlterStoragePool(SqlCreate query) {
+        if (query instanceof SqlAlterStoragePool) {
+            SqlNode storagePool = ((SqlAlterStoragePool) query).getStoragePool();
             SqlNode dnList = ((SqlAlterStoragePool) query).getDnList();
             SqlNode operation = ((SqlAlterStoragePool) query).getOperation();
             SqlNode tableName = ((SqlAlterStoragePool) query).getName();
             return AlterStoragePool.create(getCluster(), query, tableName, storagePool, dnList, operation);
-        }else{
+        } else {
             throw new AssertionError("not suitable alter storage pool statement");
         }
     }
+
     private RelNode convertAlterTable(SqlAlterTable query) {
         final RelDataType targetRowType = validator.getValidatedNodeType(query);
         assert targetRowType != null;
@@ -3789,6 +3880,13 @@ private RelNode convertDropFileStorage(SqlDropFileStorage query) {
             query.getName().toString());
     }
 
+    private RelNode convertClearFileStorage(SqlClearFileStorage query) {
+        final RelDataType targetRowType = validator.getValidatedNodeType(query);
+        assert targetRowType != null;
+        return ClearFileStorage.create(getCluster(), getCluster().traitSetOf(Convention.NONE), query, targetRowType,
+            query.getName().toString());
+    }
+
     private RelNode convertCreateFileStorage(SqlCreateFileStorage query) {
         final RelDataType targetRowType = validator.getValidatedNodeType(query);
         assert targetRowType != null;
@@ -3877,9 +3975,10 @@ private RelNode convertAlterTableSetTableGroup(SqlAlterTableSetTableGroup sqlAlt
         SqlNode tableName = sqlAlterTableSetTableGroup.getTargetTable();
         List objectNames = sqlAlterTableSetTableGroup.getObjectNames();
         boolean force = sqlAlterTableSetTableGroup.isForce();
+        boolean implicit = sqlAlterTableSetTableGroup.isImplicit();
         assert targetRowType != null;
         return AlterTableSetTableGroup.create(getCluster(), getCluster().traitSetOf(Convention.NONE),
-            sqlAlterTableSetTableGroup, targetRowType, objectNames, tableName, tableGroupName, force);
+            sqlAlterTableSetTableGroup, targetRowType, objectNames, tableName, tableGroupName, implicit, force);
     }
 
     private RelNode convertRefreshTopology(SqlRefreshTopology sqlRefreshTopology) {
@@ -3974,6 +4073,15 @@ private RelNode convertDropIndex(SqlDropIndex query) {
         return DropIndex.create(getCluster(), query, query.getOperandList().get(0));
     }
 
+    private RelNode convertCheckColumnarIndex(SqlCheckColumnarIndex query) {
+        final RelDataType targetRowType = validator.getValidatedNodeType(query);
+        assert targetRowType != null;
+
+        query = checkAndRewriteGsiName(query);
+
+        return GenericDdl.create(getCluster(), query, query.getOperandList().get(0));
+    }
+
     private RelNode convertSequenceDdl(SqlSequence query) {
         final RelDataType targetRowType = validator.getValidatedNodeType(query);
         assert targetRowType != null;
@@ -4200,6 +4308,11 @@ protected SqlShow checkAndRewriteShow(SqlShow show) {
         return show;
     }
 
+    protected SqlCheckColumnarIndex checkAndRewriteGsiName(SqlCheckColumnarIndex query) {
+        // implemented in TddlSqlToRelConverter
+        return query;
+    }
+
     protected void checkGsiColumnLen(SqlCreateTable create) {
         // implemented in TddlSqlToRelConverter
     }
@@ -4243,6 +4356,12 @@ private RelNode convertCreateTable(SqlCreateTable query) {
                 rexNodesForPartition.putAll(rexNodesForIndexPartition);
             }
         }
+        if (GeneralUtil.isNotEmpty(query.getColumnarKeys())) {
+            for (Pair<SqlIdentifier, SqlIndexDefinition> pair : query.getColumnarKeys()) {
+                Map<SqlNode, RexNode> rexNodesForIndexPartition = convertPartition(pair.getValue().getPartitioning());
+                rexNodesForPartition.putAll(rexNodesForIndexPartition);
+            }
+        }
         return CreateTable.create(getCluster(), query, query.getOperandList().get(0), query.getLikeTableName(),
             rexNodesForPartition);
     }
@@ -4680,16 +4799,46 @@ private RelNode convertAlterDatabase(SqlAlterDatabase query) {
         return AlterDatabase.create(query, targetRowType, getCluster());
     }
 
+    private RelNode convertAlterInstance(SqlAlterInstance query) {
+        final RelDataType targetRowType = validator.getValidatedNodeType(query);
+        return AlterInstance.create(query, targetRowType, getCluster());
+    }
+
     private RelNode convertInspectIndex(SqlInspectIndex query) {
         final RelDataType targetRowType = validator.getValidatedNodeType(query);
         return InspectIndex.create(query, targetRowType, getCluster());
     }
 
+    private RelNode convertImportDatabase(SqlImportDatabase query) {
+        final RelDataType targetRowType = validator.getValidatedNodeType(query);
+        return ImportDatabase.create(query, targetRowType, getCluster());
+    }
+
+    private RelNode convertImportSequence(SqlImportSequence query) {
+        final RelDataType targetRowType = validator.getValidatedNodeType(query);
+        return ImportSequence.create(query, targetRowType, getCluster());
+    }
+
+    private RelNode convertConvertAllSequence(SqlConvertAllSequences query) {
+        final RelDataType tarRowType = validator.getParameterRowType(query);
+        return ConvertAllSequences.create(query, tarRowType, getCluster());
+    }
+
     private RelNode convertDropDatabase(SqlDropDatabase query) {
         final RelDataType targetRowType = validator.getValidatedNodeType(query);
         return DropDatabase.create(query, targetRowType, getCluster());
     }
 
+    private RelNode convertCreateView(SqlCreateView query) {
+        final RelDataType targetRowType = validator.getValidatedNodeType(query);
+        return CreateView.create(query, targetRowType, getCluster());
+    }
+
+    private RelNode convertDropView(SqlDropView query) {
+        final RelDataType targetRowType = validator.getValidatedNodeType(query);
+        return DropView.create(query, targetRowType, getCluster());
+    }
+
     private RelNode convertCreateJavaFunction(SqlCreateJavaFunction query) {
         final RelDataType targetRowType = validator.getValidatedNodeType(query);
         return CreateJavaFunction.create(query, targetRowType, getCluster(),
@@ -4974,7 +5123,7 @@ protected List getUpdateSrcTables(SqlUpdate sqlUpdate) {
             if (srcNode instanceof SqlIdentifier) {
                 // Table name
                 result.add(new TableInfoNode(srcNode, srcWithAlias, ImmutableList.of(getTargetTable(srcNode))));
-            } else if (srcNode instanceof SqlSelect) {
+            } else if (srcNode instanceof SqlSelect || RelOptUtil.isUnion(srcNode)) {
                 // SubQuery
                 final List tableNames = sqlUpdate.subQueryTables(srcNode);
                 result.add(new TableInfoNode(srcNode, srcWithAlias,
@@ -5021,7 +5170,7 @@ protected TableInfo getDeleteTargetTables(SqlDelete sqlDelete, Blackboard bb, Re
                 // Table name
                 srcTableInfos.add(
                     new TableModify.TableInfoNode(srcNode, srcWithAlias, ImmutableList.of(getTargetTable(srcNode))));
-            } else if (srcNode instanceof SqlSelect) {
+            } else if (srcNode instanceof SqlSelect || RelOptUtil.isUnion(srcNode)) {
                 // SubQuery
                 final List tableNames = sqlDelete.subQueryTables(srcNode);
                 srcTableInfos.add(new TableModify.TableInfoNode(srcNode, srcWithAlias,
@@ -5352,7 +5501,7 @@ private List buildUpdateSourceInfo(SqlUpdate call, Map t instanceof SqlDynamicParam);
         RelNode valuesRel;
-        if (allIsDynamic) {
+        if (values instanceof SqlValuesTableSource) {
+            valuesRel = convertValues(bb, (SqlValuesTableSource) values, targetRowType);
+        } else if (allIsDynamic) {
             valuesRel = convertDynamicRowValues(bb, values, values.getOperandList(), true, targetRowType);
         } else {
             valuesRel = convertRowValues(bb, values, values.getOperandList(), true, targetRowType);
@@ -5920,6 +6071,26 @@ private void convertValuesImpl(Blackboard bb, SqlCall values, RelDataType target
         // ?
     }
 
+    private DynamicValues convertValues(Blackboard bb, SqlValuesTableSource values, RelDataType targetRowType) {
+        if (targetRowType == null) {
+            targetRowType = SqlTypeUtil.promoteToRowType(typeFactory, validator.getValidatedNodeType(values), null);
+        }
+        final ImmutableList.Builder<ImmutableList<RexNode>> tupleList = ImmutableList.builder();
+        for (SqlNode rowConstructor1 : values.getOperandList()) {
+            ImmutableList.Builder<RexNode> tuple = ImmutableList.builder();
+            SqlCall rowConstructor = (SqlCall) rowConstructor1;
+            for (int i = 0; i < rowConstructor.getOperandList().size(); i++) {
+                tuple.add(convertExpression(rowConstructor.getOperandList().get(i)));
+            }
+            tupleList.add(tuple.build());
+        }
+        DynamicValues dynamicValues = DynamicValues.create(
+            cluster,
+            cluster.traitSet().replace(RelDistributions.SINGLETON), // for MPP
+            targetRowType, tupleList.build());
+        return dynamicValues;
+    }
+
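
The tuple assembly above is a plain two-level builder: one immutable list per row, all rows collected into an outer immutable list. A type-generic sketch, with E standing in for RexNode:

    import com.google.common.collect.ImmutableList;
    import java.util.List;

    final class TupleAssembly {
        static <E> ImmutableList<ImmutableList<E>> tuples(List<List<E>> rows) {
            final ImmutableList.Builder<ImmutableList<E>> outer = ImmutableList.builder();
            for (List<E> row : rows) {
                outer.add(ImmutableList.copyOf(row)); // one immutable tuple per row
            }
            return outer.build();
        }
    }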
     // ~ Inner Classes
     // ----------------------------------------------------------
 
@@ -7627,6 +7798,8 @@ public interface Config {
          * usage of OR in all cases.
          */
         int getInSubQueryThreshold();
+
+        boolean isRemoveDistinct();
     }
 
     /**
@@ -7641,6 +7814,7 @@ public static class ConfigBuilder {
         private boolean explain;
         private boolean expand = true;
         private int inSubQueryThreshold = DEFAULT_IN_SUB_QUERY_THRESHOLD;
+        private boolean removeDistinct = true;
 
         private ConfigBuilder() {
         }
@@ -7656,6 +7830,7 @@ public ConfigBuilder withConfig(Config config) {
             this.explain = config.isExplain();
             this.expand = config.isExpand();
             this.inSubQueryThreshold = config.getInSubQueryThreshold();
+            this.removeDistinct = config.isRemoveDistinct();
             return this;
         }
 
@@ -7699,12 +7874,17 @@ public ConfigBuilder withInSubQueryThreshold(int inSubQueryThreshold) {
             return this;
         }
 
+        public ConfigBuilder withRemoveDistinct(boolean removeDistinct) {
+            this.removeDistinct = removeDistinct;
+            return this;
+        }
+
         /**
          * Builds a {@link Config}.
          */
         public Config build() {
             return new ConfigImpl(convertTableAccess, decorrelationEnabled, trimUnusedFields, createValuesRel, explain,
-                expand, inSubQueryThreshold);
+                expand, inSubQueryThreshold, removeDistinct);
         }
     }
 
@@ -7721,9 +7901,11 @@ private static class ConfigImpl implements Config {
         private final boolean explain;
         private final int inSubQueryThreshold;
         private final boolean expand;
+        private final boolean removeDistinct;
 
         private ConfigImpl(boolean convertTableAccess, boolean decorrelationEnabled, boolean trimUnusedFields,
-                           boolean createValuesRel, boolean explain, boolean expand, int inSubQueryThreshold) {
+                           boolean createValuesRel, boolean explain, boolean expand, int inSubQueryThreshold,
+                           boolean removeDistinct) {
             this.convertTableAccess = convertTableAccess;
             this.decorrelationEnabled = decorrelationEnabled;
             this.trimUnusedFields = trimUnusedFields;
@@ -7731,6 +7913,7 @@ private ConfigImpl(boolean convertTableAccess, boolean decorrelationEnabled, boo
             this.explain = explain;
             this.expand = expand;
             this.inSubQueryThreshold = inSubQueryThreshold;
+            this.removeDistinct = removeDistinct;
         }
 
         public boolean isConvertTableAccess() {
@@ -7760,6 +7943,10 @@ public boolean isExpand() {
         public int getInSubQueryThreshold() {
             return inSubQueryThreshold;
         }
+
+        public boolean isRemoveDistinct() {
+            return removeDistinct;
+        }
     }
 
     private static class AliasContext {
@@ -7791,7 +7978,7 @@ public static class HintBlackboard {
 
         private final Deque aliasStack = new ArrayDeque<>();
 
-        private final Deque asOfStack = new ArrayDeque<>();
+        private final Deque<Pair<RexNode, SqlOperator>> asOfStack = new ArrayDeque<>();
 
         public void beginSelect() {
             hintStack.push(new HashMap<>(2));
@@ -7813,11 +8000,11 @@ public Map currentGroups() {
             return hintStack.peek();
         }
 
-        public void beginAsOf(RexNode flashback) {
+        public void beginAsOf(Pair<RexNode, SqlOperator> flashback) {
             asOfStack.push(flashback);
         }
 
-        public RexNode endAsOf() {
+        public Pair<RexNode, SqlOperator> endAsOf() {
             return this.asOfStack.pop();
         }
 
@@ -7825,7 +8012,7 @@ public boolean hasAsOf() {
             return !asOfStack.isEmpty();
         }
 
-        public RexNode peekAsOf() {
+        public Pair<RexNode, SqlOperator> peekAsOf() {
             return this.asOfStack.peek();
         }
 
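A note on the `removeDistinct` option threaded through `Config`, `ConfigBuilder`, and `ConfigImpl` above: it defaults to `true`, is preserved by `withConfig`, and is exposed via `isRemoveDistinct()`. A minimal sketch of opting out, assuming the pre-existing `SqlToRelConverter.configBuilder()` entry point (everything else here comes from this patch):

```java
import org.apache.calcite.sql2rel.SqlToRelConverter;

class RemoveDistinctConfigSketch {
    // Builds a converter config with the new flag disabled; callers that
    // never touch withRemoveDistinct keep today's behavior (true).
    static SqlToRelConverter.Config build() {
        return SqlToRelConverter.configBuilder()
            .withRemoveDistinct(false)
            .build();
    }
}
```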
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/sql2rel/StandardConvertletTable.java b/polardbx-calcite/src/main/java/org/apache/calcite/sql2rel/StandardConvertletTable.java
index c290b7048..81a79220c 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/sql2rel/StandardConvertletTable.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/sql2rel/StandardConvertletTable.java
@@ -752,7 +752,7 @@ public RexNode convertAggregateFunction(
       SqlCall call) {
    final List<SqlNode> operands = call.getOperandList();
    final List<RexNode> exprs;
-    if (call.isCountStar()) {
+    if (call.isCountStar() || call.isCountLiteral()) {
       exprs = ImmutableList.of();
     } else {
       exprs = convertExpressionList(cx, operands,
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/tools/Programs.java b/polardbx-calcite/src/main/java/org/apache/calcite/tools/Programs.java
index edf4be589..d7df98f4d 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/tools/Programs.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/tools/Programs.java
@@ -36,6 +36,7 @@
 import org.apache.calcite.rel.core.RelFactories;
 import org.apache.calcite.rel.metadata.ChainedRelMetadataProvider;
 import org.apache.calcite.rel.metadata.DefaultRelMetadataProvider;
+import org.apache.calcite.rel.metadata.JaninoRelMetadataProvider;
 import org.apache.calcite.rel.metadata.RelMetadataProvider;
 import org.apache.calcite.rel.rules.AggregateExpandDistinctAggregatesRule;
 import org.apache.calcite.rel.rules.AggregateReduceFunctionsRule;
@@ -205,8 +206,8 @@ public RelNode run(RelOptPlanner planner, RelNode rel,
           list.add(metadataProvider);
         }
         hepPlanner.registerMetadataProviders(list);
-        RelMetadataProvider plannerChain =
-            ChainedRelMetadataProvider.of(list);
+        JaninoRelMetadataProvider plannerChain =
+            JaninoRelMetadataProvider.of(ChainedRelMetadataProvider.of(list));
         rel.getCluster().setMetadataProvider(plannerChain);
 
         hepPlanner.setRoot(rel);
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/tools/RelBuilder.java b/polardbx-calcite/src/main/java/org/apache/calcite/tools/RelBuilder.java
index ae8f5aab1..6a8bcb165 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/tools/RelBuilder.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/tools/RelBuilder.java
@@ -16,6 +16,14 @@
  */
 package org.apache.calcite.tools;
 
+import com.google.common.base.Function;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
 import org.apache.calcite.linq4j.Ord;
 import org.apache.calcite.plan.Context;
 import org.apache.calcite.plan.Contexts;
@@ -28,14 +36,33 @@
 import org.apache.calcite.rel.RelCollations;
 import org.apache.calcite.rel.RelFieldCollation;
 import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.core.*;
+import org.apache.calcite.rel.core.Aggregate;
+import org.apache.calcite.rel.core.AggregateCall;
+import org.apache.calcite.rel.core.CorrelationId;
+import org.apache.calcite.rel.core.JoinInfo;
+import org.apache.calcite.rel.core.JoinRelType;
+import org.apache.calcite.rel.core.Project;
+import org.apache.calcite.rel.core.RelFactories;
+import org.apache.calcite.rel.core.Sort;
+import org.apache.calcite.rel.core.TableModify;
+import org.apache.calcite.rel.core.TableScan;
+import org.apache.calcite.rel.core.Values;
 import org.apache.calcite.rel.logical.LogicalSemiJoin;
 import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeFactory;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
-import org.apache.calcite.rex.*;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexCorrelVariable;
+import org.apache.calcite.rex.RexExecutor;
+import org.apache.calcite.rex.RexInputRef;
+import org.apache.calcite.rex.RexLiteral;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexShuttle;
+import org.apache.calcite.rex.RexSimplify;
+import org.apache.calcite.rex.RexUtil;
 import org.apache.calcite.runtime.Hook;
 import org.apache.calcite.schema.SchemaPlus;
 import org.apache.calcite.server.CalciteServerStatement;
@@ -52,20 +79,12 @@
 import org.apache.calcite.util.ImmutableIntList;
 import org.apache.calcite.util.Litmus;
 import org.apache.calcite.util.NlsString;
+import org.apache.calcite.util.Optionality;
 import org.apache.calcite.util.Pair;
 import org.apache.calcite.util.Util;
 import org.apache.calcite.util.mapping.Mapping;
 import org.apache.calcite.util.mapping.Mappings;
 
-import com.google.common.base.Function;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-
 import javax.annotation.Nonnull;
 import java.math.BigDecimal;
 import java.util.AbstractList;
@@ -910,13 +929,14 @@ public RelBuilder scan(Iterable<String> tableNames) {
     return this;
   }
 
-  public RelBuilder flashback(RexNode flashback) {
+  public RelBuilder flashback(RexNode flashback, SqlOperator flashbackOperator) {
     if (stack.peek() == null) {
       return this;
     }
     RelNode last = stack.peek().rel;
-    if (last instanceof TableScan){
-      ((TableScan)last).setFlashback(flashback);
+    if (last instanceof TableScan) {
+      ((TableScan) last).setFlashback(flashback);
+      ((TableScan) last).setFlashbackOperator(flashbackOperator);
     }
     return this;
   }
@@ -2146,7 +2166,10 @@ private static class AggCallImpl implements AggCall {
         boolean approximate, RexNode filter,
        String alias, ImmutableList<RexNode> operands) {
       this.aggFunction = aggFunction;
-      this.distinct = distinct;
+      // If the aggregate function ignores DISTINCT,
+      // make the DISTINCT flag FALSE.
+      this.distinct = distinct
+          && aggFunction.getDistinctOptionality() != Optionality.IGNORED;
       this.approximate = approximate;
       this.filter = filter;
       this.alias = alias;
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/util/JsonBuilder.java b/polardbx-calcite/src/main/java/org/apache/calcite/util/JsonBuilder.java
index 5a892f35d..7b24c4c55 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/util/JsonBuilder.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/util/JsonBuilder.java
@@ -19,9 +19,11 @@
 import org.apache.calcite.avatica.util.Spaces;
 
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 /**
  * Builder for JSON documents (represented as {@link List}, {@link Map},
@@ -87,6 +89,8 @@ public void append(StringBuilder buf, int indent, Object o) {
     } else if (o instanceof List) {
       //noinspection unchecked
       appendList(buf, indent, (List) o);
+    } else if (o instanceof Set) {
+      appendList(buf, indent, (Set) o);
     } else if (o instanceof String) {
       buf.append('"')
           .append(
@@ -127,7 +131,7 @@ private void newline(StringBuilder buf, int indent) {
   }
 
   private void appendList(
-      StringBuilder buf, int indent, List<?> list) {
+      StringBuilder buf, int indent, Collection<?> list) {
     if (list.isEmpty()) {
       buf.append("[]");
       return;
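The `JsonBuilder` change above widens `appendList` from `List` to `Collection` so that `Set` values serialize like lists instead of falling through to the unknown-type branch. A hedged usage sketch; `map()` and `toJsonString()` are the pre-existing JsonBuilder API:

```java
import com.google.common.collect.ImmutableSet;
import org.apache.calcite.util.JsonBuilder;

import java.util.Map;

class JsonBuilderSetSketch {
    static String demo() {
        JsonBuilder json = new JsonBuilder();
        Map<String, Object> map = json.map();
        // With this patch a Set renders as a JSON array, e.g. {"cols": ["a", "b"]}.
        map.put("cols", ImmutableSet.of("a", "b"));
        return json.toJsonString(map);
    }
}
```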
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/util/Optionality.java b/polardbx-calcite/src/main/java/org/apache/calcite/util/Optionality.java
new file mode 100644
index 000000000..2e34dbd2e
--- /dev/null
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/util/Optionality.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.calcite.util;
+
+/**
+ * Four states that describe whether a particular behavior or
+ * property is allowed and/or not allowed.
+ */
+public enum Optionality {
+  /** A property is mandatory if an instance must possess it;
+   * it is an error if it does not. */
+  MANDATORY,
+
+  /** A property is optional if an instance may or may not possess it;
+   * neither state is an error. */
+  OPTIONAL,
+
+  /** A property is ignored if an instance may or may not possess it;
+   * if it possesses the property, the effect is as if it does not. */
+  IGNORED,
+
+  /** A property is forbidden if an instance must not possess it;
+   * it is an error if the instance has the property. */
+  FORBIDDEN
+}
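`Optionality` backs the `AggCallImpl` change shown earlier in `RelBuilder`: when an aggregate function reports that it ignores DISTINCT (for example MIN, where MIN(DISTINCT x) equals MIN(x)), the DISTINCT flag is normalized away. A minimal sketch of that rule; `getDistinctOptionality()` on `SqlAggFunction` is assumed to be added elsewhere in this patch:

```java
import org.apache.calcite.sql.SqlAggFunction;
import org.apache.calcite.util.Optionality;

class DistinctNormalizationSketch {
    // Mirrors the AggCallImpl constructor: keep DISTINCT only when the
    // function does not ignore it.
    static boolean effectiveDistinct(SqlAggFunction fn, boolean requestedDistinct) {
        return requestedDistinct && fn.getDistinctOptionality() != Optionality.IGNORED;
    }
}
```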
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/util/Util.java b/polardbx-calcite/src/main/java/org/apache/calcite/util/Util.java
index de0f0f3ae..9e41b4fd9 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/util/Util.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/util/Util.java
@@ -23,6 +23,8 @@
 import org.apache.calcite.avatica.util.DateTimeUtils;
 import org.apache.calcite.avatica.util.Spaces;
 import org.apache.calcite.linq4j.Ord;
+import org.apache.calcite.plan.volcano.VolcanoPlanner;
+import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.Project;
 import org.apache.calcite.runtime.CalciteException;
 import org.apache.calcite.sql.SqlAggFunction;
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/util/trace/RuntimeStatisticsSketch.java b/polardbx-calcite/src/main/java/org/apache/calcite/util/trace/RuntimeStatisticsSketch.java
index 74079a35e..278251350 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/util/trace/RuntimeStatisticsSketch.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/util/trace/RuntimeStatisticsSketch.java
@@ -47,6 +47,8 @@ public class RuntimeStatisticsSketch {
      */
     protected final long rowCount;
 
+    protected final long runtimeFilteredRowCount;
+
     /**
      * Sum of produced bytes;
      */
@@ -68,11 +70,13 @@ public class RuntimeStatisticsSketch {
     protected final int spillCnt;
 
     public RuntimeStatisticsSketch(double startupDuration, double duration, double workerDuration, long rowCount,
+                                   long runtimeFilteredRowCount,
                                    long outputBytes, long memory, int instances, int spillCnt) {
         this.startupDuration = startupDuration;
         this.duration = duration;
         this.workerDuration = workerDuration;
         this.rowCount = rowCount;
+        this.runtimeFilteredRowCount = runtimeFilteredRowCount;
         this.outputBytes = outputBytes;
         this.memory = memory;
         this.instances = instances;
@@ -95,6 +99,10 @@ public long getRowCount() {
         return rowCount;
     }
 
+    public long getRuntimeFilteredRowCount() {
+        return runtimeFilteredRowCount;
+    }
+
     public long getMemory() {
         return memory;
     }
diff --git a/polardbx-calcite/src/main/java/org/apache/calcite/util/trace/RuntimeStatisticsSketchExt.java b/polardbx-calcite/src/main/java/org/apache/calcite/util/trace/RuntimeStatisticsSketchExt.java
index ff099e809..14da52439 100644
--- a/polardbx-calcite/src/main/java/org/apache/calcite/util/trace/RuntimeStatisticsSketchExt.java
+++ b/polardbx-calcite/src/main/java/org/apache/calcite/util/trace/RuntimeStatisticsSketchExt.java
@@ -160,13 +160,13 @@ public class RuntimeStatisticsSketchExt extends RuntimeStatisticsSketch {
     protected boolean hasInputOperator;
 
     public RuntimeStatisticsSketchExt(long startupDurationNano, long durationNano, long closeDurationNano,
-                                      long workerDurationNano, long rowCount, long outputBytes, long memory,
+                                      long workerDurationNano, long rowCount, long runtimeFilteredCount, long outputBytes, long memory,
                                       int instances, boolean hasInputOperator, int spillCnt) {
 
         super((double) startupDurationNano / 1e9,
             (double) durationNano / 1e9,
             (double) workerDurationNano / 1e9,
-            rowCount,
+            rowCount, runtimeFilteredCount,
             outputBytes,
             memory,
             instances,
diff --git a/polardbx-calcite/src/test/java/org/apache/calcite/sql/test/BuildForeignKeyNameTest.java b/polardbx-calcite/src/test/java/org/apache/calcite/sql/test/BuildForeignKeyNameTest.java
new file mode 100644
index 000000000..ea4dffee5
--- /dev/null
+++ b/polardbx-calcite/src/test/java/org/apache/calcite/sql/test/BuildForeignKeyNameTest.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql.test;
+
+import org.apache.calcite.sql.SqlCreateTable;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.HashSet;
+
+public class BuildForeignKeyNameTest {
+
+    @Test
+    public void testBuildForeignKeyName() {
+        String fkName1 = "tab_ibfk_1";
+        String fkName2 = " tab_ibfk_2";
+        String fkName3 = " a tab_ibfk_3";
+
+        Assert.assertEquals("`tab_ibfk_1`", SqlCreateTable.buildForeignKeyName(fkName1, new HashSet<>(), ""));
+        Assert.assertEquals("` tab_ibfk_2`", SqlCreateTable.buildForeignKeyName(fkName2, new HashSet<>(), ""));
+        Assert.assertEquals("` a tab_ibfk_3`", SqlCreateTable.buildForeignKeyName(fkName3, new HashSet<>(), ""));
+    }
+}
diff --git a/polardbx-calcite/src/test/java/org/apache/calcite/sql/test/SqlTypeNameTest.java b/polardbx-calcite/src/test/java/org/apache/calcite/sql/test/SqlTypeNameTest.java
index b8f624390..262012405 100644
--- a/polardbx-calcite/src/test/java/org/apache/calcite/sql/test/SqlTypeNameTest.java
+++ b/polardbx-calcite/src/test/java/org/apache/calcite/sql/test/SqlTypeNameTest.java
@@ -364,6 +364,20 @@ public class SqlTypeNameTest {
         null,
         tn);
   }
+
+  @Test public void testSigned() {
+    SqlTypeName tn = SqlTypeName.get("SIGNED");
+    assertEquals(SqlTypeName.SIGNED, tn);
+
+    tn = SqlTypeName.get("SIGNED INTEGER");
+    assertEquals(SqlTypeName.SIGNED, tn);
+
+    tn = SqlTypeName.get("UNSIGNED");
+    assertEquals(SqlTypeName.UNSIGNED, tn);
+
+    tn = SqlTypeName.get("UNSIGNED INTEGER");
+    assertEquals(SqlTypeName.UNSIGNED, tn);
+  }
 }
 
 // End SqlTypeNameTest.java
diff --git a/polardbx-calcite/src/test/java/org/apache/calcite/sql/validate/ColumnarForbidTest.java b/polardbx-calcite/src/test/java/org/apache/calcite/sql/validate/ColumnarForbidTest.java
new file mode 100644
index 000000000..53a12bbc1
--- /dev/null
+++ b/polardbx-calcite/src/test/java/org/apache/calcite/sql/validate/ColumnarForbidTest.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.calcite.sql.validate;
+
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import org.apache.calcite.sql.SqlColumnDeclaration;
+import org.apache.calcite.sql.SqlDataTypeSpec;
+import org.apache.calcite.sql.SqlIdentifier;
+import org.junit.Test;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class ColumnarForbidTest {
+
+    @Test(expected = TddlRuntimeException.class)
+    public void ForbidCitableModifyUnsupportedType() {
+        SqlIdentifier sqlIdentifier = mock(SqlIdentifier.class);
+        SqlColumnDeclaration sqlColumnDeclaration = mock(SqlColumnDeclaration.class);
+        SqlDataTypeSpec sqlDataTypeSpec = mock(SqlDataTypeSpec.class);
+
+        when(sqlColumnDeclaration.getDataType()).thenReturn(sqlDataTypeSpec);
+        when(sqlDataTypeSpec.getTypeName()).thenReturn(sqlIdentifier);
+        when(sqlIdentifier.getLastName()).thenReturn("binary");
+
+        SqlValidatorImpl.validateUnsupportedTypeWithCciWhenModifyColumn(sqlColumnDeclaration);
+    }
+
+    @Test
+    public void ForbidCitableModifySupportedType() {
+        SqlIdentifier sqlIdentifier = mock(SqlIdentifier.class);
+        SqlColumnDeclaration sqlColumnDeclaration = mock(SqlColumnDeclaration.class);
+        SqlDataTypeSpec sqlDataTypeSpec = mock(SqlDataTypeSpec.class);
+
+        when(sqlColumnDeclaration.getDataType()).thenReturn(sqlDataTypeSpec);
+        when(sqlDataTypeSpec.getTypeName()).thenReturn(sqlIdentifier);
+        when(sqlIdentifier.getLastName()).thenReturn("char");
+
+        SqlValidatorImpl.validateUnsupportedTypeWithCciWhenModifyColumn(sqlColumnDeclaration);
+    }
+}
diff --git a/polardbx-calcite/src/test/java/org/apache/calcite/tools/PlannerTest.java b/polardbx-calcite/src/test/java/org/apache/calcite/tools/PlannerTest.java
index d76b54733..8111846e1 100644
--- a/polardbx-calcite/src/test/java/org/apache/calcite/tools/PlannerTest.java
+++ b/polardbx-calcite/src/test/java/org/apache/calcite/tools/PlannerTest.java
@@ -1026,7 +1026,7 @@ public RelDataType deriveType(SqlValidator validator,
         SqlValidatorScope scope, SqlCall call) {
       // Check for COUNT(*) function.  If it is we don't
       // want to try and derive the "*"
-      if (call.isCountStar()) {
+      if (call.isCountStar() || call.isCountLiteral()) {
         return validator.getTypeFactory().createSqlType(SqlTypeName.BIGINT);
       }
       return super.deriveType(validator, scope, call);
diff --git a/polardbx-common/pom.xml b/polardbx-common/pom.xml
index 865607db5..9910129de 100644
--- a/polardbx-common/pom.xml
+++ b/polardbx-common/pom.xml
@@ -4,7 +4,7 @@
     <parent>
         <groupId>com.alibaba.polardbx</groupId>
         <artifactId>polardbx</artifactId>
-        <version>5.4.18-SNAPSHOT</version>
+        <version>${revision}</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
     <artifactId>polardbx-common</artifactId>
@@ -12,6 +12,7 @@
     <name>${project.artifactId} module for polardbx ${project.version}</name>

     <dependencies>
+
         <dependency>
             <groupId>commons-lang</groupId>
             <artifactId>commons-lang</artifactId>
@@ -66,13 +67,6 @@
             <scope>test</scope>
         </dependency>

-        <!-- ... -->
-        <dependency>
-            <groupId>junit</groupId>
-            <artifactId>junit</artifactId>
-            <scope>test</scope>
-        </dependency>
-
         <dependency>
             <groupId>org.openjdk.jol</groupId>
             <artifactId>jol-core</artifactId>
@@ -95,6 +89,11 @@
             <artifactId>bcprov-jdk15on</artifactId>
             <version>${bouncycastle.version}</version>
         </dependency>
+        <dependency>
+            <groupId>org.bouncycastle</groupId>
+            <artifactId>bcpkix-jdk15on</artifactId>
+            <version>${bouncycastle.version}</version>
+        </dependency>

         <dependency>
             <groupId>org.yaml</groupId>
@@ -122,13 +121,21 @@
                     <artifactId>slf4j-simple</artifactId>
                     <groupId>org.slf4j</groupId>
                 </exclusion>
+                <exclusion>
+                    <groupId>org.apache.arrow</groupId>
+                    <artifactId>arrow-vector</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.apache.arrow</groupId>
+                    <artifactId>arrow-memory-netty</artifactId>
+                </exclusion>
             </exclusions>
         </dependency>

         <dependency>
             <groupId>org.apache.hadoop</groupId>
             <artifactId>hadoop-client</artifactId>
-            <version>${hadoop-client.version}</version>
+            <version>${hadoop.version}</version>
             <exclusions>
                 <exclusion>
                     <groupId>commons-logging</groupId>
@@ -162,10 +169,6 @@
         </dependency>


-        <dependency>
-            <groupId>com.google.protobuf</groupId>
-            <artifactId>protobuf-java</artifactId>
-        </dependency>
         <dependency>
             <groupId>com.google.protobuf</groupId>
             <artifactId>protobuf-java</artifactId>
@@ -174,6 +177,22 @@
             <groupId>com.emc.ecs</groupId>
             <artifactId>nfs-client</artifactId>
         </dependency>
+
+        <dependency>
+            <groupId>org.roaringbitmap</groupId>
+            <artifactId>RoaringBitmap</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.github.ben-manes.caffeine</groupId>
+            <artifactId>caffeine</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.mockito</groupId>
+            <artifactId>mockito-inline</artifactId>
+            <scope>test</scope>
+        </dependency>
+
     </dependencies>
     <build>
         <plugins>
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/ColumnarTableOptions.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/ColumnarTableOptions.java
new file mode 100644
index 000000000..cee81209c
--- /dev/null
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/ColumnarTableOptions.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.common;
+
+public class ColumnarTableOptions {
+    public static final String DICTIONARY_COLUMNS = "DICTIONARY_COLUMNS";
+}
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/Engine.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/Engine.java
index f02dd92bc..21639977a 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/Engine.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/Engine.java
@@ -20,6 +20,7 @@
 import com.alibaba.polardbx.common.utils.TStringUtil;
 
 public enum Engine {
+
     INNODB,
     MRG_MYISAM,
     BLACKHOLE,
@@ -31,8 +32,12 @@ public enum Engine {
     LOCAL_DISK,
     EXTERNAL_DISK,
     S3,
+    ABS,
     OSS,
-    NFS;
+    NFS,
+    MEMORY;
+
+    public static final Engine DEFAULT_COLUMNAR_ENGINE = OSS;
 
     public static Engine of(String engineName) {
         if (TStringUtil.isEmpty(engineName)) {
@@ -45,6 +50,22 @@ public static Engine of(String engineName) {
         }
     }
 
+    public static boolean hasCache(Engine engine) {
+        if (engine == null) {
+            return false;
+        }
+        switch (engine) {
+        case OSS:
+        case EXTERNAL_DISK:
+        case NFS:
+        case S3:
+        case ABS:
+            return true;
+        default:
+            return false;
+        }
+    }
+
     public static boolean isFileStore(Engine engine) {
         if (engine == null) {
             return false;
@@ -55,6 +76,24 @@ public static boolean isFileStore(Engine engine) {
         case EXTERNAL_DISK:
         case S3:
         case NFS:
+        case ABS:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    public static boolean supportColumnar(Engine engine) {
+        if (engine == null) {
+            return false;
+        }
+        switch (engine) {
+        case OSS:
+        case LOCAL_DISK:
+        case EXTERNAL_DISK:
+        case NFS:
+        case S3:
+        case ABS:
             return true;
         default:
             return false;
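For orientation, a hedged sketch of how the widened classification helpers treat the new `ABS` engine (`Engine.of` and the helpers are exactly those defined above):

```java
import com.alibaba.polardbx.common.Engine;

class EngineClassificationSketch {
    static void demo() {
        Engine e = Engine.of("ABS");
        // ABS joins the file-store engines, is cacheable, and is eligible
        // as a columnar storage target.
        boolean fileStore = Engine.isFileStore(e);     // true
        boolean cached = Engine.hasCache(e);           // true
        boolean columnar = Engine.supportColumnar(e);  // true
    }
}
```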
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/IInnerConnection.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/IInnerConnection.java
new file mode 100644
index 000000000..57fd1e89e
--- /dev/null
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/IInnerConnection.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.common;
+
+import com.alibaba.polardbx.common.jdbc.ITransactionPolicy;
+
+import java.util.function.Consumer;
+
+public interface IInnerConnection {
+    ITransactionPolicy getTrxPolicy();
+
+    void setTrxPolicy(ITransactionPolicy trxPolicy);
+
+    void addExecutionContextInjectHook(Consumer hook);
+
+    void clearExecutionContextInjectHooks();
+
+    void setTimeZone(String timeZone);
+}
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/IInnerConnectionManager.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/IInnerConnectionManager.java
new file mode 100644
index 000000000..798c4fe70
--- /dev/null
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/IInnerConnectionManager.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.common;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+
+public interface IInnerConnectionManager {
+    Connection getConnection() throws SQLException;
+
+    Connection getConnection(String schema) throws SQLException;
+}
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/IOrderInvariantHash.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/IOrderInvariantHash.java
new file mode 100644
index 000000000..5ada94b73
--- /dev/null
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/IOrderInvariantHash.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.common;
+
+public interface IOrderInvariantHash {
+    IOrderInvariantHash add(long x);
+
+    Long getResult();
+}
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/OrderInvariantHasher.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/OrderInvariantHasher.java
index 2325f0a30..230bb566c 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/OrderInvariantHasher.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/OrderInvariantHasher.java
@@ -16,7 +16,7 @@
 
 package com.alibaba.polardbx.common;
 
-public class OrderInvariantHasher {
+public class OrderInvariantHasher implements IOrderInvariantHash {
     private final long p;
     private final long q;
     private final long r;
@@ -30,14 +30,17 @@ public OrderInvariantHasher() {
         result = null;
     }
 
-    public void add(long x) {
+    @Override
+    public OrderInvariantHasher add(long x) {
         if (result == null) {
             result = x;
         } else {
             result = p + q * (result + x) + r * result * x;
         }
+        return this;
     }
 
+    @Override
     public Long getResult() {
         return result;
     }
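The next file introduces `RevisableOrderInvariantHash`, a revisable variant of the fluent hasher above. Based on its javadoc, a hedged usage sketch (run with assertions enabled):

```java
import com.alibaba.polardbx.common.RevisableOrderInvariantHash;

class OrderInvariantHashSketch {
    static void demo() {
        // Order-invariant: [1, 2, 3] and [3, 2, 1] hash identically.
        RevisableOrderInvariantHash h1 = new RevisableOrderInvariantHash().add(1).add(2).add(3);
        RevisableOrderInvariantHash h2 = new RevisableOrderInvariantHash().add(3).add(2).add(1);
        assert h1.getResult().equals(h2.getResult());

        // Revisable: remove(x) undoes add(x) via a modular inverse, so the
        // hash tracks a changing multiset without rescanning it.
        h1.add(42).remove(42);
        assert h1.getResult().equals(h2.getResult());
    }
}
```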
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/RevisableOrderInvariantHash.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/RevisableOrderInvariantHash.java
new file mode 100644
index 000000000..b9b9953d9
--- /dev/null
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/RevisableOrderInvariantHash.java
@@ -0,0 +1,128 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.common;
+
+/**
+ * @author yaozhili
+ *
+ * This hash aims to hash an array containing LONG elements, and has the following properties:
+ * 1. Order-invariant: these two arrays lead to the same hash code: [1, 2, 3] and [3, 2, 1]
+ * 2. Revisable: use add(x) to calculate the hash code when adding an element into the array,
+ * and use remove(x) to calculate the hash code when removing an element.
+ *
+ * Initial hash value is 0. When two DynamicHash are added, remember to remove two 0 from the result.
+ * Similarly, when n DynamicHash are added, remember to remove n 0 from the result.
+ *
+ * See {@link RevisableOrderInvariantHashTest} for usage examples.
+ */
+public class RevisableOrderInvariantHash implements IOrderInvariantHash {
+    private static final long p = 3860031L;
+    private static final long q = 2779L;
+    private static final long r = 2L;
+    private static final long m = 1L << 31;
+    private static final long m_1 = m - 1;
+    private final ModularInverseSolver modularInverseSolver = new ModularInverseSolver();
+
+    /**
+     * Initial value is 0. When two DynamicHash are added, remember to remove one 0 from the result.
+     * Similarly, when n DynamicHash are added, remember to remove (n-1) 0 from the result.
+     */
+    private Long result = 0L;
+
+    public static long mod(long x) {
+        // Calculating x mod 2^31 equals taking the low 31 bits of x.
+        // For negative x, the result is positive.
+        return x & m_1;
+    }
+
+    @Override
+    public RevisableOrderInvariantHash add(long x) {
+        // We omit some mod operations and let some intermediate results exceed (2^31 - 1),
+        // which is ok if they do not exceed (2^63 - 1).
+        result = mod(p + q * mod((result + mod(x))) + r * mod(result * mod(x)));
+        return this;
+    }
+
+    /**
+     * May cause overflow.
+     */
+    public RevisableOrderInvariantHash addNoMod(long x) {
+        result = p + q * (result + x) + r * result * x;
+        return this;
+    }
+
+    public RevisableOrderInvariantHash remove(long x) {
+        long a = mod(result - mod(p + q * mod(x)));
+        long b = mod(q + r * mod(x));
+        if (a % b == 0) {
+            result = a / b;
+        } else {
+            // Find the modular inverse.
+            long inverse = modularInverseSolver.solve(b, m);
+            result = mod(a * inverse);
+        }
+        return this;
+    }
+
+    @Override
+    public Long getResult() {
+        return result;
+    }
+
+    public RevisableOrderInvariantHash reset() {
+        result = 0L;
+        return this;
+    }
+
+    public RevisableOrderInvariantHash reset(long value) {
+        result = value;
+        return this;
+    }
+
+    public static class ModularInverseSolver {
+        /**
+         * i and j are used for calculating the modular inverse.
+         */
+        private long i;
+        private long j;
+
+        public long solve(long i0, long j0) {
+            this.i = i0;
+            this.j = j0;
+            extendedEuclidean();
+            return this.i;
+        }
+
+        private void extendedEuclidean() {
+            if (0 == j) {
+                i = 1;
+            } else {
+                // i, j are the input of this level.
+                long i0 = i, j0 = j;
+                i = j0;
+                j = i0 % j0;
+                extendedEuclidean();
+                // i, j are the output of the next level.
+                long i1 = i, j1 = j;
+                // i, j are the output of this level.
+                i = j1;
+                j = i1 - (i0 / j0) * j1;
+            }
+        }
+    }
+}
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/TddlConstants.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/TddlConstants.java
index 171079f6c..99ca31c9a 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/TddlConstants.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/TddlConstants.java
@@ -82,4 +82,10 @@ public class TddlConstants {
     public static final String SQL_MODE = "sql_mode";

     public static final String FOREIGN_KEY_PREFIX = "";
+
+    public static final int LONG_ENOUGH_TIMEOUT_FOR_DDL_ON_XPROTO_CONN = 7 * 24 * 60 * 60 * 1000;
+
+    public static final String BLACK_LIST_CONF = "BLACK_LIST_CONF";
+
+    public static final String ENABLE_JAVA_UDF = "ENABLE_JAVA_UDF";
 }
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/CdcConstants.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/CdcConstants.java
index 20b7a95b9..99a57a7e2 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/CdcConstants.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/CdcConstants.java
@@ -23,4 +23,6 @@ public interface CdcConstants {
     int FAILURE_CODE = 500;

     int SUCCESS_CODE = 200;
+
+    String CONFIG_KEY_PREFIX = "cdc_";
 }
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/CdcDDLContext.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/CdcDDLContext.java
index ba1e6a6e0..2a8fcf344 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/CdcDDLContext.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/CdcDDLContext.java
@@ -16,32 +16,54 @@
 package com.alibaba.polardbx.common.cdc;

+import com.alibaba.polardbx.common.ddl.Job;
 import com.alibaba.polardbx.common.ddl.newengine.DdlType;
 import com.alibaba.polardbx.common.utils.Pair;
 import lombok.AllArgsConstructor;
 import lombok.Getter;
+import lombok.RequiredArgsConstructor;
+import lombok.Setter;

 import java.util.Map;
 import java.util.Set;

+/**
+ * Created by ziyang.lb
+ **/
+@RequiredArgsConstructor
 @AllArgsConstructor
 @Getter
+@Setter
 public class CdcDDLContext {
     private final String schemaName;
     private final String tableName;
     private final String sqlKind;
     private final String ddlSql;
-    private final DdlVisibility visibility;
-
+    private final CdcDdlMarkVisibility visibility;
+    /**
+     * Both the old and the new DDL engine carry a jobId.
+     */
     private final Long jobId;
-
+    /**
+     * Used by the new DDL engine only.
+     */
     private final Long taskId;
     private final DdlType ddlType;
+    private final boolean newDdlEngine;
     private final Map<String, Object> extendParams;
-
-    private final boolean isRefreshTableMetaInfo;
-
+    /**
+     * Used by the old DDL engine only.
+     */
+    private final Job job;
+    /**
+     * Whether topology metadata needs to be refreshed when marking the DDL.
+     */
+    private final boolean refreshTableMetaInfo;
+    /**
+     * New topology metadata.
+     */
     private final Map<String, Set<String>> newTableTopology;
     private final Pair tablesExtInfoPair;
+    private Long versionId;
 }
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/CdcDdlMarkVisibility.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/CdcDdlMarkVisibility.java
new file mode 100644
index 000000000..7692c0d50
--- /dev/null
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/CdcDdlMarkVisibility.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.common.cdc;
+
+public enum CdcDdlMarkVisibility {
+    /**
+     * Private visibility.
+     * DDL types specific to PolarDB-X; not written to the logical binlog file, i.e. replication
+     * to native MySQL and replication to PolarDB-X are both unsupported.
+     */
+    Private(0),
+    /**
+     * Public visibility.
+     * CDC converts this kind of DDL sql to its single-instance form and writes it to the logical
+     * binlog file to support replication to native MySQL; it is also written to the logical
+     * binlog file as a comment to support replication to PolarDB-X.
+     */
+    Public(1),
+    /**
+     * DDL specific to PolarDB-X (either the type or the syntax is PolarDB-X only).
+     * 1. Replication to another PolarDB-X instance is supported: written as a comment into the
+     * Query event of the logical binlog.
+     * 2. Replication to a standalone MySQL instance is not supported: not recorded in the Query
+     * event of the logical binlog.
+     */
+    Protected(2);
+
+    private final int value;
+
+    CdcDdlMarkVisibility(int value) {
+        this.value = value;
+    }
+
+    public int getValue() {
+        return value;
+    }
+}
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/CdcDdlRecord.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/CdcDdlRecord.java
new file mode 100644
index 000000000..352dcc25b
--- /dev/null
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/CdcDdlRecord.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the License);
+ * you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.common.cdc;
+
+import lombok.Data;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+@Data
+public class CdcDdlRecord {
+    public final long id;
+    public final long jobId;
+    public final String sqlKind;
+    public final String schemaName;
+    public final String tableName;
+    public final String gmtCreated;
+    public final String ddlSql;
+    public final String metaInfo;
+    public final int visibility;
+    public final String ext;
+
+    public static CdcDdlRecord fill(ResultSet rs) throws SQLException {
+        return new CdcDdlRecord(
+            rs.getLong("ID"),
+            rs.getLong("JOB_ID"),
+            rs.getString("SQL_KIND"),
+            rs.getString("SCHEMA_NAME"),
+            rs.getString("TABLE_NAME"),
+            rs.getString("GMT_CREATED"),
+            rs.getString("DDL_SQL"),
+            rs.getString("META_INFO"),
+            rs.getInt("VISIBILITY"),
+            rs.getString("EXT")
+        );
+    }
+}
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/CdcManagerHelper.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/CdcManagerHelper.java
index 7ce2c58c8..5aa62b859 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/CdcManagerHelper.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/CdcManagerHelper.java
@@ -16,14 +16,19 @@
 package com.alibaba.polardbx.common.cdc;

+import com.alibaba.polardbx.common.ddl.Job;
 import com.alibaba.polardbx.common.ddl.newengine.DdlType;
 import com.alibaba.polardbx.common.model.lifecycle.AbstractLifecycle;
 import com.alibaba.polardbx.common.utils.Pair;
 import com.alibaba.polardbx.common.utils.extension.ExtensionLoader;

+import java.util.List;
 import java.util.Map;
 import java.util.Set;

+/**
+ * @author ziyang.lb 2020-12-05
+ **/
 public class CdcManagerHelper {
     private static volatile CdcManagerHelper instance;
     private final ICdcManager cdcManager;
@@ -49,44 +54,138 @@ public void initialize() {
         }
     }

-    public void notifyDdl(String schemaName, String tableName, String sqlKind, String ddlSql,
-                          DdlVisibility visibility,
+    // DDL marking method for the old DDL engine
+    public void notifyDdl(String schemaName, String tableName, String sqlKind, String ddlSql, Job job,
+                          CdcDdlMarkVisibility visibility,
                           Map<String, Object> extendParams) {
-        CdcDDLContext context = new CdcDDLContext(schemaName, tableName, sqlKind, ddlSql, visibility, null, null,
-            null, extendParams, false, null, null);
+        Long jobId = job == null ? null : job.getId();
+        DdlType ddlType = job == null ? null : typeTransfer(job.getType());
+        CdcDDLContext context = new CdcDDLContext(schemaName, tableName, sqlKind, ddlSql, visibility, jobId, null,
+            ddlType, false, extendParams, job, false, null, null);
         cdcManager.notifyDdl(context);
     }

+    // DDL marking method for the new DDL engine
     public void notifyDdlNew(String schemaName, String tableName, String sqlKind, String ddlSql, DdlType ddlType,
                              Long jobId, Long taskId,
-                             DdlVisibility visibility,
+                             CdcDdlMarkVisibility visibility,
                              Map<String, Object> extendParams) {
-        CdcDDLContext context =
-            new CdcDDLContext(schemaName, tableName, sqlKind, ddlSql, visibility, jobId, taskId, ddlType,
-                extendParams, false, null, null);
+        CdcDDLContext context = new CdcDDLContext(schemaName, tableName, sqlKind, ddlSql, visibility, jobId, taskId,
+            ddlType, true, extendParams, null, false, null, null);
         cdcManager.notifyDdl(context);
     }

+    public void notifyDdlWithContext(CdcDDLContext cdcDDLContext) {
+        cdcManager.notifyDdl(cdcDDLContext);
+    }
+
+    public List<CdcDdlRecord> queryDdl(String schemaName,
+                                       String tableName,
+                                       String sqlKind,
+                                       CdcDdlMarkVisibility visibility,
+                                       Long versionId,
+                                       DdlType ddlType,
+                                       Map<String, Object> extendParams) {
+        CdcDDLContext context = new CdcDDLContext(
+            schemaName,
+            tableName,
+            sqlKind,
+            null,
+            visibility,
+            null,
+            null,
+            ddlType,
+            true,
+            extendParams,
+            null,
+            false,
+            null,
+            null,
+            versionId);
+        return cdcManager.getDdlRecord(context);
+    }
+
+    public List<CdcDdlRecord> queryDdlByJobId(Long jobId) {
+        CdcDDLContext context = new CdcDDLContext(
+            null,
+            null,
+            null,
+            null,
+            null,
+            jobId,
+            null,
+            null,
+            true,
+            null,
+            null,
+            false,
+            null,
+            null,
+            null);
+        return cdcManager.getDdlRecord(context);
+    }
+
+    // DDL marking method for the new DDL engine
     public void notifyDdlNew(String schemaName, String tableName, String sqlKind, String ddlSql, DdlType ddlType,
-                             Long jobId, Long taskId, DdlVisibility visibility, Map<String, Object> extendParams,
+                             Long jobId, Long taskId, CdcDdlMarkVisibility visibility, Map<String, Object> extendParams,
                              boolean isRefreshTableMetaInfo, Map<String, Set<String>> newTableTopology) {
-        CdcDDLContext context =
-            new CdcDDLContext(schemaName, tableName, sqlKind, ddlSql, visibility, jobId, taskId, ddlType,
-                extendParams, isRefreshTableMetaInfo, newTableTopology, null);
+        CdcDDLContext context = new CdcDDLContext(schemaName, tableName, sqlKind, ddlSql, visibility, jobId, taskId,
+            ddlType, true, extendParams, null, isRefreshTableMetaInfo, newTableTopology, null);
         cdcManager.notifyDdl(context);
     }

+    // DDL marking method for the new DDL engine
     public void notifyDdlNew(String schemaName, String tableName, String sqlKind, String ddlSql, DdlType ddlType,
-                             Long jobId, Long taskId, DdlVisibility visibility, Map<String, Object> extendParams,
+                             Long jobId, Long taskId, CdcDdlMarkVisibility visibility, Map<String, Object> extendParams,
                              boolean isRefreshTableMetaInfo, Map<String, Set<String>> newTableTopology,
                              Pair cdcMetaPair) {
         CdcDDLContext context = new CdcDDLContext(schemaName, tableName, sqlKind, ddlSql, visibility, jobId, taskId,
-            ddlType, extendParams, isRefreshTableMetaInfo, newTableTopology, cdcMetaPair);
+            ddlType, true, extendParams, null, isRefreshTableMetaInfo, newTableTopology, cdcMetaPair);
         cdcManager.notifyDdl(context);
     }

+    private DdlType typeTransfer(Job.JobType jobType) {
+        switch (jobType) {
+        case CREATE_TABLE:
+            return DdlType.CREATE_TABLE;
+        case DROP_TABLE:
+            return DdlType.DROP_TABLE;
+        case ALTER_TABLE:
+            return DdlType.ALTER_TABLE;
+        case RENAME_TABLE:
+            return DdlType.RENAME_TABLE;
+        case TRUNCATE_TABLE:
+            return DdlType.TRUNCATE_TABLE;
+
+        case CREATE_INDEX:
+            return DdlType.CREATE_INDEX;
+        case DROP_INDEX:
+            return DdlType.DROP_INDEX;
+
+        case CREATE_GLOBAL_INDEX:
+            return DdlType.CREATE_GLOBAL_INDEX;
+        case CHECK_GLOBAL_INDEX:
+            return DdlType.CHECK_GLOBAL_INDEX;
+        case CHECK_COLUMNAR_INDEX:
+            return DdlType.CHECK_COLUMNAR_INDEX;
+        case RENAME_GLOBAL_INDEX:
+            return DdlType.RENAME_GLOBAL_INDEX;
+        case DROP_GLOBAL_INDEX:
+            return DdlType.DROP_GLOBAL_INDEX;
+        case ALTER_GLOBAL_INDEX:
+            return DdlType.ALTER_GLOBAL_INDEX;
+        case MOVE_TABLE:
+            return DdlType.MOVE_DATABASE; // TODO, ziyang.lb: to be confirmed, should this be MoveDatabase?
+
+        case UNSUPPORTED:
+            return DdlType.UNSUPPORTED;
+        default:
+            throw new RuntimeException("unsupported ddltype :" + jobType);
+        }
+    }
+
     public void checkCdcBeforeStorageRemove(Set<String> storageInstIds, String identifier) {
         cdcManager.checkCdcBeforeStorageRemove(storageInstIds, identifier);
     }
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/DdlScope.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/DdlScope.java
new file mode 100644
index 000000000..105a4237d
--- /dev/null
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/DdlScope.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.common.cdc;
+
+public enum DdlScope {
+
+    Schema(0),
+
+    Instance(1);
+
+    int value;
+
+    DdlScope(int value) {
+        this.value = value;
+    }
+
+    public int getValue() {
+        return value;
+    }
+}
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/DdlVisibility.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/DdlVisibility.java
deleted file mode 100644
index 715c862b5..000000000
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/DdlVisibility.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright [2013-2021], Alibaba Group Holding Limited
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.alibaba.polardbx.common.cdc;
-
-public enum DdlVisibility {
-
-    Public(1),
-
-    Private(0);
-
-    int value;
-
-    DdlVisibility(int value) {
-        this.value = value;
-    }
-
-    public int getValue() {
-        return value;
-    }
-}
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/ICdcManager.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/ICdcManager.java
index 7bdbd9183..fbead4d47 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/ICdcManager.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/ICdcManager.java
@@ -16,6 +16,7 @@
 package com.alibaba.polardbx.common.cdc;

+import java.util.List;
 import java.util.Set;

 public interface ICdcManager {
@@ -34,14 +35,21 @@ public interface ICdcManager {
     String REFRESH_CREATE_SQL_4_PHY_TABLE = "REFRESH_CREATE_SQL_4_PHY_TABLE";

     /**
-     * Tells CdcManager whether this marks the ORGINAL_DDL
+     * Tells CdcManager whether this marks the ORIGINAL_DDL
      */
-    String USE_ORGINAL_DDL = "USE_ORGINAL_DDL";
+    String USE_ORIGINAL_DDL = "USE_ORIGINAL_DDL";

     /**
      * Marks a Foreign Keys DDL
      */
     String FOREIGN_KEYS_DDL = "FOREIGN_KEYS_DDL";

+    /**
+     * Whether to add DDL_ID into ORIGINAL_DDL
+     */
+    String USE_DDL_VERSION_ID = "USE_DDL_VERSION_ID";
+
+    Long DEFAULT_DDL_VERSION_ID = -1L;
+
     /**
      * Whether OMC is used
      */
@@ -58,6 +66,12 @@ public interface ICdcManager {
     String CDC_IS_GSI = "CDC_IS_GSI";
     String CDC_GSI_PRIMARY_TABLE = "CDC_GSI_PRIMARY_TABLE";
     String CDC_GROUP_NAME = "cdc_group_name";
+    String CDC_ACTUAL_ALTER_TABLE_GROUP_FLAG = "cdc_actual_alter_table_group_flag";
+    String CDC_TABLE_GROUP_MANUAL_CREATE_FLAG = "cdc_table_group_manual_create_flag";
+    String CDC_DDL_SCOPE = "cdc_ddl_scope";
+    String POLARDBX_SERVER_ID = "polardbx_server_id";
+    String DDL_ID = "DDL_ID";
+    String EXCHANGE_NAMES_MAPPING = "EXCHANGE_NAMES_MAPPING";

     /**
      * Sends a generic Cdc instruction
@@ -66,6 +80,14 @@ public interface ICdcManager {

     void notifyDdl(CdcDDLContext cdcDDLContext);

+    /**
+     * Queries the CDC system-table records written for DDL marking
+     */
+    List<CdcDdlRecord> getDdlRecord(CdcDDLContext cdcDdlContext);
+
+    /**
+     * Make sure cdc has received the storage change instruction before removing storage.
+     */
     void checkCdcBeforeStorageRemove(Set<String> storageInstIds, String identifier);

     enum InstructionType {
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/RplConstants.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/RplConstants.java
index 53a366fa0..aa2c3cc9f 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/RplConstants.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/RplConstants.java
@@ -27,4 +27,8 @@ public class RplConstants {
     public final static String SUB_CHANNEL = "SUB_CHANNEL";

     public final static String IS_ALL = "IS_ALL";
+
+    public static final String RPL_FULL_VALID_DB = "dbName";
+
+    public static final String RPL_FULL_VALID_TB = "tbName";
 }
diff --git a/polardbx-server/src/main/java/com/alibaba/polardbx/cdc/entity/DDLExtInfo.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/entity/DDLExtInfo.java
similarity index 75%
rename from polardbx-server/src/main/java/com/alibaba/polardbx/cdc/entity/DDLExtInfo.java
rename to polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/entity/DDLExtInfo.java
index c571329e9..5b7d24cd7 100644
--- a/polardbx-server/src/main/java/com/alibaba/polardbx/cdc/entity/DDLExtInfo.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/cdc/entity/DDLExtInfo.java
@@ -14,8 +14,9 @@
  * limitations under the License.
  */

-package com.alibaba.polardbx.cdc.entity;
+package com.alibaba.polardbx.common.cdc.entity;

+import com.alibaba.polardbx.common.cdc.DdlScope;
 import lombok.Getter;

 /**
@@ -41,7 +42,6 @@ public class DDLExtInfo {
      * mark if used OMC with the ddl sql
      */
     private Boolean useOMC;
-
     /**
      * sub sequence for one task , support for multi mark in one task
      */
@@ -50,18 +50,19 @@ public class DDLExtInfo {
      * sql mode for logic ddl event, null and empty for this field has different meaning
      */
     private String sqlMode = null;
-
     private String originalDdl = null;
-
     private Boolean isGsi = false;
-
     private String groupName = null;
-
     @Getter
     private Boolean foreignKeysDdl = false;
-
     @Getter
     private String flags2;
+    private int ddlScope = DdlScope.Schema.getValue();
+    private Boolean manuallyCreatedTableGroup;
+    private boolean enableImplicitTableGroup;
+
+    @Getter
+    private Long ddlId;

     public Long getTaskId() {
         return taskId;
@@ -143,4 +144,31 @@ public void setFlags2(String flags2) {
         this.flags2 = flags2;
     }

+    public int getDdlScope() {
+        return ddlScope;
+    }
+
+    public void setDdlScope(int ddlScope) {
+        this.ddlScope = ddlScope;
+    }
+
+    public Boolean getManuallyCreatedTableGroup() {
+        return manuallyCreatedTableGroup;
+    }
+
+    public void setManuallyCreatedTableGroup(Boolean manuallyCreatedTableGroup) {
+        this.manuallyCreatedTableGroup = manuallyCreatedTableGroup;
+    }
+
+    public boolean isEnableImplicitTableGroup() {
+        return enableImplicitTableGroup;
+    }
+
+    public void setEnableImplicitTableGroup(boolean enableImplicitTableGroup) {
+        this.enableImplicitTableGroup = enableImplicitTableGroup;
+    }
+
+    public void setDdlId(Long ddlId) {
+        this.ddlId = ddlId;
+    }
 }
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/charset/CharsetName.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/charset/CharsetName.java
index d250784a2..8c3e87f7f 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/charset/CharsetName.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/charset/CharsetName.java
@@ -16,6 +16,7 @@
 package com.alibaba.polardbx.common.charset;

+import com.alibaba.polardbx.common.utils.version.InstanceVersion;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;

@@ -418,6 +419,7 @@
         .put("utf8mb3", UTF8)
         .put("UTF8MB3", UTF8)

+        // for utf8mb4
         .put("utf8mb4", UTF8MB4)
         .put("UTF8MB4", UTF8MB4)

@@ -819,6 +821,16 @@ public int getMaxLen() {
     }

     public CollationName getDefaultCollationName() {
+        if (InstanceVersion.isMYSQL80()) {
+            // We should dynamically decide the default collation name here, because the initialization of default
+            // collation name happens before the setting of MySQL version in InstanceVersion.
+            switch (defaultCollationName) {
+            case UTF8MB4_GENERAL_CI:
+                return UTF8MB4_0900_AI_CI;
+            default:
+                return defaultCollationName;
+            }
+        }
         return defaultCollationName;
     }
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/charset/CollationName.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/charset/CollationName.java
index a7c9c4767..ae9e8574d 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/charset/CollationName.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/charset/CollationName.java
@@ -22,6 +22,7 @@
 import com.google.common.collect.ImmutableMap;

 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Optional;
@@ -54,7 +55,7 @@ public enum CollationName {
     GBK_CHINESE_CI(28, false, true),
     LATIN5_TURKISH_CI(30, false, true),
     ARMSCII8_GENERAL_CI(32, false, true),
-    UTF8_GENERAL_CI(33, false, true),
+    UTF8_GENERAL_CI(33, false, true, "utf8mb3_general_ci"),
     UCS2_GENERAL_CI(35, false, true),
     CP866_GENERAL_CI(36, false, true),
     KEYBCS2_GENERAL_CI(37, false, true),
@@ -111,32 +112,32 @@ public enum CollationName {
     GBK_BIN(87, true, false),
     LATIN5_BIN(78, true, false),
     ARMSCII8_BIN(64, true, false),
-    UTF8_BIN(83, true, false),
-    UTF8_UNICODE_CI(192, false, false),
-    UTF8_ICELANDIC_CI(193, false, false),
-    UTF8_LATVIAN_CI(194, false, false),
-    UTF8_ROMANIAN_CI(195, false, false),
-    UTF8_SLOVENIAN_CI(196, false, false),
-    UTF8_POLISH_CI(197, false, false),
-    UTF8_ESTONIAN_CI(198, false, false),
-    UTF8_SPANISH_CI(199, false, false),
-    UTF8_SWEDISH_CI(200, false, false),
-    UTF8_TURKISH_CI(201, false, false),
-    UTF8_CZECH_CI(202, false, false),
-    UTF8_DANISH_CI(203, false, false),
-    UTF8_LITHUANIAN_CI(204, false, false),
-    UTF8_SLOVAK_CI(205, false, false),
-    UTF8_SPANISH2_CI(206, false, false),
-    UTF8_ROMAN_CI(207, false, false),
-    UTF8_PERSIAN_CI(208, false, false),
-    UTF8_ESPERANTO_CI(209, false, false),
-    UTF8_HUNGARIAN_CI(210, false, false),
-    UTF8_SINHALA_CI(211, false, false),
-    UTF8_GERMAN2_CI(212, false, false),
-    UTF8_CROATIAN_CI(213, false, false),
-    UTF8_UNICODE_520_CI(214, false, false),
-    UTF8_VIETNAMESE_CI(215, false, false),
-    UTF8_GENERAL_MYSQL500_CI(223, false, false),
+    UTF8_BIN(83, true, false, "utf8mb3_bin"),
+    UTF8_UNICODE_CI(192, false, false, "utf8mb3_unicode_ci"),
+    UTF8_ICELANDIC_CI(193, false, false, "utf8mb3_icelandic_ci"),
+    UTF8_LATVIAN_CI(194, false, false, "utf8mb3_latvian_ci"),
+    UTF8_ROMANIAN_CI(195, false, false, "utf8mb3_romanian_ci"),
+    UTF8_SLOVENIAN_CI(196, false, false, "utf8mb3_slovenian_ci"),
+    UTF8_POLISH_CI(197, false, false, "utf8mb3_polish_ci"),
+    UTF8_ESTONIAN_CI(198, false, false, "utf8mb3_estonian_ci"),
+    UTF8_SPANISH_CI(199, false, false, "utf8mb3_spanish_ci"),
+    UTF8_SWEDISH_CI(200, false, false, "utf8mb3_swedish_ci"),
+    UTF8_TURKISH_CI(201, false, false, "utf8mb3_turkish_ci"),
+    UTF8_CZECH_CI(202, false, false, "utf8mb3_czech_ci"),
+    UTF8_DANISH_CI(203, false, false, "utf8mb3_danish_ci"),
+    UTF8_LITHUANIAN_CI(204, false, false, "utf8mb3_lithuanian_ci"),
+    UTF8_SLOVAK_CI(205, false, false, "utf8mb3_slovak_ci"),
+    UTF8_SPANISH2_CI(206, false, false, "utf8mb3_spanish2_ci"),
+    UTF8_ROMAN_CI(207, false, false, "utf8mb3_roman_ci"),
+    UTF8_PERSIAN_CI(208, false, false, "utf8mb3_persian_ci"),
+    UTF8_ESPERANTO_CI(209, false, false, "utf8mb3_esperanto_ci"),
+    UTF8_HUNGARIAN_CI(210, false, false, "utf8mb3_hungarian_ci"),
+    UTF8_SINHALA_CI(211, false, false, "utf8mb3_sinhala_ci"),
+    UTF8_GERMAN2_CI(212, false, false, "utf8mb3_german2_ci"),
+    UTF8_CROATIAN_CI(213, false, false, "utf8mb3_croatian_ci"),
+    UTF8_UNICODE_520_CI(214, false, false, "utf8mb3_unicode_520_ci"),
+    UTF8_VIETNAMESE_CI(215, false, false, "utf8mb3_vietnamese_ci"),
+    UTF8_GENERAL_MYSQL500_CI(223, false, false, "utf8mb3_general_mysql500_ci"),
     UCS2_BIN(90, true, false),
     UCS2_UNICODE_CI(128, false, false),
     UCS2_ICELANDIC_CI(129, false, false),
@@ -312,17 +313,14 @@ public enum CollationName {
     UTF8MB4_VI_0900_AS_CS(300, true, false, true),
     UTF8MB4_ZH_0900_AS_CS(308, true, false, true);

-    /**
-     * Collect all collation names to map so we can check them in O(1)
-     */
-    public static Map<String, CollationName> COLLATION_NAME_MAP = Arrays.stream(values())
-        .collect(Collectors.toMap(Enum::name, Function.identity()));
-
     public static ImmutableList<CollationName> POLAR_DB_X_IMPLEMENTED_COLLATION_NAMES = ImmutableList.of(
         // for utf8
         UTF8_GENERAL_CI, UTF8_BIN, UTF8_UNICODE_CI, UTF8_GENERAL_MYSQL500_CI,

-        // for utf8mb
+        // for utf8mb4 in MySQL 8.0
+        UTF8MB4_ZH_0900_AS_CS,
+
+        // for utf8mb4
         UTF8MB4_GENERAL_CI, UTF8MB4_BIN, UTF8MB4_UNICODE_CI,

         // for utf16
@@ -362,21 +360,92 @@ public enum CollationName {
     /**
      * Mapping from upper case collation string to collation name.
      */
-    static Map<String, CollationName> STRING_TO_COLLATION_MAP_UPPER = Arrays.stream(CollationName.values())
-        .collect(Collectors.toMap(k -> k.name(), k -> k));
+    static Map<String, CollationName> STRING_TO_COLLATION_MAP_UPPER = new HashMap<>();

     /**
      * Mapping from lower case collation string to collation name.
      */
-    static Map<String, CollationName> STRING_TO_COLLATION_MAP_LOWER = Arrays.stream(CollationName.values())
-        .collect(Collectors.toMap(k -> k.name(), k -> k));
+    static Map<String, CollationName> STRING_TO_COLLATION_MAP_LOWER = new HashMap<>();

     static {
         // Initialize both upper case & lower case to check implementation in O(1).
         for (CollationName collationName : POLAR_DB_X_IMPLEMENTED_COLLATION_NAMES) {
             POLAR_DB_X_IMPLEMENTED_COLLATION_NAME_STRINGS.add(collationName.name().toUpperCase());
             POLAR_DB_X_IMPLEMENTED_COLLATION_NAME_STRINGS.add(collationName.name().toLowerCase());
+
+            // If the implemented collation has an alias, also store the alias.
+            if (collationName.getAlias() != null) {
+                String aliasCollation = collationName.getAlias();
+                POLAR_DB_X_IMPLEMENTED_COLLATION_NAME_STRINGS.add(aliasCollation.toUpperCase());
+                POLAR_DB_X_IMPLEMENTED_COLLATION_NAME_STRINGS.add(aliasCollation.toLowerCase());
+            }
         }
+
+        for (CollationName collationName : CollationName.values()) {
+            // Mapping from upper case collation string to collation name.
+            STRING_TO_COLLATION_MAP_UPPER.put(collationName.name(), collationName);
+            // Mapping from lower case collation string to collation name.
+            STRING_TO_COLLATION_MAP_LOWER.put(collationName.name().toLowerCase(), collationName);
+        }
+
+        // Mapping from UTF8MB3 in mysql 8.0 style to UTF8MB4 in mysql 5.7 style.
+ STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_bin", UTF8_BIN); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_croatian_ci", UTF8_CROATIAN_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_czech_ci", UTF8_CZECH_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_danish_ci", UTF8_DANISH_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_esperanto_ci", UTF8_ESPERANTO_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_estonian_ci", UTF8_ESTONIAN_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_general_ci", UTF8_GENERAL_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_general_mysql500_ci", UTF8_GENERAL_MYSQL500_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_german2_ci", UTF8_GERMAN2_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_hungarian_ci", UTF8_HUNGARIAN_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_icelandic_ci", UTF8_ICELANDIC_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_latvian_ci", UTF8_LATVIAN_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_lithuanian_ci", UTF8_LITHUANIAN_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_persian_ci", UTF8_PERSIAN_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_polish_ci", UTF8_POLISH_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_romanian_ci", UTF8_ROMANIAN_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_roman_ci", UTF8_ROMAN_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_sinhala_ci", UTF8_SINHALA_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_slovak_ci", UTF8_SLOVAK_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_slovenian_ci", UTF8_SLOVENIAN_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_spanish2_ci", UTF8_SPANISH2_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_spanish_ci", UTF8_SPANISH_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_swedish_ci", UTF8_SWEDISH_CI); + // STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_tolower_ci", UTF8_TOLOWER_CI); // not supported yet.
+ STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_turkish_ci", UTF8_TURKISH_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_unicode_520_ci", UTF8_UNICODE_520_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_unicode_ci", UTF8_UNICODE_CI); + STRING_TO_COLLATION_MAP_LOWER.put("utf8mb3_vietnamese_ci", UTF8_VIETNAMESE_CI); + + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_BIN", UTF8_BIN); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_CROATIAN_CI", UTF8_CROATIAN_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_CZECH_CI", UTF8_CZECH_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_DANISH_CI", UTF8_DANISH_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_ESPERANTO_CI", UTF8_ESPERANTO_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_ESTONIAN_CI", UTF8_ESTONIAN_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_GENERAL_CI", UTF8_GENERAL_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_GENERAL_MYSQL500_CI", UTF8_GENERAL_MYSQL500_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_GERMAN2_CI", UTF8_GERMAN2_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_HUNGARIAN_CI", UTF8_HUNGARIAN_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_ICELANDIC_CI", UTF8_ICELANDIC_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_LATVIAN_CI", UTF8_LATVIAN_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_LITHUANIAN_CI", UTF8_LITHUANIAN_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_PERSIAN_CI", UTF8_PERSIAN_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_POLISH_CI", UTF8_POLISH_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_ROMANIAN_CI", UTF8_ROMANIAN_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_ROMAN_CI", UTF8_ROMAN_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_SINHALA_CI", UTF8_SINHALA_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_SLOVAK_CI", UTF8_SLOVAK_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_SLOVENIAN_CI", UTF8_SLOVENIAN_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_SPANISH2_CI", UTF8_SPANISH2_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_SPANISH_CI", UTF8_SPANISH_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_SWEDISH_CI", UTF8_SWEDISH_CI); + // STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_TOLOWER_CI", UTF8_TOLOWER_CI); // not supported yet. + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_TURKISH_CI", UTF8_TURKISH_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_UNICODE_520_CI", UTF8_UNICODE_520_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_UNICODE_CI", UTF8_UNICODE_CI); + STRING_TO_COLLATION_MAP_UPPER.put("UTF8MB3_VIETNAMESE_CI", UTF8_VIETNAMESE_CI); } /** @@ -394,15 +463,30 @@ public enum CollationName { */ private final boolean isMySQL80NewSupported; + /** + * The alias of this collation.
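+ * e.g. UTF8_BIN carries the alias "utf8mb3_bin", its MySQL 8.0 name (editor's example, not in the original patch).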
+ */ + private final String alias; + CollationName(int mysqlCollationId, boolean isCaseSensitive, boolean isDefaultCollation) { - this(mysqlCollationId, isCaseSensitive, isDefaultCollation, false); + this(mysqlCollationId, isCaseSensitive, isDefaultCollation, false, null); + } + + CollationName(int mysqlCollationId, boolean isCaseSensitive, boolean isDefaultCollation, String alias) { + this(mysqlCollationId, isCaseSensitive, isDefaultCollation, false, alias); } CollationName(int mysqlCollationId, boolean isCaseSensitive, boolean isDefaultCollation, boolean isMySQL80NewSupported) { + this(mysqlCollationId, isCaseSensitive, isDefaultCollation, isMySQL80NewSupported, null); + } + + CollationName(int mysqlCollationId, boolean isCaseSensitive, boolean isDefaultCollation, + boolean isMySQL80NewSupported, String alias) { this.mysqlCollationId = mysqlCollationId; this.isCaseSensitive = isCaseSensitive; this.isMySQL80NewSupported = isMySQL80NewSupported; + this.alias = alias; } public boolean isCaseSensitive() { @@ -417,6 +501,10 @@ public int getMysqlCollationId() { return mysqlCollationId; } + public String getAlias() { + return alias; + } + /** * Find the collation name enum from collation string. * @@ -595,6 +683,7 @@ public static CollationName defaultNumericCollation() { .put(new MixCollationKey(UTF8MB4_GENERAL_CI, LATIN1_SPANISH_CI), UTF8MB4_GENERAL_CI) .put(new MixCollationKey(UTF8MB4_GENERAL_CI, BIG5_BIN), UTF8MB4_GENERAL_CI) .put(new MixCollationKey(UTF8MB4_GENERAL_CI, BIG5_CHINESE_CI), UTF8MB4_GENERAL_CI) + .put(new MixCollationKey(UTF8MB4_BIN, UTF8MB4_GENERAL_CI), UTF8MB4_BIN) .put(new MixCollationKey(UTF8MB4_BIN, UTF16LE_GENERAL_CI), UTF16LE_GENERAL_CI) .put(new MixCollationKey(UTF8MB4_BIN, UTF16LE_BIN), UTF16LE_BIN) .put(new MixCollationKey(UTF8MB4_BIN, ASCII_GENERAL_CI), UTF8MB4_BIN) @@ -808,6 +897,11 @@ public static CollationName defaultNumericCollation() { .put(new MixCollationKey(BINARY, LATIN1_SPANISH_CI), BINARY) .put(new MixCollationKey(BINARY, BIG5_BIN), BINARY) .put(new MixCollationKey(BINARY, BIG5_CHINESE_CI), BINARY) + // If UTF8MB4_0900_AI_CI exists, it means the server is in MySQL 8.0 mode, and unicode_ci and general_ci + // should be compatible with it. + .put(new MixCollationKey(UTF8MB4_0900_AI_CI, UTF8MB4_GENERAL_CI), UTF8MB4_0900_AI_CI) + .put(new MixCollationKey(UTF8MB4_0900_AI_CI, UTF8MB4_UNICODE_CI), UTF8MB4_0900_AI_CI) + .put(new MixCollationKey(UTF8MB4_0900_AI_CI, UTF8MB4_BIN), UTF8MB4_BIN) .put(new MixCollationKey(UTF8MB4_0900_AI_CI, UTF8_GENERAL_CI), UTF8MB4_0900_AI_CI) .put(new MixCollationKey(UTF8MB4_0900_AI_CI, UTF8_BIN), UTF8MB4_0900_AI_CI) .put(new MixCollationKey(UTF8MB4_0900_AI_CI, UTF8_UNICODE_CI), UTF8MB4_0900_AI_CI) @@ -915,11 +1009,7 @@ public static CollationName getMixOfCollation(CollationName... collationNames) { } public static CollationName findCollationName(String collation) { - if (TStringUtil.isEmpty(collation)) { - return null; - } - return Arrays.stream(values()) - .filter(c -> c.name().equalsIgnoreCase(collation)) - .findFirst().orElse(null); + // The old logic had the risk that an alias collation like UTF8MB3_xxx would not be recognized.
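+ // Editor's note (assumption, not in the original patch): findCollationName("UTF8MB3_BIN") now
+ // resolves to UTF8_BIN via the alias maps, where the previous name-only scan over values()
+ // returned null.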
+ return of(collation, false); } } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/charset/MySQLCharsetDDLValidator.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/charset/MySQLCharsetDDLValidator.java index 90493544e..241cd60d5 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/charset/MySQLCharsetDDLValidator.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/charset/MySQLCharsetDDLValidator.java @@ -31,10 +31,10 @@ public class MySQLCharsetDDLValidator { * @return the validity */ public static boolean checkCollation(String collationNameStr) { - return Optional.ofNullable(collationNameStr) - .map(String::toUpperCase) - .map(CollationName.COLLATION_NAME_MAP::containsKey) - .orElse(false); + + CollationName collationName = CollationName.of(collationNameStr, false); + + return collationName != null; } /** @@ -56,10 +56,7 @@ public static boolean checkCharset(String charsetNameStr) { * @return TRUE if the name is valid and the collation match the charset. */ public static boolean checkCharsetCollation(String charsetNameStr, String collationNameStr) { - boolean isCollationValid = Optional.ofNullable(collationNameStr) - .map(String::toUpperCase) - .map(CollationName.COLLATION_NAME_MAP::containsKey) - .orElse(false); + boolean isCollationValid = checkCollation(collationNameStr); boolean isCharsetValid = Optional.ofNullable(charsetNameStr) .map(String::toUpperCase) diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/charset/MySQLUnicodeUtils.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/charset/MySQLUnicodeUtils.java index 5fe234ec1..13955b5f5 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/charset/MySQLUnicodeUtils.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/charset/MySQLUnicodeUtils.java @@ -16,14 +16,10 @@ package com.alibaba.polardbx.common.charset; -import com.alibaba.polardbx.common.utils.GeneralUtil; import io.airlift.slice.DynamicSliceOutput; import io.airlift.slice.Slice; import io.airlift.slice.SliceOutput; -import java.nio.charset.Charset; -import java.util.Optional; - public class MySQLUnicodeUtils { public static byte[][] LATIN1_TO_UTF8_BYTES = { {(byte) 0x00}, {(byte) 0x01}, {(byte) 0x02}, {(byte) 0x03}, {(byte) 0x04}, {(byte) 0x05}, {(byte) 0x06}, @@ -94,20 +90,45 @@ public static Slice latin1ToUtf8(byte[] latin1Bytes) { return sliceOutput.slice(); } - public static int utf8ToLatin1(byte[] buff, int begin, int end, byte[] res) { + public static boolean utf8ToLatin1(byte[] buff, int begin, int end, byte[] res) { int pos = 0; + boolean isUtf8FromLatin1 = true; while (begin < end && pos < res.length) { int uc1 = ((int) buff[begin++]) & 0xFF; // 0xxxxxxx if (uc1 < 0x80) { res[pos++] = (byte) uc1; } else if (begin < end) { + if (uc1 != 0xC2 && uc1 != 0xC3) { + isUtf8FromLatin1 = false; + } int uc2 = ((int) buff[begin++]) & 0xFF; res[pos++] = (byte) (((uc1 & 0x1f) << 6) | (uc2 ^ 0x80)); } else { res[pos++] = (byte) 0xFF; } } - return pos; + return isUtf8FromLatin1; + } + + public static boolean utf8ToLatin1(Slice slice, byte[] res, int len) { + int pos = 0, begin = 0, end = slice.length(); + boolean isUtf8FromLatin1 = true; + while (begin < end && pos < len) { + int uc1 = ((int) slice.getByte(begin++)) & 0xFF; + // 0xxxxxxx + if (uc1 < 0x80) { + res[pos++] = (byte) uc1; + } else if (begin < end) { + if (uc1 != 0xC2 && uc1 != 0xC3) { + isUtf8FromLatin1 = false; + } + int uc2 = ((int) slice.getByte(begin++)) & 0xFF; + 
res[pos++] = (byte) (((uc1 & 0x1f) << 6) | (uc2 ^ 0x80)); + } else { + res[pos++] = (byte) 0xFF; + } + } + return isUtf8FromLatin1; } } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/constants/SequenceAttribute.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/constants/SequenceAttribute.java index 2c250095b..28413ad08 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/constants/SequenceAttribute.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/constants/SequenceAttribute.java @@ -22,6 +22,14 @@ public class SequenceAttribute { public enum Type { NEW, GROUP, SIMPLE, TIME, NA; + + public static Type fromString(String typeStr) { + try { + return Type.valueOf(typeStr.toUpperCase()); + } catch (Exception e) { + return NA; + } + } } public static final int TRUE = 1; diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/constants/ServerVariables.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/constants/ServerVariables.java index 8a00745f9..426149b6b 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/constants/ServerVariables.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/constants/ServerVariables.java @@ -16,8 +16,11 @@ package com.alibaba.polardbx.common.constants; +import com.alibaba.polardbx.common.cdc.CdcConstants; import com.alibaba.polardbx.common.properties.ConnectionProperties; +import com.alibaba.polardbx.common.properties.DynamicConfig; import com.google.common.collect.ImmutableSet; +import org.apache.commons.lang.StringUtils; import java.util.HashSet; import java.util.Set; @@ -69,9 +72,22 @@ public class ServerVariables { static { variables.add("activate_all_roles_on_login"); + variables.add("admin_address"); + variables.add("admin_port"); + variables.add("admin_ssl_ca"); + variables.add("admin_ssl_capath"); + variables.add("admin_ssl_cert"); + variables.add("admin_ssl_cipher"); + variables.add("admin_ssl_crl"); + variables.add("admin_ssl_crlpath"); + variables.add("admin_ssl_key"); + variables.add("admin_tls_ciphersuites"); + variables.add("admin_tls_version"); variables.add("appliedindex_force_delay"); variables.add("audit_log_current_session"); variables.add("audit_log_filter_id"); + variables.add("authentication_policy"); + variables.add("auto_generate_certs"); variables.add("auto_increment_increment"); variables.add("auto_increment_offset"); variables.add("auto_savepoint"); @@ -88,23 +104,34 @@ public class ServerVariables { variables.add("binlog_direct_non_transactional_updates"); variables.add("binlog_encryption"); variables.add("binlog_error_action"); + variables.add("binlog_expire_logs_auto_purge"); variables.add("binlog_expire_logs_seconds"); variables.add("binlog_format"); variables.add("binlog_group_commit_sync_delay"); variables.add("binlog_group_commit_sync_no_delay_count"); + variables.add("binlog_gtid_simple_recovery"); variables.add("binlog_max_flush_queue_time"); variables.add("binlog_order_commits"); + variables.add("binlog_rotate_encryption_master_key_at_startup"); + variables.add("binlog_row_event_max_size"); variables.add("binlog_row_image"); variables.add("binlog_row_metadata"); variables.add("binlog_row_value_options"); variables.add("binlog_rows_query_key_content"); variables.add("binlog_rows_query_log_events"); variables.add("binlog_stmt_cache_size"); + variables.add("binlog_transaction_compression"); + variables.add("binlog_transaction_compression_level_zstd"); 
variables.add("binlog_transaction_dependency_history_size"); variables.add("binlog_transaction_dependency_tracking"); variables.add("binlogging_impossible_mode"); variables.add("block_encryption_mode"); + variables.add("build_id"); variables.add("bulk_insert_buffer_size"); + variables.add("caching_sha2_password_auto_generate_rsa_keys"); + variables.add("caching_sha2_password_digest_rounds"); + variables.add("caching_sha2_password_private_key_path"); + variables.add("caching_sha2_password_public_key_path"); variables.add("ccl_max_waiting_count"); variables.add("ccl_queue_bucket_count"); variables.add("ccl_queue_bucket_size"); @@ -118,13 +145,18 @@ public class ServerVariables { variables.add("character_set_system"); variables.add("character_sets_dir"); variables.add("check_proxy_users"); + variables.add("client_endpoint_ip"); + variables.add("cluster_id"); variables.add("collation_connection"); variables.add("collation_database"); variables.add("collation_server"); + variables.add("commit_pos_watcher"); variables.add("commit_pos_watcher_interval"); variables.add("completion_type"); variables.add("concurrent_insert"); variables.add("connect_timeout"); + variables.add("connection_memory_chunk_size"); + variables.add("connection_memory_limit"); variables.add("consensus_auto_leader_transfer"); variables.add("consensus_auto_leader_transfer_check_seconds"); variables.add("consensus_auto_reset_match_index"); @@ -133,12 +165,21 @@ public class ServerVariables { variables.add("consensus_checksum"); variables.add("consensus_configure_change_timeout"); variables.add("consensus_disable_election"); + variables.add("consensus_disable_fifo_cache"); variables.add("consensus_dynamic_easyindex"); + variables.add("consensus_election_timeout"); + variables.add("consensus_flow_control"); variables.add("consensus_force_promote"); + variables.add("consensus_force_recovery"); variables.add("consensus_force_sync_epoch_diff"); + variables.add("consensus_heartbeat_thread_cnt"); variables.add("consensus_index_buf_enabled"); + variables.add("consensus_io_thread_cnt"); variables.add("consensus_large_batch_ratio"); + variables.add("consensus_large_event_count_limit"); variables.add("consensus_large_event_limit"); + variables.add("consensus_large_event_size_limit"); + variables.add("consensus_large_event_split_size"); variables.add("consensus_large_trx"); variables.add("consensus_large_trx_split_size"); variables.add("consensus_leader_stop_apply"); @@ -152,6 +193,7 @@ public class ServerVariables { variables.add("consensus_max_log_size"); variables.add("consensus_max_packet_size"); variables.add("consensus_min_delay_index"); + variables.add("consensus_msg_compress_option"); variables.add("consensus_new_follower_threshold"); variables.add("consensus_old_compact_mode"); variables.add("consensus_optimistic_heartbeat"); @@ -161,16 +203,20 @@ public class ServerVariables { variables.add("consensus_prefetch_wakeup_ratio"); variables.add("consensus_prefetch_window_size"); variables.add("consensus_replicate_with_cache_log"); + variables.add("consensus_safe_for_reset_master"); variables.add("consensus_send_timeout"); variables.add("consensus_sync_follower_meta_interva"); + variables.add("consensus_worker_thread_cnt"); variables.add("consensuslog_revise"); variables.add("core_file"); + variables.add("create_admin_listener_thread"); variables.add("cte_max_recursion_depth"); variables.add("datadir"); variables.add("date_format"); variables.add("datetime_format"); variables.add("debug"); variables.add("debug_sync"); + 
variables.add("default_authentication_plugin"); variables.add("default_collation_for_utf8mb4"); variables.add("default_password_lifetime"); variables.add("default_storage_engine"); @@ -182,10 +228,14 @@ public class ServerVariables { variables.add("delayed_insert_timeout"); variables.add("delayed_queue_size"); variables.add("disable_wait_commitindex"); + variables.add("disabled_storage_engines"); variables.add("disconnect_on_expired_password"); variables.add("div_precision_increment"); variables.add("enable_appliedindex_checker"); variables.add("enable_balancer"); + variables.add("enable_changeset"); + variables.add("enable_physical_backfill"); + variables.add("enable_polarx_rpc"); variables.add("end_markers_in_json"); variables.add("enforce_gtid_consistency"); variables.add("engine_condition_pushdown"); @@ -193,6 +243,7 @@ public class ServerVariables { variables.add("error_count"); variables.add("event_scheduler"); variables.add("expire_logs_days"); + variables.add("explain_format"); variables.add("explicit_defaults_for_timestamp"); variables.add("external_user"); variables.add("flush"); @@ -211,9 +262,12 @@ public class ServerVariables { variables.add("galaxyx_max_queued_messages"); variables.add("galaxyx_socket_recv_buffer"); variables.add("galaxyx_socket_send_buffer"); + variables.add("gcn_write_event"); variables.add("general_log"); variables.add("general_log_file"); variables.add("generated_random_password_length"); + variables.add("global_connection_memory_limit"); + variables.add("global_connection_memory_tracking"); variables.add("global_query_wait_timeout"); variables.add("group_concat_max_len"); variables.add("group_replication_consistency"); @@ -233,6 +287,7 @@ public class ServerVariables { variables.add("have_query_cache"); variables.add("have_rtree_keys"); variables.add("have_ssl"); + variables.add("have_statement_timeout"); variables.add("have_symlink"); variables.add("histogram_generation_max_mem_size"); variables.add("host_cache_size"); @@ -245,13 +300,19 @@ public class ServerVariables { variables.add("ignore_builtin_innodb"); variables.add("ignore_db_dirs"); variables.add("immediate_server_version"); + variables.add("import_tablespace_iterator_interval"); + variables.add("import_tablespace_iterator_interval_ms"); variables.add("information_schema_stats_expiry"); variables.add("init_connect"); variables.add("init_file"); + variables.add("init_replica"); variables.add("init_slave"); + variables.add("inner_schema_list"); + variables.add("inner_user_list"); variables.add("innodb_adaptive_flushing"); variables.add("innodb_adaptive_flushing_lwm"); variables.add("innodb_adaptive_hash_index"); + variables.add("innodb_adaptive_hash_index_parts"); variables.add("innodb_adaptive_max_sleep_delay"); variables.add("innodb_additional_mem_pool_size"); variables.add("innodb_api_bk_commit_interval"); @@ -261,6 +322,8 @@ public class ServerVariables { variables.add("innodb_api_trx_level"); variables.add("innodb_autoextend_increment"); variables.add("innodb_autoinc_lock_mode"); + variables.add("innodb_btree_sampling"); + variables.add("innodb_buffer_pool_chunk_size"); variables.add("innodb_buffer_pool_dump_at_shutdown"); variables.add("innodb_buffer_pool_dump_now"); variables.add("innodb_buffer_pool_dump_pct"); @@ -279,25 +342,40 @@ public class ServerVariables { variables.add("innodb_cleanout_max_cleans_on_page"); variables.add("innodb_cleanout_max_scans_on_page"); variables.add("innodb_cleanout_mode"); + variables.add("innodb_cleanout_safe_mode"); + 
variables.add("innodb_cleanout_write_redo"); variables.add("innodb_cmp_per_index_enabled"); variables.add("innodb_commit_cleanout_max_rows"); variables.add("innodb_commit_concurrency"); + variables.add("innodb_commit_seq"); + variables.add("innodb_commit_snapshot_search_enabled"); variables.add("innodb_compression_failure_threshold_pct"); variables.add("innodb_compression_level"); variables.add("innodb_compression_pad_pct_max"); variables.add("innodb_concurrency_tickets"); variables.add("innodb_concurrency_tickets_hotspot"); + variables.add("innodb_current_snapshot_seq"); variables.add("innodb_data_file_path"); variables.add("innodb_data_file_purge"); variables.add("innodb_data_file_purge_all_at_shutdown"); + variables.add("innodb_data_file_purge_dir"); variables.add("innodb_data_file_purge_immediate"); variables.add("innodb_data_file_purge_interval"); variables.add("innodb_data_file_purge_max_size"); variables.add("innodb_data_home_dir"); + variables.add("innodb_ddl_buffer_size"); + variables.add("innodb_ddl_threads"); variables.add("innodb_deadlock_detect"); + variables.add("innodb_dedicated_server"); variables.add("innodb_default_row_format"); + variables.add("innodb_directories"); variables.add("innodb_disable_sort_file_cache"); variables.add("innodb_doublewrite"); + variables.add("innodb_doublewrite_batch_size"); + variables.add("innodb_doublewrite_dir"); + variables.add("innodb_doublewrite_files"); + variables.add("innodb_doublewrite_pages"); + variables.add("innodb_extend_and_initialize"); variables.add("innodb_fast_shutdown"); variables.add("innodb_fatal_semaphore_wait_threshold"); variables.add("innodb_file_format"); @@ -313,6 +391,8 @@ public class ServerVariables { variables.add("innodb_flushing_avg_loops"); variables.add("innodb_force_load_corrupted"); variables.add("innodb_force_recovery"); + variables.add("innodb_freeze_db_if_no_cn_heartbeat_enable"); + variables.add("innodb_freeze_db_if_no_cn_heartbeat_timeout_sec"); variables.add("innodb_fsync_threshold"); variables.add("innodb_ft_aux_table"); variables.add("innodb_ft_cache_size"); @@ -346,6 +426,7 @@ public class ServerVariables { variables.add("innodb_log_spin_cpu_pct_hwm"); variables.add("innodb_log_wait_for_flush_spin_hwm"); variables.add("innodb_log_write_ahead_size"); + variables.add("innodb_log_writer_threads"); variables.add("innodb_lru_scan_depth"); variables.add("innodb_max_dirty_pages_pct"); variables.add("innodb_max_dirty_pages_pct_lwm"); @@ -357,12 +438,14 @@ public class ServerVariables { variables.add("innodb_monitor_enable"); variables.add("innodb_monitor_reset"); variables.add("innodb_monitor_reset_all"); + variables.add("innodb_numa_interleave"); variables.add("innodb_old_blocks_pct"); variables.add("innodb_old_blocks_time"); variables.add("innodb_online_alter_log_max_size"); variables.add("innodb_open_files"); variables.add("innodb_optimize_fulltext_only"); variables.add("innodb_optimize_point_storage"); + variables.add("innodb_page_cleaners"); variables.add("innodb_page_size"); variables.add("innodb_parallel_read_threads"); variables.add("innodb_prepare_wait_timeout"); @@ -379,6 +462,8 @@ public class ServerVariables { variables.add("innodb_read_ahead_threshold"); variables.add("innodb_read_io_threads"); variables.add("innodb_read_only"); + variables.add("innodb_redo_log_archive_dirs"); + variables.add("innodb_redo_log_capacity"); variables.add("innodb_redo_log_encrypt"); variables.add("innodb_replication_delay"); variables.add("innodb_rollback_on_timeout"); @@ -386,6 +471,8 @@ public class 
ServerVariables { variables.add("innodb_scn_history_interval"); variables.add("innodb_scn_history_keep_days"); variables.add("innodb_scn_history_task_enabled"); + variables.add("innodb_segment_reserve_factor"); + variables.add("innodb_snapshot_seq"); variables.add("innodb_snapshot_update_gcn"); variables.add("innodb_sort_buffer_size"); variables.add("innodb_spin_wait_delay"); @@ -409,9 +496,13 @@ public class ServerVariables { variables.add("innodb_tcn_block_cache_type"); variables.add("innodb_tcn_cache_level"); variables.add("innodb_tcn_cache_replace_after_commit"); + variables.add("innodb_temp_data_file_path"); + variables.add("innodb_temp_tablespaces_dir"); variables.add("innodb_thread_concurrency"); variables.add("innodb_thread_sleep_delay"); + variables.add("innodb_tmpdir"); variables.add("innodb_transaction_group"); + variables.add("innodb_txn_cached_list_keep_size"); variables.add("innodb_txn_undo_page_reuse_max_percent"); variables.add("innodb_undo_directory"); variables.add("innodb_undo_log_encrypt"); @@ -421,9 +512,12 @@ public class ServerVariables { variables.add("innodb_undo_space_reserved_size"); variables.add("innodb_undo_space_supremum_size"); variables.add("innodb_undo_tablespaces"); + variables.add("innodb_use_fdatasync"); variables.add("innodb_use_native_aio"); variables.add("innodb_use_sys_malloc"); + variables.add("innodb_validate_tablespace_paths"); variables.add("innodb_version"); + variables.add("innodb_vision_use_commit_snapshot_debug"); variables.add("innodb_write_io_threads"); variables.add("innodb_write_non_innodb_gtids"); variables.add("insert_id"); @@ -439,6 +533,7 @@ public class ServerVariables { variables.add("key_cache_division_limit"); variables.add("keyring_operations"); variables.add("kill_idle_transaction"); + variables.add("kill_idle_transaction_timeout"); variables.add("large_files_support"); variables.add("large_page_size"); variables.add("large_pages"); @@ -449,7 +544,9 @@ public class ServerVariables { variables.add("lc_time_names"); variables.add("license"); variables.add("local_infile"); + variables.add("lock_instance_mode"); variables.add("lock_wait_timeout"); + variables.add("locked_in_memory"); variables.add("log_bin"); variables.add("log_bin_basename"); variables.add("log_bin_index"); @@ -457,12 +554,16 @@ public class ServerVariables { variables.add("log_bin_use_v1_row_events"); variables.add("log_error"); variables.add("log_error_services"); + variables.add("log_error_suppression_list"); variables.add("log_error_verbosity"); variables.add("log_output"); variables.add("log_queries_not_using_indexes"); + variables.add("log_raw"); + variables.add("log_replica_updates"); variables.add("log_slave_updates"); variables.add("log_slow_admin_statements"); variables.add("log_slow_extra"); + variables.add("log_slow_replica_statements"); variables.add("log_slow_slave_statements"); variables.add("log_statements_unsafe_for_binlog"); variables.add("log_throttle_queries_not_using_indexes"); @@ -473,7 +574,9 @@ public class ServerVariables { variables.add("low_priority_updates"); variables.add("lower_case_file_system"); variables.add("lower_case_table_names"); + variables.add("maintain_max_connections"); variables.add("maintain_user_list"); + variables.add("mandatory_roles"); variables.add("master_info_repository"); variables.add("master_verify_checksum"); variables.add("max_allowed_packet"); @@ -483,6 +586,7 @@ public class ServerVariables { variables.add("max_connect_errors"); variables.add("max_connections"); variables.add("max_delayed_threads"); + 
variables.add("max_digest_length"); variables.add("max_error_count"); variables.add("max_execution_time"); variables.add("max_heap_table_size"); @@ -512,17 +616,35 @@ public class ServerVariables { variables.add("myisam_stats_method"); variables.add("myisam_use_mmap"); variables.add("mysql_native_password_proxy_users"); + variables.add("mysqlx_bind_address"); + variables.add("mysqlx_compression_algorithms"); variables.add("mysqlx_connect_timeout"); + variables.add("mysqlx_deflate_default_compression_level"); + variables.add("mysqlx_deflate_max_client_compression_level"); variables.add("mysqlx_document_id_unique_prefix"); variables.add("mysqlx_enable_hello_notice"); variables.add("mysqlx_idle_worker_thread_timeout"); variables.add("mysqlx_interactive_timeout"); + variables.add("mysqlx_lz4_default_compression_level"); + variables.add("mysqlx_lz4_max_client_compression_level"); variables.add("mysqlx_max_allowed_packet"); variables.add("mysqlx_max_connections"); variables.add("mysqlx_min_worker_threads"); + variables.add("mysqlx_port"); + variables.add("mysqlx_port_open_timeout"); variables.add("mysqlx_read_timeout"); + variables.add("mysqlx_socket"); + variables.add("mysqlx_ssl_ca"); + variables.add("mysqlx_ssl_capath"); + variables.add("mysqlx_ssl_cert"); + variables.add("mysqlx_ssl_cipher"); + variables.add("mysqlx_ssl_crl"); + variables.add("mysqlx_ssl_crlpath"); + variables.add("mysqlx_ssl_key"); variables.add("mysqlx_wait_timeout"); variables.add("mysqlx_write_timeout"); + variables.add("mysqlx_zstd_default_compression_level"); + variables.add("mysqlx_zstd_max_client_compression_level"); variables.add("named_pipe"); variables.add("ndb-allow-copying-alter-table"); variables.add("ndb_autoincrement_prefetch_sz"); @@ -554,16 +676,21 @@ public class ServerVariables { variables.add("net_retry_count"); variables.add("net_write_timeout"); variables.add("new"); + variables.add("new_rpc"); + variables.add("ngram_token_size"); variables.add("offline_mode"); variables.add("old"); variables.add("old_alter_table"); variables.add("old_passwords"); variables.add("only_report_warning_when_skip_sequence"); variables.add("open_files_limit"); + variables.add("opt_enable_rds_priv_strategy"); + variables.add("opt_force_index_pct_cached"); variables.add("opt_indexstat"); variables.add("opt_outline_enabled"); variables.add("opt_tablestat"); variables.add("optimizer_join_cache_level"); + variables.add("optimizer_max_subgraph_pairs"); variables.add("optimizer_prune_level"); variables.add("optimizer_search_depth"); variables.add("optimizer_switch"); @@ -575,56 +702,79 @@ public class ServerVariables { variables.add("original_commit_timestamp"); variables.add("original_server_version"); variables.add("outline_allowed_sql_digest_truncate"); + variables.add("outline_partitions"); variables.add("parser_max_mem_size"); variables.add("partial_revokes"); variables.add("password_history"); variables.add("password_require_current"); variables.add("password_reuse_interval"); variables.add("performance_point_dbug_enabled"); + variables.add("performance_point_enabled"); + variables.add("performance_point_iostat_interval"); + variables.add("performance_point_iostat_volume_size"); variables.add("performance_point_lock_rwlock_enabled"); variables.add("performance_schema"); variables.add("performance_schema_accounts_size"); variables.add("performance_schema_digests_size"); + variables.add("performance_schema_error_size"); variables.add("performance_schema_events_stages_history_long_size"); 
variables.add("performance_schema_events_stages_history_size"); variables.add("performance_schema_events_statements_history_long_size"); variables.add("performance_schema_events_statements_history_size"); + variables.add("performance_schema_events_transactions_history_long_size"); + variables.add("performance_schema_events_transactions_history_size"); variables.add("performance_schema_events_waits_history_long_size"); variables.add("performance_schema_events_waits_history_size"); variables.add("performance_schema_hosts_size"); variables.add("performance_schema_max_cond_classes"); variables.add("performance_schema_max_cond_instances"); + variables.add("performance_schema_max_digest_length"); variables.add("performance_schema_max_digest_sample_age"); variables.add("performance_schema_max_file_classes"); variables.add("performance_schema_max_file_handles"); variables.add("performance_schema_max_file_instances"); + variables.add("performance_schema_max_index_stat"); + variables.add("performance_schema_max_memory_classes"); + variables.add("performance_schema_max_metadata_locks"); variables.add("performance_schema_max_mutex_classes"); variables.add("performance_schema_max_mutex_instances"); + variables.add("performance_schema_max_prepared_statements_instances"); + variables.add("performance_schema_max_program_instances"); variables.add("performance_schema_max_rwlock_classes"); variables.add("performance_schema_max_rwlock_instances"); variables.add("performance_schema_max_socket_classes"); variables.add("performance_schema_max_socket_instances"); + variables.add("performance_schema_max_sql_text_length"); variables.add("performance_schema_max_stage_classes"); variables.add("performance_schema_max_statement_classes"); + variables.add("performance_schema_max_statement_stack"); variables.add("performance_schema_max_table_handles"); variables.add("performance_schema_max_table_instances"); + variables.add("performance_schema_max_table_lock_stat"); variables.add("performance_schema_max_thread_classes"); variables.add("performance_schema_max_thread_instances"); variables.add("performance_schema_session_connect_attrs_size"); variables.add("performance_schema_setup_actors_size"); variables.add("performance_schema_setup_objects_size"); + variables.add("performance_schema_show_processlist"); variables.add("performance_schema_users_size"); + variables.add("persist_only_admin_x509_subject"); + variables.add("persist_sensitive_variables_in_plaintext"); + variables.add("persisted_globals_load"); + variables.add("physical_backfill_opt"); variables.add("pid_file"); variables.add("plugin_dir"); variables.add("polarx_connect_timeout"); variables.add("polarx_max_allowed_packet"); variables.add("polarx_max_connections"); + variables.add("polarx_rpc_auto_cpu_affinity"); variables.add("polarx_rpc_enable_epoll_in_tasker"); variables.add("polarx_rpc_enable_kill_log"); variables.add("polarx_rpc_enable_perf_hist"); variables.add("polarx_rpc_enable_tasker"); variables.add("polarx_rpc_enable_thread_pool_log"); variables.add("polarx_rpc_epoll_events_per_thread"); + variables.add("polarx_rpc_epoll_extra_groups"); variables.add("polarx_rpc_epoll_group_ctx_refresh_time"); variables.add("polarx_rpc_epoll_group_dynamic_threads"); variables.add("polarx_rpc_epoll_group_dynamic_threads_shrink_time"); @@ -632,21 +782,33 @@ public class ServerVariables { variables.add("polarx_rpc_epoll_group_tasker_multiply"); variables.add("polarx_rpc_epoll_group_thread_deadlock_check_interval"); 
variables.add("polarx_rpc_epoll_group_thread_scale_thresh"); + variables.add("polarx_rpc_epoll_groups"); + variables.add("polarx_rpc_epoll_threads_per_group"); variables.add("polarx_rpc_epoll_timeout"); + variables.add("polarx_rpc_epoll_work_queue_capacity"); + variables.add("polarx_rpc_force_all_cores"); variables.add("polarx_rpc_galaxy_protocol"); variables.add("polarx_rpc_galaxy_version"); variables.add("polarx_rpc_max_allowed_packet"); variables.add("polarx_rpc_max_cached_output_buffer_pages"); + variables.add("polarx_rpc_max_epoll_wait_total_threads"); variables.add("polarx_rpc_max_queued_messages"); variables.add("polarx_rpc_mcs_spin_cnt"); + variables.add("polarx_rpc_min_auto_epoll_groups"); + variables.add("polarx_rpc_multi_affinity_in_group"); variables.add("polarx_rpc_net_write_timeout"); + variables.add("polarx_rpc_request_cache_instances"); + variables.add("polarx_rpc_request_cache_max_length"); + variables.add("polarx_rpc_request_cache_number"); variables.add("polarx_rpc_session_poll_rwlock_spin_cnt"); variables.add("polarx_rpc_shared_session_lifetime"); variables.add("polarx_rpc_skip_name_resolve"); variables.add("polarx_rpc_tcp_fixed_dealing_buf"); variables.add("polarx_rpc_tcp_keep_alive"); + variables.add("polarx_rpc_tcp_listen_queue"); variables.add("polarx_rpc_tcp_recv_buf"); variables.add("polarx_rpc_tcp_send_buf"); + variables.add("polarx_udf_function_list"); variables.add("port"); variables.add("preload_buffer_size"); variables.add("print_gtid_info_during_recovery"); @@ -656,6 +818,7 @@ public class ServerVariables { variables.add("protocol_compression_algorithms"); variables.add("protocol_version"); variables.add("proxy_user"); + variables.add("pseudo_replica_mode"); variables.add("pseudo_slave_mode"); variables.add("pseudo_thread_id"); variables.add("query_alloc_block_size"); @@ -672,6 +835,7 @@ public class ServerVariables { variables.add("rbr_exec_mode"); variables.add("rds_audit_log_buffer_size"); variables.add("rds_audit_log_connection_policy"); + variables.add("rds_audit_log_dir"); variables.add("rds_audit_log_enabled"); variables.add("rds_audit_log_event_buffer_size"); variables.add("rds_audit_log_flush"); @@ -682,6 +846,9 @@ public class ServerVariables { variables.add("rds_audit_log_statement_policy"); variables.add("rds_audit_log_strategy"); variables.add("rds_audit_log_version"); + variables.add("rds_kill_connections"); + variables.add("rds_kill_user_list"); + variables.add("rds_release_date"); variables.add("rds_reserved_connections"); variables.add("rds_version"); variables.add("read_buffer_size"); @@ -704,21 +871,48 @@ public class ServerVariables { variables.add("relay_log_purge"); variables.add("relay_log_recovery"); variables.add("relay_log_space_limit"); + variables.add("replica_allow_batching"); + variables.add("replica_checkpoint_group"); + variables.add("replica_checkpoint_period"); + variables.add("replica_compressed_protocol"); + variables.add("replica_exec_mode"); + variables.add("replica_load_tmpdir"); + variables.add("replica_max_allowed_packet"); + variables.add("replica_net_timeout"); + variables.add("replica_parallel_type"); + variables.add("replica_parallel_workers"); + variables.add("replica_pending_jobs_size_max"); + variables.add("replica_preserve_commit_order"); variables.add("replica_read_timeout"); + variables.add("replica_skip_errors"); + variables.add("replica_sql_verify_checksum"); + variables.add("replica_transaction_retries"); + variables.add("replica_type_conversions"); + 
variables.add("replication_optimize_for_static_plugin_config"); + variables.add("replication_sender_observe_commit_only"); variables.add("report_host"); variables.add("report_password"); variables.add("report_port"); variables.add("report_user"); + variables.add("require_row_format"); variables.add("require_secure_transport"); variables.add("reset_consensus_prefetch_cache"); variables.add("resultset_metadata"); + variables.add("rotate_log_table"); + variables.add("rotate_log_table_last_name"); + variables.add("rpc_port"); variables.add("rpl_read_size"); variables.add("rpl_semi_sync_master_wait_point"); + variables.add("rpl_stop_replica_timeout"); variables.add("rpl_stop_slave_timeout"); variables.add("schema_definition_cache"); variables.add("secondary_engine_cost_threshold"); variables.add("secure_auth"); variables.add("secure_file_priv"); + variables.add("select_into_buffer_size"); + variables.add("select_into_disk_sync"); + variables.add("select_into_disk_sync_delay"); + variables.add("sequence_read_skip_cache"); variables.add("server_id"); variables.add("server_id_bits"); variables.add("server_uuid"); @@ -728,17 +922,23 @@ public class ServerVariables { variables.add("session_track_state_change"); variables.add("session_track_system_variables"); variables.add("session_track_transaction_info"); + variables.add("sha256_password_auto_generate_rsa_keys"); + variables.add("sha256_password_private_key_path"); variables.add("sha256_password_proxy_users"); + variables.add("sha256_password_public_key_path"); variables.add("shared_memory"); variables.add("shared_memory_base_name"); variables.add("show_compatibility_56"); variables.add("show_create_table_skip_secondary_engine"); variables.add("show_create_table_verbosity"); + variables.add("show_gipk_in_create_table_and_information_schema"); variables.add("show_old_temporals"); variables.add("skip_external_locking"); variables.add("skip_name_resolve"); variables.add("skip_networking"); + variables.add("skip_replica_start"); variables.add("skip_show_database"); + variables.add("skip_slave_start"); variables.add("slave_allow_batching"); variables.add("slave_checkpoint_group"); variables.add("slave_checkpoint_period"); @@ -761,11 +961,13 @@ public class ServerVariables { variables.add("slow_query_log_file"); variables.add("socket"); variables.add("sort_buffer_size"); + variables.add("source_verify_checksum"); variables.add("sql-mode"); variables.add("sql_auto_is_null"); variables.add("sql_big_selects"); variables.add("sql_big_tables"); variables.add("sql_buffer_result"); + variables.add("sql_generate_invisible_primary_key"); variables.add("sql_log_bin"); variables.add("sql_log_off"); variables.add("sql_low_priority_updates"); @@ -773,6 +975,7 @@ public class ServerVariables { variables.add("sql_mode"); variables.add("sql_notes"); variables.add("sql_quote_show_create"); + variables.add("sql_replica_skip_counter"); variables.add("sql_require_primary_key"); variables.add("sql_safe_updates"); variables.add("sql_select_limit"); @@ -786,6 +989,8 @@ public class ServerVariables { variables.add("ssl_crlpath"); variables.add("ssl_fips_mode"); variables.add("ssl_key"); + variables.add("ssl_session_cache_mode"); + variables.add("ssl_session_cache_timeout"); variables.add("storage_engine"); variables.add("stored_program_cache"); variables.add("stored_program_definition_cache"); @@ -795,14 +1000,17 @@ public class ServerVariables { variables.add("sync_master_info"); variables.add("sync_relay_log"); variables.add("sync_relay_log_info"); + 
variables.add("sync_source_info"); variables.add("system_time_zone"); variables.add("table_definition_cache"); variables.add("table_encryption_privilege_check"); variables.add("table_open_cache"); variables.add("table_open_cache_instances"); variables.add("tablespace_definition_cache"); + variables.add("temptable_max_mmap"); variables.add("temptable_max_ram"); variables.add("temptable_use_mmap"); + variables.add("terminology_use_previous"); variables.add("thread_cache_size"); variables.add("thread_concurrency"); variables.add("thread_handling"); @@ -814,6 +1022,7 @@ public class ServerVariables { variables.add("time_zone"); variables.add("timed_mutexes"); variables.add("timestamp"); + variables.add("tls_ciphersuites"); variables.add("tls_version"); variables.add("tmp_table_size"); variables.add("tmpdir"); @@ -832,20 +1041,14 @@ public class ServerVariables { variables.add("version_comment"); variables.add("version_compile_machine"); variables.add("version_compile_os"); + variables.add("version_compile_zlib"); variables.add("version_tokens_session"); variables.add("version_tokens_session_number"); variables.add("wait_timeout"); variables.add("warning_count"); variables.add("weak_consensus_mode"); variables.add("windowing_use_high_precision"); - variables.add("hotspot"); - variables.add("hotspot_lock_type"); - variables.add("hotspot_for_autocommit"); - variables.add("hotspot_update_max_wait_time"); - variables.add("innodb_hotspot_kill_lock_holder"); - variables.add("innodb_concurrency_tickets_hotspot"); - variables.add("innodb_hotspot_lock_wait_timeout"); - variables.add("enable_changeset"); + variables.add("xa_detach_on_prepare"); readonlyVariables.add("audit_log_current_session"); readonlyVariables.add("audit_log_filter_id"); @@ -1284,6 +1487,8 @@ public class ServerVariables { mysqlBothVariables.add("collation_database"); mysqlBothVariables.add("collation_server"); mysqlBothVariables.add("completion_type"); + mysqlBothVariables.add("connection_memory_chunk_size"); + mysqlBothVariables.add("connection_memory_limit"); mysqlBothVariables.add("cte_max_recursion_depth"); mysqlBothVariables.add("debug"); mysqlBothVariables.add("default_collation_for_utf8mb4"); @@ -1294,16 +1499,20 @@ public class ServerVariables { mysqlBothVariables.add("div_precision_increment"); mysqlBothVariables.add("end_markers_in_json"); mysqlBothVariables.add("eq_range_index_dive_limit"); + mysqlBothVariables.add("explain_format"); mysqlBothVariables.add("explicit_defaults_for_timestamp"); mysqlBothVariables.add("force_revise"); mysqlBothVariables.add("foreign_key_checks"); mysqlBothVariables.add("generated_random_password_length"); + mysqlBothVariables.add("global_connection_memory_tracking"); mysqlBothVariables.add("global_query_wait_timeout"); mysqlBothVariables.add("group_concat_max_len"); mysqlBothVariables.add("group_replication_consistency"); mysqlBothVariables.add("gtid_owned"); mysqlBothVariables.add("histogram_generation_max_mem_size"); mysqlBothVariables.add("information_schema_stats_expiry"); + mysqlBothVariables.add("innodb_ddl_buffer_size"); + mysqlBothVariables.add("innodb_ddl_threads"); mysqlBothVariables.add("innodb_ft_enable_stopword"); mysqlBothVariables.add("innodb_ft_user_stopword_table"); mysqlBothVariables.add("innodb_global_query_wait_timeout"); @@ -1402,11 +1611,13 @@ public class ServerVariables { mysqlBothVariables.add("session_track_system_variables"); mysqlBothVariables.add("session_track_transaction_info"); mysqlBothVariables.add("show_create_table_verbosity"); + 
mysqlBothVariables.add("show_gipk_in_create_table_and_information_schema"); mysqlBothVariables.add("show_old_temporals"); mysqlBothVariables.add("sort_buffer_size"); mysqlBothVariables.add("sql_auto_is_null"); mysqlBothVariables.add("sql_big_selects"); mysqlBothVariables.add("sql_buffer_result"); + mysqlBothVariables.add("sql_generate_invisible_primary_key"); mysqlBothVariables.add("sql_log_off"); mysqlBothVariables.add("sql_mode"); mysqlBothVariables.add("sql_notes"); @@ -1430,6 +1641,7 @@ public class ServerVariables { mysqlBothVariables.add("version_tokens_session_number"); mysqlBothVariables.add("wait_timeout"); mysqlBothVariables.add("windowing_use_high_precision"); + mysqlBothVariables.add("xa_detach_on_prepare"); mysqlGlobalVariables.add("Ndb_conflict_last_conflict_epoch"); mysqlGlobalVariables.add("Ndb_replica_max_replicated_epoch"); mysqlGlobalVariables.add("Ndb_slave_max_replicated_epoch"); @@ -1496,6 +1708,7 @@ public class ServerVariables { mysqlGlobalVariables.add("authentication_ldap_simple_server_port"); mysqlGlobalVariables.add("authentication_ldap_simple_tls"); mysqlGlobalVariables.add("authentication_ldap_simple_user_search_attr"); + mysqlGlobalVariables.add("authentication_policy"); mysqlGlobalVariables.add("authentication_windows_log_level"); mysqlGlobalVariables.add("authentication_windows_use_principal_name"); mysqlGlobalVariables.add("auto_generate_certs"); @@ -1508,6 +1721,7 @@ public class ServerVariables { mysqlGlobalVariables.add("binlog_checksum"); mysqlGlobalVariables.add("binlog_encryption"); mysqlGlobalVariables.add("binlog_error_action"); + mysqlGlobalVariables.add("binlog_expire_logs_auto_purge"); mysqlGlobalVariables.add("binlog_expire_logs_seconds"); mysqlGlobalVariables.add("binlog_group_commit_sync_delay"); mysqlGlobalVariables.add("binlog_group_commit_sync_no_delay_count"); @@ -1521,6 +1735,7 @@ public class ServerVariables { mysqlGlobalVariables.add("binlog_stmt_cache_size"); mysqlGlobalVariables.add("binlog_transaction_dependency_history_size"); mysqlGlobalVariables.add("binlog_transaction_dependency_tracking"); + mysqlGlobalVariables.add("build_id"); mysqlGlobalVariables.add("caching_sha2_password_auto_generate_rsa_keys"); mysqlGlobalVariables.add("caching_sha2_password_digest_rounds"); mysqlGlobalVariables.add("caching_sha2_password_private_key_path"); @@ -1532,6 +1747,7 @@ public class ServerVariables { mysqlGlobalVariables.add("character_set_system"); mysqlGlobalVariables.add("character_sets_dir"); mysqlGlobalVariables.add("check_proxy_users"); + mysqlGlobalVariables.add("client_endpoint_ip"); mysqlGlobalVariables.add("clone_autotune_concurrency"); mysqlGlobalVariables.add("clone_buffer_size"); mysqlGlobalVariables.add("clone_ddl_timeout"); @@ -1544,6 +1760,8 @@ public class ServerVariables { mysqlGlobalVariables.add("clone_ssl_cert"); mysqlGlobalVariables.add("clone_ssl_key"); mysqlGlobalVariables.add("clone_valid_donor_list"); + mysqlGlobalVariables.add("cluster_id"); + mysqlGlobalVariables.add("commit_pos_watcher"); mysqlGlobalVariables.add("commit_pos_watcher_interval"); mysqlGlobalVariables.add("concurrent_insert"); mysqlGlobalVariables.add("connect_timeout"); @@ -1558,12 +1776,21 @@ public class ServerVariables { mysqlGlobalVariables.add("consensus_checksum"); mysqlGlobalVariables.add("consensus_configure_change_timeout"); mysqlGlobalVariables.add("consensus_disable_election"); + mysqlGlobalVariables.add("consensus_disable_fifo_cache"); mysqlGlobalVariables.add("consensus_dynamic_easyindex"); + 
mysqlGlobalVariables.add("consensus_election_timeout"); + mysqlGlobalVariables.add("consensus_flow_control"); mysqlGlobalVariables.add("consensus_force_promote"); + mysqlGlobalVariables.add("consensus_force_recovery"); mysqlGlobalVariables.add("consensus_force_sync_epoch_diff"); + mysqlGlobalVariables.add("consensus_heartbeat_thread_cnt"); mysqlGlobalVariables.add("consensus_index_buf_enabled"); + mysqlGlobalVariables.add("consensus_io_thread_cnt"); mysqlGlobalVariables.add("consensus_large_batch_ratio"); + mysqlGlobalVariables.add("consensus_large_event_count_limit"); mysqlGlobalVariables.add("consensus_large_event_limit"); + mysqlGlobalVariables.add("consensus_large_event_size_limit"); + mysqlGlobalVariables.add("consensus_large_event_split_size"); mysqlGlobalVariables.add("consensus_large_trx"); mysqlGlobalVariables.add("consensus_large_trx_split_size"); mysqlGlobalVariables.add("consensus_leader_stop_apply"); @@ -1577,6 +1804,7 @@ public class ServerVariables { mysqlGlobalVariables.add("consensus_max_log_size"); mysqlGlobalVariables.add("consensus_max_packet_size"); mysqlGlobalVariables.add("consensus_min_delay_index"); + mysqlGlobalVariables.add("consensus_msg_compress_option"); mysqlGlobalVariables.add("consensus_new_follower_threshold"); mysqlGlobalVariables.add("consensus_old_compact_mode"); mysqlGlobalVariables.add("consensus_optimistic_heartbeat"); @@ -1586,8 +1814,10 @@ public class ServerVariables { mysqlGlobalVariables.add("consensus_prefetch_wakeup_ratio"); mysqlGlobalVariables.add("consensus_prefetch_window_size"); mysqlGlobalVariables.add("consensus_replicate_with_cache_log"); + mysqlGlobalVariables.add("consensus_safe_for_reset_master"); mysqlGlobalVariables.add("consensus_send_timeout"); mysqlGlobalVariables.add("consensus_sync_follower_meta_interva"); + mysqlGlobalVariables.add("consensus_worker_thread_cnt"); mysqlGlobalVariables.add("consensuslog_revise"); mysqlGlobalVariables.add("core_file"); mysqlGlobalVariables.add("create_admin_listener_thread"); @@ -1609,6 +1839,8 @@ public class ServerVariables { mysqlGlobalVariables.add("disconnect_on_expired_password"); mysqlGlobalVariables.add("dragnet.log_error_filter_rules"); mysqlGlobalVariables.add("enable_appliedindex_checker"); + mysqlGlobalVariables.add("enable_physical_backfill"); + mysqlGlobalVariables.add("enable_polarx_rpc"); mysqlGlobalVariables.add("enforce_gtid_consistency"); mysqlGlobalVariables.add("event_scheduler"); mysqlGlobalVariables.add("expire_logs_days"); @@ -1626,8 +1858,10 @@ public class ServerVariables { mysqlGlobalVariables.add("galaxyx_max_queued_messages"); mysqlGlobalVariables.add("galaxyx_socket_recv_buffer"); mysqlGlobalVariables.add("galaxyx_socket_send_buffer"); + mysqlGlobalVariables.add("gcn_write_event"); mysqlGlobalVariables.add("general_log"); mysqlGlobalVariables.add("general_log_file"); + mysqlGlobalVariables.add("global_connection_memory_limit"); mysqlGlobalVariables.add("group_replication_advertise_recovery_endpoints"); mysqlGlobalVariables.add("group_replication_allow_local_lower_version_join"); mysqlGlobalVariables.add("group_replication_auto_increment_increment"); @@ -1702,10 +1936,14 @@ public class ServerVariables { mysqlGlobalVariables.add("have_symlink"); mysqlGlobalVariables.add("host_cache_size"); mysqlGlobalVariables.add("hostname"); + mysqlGlobalVariables.add("import_tablespace_iterator_interval"); + mysqlGlobalVariables.add("import_tablespace_iterator_interval_ms"); mysqlGlobalVariables.add("init_connect"); mysqlGlobalVariables.add("init_file"); 
mysqlGlobalVariables.add("init_replica"); mysqlGlobalVariables.add("init_slave"); + mysqlGlobalVariables.add("inner_schema_list"); + mysqlGlobalVariables.add("inner_user_list"); mysqlGlobalVariables.add("innodb_adaptive_flushing"); mysqlGlobalVariables.add("innodb_adaptive_flushing_lwm"); mysqlGlobalVariables.add("innodb_adaptive_hash_index"); @@ -1719,6 +1957,7 @@ public class ServerVariables { mysqlGlobalVariables.add("innodb_autoextend_increment"); mysqlGlobalVariables.add("innodb_autoinc_lock_mode"); mysqlGlobalVariables.add("innodb_background_drop_list_empty"); + mysqlGlobalVariables.add("innodb_btree_sampling"); mysqlGlobalVariables.add("innodb_buffer_pool_chunk_size"); mysqlGlobalVariables.add("innodb_buffer_pool_debug"); mysqlGlobalVariables.add("innodb_buffer_pool_dump_at_shutdown"); @@ -1740,17 +1979,23 @@ public class ServerVariables { mysqlGlobalVariables.add("innodb_cleanout_max_cleans_on_page"); mysqlGlobalVariables.add("innodb_cleanout_max_scans_on_page"); mysqlGlobalVariables.add("innodb_cleanout_mode"); + mysqlGlobalVariables.add("innodb_cleanout_safe_mode"); + mysqlGlobalVariables.add("innodb_cleanout_write_redo"); mysqlGlobalVariables.add("innodb_cmp_per_index_enabled"); mysqlGlobalVariables.add("innodb_commit_cleanout_max_rows"); mysqlGlobalVariables.add("innodb_commit_concurrency"); + mysqlGlobalVariables.add("innodb_commit_seq"); + mysqlGlobalVariables.add("innodb_commit_snapshot_search_enabled"); mysqlGlobalVariables.add("innodb_compress_debug"); mysqlGlobalVariables.add("innodb_compression_failure_threshold_pct"); mysqlGlobalVariables.add("innodb_compression_level"); mysqlGlobalVariables.add("innodb_compression_pad_pct_max"); mysqlGlobalVariables.add("innodb_concurrency_tickets"); + mysqlGlobalVariables.add("innodb_current_snapshot_seq"); mysqlGlobalVariables.add("innodb_data_file_path"); mysqlGlobalVariables.add("innodb_data_file_purge"); mysqlGlobalVariables.add("innodb_data_file_purge_all_at_shutdown"); + mysqlGlobalVariables.add("innodb_data_file_purge_dir"); mysqlGlobalVariables.add("innodb_data_file_purge_immediate"); mysqlGlobalVariables.add("innodb_data_file_purge_interval"); mysqlGlobalVariables.add("innodb_data_file_purge_max_size"); @@ -1780,6 +2025,8 @@ public class ServerVariables { mysqlGlobalVariables.add("innodb_flushing_avg_loops"); mysqlGlobalVariables.add("innodb_force_load_corrupted"); mysqlGlobalVariables.add("innodb_force_recovery"); + mysqlGlobalVariables.add("innodb_freeze_db_if_no_cn_heartbeat_enable"); + mysqlGlobalVariables.add("innodb_freeze_db_if_no_cn_heartbeat_timeout_sec"); mysqlGlobalVariables.add("innodb_fsync_threshold"); mysqlGlobalVariables.add("innodb_ft_aux_table"); mysqlGlobalVariables.add("innodb_ft_cache_size"); @@ -1843,6 +2090,7 @@ public class ServerVariables { mysqlGlobalVariables.add("innodb_read_io_threads"); mysqlGlobalVariables.add("innodb_read_only"); mysqlGlobalVariables.add("innodb_redo_log_archive_dirs"); + mysqlGlobalVariables.add("innodb_redo_log_capacity"); mysqlGlobalVariables.add("innodb_redo_log_encrypt"); mysqlGlobalVariables.add("innodb_replication_delay"); mysqlGlobalVariables.add("innodb_rollback_on_timeout"); @@ -1852,6 +2100,7 @@ public class ServerVariables { mysqlGlobalVariables.add("innodb_scn_history_keep_days"); mysqlGlobalVariables.add("innodb_scn_history_task_enabled"); mysqlGlobalVariables.add("innodb_segment_reserve_factor"); + mysqlGlobalVariables.add("innodb_snapshot_seq"); mysqlGlobalVariables.add("innodb_snapshot_update_gcn"); mysqlGlobalVariables.add("innodb_sort_buffer_size"); 
mysqlGlobalVariables.add("innodb_spin_wait_delay"); @@ -1878,6 +2127,7 @@ public class ServerVariables { mysqlGlobalVariables.add("innodb_thread_sleep_delay"); mysqlGlobalVariables.add("innodb_trx_purge_view_update_only_debug"); mysqlGlobalVariables.add("innodb_trx_rseg_n_slots_debug"); + mysqlGlobalVariables.add("innodb_txn_cached_list_keep_size"); mysqlGlobalVariables.add("innodb_txn_undo_page_reuse_max_percent"); mysqlGlobalVariables.add("innodb_undo_directory"); mysqlGlobalVariables.add("innodb_undo_log_encrypt"); @@ -1890,6 +2140,7 @@ public class ServerVariables { mysqlGlobalVariables.add("innodb_use_native_aio"); mysqlGlobalVariables.add("innodb_validate_tablespace_paths"); mysqlGlobalVariables.add("innodb_version"); + mysqlGlobalVariables.add("innodb_vision_use_commit_snapshot_debug"); mysqlGlobalVariables.add("innodb_write_io_threads"); mysqlGlobalVariables.add("innodb_write_non_innodb_gtids"); mysqlGlobalVariables.add("internal_tmp_disk_storage_engine"); @@ -1934,6 +2185,7 @@ public class ServerVariables { mysqlGlobalVariables.add("keyring_okv_conf_dir"); mysqlGlobalVariables.add("keyring_operations"); mysqlGlobalVariables.add("kill_idle_transaction"); + mysqlGlobalVariables.add("kill_idle_transaction_timeout"); mysqlGlobalVariables.add("language"); mysqlGlobalVariables.add("large_files_support"); mysqlGlobalVariables.add("large_page_size"); @@ -1941,6 +2193,7 @@ public class ServerVariables { mysqlGlobalVariables.add("lc_messages_dir"); mysqlGlobalVariables.add("license"); mysqlGlobalVariables.add("local_infile"); + mysqlGlobalVariables.add("lock_instance_mode"); mysqlGlobalVariables.add("lock_order"); mysqlGlobalVariables.add("lock_order_debug_loop"); mysqlGlobalVariables.add("lock_order_debug_missing_arc"); @@ -1983,6 +2236,7 @@ public class ServerVariables { mysqlGlobalVariables.add("loose_innodb_prepare_wait_timeout"); mysqlGlobalVariables.add("lower_case_file_system"); mysqlGlobalVariables.add("lower_case_table_names"); + mysqlGlobalVariables.add("maintain_max_connections"); mysqlGlobalVariables.add("maintain_user_list"); mysqlGlobalVariables.add("mandatory_roles"); mysqlGlobalVariables.add("master_info_repository"); @@ -2083,19 +2337,27 @@ public class ServerVariables { mysqlGlobalVariables.add("ndbinfo_table_prefix"); mysqlGlobalVariables.add("ndbinfo_version"); mysqlGlobalVariables.add("net_buffer_length"); + mysqlGlobalVariables.add("new_rpc"); mysqlGlobalVariables.add("ngram_token_size"); mysqlGlobalVariables.add("offline_mode"); mysqlGlobalVariables.add("old"); mysqlGlobalVariables.add("only_report_warning_when_skip_sequence"); mysqlGlobalVariables.add("open_files_limit"); + mysqlGlobalVariables.add("opt_enable_rds_priv_strategy"); + mysqlGlobalVariables.add("opt_force_index_pct_cached"); mysqlGlobalVariables.add("opt_indexstat"); mysqlGlobalVariables.add("opt_outline_enabled"); mysqlGlobalVariables.add("opt_tablestat"); + mysqlGlobalVariables.add("optimizer_max_subgraph_pairs"); + mysqlGlobalVariables.add("outline_partitions"); mysqlGlobalVariables.add("partial_revokes"); mysqlGlobalVariables.add("password_history"); mysqlGlobalVariables.add("password_require_current"); mysqlGlobalVariables.add("password_reuse_interval"); mysqlGlobalVariables.add("performance_point_dbug_enabled"); + mysqlGlobalVariables.add("performance_point_enabled"); + mysqlGlobalVariables.add("performance_point_iostat_interval"); + mysqlGlobalVariables.add("performance_point_iostat_volume_size"); mysqlGlobalVariables.add("performance_point_lock_rwlock_enabled"); 
mysqlGlobalVariables.add("performance_schema"); mysqlGlobalVariables.add("performance_schema_accounts_size"); @@ -2143,7 +2405,9 @@ public class ServerVariables { mysqlGlobalVariables.add("performance_schema_show_processlist"); mysqlGlobalVariables.add("performance_schema_users_size"); mysqlGlobalVariables.add("persist_only_admin_x509_subject"); + mysqlGlobalVariables.add("persist_sensitive_variables_in_plaintext"); mysqlGlobalVariables.add("persisted_globals_load"); + mysqlGlobalVariables.add("physical_backfill_opt"); mysqlGlobalVariables.add("pid_file"); mysqlGlobalVariables.add("plugin_dir"); mysqlGlobalVariables.add("plugin_load"); @@ -2151,12 +2415,14 @@ public class ServerVariables { mysqlGlobalVariables.add("polarx_connect_timeout"); mysqlGlobalVariables.add("polarx_max_allowed_packet"); mysqlGlobalVariables.add("polarx_max_connections"); + mysqlGlobalVariables.add("polarx_rpc_auto_cpu_affinity"); mysqlGlobalVariables.add("polarx_rpc_enable_epoll_in_tasker"); mysqlGlobalVariables.add("polarx_rpc_enable_kill_log"); mysqlGlobalVariables.add("polarx_rpc_enable_perf_hist"); mysqlGlobalVariables.add("polarx_rpc_enable_tasker"); mysqlGlobalVariables.add("polarx_rpc_enable_thread_pool_log"); mysqlGlobalVariables.add("polarx_rpc_epoll_events_per_thread"); + mysqlGlobalVariables.add("polarx_rpc_epoll_extra_groups"); mysqlGlobalVariables.add("polarx_rpc_epoll_group_ctx_refresh_time"); mysqlGlobalVariables.add("polarx_rpc_epoll_group_dynamic_threads"); mysqlGlobalVariables.add("polarx_rpc_epoll_group_dynamic_threads_shrink_time"); @@ -2164,27 +2430,40 @@ public class ServerVariables { mysqlGlobalVariables.add("polarx_rpc_epoll_group_tasker_multiply"); mysqlGlobalVariables.add("polarx_rpc_epoll_group_thread_deadlock_check_interval"); mysqlGlobalVariables.add("polarx_rpc_epoll_group_thread_scale_thresh"); + mysqlGlobalVariables.add("polarx_rpc_epoll_groups"); + mysqlGlobalVariables.add("polarx_rpc_epoll_threads_per_group"); mysqlGlobalVariables.add("polarx_rpc_epoll_timeout"); + mysqlGlobalVariables.add("polarx_rpc_epoll_work_queue_capacity"); + mysqlGlobalVariables.add("polarx_rpc_force_all_cores"); mysqlGlobalVariables.add("polarx_rpc_galaxy_protocol"); mysqlGlobalVariables.add("polarx_rpc_galaxy_version"); mysqlGlobalVariables.add("polarx_rpc_max_allowed_packet"); mysqlGlobalVariables.add("polarx_rpc_max_cached_output_buffer_pages"); + mysqlGlobalVariables.add("polarx_rpc_max_epoll_wait_total_threads"); mysqlGlobalVariables.add("polarx_rpc_max_queued_messages"); mysqlGlobalVariables.add("polarx_rpc_mcs_spin_cnt"); + mysqlGlobalVariables.add("polarx_rpc_min_auto_epoll_groups"); + mysqlGlobalVariables.add("polarx_rpc_multi_affinity_in_group"); mysqlGlobalVariables.add("polarx_rpc_net_write_timeout"); + mysqlGlobalVariables.add("polarx_rpc_request_cache_instances"); + mysqlGlobalVariables.add("polarx_rpc_request_cache_max_length"); + mysqlGlobalVariables.add("polarx_rpc_request_cache_number"); mysqlGlobalVariables.add("polarx_rpc_session_poll_rwlock_spin_cnt"); mysqlGlobalVariables.add("polarx_rpc_shared_session_lifetime"); mysqlGlobalVariables.add("polarx_rpc_skip_name_resolve"); mysqlGlobalVariables.add("polarx_rpc_tcp_fixed_dealing_buf"); mysqlGlobalVariables.add("polarx_rpc_tcp_keep_alive"); + mysqlGlobalVariables.add("polarx_rpc_tcp_listen_queue"); mysqlGlobalVariables.add("polarx_rpc_tcp_recv_buf"); mysqlGlobalVariables.add("polarx_rpc_tcp_send_buf"); + mysqlGlobalVariables.add("polarx_udf_function_list"); mysqlGlobalVariables.add("port"); 
mysqlGlobalVariables.add("print_gtid_info_during_recovery"); mysqlGlobalVariables.add("protocol_compression_algorithms"); mysqlGlobalVariables.add("protocol_version"); mysqlGlobalVariables.add("rds_audit_log_buffer_size"); mysqlGlobalVariables.add("rds_audit_log_connection_policy"); + mysqlGlobalVariables.add("rds_audit_log_dir"); mysqlGlobalVariables.add("rds_audit_log_enabled"); mysqlGlobalVariables.add("rds_audit_log_event_buffer_size"); mysqlGlobalVariables.add("rds_audit_log_flush"); @@ -2194,6 +2473,9 @@ public class ServerVariables { mysqlGlobalVariables.add("rds_audit_log_statement_policy"); mysqlGlobalVariables.add("rds_audit_log_strategy"); mysqlGlobalVariables.add("rds_audit_log_version"); + mysqlGlobalVariables.add("rds_kill_connections"); + mysqlGlobalVariables.add("rds_kill_user_list"); + mysqlGlobalVariables.add("rds_release_date"); mysqlGlobalVariables.add("rds_reserved_connections"); mysqlGlobalVariables.add("rds_version"); mysqlGlobalVariables.add("read_only"); @@ -2239,6 +2521,9 @@ public class ServerVariables { mysqlGlobalVariables.add("reset_consensus_prefetch_cache"); mysqlGlobalVariables.add("rewriter_enabled"); mysqlGlobalVariables.add("rewriter_verbose"); + mysqlGlobalVariables.add("rotate_log_table"); + mysqlGlobalVariables.add("rotate_log_table_last_name"); + mysqlGlobalVariables.add("rpc_port"); mysqlGlobalVariables.add("rpl_read_size"); mysqlGlobalVariables.add("rpl_semi_sync_master_enabled"); mysqlGlobalVariables.add("rpl_semi_sync_master_timeout"); @@ -2260,6 +2545,7 @@ public class ServerVariables { mysqlGlobalVariables.add("rpl_stop_slave_timeout"); mysqlGlobalVariables.add("schema_definition_cache"); mysqlGlobalVariables.add("secure_file_priv"); + mysqlGlobalVariables.add("sequence_read_skip_cache"); mysqlGlobalVariables.add("server_id"); mysqlGlobalVariables.add("server_id_bits"); mysqlGlobalVariables.add("server_uuid"); @@ -2308,6 +2594,8 @@ public class ServerVariables { mysqlGlobalVariables.add("ssl_crlpath"); mysqlGlobalVariables.add("ssl_fips_mode"); mysqlGlobalVariables.add("ssl_key"); + mysqlGlobalVariables.add("ssl_session_cache_mode"); + mysqlGlobalVariables.add("ssl_session_cache_timeout"); mysqlGlobalVariables.add("stored_program_cache"); mysqlGlobalVariables.add("stored_program_definition_cache"); mysqlGlobalVariables.add("super_read_only"); @@ -2445,6 +2733,7 @@ public class ServerVariables { mysqlDynamicVariables.add("authentication_ldap_simple_server_port"); mysqlDynamicVariables.add("authentication_ldap_simple_tls"); mysqlDynamicVariables.add("authentication_ldap_simple_user_search_attr"); + mysqlDynamicVariables.add("authentication_policy"); mysqlDynamicVariables.add("auto_increment_increment"); mysqlDynamicVariables.add("auto_increment_offset"); mysqlDynamicVariables.add("auto_savepoint"); @@ -2457,6 +2746,7 @@ public class ServerVariables { mysqlDynamicVariables.add("binlog_direct_non_transactional_updates"); mysqlDynamicVariables.add("binlog_encryption"); mysqlDynamicVariables.add("binlog_error_action"); + mysqlDynamicVariables.add("binlog_expire_logs_auto_purge"); mysqlDynamicVariables.add("binlog_expire_logs_seconds"); mysqlDynamicVariables.add("binlog_format"); mysqlDynamicVariables.add("binlog_group_commit_sync_delay"); @@ -2501,6 +2791,7 @@ public class ServerVariables { mysqlDynamicVariables.add("collation_connection"); mysqlDynamicVariables.add("collation_database"); mysqlDynamicVariables.add("collation_server"); + mysqlDynamicVariables.add("commit_pos_watcher"); mysqlDynamicVariables.add("commit_pos_watcher_interval"); 
mysqlDynamicVariables.add("completion_type"); mysqlDynamicVariables.add("concurrent_insert"); @@ -2508,6 +2799,8 @@ public class ServerVariables { mysqlDynamicVariables.add("connection_control_failed_connections_threshold"); mysqlDynamicVariables.add("connection_control_max_connection_delay"); mysqlDynamicVariables.add("connection_control_min_connection_delay"); + mysqlDynamicVariables.add("connection_memory_chunk_size"); + mysqlDynamicVariables.add("connection_memory_limit"); mysqlDynamicVariables.add("consensus_auto_leader_transfer"); mysqlDynamicVariables.add("consensus_auto_leader_transfer_check_seconds"); mysqlDynamicVariables.add("consensus_auto_reset_match_index"); @@ -2516,12 +2809,21 @@ public class ServerVariables { mysqlDynamicVariables.add("consensus_checksum"); mysqlDynamicVariables.add("consensus_configure_change_timeout"); mysqlDynamicVariables.add("consensus_disable_election"); + mysqlDynamicVariables.add("consensus_disable_fifo_cache"); mysqlDynamicVariables.add("consensus_dynamic_easyindex"); + mysqlDynamicVariables.add("consensus_election_timeout"); + mysqlDynamicVariables.add("consensus_flow_control"); mysqlDynamicVariables.add("consensus_force_promote"); + mysqlDynamicVariables.add("consensus_force_recovery"); mysqlDynamicVariables.add("consensus_force_sync_epoch_diff"); + mysqlDynamicVariables.add("consensus_heartbeat_thread_cnt"); mysqlDynamicVariables.add("consensus_index_buf_enabled"); + mysqlDynamicVariables.add("consensus_io_thread_cnt"); mysqlDynamicVariables.add("consensus_large_batch_ratio"); + mysqlDynamicVariables.add("consensus_large_event_count_limit"); mysqlDynamicVariables.add("consensus_large_event_limit"); + mysqlDynamicVariables.add("consensus_large_event_size_limit"); + mysqlDynamicVariables.add("consensus_large_event_split_size"); mysqlDynamicVariables.add("consensus_large_trx"); mysqlDynamicVariables.add("consensus_large_trx_split_size"); mysqlDynamicVariables.add("consensus_leader_stop_apply"); @@ -2535,6 +2837,7 @@ public class ServerVariables { mysqlDynamicVariables.add("consensus_max_log_size"); mysqlDynamicVariables.add("consensus_max_packet_size"); mysqlDynamicVariables.add("consensus_min_delay_index"); + mysqlDynamicVariables.add("consensus_msg_compress_option"); mysqlDynamicVariables.add("consensus_new_follower_threshold"); mysqlDynamicVariables.add("consensus_old_compact_mode"); mysqlDynamicVariables.add("consensus_optimistic_heartbeat"); @@ -2544,8 +2847,10 @@ public class ServerVariables { mysqlDynamicVariables.add("consensus_prefetch_wakeup_ratio"); mysqlDynamicVariables.add("consensus_prefetch_window_size"); mysqlDynamicVariables.add("consensus_replicate_with_cache_log"); + mysqlDynamicVariables.add("consensus_safe_for_reset_master"); mysqlDynamicVariables.add("consensus_send_timeout"); mysqlDynamicVariables.add("consensus_sync_follower_meta_interva"); + mysqlDynamicVariables.add("consensus_worker_thread_cnt"); mysqlDynamicVariables.add("consensuslog_revise"); mysqlDynamicVariables.add("cte_max_recursion_depth"); mysqlDynamicVariables.add("debug"); @@ -2564,11 +2869,14 @@ public class ServerVariables { mysqlDynamicVariables.add("div_precision_increment"); mysqlDynamicVariables.add("dragnet.log_error_filter_rules"); mysqlDynamicVariables.add("enable_appliedindex_checker"); + mysqlDynamicVariables.add("enable_physical_backfill"); + mysqlDynamicVariables.add("enable_polarx_rpc"); mysqlDynamicVariables.add("end_markers_in_json"); mysqlDynamicVariables.add("enforce_gtid_consistency"); mysqlDynamicVariables.add("eq_range_index_dive_limit"); 
mysqlDynamicVariables.add("event_scheduler"); mysqlDynamicVariables.add("expire_logs_days"); + mysqlDynamicVariables.add("explain_format"); mysqlDynamicVariables.add("explicit_defaults_for_timestamp"); mysqlDynamicVariables.add("flush"); mysqlDynamicVariables.add("flush_time"); @@ -2582,9 +2890,12 @@ public class ServerVariables { mysqlDynamicVariables.add("galaxyx_max_queued_messages"); mysqlDynamicVariables.add("galaxyx_socket_recv_buffer"); mysqlDynamicVariables.add("galaxyx_socket_send_buffer"); + mysqlDynamicVariables.add("gcn_write_event"); mysqlDynamicVariables.add("general_log"); mysqlDynamicVariables.add("general_log_file"); mysqlDynamicVariables.add("generated_random_password_length"); + mysqlDynamicVariables.add("global_connection_memory_limit"); + mysqlDynamicVariables.add("global_connection_memory_tracking"); mysqlDynamicVariables.add("global_query_wait_timeout"); mysqlDynamicVariables.add("group_concat_max_len"); mysqlDynamicVariables.add("group_replication_advertise_recovery_endpoints"); @@ -2654,6 +2965,8 @@ public class ServerVariables { mysqlDynamicVariables.add("host_cache_size"); mysqlDynamicVariables.add("identity"); mysqlDynamicVariables.add("immediate_server_version"); + mysqlDynamicVariables.add("import_tablespace_iterator_interval"); + mysqlDynamicVariables.add("import_tablespace_iterator_interval_ms"); mysqlDynamicVariables.add("information_schema_stats_expiry"); mysqlDynamicVariables.add("init_connect"); mysqlDynamicVariables.add("init_replica"); @@ -2666,6 +2979,7 @@ public class ServerVariables { mysqlDynamicVariables.add("innodb_api_trx_level"); mysqlDynamicVariables.add("innodb_autoextend_increment"); mysqlDynamicVariables.add("innodb_background_drop_list_empty"); + mysqlDynamicVariables.add("innodb_btree_sampling"); mysqlDynamicVariables.add("innodb_buffer_pool_dump_at_shutdown"); mysqlDynamicVariables.add("innodb_buffer_pool_dump_now"); mysqlDynamicVariables.add("innodb_buffer_pool_dump_pct"); @@ -2683,20 +2997,26 @@ public class ServerVariables { mysqlDynamicVariables.add("innodb_cleanout_max_cleans_on_page"); mysqlDynamicVariables.add("innodb_cleanout_max_scans_on_page"); mysqlDynamicVariables.add("innodb_cleanout_mode"); + mysqlDynamicVariables.add("innodb_cleanout_safe_mode"); + mysqlDynamicVariables.add("innodb_cleanout_write_redo"); mysqlDynamicVariables.add("innodb_cmp_per_index_enabled"); mysqlDynamicVariables.add("innodb_commit_cleanout_max_rows"); mysqlDynamicVariables.add("innodb_commit_concurrency"); + mysqlDynamicVariables.add("innodb_commit_snapshot_search_enabled"); mysqlDynamicVariables.add("innodb_compress_debug"); mysqlDynamicVariables.add("innodb_compression_failure_threshold_pct"); mysqlDynamicVariables.add("innodb_compression_level"); mysqlDynamicVariables.add("innodb_compression_pad_pct_max"); mysqlDynamicVariables.add("innodb_concurrency_tickets"); + mysqlDynamicVariables.add("innodb_current_snapshot_seq"); mysqlDynamicVariables.add("innodb_data_file_purge"); mysqlDynamicVariables.add("innodb_data_file_purge_all_at_shutdown"); mysqlDynamicVariables.add("innodb_data_file_purge_immediate"); mysqlDynamicVariables.add("innodb_data_file_purge_interval"); mysqlDynamicVariables.add("innodb_data_file_purge_max_size"); + mysqlDynamicVariables.add("innodb_ddl_buffer_size"); mysqlDynamicVariables.add("innodb_ddl_log_crash_reset_debug"); + mysqlDynamicVariables.add("innodb_ddl_threads"); mysqlDynamicVariables.add("innodb_deadlock_detect"); mysqlDynamicVariables.add("innodb_default_row_format"); 
mysqlDynamicVariables.add("innodb_disable_sort_file_cache"); @@ -2711,6 +3031,8 @@ public class ServerVariables { mysqlDynamicVariables.add("innodb_flush_neighbors"); mysqlDynamicVariables.add("innodb_flush_sync"); mysqlDynamicVariables.add("innodb_flushing_avg_loops"); + mysqlDynamicVariables.add("innodb_freeze_db_if_no_cn_heartbeat_enable"); + mysqlDynamicVariables.add("innodb_freeze_db_if_no_cn_heartbeat_timeout_sec"); mysqlDynamicVariables.add("innodb_fsync_threshold"); mysqlDynamicVariables.add("innodb_ft_aux_table"); mysqlDynamicVariables.add("innodb_ft_enable_diag_print"); @@ -2764,6 +3086,7 @@ public class ServerVariables { mysqlDynamicVariables.add("innodb_rds_flashback_enabled"); mysqlDynamicVariables.add("innodb_read_ahead_threshold"); mysqlDynamicVariables.add("innodb_redo_log_archive_dirs"); + mysqlDynamicVariables.add("innodb_redo_log_capacity"); mysqlDynamicVariables.add("innodb_redo_log_encrypt"); mysqlDynamicVariables.add("innodb_replication_delay"); mysqlDynamicVariables.add("innodb_rollback_segments"); @@ -2797,6 +3120,7 @@ public class ServerVariables { mysqlDynamicVariables.add("innodb_transaction_group"); mysqlDynamicVariables.add("innodb_trx_purge_view_update_only_debug"); mysqlDynamicVariables.add("innodb_trx_rseg_n_slots_debug"); + mysqlDynamicVariables.add("innodb_txn_cached_list_keep_size"); mysqlDynamicVariables.add("innodb_txn_undo_page_reuse_max_percent"); mysqlDynamicVariables.add("innodb_undo_log_encrypt"); mysqlDynamicVariables.add("innodb_undo_log_truncate"); @@ -2805,6 +3129,7 @@ public class ServerVariables { mysqlDynamicVariables.add("innodb_undo_space_supremum_size"); mysqlDynamicVariables.add("innodb_undo_tablespaces"); mysqlDynamicVariables.add("innodb_use_fdatasync"); + mysqlDynamicVariables.add("innodb_vision_use_commit_snapshot_debug"); mysqlDynamicVariables.add("innodb_write_non_innodb_gtids"); mysqlDynamicVariables.add("insert_id"); mysqlDynamicVariables.add("interactive_timeout"); @@ -2833,10 +3158,12 @@ public class ServerVariables { mysqlDynamicVariables.add("keyring_okv_conf_dir"); mysqlDynamicVariables.add("keyring_operations"); mysqlDynamicVariables.add("kill_idle_transaction"); + mysqlDynamicVariables.add("kill_idle_transaction_timeout"); mysqlDynamicVariables.add("last_insert_id"); mysqlDynamicVariables.add("lc_messages"); mysqlDynamicVariables.add("lc_time_names"); mysqlDynamicVariables.add("local_infile"); + mysqlDynamicVariables.add("lock_instance_mode"); mysqlDynamicVariables.add("lock_wait_timeout"); mysqlDynamicVariables.add("log_bin_trust_function_creators"); mysqlDynamicVariables.add("log_bin_use_v1_row_events"); @@ -2860,6 +3187,7 @@ public class ServerVariables { mysqlDynamicVariables.add("long_query_time"); mysqlDynamicVariables.add("loose_innodb_prepare_wait_timeout"); mysqlDynamicVariables.add("low_priority_updates"); + mysqlDynamicVariables.add("maintain_max_connections"); mysqlDynamicVariables.add("maintain_user_list"); mysqlDynamicVariables.add("mandatory_roles"); mysqlDynamicVariables.add("master_info_repository"); @@ -2966,12 +3294,16 @@ public class ServerVariables { mysqlDynamicVariables.add("net_retry_count"); mysqlDynamicVariables.add("net_write_timeout"); mysqlDynamicVariables.add("new"); + mysqlDynamicVariables.add("new_rpc"); mysqlDynamicVariables.add("offline_mode"); mysqlDynamicVariables.add("old_alter_table"); mysqlDynamicVariables.add("only_report_warning_when_skip_sequence"); + mysqlDynamicVariables.add("opt_enable_rds_priv_strategy"); + mysqlDynamicVariables.add("opt_force_index_pct_cached"); 
mysqlDynamicVariables.add("opt_indexstat"); mysqlDynamicVariables.add("opt_outline_enabled"); mysqlDynamicVariables.add("opt_tablestat"); + mysqlDynamicVariables.add("optimizer_max_subgraph_pairs"); mysqlDynamicVariables.add("optimizer_prune_level"); mysqlDynamicVariables.add("optimizer_search_depth"); mysqlDynamicVariables.add("optimizer_switch"); @@ -2983,24 +3315,31 @@ public class ServerVariables { mysqlDynamicVariables.add("original_commit_timestamp"); mysqlDynamicVariables.add("original_server_version"); mysqlDynamicVariables.add("outline_allowed_sql_digest_truncate"); + mysqlDynamicVariables.add("outline_partitions"); mysqlDynamicVariables.add("parser_max_mem_size"); mysqlDynamicVariables.add("partial_revokes"); mysqlDynamicVariables.add("password_history"); mysqlDynamicVariables.add("password_require_current"); mysqlDynamicVariables.add("password_reuse_interval"); mysqlDynamicVariables.add("performance_point_dbug_enabled"); + mysqlDynamicVariables.add("performance_point_enabled"); + mysqlDynamicVariables.add("performance_point_iostat_interval"); + mysqlDynamicVariables.add("performance_point_iostat_volume_size"); mysqlDynamicVariables.add("performance_point_lock_rwlock_enabled"); mysqlDynamicVariables.add("performance_schema_max_digest_sample_age"); mysqlDynamicVariables.add("performance_schema_show_processlist"); + mysqlDynamicVariables.add("physical_backfill_opt"); mysqlDynamicVariables.add("polarx_connect_timeout"); mysqlDynamicVariables.add("polarx_max_allowed_packet"); mysqlDynamicVariables.add("polarx_max_connections"); + mysqlDynamicVariables.add("polarx_rpc_auto_cpu_affinity"); mysqlDynamicVariables.add("polarx_rpc_enable_epoll_in_tasker"); mysqlDynamicVariables.add("polarx_rpc_enable_kill_log"); mysqlDynamicVariables.add("polarx_rpc_enable_perf_hist"); mysqlDynamicVariables.add("polarx_rpc_enable_tasker"); mysqlDynamicVariables.add("polarx_rpc_enable_thread_pool_log"); mysqlDynamicVariables.add("polarx_rpc_epoll_events_per_thread"); + mysqlDynamicVariables.add("polarx_rpc_epoll_extra_groups"); mysqlDynamicVariables.add("polarx_rpc_epoll_group_ctx_refresh_time"); mysqlDynamicVariables.add("polarx_rpc_epoll_group_dynamic_threads"); mysqlDynamicVariables.add("polarx_rpc_epoll_group_dynamic_threads_shrink_time"); @@ -3008,19 +3347,30 @@ public class ServerVariables { mysqlDynamicVariables.add("polarx_rpc_epoll_group_tasker_multiply"); mysqlDynamicVariables.add("polarx_rpc_epoll_group_thread_deadlock_check_interval"); mysqlDynamicVariables.add("polarx_rpc_epoll_group_thread_scale_thresh"); + mysqlDynamicVariables.add("polarx_rpc_epoll_groups"); + mysqlDynamicVariables.add("polarx_rpc_epoll_threads_per_group"); mysqlDynamicVariables.add("polarx_rpc_epoll_timeout"); + mysqlDynamicVariables.add("polarx_rpc_epoll_work_queue_capacity"); + mysqlDynamicVariables.add("polarx_rpc_force_all_cores"); mysqlDynamicVariables.add("polarx_rpc_galaxy_protocol"); mysqlDynamicVariables.add("polarx_rpc_galaxy_version"); mysqlDynamicVariables.add("polarx_rpc_max_allowed_packet"); mysqlDynamicVariables.add("polarx_rpc_max_cached_output_buffer_pages"); + mysqlDynamicVariables.add("polarx_rpc_max_epoll_wait_total_threads"); mysqlDynamicVariables.add("polarx_rpc_max_queued_messages"); mysqlDynamicVariables.add("polarx_rpc_mcs_spin_cnt"); + mysqlDynamicVariables.add("polarx_rpc_min_auto_epoll_groups"); + mysqlDynamicVariables.add("polarx_rpc_multi_affinity_in_group"); mysqlDynamicVariables.add("polarx_rpc_net_write_timeout"); + mysqlDynamicVariables.add("polarx_rpc_request_cache_instances"); + 
mysqlDynamicVariables.add("polarx_rpc_request_cache_max_length"); + mysqlDynamicVariables.add("polarx_rpc_request_cache_number"); mysqlDynamicVariables.add("polarx_rpc_session_poll_rwlock_spin_cnt"); mysqlDynamicVariables.add("polarx_rpc_shared_session_lifetime"); mysqlDynamicVariables.add("polarx_rpc_skip_name_resolve"); mysqlDynamicVariables.add("polarx_rpc_tcp_fixed_dealing_buf"); mysqlDynamicVariables.add("polarx_rpc_tcp_keep_alive"); + mysqlDynamicVariables.add("polarx_rpc_tcp_listen_queue"); mysqlDynamicVariables.add("polarx_rpc_tcp_recv_buf"); mysqlDynamicVariables.add("polarx_rpc_tcp_send_buf"); mysqlDynamicVariables.add("preload_buffer_size"); @@ -3051,6 +3401,8 @@ public class ServerVariables { mysqlDynamicVariables.add("rds_audit_log_statement_policy"); mysqlDynamicVariables.add("rds_audit_log_strategy"); mysqlDynamicVariables.add("rds_audit_log_version"); + mysqlDynamicVariables.add("rds_kill_connections"); + mysqlDynamicVariables.add("rds_release_date"); mysqlDynamicVariables.add("rds_reserved_connections"); mysqlDynamicVariables.add("rds_version"); mysqlDynamicVariables.add("read_buffer_size"); @@ -3090,6 +3442,7 @@ public class ServerVariables { mysqlDynamicVariables.add("resultset_metadata"); mysqlDynamicVariables.add("rewriter_enabled"); mysqlDynamicVariables.add("rewriter_verbose"); + mysqlDynamicVariables.add("rotate_log_table"); mysqlDynamicVariables.add("rpl_read_size"); mysqlDynamicVariables.add("rpl_semi_sync_master_enabled"); mysqlDynamicVariables.add("rpl_semi_sync_master_timeout"); @@ -3114,6 +3467,7 @@ public class ServerVariables { mysqlDynamicVariables.add("select_into_buffer_size"); mysqlDynamicVariables.add("select_into_disk_sync"); mysqlDynamicVariables.add("select_into_disk_sync_delay"); + mysqlDynamicVariables.add("sequence_read_skip_cache"); mysqlDynamicVariables.add("server_id"); mysqlDynamicVariables.add("server_id_bits"); mysqlDynamicVariables.add("session_track_gtids"); @@ -3126,6 +3480,7 @@ public class ServerVariables { mysqlDynamicVariables.add("show_compatibility_56"); mysqlDynamicVariables.add("show_create_table_skip_secondary_engine"); mysqlDynamicVariables.add("show_create_table_verbosity"); + mysqlDynamicVariables.add("show_gipk_in_create_table_and_information_schema"); mysqlDynamicVariables.add("show_old_temporals"); mysqlDynamicVariables.add("slave_allow_batching"); mysqlDynamicVariables.add("slave_checkpoint_group"); @@ -3150,6 +3505,7 @@ public class ServerVariables { mysqlDynamicVariables.add("sql_auto_is_null"); mysqlDynamicVariables.add("sql_big_selects"); mysqlDynamicVariables.add("sql_buffer_result"); + mysqlDynamicVariables.add("sql_generate_invisible_primary_key"); mysqlDynamicVariables.add("sql_log_bin"); mysqlDynamicVariables.add("sql_log_off"); mysqlDynamicVariables.add("sql_mode"); @@ -3165,6 +3521,8 @@ public class ServerVariables { mysqlDynamicVariables.add("ssl_cert"); mysqlDynamicVariables.add("ssl_fips_mode"); mysqlDynamicVariables.add("ssl_key"); + mysqlDynamicVariables.add("ssl_session_cache_mode"); + mysqlDynamicVariables.add("ssl_session_cache_timeout"); mysqlDynamicVariables.add("stored_program_cache"); mysqlDynamicVariables.add("stored_program_definition_cache"); mysqlDynamicVariables.add("super_read_only"); @@ -3223,14 +3581,16 @@ public class ServerVariables { mysqlDynamicVariables.add("wait_timeout"); mysqlDynamicVariables.add("weak_consensus_mode"); mysqlDynamicVariables.add("windowing_use_high_precision"); + mysqlDynamicVariables.add("xa_detach_on_prepare"); + + // OFF/ON 切换需要经过中间状态,持久化有风险 
globalBannedVariables.add("gtid_mode"); - globalBannedVariables.add("max_allowed_packet"); - globalBannedVariables.add("max_user_connections"); - globalBannedVariables.add("max_connections"); + // CN 和 DN 重名,暂不支持设置: globalBannedVariables.add("autocommmit"); globalBannedVariables.add("read_only"); globalBannedVariables.add("auto_increment_increment"); + globalBannedVariables.add("super_write"); ImmutableSet.Builder modifiableTimerTaskVarBuilder = new ImmutableSet.Builder<>(); @@ -3256,6 +3616,12 @@ public static boolean contains(String variable) { return variables.contains(variable.toLowerCase()); } + /** + * Check if the DN variable could be set by SET SESSION + * + * @param variable name of the variable + * @return true if the variable could be set by SET SESSION + */ public static boolean isWritable(String variable) { return writableVariables.contains(variable.toLowerCase()); } @@ -3288,6 +3654,12 @@ public static boolean isMysqlSession(String variable) { return mysqlSessionVariables.contains(variable.toLowerCase()); } + /** + * Check if the DN variable could be set for MySQL + * + * @param variable name of the variable + * @return true if the variable could be set for MySQL + */ public static boolean isMysqlDynamic(String variable) { return mysqlDynamicVariables.contains(variable.toLowerCase()); } @@ -3295,4 +3667,12 @@ public static boolean isMysqlDynamic(String variable) { public static boolean isGlobalBanned(String variable) { return globalBannedVariables.contains(variable.toLowerCase()); } + + public static boolean isGlobalBlackList(String variable) { + return DynamicConfig.getInstance().getBlacklistConf().contains(variable.toLowerCase()); + } + + public static boolean isCdcGlobal(String variable) { + return StringUtils.startsWith(variable.toLowerCase(), CdcConstants.CONFIG_KEY_PREFIX); + } } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/constants/SystemTables.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/constants/SystemTables.java index ca1e7a22f..b2a8cd6c6 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/constants/SystemTables.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/constants/SystemTables.java @@ -44,6 +44,16 @@ public class SystemTables { public static final String DRDS_SYSTEM_LOCK = "__drds__system__lock__"; public static final String TXC_UNDO_LOG = "txc_undo_log"; public static final String POLARDBX_ASYNC_COMMIT_TX_LOG_TABLE = "polarx_global_trx_log"; + // Table A. + public static final String POLARDBX_GLOBAL_TX_LOG_TABLE = "`mysql`.`polarx_global_trx_log`"; + // Table B. + public static final String POLARDBX_GLOBAL_TX_LOG_TABLE_ARCHIVE = "`mysql`.`polarx_global_trx_log_archive`"; + // A tmp table when switching table A and table B. 
+ public static final String POLARDBX_GLOBAL_TX_LOG_TABLE_TMP = "`mysql`.`polarx_global_trx_log_tmp`"; + public static final String POLARDBX_GLOBAL_TX_LOG_TABLE_PREFIX = "polarx_global_trx_log"; + public static final String POLARDBX_GLOBAL_TX_LOG_TABLE_DB = "mysql"; + public static final String POLARDBX_GLOBAL_TX_LOG_TABLE_ARCHIVE_TABLE = "polarx_global_trx_log_archive"; + public static final String DRDS_REDO_LOG = "__drds_redo_log"; public static final String TDDL_RULE = "tddl_rule"; public static final String TDDL_RULE_STATUS = "tddl_rule_status"; diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/constants/TransactionAttribute.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/constants/TransactionAttribute.java index cb229ed5b..0bb0df180 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/constants/TransactionAttribute.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/constants/TransactionAttribute.java @@ -18,6 +18,8 @@ import com.alibaba.polardbx.common.jdbc.ITransactionPolicy; +import java.util.concurrent.atomic.AtomicLong; + public class TransactionAttribute { public static final IsolationLevel DEFAULT_ISOLATION_LEVEL = IsolationLevel.READ_COMMITTED; @@ -67,4 +69,22 @@ public class TransactionAttribute { public static final int DEFAULT_TSO_HEARTBEAT_INTERVAL = 60000; + public static final AtomicLong LAST_LOG_AUTO_SP_TIME = new AtomicLong(0); + public static final AtomicLong LAST_LOG_AUTO_SP_OPT_TIME = new AtomicLong(0); + public static final AtomicLong LAST_LOG_AUTO_SP_RELEASE = new AtomicLong(0); + public static final AtomicLong LAST_LOG_AUTO_SP_ROLLBACK = new AtomicLong(0); + public static final AtomicLong LAST_LOG_TRX_LOG_V2 = new AtomicLong(0); + public static final AtomicLong LAST_LOG_XA_TSO = new AtomicLong(0); + public static final AtomicLong LAST_LOG_AUTO_COMMIT_TSO = new AtomicLong(0); + + /** + * Default columnar TSO purge interval in milliseconds: 1 minute + */ + public static final int DEFAULT_COLUMNAR_TSO_PURGE_INTERVAL = 60000; + + /** + * Default columnar TSO update interval in milliseconds: 3 seconds + */ + public static final int DEFAULT_COLUMNAR_TSO_UPDATE_INTERVAL = 3000; + } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/Decimal.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/Decimal.java index 3443427c6..719cd3881 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/Decimal.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/Decimal.java @@ -45,6 +45,10 @@ public class Decimal extends Number implements Comparable<Decimal> { public static final Decimal MAX_UNSIGNED = Decimal.fromUnsigned(UInt64.fromLong(-1L)); + public static final int MAX_64_BIT_PRECISION = 18; + + public static final int MAX_128_BIT_PRECISION = 38; + private final DecimalStructure decimalStructure; public Decimal() { @@ -61,12 +65,7 @@ public Decimal(DecimalStructure decimalStructure) { public Decimal(long longVal, int scale) { this(new DecimalStructure()); - // parse long & set scale. - DecimalConverter.longToDecimal(longVal, this.decimalStructure); - // shift by scale value.
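
The `LAST_LOG_*` fields just added to `TransactionAttribute` read like last-logged timestamps for rate-limited logging. A minimal sketch of that pattern, assuming this intent (the `shouldLog` helper is illustrative and not part of the patch):

```java
import java.util.concurrent.atomic.AtomicLong;

// Assumed usage of the LAST_LOG_* timestamps: log a noisy event at most once
// per interval; the CAS keeps concurrent threads from logging it twice.
public final class ThrottledLog {
    private static final long INTERVAL_MS = 60_000;

    public static boolean shouldLog(AtomicLong lastLogTime) {
        long now = System.currentTimeMillis();
        long last = lastLogTime.get();
        return now - last >= INTERVAL_MS && lastLogTime.compareAndSet(last, now);
    }
}
```
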
- FastDecimalUtils.shift(this.decimalStructure, this.decimalStructure, -scale); - - FastDecimalUtils.round(this.decimalStructure, this.decimalStructure, scale, HALF_UP); + this.decimalStructure.setLongWithScale(longVal, scale); } public static Decimal fromBigDecimal(BigDecimal bd) { @@ -214,4 +213,34 @@ public DecimalStructure getDecimalStructure() { public Decimal copy() { return new Decimal(decimalStructure.copy()); } + + /** + * Shift by newScale and get the decimal64 value. + * Warning 1: this changes the internal data of the current decimal value! + * Warning 2: the resulting long value is inaccurate. + */ + public long unscaleInternal(int newScale) { + if (newScale != 0) { + FastDecimalUtils.shift(this.getDecimalStructure(), this.getDecimalStructure(), + newScale); + } + return this.longValue(); + } + + /** + * Get the unscaled long value of the current decimal. + */ + public long unscale() { + return unscale(new DecimalStructure()); + } + + public long unscale(DecimalStructure bufferStructure) { + Decimal decimal = this; + if (scale() != 0) { + decimal = new Decimal(bufferStructure); + FastDecimalUtils.shift(this.getDecimalStructure(), bufferStructure, + this.scale()); + } + return decimal.longValue(); + } } \ No newline at end of file diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/DecimalBox.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/DecimalBox.java index de5966b59..c1d29d867 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/DecimalBox.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/DecimalBox.java @@ -30,14 +30,17 @@ public class DecimalBox { private int carry; private boolean isSumZero; + private int scale; - public DecimalBox() { + public DecimalBox(int scale) { sum = new DecimalStructure(); intVal1 = 0; intVal2 = 0; fracVal = 0; carry = 0; isSumZero = true; + + this.scale = scale; } public void add(int a1, int a2, int b) { @@ -173,6 +176,9 @@ private void doAddToSum4(long a1, long a2, long b) { } private int countFractions(long b) { + if (scale > 0) { + return scale; + } if (b == 0) { return 0; } @@ -187,4 +193,8 @@ private int countFractions(long b) { } return 9; } + + public void setScale(int scale) { + this.scale = scale; + } } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/DecimalConverter.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/DecimalConverter.java index 8b917b41d..6893ae5d1 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/DecimalConverter.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/DecimalConverter.java @@ -17,23 +17,60 @@ package com.alibaba.polardbx.common.datatype; import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.common.utils.binlog.LogBuffer; import com.alibaba.polardbx.common.utils.time.parser.StringNumericParser; import com.google.common.primitives.UnsignedLongs; import io.airlift.slice.Slice; -import java.nio.charset.Charset; - -import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.*; +import java.math.BigDecimal; +import java.math.RoundingMode; + +import static com.alibaba.polardbx.common.datatype.Decimal.MAX_128_BIT_PRECISION; +import static com.alibaba.polardbx.common.datatype.Decimal.MAX_64_BIT_PRECISION; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.BINARY_SIZE; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DIG_BASE; +import static
com.alibaba.polardbx.common.datatype.DecimalTypeBase.DIG_MASK; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DIG_PER_DEC1; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DIG_TO_BYTES; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.E_DEC_BAD_NUM; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.E_DEC_DIV_ZERO; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.E_DEC_ERROR; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.E_DEC_OK; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.E_DEC_OOM; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.E_DEC_OVERFLOW; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.E_DEC_TRUNCATED; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.MAX_DECIMAL_PRECISION; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.MAX_VALUE_IN_WORDS; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.POW_10; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.WORDS_LEN; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.roundUp; +import static com.alibaba.polardbx.common.utils.binlog.LogBuffer.DIG_MAX; +import static com.alibaba.polardbx.common.utils.binlog.LogBuffer.SIZE_OF_INT32; +import static com.alibaba.polardbx.common.utils.binlog.LogBuffer.dig2bytes; public class DecimalConverter { public static final DecimalStructure UNSIGNED_ZERO = new DecimalStructure(); public static final DecimalStructure SIGNED_ZERO = new DecimalStructure(); + private static final long UNSIGNED_MAX_LONG = 0xffffffffffffffffL; + private static final long MAX_UNSIGNED_LONG_DIV_DIG_BASE = UnsignedLongs.divide(UNSIGNED_MAX_LONG, DIG_BASE); static { unsignedlongToDecimal(0L, UNSIGNED_ZERO); longToDecimal(0L, SIGNED_ZERO); } + public static boolean isDecimal64(int precision) { + return precision <= MAX_64_BIT_PRECISION && precision > 0; + } + + public static boolean isDecimal128(int precision) { + return precision <= MAX_128_BIT_PRECISION && precision > 0; + } + + public static boolean isDecimal64(Decimal decimal) { + return isDecimal64(decimal.precision()); + } + /** * Convert decimal to its binary fixed-length representation * two representations of the same length can be compared with memcmp @@ -681,9 +718,6 @@ public static long[] decimal2Long(DecimalStructure from) { return new long[] {to, E_DEC_OK}; } - private static final long UNSIGNED_MAX_LONG = 0xffffffffffffffffL; - private static final long MAX_UNSIGNED_LONG_DIV_DIG_BASE = UnsignedLongs.divide(UNSIGNED_MAX_LONG, DIG_BASE); - /** * Convert decimal to unsigned long value. 
* @@ -917,4 +951,216 @@ public static void rescale(DecimalStructure from, DecimalStructure to, int preci boundValue.copyTo(to); } } + + public static long getUnscaledDecimal(byte[] buffer, int precision, int scale) { + int position = 0; + int origin = 0; + int limit = buffer.length; + int intg = precision - scale; + int intg0 = intg / 9; + int frac0 = scale / 9; + int intg0x = intg - intg0 * 9; + int frac0x = scale - frac0 * 9; + int binSize = intg0 * 4 + dig2bytes[intg0x] + frac0 * 4 + dig2bytes[frac0x]; + if (position + binSize > origin + limit) { + throw new IllegalArgumentException("limit exceeded: " + (position + binSize - origin)); + } else { + BigDecimal decimal = + new BigDecimal(getDecimalString(buffer, position, intg, scale, intg0, frac0, intg0x, frac0x)) + .setScale(scale, RoundingMode.HALF_UP); + return decimal.unscaledValue().longValue(); + } + } + + public static Decimal getDecimal(byte[] buffer, int precision, int scale) { + int position = 0; + int origin = 0; + int limit = buffer.length; + int intg = precision - scale; + int intg0 = intg / 9; + int frac0 = scale / 9; + int intg0x = intg - intg0 * 9; + int frac0x = scale - frac0 * 9; + int binSize = intg0 * 4 + dig2bytes[intg0x] + frac0 * 4 + dig2bytes[frac0x]; + if (position + binSize > origin + limit) { + throw new IllegalArgumentException("limit exceeded: " + (position + binSize - origin)); + } else { + String str = getDecimalString(buffer, position, intg, scale, intg0, frac0, intg0x, frac0x); + return Decimal.fromString(str); + } + } + + /** + * TODO Optimize getting unscaled long + * + * @see LogBuffer#getDecimal0(int, int, int, int, int, int, int) + */ + static String getDecimalString(byte[] buffer, int begin, int intg, int frac, int intg0, int frac0, + int intg0x, int frac0x) { + final int mask = ((buffer[begin] & 0x80) == 0x80) ? 0 : -1; + int from = begin; + + /* max string length */ + final int len = ((mask != 0) ? 1 : 0) + ((intg != 0) ? intg : 1) // NL + + ((frac != 0) ?
1 : 0) + frac; + char[] buf = new char[len]; + int pos = 0; + + if (mask != 0) /* decimal sign */ { + buf[pos++] = ('-'); + } + + final byte[] d_copy = buffer; + d_copy[begin] ^= 0x80; /* clear sign */ + int mark = pos; + + if (intg0x != 0) { + final int i = dig2bytes[intg0x]; + int x = 0; + switch (i) { + case 1: + x = d_copy[from] /* one byte */; + break; + case 2: + x = getInt16BE(d_copy, from); + break; + case 3: + x = getInt24BE(d_copy, from); + break; + case 4: + x = getInt32BE(d_copy, from); + break; + } + from += i; + x ^= mask; + if (x < 0 || x >= POW_10[intg0x + 1]) { + throw new IllegalArgumentException("bad format, x exceed: " + x + ", " + POW_10[intg0x + 1]); + } + if (x != 0 /* !digit || x != 0 */) { + for (int j = intg0x; j > 0; j--) { + final int divisor = POW_10[j - 1]; + final int y = x / divisor; + if (mark < pos || y != 0) { + buf[pos++] = ((char) ('0' + y)); + } + x -= y * divisor; + } + } + } + + for (final int stop = from + intg0 * SIZE_OF_INT32; from < stop; from += SIZE_OF_INT32) { + int x = getInt32BE(d_copy, from); + x ^= mask; + if (x < 0 || x > DIG_MAX) { + throw new IllegalArgumentException("bad format, x exceed: " + x + ", " + DIG_MAX); + } + if (x != 0) { + if (mark < pos) { + for (int i = DIG_PER_DEC1; i > 0; i--) { + final int divisor = POW_10[i - 1]; + final int y = x / divisor; + buf[pos++] = ((char) ('0' + y)); + x -= y * divisor; + } + } else { + for (int i = DIG_PER_DEC1; i > 0; i--) { + final int divisor = POW_10[i - 1]; + final int y = x / divisor; + if (mark < pos || y != 0) { + buf[pos++] = ((char) ('0' + y)); + } + x -= y * divisor; + } + } + } else if (mark < pos) { + for (int i = DIG_PER_DEC1; i > 0; i--) { + buf[pos++] = ('0'); + } + } + } + + if (mark == pos) + /* fix 0.0 problem, only '.' may cause BigDecimal parsing exception. 
*/ { + buf[pos++] = ('0'); + } + + if (frac > 0) { + buf[pos++] = ('.'); + mark = pos; + + for (final int stop = from + frac0 * SIZE_OF_INT32; from < stop; from += SIZE_OF_INT32) { + int x = getInt32BE(d_copy, from); + x ^= mask; + if (x < 0 || x > DIG_MAX) { + throw new IllegalArgumentException("bad format, x exceed: " + x + ", " + DIG_MAX); + } + if (x != 0) { + for (int i = DIG_PER_DEC1; i > 0; i--) { + final int divisor = POW_10[i - 1]; + final int y = x / divisor; + buf[pos++] = ((char) ('0' + y)); + x -= y * divisor; + } + } else { + for (int i = DIG_PER_DEC1; i > 0; i--) { + buf[pos++] = ('0'); + } + } + } + + if (frac0x != 0) { + final int i = dig2bytes[frac0x]; + int x = 0; + switch (i) { + case 1: + x = d_copy[from] /* one byte */; + break; + case 2: + x = getInt16BE(d_copy, from); + break; + case 3: + x = getInt24BE(d_copy, from); + break; + case 4: + x = getInt32BE(d_copy, from); + break; + } + x ^= mask; + if (x != 0) { + final int dig = DIG_PER_DEC1 - frac0x; + x *= POW_10[dig]; + if (x < 0 || x > DIG_MAX) { + throw new IllegalArgumentException("bad format, x exceed: " + x + ", " + DIG_MAX); + } + for (int j = DIG_PER_DEC1; j > dig; j--) { + final int divisor = POW_10[j - 1]; + final int y = x / divisor; + buf[pos++] = ((char) ('0' + y)); + x -= y * divisor; + } + } + } + + if (mark == pos) + /* make number more friendly */ { + buf[pos++] = ('0'); + } + } + + d_copy[begin] ^= 0x80; /* restore sign */ + return String.valueOf(buf, 0, pos); + } + + private static int getInt16BE(byte[] buffer, int pos) { + return buffer[pos] << 8 | 255 & buffer[pos + 1]; + } + + private static int getInt24BE(byte[] buffer, int pos) { + return buffer[pos] << 16 | (255 & buffer[pos + 1]) << 8 | 255 & buffer[pos + 2]; + } + + private static int getInt32BE(byte[] buffer, int pos) { + return buffer[pos] << 24 | (255 & buffer[pos + 1]) << 16 | (255 & buffer[pos + 2]) << 8 | 255 & buffer[pos + 3]; + } + } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/DecimalStructure.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/DecimalStructure.java index c7569658a..c79db0f85 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/DecimalStructure.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/DecimalStructure.java @@ -22,14 +22,19 @@ import java.io.Serializable; +import static com.alibaba.polardbx.common.datatype.DecimalRoundMod.HALF_UP; public class DecimalStructure extends DecimalTypeBase implements Serializable { private Slice decimalMemorySegment; + public static Slice allocateDecimalSlice() { + return Slices.allocate(DECIMAL_MEMORY_SIZE); + } + public DecimalStructure() { - decimalMemorySegment = Slices.allocate(DECIMAL_MEMORY_SIZE); + decimalMemorySegment = allocateDecimalSlice(); } public DecimalStructure(Slice decimalMemorySegment) { @@ -248,4 +253,14 @@ public int toDiv(DivStructure div) { public Slice getDecimalMemorySegment() { return decimalMemorySegment; } + + public void setLongWithScale(long longVal, int scale) { + this.reset(); + // parse long & set scale. + DecimalConverter.longToDecimal(longVal, this); + // shift by scale value. 
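
Stepping back to `getUnscaledDecimal`/`getDecimal` above: both begin by computing the size of MySQL's packed decimal layout, in which every full group of 9 decimal digits occupies a 4-byte base-10^9 word and any leftover digits take 1 to 4 bytes according to the `dig2bytes` table. A self-contained sketch of that size arithmetic (the `DIG2BYTES` values mirror MySQL's table):

```java
// Binary size of a MySQL packed DECIMAL(precision, scale), mirroring the
// intg0/frac0 arithmetic at the top of getUnscaledDecimal above.
public class DecimalBinSize {
    // Bytes needed for 0..9 leftover decimal digits (MySQL's dig2bytes table).
    static final int[] DIG2BYTES = {0, 1, 1, 2, 2, 3, 3, 4, 4, 4};

    static int binSize(int precision, int scale) {
        int intg = precision - scale;                              // integer digits
        int intg0 = intg / 9, frac0 = scale / 9;                   // full 9-digit words
        int intg0x = intg - intg0 * 9, frac0x = scale - frac0 * 9; // leftover digits
        return intg0 * 4 + DIG2BYTES[intg0x] + frac0 * 4 + DIG2BYTES[frac0x];
    }

    public static void main(String[] args) {
        System.out.println(binSize(14, 4)); // DECIMAL(14,4) packs into 7 bytes
    }
}
```
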
+ FastDecimalUtils.shift(this, this, -scale); + + FastDecimalUtils.round(this, this, scale, HALF_UP); + } } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/DecimalTypeBase.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/DecimalTypeBase.java index b8220b547..1f1b136f7 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/DecimalTypeBase.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/DecimalTypeBase.java @@ -17,6 +17,8 @@ package com.alibaba.polardbx.common.datatype; public class DecimalTypeBase { + public static final int DEFAULT_SCALE = 0; + // for divide precision. public final static int DEFAULT_DIV_PRECISION_INCREMENT = 4; public final static String DIV_PRECISION_INCREMENT = "div_precision_increment"; @@ -34,6 +36,9 @@ public class DecimalTypeBase { public static final int E_DEC_OOM = 16; public static final int E_DEC_ERROR = 31; public static final int E_DEC_FATAL_ERROR = 30; + // decimal sum result + public static final int E_DEC_DEC64 = 32; + public static final int E_DEC_DEC128 = 33; public static final int DIV_PRECISION_INCREMENT_DEFAULT = 4; public static final byte[] BUFF_OFFSETS = { @@ -90,7 +95,7 @@ public class DecimalTypeBase { /** * Get the pow(10, position). */ - protected static int[] POW_10 = { + public static int[] POW_10 = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000}; /** diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/FastDecimalUtils.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/FastDecimalUtils.java index 3c381abdf..ac84e4616 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/FastDecimalUtils.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/FastDecimalUtils.java @@ -16,6 +16,12 @@ package com.alibaba.polardbx.common.datatype; +import com.alibaba.polardbx.common.utils.BigDecimalUtil; +import com.google.common.annotations.VisibleForTesting; + +import java.math.BigInteger; + +import static com.alibaba.polardbx.common.datatype.Decimal.MAX_128_BIT_PRECISION; import static com.alibaba.polardbx.common.datatype.DecimalRoundMod.HALF_UP; import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DIG_BASE; import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DIG_MASK; @@ -185,6 +191,102 @@ public static int round(DecimalStructure from, DecimalStructure to, int scale, D return doDecimalRound(from, to, scale, mode); } + /** + * @param buffer Buffer pre-allocated by caller + */ + public static int setLongWithScale(DecimalStructure buffer, DecimalStructure result, + long longVal, int scale) { + buffer.reset(); + // parse long & set scale. + DecimalConverter.longToDecimal(longVal, buffer); + // shift by scale value. + shift(buffer, buffer, -scale); + + return round(buffer, result, scale, HALF_UP); + } + + /** + * @param lowBits unsigned long + * @param buffer Buffer pre-allocated by caller + */ + public static int setDecimal128WithScale(DecimalStructure buffer, DecimalStructure result, + long lowBits, long highBits, int scale) { + buffer.reset(); + result.reset(); + byte[] int128Bytes = BigDecimalUtil.fastInt128ToBytes(lowBits, highBits); + DecimalConverter.parseString(int128Bytes, buffer, false); + // shift by scale value. 
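
`setLongWithScale` (both the `DecimalStructure` method above and the `FastDecimalUtils` overload being added here) builds a decimal from an unscaled long by shifting its digits right by `scale` and rounding, i.e. the inverse of `Decimal.unscale()` from earlier in the patch. The same relationship expressed in plain `BigDecimal` terms:

```java
import java.math.BigDecimal;

// BigDecimal analogue of setLongWithScale / unscale:
// (unscaled 12345, scale 2) <-> 123.45.
public class LongWithScaleDemo {
    public static void main(String[] args) {
        BigDecimal value = BigDecimal.valueOf(12345L, 2);       // 123.45
        long unscaled = value.unscaledValue().longValueExact(); // 12345
        System.out.println(value + " <-> " + unscaled);
    }
}
```
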
+ shift(buffer, buffer, -scale); + return round(buffer, result, scale, HALF_UP); + } + + @VisibleForTesting + public static long[] convertToDecimal128(Decimal decimal) { + long[] decimal128 = new long[2]; + convertToDecimal128(decimal, decimal128); + return decimal128; + } + + @VisibleForTesting + public static Decimal convert128ToDecimal(long[] decimal128, int scale) { + DecimalStructure buffer = new DecimalStructure(); + DecimalStructure result = new DecimalStructure(); + setDecimal128WithScale(buffer, result, decimal128[0], decimal128[1], scale); + return new Decimal(result); + } + + /** + * low performance, should be used in test only + */ + @VisibleForTesting + public static void convertToDecimal128(Decimal decimal, long[] result) { + if (!DecimalConverter.isDecimal128(decimal.precision())) { + throw new IllegalArgumentException("Decimal precision: " + decimal.precision() + + " exceeds range of decimal128: " + MAX_128_BIT_PRECISION); + } + DecimalStructure bufferStructure = new DecimalStructure(); + Decimal unscaledDecimal = decimal; + if (decimal.scale() != 0) { + unscaledDecimal = new Decimal(bufferStructure); + FastDecimalUtils.shift(decimal.getDecimalStructure(), bufferStructure, + decimal.scale()); + } + BigInteger bigInteger; + if (decimal.getDecimalStructure().isZero()) { + bigInteger = BigInteger.ZERO; + } else { + bigInteger = new BigInteger(unscaledDecimal.toString()); + } + boolean isNeg = bigInteger.signum() < 0; + bigInteger = bigInteger.abs(); + long low = 0L, high = 0L; + byte[] byteArray = bigInteger.toByteArray(); + if (byteArray.length <= 8) { + for (int i = 0; i < byteArray.length; i++) { + low = (low << 8) | (byteArray[i] & 0xFF); + } + } else if (byteArray.length <= 16) { + int lowStart = byteArray.length - 8; + for (int i = 0; i < lowStart; i++) { + high = (high << 8) | (byteArray[i] & 0xFF); + } + for (int i = lowStart; i < byteArray.length; i++) { + low = (low << 8) | (byteArray[i] & 0xFF); + } + } else { + throw new IllegalArgumentException("Decimal representation is larger than 128 bits"); + } + if (isNeg) { + low = ~low + 1; + high = ~high; + if (low == 0) { + high += 1; + } + } + result[0] = low; + result[1] = high; + } + protected static int doAdd(DecimalStructure from1, DecimalStructure from2, DecimalStructure to) { int error; int from1IntWords = DecimalTypeBase.roundUp(from1.getIntegers()); diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/RawBytesDecimalUtils.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/RawBytesDecimalUtils.java index b18177fe3..b6db2ffe3 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/RawBytesDecimalUtils.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/RawBytesDecimalUtils.java @@ -62,6 +62,27 @@ public static int hashCode(Slice memorySegment) { ? -result : result; } + public static int hashCode(long decimal64, int scale) { + if (decimal64 >= DIG_MASK || decimal64 < 0 || scale >= 9) { + // go to normal method + return 0; + } + + int fractionVal = (int) ((decimal64 % POW_10[scale]) * POW_10[9 - scale]); + int integerVal = (int) (decimal64 / POW_10[scale]); + int result = 1; + if (integerVal != 0) { + result = 31 * result + integerVal; + } + if (fractionVal != 0) { + result = 31 * result + fractionVal; + } + return result; + } + + /** + * compare two decimal values in format of raw bytes. 
+ */ public static boolean equals(Slice left, Slice right) { Preconditions.checkPositionIndexes(FRACTIONS_OFFSET, FRACTIONS_OFFSET + 1, left.length()); @@ -71,10 +92,10 @@ public static boolean equals(Slice left, Slice right) { return false; } - int leftIntWords = roundUp(((int)left.getByteUnchecked(INTEGERS_OFFSET) & 0xFF)); - int leftFracWords = roundUp(((int)left.getByteUnchecked(FRACTIONS_OFFSET) & 0xFF)); - int rightIntWords = roundUp(((int)right.getByteUnchecked(INTEGERS_OFFSET) & 0xFF)); - int rightFracWords = roundUp(((int)right.getByteUnchecked(FRACTIONS_OFFSET) & 0xFF)); + int leftIntWords = roundUp(((int) left.getByteUnchecked(INTEGERS_OFFSET) & 0xFF)); + int leftFracWords = roundUp(((int) left.getByteUnchecked(FRACTIONS_OFFSET) & 0xFF)); + int rightIntWords = roundUp(((int) right.getByteUnchecked(INTEGERS_OFFSET) & 0xFF)); + int rightFracWords = roundUp(((int) right.getByteUnchecked(FRACTIONS_OFFSET) & 0xFF)); int endPos1 = leftIntWords, endPos2 = rightIntWords; int bufPos1 = 0, bufPos2 = 0; diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/RowValue.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/RowValue.java new file mode 100644 index 000000000..fc536d9ca --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/datatype/RowValue.java @@ -0,0 +1,43 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.common.datatype; + +import java.util.List; + +public class RowValue { + + private List values; + + public RowValue(List values) { + this.values = values; + } + + public List getValues() { + return values; + } + + public void setValues(List values) { + this.values = values; + } + + @Override + public String toString() { + return "RowValue{" + + "values=" + values + + '}'; + } +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/ddl/Job.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/ddl/Job.java index 40ed8bb1f..9d4cfb90e 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/ddl/Job.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/ddl/Job.java @@ -25,7 +25,8 @@ public enum JobSource { public enum JobType { UNSUPPORTED, CREATE_TABLE, ALTER_TABLE, DROP_TABLE, RENAME_TABLE, TRUNCATE_TABLE, CREATE_INDEX, DROP_INDEX, CREATE_GLOBAL_INDEX, ALTER_GLOBAL_INDEX, DROP_GLOBAL_INDEX, RENAME_GLOBAL_INDEX, CHECK_GLOBAL_INDEX, - MOVE_TABLE, ALTER_TABLEGROUP, DRAIN_NODE, ALTER_TABLE_SET_TABLEGROUP, ALTER_TABLEGROUP_ADD_TABLE, + CHECK_COLUMNAR_INDEX, MOVE_TABLE, ALTER_TABLEGROUP, DRAIN_NODE, ALTER_TABLE_SET_TABLEGROUP, + ALTER_TABLEGROUP_ADD_TABLE, MERGE_TABLEGROUP } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/ddl/foreignkey/ForeignKeyData.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/ddl/foreignkey/ForeignKeyData.java index 5fe2c13df..e68b1a83b 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/ddl/foreignkey/ForeignKeyData.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/ddl/foreignkey/ForeignKeyData.java @@ -47,6 +47,24 @@ public class ForeignKeyData { public static final String FOREIGN_KEY_CHECKS = "foreign_key_checks"; + public ForeignKeyData() { + } + + public ForeignKeyData(String schema, String tableName, String constraint, String indexName, List columns, + String refSchema, String refTableName, List refColumns, ReferenceOptionType onDelete, + ReferenceOptionType onUpdate) { + this.schema = schema; + this.tableName = tableName; + this.constraint = constraint; + this.indexName = indexName; + this.columns = columns; + this.refSchema = refSchema; + this.refTableName = refTableName; + this.refColumns = refColumns; + this.onDelete = onDelete; + this.onUpdate = onUpdate; + } + public static enum ReferenceOptionType { RESTRICT("RESTRICT"), CASCADE("CASCADE"), SET_NULL("SET NULL"), SET_DEFAULT("SET DEFAULT"), NO_ACTION("NO ACTION"); diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/ddl/newengine/DdlPlanState.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/ddl/newengine/DdlPlanState.java index 0c2ff7381..125b822fb 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/ddl/newengine/DdlPlanState.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/ddl/newengine/DdlPlanState.java @@ -21,7 +21,7 @@ public enum DdlPlanState { INIT, EXECUTING, SUCCESS, - TERMINATED - ; + TERMINATED, + PAUSE_ON_NON_MAINTENANCE_WINDOW; } \ No newline at end of file diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/ddl/newengine/DdlType.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/ddl/newengine/DdlType.java index 37c40eb4c..a0a9f0e83 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/ddl/newengine/DdlType.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/ddl/newengine/DdlType.java @@ -37,6 
+37,9 @@ public enum DdlType { ALTER_FUNCTION, PUSH_DOWN_UDF, + CREATE_VIEW, + ALTER_VIEW, + DROP_VIEW, CREATE_JAVA_FUNCTION, DROP_JAVA_FUNCTION, @@ -45,6 +48,7 @@ public enum DdlType { DROP_GLOBAL_INDEX, RENAME_GLOBAL_INDEX, CHECK_GLOBAL_INDEX, + CHECK_COLUMNAR_INDEX, MOVE_DATABASE, REBALANCE, @@ -57,8 +61,23 @@ public enum DdlType { ALTER_TABLE_SET_TABLEGROUP, ALTER_TABLEGROUP_ADD_TABLE, + ALTER_TABLE_RENAME_PARTITION, + + ALTER_TABLE_ADD_COLUMN, + ALTER_TABLE_DROP_COLUMN, + ALTER_TABLE_MODIFY_COLUMN, + ALTER_TABLE_CHANGE_COLUMN, + MERGE_TABLEGROUP; + public enum AlterColumnSpecification { + AlterColumnName, + AlterColumnType, + AlterColumnDefault, + AlterColumnComment, + AlterColumnOrder + } + public static boolean needDefaultDdlShareLock(DdlType type) { if (type == null) { return true; diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/EncdbException.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/EncdbException.java new file mode 100644 index 000000000..305f2a5ca --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/EncdbException.java @@ -0,0 +1,42 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.common.encdb; + +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; + +/** + * @author pangzhaoxing + */ +public class EncdbException extends TddlRuntimeException { + + public EncdbException(String... params) { + super(ErrorCode.ERR_ENCDB, params); + } + + public EncdbException(String param, Throwable e) { + super(ErrorCode.ERR_ENCDB, param, e); + } + + public EncdbException(Throwable e) { + super(ErrorCode.ERR_ENCDB, e.getMessage(), e); + } + + public EncdbException(ErrorCode errorCode, String... params) { + super(errorCode, params); + } +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/cipher/AsymCrypto.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/cipher/AsymCrypto.java new file mode 100644 index 000000000..3704dcc24 --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/cipher/AsymCrypto.java @@ -0,0 +1,526 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.common.encdb.cipher; + +import com.alibaba.polardbx.common.encdb.EncdbException; +import com.alibaba.polardbx.common.encdb.enums.AsymmAlgo; +import com.alibaba.polardbx.common.encdb.utils.RSAUtil; +import com.google.common.primitives.Bytes; +import org.bouncycastle.asn1.ASN1EncodableVector; +import org.bouncycastle.asn1.ASN1InputStream; +import org.bouncycastle.asn1.ASN1Integer; +import org.bouncycastle.asn1.ASN1OctetString; +import org.bouncycastle.asn1.ASN1Sequence; +import org.bouncycastle.asn1.ASN1SequenceParser; +import org.bouncycastle.asn1.BEROctetString; +import org.bouncycastle.asn1.DERSequence; +import org.bouncycastle.asn1.pkcs.PrivateKeyInfo; +import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo; +import org.bouncycastle.crypto.CryptoException; +import org.bouncycastle.crypto.InvalidCipherTextException; +import org.bouncycastle.crypto.engines.SM2Engine; +import org.bouncycastle.crypto.params.ECDomainParameters; +import org.bouncycastle.crypto.params.ECPrivateKeyParameters; +import org.bouncycastle.crypto.params.ECPublicKeyParameters; +import org.bouncycastle.crypto.params.ParametersWithRandom; +import org.bouncycastle.crypto.signers.SM2Signer; +import org.bouncycastle.jcajce.provider.asymmetric.ec.BCECPrivateKey; +import org.bouncycastle.jcajce.provider.asymmetric.ec.BCECPublicKey; +import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.bouncycastle.math.ec.ECPoint; +import org.bouncycastle.math.ec.custom.gm.SM2P256V1Curve; +import org.bouncycastle.openssl.PEMKeyPair; +import org.bouncycastle.openssl.PEMParser; +import org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter; +import org.bouncycastle.util.encoders.Hex; + +import javax.crypto.Cipher; +import javax.crypto.SecretKey; +import javax.crypto.spec.SecretKeySpec; +import java.io.BufferedReader; +import java.io.IOException; +import java.io.Reader; +import java.io.StringReader; +import java.math.BigInteger; +import java.security.InvalidAlgorithmParameterException; +import java.security.InvalidKeyException; +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.security.NoSuchAlgorithmException; +import java.security.NoSuchProviderException; +import java.security.PrivateKey; +import java.security.PublicKey; +import java.security.SecureRandom; +import java.security.Security; +import java.security.Signature; +import java.security.SignatureException; +import java.security.spec.ECGenParameterSpec; +import java.security.spec.InvalidKeySpecException; +import java.security.spec.MGF1ParameterSpec; +import java.security.spec.PSSParameterSpec; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class AsymCrypto { + private final static int MAX_SM2_ENCRYPTION_RETRY = 10; + + static { + if (Security.getProvider(BouncyCastleProvider.PROVIDER_NAME) == null) { + Security.addProvider(new BouncyCastleProvider()); + } + } + + public static KeyPair generateAsymKeyPair(AsymmAlgo asymmAlgo, int keySize) { + switch (asymmAlgo) { + case RSA: + return generateRsaKeyPair(keySize); + case SM2: + return generateSm2KeyPair(); + default: + throw new EncdbException("unsupported asymmetric algorithm"); + } + } + + /** + * SM2算法生成密钥对 + */ + public static KeyPair generateSm2KeyPair() { + try { + final ECGenParameterSpec sm2Spec = new ECGenParameterSpec("sm2p256v1"); + // 获取一个椭圆曲线类型的密钥对生成器 + final KeyPairGenerator kpg = KeyPairGenerator.getInstance("EC", new BouncyCastleProvider()); + SecureRandom random = new SecureRandom(); + // 
Initialize the key pair generator with the SM2 curve parameters
+            kpg.initialize(sm2Spec, random);
+            // Generate the key pair
+            KeyPair keyPair = kpg.generateKeyPair();
+            return keyPair;
+        } catch (Exception e) {
+            throw new EncdbException(e);
+        }
+    }
+
+    /**
+     * Generate an RSA KeyPair
+     */
+    public static KeyPair generateRsaKeyPair(int keySize) {
+        try {
+            KeyPairGenerator kpGen = KeyPairGenerator.getInstance("RSA");
+            kpGen.initialize(keySize, new SecureRandom());
+            return kpGen.generateKeyPair();
+        } catch (Exception e) {
+            throw new EncdbException(e);
+        }
+    }
+
+    private static PrivateKey loadPrivateKey(final Reader reader)
+        throws IOException, NoSuchAlgorithmException, InvalidKeySpecException {
+        try (PEMParser pemParser = new PEMParser(reader)) {
+            Object readObject = pemParser.readObject();
+            while (readObject != null) {
+                PrivateKeyInfo privateKeyInfo = getPrivateKeyInfoOrNull(readObject);
+                if (privateKeyInfo != null) {
+                    return new JcaPEMKeyConverter().getPrivateKey(privateKeyInfo);
+                }
+                readObject = pemParser.readObject();
+            }
+        }
+
+        return null;
+    }
+
+    /**
+     * Find a PrivateKeyInfo in the PEM object details. Returns null if the PEM
+     * object type is unknown.
+     */
+    private static PrivateKeyInfo getPrivateKeyInfoOrNull(Object pemObject) throws NoSuchAlgorithmException {
+        PrivateKeyInfo privateKeyInfo = null;
+        if (pemObject instanceof PEMKeyPair) {
+            PEMKeyPair pemKeyPair = (PEMKeyPair) pemObject;
+            privateKeyInfo = pemKeyPair.getPrivateKeyInfo();
+        } else if (pemObject instanceof PrivateKeyInfo) {
+            privateKeyInfo = (PrivateKeyInfo) pemObject;
+        } else {
+            System.err.printf("Unknown object '%s' from PEMParser\n", pemObject);
+        }
+
+        return privateKeyInfo;
+    }
+
+    private static PrivateKey importPrivateKey(final String keypem)
+        throws IOException, NoSuchAlgorithmException, InvalidKeySpecException {
+        try (StringReader certReader = new StringReader(keypem);
+            BufferedReader reader = new BufferedReader(certReader)) {
+            return loadPrivateKey(reader);
+        }
+    }
+
+    private static PublicKey importPublicKey(final String keyStr) throws IOException {
+        try (PEMParser pemParser = new PEMParser(new StringReader(keyStr))) {
+            SubjectPublicKeyInfo pkInfo = (SubjectPublicKeyInfo) pemParser.readObject();
+            JcaPEMKeyConverter converter = new JcaPEMKeyConverter().setProvider("BC");
+
+            return converter.getPublicKey(pkInfo);
+        }
+    }
+
+    public static byte[] rsaPKCS1EncryptPem(String pukPemString, byte[] input) throws CryptoException {
+        try {
+            return RSAUtil.encryptRsaOaepSha256PKCS1(RSAUtil.getPublicKeyPKCS1(pukPemString), input);
+        } catch (Exception e) {
+            throw new CryptoException("rsaPKCS1EncryptPem encryption error", e);
+        }
+    }
+
+    public static byte[] rsaPKCS1DecryptPem(String priPemString, byte[] input) throws CryptoException {
+        try {
+            PrivateKey privateKey = importPrivateKey(priPemString);
+            return RSAUtil.decryptRsaOaepSha256PKCS1(privateKey, input);
+        } catch (Exception e) {
+            throw new CryptoException("rsaPKCS1DecryptPem decryption error", e);
+        }
+    }
+
+    /**
+     * use rsaPKCS1EncryptPem instead
+     */
+    @Deprecated
+    public static byte[] rsaPKCS1WrapAesKey(String pukPem, byte[] key) throws CryptoException {
+        try {
+            PublicKey publicKey = importPublicKey(pukPem);
+            Cipher c = Cipher.getInstance("RSA/None/PKCS1Padding");
+            SecretKey secretKey = new SecretKeySpec(key, "AES");
+            c.init(Cipher.WRAP_MODE, publicKey);
+            return c.wrap(secretKey);
+        } catch (Exception e) {
+            throw new CryptoException("rsaPKCS1WrapAesKey wrap error", e);
+        }
+    }
+
+    /**
+     * use rsaPKCS1DecryptPem instead
+     */
+    @Deprecated
+    public static byte[] rsaPKCS1UnwrapAesKey(String priPem, 
byte[] wrappedKey) throws CryptoException { + try { + PrivateKey privateKey = importPrivateKey(priPem); + Cipher c = Cipher.getInstance("RSA/None/PKCS1Padding", "BC"); + c.init(Cipher.UNWRAP_MODE, privateKey); + return c.unwrap(wrappedKey, "AES", Cipher.SECRET_KEY).getEncoded(); + } catch (Exception e) { + throw new CryptoException("rsaPKCS1UnwrapAesKey unwrap error", e); + } + } + + public static byte[] sm2EncryptRaw(byte[] puk, byte[] input) throws InvalidCipherTextException { + SM2Engine engine = new SM2Engine(SM2Constants.MODE); + + ECPoint point = SM2Constants.CURVE.decodePoint(puk); + ECPublicKeyParameters ecPukParam = new ECPublicKeyParameters(point, SM2Constants.DOMAIN_PARAMS); + + engine.init(true, new ParametersWithRandom(ecPukParam, new SecureRandom())); + return engine.processBlock(input, 0, input.length); + } + + public static byte[] sm2DecryptRaw(byte[] pri, byte[] input) throws InvalidCipherTextException { + SM2Engine engine = new SM2Engine(SM2Constants.MODE); + + BigInteger d = new BigInteger(1, pri); + ECPrivateKeyParameters ecPriparam = new ECPrivateKeyParameters(d, SM2Constants.DOMAIN_PARAMS); + + engine.init(false, ecPriparam); + return engine.processBlock(input, 0, input.length); + } + + public static byte[] sm2EncryptRaw(String pukString, byte[] input) throws InvalidCipherTextException { + return sm2EncryptRaw(Hex.decode(pukString), input); + } + + public static byte[] sm2DecryptRaw(String priString, byte[] input) throws InvalidCipherTextException { + return sm2DecryptRaw(Hex.decode(priString), input); + } + + private static byte[] convertSm2CipherFromAsn1Bytes(byte[] asn1Bytes) throws IOException { + + /** SM2 cipher format in bytes sequence: + * Compression byte + * 32 bytes C1.x || 32 bytes C1.y + * 32 bytes C3 + * real cipher + */ + ASN1InputStream asn1InputStream = new ASN1InputStream(asn1Bytes); + ASN1SequenceParser asn1SequenceParser = ((ASN1Sequence) asn1InputStream.readObject()).parser(); + asn1InputStream.close(); + + Byte compression = 0x04; + ASN1Integer c1_x = (ASN1Integer) asn1SequenceParser.readObject().toASN1Primitive(); + ASN1Integer c1_y = (ASN1Integer) asn1SequenceParser.readObject().toASN1Primitive(); + ASN1OctetString c3 = (ASN1OctetString) asn1SequenceParser.readObject().toASN1Primitive(); + ASN1OctetString cipher = (ASN1OctetString) asn1SequenceParser.readObject().toASN1Primitive(); + + List sm2Cipher = new ArrayList<>(); + sm2Cipher.add(compression); + byte[] pointBytes = c1_x.getValue().toByteArray(); + if (pointBytes.length == 33) { + pointBytes = Arrays.copyOfRange(pointBytes, 1, pointBytes.length); + } + sm2Cipher.addAll(Bytes.asList(pointBytes)); + pointBytes = c1_y.getValue().toByteArray(); + if (pointBytes.length == 33) { + pointBytes = Arrays.copyOfRange(pointBytes, 1, pointBytes.length); + } + sm2Cipher.addAll(Bytes.asList(pointBytes)); + sm2Cipher.addAll(Bytes.asList(c3.getOctets())); + sm2Cipher.addAll(Bytes.asList(cipher.getOctets())); + + return Bytes.toArray(sm2Cipher); + } + + private static byte[] convertSm2CipherToAsn1Bytes(byte[] data, boolean compression) + throws IOException, ConvertCipherException { + /** SM2 cipher format in bytes sequence: + * Compression byte + * 32 bytes C1.x || 32 bytes C1.y + * 32 bytes C3 + * real cipher + */ + byte[] uncompressedBytes = data; + if (compression) { + uncompressedBytes = Arrays.copyOfRange(data, 1, data.length); + } + + BigInteger c1_x = new BigInteger(Arrays.copyOfRange(uncompressedBytes, 0, 32)); + BigInteger c1_y = new BigInteger(Arrays.copyOfRange(uncompressedBytes, 32, 64)); + if 
(c1_x.toByteArray().length != 32) { + throw new ConvertCipherException( + "convert c1_x to BigInteger fail:" + + "\nc1_x=" + Hex.toHexString(c1_x.toByteArray()) + + "\nfrom=" + Hex.toHexString(Arrays.copyOfRange(uncompressedBytes, 0, 32))); + } + if (c1_y.toByteArray().length != 32) { + throw new ConvertCipherException( + "convert c1_y to BigInteger fail:" + + "\nc1_y=" + Hex.toHexString(c1_y.toByteArray()) + + "\nfrom=" + Hex.toHexString(Arrays.copyOfRange(uncompressedBytes, 32, 64))); + } + + ASN1OctetString c3 = new BEROctetString(Arrays.copyOfRange(uncompressedBytes, 64, 96)); + ASN1OctetString cipher = + new BEROctetString(Arrays.copyOfRange(uncompressedBytes, 96, uncompressedBytes.length)); + + ASN1EncodableVector encodableVector = new ASN1EncodableVector(); + encodableVector.add(new ASN1Integer(c1_x)); + encodableVector.add(new ASN1Integer(c1_y)); + encodableVector.add(c3); + encodableVector.add(cipher); + byte[] asn1Bytes = new DERSequence(encodableVector).getEncoded(); + + return asn1Bytes; + } + + public static byte[] sm2EncryptPem(String pubPemString, byte[] input) + throws IOException, InvalidCipherTextException { + PublicKey sm2PublicKey = importPublicKey(pubPemString); + ECPublicKeyParameters ecPublicKeyParameters = + new ECPublicKeyParameters(((BCECPublicKey) sm2PublicKey).getQ(), SM2Constants.DOMAIN_PARAMS); + SM2Engine engine = new SM2Engine(SM2Constants.MODE); + + int i = MAX_SM2_ENCRYPTION_RETRY; + String exceptionMsg = ""; + while (i-- > 0) { + try { + engine.init(true, new ParametersWithRandom(ecPublicKeyParameters, new SecureRandom())); + byte[] output = engine.processBlock(input, 0, input.length); + + // encode ASN1 + boolean compression = (output.length - input.length) % 2 != 0; + + return convertSm2CipherToAsn1Bytes(output, compression); + } catch (ConvertCipherException e) { + exceptionMsg = e.getMessage(); + } + } + throw new InvalidCipherTextException(exceptionMsg); + } + + public static byte[] sm2DecryptPem(String priPemString, byte[] input) + throws InvalidCipherTextException, NoSuchAlgorithmException, InvalidKeySpecException, IOException { + PrivateKey sm2PrivateKey = importPrivateKey(priPemString); + ECPrivateKeyParameters ecPrivateKeyParameters = + new ECPrivateKeyParameters(((BCECPrivateKey) sm2PrivateKey).getD(), SM2Constants.DOMAIN_PARAMS); + SM2Engine engine = new SM2Engine(SM2Constants.MODE); + engine.init(false, ecPrivateKeyParameters); + + byte[] decoded = convertSm2CipherFromAsn1Bytes(input); + return engine.processBlock(decoded, 0, decoded.length); + } + + // https://gist.github.com/nielsutrecht/0c1538b22e67c61b890a1b435a22fc99 + // https://github.com/hyperchain/javasdk/blob/master/src/main/java/cn/hyperchain/sdk/crypto/sm/sm2/SM2Util.java + // https://github.com/bcgit/bc-java/blob/bc3b92f1f0e78b82e2584c5fb4b226a13e7f8b3b/prov/src/test/java/org/bouncycastle/jce/provider/test/SM2SignatureTest.java + // https://github.com/bcgit/bc-java/blob/bc3b92f1f0e78b82e2584c5fb4b226a13e7f8b3b/core/src/test/java/org/bouncycastle/crypto/test/SM2SignerTest.java + // https://stackoverflow.com/questions/53728536/how-to-sign-with-rsassa-pss-in-java-correctly + // https://www.openssl.org/docs/man3.0/man3/EVP_PKEY_CTX_set_rsa_mgf1_md.html + // https://www.openssl.org/docs/man1.1.1/man3/EVP_DigestSignInit.html + + /** + * @param algorithm signature algorithm, i.e., SM3withSM2, SHA256withRSA/PSS, etc. 
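+     * (The overloads that follow are each pinned to a single algorithm:
+     * SHA256withRSA/PSS in signRsaWithSha256/verifyRsaWithSha256 and
+     * SM3withSM2 in sm2SignPem/sm2VerifyPem; the generic dispatch on
+     * AsymmAlgo happens in sign()/verify() further below.)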
+ */ + public static byte[] signRsaWithSha256(String priPemString, byte[] input) + throws NoSuchAlgorithmException, InvalidKeyException, SignatureException, NoSuchProviderException, + InvalidKeySpecException, IOException, InvalidAlgorithmParameterException { + PrivateKey privateKey = importPrivateKey(priPemString); + Signature signature = Signature.getInstance("SHA256withRSA/PSS", "BC"); + signature.setParameter(new PSSParameterSpec("SHA256", "MGF1", MGF1ParameterSpec.SHA256, 32, 1)); + signature.initSign(privateKey); + signature.update(input); + return signature.sign(); + } + + public static boolean verifyRsaWithSha256(String pukPemString, byte[] input, byte[] sig) + throws NoSuchAlgorithmException, NoSuchProviderException, InvalidKeyException, SignatureException, IOException, + InvalidAlgorithmParameterException { + PublicKey publicKey = importPublicKey(pukPemString); + Signature signature = Signature.getInstance("SHA256withRSA/PSS", "BC"); + signature.setParameter(new PSSParameterSpec("SHA256", "MGF1", MGF1ParameterSpec.SHA256, 32, 1)); + signature.initVerify(publicKey); + signature.update(input); + return signature.verify(sig); + } + + public static byte[] sm2SignPem(String priPemString, byte[] input) + throws NoSuchAlgorithmException, InvalidKeySpecException, IOException, NoSuchProviderException, + InvalidKeyException, SignatureException, CryptoException { + PrivateKey sm2PrivateKey = importPrivateKey(priPemString); + + // ECPrivateKeyParameters cbPrivateKeyParameters = new ECPrivateKeyParameters(((BCECPrivateKey)sm2PrivateKey).getD(), SM2Constants.DOMAIN_PARAMS); + // SM2Signer signer = new SM2Signer(); + // signer.init(true, cbPrivateKeyParameters); + // signer.update(input, 0, input.length); + // return signer.generateSignature(); + + Signature signature = Signature.getInstance("SM3withSM2", "BC"); + signature.initSign(sm2PrivateKey); + signature.update(input); + return signature.sign(); + } + + public static boolean sm2VerifyPem(String pubPemString, byte[] input, byte[] sig) + throws IOException, InvalidKeyException, SignatureException, NoSuchAlgorithmException, NoSuchProviderException { + PublicKey sm2PublicKey = importPublicKey(pubPemString); + + // ECPublicKeyParameters ecPublicKeyParameters = new ECPublicKeyParameters(((BCECPublicKey)sm2PublicKey).getQ(), SM2Constants.DOMAIN_PARAMS); + // SM2Signer signer = new SM2Signer(); + // signer.init(false, ecPublicKeyParameters); + // signer.update(input, 0, input.length); + // return signer.verifySignature(sig); + + Signature signature = Signature.getInstance("SM3withSM2", "BC"); + signature.initVerify(sm2PublicKey); + signature.update(input); + return signature.verify(sig); + } + + public static byte[] sm2SignRaw(byte[] pri, byte[] input) throws CryptoException { + ECPrivateKeyParameters privateKeyParameters = + new ECPrivateKeyParameters(new BigInteger(1, pri), SM2Constants.DOMAIN_PARAMS); + // AsymmetricCipherKeyPair asymmetricCipherKeyPair = new AsymmetricCipherKeyPair(null, privateKeyParameters); + // CipherParameters param = new ParametersWithRandom(asymmetricCipherKeyPair.getPrivate(), new SecureRandom()); + // CipherParameters param = new ParametersWithRandom(privateKeyParameters, new SecureRandom()); + + SM2Signer signer = new SM2Signer(); + // signer.init(true, param); + signer.init(true, privateKeyParameters); + signer.update(input, 0, input.length); + return signer.generateSignature(); + } + + public static boolean sm2VerifyRaw(byte[] puk, byte[] input, byte[] sig) { + ECPoint ecPoint = SM2Constants.CURVE.decodePoint(puk); 
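+        // decodePoint() takes the raw SEC1/X9.62 point encoding (0x04 || X || Y
+        // when uncompressed), the same byte[] puk contract used by sm2EncryptRaw.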
+        ECPublicKeyParameters ecPublicKeyParameters = new ECPublicKeyParameters(ecPoint, SM2Constants.DOMAIN_PARAMS);
+
+        SM2Signer signer = new SM2Signer();
+        signer.init(false, ecPublicKeyParameters);
+        signer.update(input, 0, input.length);
+        return signer.verifySignature(sig);
+    }
+
+    public static byte[] sign(AsymmAlgo alg, String priPemString, byte[] input)
+        throws InvalidKeyException, NoSuchAlgorithmException, SignatureException, NoSuchProviderException,
+        InvalidKeySpecException, InvalidAlgorithmParameterException, IOException, CryptoException {
+        switch (alg) {
+        case RSA:
+            return signRsaWithSha256(priPemString, input);
+        case SM2:
+            return sm2SignPem(priPemString, input);
+        default:
+            throw new RuntimeException("Panic! Unsupported signature algorithm");
+        }
+    }
+
+    public static boolean verify(AsymmAlgo alg, String pubPemString, byte[] input, byte[] sig)
+        throws InvalidKeyException, NoSuchAlgorithmException, NoSuchProviderException, SignatureException,
+        InvalidAlgorithmParameterException, IOException {
+        switch (alg) {
+        case RSA:
+            return verifyRsaWithSha256(pubPemString, input, sig);
+        case SM2:
+            return sm2VerifyPem(pubPemString, input, sig);
+        default:
+            throw new RuntimeException("Panic! Unsupported signature algorithm");
+        }
+    }
+
+    public static class SM2Constants {
+        /*
+         * The recommended SM2 curve parameters
+         */
+        public static final SM2P256V1Curve CURVE = new SM2P256V1Curve();
+        public final static BigInteger SM2_ECC_P = CURVE.getQ();
+        public final static BigInteger SM2_ECC_A = CURVE.getA().toBigInteger();
+        public final static BigInteger SM2_ECC_B = CURVE.getB().toBigInteger();
+        public final static BigInteger SM2_ECC_N = CURVE.getOrder();
+        public final static BigInteger SM2_ECC_H = CURVE.getCofactor();
+        public final static BigInteger SM2_ECC_GX = new BigInteger(
+            "32C4AE2C1F1981195F9904466A39C9948FE30BBFF2660BE1715A4589334C74C7", 16);
+        public final static BigInteger SM2_ECC_GY = new BigInteger(
+            "BC3736A2F4F6779C59BDCEE36B692153D0A9877CC62A474002DF32E52139F0A0", 16);
+        public static final ECPoint G_POINT = CURVE.createPoint(SM2_ECC_GX, SM2_ECC_GY);
+        public static final ECDomainParameters DOMAIN_PARAMS =
+            new ECDomainParameters(CURVE, G_POINT, SM2_ECC_N, SM2_ECC_H);
+
+        public static final SM2Engine.Mode MODE = SM2Engine.Mode.C1C3C2;
+    }
+}
+
+/**
+ * ConvertCipherException
+ */
+class ConvertCipherException extends InvalidCipherTextException {
+    public ConvertCipherException() {
+    }
+
+    public ConvertCipherException(String message) {
+        super(message);
+    }
+
+    public ConvertCipherException(String message, Throwable cause) {
+        super(message, cause);
+    }
+
+}
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/cipher/CipherForMySQL.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/cipher/CipherForMySQL.java
new file mode 100644
index 000000000..7d3ab781a
--- /dev/null
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/cipher/CipherForMySQL.java
@@ -0,0 +1,325 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.common.encdb.cipher; + +import com.alibaba.polardbx.common.encdb.EncdbException; +import com.alibaba.polardbx.common.encdb.enums.CCFlags; +import com.alibaba.polardbx.common.encdb.enums.Constants; +import com.alibaba.polardbx.common.encdb.utils.HashUtil; +import com.alibaba.polardbx.common.encdb.utils.Utils; +import com.google.common.primitives.Bytes; +import org.bouncycastle.crypto.CryptoException; + +import java.io.Serializable; +import java.nio.BufferUnderflowException; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.zip.DataFormatException; + +import static com.alibaba.polardbx.common.encdb.utils.Utils.swapBytesByPivot; + +public class CipherForMySQL { + + public static final byte VERSION = 64; + private final byte[] data; + private final int type; + private final Constants.EncAlgo algo; + private final int nonce_start_inclu, nonce_end_exclu; + private final int body_start_inclu, body_end_exclu; + + public static CipherForMySQL buildCipher(byte[] data) { + return new CipherForMySQL(data); + } + + public static CipherForMySQL buildCipher(int type, Constants.EncAlgo algo) { + return new CipherForMySQL(type, algo); + } + + public static boolean verifyCheckCode(List data, byte expected) { + byte computed = xorArray(data); + return computed == expected; + } + + public static boolean verifyCheckCode(byte[] data, int start, int end, byte expected) { + byte computed = xorArray(data, start, end); + return computed == expected; + } + + private CipherForMySQL(int type, Constants.EncAlgo algo) { + this.type = type; + this.algo = algo; + this.data = null; + nonce_start_inclu = -1; + nonce_end_exclu = -1; + body_start_inclu = -1; + body_end_exclu = -1; + } + + private CipherForMySQL(byte[] data) { + if (data.length < 11) { // shortest possible cipher, with no body at all + throw new EncdbException("cipher cannot be " + data.length + " bytes"); + } + this.data = data; + byte checkCode = data[0]; + int version = data[1]; + if (version == 64) { + if (data.length < 12) { // shortest possible cipher for this version, with no body at all + throw new EncdbException("cipher cannot be " + data.length + " bytes"); + } + if (!verifyCheckCode(data, 1, data.length - 1, checkCode)) { + throw new EncdbException("cipher data check code verify failed"); + } + // version 64 + // |code(1)|version(1)|type(1)|algo(1)|nonce(8)|body(x)| + this.type = data[2] & 0xff; + this.algo = Constants.EncAlgo.from(data[3] & 0xff); + this.nonce_start_inclu = 4; + this.nonce_end_exclu = 12; + this.body_start_inclu = 12; + this.body_end_exclu = data.length; + } else { + // temporary cipher format + // |code(1)|type(1)|version&algo(1)|body(x)|nonce(8)| + if (!verifyCheckCode(data, 1, data.length - 9, checkCode)) { + throw new EncdbException("cipher data check code verify failed"); + } + this.type = data[1] & 0xff; + this.algo = Constants.EncAlgo.from(data[2] & 0b00001111); + this.nonce_start_inclu = data.length - 8; + this.nonce_end_exclu = data.length; + this.body_start_inclu = 3; + this.body_end_exclu = data.length - 8; + } + } + + public byte[] getNonce() { + return Arrays.copyOfRange(this.data, nonce_start_inclu, nonce_end_exclu); + } + + public boolean checkNonce(byte[] nonce) { + if (nonce == null || nonce.length != nonce_end_exclu - nonce_start_inclu) { + return false; + } + for (int i = 0; i < 
nonce.length; i++) {
+            if (nonce[i] != this.data[nonce_start_inclu + i]) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    public byte[] decrypt(byte[] key) throws NoSuchAlgorithmException, CryptoException, DataFormatException {
+        byte[] ivSub, dataSub;
+        byte[] tmpData = null;
+
+        try {
+            switch (this.algo) {
+            case AES_128_GCM:
+                ivSub =
+                    Arrays.copyOfRange(this.data, this.body_start_inclu, this.body_start_inclu + SymCrypto.GCMIVLength);
+                // EncDB CipherForMySQL format: TAG || DATA, convert to java format: DATA || TAG
+                dataSub = Bytes.toArray(swapBytesByPivot(
+                    Arrays.copyOfRange(this.data, this.body_start_inclu + SymCrypto.GCMIVLength, this.body_end_exclu),
+                    SymCrypto.GCMTagLength));
+                tmpData = SymCrypto.aesGcmDecrypt(key, dataSub, ivSub);
+                break;
+            case SM4_128_GCM:
+                ivSub =
+                    Arrays.copyOfRange(this.data, this.body_start_inclu, this.body_start_inclu + SymCrypto.GCMIVLength);
+                // EncDB CipherForMySQL format: TAG || DATA, convert to java format: DATA || TAG
+                dataSub = Bytes.toArray(swapBytesByPivot(
+                    Arrays.copyOfRange(this.data, this.body_start_inclu + SymCrypto.GCMIVLength, this.body_end_exclu),
+                    SymCrypto.GCMTagLength));
+                tmpData = SymCrypto.sm4GcmDecrypt(key, dataSub, ivSub);
+                break;
+            case AES_128_CBC:
+                ivSub =
+                    Arrays.copyOfRange(this.data, this.body_start_inclu, this.body_start_inclu + SymCrypto.CBCIVLength);
+                dataSub =
+                    Arrays.copyOfRange(this.data, this.body_start_inclu + SymCrypto.CBCIVLength, this.body_end_exclu);
+
+                tmpData = SymCrypto.aesCBCDecrypt(key, dataSub, ivSub);
+                break;
+            case AES_128_ECB:
+                tmpData = SymCrypto.aesECBDecrypt(key,
+                    Arrays.copyOfRange(this.data, this.body_start_inclu, this.body_end_exclu));
+                break;
+            case SM4_128_CBC:
+                ivSub =
+                    Arrays.copyOfRange(this.data, this.body_start_inclu, this.body_start_inclu + SymCrypto.CBCIVLength);
+                dataSub =
+                    Arrays.copyOfRange(this.data, this.body_start_inclu + SymCrypto.CBCIVLength, this.body_end_exclu);
+                tmpData = SymCrypto.sm4CBCDecrypt(key, dataSub, ivSub);
+                break;
+            case SM4_128_ECB:
+                tmpData = SymCrypto.sm4ECBDecrypt(key,
+                    Arrays.copyOfRange(this.data, this.body_start_inclu, this.body_end_exclu));
+                break;
+            case AES_128_CTR:
+                ivSub =
+                    Arrays.copyOfRange(this.data, this.body_start_inclu, this.body_start_inclu + SymCrypto.CTRIVLength);
+                dataSub =
+                    Arrays.copyOfRange(this.data, this.body_start_inclu + SymCrypto.CBCIVLength, this.body_end_exclu);
+                tmpData = SymCrypto.aesCTRDecrypt(key, dataSub, ivSub);
+                break;
+            case SM4_128_CTR:
+                ivSub =
+                    Arrays.copyOfRange(this.data, this.body_start_inclu, this.body_start_inclu + SymCrypto.CTRIVLength);
+                dataSub =
+                    Arrays.copyOfRange(this.data, this.body_start_inclu + SymCrypto.CBCIVLength, this.body_end_exclu);
+                tmpData = SymCrypto.sm4CTRDecrypt(key, dataSub, ivSub);
+                break;
+            default:
+                break;
+            }
+
+            if (tmpData == null) {
+                throw new EncdbException("decrypt failed");
+            }
+        } catch (BufferUnderflowException e) {
+            throw new DataFormatException("Wrong encdb cipher bytes");
+        }
+
+        if (!verifyCheckCode(tmpData, 0, tmpData.length - 2, tmpData[tmpData.length - 1])) {
+            throw new EncdbException("plain data check code verify failed");
+        }
+        return Arrays.copyOfRange(tmpData, 0, tmpData.length - 1);
+    }
+
+    /* CTR is decrypt-only here: generateIv() has no CTR case and throws via the default branch */
+    private byte[] generateIv(CCFlags flag, byte[] dataIn) throws NoSuchAlgorithmException {
+        int ivLen = 0;
+        switch (algo) {
+        case AES_128_GCM:
+        case SM4_128_GCM:
+            ivLen = SymCrypto.GCMIVLength;
+            if (flag == CCFlags.DET) {
+                byte[] tmp = HashUtil.doSHA256(dataIn);
+                List ivTmp = Bytes.asList(tmp);
+                assert ivLen <= 
tmp.length; + return Bytes.toArray(ivTmp.subList(0, ivLen)); + } else { + return Utils.generateIv(ivLen); + } + case AES_128_ECB: + case SM4_128_ECB: + break; + case AES_128_CBC: + case SM4_128_CBC: + ivLen = SymCrypto.CBCIVLength; + if (flag == CCFlags.DET) { + /*result is 32 bytes iv*/ + byte[] tmp = + (algo == Constants.EncAlgo.SM4_128_CBC) ? HashUtil.doSM3(dataIn) : + HashUtil.doSHA256(dataIn); + + List ivTmp = Bytes.asList(tmp); + assert ivLen <= tmp.length; + return Bytes.toArray(ivTmp.subList(0, ivLen)); + } else { + return Utils.generateIv(ivLen); + } + default: + throw new NoSuchAlgorithmException("Unsupported algorithm " + algo.name()); + } + return new byte[0]; + } + + public byte[] encrypt(CCFlags flag, byte[] key, byte[] inputPlain, byte[] nonce) + throws NoSuchAlgorithmException, CryptoException { + + List encBytes = new ArrayList<>(); + + encBytes.add((byte) 0);//for check code + encBytes.add(VERSION); + encBytes.add((byte) type); + encBytes.add((byte) algo.getVal()); + encBytes.addAll(Bytes.asList(nonce)); + + byte checkCode = xorArray(Bytes.asList(inputPlain)); + + byte[] inputWCheckCode = new byte[inputPlain.length + 1]; + System.arraycopy(inputPlain, 0, inputWCheckCode, 0, inputPlain.length); + inputWCheckCode[inputWCheckCode.length - 1] = checkCode; + + //prepare iv + byte[] iv = generateIv(flag, inputWCheckCode); + + //actual encryption + byte[] tmpData = null; + switch (algo) { + case AES_128_GCM: + encBytes.addAll(Bytes.asList(iv)); + tmpData = SymCrypto.aesGcmEncrypt(key, inputWCheckCode, iv); + // Java format: DATA || TAG, convert to EncDB CipherV0 format: TAG || DATA + encBytes.addAll(swapBytesByPivot(tmpData, tmpData.length - SymCrypto.GCMTagLength)); + break; + case AES_128_ECB: + encBytes.addAll(Bytes.asList(SymCrypto.aesECBEncrypt(key, inputWCheckCode))); + break; + case AES_128_CBC: + encBytes.addAll(Bytes.asList(iv)); + encBytes.addAll(Bytes.asList(SymCrypto.aesCBCEncrypt(key, inputWCheckCode, iv))); + break; + case SM4_128_ECB: + encBytes.addAll(Bytes.asList(SymCrypto.sm4ECBEncrypt(key, inputWCheckCode))); + break; + case SM4_128_CBC: + encBytes.addAll(Bytes.asList(iv)); + List tmp = Bytes.asList(SymCrypto.sm4CBCEncrypt(key, inputWCheckCode, iv)); + encBytes.addAll(tmp); + break; + case SM4_128_GCM: + encBytes.addAll(Bytes.asList(iv)); + tmpData = SymCrypto.sm4GcmEncrypt(key, inputWCheckCode, iv); + encBytes.addAll(swapBytesByPivot(tmpData, tmpData.length - SymCrypto.GCMTagLength)); + break; + default: + throw new NoSuchAlgorithmException("Unsupported algorithm " + algo.name()); + } + + byte[] res = Bytes.toArray(encBytes); + res[0] = xorArray(res, 1, res.length - 1);//check code; + + return res; + } + + public static byte xorArray(List data) { + byte ret = 0; + + for (Byte datum : data) { + ret ^= datum; + } + return ret; + } + + public static byte xorArray(byte[] data, int start, int end) { + byte ret = 0; + + for (int i = start; i <= end; i++) { + ret ^= data[i]; + } + return ret; + } + + public int getEncType() { + return this.type; + } + +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/cipher/CipherSuite.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/cipher/CipherSuite.java new file mode 100644 index 000000000..e5e8ee15c --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/cipher/CipherSuite.java @@ -0,0 +1,84 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this 
file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.common.encdb.cipher; + +import com.alibaba.polardbx.common.encdb.enums.Constants; +import com.alibaba.polardbx.common.encdb.enums.TeeType; +import com.alibaba.polardbx.common.encdb.enums.AsymmAlgo; +import com.alibaba.polardbx.common.encdb.enums.HashAlgo; + +import static com.alibaba.polardbx.common.encdb.enums.OrdinalEnum.searchEnum; + +public class CipherSuite { + + private final Constants.EncAlgo symmAlgo; + private final HashAlgo hashAlgo; + private final AsymmAlgo asymmAlgo; + + public TeeType getTeeType() { + return teeType; + } + + final TeeType teeType; + + public HashAlgo getHashAlgo() { + return hashAlgo; + } + + public CipherSuite(TeeType backendTeeType, String serverInfoString) { + teeType = backendTeeType; + + /* + * serverInfoString returned from backend is in this format + * SM2_WITH_SM4_128_CBC_SM3 or RSA_WITH_AES_128_CBC_SHA256 + */ + String[] elements = serverInfoString.split("_"); + assert elements.length == 6; + + asymmAlgo = searchEnum(AsymmAlgo.class, elements[0]); + symmAlgo = searchEnum(Constants.EncAlgo.class, elements[2] + "_" + elements[3] + "_" + elements[4]); + hashAlgo = searchEnum(HashAlgo.class, elements[5]); + } + + public CipherSuite(TeeType backendTeeType) { + this(backendTeeType, getDefaultCipherSuiteByTeeType(backendTeeType)); + } + + public static String getDefaultCipherSuiteByTeeType(TeeType backendTeeType) { + switch (backendTeeType) { + case FPGA_SMX: + return "SM2_WITH_SM4_128_CBC_SM3"; + case MOCK: + return "SM2_WITH_SM4_128_CBC_SM3"; + default: + return "RSA_WITH_AES_128_CBC_SHA256"; + } + } + + public Constants.EncAlgo getSymmAlgo() { + return symmAlgo; + } + + public AsymmAlgo getAsymmAlgo() { + return asymmAlgo; + } + + @Override + public String toString() { + return String.join("_", asymmAlgo.toString(), "WITH", symmAlgo.toString(), hashAlgo.toString()); + } +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/cipher/Envelope.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/cipher/Envelope.java new file mode 100644 index 000000000..adcc4f677 --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/cipher/Envelope.java @@ -0,0 +1,152 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.common.encdb.cipher; + +import com.alibaba.polardbx.common.encdb.enums.AsymmAlgo; +import com.google.common.primitives.Bytes; +import org.bouncycastle.crypto.CryptoException; + +import java.io.IOException; +import java.nio.BufferUnderflowException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.security.spec.InvalidKeySpecException; +import java.util.ArrayList; +import java.util.List; +import java.util.zip.DataFormatException; + +public class Envelope { + byte[] encryptedKey; + byte[] data; //AES-128-CBC wrapped, for encrypted data, this also including IV and padding + CipherSuite cipherSuite; + + public Envelope(byte[] data) { + this.data = data; + } + + public byte[] getBytes() throws DataFormatException { + /*2 bytes short to store the encryptedKey length*/ + int bufSize = 2 + encryptedKey.length + data.length; + ByteBuffer buf = ByteBuffer.allocate(bufSize).order(ByteOrder.LITTLE_ENDIAN); + + buf.putShort((short) encryptedKey.length); + buf.put(encryptedKey); + buf.put(data); + + return buf.array(); + } + + public static Envelope fromBytes(byte[] bytes) throws DataFormatException { + try { + ByteBuffer buf = ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN); + Envelope env = new Envelope(null); + + short encryptedKeyLen = buf.getShort(); + env.encryptedKey = new byte[encryptedKeyLen]; + buf.get(env.encryptedKey); + + int dataLen = buf.remaining(); + env.data = new byte[dataLen]; + buf.get(env.data); + + return env; + } catch (BufferUnderflowException e) { + throw new DataFormatException("Wrong encdb envelope bytes"); + } + } + + public Envelope seal(String pukString, boolean usePemSMx) throws CryptoException, IOException { + SecureRandom sr = new SecureRandom(); + byte[] tempKey = null, iv = null, tempData = null; + List encryptedDataBlob = new ArrayList<>(); + + assert cipherSuite != null; + + if (cipherSuite.getAsymmAlgo() == AsymmAlgo.SM2 && cipherSuite.getSymmAlgo().name().startsWith("SM4")) { + tempKey = new byte[SymCrypto.SM4_KEY_SIZE]; + iv = new byte[SymCrypto.CBCIVLength]; + sr.nextBytes(tempKey); + sr.nextBytes(iv); + + encryptedDataBlob.addAll(Bytes.asList(iv)); + tempData = SymCrypto.sm4CBCEncrypt(tempKey, data, iv); + encryptedDataBlob.addAll(Bytes.asList(tempData)); + data = Bytes.toArray(encryptedDataBlob); + encryptedKey = usePemSMx ? 
AsymCrypto.sm2EncryptPem(pukString, tempKey) + : AsymCrypto.sm2EncryptRaw(pukString, tempKey); + + } else if (cipherSuite.getAsymmAlgo() == AsymmAlgo.RSA && cipherSuite.getSymmAlgo().name().startsWith("AES")) { + tempKey = new byte[SymCrypto.AES_128_KEY_SIZE]; + iv = new byte[SymCrypto.CBCIVLength]; + sr.nextBytes(tempKey); + sr.nextBytes(iv); + + encryptedDataBlob.addAll(Bytes.asList(iv)); + tempData = SymCrypto.aesCBCEncrypt(tempKey, data, iv); + encryptedDataBlob.addAll(Bytes.asList(tempData)); + data = Bytes.toArray(encryptedDataBlob); + + encryptedKey = AsymCrypto.rsaPKCS1EncryptPem(pukString, tempKey); + } else { + throw new CryptoException("Not supported seal algorithm"); + } + + return this; + } + + public Envelope seal(String pukString) throws CryptoException, IOException { + return seal(pukString, pukString.startsWith("-----BEGIN")); + } + + public byte[] open(String priString) + throws CryptoException, NoSuchAlgorithmException, InvalidKeySpecException, IOException { + return open(priString, priString.startsWith("-----BEGIN")); + } + + public byte[] open(String priString, boolean usePemSMx) + throws CryptoException, NoSuchAlgorithmException, InvalidKeySpecException, IOException { + byte[] plaintext = null; + byte[] tempKey = null, iv = null, tempData = null; + List encryptedDataBlob = Bytes.asList(data); + + int idx = 0; + iv = Bytes.toArray(encryptedDataBlob.subList(0, idx + SymCrypto.CBCIVLength)); + tempData = Bytes.toArray(encryptedDataBlob.subList(SymCrypto.CBCIVLength, encryptedDataBlob.size())); + + assert cipherSuite != null; + + if (cipherSuite.getAsymmAlgo() == AsymmAlgo.SM2 && cipherSuite.getSymmAlgo().name().startsWith("SM4")) { + tempKey = usePemSMx ? AsymCrypto.sm2DecryptPem(priString, encryptedKey) + : AsymCrypto.sm2DecryptRaw(priString, encryptedKey); + plaintext = SymCrypto.sm4CBCDecrypt(tempKey, tempData, iv); + } else if (cipherSuite.getAsymmAlgo() == AsymmAlgo.RSA && cipherSuite.getSymmAlgo().name().startsWith("AES")) { + tempKey = AsymCrypto.rsaPKCS1DecryptPem(priString, encryptedKey); + plaintext = SymCrypto.aesCBCDecrypt(tempKey, tempData, iv); + } else { + throw new CryptoException("Not supported seal algorithm"); + } + + return plaintext; + } + + public Envelope setCiperSuite(CipherSuite cipherSuite) { + this.cipherSuite = cipherSuite; + return this; + } +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/cipher/SymCrypto.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/cipher/SymCrypto.java new file mode 100644 index 000000000..fb2274d98 --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/cipher/SymCrypto.java @@ -0,0 +1,272 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.common.encdb.cipher;
+
+import com.sun.crypto.provider.SunJCE;
+import org.bouncycastle.crypto.CryptoException;
+import org.bouncycastle.jce.provider.BouncyCastleProvider;
+
+import javax.crypto.Cipher;
+import javax.crypto.SecretKey;
+import javax.crypto.spec.GCMParameterSpec;
+import javax.crypto.spec.IvParameterSpec;
+import javax.crypto.spec.SecretKeySpec;
+import java.security.InvalidAlgorithmParameterException;
+import java.security.Provider;
+import java.security.SecureRandom;
+import java.security.Security;
+import java.util.Objects;
+
+public class SymCrypto {
+    public static final int AES_BLOCK_SIZE = 16;
+    public static final int AES_128_KEY_SIZE = AES_BLOCK_SIZE;
+    public static final int SM4_BLOCK_SIZE = 16;
+    public static final int SM4_KEY_SIZE = 16;
+    public static final int CLWW_ORE_KEY_SIZE = 32;
+
+    public static final int GCMTagLength = 16;
+    public static final int GCMIVLength = 12;
+    public static final int CBCIVLength = 16;
+    public static final int CTRIVLength = 16;
+
+    static {
+        Provider sunJceProvider = new SunJCE();
+        if (Security.getProvider(sunJceProvider.getName()) == null) {
+            Security.addProvider(sunJceProvider);
+        }
+        if (Security.getProvider(BouncyCastleProvider.PROVIDER_NAME) == null) {
+            Security.addProvider(new BouncyCastleProvider());
+        }
+    }
+
+    /**
+     * GCM encryption; the returned array is data || tag in the JDK's layout.
+     */
+    private static byte[] gcmEncrypt(byte[] key, byte[] data, byte[] iv, byte[] aad, String algorithm)
+        throws CryptoException {
+        try {
+            SecretKey secretKey = new SecretKeySpec(key, algorithm);
+            Cipher cipher = Cipher.getInstance(algorithm + "/GCM/NoPadding");
+
+            if (iv == null) {
+                throw new Exception("GCM mode IV should be of length " + GCMIVLength);
+            }
+
+            GCMParameterSpec parameterSpec = new GCMParameterSpec(GCMTagLength * 8, iv);
+            cipher.init(Cipher.ENCRYPT_MODE, secretKey, parameterSpec);
+            if (aad != null) {
+                cipher.updateAAD(aad);
+            }
+
+            return cipher.doFinal(data);
+        } catch (Exception e) {
+            throw new CryptoException("gcmEncrypt error", e);
+        }
+    }
+
+    public static byte[] aesGcmEncrypt(byte[] key, byte[] data, byte[] iv, byte[] aad) throws CryptoException {
+        return gcmEncrypt(key, data, iv, aad, "AES");
+    }
+
+    public static byte[] aesGcmEncrypt(byte[] key, byte[] data, byte[] iv) throws CryptoException {
+        return gcmEncrypt(key, data, iv, null, "AES");
+    }
+
+    public static byte[] sm4GcmEncrypt(byte[] key, byte[] data, byte[] iv, byte[] aad) throws CryptoException {
+        return gcmEncrypt(key, data, iv, aad, "SM4");
+    }
+
+    public static byte[] sm4GcmEncrypt(byte[] key, byte[] data, byte[] iv) throws CryptoException {
+        return gcmEncrypt(key, data, iv, null, "SM4");
+    }
+
+    /*
+     * @cipherBytes: cipher data, including tag, in format like cipher || tag, and tag is of length GCMTagLength
+     * @iv: iv should be of length GCMIVLength if not null
+     * @mac: i.e., tag, is of length GCMTagLength
+     * @return: data
+     */
+    private static byte[] gcmDecrypt(byte[] key, byte[] cipherBytes, byte[] iv, byte[] aad, String algorithm)
+        throws CryptoException {
+        try {
+            SecretKey secretKey = new SecretKeySpec(key, algorithm);
+            Cipher cipher = Cipher.getInstance(algorithm + "/GCM/NoPadding");
+
+            if (iv.length != GCMIVLength) {
+                throw new Exception("GCM mode IV should be of length " + GCMIVLength);
+            }
+            GCMParameterSpec parameterSpec = new GCMParameterSpec(GCMTagLength * 8, iv);
+            cipher.init(Cipher.DECRYPT_MODE, secretKey, parameterSpec);
+            if (aad != null) {
+                cipher.updateAAD(aad);
+            }
+
+            return cipher.doFinal(cipherBytes);
+        } catch (Exception e) {
+            throw new CryptoException("gcmDecrypt error", e);
+        }
+    }
+
+    public static byte[] aesGcmDecrypt(byte[] key, byte[] cipherBytes, byte[] iv, byte[] aad) throws CryptoException {
+        return gcmDecrypt(key, cipherBytes, iv, aad, "AES");
+    }
+
+    public static byte[] aesGcmDecrypt(byte[] key, byte[] cipherBytes, byte[] iv) throws CryptoException {
+        return gcmDecrypt(key, cipherBytes, iv, null, "AES");
+    }
+
+    public static byte[] sm4GcmDecrypt(byte[] key, byte[] cipherBytes, byte[] iv, byte[] aad) throws CryptoException {
+        return gcmDecrypt(key, cipherBytes, iv, aad, "SM4");
+    }
+
+    public static byte[] sm4GcmDecrypt(byte[] key, byte[] cipherBytes, byte[] iv) throws CryptoException {
+        return gcmDecrypt(key, cipherBytes, iv, null, "SM4");
+    }
+
+    /*
+     * @key: symmetric key
+     * @data:
+     *   plain data for encryption
+     *   cipher data for decryption
+     * @algorithm: AES, SM4, etc.
+     * @forEncryption:
+     *   true for encryption
+     *   false for decryption
+     */
+    private static byte[] ecbPKCS7Cipher(byte[] key, byte[] data, String algorithm, boolean forEncryption)
+        throws CryptoException {
+        try {
+            SecretKeySpec secretKey = new SecretKeySpec(key, algorithm);
+            // Note: PKCS5Padding enables SunJCE support. PKCS7Padding leads to BouncyCastle security provider,
+            // which lacks hardware acceleration for AES.
+            Cipher cipher = Cipher.getInstance(algorithm + "/ECB/PKCS5Padding");
+            cipher.init(forEncryption ? Cipher.ENCRYPT_MODE : Cipher.DECRYPT_MODE, secretKey);
+
+            return cipher.doFinal(data);
+        } catch (Exception e) {
+            String errMsg = algorithm + " ecbPKCS7Cipher " + (forEncryption ? " encryption" : " decryption") + " error";
+            throw new CryptoException(errMsg, e);
+        }
+    }
+
+    public static byte[] aesECBEncrypt(byte[] key, byte[] data) throws CryptoException {
+        return ecbPKCS7Cipher(key, data, "AES", true);
+    }
+
+    public static byte[] aesECBDecrypt(byte[] key, byte[] cipherBytes) throws CryptoException {
+        return ecbPKCS7Cipher(key, cipherBytes, "AES", false);
+    }
+
+    public static byte[] sm4ECBEncrypt(byte[] key, byte[] data) throws CryptoException {
+        return ecbPKCS7Cipher(key, data, "SM4", true);
+    }
+
+    public static byte[] sm4ECBDecrypt(byte[] key, byte[] cipherBytes) throws CryptoException {
+        return ecbPKCS7Cipher(key, cipherBytes, "SM4", false);
+    }
+
+    /*
+     * @key: symmetric key
+     * @data:
+     *   plain data for encryption
+     *   cipher data for decryption
+     * @iv: iv should be of length CBCIVLength if not null
+     * @algorithm: AES, SM4, etc.
+     * @forEncryption:
+     *   true for encryption
+     *   false for decryption
+     */
+    private static byte[] cbcPKCS7Cipher(byte[] key, byte[] data, byte[] iv, String algorithm, boolean forEncryption)
+        throws CryptoException {
+        try {
+            SecretKeySpec secretKey = new SecretKeySpec(key, algorithm);
+            // Note: PKCS5Padding enables SunJCE support. PKCS7Padding leads to BouncyCastle security provider,
+            // which lacks hardware acceleration for AES.
+            Cipher cipher = Cipher.getInstance(algorithm + "/CBC/PKCS5Padding");
+
+            if (iv == null && forEncryption) {
+                SecureRandom secureRandom = new SecureRandom();
+                iv = new byte[CBCIVLength];
+                secureRandom.nextBytes(iv);
+            }
+
+            if (Objects.requireNonNull(iv).length != CBCIVLength) {
+                throw new InvalidAlgorithmParameterException("CBC mode IV should be of length " + CBCIVLength);
+            }
+
+            IvParameterSpec ivParameterSpec = new IvParameterSpec(iv);
+            cipher.init(forEncryption ? 
Cipher.ENCRYPT_MODE : Cipher.DECRYPT_MODE, secretKey, ivParameterSpec); + + return cipher.doFinal(data); + } catch (Exception e) { + String errMsg = algorithm + " cbcPKCS7Cipher " + (forEncryption ? " encryption" : " decryption") + " error"; + throw new CryptoException(errMsg, e); + } + } + + public static byte[] sm4CBCEncrypt(byte[] key, byte[] data, byte[] iv) throws CryptoException { + return cbcPKCS7Cipher(key, data, iv, "SM4", true); + } + + public static byte[] sm4CBCDecrypt(byte[] key, byte[] cipherBytes, byte[] iv) throws CryptoException { + return cbcPKCS7Cipher(key, cipherBytes, iv, "SM4", false); + } + + public static byte[] aesCBCEncrypt(byte[] key, byte[] data, byte[] iv) throws CryptoException { + return cbcPKCS7Cipher(key, data, iv, "AES", true); + } + + public static byte[] aesCBCDecrypt(byte[] key, byte[] cipherBytes, byte[] iv) throws CryptoException { + return cbcPKCS7Cipher(key, cipherBytes, iv, "AES", false); + } + + /* + * @key: symmetric key + * @data: + * plain data for encryption + * cipher data for decryption + * @algorithm: AES, SM4, etc. + * @forEncryption: + * true for encryption + * false for decryption + */ + private static byte[] ctrCipher(byte[] key, byte[] data, byte[] iv, String algorithm, boolean forEncryption) + throws CryptoException { + try { + SecretKeySpec secretKey = new SecretKeySpec(key, algorithm); + // Note: PKCS5Padding enables SunJCE support. PKCS7Padding leads to BouncyCastle security provider, + // which lacks hardware acceleration for AES. + Cipher cipher = Cipher.getInstance(algorithm + "/CTR/NoPadding"); + IvParameterSpec ivSpec = new IvParameterSpec(iv); + cipher.init(forEncryption ? Cipher.ENCRYPT_MODE : Cipher.DECRYPT_MODE, secretKey, ivSpec); + + return cipher.doFinal(data); + } catch (Exception e) { + String errMsg = algorithm + " ctr " + (forEncryption ? " encryption" : " decryption") + " error"; + throw new CryptoException(errMsg, e); + } + } + + public static byte[] aesCTRDecrypt(byte[] key, byte[] cipherBytes, byte[] iv) throws CryptoException { + return ctrCipher(key, cipherBytes, iv, "AES", false); + } + + public static byte[] sm4CTRDecrypt(byte[] key, byte[] cipherBytes, byte[] iv) throws CryptoException { + return ctrCipher(key, cipherBytes, iv, "SM4", false); + } + +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/AsymmAlgo.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/AsymmAlgo.java new file mode 100644 index 000000000..e0479c9c7 --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/AsymmAlgo.java @@ -0,0 +1,42 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.common.encdb.enums; + +import java.util.Arrays; + +public enum AsymmAlgo implements OrdinalEnum { + RSA(1), + SM2(2); + + private final int val; + + AsymmAlgo(int i) { + this.val = i; + } + + public static AsymmAlgo from(int i) { + return Arrays.stream(AsymmAlgo.values()) + .filter(e -> e.val == i) + .findAny() + .orElseThrow(() -> new IllegalArgumentException("invalid value")); + } + + @Override + public int getVal() { + return val; + } +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/CCFlags.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/CCFlags.java new file mode 100644 index 000000000..9e8dbf40b --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/CCFlags.java @@ -0,0 +1,43 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.common.encdb.enums; + +import java.util.Arrays; + +public enum CCFlags implements OrdinalEnum { + RND(0), + DET(1); + + private final int val; + + CCFlags(int val) { + this.val = val; + } + + @Override + public int getVal() { + return val; + } + + public static CCFlags from(int i) { + return Arrays.stream(CCFlags.values()) + .filter(e -> e.val == i) + .findAny() + .orElseThrow(() -> new IllegalArgumentException("invalid value")); + } + +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/Constants.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/Constants.java new file mode 100644 index 000000000..8761cfe3b --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/Constants.java @@ -0,0 +1,237 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.common.encdb.enums; + +import com.alibaba.polardbx.common.encdb.cipher.SymCrypto; + +import java.util.Arrays; + +public class Constants { + + public enum Stateless implements OrdinalEnum { + /* + The user's root key inside the enclave does not expire when the current session ends; it remains valid until overwritten + */ + NeverExpire(0), + + /* + The user's root key inside the enclave expires automatically when the current session ends + */ + SessionExpire(1); + + private final int val; + + Stateless(int val) { + this.val = val; + } + + @Override + public int getVal() { + return val; + } + } + + //DekMode, DekGenMode + public enum DekGenMode implements OrdinalEnum { + + /* + New user data keys are generated inside the enclave + */ + ENCLAVE(0), + + /* + New user data keys are generated in the local SDK + */ + LOCAL(1); + + private final int val; + + DekGenMode(int i) { + this.val = i; + } + + @Override + public int getVal() { + return val; + } + + public static DekGenMode from(int i) { + return Arrays.stream(DekGenMode.values()) + .filter(e -> e.val == i) + .findAny() + .orElseThrow(() -> new IllegalArgumentException("invalid value")); + + } + } + + public enum EncScheme implements OrdinalEnum { + /* + The initialization vector (e.g., IV) is generated randomly during data encryption + */ + RND(1), + + /* + The initialization vector (e.g., IV) is generated deterministically during data encryption + */ + DET(2); + + private final int val; + + EncScheme(int i) { + this.val = i; + } + + @Override + public int getVal() { + return val; + } + + public static EncScheme from(int i) { + return Arrays.stream(EncScheme.values()) + .filter(e -> e.val == i) + .findAny() + .orElseThrow(() -> new IllegalArgumentException("invalid value")); + } + } + + /** + * Symmetric algorithms to encrypt or decrypt data + */ + public enum EncAlgo implements OrdinalEnum { + AES_128_GCM(0), + AES_128_ECB(1), + AES_128_CTR(2), + AES_128_CBC(3), + SM4_128_CBC(4), + SM4_128_ECB(5), + SM4_128_CTR(6), + SM4_128_GCM(7), + + CLWW_ORE(8); + + private final int val; + + EncAlgo(int val) { + this.val = val; + } + + @Override + public int getVal() { + return val; + } + + public static EncAlgo from(int i) { + return Arrays.stream(EncAlgo.values()) + .filter(e -> e.val == i) + .findAny() + .orElseThrow(() -> new IllegalArgumentException("invalid value")); + } + } + + public enum SDKMode { + /* + In this mode, the user must configure the database connection (dbConnection), the MEK, and related parameters. This mode supports + 1. encryption and decryption of all data types + 2. root key import; data key import/export (in plaintext or ciphertext form); ACL issuance; KMS key import; and similar operations + */ + Default, + + /* + In this mode, the user configures the database connection (dbConnection) but no plaintext MEK. This mode + 1. does not support data encryption/decryption + 2. supports data key import/export (ciphertext form), KMS key import, and similar operations + */ + NoMekBypass, + + /* + In this mode, the user provides the plaintext MEK, the target database name, the username, and the TeeType. This mode supports + 1. encryption and decryption of all data types + 2. data key import (plaintext form) and generation of encrypted ACLs + */ + Offline, + + /* + The user supplies a single table-level DEK, or a single table-level DEK is generated automatically + */ + SimpleDek, + /* + Uses a fixed DEK agreed with the server side; used by POCs + */ + ConstantDek, + + /* + CryptoOnly + */ + Crypto + } + + public enum KeyMgmtType { + /* + * For RDS PostgreSQL and PolarDB PostgreSQL modes + */ + PG, + /* + * For the service-style encrypted-metadata management system (also called keystore) + */ + KEYSTORE_SERVER, + /* + * For PolarDB MySQL (when connecting through the proxy) + */ + POLARDB_MYSQL, + /* + * For RDS MySQL + */ + RDS_MYSQL, + } + + /** + * flags to enable/disable + */ + public static boolean VERBOSE = false; + public static boolean SHOW_RA_LOG = false; + public static boolean BYPASS_RA = false; + + /*JSON KEYS*/ + public static final String ROOT_KEY_ACL = "ACL"; + public static final String ACL_KEY_ALLOW_ENTRIES = "allow entries"; + public static final String ACL_KEY_MAC = "mac"; + public static final String OWNER_ID = "ownerID"; + public static final String USERNAME = "username"; + public static final String USER_ID = "userID"; + + public static final int ENCDB_KEY_SIZE = SymCrypto.AES_128_KEY_SIZE; + public static final int ENCDB_IV_SIZE = SymCrypto.GCMIVLength; + public static final int ENCDB_MAC_SIZE = SymCrypto.AES_BLOCK_SIZE; + + public enum CipherVersionFormat { + VERSION_0(0); // VERSION_0 = static_cast(0b00000000) + + private final int value; + + CipherVersionFormat(int value) { + this.value = value; + } + + public int getValue() { + return value; + } + } + + public static final int CIPHER_VERSION_0_NONCE_LEN = 8; +} + + diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/HashAlgo.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/HashAlgo.java new file mode 100644 index 000000000..b01be8759 --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/HashAlgo.java @@ -0,0 +1,43 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.common.encdb.enums; + +import java.util.Arrays; + +public enum HashAlgo implements OrdinalEnum { + // MUST match that in encdb::Hash::Alg + SHA256(0), + SM3(1); + + private final int val; + + HashAlgo(int i) { + this.val = i; + } + + public static HashAlgo from(int i) { + return Arrays.stream(HashAlgo.values()) + .filter(e -> e.val == i) + .findAny() + .orElseThrow(() -> new IllegalArgumentException("invalid value")); + } + + @Override + public int getVal() { + return val; + } +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/MsgKeyConstants.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/MsgKeyConstants.java new file mode 100644 index 000000000..d68a44a2a --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/MsgKeyConstants.java @@ -0,0 +1,136 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
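The OrdinalEnum pattern shared by these enums keeps the integer value as the on-wire encoding and from(int) as the reverse mapping; a small sketch (illustrative only):

    Constants.EncAlgo algo = Constants.EncAlgo.from(3); // AES_128_CBC
    int wire = algo.getVal();                           // 3 again; from() throws IllegalArgumentException for unknown values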
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.common.encdb.enums; + +public class MsgKeyConstants { + // no underscore in mrenclave, xxid, keyname + public static final String REQUEST_TYPE = "request_type"; + public static final String MEKID = "mekid"; + public static final String MRENCLAVE = "mrenclave"; + public static final String MRENCLAVE_SIG = "mrenclave_sig"; + public static final String SERVER_INFO = "server_info"; + public static final String SERVER_NONE = "nonce"; + public static final String SERVER_INFO_MAC = "server_info_mac"; + + public static final String VERSION = "version"; + public static final String TEE_TYPE = "tee_type"; + + public static final String CIPHER_SUITE = "cipher_suite"; + public static final String PUBLIC_KEY = "public_key"; + public static final String PUBLIC_KEY_HASH = "public_key_hash"; + public static final String QUOTE = "quote"; + public static final String ENVELOPE = "envelope"; + public static final String MEK = "mek"; + public static final String MEK_ID = "mekid"; + public static final String EXPIRE = "expire"; + public static final String ACL_LIST = "acl_list"; + public static final String ENVELOPED = "enveloped"; + + public static final String OLD_MEK = "old_mek"; + public static final String KMS_MEK = "kms_mek"; + public static final String KEYNAME = "keyname"; + public static final String CIPHER_CONTEXT = "CipherContext"; + public static final String CTXID = "ctxid"; + + public static final String DEKID = "dekid"; + public static final String DEK = "dek"; + public static final String ENCRYPTED_DEK = "encrypted_dek"; + public static final String GROUPID = "groupid"; + public static final String GROUP_AUTH = "group_auth"; + public static final String ALGORITHM = "algorithm"; + public static final String POLICY = "policy"; + public static final String FLAGS = "flags"; + public static final String ROTATED = "rotated"; + + public static final String OLD_CTXID = "old_ctxid"; + public static final String NEW_CTXID = "new_ctxid"; + public static final String USERNAME = "username"; //get_current_username + + // RA_GET_QUOTE, RA_VERIFY_QUOTE and other related operations + public static final String CHALLENGE = "challenge"; + public static final String USER_DATA = "user_data"; + public static final String ENCLAVE_DATA = "enclave_data"; + public static final String KEY_META_DATA = "key_meta_data"; + + //used by export data key + public static final String TABLE_NAME = "tableName"; + public static final String COLUMN_TYPE = "columnType"; + public static final String COLUMN_NAME = "columnName"; + + // used by BCL + public static final String ISSUER_MEKID = "issuer_mekid"; + public static final String SUBJECT_MEKID = "subject_mekid"; + public static final String PUKID = "pukid"; + public static final String ISSUER_PUKID = "issuer_pukid"; + public static final String SUBJECT_PUKID = "subject_pukid"; + // public static final String PUBLIC_KEY ="public_key"; + public static final String ISSUER_PUK = "issuer_puk"; + public static final String SUBJECT_PUK = "subject_puk"; + public static final String PRIMARY_SIG = "private_sig"; + public static final String 
MEK_SIG = "mek_sig"; + public static final String SERIAL_NUM = "serial_num"; + public static final String ISSUER_SIG = "issuer_sig"; + public static final String SUBJECT_SIG = "subject_sig"; + public static final String BCL = "bcl"; + public static final String BCL_BODY_VALIDITY = "validity"; + public static final String BCL_BODY_POLICIES = "policies"; + public static final String BRL = "brl"; + public static final String BRL_BODY_REVOKED = "revoked"; + public static final String BRL_PUKID = "brl_pukid"; + public static final String BRL_SIG = "brl_sig"; + + //used by inplace encrypt + public static final String KEYNAME_USER = "user"; + public static final String KEYNAME_DATABASE = "database"; + public static final String KEYNAME_SCHEMA = "schema"; + public static final String KEYNAME_TABLE = "table"; + public static final String KEYNAME_COLUMN = "column"; + public static final String RECURSIVE_SEARCH = "recursive_search"; + + public static final String DEST_KEYNAME = "dest_keyname"; + public static final String SRC_KEYNAME = "src_keyname"; + + public static final String ENC_RULE = "enc_rule"; + + public static final String RULES = "rules"; + + public static final String NAME = "name"; + + public static final String ENABLED = "enabled"; + + public static final String META = "meta"; + + public static final String USERS = "users"; + + public static final String RESTRICTED_ACCESS = "restrictedAccess"; + public static final String FULL_ACCESS = "fullAccess"; + + public static final String DATABASES = "databases"; + + public static final String TABLES = "tables"; + + public static final String COLUMNS = "columns"; + + public static final String DESCRIPTION = "description"; + + public static final String STATUS = "status"; + + public static final String BODY = "body"; + + public static final int BCL_VERSION_1 = 1; + public static final int BRL_VERSION_1 = 1; +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/MsgType.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/MsgType.java new file mode 100644 index 000000000..95aaf227c --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/MsgType.java @@ -0,0 +1,68 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.common.encdb.enums; + +public final class MsgType { + // Server info + public static final int SERVER_INFO_GET = 0x00; //request server info + public static final int SERVER_GET_NONCE = 0x01; // request a new server nonce + + // Remote attestation + public static final int RA_CHALLENGE = 0x10; + public static final int RA_01 = 0x11; //always combine message 0 and message 1 + public static final int RA_2 = 0x12; + public static final int RA_3 = 0x13; + public static final int RA_4 = 0x14; + public static final int RA_GET_PUBLIC_KEY = 0x15; + public static final int RA_GET_QUOTE = 0x16; //generate RA quote + public static final int RA_VERIFY_QUOTE = 0x17; + + // MEK + public static final int MEK_PROVISION = 0x30; + public static final int MEK_UPDATE = 0x31; + public static final int MEK_IMPORT_FROM_KMS = 0x32; //import MEK in KMS format + public static final int MEK_EXPORT = 0x33; + + // DEK + public static final int DEK_GENERATE = 0x50; //generate in enclave + public static final int DEK_INSERT = 0x51; //insert DEK from local, should be encrypted in specified format + public static final int DEK_UPDATE = 0x52; //update DEK info + public static final int DEK_GET_BY_NAME = 0x53; //get DEK by keyname + public static final int DEK_GET_BY_ID = 0x54; //get DEK by ctxid + public static final int DEK_GET_NEXT_DEK_ID = 0x55; //allocate a new dek entry and get latest dekid + public static final int DEK_UPDATE_ATTRIBUTE = 0x56; //update DEK CC attributes info + public static final int DEK_UPDATE_ALGORITHM = 0x57; //update DEK CC algorithm info + public static final int DEK_COPY_KEYNAME = 0x58; + public static final int KEYNAME_GENERATE = 0x59; + public static final int KEYNAME_SEARCH = 0x5A; + + // BCL + public static final int BCL_ISSUE = 0x70; // issue or update BCL + public static final int BCL_GET = 0x71; // get BCL, including related BRL + public static final int BCL_REVOKE = 0x72; // issue BRL to revoke BCL + public static final int BCL_REGISTER = 0x73; // register user public key + + // keyname + + // EnclaveRequestType + public static final int ENCLAVE_EXPORT_MEK = 0x90; + public static final int ENCLAVE_GEN_KEY_PAIR = 0x91; + + public static final int ENC_RULE_IMPORT = 0xA0; + + public static final int ENC_RULE_DELETE = 0xA1; +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/OrdinalEnum.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/OrdinalEnum.java new file mode 100644 index 000000000..9daba9434 --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/OrdinalEnum.java @@ -0,0 +1,32 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.common.encdb.enums; + +public interface OrdinalEnum { + + int getVal(); + + static <T extends Enum<T>> T searchEnum(Class<T> enumeration, + String search) { + for (T each : enumeration.getEnumConstants()) { + if (each.name().compareToIgnoreCase(search) == 0) { + return each; + } + } + return null; + } +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/Symmetric.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/Symmetric.java new file mode 100644 index 000000000..75c372f8f --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/Symmetric.java @@ -0,0 +1,62 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.common.encdb.enums; + +import java.util.Arrays; + +public class Symmetric { + + /** + * Algorithm parameters + */ + public enum Params implements OrdinalEnum { + AES_BLOCK_SIZE(16), + SM4_BLOCK_SIZE(16), + MAX_BLOCK_SIZE(16), + + AES_128_KEY_SIZE(16), + SM4_128_KEY_SIZE(16), + MAX_KEY_SIZE(16), + + GCM_IV_SIZE(12), + CBC_IV_SIZE(16), + CTR_IV_SIZE(16), + ECB_IV_SIZE(0), + MAX_IV_SIZE(16), + + GCM_TAG_SIZE(16); + + private final int val; + + Params(int val) { + this.val = val; + } + + @Override + public int getVal() { + return val; + } + + public static Params from(int i) { + return Arrays.stream(Params.values()) + .filter(e -> e.val == i) + .findAny() + .orElseThrow(() -> new IllegalArgumentException("invalid value")); + } + } + +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/TeeType.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/TeeType.java new file mode 100644 index 000000000..dd558647c --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/enums/TeeType.java @@ -0,0 +1,46 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
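A small usage sketch for OrdinalEnum.searchEnum above (illustrative only; the lookup is case-insensitive and returns null when nothing matches):

    Symmetric.Params p = OrdinalEnum.searchEnum(Symmetric.Params.class, "gcm_iv_size");
    assert p == Symmetric.Params.GCM_IV_SIZE && p.getVal() == 12;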
+ */ + +package com.alibaba.polardbx.common.encdb.enums; + +public enum TeeType { + /* + * The server side uses the Intel SGX mechanism + * Includes det, rnd + */ + IntelSGX, + /* + * The server side uses Intel SGX2; remote attestation takes the DCAP form + */ + IntelSGX2, + /* + * The server side uses an FPGA mechanism together with the SM (Chinese national standard) crypto algorithms + */ + FPGA_SMX, + /* + * The server side uses the HSM_MEK mechanism together with the SM crypto algorithms; externally it behaves much like the FPGA mode + */ + HSM_MEK, + /* + * Similar to the SGX type + */ + MOCK, + /* + * In this mode, only the det, rnd and ore types are available + */ + NO_TEE, +} + diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/utils/HKDF.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/utils/HKDF.java new file mode 100644 index 000000000..dce8b40d6 --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/utils/HKDF.java @@ -0,0 +1,48 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.common.encdb.utils; + +import org.bouncycastle.crypto.Digest; +import org.bouncycastle.crypto.RuntimeCryptoException; +import org.bouncycastle.crypto.digests.SHA256Digest; +import org.bouncycastle.crypto.digests.SM3Digest; +import org.bouncycastle.crypto.generators.HKDFBytesGenerator; +import org.bouncycastle.crypto.params.HKDFParameters; + +public class HKDF { + private static byte[] deriveHkdf(int length, byte[] secret, byte[] salt, byte[] info, Digest digest) { + HKDFBytesGenerator hkdf = new HKDFBytesGenerator(digest); + HKDFParameters params = new HKDFParameters(secret, salt, info); + + hkdf.init(params); + byte[] okm = new byte[length]; + hkdf.generateBytes(okm, 0, length); + + return okm; + } + + public static byte[] deriveWithSHA256(int length, byte[] secret, byte[] salt, byte[] info) + throws RuntimeCryptoException { + return deriveHkdf(length, secret, salt, info, new SHA256Digest()); + } + + public static byte[] deriveWithSM3(int length, byte[] secret, byte[] salt, byte[] info) + throws RuntimeCryptoException { + return deriveHkdf(length, secret, salt, info, new SM3Digest()); + } +} + diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/utils/HMAC.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/utils/HMAC.java new file mode 100644 index 000000000..3bf118880 --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/utils/HMAC.java @@ -0,0 +1,60 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
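A hedged usage sketch for the HKDF helper above (the secret, salt and info byte strings are illustrative placeholders, not protocol constants):

    byte[] secret = "master-secret".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    byte[] salt = "demo-salt".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    byte[] info = "encdb-demo".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    byte[] subKey = HKDF.deriveWithSHA256(16, secret, salt, info); // 16-byte derived subkey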
+ */ + +package com.alibaba.polardbx.common.encdb.utils; + +import com.alibaba.polardbx.common.encdb.enums.HashAlgo; +import org.bouncycastle.crypto.Digest; +import org.bouncycastle.crypto.digests.SHA256Digest; +import org.bouncycastle.crypto.digests.SM3Digest; +import org.bouncycastle.crypto.macs.HMac; +import org.bouncycastle.crypto.params.KeyParameter; + +/** + * HMAC + */ +public class HMAC { + private static byte[] signHmac(byte[] key, byte[] data, Digest digest) { + KeyParameter keyParameter = new KeyParameter(key); + HMac hMac = new HMac(digest); + + hMac.init(keyParameter); + hMac.update(data, 0, data.length); + byte[] result = new byte[hMac.getMacSize()]; + hMac.doFinal(result, 0); + + return result; + } + + public static byte[] signWithSM3(byte[] key, byte[] data) { + return signHmac(key, data, new SM3Digest()); + } + + public static byte[] signWithSHA256(byte[] key, byte[] data) { + return signHmac(key, data, new SHA256Digest()); + } + + public static byte[] hmac(HashAlgo hashAlg, byte[] key, byte[] data) { + switch (hashAlg) { + case SHA256: + return signWithSHA256(key, data); + case SM3: + return signWithSM3(key, data); + default: + throw new RuntimeException("Panic! Unsupported hmac algorithm"); + } + } +} \ No newline at end of file diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/utils/HashUtil.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/utils/HashUtil.java new file mode 100644 index 000000000..528ad6577 --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/utils/HashUtil.java @@ -0,0 +1,82 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.common.encdb.utils; + +import com.alibaba.polardbx.common.encdb.enums.HashAlgo; +import org.bouncycastle.crypto.digests.SM3Digest; + +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; + +public class HashUtil { + + /** + * Compute SHA-256. Note: do not feed overly large objects, since the whole input is kept in memory + * + * @param rawData raw data + * @return a 32-byte array + */ + public static byte[] doSHA256(byte[] rawData) { + try { + MessageDigest digest = MessageDigest.getInstance("SHA-256"); + return digest.digest(rawData); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException("Panic! hash with SHA256 failed", e); + } + } + + public static byte[] doMd5(byte[] data) { + try { + MessageDigest digest = MessageDigest.getInstance("MD5"); + + return digest.digest(data); + } catch (NoSuchAlgorithmException e) { + e.printStackTrace(); + throw new RuntimeException("Panic! hash with MD5 failed", e); + } + } + + /** + * Returns a 32-byte array + * + * @param rawData source data + * @return a 32-byte array + */ + public static byte[] doSM3(byte[] rawData) { + SM3Digest digest = new SM3Digest(); + digest.update(rawData, 0, rawData.length); + byte[] hash = new byte[digest.getDigestSize()]; + digest.doFinal(hash, 0); + return hash; + } + + public static byte[] hash(HashAlgo hashAlg, byte[] rawData) { + switch (hashAlg) { + case SHA256: + return doSHA256(rawData); + case SM3: + return doSM3(rawData); + default: + throw new RuntimeException("Panic! Unsupported hash algorithm"); + } + } + + public static String hash(HashAlgo alg, String msg) { + return Utils.bytesTobase64(HashUtil.hash(alg, msg.getBytes(StandardCharsets.UTF_8))); + } +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/utils/RSAUtil.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/utils/RSAUtil.java new file mode 100644 index 000000000..443cf6bbe --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/utils/RSAUtil.java @@ -0,0 +1,283 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.common.encdb.utils; + +import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo; +import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.bouncycastle.openssl.PEMKeyPair; +import org.bouncycastle.openssl.PEMParser; +import org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter; + +import javax.crypto.BadPaddingException; +import javax.crypto.Cipher; +import javax.crypto.IllegalBlockSizeException; +import javax.crypto.NoSuchPaddingException; +import javax.crypto.spec.OAEPParameterSpec; +import javax.crypto.spec.PSource; +import java.io.BufferedReader; +import java.io.IOException; +import java.io.StringReader; +import java.security.AlgorithmParameters; +import java.security.InvalidKeyException; +import java.security.KeyFactory; +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.security.NoSuchAlgorithmException; +import java.security.NoSuchProviderException; +import java.security.PrivateKey; +import java.security.PublicKey; +import java.security.SecureRandom; +import java.security.Security; +import java.security.spec.AlgorithmParameterSpec; +import java.security.spec.InvalidKeySpecException; +import java.security.spec.MGF1ParameterSpec; +import java.security.spec.PKCS8EncodedKeySpec; +import java.security.spec.X509EncodedKeySpec; +import java.util.ArrayList; +import java.util.Base64; +import java.util.List; + +public class RSAUtil { + + /* JceSecurity will run out of memory (OOM) if BouncyCastleProvider is not used correctly. 
+ https://timerbin.iteye.com/blog/2151969 + */ + static { + if (Security.getProvider(BouncyCastleProvider.PROVIDER_NAME) == null) { + Security.addProvider(new BouncyCastleProvider()); + } + } + + public static PublicKey getPublicKey(String base64PublicKey) { + base64PublicKey = extractPemBlock(base64PublicKey); + PublicKey publicKey = null; + try { + X509EncodedKeySpec keySpec = new X509EncodedKeySpec(Base64.getDecoder().decode(base64PublicKey.getBytes())); + KeyFactory keyFactory = KeyFactory.getInstance("RSA"); + publicKey = keyFactory.generatePublic(keySpec); + return publicKey; + } catch (NoSuchAlgorithmException e) { + e.printStackTrace(); + } catch (InvalidKeySpecException e) { + e.printStackTrace(); + } + return publicKey; + } + + public static PublicKey getPublicKeyPKCS1(final String keyStr) throws IOException { + try (PEMParser pemParser = new PEMParser(new StringReader(keyStr))) { + SubjectPublicKeyInfo pkInfo = (SubjectPublicKeyInfo) pemParser.readObject(); + JcaPEMKeyConverter converter = new JcaPEMKeyConverter()/*.setProvider("BC")*/; + + return converter.getPublicKey(pkInfo); + } + } + + public static PrivateKey getPrivateKeyPKCS1(String base64PrivateKey) { + /* Java does not natively support loading PKCS#1 private keys */ + PrivateKey privateKey = null; + try { + StringReader certReader = new StringReader(base64PrivateKey); + BufferedReader reader = new BufferedReader(certReader); + + PEMParser pemParser = new PEMParser(reader); + JcaPEMKeyConverter converter = new JcaPEMKeyConverter()/*.setProvider("BC")*/; + Object object = pemParser.readObject(); + KeyPair kp = converter.getKeyPair((PEMKeyPair) object); + privateKey = kp.getPrivate(); + } catch (IOException e) { + e.printStackTrace(); + } + + return privateKey; + } + + public static PrivateKey getPrivateKey(String base64PrivateKey) { + base64PrivateKey = extractPemBlock(base64PrivateKey); + PrivateKey privateKey = null; + PKCS8EncodedKeySpec keySpec = new PKCS8EncodedKeySpec(Base64.getDecoder().decode(base64PrivateKey.getBytes())); + KeyFactory keyFactory = null; + try { + keyFactory = KeyFactory.getInstance("RSA"); + } catch (NoSuchAlgorithmException e) { + e.printStackTrace(); + } + try { + privateKey = keyFactory.generatePrivate(keySpec); + } catch (InvalidKeySpecException e) { + e.printStackTrace(); + } + return privateKey; + } + + public static byte[] encrypt(String data, String publicKey) + throws BadPaddingException, IllegalBlockSizeException, InvalidKeyException, NoSuchPaddingException, + NoSuchAlgorithmException { + Cipher cipher = Cipher.getInstance("RSA/None/PKCS1Padding"); + cipher.init(Cipher.ENCRYPT_MODE, getPublicKey(publicKey)); + return cipher.doFinal(data.getBytes()); + } + + public static String decrypt(byte[] data, PrivateKey privateKey) + throws NoSuchPaddingException, NoSuchAlgorithmException, InvalidKeyException, BadPaddingException, + IllegalBlockSizeException { + Cipher cipher = Cipher.getInstance("RSA/ECB/PKCS1Padding"); + cipher.init(Cipher.DECRYPT_MODE, privateKey); + return new String(cipher.doFinal(data)); + } + + /** + * Extracts the content between the `-----BEGIN` and `-----END` markers + */ + public static String extractPemBlock(String pem) { + if (pem == null) { + throw new IllegalArgumentException("Invalid pem(null)"); + } + List<String> ret = new ArrayList<>(); + String[] pemLines = pem.split("(\r\n)|(\r)|(\n)"); + boolean isInPem = false; + for (String pemLine : pemLines) { + if (isInPem) { + if (pemLine.trim().startsWith("-----END ")) { + break; + } + ret.add(pemLine.trim()); + } + if (pemLine.trim().startsWith("-----BEGIN ")) { + isInPem = true; + } + } + + return String.join("", ret); +// return StringUtils.join(ret.toArray()); + } + + public static byte[] encrypt256(String data, String publicKey) + throws BadPaddingException, IllegalBlockSizeException, InvalidKeyException, NoSuchPaddingException, + NoSuchAlgorithmException { + Cipher cipher = null; + + try { + cipher = Cipher.getInstance("RSA/None/PKCS1Padding", "BC"); + } catch (NoSuchProviderException e) { + e.printStackTrace(); + } + + assert cipher != null; + cipher.init(Cipher.ENCRYPT_MODE, getPublicKey(publicKey)); + return cipher.doFinal(data.getBytes()); + } + + public static String decrypt(String data, String base64PrivateKey) + throws IllegalBlockSizeException, InvalidKeyException, BadPaddingException, NoSuchAlgorithmException, + NoSuchPaddingException { + return decrypt(Base64.getDecoder().decode(data.getBytes()), getPrivateKey(base64PrivateKey)); + } + + /** + * Encrypt with RSA OAEP_SHA256 + * + * @param publicKey publicKey + * @param content content + * @return encrypted bytes + */ + public static byte[] encryptRsaOaepSha256(PublicKey publicKey, byte[] content) { + try { + AlgorithmParameters algp = AlgorithmParameters.getInstance("OAEP"); + AlgorithmParameterSpec paramSpec = + new OAEPParameterSpec("SHA-256", "MGF1", MGF1ParameterSpec.SHA256, PSource.PSpecified.DEFAULT); + algp.init(paramSpec); + Cipher cipher = Cipher.getInstance("RSA/ECB/OAEPWithSHA-256AndMGF1Padding"); + cipher.init(Cipher.ENCRYPT_MODE, publicKey, algp); + return cipher.doFinal(content); + } catch (Exception e) { + throw new RuntimeException("Encrypt RSA OAEP_SHA256 failed", e); + } + } + + /** + * Decrypt with RSA OAEP_SHA256 + * + * @param privateKey privateKey + * @param cipherText encrypted bytes + * @return plain bytes + */ + public static byte[] decryptRsaOaepSha256(PrivateKey privateKey, byte[] cipherText) { + try { + AlgorithmParameters algp = AlgorithmParameters.getInstance("OAEP"); + AlgorithmParameterSpec paramSpec = + new OAEPParameterSpec("SHA-256", "MGF1", MGF1ParameterSpec.SHA256, PSource.PSpecified.DEFAULT); + algp.init(paramSpec); + Cipher cipher = Cipher.getInstance("RSA/ECB/OAEPWithSHA-256AndMGF1Padding"); + cipher.init(Cipher.DECRYPT_MODE, privateKey, algp); + return cipher.doFinal(cipherText); + } catch (Exception e) { + throw new RuntimeException("Decrypt RSA OAEP_SHA256 failed", e); + } + } + + public static byte[] encryptRsaOaepSha256PKCS1(PublicKey publicKey, byte[] content) { + try { + AlgorithmParameters algp = AlgorithmParameters.getInstance("OAEP"); + AlgorithmParameterSpec paramSpec = + new OAEPParameterSpec("SHA-256", "MGF1", MGF1ParameterSpec.SHA256, PSource.PSpecified.DEFAULT); + algp.init(paramSpec); + Cipher cipher = Cipher.getInstance("RSA/NONE/OAEPWithSHA-256AndMGF1Padding"); + cipher.init(Cipher.ENCRYPT_MODE, publicKey, algp); + return cipher.doFinal(content); + } catch (Exception e) { + throw new RuntimeException("Encrypt RSA OAEP_SHA256 failed", e); + } + } + + public static KeyPair generateRsa2048KeyPair() { + return generateRsaKeyPair(2048); + } + + public static KeyPair generateRsa3072KeyPair() { + return generateRsaKeyPair(3072); + } + + /** + * Generate an RSA KeyPair + * keySize: e.g. 2048 + */ + public static KeyPair generateRsaKeyPair(int keySize) { + try { + KeyPairGenerator kpGen = KeyPairGenerator.getInstance("RSA"); + kpGen.initialize(keySize, new SecureRandom()); + return kpGen.generateKeyPair(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public static byte[] decryptRsaOaepSha256PKCS1(PrivateKey privateKey, byte[] cipherText) { + try { 
AlgorithmParameters algp = AlgorithmParameters.getInstance("OAEP"); + AlgorithmParameterSpec paramSpec = + new OAEPParameterSpec("SHA-256", "MGF1", MGF1ParameterSpec.SHA256, PSource.PSpecified.DEFAULT); + algp.init(paramSpec); + Cipher cipher = Cipher.getInstance("RSA/NONE/OAEPWithSHA-256AndMGF1Padding"); + cipher.init(Cipher.DECRYPT_MODE, privateKey, algp); + return cipher.doFinal(cipherText); + } catch (Exception e) { + throw new RuntimeException("Decrypt RSA OAEP_SHA256 failed", e); + } + } + +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/utils/SignUtil.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/utils/SignUtil.java new file mode 100644 index 000000000..a41cbef05 --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/utils/SignUtil.java @@ -0,0 +1,156 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.common.encdb.utils; + +import com.alibaba.polardbx.common.encdb.cipher.AsymCrypto; +import com.alibaba.polardbx.common.encdb.enums.AsymmAlgo; +import com.alibaba.polardbx.common.encdb.enums.HashAlgo; +import com.google.common.primitives.Bytes; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.security.PrivateKey; +import java.security.PublicKey; +import java.security.Signature; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/** + * Signature utilities + */ +public class SignUtil { + + /** + * Verify a signature with an ECDSA public key + */ + public static boolean verifyWithSha256Ecdsa(PublicKey publicKey, byte[] message, byte[] sign) { + return verify(publicKey, "SHA256withECDSA", message, sign); + } + + /** + * Sign with an ECDSA private key + */ + public static byte[] signWithSha256Ecdsa(PrivateKey privateKey, byte[] message) { + return sign(privateKey, "SHA256withECDSA", message); + } + + /** + * Verify with RSA PKCS1_5 SHA256 + */ + public static boolean verifySha256Rsa(PublicKey publicKey, byte[] message, byte[] sign) { + return verify(publicKey, "SHA256withRSA", message, sign); + } + + /** + * Verify with a public key + */ + public static boolean verify(PublicKey publicKey, String algorithm, byte[] message, byte[] sign) { + try { + Signature signature = Signature.getInstance(algorithm); + signature.initVerify(publicKey); + signature.update(message); + return signature.verify(sign); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * Sign with RSA PKCS1_5 SHA256 + */ + public static byte[] signWithSha256Rsa(PrivateKey privateKey, byte[] message) { + return sign(privateKey, "SHA256withRSA", message); + } + + /** + * Sign with a private key + */ + public static byte[] sign(PrivateKey privateKey, String algorithm, byte[] message) { + try { + Signature signature = Signature.getInstance(algorithm); + signature.initSign(privateKey); + signature.update(message); + return signature.sign(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + // #pragma pack(1) + // struct HmacBlob { + // Hash::Alg alg; // signature algorithm + // uint8_t data[]; // signature value + // }; + // #pragma pack() + public static byte[] signHmac(HashAlgo alg, byte[] key, byte[] data) { + List<Byte> result = new ArrayList<>(); + result.add((byte) alg.getVal()); + + byte[] sig = HMAC.hmac(alg, key, data); + result.addAll(Bytes.asList(sig)); + + return Bytes.toArray(result); + } + + public static boolean verifyHmac(byte[] key, byte[] sigBlob, byte[] data) { + ByteBuffer bb = ByteBuffer.wrap(sigBlob).order(ByteOrder.LITTLE_ENDIAN); + + HashAlgo alg = HashAlgo.from(bb.get()); + + int sigLen = bb.remaining(); + byte[] sig = new byte[sigLen]; + bb.get(sig); + + // compare the MAC carried in the blob with a freshly recomputed one; + // byte-wise comparison, not reference equality + return Arrays.equals(sig, HMAC.hmac(alg, key, data)); + } + + // #pragma pack(1) + // struct SignatureBlob { + // Asymmetric::Alg alg; // signature algorithm + // uint8_t data[]; // signature value + // }; + // #pragma pack() + public static byte[] sign(AsymmAlgo alg, String privateKeyPemString, byte[] data) throws RuntimeException { + List<Byte> result = new ArrayList<>(); + result.add((byte) alg.getVal()); + + try { + byte[] sig = AsymCrypto.sign(alg, privateKeyPemString, data); + result.addAll(Bytes.asList(sig)); + } catch (Exception e) { + throw new RuntimeException("Sign signature blob failed", e); + } + + return Bytes.toArray(result); + } + + public static boolean verify(String publicKeyPemString, byte[] sigBlob, byte[] data) throws RuntimeException { + ByteBuffer bb = ByteBuffer.wrap(sigBlob).order(ByteOrder.LITTLE_ENDIAN); + + AsymmAlgo alg = AsymmAlgo.from(bb.get()); + + int sigLen = bb.remaining(); + byte[] sig = new byte[sigLen]; + bb.get(sig); + + try { + return AsymCrypto.verify(alg, publicKeyPemString, data, sig); + } catch (Exception e) { + throw new RuntimeException("Verify signature blob failed", e); + } + } +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/utils/Utils.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/utils/Utils.java new file mode 100644 index 000000000..89e3d1faf --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/encdb/utils/Utils.java @@ -0,0 +1,441 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
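A sign-then-verify sketch for the RSA helpers above (illustrative only, with a throwaway key pair):

    java.security.KeyPair kp = RSAUtil.generateRsa2048KeyPair();
    byte[] msg = "payload".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    byte[] sig = SignUtil.signWithSha256Rsa(kp.getPrivate(), msg);
    assert SignUtil.verifySha256Rsa(kp.getPublic(), msg, sig);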
+ */ + +package com.alibaba.polardbx.common.encdb.utils; + +import com.alibaba.polardbx.common.encdb.EncdbException; +import com.alibaba.polardbx.common.encdb.enums.CCFlags; +import com.alibaba.polardbx.common.encdb.enums.Constants; +import org.bouncycastle.crypto.digests.MD5Digest; +import org.bouncycastle.util.encoders.Hex; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.BufferedReader; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.security.SecureRandom; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.Timestamp; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Base64; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import static com.alibaba.polardbx.common.encdb.enums.Constants.ENCDB_KEY_SIZE; +import static com.alibaba.polardbx.common.encdb.enums.OrdinalEnum.searchEnum; + +public class Utils { + private static Logger logger = LoggerFactory.getLogger(Utils.class); + public static final int MIN_ALLOWED_VER_DIFF = 2; + + /* Julian-date equivalents of Day 0 in Unix and Postgres reckoning */ + public static long POSTGRES_EPOCH_JDATE = 2451545; /* == date2j(2000, 1, 1) */ + public static long UNIX_EPOCH_JDATE = 2440588; /* == date2j(1970, 1, 1) */ + public static long USECS_PER_DAY = 86400000000L; + public static long SECS_PER_DAY = 86400L; + + public static String bytesTobase64(byte[] param) { + return Base64.getEncoder().encodeToString(param); + } + + public static byte[] base64ToBytes(String param) { + return Base64.getDecoder().decode(param); + } + + public static String bytesToPgHexString(byte[] data) { + return "\\x" + Hex.toHexString(data); + } + + public static byte[] pgHexStringToBytes(String s) { + return pgHexStringToBytes(s.getBytes(StandardCharsets.UTF_8)); + } + + public static byte[] pgHexStringToBytes(byte[] s) { + if (s == null) { + return null; + } + + // Starting with PG 9.0, a new hex format is supported + // that starts with "\x". Figure out which format we're + // dealing with here. 
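    // For example, the PG hex form "\x6162" decodes to the two bytes {0x61, 0x62};
    // input without the leading "\x" marker is rejected below by returning null.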
// + if (s.length < 2 || s[0] != '\\' || s[1] != 'x') { + return null; + } + return toBytesHexEscaped(s); + } + + private static byte[] toBytesHexEscaped(byte[] s) { + byte[] output = new byte[(s.length - 2) / 2]; + for (int i = 0; i < output.length; i++) { + byte b1 = gethex(s[2 + i * 2]); + byte b2 = gethex(s[2 + i * 2 + 1]); + // squid:S3034 + // Raw byte values should not be used in bitwise operations in combination with shifts + output[i] = (byte) ((b1 << 4) | (b2 & 0xff)); + } + return output; + } + + private static byte gethex(byte b) { + // 0-9 == 48-57 + if (b <= 57) { + return (byte) (b - 48); + } + + // a-f == 97-102 + if (b >= 97) { + return (byte) (b - 97 + 10); + } + + // A-F == 65-70 + return (byte) (b - 65 + 10); + } + + public static String getEncdbDir() { + return "/tmp/encdb-" + System.getProperty("user.name"); + } + + public static String getLocalConfigPath(String name) { + return getEncdbDir() + "/" + name; + } + + public static String readResourceStream(String resourcePath) throws IOException { + InputStream in = Utils.class.getResourceAsStream(resourcePath); + if (in == null) { + throw new FileNotFoundException("Resource '" + resourcePath + "' not found in package."); + } + StringBuilder sb = new StringBuilder(); + + try (BufferedReader br = new BufferedReader(new InputStreamReader(in))) { + String line; + while ((line = br.readLine()) != null) { + sb.append(line).append(System.lineSeparator()); + } + } + + return sb.toString(); + } + + public static short getUserID(String username) { + long hashID = Utils.hashToID(username); + + //hashID is of 8 bytes, hex string of 16 characters + //!!! hex hashID into 18 characters to make sure that the BigInteger is always positive. !!! + BigInteger biHashID = new BigInteger(Hex.decode(String.format("%018X", hashID))); + BigInteger m = new BigInteger(String.valueOf(Short.MAX_VALUE)); + + return biHashID.mod(m).shortValue(); + } + + public static long hashToID(String valueString) { + MD5Digest md5 = new MD5Digest(); + byte[] plainBytes = valueString.getBytes(); + byte[] resBuf = new byte[md5.getDigestSize()]; + md5.update(plainBytes, 0, plainBytes.length); + md5.doFinal(resBuf, 0); + + ByteBuffer buf = ByteBuffer.wrap(resBuf); + buf.order(ByteOrder.LITTLE_ENDIAN); + + long hashVal = 0; + while (buf.remaining() > (Long.SIZE >> 8)) { + hashVal ^= buf.getLong(); + } + + return hashVal; + } + + public static byte[] readBinaryFromFile(String filename) throws IOException { + return Files.readAllBytes(Paths.get(filename)); + } + + public static void writeBinaryToFile(String filename, byte[] data) throws IOException { + try (OutputStream outputStream = new FileOutputStream(filename)) { + outputStream.write(data); + } + } + + public static byte[] getRootKeyBytes(String rootKeyStr) { + if (rootKeyStr == null || rootKeyStr.isEmpty()) { + return null; + } + + if (rootKeyStr.toLowerCase().startsWith("0x")) { + rootKeyStr = rootKeyStr.substring(2); + } + + if (rootKeyStr.length() != ENCDB_KEY_SIZE * 2) { + logger.error("expected root key length is 16 bytes (32 chars) in hex string format."); + return null; + } + return Hex.decode(rootKeyStr); + } + + public static void showDemoInfo(String tag, String value) { + if (!tag.isEmpty()) { + logger.info("===================== " + tag + " ====================="); + } + logger.info(value); + } + + public static List<Byte> swapBytesByPivot(byte[] tmpResult, int index) { + assert index < tmpResult.length; + ArrayList<Byte> result = new ArrayList<>(); + for (int i = 0; i < tmpResult.length; ++i) { + result.add(tmpResult[(index++) % tmpResult.length]); + } + return result; + } + + public static List<Byte> swapBytesByPivot(List<Byte> tmpResult, int index) { + assert index < tmpResult.size(); + ArrayList<Byte> result = new ArrayList<>(); + for (int i = 0; i < tmpResult.size(); ++i) { + result.add(tmpResult.get((index++) % tmpResult.size())); + } + return result; + } + + public static byte[] generateIv(int ivLen) { + byte[] iv = new byte[ivLen]; + new SecureRandom().nextBytes(iv); + return iv; + } + + //old format + //public static String[] splitKeyname(String kName) { + // return kName.split("\\|"); + //} + + //new format: must handle escaped '\\' and '|' + public static String[] splitKeyname(String keyname) { + if (keyname.length() < 3 || keyname.charAt(0) != '|' || keyname.charAt(keyname.length() - 1) != '|') { + throw new EncdbException("split keyName error: the first byte and the last byte are not '|'."); + } + + String[] splitKname = new String[5]; + int splitKnameIdx = 0; + int beginIndx = 1; + int escapeCnt = 0; + for (int i = 1; i < keyname.length(); i++) { + if (keyname.charAt(i) == '|' && (i == keyname.length() - 1 || escapeCnt % 2 == 0)) { + if (splitKnameIdx >= 5) { + throw new EncdbException("split keyname error:" + keyname + " format error."); + } + splitKname[splitKnameIdx] = deformatKeynameComponent(keyname.substring(beginIndx, i)); + escapeCnt = 0; + beginIndx = i + 1; + splitKnameIdx++; + } + if (keyname.charAt(i) == '\\') { + escapeCnt += 1; + } else { + escapeCnt = 0; + } + } + return splitKname; + } + + private static String deformatKeynameComponent(String subInput) { + /* strip the escaping '\' before any '|' or '\' character */ + String ret = ""; + if (subInput.length() == 0) { + throw new EncdbException("deformat keyName error: the subInput length is 0."); + } + boolean escape = false; + for (int i = 0; i < subInput.length(); i++) { + if (escape) { + if (subInput.charAt(i) == '|' || subInput.charAt(i) == '\\') { + ret += subInput.charAt(i); + escape = false; + } else { + throw new EncdbException( + "deformat keyName error: the subInput format is invalid. subInput is:" + subInput); + } + } else { + if (subInput.charAt(i) == '\\') { + escape = true; + } else if (subInput.charAt(i) == '|') { + throw new EncdbException( + "deformat keyName error: the subInput format is invalid. subInput is:" + subInput); + } else { + ret += subInput.charAt(i); + } + } + } + if (escape) { + throw new EncdbException("deformat keyName error: the subInput format is invalid. subInput is:" + subInput); + } + return ret; + } + + //old format + //public static String buildKeyname(String username, String dbname, String tblName, String colName) { + // return username + "|" + dbname + "|" + tblName + "|" + colName; + //} + + //new format: initially obtained from encdb via the keyStore; later the SDK side will generate it itself, + //since buildKeyname is requested frequently under the current SDK architecture + public static String buildKeyname(Connection dbConnection, String username, String dbname, String schemaName, + String tblName, String colName) { + try { + if (schemaName == null) { + if (dbConnection == null) { + schemaName = "default"; + } else { + schemaName = dbConnection.getSchema(); + } + } + } catch (Exception throwables) { + throw new EncdbException("get schema for table:" + tblName + " failed", throwables); + } + //String keyname = keyStore.keynameGenerate(username, dbname, schemaName, tblName, colName); + String keyname = internalBuildKeyname(username, dbname, schemaName, tblName, colName); + return keyname; + } + + private static String internalBuildKeyname(String username, String dbname, String schemaName, String tblName, + String colName) { + ArrayList<String> nameMember = new ArrayList<>(); + nameMember.add(username); + nameMember.add(dbname); + nameMember.add(schemaName); + nameMember.add(tblName); + nameMember.add(colName); + int lastNonIdx = -1; + for (int i = nameMember.size() - 1; i >= 0; i--) { + if (lastNonIdx >= 0 && (nameMember.get(i) == null || nameMember.get(i).isEmpty())) { + throw new EncdbException("internal build keyName failed: empty inputs must be consecutive at the end. " + + " username:" + username + + " dbname:" + dbname + + " schemaName:" + schemaName + + " tblName:" + tblName + + " colName:" + colName); + } + if (lastNonIdx < 0 && !(nameMember.get(i) == null) && !(nameMember.get(i).isEmpty())) { + lastNonIdx = i; + } + } + if (lastNonIdx < 0) { + throw new EncdbException("internal build keyName error: all inputs are null."); + } + //TODO: maybe only username default keyname will be used in future. 
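    // For example (illustrative values): buildKeyname(conn, "alice", "db1", null, "t1", "c1")
    // produces "|alice|db1|<schema>|t1|c1|"; a literal '|' or '\' inside a component is
    // escaped by formatKeynameComponent below, and splitKeyname undoes that escaping.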
+ if (dbname == null || dbname.isEmpty()) { + throw new EncdbException("internal build keyName error: database name cannot be null."); + } + + String keyname = "|"; + for (int i = 0; i <= lastNonIdx; i++) { + if (nameMember.get(i) == null || nameMember.get(i).isEmpty()) { + throw new EncdbException("internal build keyName error: cannot be null for subinputIndex:" + i + "."); + } + keyname += formatKeynameComponent(nameMember.get(i)) + "|"; + } + return keyname; + } + + private static String formatKeynameComponent(String subInput) { + /* add '\' before any '|' or '\' character */ + String ret = ""; + for (int i = 0; i < subInput.length(); i++) { + if (subInput.charAt(i) == '|' || subInput.charAt(i) == '\\') { + // use a string literal for the backslash: '\\' + charAt(i) would do int arithmetic instead of concatenation + ret += "\\" + subInput.charAt(i); + } else { + ret += subInput.charAt(i); + } + } + return ret; + } + + public static long convertTimeStampNoTimeZoneToMicroSecond(Timestamp val) { + return val.toInstant().toEpochMilli() * 1000; + } + + public static Timestamp convertMicroSecondToTimeStampNoTimeZone(long val) { + return new Timestamp(val / 1000); + } + + public static int getPatchVer(String currentSdkVer) { + return Integer.parseInt(currentSdkVer.substring(currentSdkVer.lastIndexOf(".") + 1)); + } + + public static String getMajorMinorVer(String leastClientVer) { + return leastClientVer.substring(0, leastClientVer.lastIndexOf(".")); + } + + public static byte[] uuidStringToBytes(String uuid_str) { + String hex_str = uuid_str.replaceAll("-", ""); + if (hex_str.startsWith("{") && hex_str.endsWith("}")) { + hex_str = hex_str.replaceAll("\\{", ""); + hex_str = hex_str.replaceAll("\\}", ""); + } + + if (hex_str.length() % 2 != 0) { + throw new EncdbException("invalid uuid composite: \"" + uuid_str + "\""); + } + + return Hex.decode(hex_str); + } + + public static String bytesToUuidString(byte[] uuid_bytes) { + StringBuilder sb = new StringBuilder(Hex.toHexString(uuid_bytes)); + /* + * We print uuid values as a string of 8, 4, 4, 4, and then 12 + * hexadecimal characters, with each group separated by a hyphen + * ("-"). Therefore, add the hyphens at the appropriate places here. 
+         */
+        int[] hyphen_pos = {8, 4, 4, 4, 12};
+        char hyphen_char = '-';
+        int position = 0;
+        for (int i : hyphen_pos) {
+            position += i;
+            sb.insert(position, hyphen_char);
+            position++;
+        }
+        sb.deleteCharAt(position - 1);
+
+        return sb.toString();
+    }
+
+    public static String genRandomUuidString() {
+        return UUID.randomUUID().toString();
+    }
+
+    public static String toGeneralizedTimeString(Timestamp ts) {
+        return new SimpleDateFormat("yyyyMMddHHmmssZ").format(ts);
+    }
+
+    public static String currentGeneralizedTimeString() {
+        Timestamp ts = new Timestamp(System.currentTimeMillis());
+        return new SimpleDateFormat("yyyyMMddHHmmssZ").format(ts);
+    }
+}
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/eventlogger/EventLogger.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/eventlogger/EventLogger.java
index 44b63db90..0941c2646 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/eventlogger/EventLogger.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/eventlogger/EventLogger.java
@@ -16,6 +16,7 @@
 
 package com.alibaba.polardbx.common.eventlogger;
 
+import com.alibaba.polardbx.common.properties.DynamicConfig;
 import com.alibaba.polardbx.common.utils.logger.Logger;
 import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
 
@@ -26,6 +27,9 @@ public class EventLogger {
     private final static String LOG_FORMAT = "%s %s %s";
 
     public static void log(EventType type, String msg) {
+        if (EventType.isTrxEvent(type) && !DynamicConfig.getInstance().isEnableTrxEventLog()) {
+            return;
+        }
         String logContent = String.format(LOG_FORMAT, type.getLevel().name(), type.name(), msg);
         logger.info(logContent);
     }
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/eventlogger/EventType.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/eventlogger/EventType.java
index 85f1285fd..61231da08 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/eventlogger/EventType.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/eventlogger/EventType.java
@@ -16,11 +16,26 @@
 
 package com.alibaba.polardbx.common.eventlogger;
 
+import java.util.EnumSet;
+
 public enum EventType {
     /**
      * Rebalance data distribution info
      */
     REBALANCE_INFO(EventLevel.INFO),
+
+    /**
+     * Used to count how often certain DDL operations occur
+     */
+    DDL_INFO(EventLevel.INFO),
+    /**
+     * TwoPhaseDdl info
+     */
+    TWO_PHASE_DDL_INFO(EventLevel.INFO),
+    /**
+     * TwoPhaseDdl warning
+     */
+    TWO_PHASE_DDL_WARN(EventLevel.WARN),
     /**
      * A DDL error occurred, usually caused by a bug
      */
@@ -40,12 +55,19 @@
      */
     DN_HA(EventLevel.INFO),
 
+    /**
+     * Storage pool info: needs to do HA
+     */
+    STORAGE_POOL_INFO(EventLevel.INFO),
     /**
      * event for creating db with mode=auto
      */
     CREATE_AUTO_MODE_DB(EventLevel.WARN),
 
     CREATE_DATABASE_LIKE_AS(EventLevel.INFO),
+
+    STANDARD_TO_ENTERPRISE(EventLevel.INFO),
+
     ONLINE(EventLevel.INFO),
 
     OFFLINE(EventLevel.INFO),
@@ -61,6 +83,7 @@
     XRPC_NEW_VALID_CLIENT(EventLevel.INFO),
     XRPC_AUTH_TIMEOUT(EventLevel.WARN),
     XRPC_KILL_CLIENT(EventLevel.WARN),
+    XPLAN_FEEDBACK_DISABLE(EventLevel.INFO),
 
     DML_ERROR(EventLevel.WARN),
 
@@ -74,7 +97,18 @@
     INIT_OSS(EventLevel.INFO),
     CLOSE_OSS(EventLevel.INFO),
 
-    OPTIMIZER_ALERT(EventLevel.INFO);
+    OPTIMIZER_ALERT(EventLevel.INFO),
+
+    AUTO_SP(EventLevel.INFO),
+    AUTO_SP_OPT(EventLevel.INFO),
+    AUTO_SP_ERR(EventLevel.INFO),
+
+    TRX_LOG_ERR(EventLevel.INFO),
+    TRX_RECOVER(EventLevel.INFO),
+    TRX_INFO(EventLevel.INFO),
+    TRX_ERR(EventLevel.INFO),
+
+    CDC_WARN(EventLevel.WARN);
 
     private final EventLevel level;
 
@@ -85,4 +119,12 @@ public enum EventType {
     public EventLevel getLevel() {
         return this.level;
     }
+
+    private final static EnumSet<EventType> TRX_EVENT = EnumSet.of(
+        AUTO_SP_ERR, TRX_LOG_ERR, TRX_RECOVER, TRX_ERR
+    );
+
+    public static boolean isTrxEvent(EventType t) {
+        return TRX_EVENT.contains(t);
+    }
 }
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/exception/code/ErrorCode.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/exception/code/ErrorCode.java
index ad1710a96..d3512268e 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/exception/code/ErrorCode.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/exception/code/ErrorCode.java
@@ -706,6 +706,10 @@
 
     ERR_ROWCOUNT_COLLECT(ErrorType.Optimizer, 4530),
 
+    ERR_STATISTIC_JOB_INTERRUPTED(ErrorType.Optimizer, 4531),
+
+    ERR_IN_PRUNING(ErrorType.Optimizer, 4532),
+
     // ============= executor: starts at index 4600 ================
     //
     ERR_FUNCTION(ErrorType.Executor, 4600),
@@ -830,6 +834,9 @@
     ERR_FK_CONVERT_TO_CHARSET(ErrorType.Executor, 4696),
     ERR_FK_REFERENCING_COLUMN_NOT_EXIST(ErrorType.Executor, 4697),
 
+    ERR_UNSUPPORTED_COLUMN_TYPE_WITH_CCI(ErrorType.Executor, 4698),
+    ERR_DDL_WITH_CCI(ErrorType.Executor, 4699),
+
     ERR_TABLE_ALREADY_EXISTS(ErrorType.Executor, 4643),
 
     ERR_PENDING_DDL_JOB_EXISTS(ErrorType.Executor, 4644),
@@ -997,6 +1004,11 @@
     ERR_CHARACTER_NOT_SUPPORT(ErrorType.Priviledge, 5121),
     ERR_DATATYPE_NOT_SUPPORT(ErrorType.Priviledge, 5122),
 
+    ERR_INSTANCE_READ_ONLY_OPTION_NOT_SUPPORT(ErrorType.Priviledge, 5123),
+
+    ERR_ENCDB(ErrorType.Priviledge, 5124),
+
+    ERR_LBAC(ErrorType.Priviledge, 5125),
 
     ERR_CREATE_USER_FAILED(ErrorType.Account, 5200),
@@ -1144,11 +1156,18 @@
 
     ERR_DATA_NOT_FOUND(ErrorType.Procedure, 5516),
 
+    // ================ columnar index related exceptions: start at 5600 ==================
+    /**
+     * Columnar index check related errors
+     */
+    ERR_COLUMNAR_INDEX_CHECKER(ErrorType.Executor, 5600),
+
     // ================ auth related exceptions ==================
     ERR_AUTH_AKSK_FAIL(ErrorType.Auth, 6001),
 
     ERR_BASELINE(ErrorType.Baseline, 7001),
+    ERR_PLAN_COST(ErrorType.Baseline, 7002),
 
     // ================ View exceptions 7901 - 8000 ==================
     ERR_VIEW(ErrorType.Executor, 7901),
@@ -1227,6 +1246,7 @@
 
     // ================= concurrency control Related Exceptions ===================
     ERR_CCL(ErrorType.CCL, 9201),
+    ERR_CCL_RESCHEDULE(ErrorType.CCL, 9202),
 
     ERR_LOGICAL_TABLE_UNSUPPORTED(ErrorType.Executor, 9203),
@@ -1235,6 +1255,7 @@
     ERR_REPLICATION_RESULT(ErrorType.CDC, 9204),
 
     ERR_REPLICA_NOT_SUPPORT(ErrorType.CDC, 9205),
+    ERR_INSTANCE_READ_ONLY_OPTION_SET_FAILED(ErrorType.CDC, 9206),
 
     ERR_PARTITION_MANAGEMENT(ErrorType.Executor, 9300),
@@ -1283,6 +1304,7 @@
     ERR_SUBPARTITION_STRATEGY_IS_NOT_EQUAL(ErrorType.Executor, 9337),
     ERR_REDUNDANCY_PARTITION_DEFINITION(ErrorType.Executor, 9338),
     ERR_REDUNDANCY_SUBPARTITION_DEFINITION(ErrorType.Executor, 9339),
+    ERR_AUTO_CREATE_TABLEGROUP(ErrorType.Executor, 9340),
 
     // ============= private protocol (X-Protocol): starts at index 10000 ================
     ERR_X_PROTOCOL_BAD_PACKET(ErrorType.Xprotocol, 10000),
@@ -1308,7 +1330,18 @@
     ERR_FILE_STORAGE_READ_ONLY(ErrorType.OSS, 11012),
     ERR_OSS_CONNECT(ErrorType.OSS, 11013),
     ERR_FILE_STORAGE_EXISTS(ErrorType.OSS, 11014),
-    ERR_BACK_FILL_TIMEOUT(ErrorType.OSS, 11015);
+    ERR_BACK_FILL_TIMEOUT(ErrorType.OSS, 11015),
+    ERR_ARCHIVE_NOT_ENABLED(ErrorType.OSS, 11016),
+    ERR_ARCHIVE_TABLE_EXISTS(ErrorType.OSS, 11017),
+
+    // ============= Columnar Related Exceptions ================
+    ERR_BINARY_PREDICATE(ErrorType.ColumnarIndexPrune, 12000),
+    ERR_BITMAP_ROW_GROUP_INDEX(ErrorType.ColumnarIndexPrune, 12001),
+    ERR_LOAD_CSV_FILE(ErrorType.ColumnarAccess, 12002),
+    ERR_LOAD_DEL_FILE(ErrorType.ColumnarAccess, 12003),
+    ERR_LOAD_ORC_FILE(ErrorType.ColumnarAccess, 12004),
+    ERR_COLUMNAR_SNAPSHOT(ErrorType.GMS, 12005),
+    ERR_COLUMNAR_SCHEMA(ErrorType.GMS, 12006);
 
     private static final String errorMessagePre = "ERR-CODE: [PXC-";
     private final int code;
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/exception/code/ErrorType.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/exception/code/ErrorType.java
index 76816bca6..b2204a891 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/exception/code/ErrorType.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/exception/code/ErrorType.java
@@ -18,6 +18,6 @@
 public enum ErrorType {
     Config, Atom, Group, Route, Sequence, Parser, Optimizer, Executor, Server, Account, Priviledge, Auth, Transaction,
-    Baseline, Mpp, GMS, Xprotocol, CDC, CCL, OSS, Procedure, Function,
+    Baseline, Mpp, GMS, Xprotocol, CDC, CCL, OSS, Procedure, Function, ColumnarIndexPrune, ColumnarAccess,
     Other, Net
 }
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/exception/code/ResourceBundleUtil.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/exception/code/ResourceBundleUtil.java
index 38c2ccfb2..82b28b9de 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/exception/code/ResourceBundleUtil.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/exception/code/ResourceBundleUtil.java
@@ -34,7 +34,6 @@ public class ResourceBundleUtil {
     private static final ResourceBundleUtil instance = new ResourceBundleUtil("res/ErrorCode");
     public static final String DEFAULT_PLACEHOLDER_PREFIX = "${";
     public static final String DEFAULT_PLACEHOLDER_SUFFIX = "}";
-    public static final int SYSTEM_PROPERTIES_MODE_NEVER = 0;
     public static final int SYSTEM_PROPERTIES_MODE_FALLBACK = 1;
     public static final int SYSTEM_PROPERTIES_MODE_OVERRIDE = 2;
@@ -189,8 +188,7 @@ private String resolvePlaceholder(String placeholder, int systemPropertiesMode)
     protected String getString(String key) {
         String value = null;
-
-        if (value == null && bundle.containsKey(key)) {
+        if (bundle.containsKey(key)) {
             value = bundle.getString(key);
         }
         if (value == null) {
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/jdbc/IConnection.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/jdbc/IConnection.java
index 240942328..d17a25162 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/jdbc/IConnection.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/jdbc/IConnection.java
@@ -101,6 +101,15 @@ default int getShareReadViewSeq() {
         throw new UnsupportedOperationException("Connection does not support share read view.");
     }
 
+    /**
+     * Forcibly releases any database locks currently held, even though
+     * auto-commit mode has been disabled.
+     *
+     * @throws SQLException if a database access error occurs; the connection
+     * must be discarded.
+     */
+    void forceRollback() throws SQLException;
+
     void discard(Throwable t);
 
     void kill() throws SQLException;
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/jdbc/ITransactionPolicy.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/jdbc/ITransactionPolicy.java
index 78829ad05..efa20b327 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/jdbc/ITransactionPolicy.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/jdbc/ITransactionPolicy.java
@@ -18,6 +18,7 @@
 
 import com.alibaba.polardbx.common.exception.TddlRuntimeException;
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.common.properties.DynamicConfig;
 import com.alibaba.polardbx.common.utils.TStringUtil;
 
 import java.util.EnumSet;
@@ -30,10 +31,28 @@ enum TransactionClass {
 
     TSO,
 
+    /**
+     * PolarDB-X TSO Async Commit transaction
+     */
+    TSO_ASYNC_COMMIT,
+
+    /**
+     * Read-only PolarDB-X TSO transaction
+     */
     TSO_READONLY,
 
     AUTO_COMMIT_SINGLE_SHARD,
 
+    /**
+     * XA transaction with Commit TimeStamp.
+     */
+    XA_TSO,
+
+    /**
+     * DRDS 2PC transaction
+     */
+    BEST_EFFORT,
+
     /**
      * PolarDB-X TSO transaction with optimized 2PC (transaction log omitted)
      */
@@ -45,46 +64,73 @@
 
     MPP_READ_ONLY_TRANSACTION,
 
-    AUTO_COMMIT;
+    COLUMNAR_READ_ONLY_TRANSACTION,
+
+    AUTO_COMMIT,
+
+    AUTO_COMMIT_TSO,
+
+    ARCHIVE;
 
     public boolean isA(EnumSet<TransactionClass> set) {
         return set.contains(this);
     }
 
-    public static EnumSet<TransactionClass> DISTRIBUTED_TRANSACTION = EnumSet
+    public static final EnumSet<TransactionClass> DISTRIBUTED_TRANSACTION = EnumSet
         .of(TransactionClass.XA,
+            TransactionClass.XA_TSO,
             TransactionClass.TSO,
             TransactionClass.TSO_READONLY,
             TransactionClass.AUTO_COMMIT_SINGLE_SHARD,
-            TSO_2PC_OPT);
+            TSO_2PC_OPT,
+            TransactionClass.ARCHIVE);
 
-    public static EnumSet<TransactionClass> EXPLICIT_TRANSACTION = EnumSet
+    public static final EnumSet<TransactionClass> EXPLICIT_TRANSACTION = EnumSet
         .of(TransactionClass.XA,
+            TransactionClass.XA_TSO,
             TransactionClass.TSO,
             TransactionClass.ALLOW_READ_CROSS_DB,
-            TSO_2PC_OPT);
+            TransactionClass.COBAR_STYLE,
+            TSO_2PC_OPT,
+            TransactionClass.ARCHIVE);
 
-    public static EnumSet<TransactionClass> TSO_TRANSACTION = EnumSet
+    public static final EnumSet<TransactionClass> TSO_TRANSACTION = EnumSet
         .of(TransactionClass.TSO,
             TransactionClass.TSO_READONLY,
             TransactionClass.AUTO_COMMIT_SINGLE_SHARD,
             TSO_2PC_OPT);
 
-    public static EnumSet<TransactionClass> ALLOW_FOLLOW_READ_TRANSACTION = EnumSet
+    public static final EnumSet<TransactionClass> ALLOW_FOLLOW_READ_TRANSACTION = EnumSet
         .of(TransactionClass.AUTO_COMMIT,
            TransactionClass.TSO_READONLY,
            TransactionClass.AUTO_COMMIT_SINGLE_SHARD,
            TransactionClass.MPP_READ_ONLY_TRANSACTION);
 
-    public static EnumSet<TransactionClass> SUPPORT_INVENTORY_TRANSACTION = EnumSet
+    public static final EnumSet<TransactionClass> SUPPORT_INVENTORY_TRANSACTION = EnumSet
         .of(TransactionClass.XA,
            TransactionClass.ALLOW_READ_CROSS_DB,
            TransactionClass.AUTO_COMMIT);
 
-    public static EnumSet<TransactionClass> SUPPORT_SHARE_READVIEW_TRANSACTION = EnumSet
+    public static final EnumSet<TransactionClass> SUPPORT_SHARE_READVIEW_TRANSACTION = EnumSet
         .of(TransactionClass.XA,
+            TransactionClass.XA_TSO,
            TransactionClass.TSO,
-            TSO_2PC_OPT);
+            TSO_2PC_OPT,
+            TransactionClass.ARCHIVE);
+
+    public static final EnumSet<TransactionClass> SUPPORT_PARALLEL_GET_CONNECTION_TRANSACTION = EnumSet
+        .of(TransactionClass.XA,
+            TransactionClass.XA_TSO,
+            TransactionClass.TSO,
+            TransactionClass.AUTO_COMMIT,
+            TransactionClass.AUTO_COMMIT_SINGLE_SHARD,
+            TransactionClass.AUTO_COMMIT_TSO,
+            TransactionClass.TSO_READONLY,
+            TransactionClass.ARCHIVE);
+
+    public static final EnumSet<TransactionClass> ALLOW_GROUP_PARALLELISM_WITHOUT_SHARE_READVIEW_TRANSACTION =
+        EnumSet.of(TransactionClass.AUTO_COMMIT,
+            TransactionClass.AUTO_COMMIT_TSO);
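+
+    // e.g. TransactionClass.XA.isA(DISTRIBUTED_TRANSACTION) is true,
+    // while TransactionClass.AUTO_COMMIT.isA(DISTRIBUTED_TRANSACTION) is false.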
 }
 
 Free FREE = new Free();
@@ -92,6 +138,7 @@ public boolean isA(EnumSet<TransactionClass> set) {
 
     NoTransaction NO_TRANSACTION = new NoTransaction();
     DefaultPolicy XA = new DefaultPolicy(TransactionClass.XA);
     Tso TSO = new Tso();
+    DefaultPolicy ARCHIVE = new DefaultPolicy(TransactionClass.ARCHIVE);
 
     TransactionClass getTransactionType(boolean isAutoCommit, boolean isReadOnly);
@@ -146,7 +193,7 @@ public TransactionClass getTransactionType(boolean isAutoCommit, boolean isReadO
                 return TransactionClass.AUTO_COMMIT_SINGLE_SHARD;
             } else if (isReadOnly) {
                 return TransactionClass.TSO_READONLY;
-            } else if (isAutoCommit) {
+            } else if (isAutoCommit && !DynamicConfig.getInstance().isForbidAutoCommitTrx()) {
                 return TransactionClass.AUTO_COMMIT;
             } else {
                 return TransactionClass.TSO;
@@ -189,7 +236,7 @@ public DefaultPolicy(TransactionClass type, boolean auto) {
 
         @Override
         public TransactionClass getTransactionType(boolean isAutoCommit, boolean isReadOnly) {
-            if (!auto && isAutoCommit) {
+            if (!auto && isAutoCommit && !DynamicConfig.getInstance().isForbidAutoCommitTrx()) {
                 return TransactionClass.AUTO_COMMIT;
             }
             return type;
@@ -220,6 +267,8 @@ static ITransactionPolicy of(String name) {
             return ITransactionPolicy.ALLOW_READ_CROSS_DB;
         case "NO_TRANSACTION":
             return ITransactionPolicy.NO_TRANSACTION;
+        case "ARCHIVE":
+            return ITransactionPolicy.ARCHIVE;
         default:
             throw new TddlRuntimeException(ErrorCode.ERR_CONFIG, "Unknown transaction policy: " + name);
         }
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/jdbc/PruneRawString.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/jdbc/PruneRawString.java
index 57b08b7ae..60264b93a 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/jdbc/PruneRawString.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/jdbc/PruneRawString.java
@@ -17,13 +17,10 @@
 package com.alibaba.polardbx.common.jdbc;
 
 import com.alibaba.polardbx.common.utils.GeneralUtil;
-import com.alibaba.polardbx.common.utils.TStringUtil;
 import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
 
 import java.util.BitSet;
 import java.util.List;
-import java.util.Set;
 
 /**
  * @author fangwu
@@ -88,11 +85,16 @@ public Object getObj(int index, int skIndex) {
 
     @Override
     public String display() {
-        String rs = "PruneRaw(" + buildRawString() + ")";
-        if (rs.length() > 4096) {
-            return rs.substring(0, 4096) + "...";
+        StringBuilder stringBuilder = new StringBuilder();
+        if (size() == super.getObjList().size()) {
+            stringBuilder.append("NonPruneRaw(" + buildRawString() + ")");
+        } else {
+            stringBuilder.append("PruneRaw(" + buildRawString() + ")");
         }
-        return rs;
+        if (stringBuilder.length() > 4096) {
+            return stringBuilder.substring(0, 4096) + "...";
+        }
+        return stringBuilder.toString();
     }
 
     @Override
@@ -195,12 +197,16 @@ private void paramCheck() {
             if (indexes == null || indexes.isEmpty()) {
                 GeneralUtil.nestedException("RawString init ERROR MULTI_INDEX mode with invalid indexes:" + indexes);
             }
+            return;
         default:
             GeneralUtil.nestedException("RawString init ERROR mode invalid :" + pruneMode);
         }
     }
 
     public void merge(PruneRawString pruneRawString) {
+        if (size() == super.getObjList().size()) {
+            return;
+        }
         if (pruneMode == PRUNE_MODE.RANGE) {
             transformModeToMultiIndex();
         }
@@ -208,6 +214,11 @@
             pruneRawString.transformModeToMultiIndex();
         }
         indexes.or(pruneRawString.indexes);
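+        // if the merged selection now covers every value, collapse back to the cheaper full-RANGE form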
+        if (indexes.cardinality() == super.getObjList().size()) {
+            pruneMode = PRUNE_MODE.RANGE;
+            startIndex = 0;
+            endIndex = super.getObjList().size();
+        }
     }
 
     private void transformModeToMultiIndex() {
@@ -230,4 +241,29 @@
     public enum PRUNE_MODE {
         RANGE, MULTI_INDEX;
     }
+
+    /**
+     * Does the same thing as RawString.pruneStep, but returns itself.
+     * WARNING: this method mutates this PruneRawString.
+     */
+    @Override
+    public PruneRawString pruneStep(int curIndex) {
+        pruneMode = PruneRawString.PRUNE_MODE.RANGE;
+        startIndex = curIndex;
+        endIndex = curIndex + 1;
+        return this;
+    }
+
+    @Override
+    public PruneRawString clone() {
+        if (pruneMode == PRUNE_MODE.RANGE) {
+            return new PruneRawString(super.getObjList(), pruneMode, startIndex, endIndex, null);
+        } else {
+            return new PruneRawString(super.getObjList(), pruneMode, -1, -1, (BitSet) indexes.clone());
+        }
+    }
+
+    public int getSourceSize() {
+        return super.size();
+    }
 }
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/jdbc/RawString.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/jdbc/RawString.java
index 86db3f3a2..3887908ca 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/jdbc/RawString.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/jdbc/RawString.java
@@ -210,4 +210,15 @@ public Object acquireObject(int subIndex, int skIndex) {
             return getObj(subIndex, skIndex);
         }
     }
+
+    /**
+     * Build a PruneRawString that selects the single object (targeted by curIndex) inside this RawString.
+     */
+    public PruneRawString pruneStep(int curIndex) {
+        return new PruneRawString(this.getObjList(), PruneRawString.PRUNE_MODE.RANGE, curIndex, curIndex + 1, null);
+    }
+
+    public RawString clone() {
+        return new RawString(this.getObjList());
+    }
 }
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/jdbc/TableName.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/jdbc/TableName.java
index 314f9a7f3..d0f8c5165 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/jdbc/TableName.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/jdbc/TableName.java
@@ -36,11 +36,6 @@ public String getTableName() {
         return this.tableName;
     }
 
-    public void setTableName(String tableName) throws SQLException {
-        this.checkName(tableName);
-        this.tableName = tableName.trim();
-    }
-
     private void checkName(String tName) throws SQLException {
         if (StringUtils.isEmptyOrWhitespaceOnly(tName)) {
             throw new SQLException("tableName should not be empty", "S1009");
@@ -63,9 +58,6 @@ private void checkName(String tName) throws SQLException {
             case '\u001a':
                 needsHexEscape = true;
                 break;
-            case ' ':
-                needsHexEscape = true;
-                break;
             case '"':
                 needsHexEscape = true;
                 break;
@@ -80,7 +72,6 @@
                 throw new SQLException("tableName format error: " + this.tableName, "S1009");
             }
         }
-
     }
 }
 
@@ -89,4 +80,3 @@ public String toString() {
         return getTableName();
     }
 }
-
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/lock/EmptyLockingFunctionHandle.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/lock/EmptyLockingFunctionHandle.java
new file mode 100644
index 000000000..0a8fe3285
--- /dev/null
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/lock/EmptyLockingFunctionHandle.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.common.lock;
+
+import java.util.List;
+
+/**
+ * @author pangzhaoxing
+ */
+public class EmptyLockingFunctionHandle implements LockingFunctionHandle {
+
+    public Integer tryAcquireLock(String lockName, int timeout) {
+        return 0;
+    }
+
+    public Integer release(String lockName) {
+        return 0;
+    }
+
+    public Integer releaseAllLocks() {
+        return 0;
+    }
+
+    public Integer isFreeLock(String lockName) {
+        return 0;
+    }
+
+    public String isUsedLock(String lockName) {
+        return null;
+    }
+
+    public Integer isMyLockValid(String lockName) {
+        return 0;
+    }
+
+    @Override
+    public List<String> getAllMyLocks() {
+        return null;
+    }
+}
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/lock/LockingConfig.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/lock/LockingConfig.java
new file mode 100644
index 000000000..deb0f8b5a
--- /dev/null
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/lock/LockingConfig.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.common.lock;
+
+/**
+ * @author pangzhaoxing
+ */
+public class LockingConfig {
+
+    /**
+     * the maximum number of locks; with a heartbeat delay of 50ms and a heartbeat interval of 2s,
+     * max_number_of_locks < 2s / 50ms
+     */
+    public static final int MAX_LOCKS_NUMBER = 64;
+
+    /**
+     * the heartbeat interval used to maintain the lock (in milliseconds).
+     */
+    public static final int HEART_BEAT_INTERVAL = 10000;
+
+    public static final int MAX_RETRY_TIMES = 3;
+
+    /**
+     * Lease mechanism:
+     * the expiration time (in seconds).
+     * If expiration_time is too small, the lock is vulnerable to network delay;
+     * if it is too large, an invalid lock will not be replaced in time.
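+     * e.g. with EXPIRATION_TIME = 60 and HEART_BEAT_INTERVAL = 10000 ms, a lock
+     * survives up to six missed heartbeats before another session can seize it.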
+ */ + public static final int EXPIRATION_TIME = 60; + +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/lock/LockingFunctionHandle.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/lock/LockingFunctionHandle.java index 9e463e58a..47c095315 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/lock/LockingFunctionHandle.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/lock/LockingFunctionHandle.java @@ -16,463 +16,52 @@ package com.alibaba.polardbx.common.lock; -import com.alibaba.polardbx.common.constants.SystemTables; -import com.alibaba.polardbx.common.exception.TddlRuntimeException; -import com.alibaba.polardbx.common.exception.code.ErrorCode; -import com.alibaba.polardbx.common.logical.ITConnection; -import com.alibaba.polardbx.common.utils.GeneralUtil; -import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.common.utils.logger.LoggerFactory; -import com.alibaba.polardbx.common.utils.thread.NamedThreadFactory; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; import java.sql.SQLException; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -public class LockingFunctionHandle { - private static final Logger logger = LoggerFactory.getLogger(LockingFunctionHandle.class); - - private static final String SQL_GET_DEADLOCK_SNAPSHOT = - "select lock_name, drds_session, need_list from " + SystemTables.DRDS_SYSTABLE_LOCKING_FUNCTION + " for update"; - - - private static final ScheduledExecutorService HEARTBEAT_EXECUTOR = - new ScheduledThreadPoolExecutor(1, new NamedThreadFactory("heartbeat-thread-pool", true)); - - private static final Map HEARTBEAT_FUTURES = new ConcurrentHashMap<>(); - private LockingFunctionManager manager; - - private ITConnection tddlConnection; - private String drdsSession; - private LockingFunctionSystableDao dao; - - private Map reentries; - - LockingFunctionHandle(LockingFunctionManager manager, ITConnection tddlConnection, String drdsSession) { - this.manager = manager; - this.tddlConnection = tddlConnection; - this.reentries = new HashMap<>(); - this.drdsSession = drdsSession; - this.dao = new LockingFunctionSystableDao(manager, drdsSession); - } - - public Integer tryAcquireLock(final String lockName, final int timeout) throws SQLException, InterruptedException { - - Integer reentryTimes = 0; - if ((reentryTimes = reentries.get(lockName)) != null) { - - if (dao.isMyLockValid(lockName)) { - - reentries.put(lockName, reentryTimes + 1); - return 1; - } else { - - reentries.remove(lockName); - GeneralUtil.nestedException( - "the lock: " + lockName + " has been abnormally lost. 
please try again to get this lock."); - } - } - - boolean successToCreateLock = false; - try { - successToCreateLock = dao.tryCreateLock(lockName); - } catch (SQLException e) { - logger.info("innodb deadlock occur, retry", e); - - return tryAcquireLock(lockName, timeout); - } - if (successToCreateLock) { - - postHandleAfterGetLock(lockName); - return 1; - } - - boolean deadlock = false; - boolean unRepeatedRead = false; - try (Connection connectionInTransaction = manager.getConnection()) { - try { - connectionInTransaction.setAutoCommit(false); - - DeadlockAvoidanceHelper helper = new DeadlockAvoidanceHelper(connectionInTransaction, lockName); - if (!(unRepeatedRead = helper.unRepeatedRead()) && !(deadlock = helper.tryLockAndDetect())) { - - dao.registerNeedList(connectionInTransaction, lockName); - } - - connectionInTransaction.commit(); - } catch (SQLException e) { - connectionInTransaction.rollback(); - throw e; - } finally { - connectionInTransaction.setAutoCommit(true); - } - } - - if (unRepeatedRead) { - logger.info("retry the acquisition!"); - return tryAcquireLock(lockName, timeout); - } - if (deadlock) { - throw new TddlRuntimeException(ErrorCode.ERR_USER_LOCK_DEADLOCK); - } - - return tryAcquireWithinTimeout(lockName, timeout); - } - - public Integer release(String lockName) throws SQLException { - Integer releaseResult = null; - Integer reentryTimes; - if ((reentryTimes = reentries.get(lockName)) != null) { - - try (Connection connectionInTransaction = manager.getConnection()) { - try { - connectionInTransaction.setAutoCommit(false); - - if (dao.isMyLockValid(connectionInTransaction, lockName)) { - - reentryTimes--; - if (reentryTimes >= 0) { - reentries.put(lockName, reentryTimes); - releaseResult = 1; - } else { - - releaseResult = - (dao.grantLock(connectionInTransaction, lockName) || dao.deleteLockTuple( - connectionInTransaction, lockName)) ? 1 : 0; - reentries.remove(lockName); - endHeartbeatTask(lockName); - } - } else { - - reentries.remove(lockName); - releaseResult = 0; - endHeartbeatTask(lockName); - } - - connectionInTransaction.commit(); - } catch (SQLException e) { - connectionInTransaction.rollback(); - throw e; - } finally { - connectionInTransaction.setAutoCommit(true); - } - } - } else { - - boolean exists = dao.checkLockExistence(lockName); - releaseResult = exists ? 
0 : null; - } - return releaseResult; - } - - public Integer releaseAllLocks() throws SQLException { - Integer count = 0; - try (Connection connection = manager.getConnection()) { - count = releaseAllLocks(connection); - } - manager.removeHandle(drdsSession); - return count; - } - - public Integer isFreeLock(String lockName) throws SQLException { - return dao.isFreeLock(lockName); - } - - public String isUsedLock(String lockName) throws SQLException { - return dao.isUsedLock(lockName); - } - - private boolean exceedMaxActiveLockCounts() { - return HEARTBEAT_FUTURES.size() + 1 > LockingFunctionManager.MAX_LOCKS_NUMBER; - } - - private Integer releaseAllLocks(Connection connectionInTransaction) throws SQLException { - int count = 0; - for (Iterator iter = reentries.entrySet().iterator(); iter.hasNext(); ) { - Map.Entry entry = (Map.Entry) iter.next(); - String lockName = entry.getKey(); - Integer reentryTimes = entry.getValue(); - - endHeartbeatTask(lockName); - - try { - connectionInTransaction.setAutoCommit(false); - - if (dao.isMyLockValid(connectionInTransaction, lockName) && !dao - .grantLock(connectionInTransaction, lockName)) { - dao.deleteLockTuple(connectionInTransaction, lockName); - } - connectionInTransaction.commit(); - } catch (SQLException e) { - connectionInTransaction.rollback(); - throw e; - } finally { - connectionInTransaction.setAutoCommit(true); - } - - iter.remove(); - count += (reentryTimes + 1); - } - return count; - } - - private void postHandleAfterGetLock(String lockName) { - - reentries.put(lockName, 0); - - if (exceedMaxActiveLockCounts()) { - try { - release(lockName); - } catch (SQLException e) { +import java.util.List; - } - GeneralUtil.nestedException( - "the lock number exceeds the maximum lock number " + LockingFunctionManager.MAX_LOCKS_NUMBER); - } - startHeartbeatTask(lockName); - } - - private Integer tryAcquireWithinTimeout(final String lockName, final int timeout) - throws SQLException, InterruptedException { - - long restTime = timeout < 0 ? Integer.MAX_VALUE : timeout * 1000; - boolean successToGetLock = false; - while (!successToGetLock) { - long startTime = System.currentTimeMillis(); - if (Thread.interrupted()) { - throw GeneralUtil.nestedException("interrupted"); - } - if (dao.checkLockHolder(lockName)) { - logger.debug(lockName + " lock belongs to " + drdsSession); - - successToGetLock = true; - } - try (Connection connectionInTransaction = manager.getConnection()) { - try { - connectionInTransaction.setAutoCommit(false); - if (!dao.checkLockValidityForUpdate(connectionInTransaction, lockName) && dao.trySeizeLock( - connectionInTransaction, lockName)) { - - dao.unregisterNeedList(connectionInTransaction, lockName); - successToGetLock = true; - } else { - - if (restTime <= 0) { - - dao.unregisterNeedList(connectionInTransaction, lockName); - break; - } else { - - Thread.sleep(100); - } - } - - connectionInTransaction.commit(); - } catch (SQLException e) { - connectionInTransaction.rollback(); - throw e; - } finally { - connectionInTransaction.setAutoCommit(true); - } - } - - restTime -= (System.currentTimeMillis() - startTime); - } - if (successToGetLock) { - - postHandleAfterGetLock(lockName); - } - return successToGetLock ? 
1 : 0; - } - - private void startHeartbeatTask(String lockName) { - logger.debug("start heartbeat: " + drdsSession); - HeartBeatRunnable heartBeatRunnable = new HeartBeatRunnable(lockName); - ScheduledFuture future = - HEARTBEAT_EXECUTOR.scheduleAtFixedRate(heartBeatRunnable, 1000, LockingFunctionManager.HEART_BEAT_INTERVAL, - TimeUnit.MILLISECONDS); - HEARTBEAT_FUTURES.put(getHeartbeatTag(lockName), future); - } - - private void endHeartbeatTask(String lockName) { - logger.debug("end heartbeat: " + drdsSession); - String heartbeatTag = getHeartbeatTag(lockName); - ScheduledFuture future = HEARTBEAT_FUTURES.get(heartbeatTag); - if (future != null) { - future.cancel(false); - HEARTBEAT_FUTURES.remove(heartbeatTag); - } - } - - private String getHeartbeatTag(String lockName) { - return drdsSession + "_" + lockName; - } - - private class DeadlockAvoidanceHelper { - private String tryAcquiredLockName; - - private Map> allocationMatrix; - - private Map needMatrix; - - private Map availableList; - - private Set sessions; - - private Connection connectionInTransaction; - - DeadlockAvoidanceHelper(Connection connectionInTransaction, String tryAcquiredLockName) throws SQLException { - this.connectionInTransaction = connectionInTransaction; - this.tryAcquiredLockName = tryAcquiredLockName; - - this.availableList = new HashMap<>(); - this.needMatrix = new HashMap<>(); - this.allocationMatrix = new HashMap<>(); - this.sessions = new HashSet<>(); - - try (PreparedStatement ps = connectionInTransaction.prepareStatement(SQL_GET_DEADLOCK_SNAPSHOT)) { - ResultSet resultSet = ps.executeQuery(); - logger.debug("lock_name | drds_session | need_list"); - while (resultSet.next()) { - String lockName = resultSet.getString(LockingFunctionSystableDao.COLUMN_LOCK_NAME); - String session = resultSet.getString(LockingFunctionSystableDao.COLUMN_DRDS_SESSION); - String needList = resultSet.getString(LockingFunctionSystableDao.COLUMN_NEED_LIST); - logger.debug(lockName + " | " + session + " | " + needList); - sessions.add(session); - - availableList.put(lockName, 0); - - Map allocationList = null; - if ((allocationList = allocationMatrix.get(session)) != null) { - allocationList.put(lockName, 1); - } else { - allocationList = new HashMap<>(); - allocationList.put(lockName, 1); - allocationMatrix.put(session, allocationList); - } - - if (!"".equals(needList)) { - String[] sessionsInNeedList = needList.split(","); - for (String sessionInNeedList : sessionsInNeedList) { - if (sessionInNeedList.length() >= 3) { - needMatrix.put(sessionInNeedList, lockName); - sessions.add(sessionInNeedList); - if (!allocationMatrix.containsKey(sessionInNeedList)) { - allocationMatrix.put(sessionInNeedList, new HashMap<>()); - } - } - } - } - } - } - - needMatrix.put(drdsSession, tryAcquiredLockName); - sessions.add(drdsSession); - if (!allocationMatrix.containsKey(drdsSession)) { - allocationMatrix.put(drdsSession, new HashMap<>()); - } - - for (String session : sessions) { - if (!needMatrix.containsKey(session)) { - needMatrix.put(session, ""); - } - } - } - - public boolean unRepeatedRead() { - return !availableList.containsKey(tryAcquiredLockName); - } - - public boolean tryLockAndDetect() { - logger.debug("deadlock snapshot: " + drdsSession + " need: " + tryAcquiredLockName); - logger.debug(sessions.toString()); - logger.debug(allocationMatrix.toString()); - logger.debug(needMatrix.toString()); - logger.debug(availableList.toString()); - boolean detected = false; - - while (!sessions.isEmpty()) { - boolean found = false; - for (Iterator 
iter = sessions.iterator(); iter.hasNext(); ) {
-                    String session = iter.next();
-                    String needLock = needMatrix.get(session);
-
-                    if (needLock.isEmpty() || availableList.get(needLock) == 1) {
-
-                        Map<String, Integer> allocationList = allocationMatrix.get(session);
-                        for (String lockName : availableList.keySet()) {
-                            if (allocationList.containsKey(lockName)) {
-                                availableList.put(lockName, 1);
-                            }
-                        }
-
-                        iter.remove();
-                        found = true;
-                    }
-                }
-                if (!found) {
-                    detected = true;
-                    break;
-                }
-            }
-            return detected;
-        }
-
-    }
-
-    private class HeartBeatRunnable implements Runnable {
-        private String lockName;
-
-        HeartBeatRunnable(String lockName) {
-            this.lockName = lockName;
-        }
-
-        private boolean heartbeat(String lockName) throws SQLException {
-
-            if (tddlConnection == null || tddlConnection.isClosed()) {
-                return false;
-            }
-            int updates = 0;
-            try (Connection connectionInTransaction = manager.getConnection()) {
-                try {
-                    connectionInTransaction.setAutoCommit(false);
-
-                    if (dao.isMyLockValid(connectionInTransaction, lockName)) {
-                        updates = dao.updateModifiedTime(connectionInTransaction, lockName);
-                        logger.debug("heartbeat: " + drdsSession + ", " + lockName);
-                    }
-                    connectionInTransaction.commit();
-                } catch (SQLException e) {
-                    connectionInTransaction.rollback();
-                    throw e;
-                } finally {
-                    connectionInTransaction.setAutoCommit(true);
-                }
-            }
-            return updates == 1;
-        }
-
-        @Override
-        public void run() {
-            try {
-                if (!heartbeat(lockName)) {
-                    endHeartbeatTask(lockName);
-                }
-            } catch (SQLException e) {
-                logger.error("heartbeat task for lock " + lockName + " failed", e);
-            }
-        }
-    }
+/**
+ * session level
+ *
+ * @author pangzhaoxing
+ */
+public interface LockingFunctionHandle {
+
+    /**
+     * Tries to obtain a lock with a name given by the string str, using a timeout of timeout seconds. A negative timeout value means infinite timeout.
+     * Returns 1 if the lock was obtained successfully, 0 if the attempt timed out (for example, because another client has previously locked the name), or NULL if an error occurred
+     */
+    Integer tryAcquireLock(String lockName, int timeout);
+
+    /**
+     * Releases the lock named by the string str that was obtained with tryAcquireLock().
+     * Returns 1 if the lock was released, 0 if the lock was not established by this thread (in which case the lock is not released), and NULL if the named lock did not exist.
+     */
+    Integer release(String lockName);
+
+    /**
+     * Releases all named locks held by the current session and
+     * returns the number of locks released (0 if there were none).
+     */
+    Integer releaseAllLocks();
+
+    /**
+     * Checks whether the lock named str is free to use (that is, not locked).
+     * Returns 1 if the lock is free (no one is using the lock), 0 if the lock is in use, and NULL if an error occurs
+     */
+    Integer isFreeLock(String lockName);
+
+    /**
+     * Checks whether the lock named str is in use (that is, locked).
+     * If so, it returns the connection identifier of the client session that holds the lock. Otherwise, it returns NULL.
+     */
+    String isUsedLock(String lockName);
+
+    /**
+     * Checks whether the lock named str belongs to the current session.
+     * Returns 1 if the lock was valid, 0 if the lock was not valid.
+     */
+    Integer isMyLockValid(String lockName);
+
+    List<String> getAllMyLocks();
+}
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/lock/LockingFunctionManager.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/lock/LockingFunctionManager.java
deleted file mode 100644
index b70d9eb12..000000000
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/lock/LockingFunctionManager.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright [2013-2021], Alibaba Group Holding Limited
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.common.lock; - -import com.alibaba.polardbx.common.constants.SystemTables; -import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.common.utils.logger.LoggerFactory; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; - -public class LockingFunctionSystableDao { - private static final Logger logger = LoggerFactory.getLogger(LockingFunctionSystableDao.class); - - static final String COLUMN_ID = "id"; - static final String COLUMN_LOCK_NAME = "lock_name"; - static final String COLUMN_DRDS_SESSION = "drds_session"; - static final String COLUMN_NEED_LIST = "need_list"; - static final String COLUMN_GMT_MODIFIED = "gmt_modified"; - static final String COLUMN_GMT_CREATED = "gmt_created"; - static final String COLUMN_EXPIRED = "expired"; - - private static final String SQL_CHECK_LOCK_HOLDER = - "select lock_name, drds_session, (date_add(gmt_modified, interval " + LockingFunctionManager.EXPIRATION_TIME - + " second) < now()) as expired from " + SystemTables.DRDS_SYSTABLE_LOCKING_FUNCTION + " where lock_name=?"; - private static final String SQL_CHECK_LOCK_VALIDITY_FOR_UPDATE = - "select lock_name, drds_session, gmt_modified, now() as now, (date_add(gmt_modified, interval " - + LockingFunctionManager.EXPIRATION_TIME + " second)< now()) as expired from " - + SystemTables.DRDS_SYSTABLE_LOCKING_FUNCTION + " where lock_name=? for update"; - private static final String SQL_IS_MY_LOCK_VALID = - "select lock_name, drds_session, (date_add(gmt_modified, interval " + LockingFunctionManager.EXPIRATION_TIME - + " second)< now()) as expired from " + SystemTables.DRDS_SYSTABLE_LOCKING_FUNCTION - + " where lock_name=? and drds_session=? for update"; - private static final String SQL_IS_MY_LOCK_VALID_WITHOUT_FOR_UPDATE = - "select lock_name, drds_session, (date_add(gmt_modified, interval " + LockingFunctionManager.EXPIRATION_TIME - + " second)< now()) as expired from " + SystemTables.DRDS_SYSTABLE_LOCKING_FUNCTION - + " where lock_name=? and drds_session=?"; - private static final String SQL_CHECK_LOCK_EXISTENCE = - "select lock_name, drds_session from " + SystemTables.DRDS_SYSTABLE_LOCKING_FUNCTION + " where lock_name=?"; - private static final String SQL_GET_NEED_LIST = - "select need_list from " + SystemTables.DRDS_SYSTABLE_LOCKING_FUNCTION + " where lock_name=?"; - private static final String SQL_UPDATE_SESSION_AND_NEED_LIST = - "update " + SystemTables.DRDS_SYSTABLE_LOCKING_FUNCTION + " set drds_session=?, need_list=? where lock_name=?"; - private static final String SQL_TRY_CREATE_LOCK = - "insert ignore into " + SystemTables.DRDS_SYSTABLE_LOCKING_FUNCTION - + " (lock_name, drds_session, gmt_created, gmt_modified) values (?, ?, now(), now())"; - private static final String SQL_DELETE_LOCK_TUPLE = - "delete from " + SystemTables.DRDS_SYSTABLE_LOCKING_FUNCTION + " where lock_name = ? and drds_session = ?"; - private static final String SQL_REGISTER_NEED_LIST = "update " + SystemTables.DRDS_SYSTABLE_LOCKING_FUNCTION - + " set need_list=concat(need_list,?) 
where lock_name=?"; - private static final String SQL_UPDATE_NEED_LIST = - "update " + SystemTables.DRDS_SYSTABLE_LOCKING_FUNCTION + " set need_list=? where lock_name=?"; - private static final String SQL_TRY_SEIZE_LOCK = "update " + SystemTables.DRDS_SYSTABLE_LOCKING_FUNCTION - + " set drds_session=?, gmt_modified = now() where lock_name=?"; - private static final String SQL_UPDATE_MODIFIED_TIME = "update " + SystemTables.DRDS_SYSTABLE_LOCKING_FUNCTION - + " set gmt_modified=now() where lock_name=? and drds_session=?"; - private static final String SQL_CHECK_LOCK = - "select lock_name, drds_session, (date_add(gmt_modified, interval " + LockingFunctionManager.EXPIRATION_TIME - + " second)< now()) as expired from " + SystemTables.DRDS_SYSTABLE_LOCKING_FUNCTION + " where lock_name=?"; - - private LockingFunctionManager manager; - private String drdsSession; - - LockingFunctionSystableDao(LockingFunctionManager manager, String drdsSession) { - this.manager = manager; - this.drdsSession = drdsSession; - } - - public boolean isMyLockValid(String lockName) throws SQLException { - try (Connection connection = manager.getConnection(); - PreparedStatement ps = connection.prepareStatement(SQL_IS_MY_LOCK_VALID_WITHOUT_FOR_UPDATE)) { - ps.setString(1, lockName); - ps.setString(2, drdsSession); - ResultSet resultSet = ps.executeQuery(); - while (resultSet.next()) { - int expired = resultSet.getInt(COLUMN_EXPIRED); - return expired != 1; - } - } - return false; - } - - public boolean tryCreateLock(String lockName) throws SQLException { - int updates = 0; - try (Connection connection = manager.getConnection(); - PreparedStatement ps = connection.prepareStatement(SQL_TRY_CREATE_LOCK)) { - ps.setString(1, lockName); - ps.setString(2, drdsSession); - updates = ps.executeUpdate(); - logger.debug(connection + "try create lock: " + (updates == 1)); - } - return updates == 1; - } - - public boolean checkLockHolder(String lockName) throws SQLException { - try (Connection connection = manager.getConnection(); - PreparedStatement ps = connection.prepareStatement(SQL_CHECK_LOCK_HOLDER)) { - ps.setString(1, lockName); - ResultSet resultSet = ps.executeQuery(); - while (resultSet.next()) { - int expired = resultSet.getInt(COLUMN_EXPIRED); - String session = resultSet.getString(COLUMN_DRDS_SESSION); - return expired != 1 && session.equals(drdsSession); - } - } - return false; - } - - public boolean checkLockValidityForUpdate(Connection connectionInTransaction, String lockName) - throws SQLException { - try (PreparedStatement ps = connectionInTransaction.prepareStatement(SQL_CHECK_LOCK_VALIDITY_FOR_UPDATE)) { - ps.setString(1, lockName); - ResultSet resultSet = ps.executeQuery(); - while (resultSet.next()) { - int expired = resultSet.getInt(COLUMN_EXPIRED); - if (expired == 1) { - logger.debug(drdsSession + " find the lock: " + lockName + " is expired!"); - } - return expired != 1; - } - } - return false; - } - - public boolean trySeizeLock(Connection connectionInTransaction, String lockName) throws SQLException { - int updates = 0; - try (PreparedStatement ps = connectionInTransaction.prepareStatement(SQL_TRY_SEIZE_LOCK)) { - ps.setString(1, drdsSession); - ps.setString(2, lockName); - updates = ps.executeUpdate(); - if (updates == 1) { - logger.debug(drdsSession + " seize the lock " + lockName); - } - } - return updates == 1; - } - - public int registerNeedList(Connection connectionInTransaction, String lockName) throws SQLException { - int updates = 0; - try (PreparedStatement ps = 
connectionInTransaction.prepareStatement(SQL_REGISTER_NEED_LIST)) { - ps.setString(1, drdsSession + ","); - ps.setString(2, lockName); - updates = ps.executeUpdate(); - logger.debug(drdsSession + " register"); - } - return updates; - } - - public void unregisterNeedList(Connection connectionInTransaction, String lockName) throws SQLException { - String needList = null; - try (PreparedStatement getNeedListPs = connectionInTransaction.prepareStatement(SQL_GET_NEED_LIST)) { - getNeedListPs.setString(1, lockName); - ResultSet rs = getNeedListPs.executeQuery(); - while (rs.next()) { - needList = rs.getString("need_list"); - } - logger.debug(drdsSession + " unregister"); - } - - try (PreparedStatement updateNeedListPs = connectionInTransaction.prepareStatement(SQL_UPDATE_NEED_LIST)) { - if (needList != null) { - int index = needList.indexOf(drdsSession + ","); - if (index != -1) { - needList = needList.substring(0, index) + needList.substring(index + drdsSession.length() + 1); - updateNeedListPs.setString(1, needList); - updateNeedListPs.setString(2, lockName); - updateNeedListPs.executeUpdate(); - } - } - } - } - - public boolean isMyLockValid(Connection connectionInTransaction, String lockName) throws SQLException { - try (PreparedStatement ps = connectionInTransaction.prepareStatement(SQL_IS_MY_LOCK_VALID)) { - ps.setString(1, lockName); - ps.setString(2, drdsSession); - ResultSet resultSet = ps.executeQuery(); - while (resultSet.next()) { - int expired = resultSet.getInt(COLUMN_EXPIRED); - return expired != 1; - } - } - return false; - } - - public boolean grantLock(Connection connectionInTransaction, String lockName) throws SQLException { - int updates = 0; - String needList = null; - try (PreparedStatement getNeedListPs = connectionInTransaction.prepareStatement(SQL_GET_NEED_LIST)) { - getNeedListPs.setString(1, lockName); - ResultSet resultSet = getNeedListPs.executeQuery(); - while (resultSet.next()) { - needList = resultSet.getString(COLUMN_NEED_LIST); - } - } - - try (PreparedStatement updateSessionPs = connectionInTransaction - .prepareStatement(SQL_UPDATE_SESSION_AND_NEED_LIST)) { - if (needList != null && !needList.isEmpty()) { - - int index = needList.indexOf(","); - String newNeedList = needList.substring(index + 1); - String grantedSession = needList.substring(0, index); - - updateSessionPs.setString(1, grantedSession); - updateSessionPs.setString(2, newNeedList); - updateSessionPs.setString(3, lockName); - updates = updateSessionPs.executeUpdate(); - if (updates == 1) { - logger.debug(drdsSession + " grant the lock: " + lockName + " to " + grantedSession); - } - } - } - return updates == 1; - } - - public boolean deleteLockTuple(Connection connectionInTransaction, String lockName) throws SQLException { - int updates = 0; - try (PreparedStatement ps = connectionInTransaction.prepareStatement(SQL_DELETE_LOCK_TUPLE)) { - ps.setString(1, lockName); - ps.setString(2, drdsSession); - updates = ps.executeUpdate(); - logger.debug(drdsSession + " delete the lock: " + lockName); - } - return updates == 1; - } - - public boolean checkLockExistence(String lockName) throws SQLException { - try (Connection connection = manager.getConnection(); - PreparedStatement ps = connection.prepareStatement(SQL_CHECK_LOCK_EXISTENCE)) { - ps.setString(1, lockName); - ResultSet resultSet = ps.executeQuery(); - while (resultSet.next()) { - return true; - } - } - return false; - } - - public Integer isFreeLock(String lockName) throws SQLException { - try (Connection connection = manager.getConnection(); - 
PreparedStatement ps = connection.prepareStatement(SQL_CHECK_LOCK)) { - ps.setString(1, lockName); - ResultSet rs = ps.executeQuery(); - while (rs.next()) { - int expired = rs.getInt(COLUMN_EXPIRED); - return expired == 1 ? 1 : 0; - } - return 1; - } - } - - public String isUsedLock(String lockName) throws SQLException { - try (Connection connection = manager.getConnection(); - PreparedStatement ps = connection.prepareStatement(SQL_CHECK_LOCK)) { - ps.setString(1, lockName); - ResultSet rs = ps.executeQuery(); - while (rs.next()) { - int expired = rs.getInt(COLUMN_EXPIRED); - String session = rs.getString(COLUMN_DRDS_SESSION); - return expired == 1 ? null : session; - } - return null; - } - } - - public Integer updateModifiedTime(Connection connection, String lockName) throws SQLException { - int updates = 0; - try (PreparedStatement ps = connection.prepareStatement(SQL_UPDATE_MODIFIED_TIME)) { - ps.setString(1, lockName); - ps.setString(2, drdsSession); - updates = ps.executeUpdate(); - if (updates == 1) { - logger.debug("heartbeat for lock: " + lockName + ", session: " + drdsSession); - } - } - return updates; - } - -} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/lock/PolarDBXLockingFunctionHandle.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/lock/PolarDBXLockingFunctionHandle.java deleted file mode 100644 index 36ac94594..000000000 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/lock/PolarDBXLockingFunctionHandle.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.common.lock; - -import com.alibaba.polardbx.common.logical.ITConnection; - -public class PolarDBXLockingFunctionHandle extends LockingFunctionHandle { - PolarDBXLockingFunctionHandle(LockingFunctionManager manager, ITConnection tddlConnection, String drdsSession) { - super(manager, tddlConnection, drdsSession); - } - - @Override - public Integer tryAcquireLock(String lockName, int timeout) { - return 0; - } - - @Override - public Integer release(String lockName) { - return 0; - } - - @Override - public Integer releaseAllLocks() { - return 0; - } - - @Override - public Integer isFreeLock(String lockName) { - return 0; - } - - @Override - public String isUsedLock(String lockName) { - return null; - } -} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/mock/MockUtils.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/mock/MockUtils.java new file mode 100644 index 000000000..f62186de7 --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/mock/MockUtils.java @@ -0,0 +1,61 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.common.mock;
+
+import java.lang.reflect.Field;
+
+public class MockUtils {
+
+    public interface ThrowableRunnable {
+        void run() throws Throwable;
+    }
+
+    public interface ThrowableConsumer<T> {
+        void accept(T t) throws Throwable;
+    }
+
+    public static <T extends Throwable> T assertThrows(Class<T> expectedType,
+                                                       String expectedMessage,
+                                                       ThrowableRunnable methodCall) {
+        try {
+            methodCall.run();
+        } catch (Throwable e) {
+            if (expectedType.isInstance(e)) {
+                if (expectedMessage != null) {
+                    if (!expectedMessage.equals(e.getMessage())) {
+                        throw new AssertionError(
+                            "Expected message: " + expectedMessage + ", actual: " + e.getMessage()
+                        );
+                    }
+                }
+                return (T) e;
+            }
+            throw new AssertionError("Expected exception: " + expectedType.getName(), e);
+        }
+        throw new AssertionError("Expected exception: " + expectedType.getName());
+    }
+
+    public static void setInternalState(Object target, String fieldName, Object value) {
+        try {
+            Field field = target.getClass().getDeclaredField(fieldName);
+            field.setAccessible(true);
+            field.set(target, value);
+        } catch (NoSuchFieldException | IllegalAccessException e) {
+            throw new RuntimeException(e);
+        }
+    }
+}
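+// Usage sketch (hypothetical test values, for illustration only):
+//   IOException e = MockUtils.assertThrows(IOException.class, "boom",
+//       () -> { throw new IOException("boom"); });
+//   MockUtils.setInternalState(someObject, "someField", 42);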
+ */ + +package com.alibaba.polardbx.common.oss; + +public enum ColumnarFileType { + ORC, CSV, DEL, SET, + /** + * PK IDX log and meta(one record per partition, used as meta lock) + */ + PK_IDX_LOG, + PK_IDX_LOG_META, + PK_IDX_SNAPSHOT, + PK_IDX_LOCK, + PK_IDX_SST, + PK_IDX_BF; + + public static ColumnarFileType of(String fileType) { + if (fileType == null) { + return null; + } + + try { + return valueOf(fileType); + } catch (Throwable throwable) { + try { + return valueOf(fileType.toUpperCase()); + } catch (Throwable ignorable) { + return null; + } + } + } + + public boolean isDeltaFile() { + return this == DEL || this == CSV; + } +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/OSSFileType.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/OSSFileType.java index 7714ba9aa..123a7eed0 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/OSSFileType.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/OSSFileType.java @@ -23,7 +23,16 @@ public enum OSSFileType { TABLE_FORMAT("format", "%s_%s_%s.%s", "/tmp/%s_%s_%s.%s"), TABLE_FILE("orc", "%s_%s_%s.%s", "/tmp/%s_%s_%s.%s"), TABLE_META("bf", "%s_%s_%s_%s_%s.%s", "/tmp/%s_%s_%s_%s_%s.%s"), - EXPORT_ORC_FILE("orc", "%s_%s.%s", "../spill/temp/%s"); + EXPORT_ORC_FILE("orc", "%s_%s.%s", "../spill/temp/%s"), + + /** + * Columnar Primary index file: + */ + PK_IDX_LOG("log", "%s_%s_%s_%s_%s.%s", "/tmp/%s_%s_%s_%s_%s.%s"), + PK_IDX_LOG_META("meta", "%s_%s_%s_%s_%s.%s", "/tmp/%s_%s_%s_%s_%s.%s"), + PK_IDX_SNAPSHOT("snapshot", "%s_%s_%s_%s_%s.%s", "/tmp/%s_%s_%s_%s_%s.%s"), + PK_IDX_LOCK("lock", "%s_%s_%s_%s_%s.%s", "/tmp/%s_%s_%s_%s_%s.%s"), + PK_IDX_SST("sst", "%s_%s_%s_%s_%s.%s", "/tmp/%s_%s_%s_%s_%s.%s"); String suffix; String remotePathFormat; diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/Constants.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/Constants.java index ccf4358a4..df9921746 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/Constants.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/Constants.java @@ -90,7 +90,9 @@ private Constants() { public static final long MULTIPART_UPLOAD_PART_SIZE_DEFAULT = 104857600; // 100 MB - /** The minimum multipart size which OSS supports. */ + /** + * The minimum multipart size which OSS supports. + */ public static final int MULTIPART_MIN_SIZE = 100 * 1024; public static final int MULTIPART_UPLOAD_PART_NUM_LIMIT = 10000; @@ -155,4 +157,16 @@ private Constants() { public static final String UPLOAD_ACTIVE_BLOCKS_KEY = "fs.oss.upload.active.blocks"; public static final int UPLOAD_ACTIVE_BLOCKS_DEFAULT = 4; + + // S3 access verification + public static final String S3_ACCESS_KEY = "fs.s3a.access.key"; + public static final String S3_SECRET_KEY = "fs.s3a.secret.key"; + + // ABS access verification + // fs.azure.account.key..blob. + public static final String ABS_URI_SUFFIX_PATTERN = "%s.blob.%s"; + public static final String ABS_ACCOUNT_KEY_PATTERN = "fs.azure.account.key." 
+ ABS_URI_SUFFIX_PATTERN; + + public static final String AZURE_WASBS_SCHEME = "wasbs"; + public static final String AZURE_WASB_SCHEME = "wasb"; } \ No newline at end of file diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/InputStreamWithRateLimiter.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/InputStreamWithRateLimiter.java new file mode 100644 index 000000000..e03a8e772 --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/InputStreamWithRateLimiter.java @@ -0,0 +1,124 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.common.oss.filesystem; + +import org.apache.hadoop.fs.PositionedReadable; +import org.apache.hadoop.fs.Seekable; +import org.jetbrains.annotations.NotNull; + +import java.io.IOException; +import java.io.InputStream; + +public class InputStreamWithRateLimiter extends InputStream implements Seekable, PositionedReadable { + final private FileSystemRateLimiter rateLimiter; + final private InputStream in; + + public InputStreamWithRateLimiter(InputStream in, FileSystemRateLimiter rateLimiter) { + if (!(in instanceof Seekable) || !(in instanceof PositionedReadable)) { + throw new IllegalArgumentException( + "In is not an instance of Seekable or PositionedReadable"); + } + this.in = in; + this.rateLimiter = rateLimiter; + } + + @Override + public synchronized int read() throws IOException { + rateLimiter.acquireRead(1); + return in.read(); + } + + @Override + public synchronized int read(byte @NotNull [] buf, int off, int len) throws IOException { + rateLimiter.acquireRead(len); + return in.read(buf, off, len); + } + + @Override + public synchronized int read(byte @NotNull [] b) throws IOException { + rateLimiter.acquireRead(b.length); + return in.read(b); + } + + @Override + public synchronized long skip(long n) throws IOException { + // TODO(siyun): should this be tracked by rate limiter? 
+ return in.skip(n); + } + + @Override + public synchronized int available() throws IOException { + return in.available(); + } + + @Override + public synchronized void close() throws IOException { + in.close(); + } + + @Override + public synchronized void mark(int readlimit) { + in.mark(readlimit); + } + + @Override + public synchronized void reset() throws IOException { + in.reset(); + } + + @Override + public synchronized boolean markSupported() { + return in.markSupported(); + } + + @Override + public int read(long position, byte[] buffer, int offset, int length) throws IOException { + rateLimiter.acquireRead(length); + return ((PositionedReadable) in).read(position, buffer, offset, length); + } + + @Override + public void readFully(long position, byte[] buffer, int offset, int length) throws IOException { + rateLimiter.acquireRead(length); + ((PositionedReadable) in).readFully(position, buffer, offset, length); + } + + @Override + public void readFully(long position, byte[] buffer) throws IOException { + rateLimiter.acquireRead(buffer.length); + ((PositionedReadable) in).readFully(position, buffer); + } + + @Override + public void seek(long pos) throws IOException { + ((Seekable) in).seek(pos); + } + + @Override + public long getPos() throws IOException { + return ((Seekable) in).getPos(); + } + + @Override + public boolean seekToNewSource(long targetPos) throws IOException { + return ((Seekable) in).seekToNewSource(targetPos); + } + + public InputStream getWrappedStream() { + return in; + } +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/NFSFileSystem.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/NFSFileSystem.java index 1a9af7af0..4f4a7cd3c 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/NFSFileSystem.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/NFSFileSystem.java @@ -47,7 +47,7 @@ import static java.util.concurrent.TimeUnit.SECONDS; -public class NFSFileSystem extends FileSystem { +public class NFSFileSystem extends FileSystem implements RateLimitable { private static final Logger LOG = LoggerFactory.getLogger(NFSFileSystem.class); @@ -380,6 +380,7 @@ public FileStatus getFileStatusImpl(Path path) throws IOException { } } + @Override public FileSystemRateLimiter getRateLimiter() { return this.rateLimiter; } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/OSSAppendOutputStream.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/OSSAppendOutputStream.java new file mode 100644 index 000000000..6ba2b955d --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/OSSAppendOutputStream.java @@ -0,0 +1,213 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
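// A usage sketch for the wrapper above, assuming `fs` is one of the
// RateLimitable file systems in this change; the path is illustrative. The
// wrapped stream must implement both Seekable and PositionedReadable, which
// Hadoop's FSDataInputStream does, so the result of fs.open() qualifies.
FileSystemRateLimiter limiter = ((RateLimitable) fs).getRateLimiter();
try (InputStream in = new InputStreamWithRateLimiter(fs.open(path), limiter)) {
    byte[] buf = new byte[8192];
    int n = in.read(buf, 0, buf.length); // acquireRead(8192) runs before the read
}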
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.common.oss.filesystem; + +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; +import com.google.common.util.concurrent.MoreExecutors; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Syncable; +import org.jetbrains.annotations.NotNull; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; + +/** + * Append Object for Oss. Support synchronous and asynchronous. + */ +public class OSSAppendOutputStream extends OutputStream implements Syncable { + private static final Logger LOG = LoggerFactory.getLogger(OSSAppendOutputStream.class); + private final OSSFileSystemStore store; + private final Configuration conf; + private final String key; + private final ListeningExecutorService executorService; + private final FileSystemRateLimiter rateLimiter; + + private volatile boolean closed = false; + private final byte[] singleByte = new byte[1]; + private final int bufferSize; + private File localFile; + private long localFileId = 0L; + private OutputStream bufferStream; + private long writeLength = 0L; + private long ossFilePosition; + private ListenableFuture asyncJob = null; + private File currentUploadOssFile = null; + private final List historyTempFiles = new ArrayList<>(); + + public OSSAppendOutputStream(Configuration conf, + OSSFileSystemStore store, + String key, + int bufferSize, + ExecutorService executorService, + FileSystemRateLimiter rateLimiter) throws IOException { + this.conf = conf; + this.store = store; + this.key = key; + this.bufferSize = bufferSize; + this.executorService = MoreExecutors.listeningDecorator(executorService); + this.rateLimiter = rateLimiter; + this.localFile = createTmpLocalFile(); + this.bufferStream = new BufferedOutputStream(new FileOutputStream(localFile), bufferSize); + this.ossFilePosition = store.getOssFileLength(key); + historyTempFiles.add(localFile); + } + + @Override + public synchronized void write(int b) throws IOException { + singleByte[0] = (byte) b; + write(singleByte, 0, 1); + } + + @Override + public synchronized void write(byte @NotNull [] b, int off, int len) throws IOException { + if (closed) { + throw new IOException("Stream closed."); + } + + rateLimiter.acquireWrite(len); + + bufferStream.write(b, off, len); + writeLength += len; + } + + /** + * Upload to Oss by sync + * + * @throws IOException failed + */ + @Override + public synchronized void flush() throws IOException { + if (closed) { + throw new IOException("Stream closed."); + } + if (asyncJob != null) { + waitAsyncJob(); + } + if (writeLength == 0L) { + return; + } + + uploadBufferToOss(false); + + } + + /** + * upload to Oss by async. There is only one async task at a time. 
+ * + * @throws IOException failed + */ + public synchronized void asyncFlush() throws IOException { + if (closed) { + throw new IOException("Stream closed."); + } + + if (asyncJob != null) { + waitAsyncJob(); + } + + if (writeLength == 0L) { + return; + } + + uploadBufferToOss(true); + } + + @Override + public synchronized void close() throws IOException { + if (closed) { + return; + } + flush(); + + removeTemporaryFiles(); + closed = true; + } + + @Override + public void hflush() throws IOException { + flush(); + } + + @Override + public void hsync() throws IOException { + asyncFlush(); + } + + private File createTmpLocalFile() throws IOException { + localFileId++; + return OSSUtils.createTmpFileForWrite(String.format("local-%s-%d-", key, localFileId), -1, conf); + } + + private void waitAsyncJob() throws IOException { + try { + ossFilePosition = asyncJob.get(); + } catch (InterruptedException ie) { + LOG.warn(String.format("Oss append File: %s, local file: %s, Position: %d, Interrupted: ", key, + currentUploadOssFile.getAbsolutePath(), ossFilePosition), ie); + Thread.currentThread().interrupt(); + } catch (ExecutionException ee) { + LOG.error(String.format("Oss append File: %s, local file: %s, Position: %d, Error: ", key, + currentUploadOssFile.getAbsolutePath(), ossFilePosition), ee); + throw new IOException( + String.format("Oss append File: %s, local file: %s, Position: %d, Error: ", key, + currentUploadOssFile.getAbsolutePath(), ossFilePosition), ee); + } finally { + asyncJob = null; + } + } + + private void uploadBufferToOss(boolean async) throws IOException { + bufferStream.flush(); + bufferStream.close(); + currentUploadOssFile = localFile; + + if (async) { + asyncJob = executorService.submit(() -> store.appendObject(key, currentUploadOssFile, ossFilePosition)); + } else { + ossFilePosition = store.appendObject(key, currentUploadOssFile, ossFilePosition); + } + + localFile = createTmpLocalFile(); + this.bufferStream = new BufferedOutputStream(new FileOutputStream(localFile), bufferSize); + historyTempFiles.add(localFile); + writeLength = 0L; + } + + private void removeTemporaryFiles() { + for (File file : historyTempFiles) { + if (file != null && file.exists() && !file.delete()) { + LOG.warn("Failed to delete temporary file {}", file); + } + } + } + + public long getOssFilePosition() { + return ossFilePosition; + } +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/OSSFileSystem.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/OSSFileSystem.java index 3214cc5a8..3289c7e40 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/OSSFileSystem.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/OSSFileSystem.java @@ -16,14 +16,12 @@ package com.alibaba.polardbx.common.oss.filesystem; -import com.alibaba.polardbx.common.oss.filesystem.cache.FileReadRequest; import com.aliyun.oss.model.OSSObjectSummary; import com.aliyun.oss.model.ObjectListing; import com.aliyun.oss.model.ObjectMetadata; import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; import com.google.common.util.concurrent.MoreExecutors; -import com.google.common.util.concurrent.UncheckedExecutionException; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; @@ -58,17 +56,25 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; +import static 
com.alibaba.polardbx.common.oss.filesystem.Constants.FS_OSS_BLOCK_SIZE_DEFAULT; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.FS_OSS_BLOCK_SIZE_KEY; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.KEEPALIVE_TIME_DEFAULT; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.KEEPALIVE_TIME_KEY; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.MAX_PAGING_KEYS_DEFAULT; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.MAX_PAGING_KEYS_KEY; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.MULTIPART_UPLOAD_PART_SIZE_DEFAULT; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.MULTIPART_UPLOAD_PART_SIZE_KEY; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.UPLOAD_ACTIVE_BLOCKS_DEFAULT; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.UPLOAD_ACTIVE_BLOCKS_KEY; import static com.alibaba.polardbx.common.oss.filesystem.OSSUtils.intOption; import static com.alibaba.polardbx.common.oss.filesystem.OSSUtils.longOption; import static com.alibaba.polardbx.common.oss.filesystem.OSSUtils.objectRepresentsDirectory; -import static com.alibaba.polardbx.common.oss.filesystem.Constants.*; -import static java.util.concurrent.TimeUnit.MILLISECONDS; import static java.util.concurrent.TimeUnit.SECONDS; /** * To access OSS blob system in a filesystem style. */ -public class OSSFileSystem extends FileSystem { +public class OSSFileSystem extends FileSystem implements RateLimitable { private static final Logger LOG = LoggerFactory.getLogger(OSSFileSystem.class); private URI uri; @@ -109,7 +115,14 @@ public OSSFileSystem(boolean enableCache, FileSystemRateLimiter rateLimiter) { @Override public FSDataOutputStream append(Path path, int bufferSize, Progressable progress) throws IOException { - throw new IOException("Append is not supported!"); + String key = pathToKey(path); + OSSAppendOutputStream ossAppendOutputStream = new OSSAppendOutputStream(getConf(), + store, + key, + bufferSize, + new SemaphoredDelegatingExecutor(boundedThreadPool, blockOutputActiveBlocks, true), + getRateLimiter()); + return new FSDataOutputStream(ossAppendOutputStream, statistics, ossAppendOutputStream.getOssFilePosition()); } @Override @@ -296,7 +309,7 @@ public FileStatus getFileStatus(Path path) throws IOException { } public FileStatus getFileStatusImpl(Path path) throws IOException { - Path qualifiedPath = path.makeQualified(uri, workingDir); + Path qualifiedPath = path.makeQualified(uri, getWorkingDirectory()); String key = pathToKey(qualifiedPath); // Root always exists @@ -335,6 +348,7 @@ public FileStatus getFileStatusImpl(Path path) throws IOException { } } + @Override public FileSystemRateLimiter getRateLimiter() { return this.rateLimiter; } @@ -434,15 +448,15 @@ public void initialize(URI name, Configuration conf) throws IOException { * @param path the path of the file. * @return the key of the object that represents the file. 
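// With the override above, append() is now supported on OSSFileSystem. A
// sketch (path, buffer size and payload are illustrative): hflush() performs
// a synchronous append to OSS, while hsync() delegates to asyncFlush(), which
// keeps at most one background upload in flight.
FSDataOutputStream out = fs.append(new Path("/columnar/pk_idx/0001.log"), 8192, null);
out.write(payload);
out.hflush(); // waits for the OSS appendObject call to finish
out.hsync();  // submits the next upload asynchronously and returns
out.close();  // flushes remaining bytes and removes local temp files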
*/ - private String pathToKey(Path path) { + public String pathToKey(Path path) { if (!path.isAbsolute()) { - path = new Path(workingDir, path); + path = new Path(getWorkingDirectory(), path); } return path.toUri().getPath().substring(1); } - private Path keyToPath(String key) { + public Path keyToPath(String key) { return new Path("/" + key); } @@ -469,10 +483,9 @@ public FileStatus[] listStatus(Path path) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Ignoring: " + objKey); } - continue; } else { Path keyPath = keyToPath(objectSummary.getKey()) - .makeQualified(uri, workingDir); + .makeQualified(uri, getWorkingDirectory()); if (LOG.isDebugEnabled()) { LOG.debug("Adding: fi: " + keyPath); } @@ -487,9 +500,8 @@ public FileStatus[] listStatus(Path path) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Ignoring: " + prefix); } - continue; } else { - Path keyPath = keyToPath(prefix).makeQualified(uri, workingDir); + Path keyPath = keyToPath(prefix).makeQualified(uri, getWorkingDirectory()); if (LOG.isDebugEnabled()) { LOG.debug("Adding: rd: " + keyPath); } @@ -520,14 +532,9 @@ public FileStatus[] listStatus(Path path) throws IOException { @Override public RemoteIterator listFiles( final Path f, final boolean recursive) throws IOException { - Path qualifiedPath = f.makeQualified(uri, workingDir); + Path qualifiedPath = f.makeQualified(uri, getWorkingDirectory()); final FileStatus status = getFileStatus(qualifiedPath); - PathFilter filter = new PathFilter() { - @Override - public boolean accept(Path path) { - return status.isFile() || !path.equals(f); - } - }; + PathFilter filter = path -> status.isFile() || !path.equals(f); FileStatusAcceptor acceptor = new FileStatusAcceptor.AcceptFilesOnly(qualifiedPath); return innerList(f, status, filter, acceptor, recursive); @@ -542,7 +549,7 @@ public RemoteIterator listLocatedStatus(Path f) @Override public RemoteIterator listLocatedStatus(final Path f, final PathFilter filter) throws IOException { - Path qualifiedPath = f.makeQualified(uri, workingDir); + Path qualifiedPath = f.makeQualified(uri, getWorkingDirectory()); final FileStatus status = getFileStatus(qualifiedPath); FileStatusAcceptor acceptor = new FileStatusAcceptor.AcceptAllButSelf(qualifiedPath); @@ -554,7 +561,7 @@ private RemoteIterator innerList(final Path f, final PathFilter filter, final FileStatusAcceptor acceptor, final boolean recursive) throws IOException { - Path qualifiedPath = f.makeQualified(uri, workingDir); + Path qualifiedPath = f.makeQualified(uri, getWorkingDirectory()); String key = pathToKey(qualifiedPath); if (status.isFile()) { diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/OSSFileSystemStore.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/OSSFileSystemStore.java index 63d1b94d7..61eabb23f 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/OSSFileSystemStore.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/OSSFileSystemStore.java @@ -23,6 +23,8 @@ import com.aliyun.oss.common.auth.CredentialsProvider; import com.aliyun.oss.common.comm.Protocol; import com.aliyun.oss.model.AbortMultipartUploadRequest; +import com.aliyun.oss.model.AppendObjectRequest; +import com.aliyun.oss.model.AppendObjectResult; import com.aliyun.oss.model.CannedAccessControlList; import com.aliyun.oss.model.CompleteMultipartUploadRequest; import com.aliyun.oss.model.CompleteMultipartUploadResult; @@ -43,6 +45,7 @@ import 
com.aliyun.oss.model.UploadPartCopyResult; import com.aliyun.oss.model.UploadPartRequest; import com.aliyun.oss.model.UploadPartResult; +import lombok.Getter; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; @@ -71,7 +74,32 @@ import java.util.ListIterator; import java.util.NoSuchElementException; -import static com.alibaba.polardbx.common.oss.filesystem.Constants.*; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.CANNED_ACL_DEFAULT; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.CANNED_ACL_KEY; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.ENDPOINT_KEY; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.ESTABLISH_TIMEOUT_DEFAULT; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.ESTABLISH_TIMEOUT_KEY; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.MAXIMUM_CONNECTIONS_DEFAULT; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.MAXIMUM_CONNECTIONS_KEY; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.MAX_ERROR_RETRIES_DEFAULT; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.MAX_ERROR_RETRIES_KEY; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.MAX_PAGING_KEYS_DEFAULT; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.MAX_PAGING_KEYS_KEY; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.MULTIPART_UPLOAD_PART_SIZE_DEFAULT; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.MULTIPART_UPLOAD_PART_SIZE_KEY; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.PROXY_DOMAIN_KEY; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.PROXY_HOST_KEY; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.PROXY_PASSWORD_KEY; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.PROXY_PORT_KEY; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.PROXY_USERNAME_KEY; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.PROXY_WORKSTATION_KEY; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.SECURE_CONNECTIONS_DEFAULT; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.SECURE_CONNECTIONS_KEY; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.SERVER_SIDE_ENCRYPTION_ALGORITHM_KEY; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.SOCKET_TIMEOUT_DEFAULT; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.SOCKET_TIMEOUT_KEY; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.USER_AGENT_PREFIX; +import static com.alibaba.polardbx.common.oss.filesystem.Constants.USER_AGENT_PREFIX_DEFAULT; /** * Core implementation of OSS Filesystem for Hadoop. 
@@ -82,12 +110,18 @@ public class OSSFileSystemStore { LoggerFactory.getLogger(OSSFileSystemStore.class); private String username; private FileSystem.Statistics statistics; + @Getter private OSSClient ossClient; + @Getter private String bucketName; private long uploadPartSize; private int maxKeys; private String serverSideEncryptionAlgorithm; + public FileSystem.Statistics getStatistics() { + return statistics; + } + public void initialize(URI uri, Configuration conf, String user, FileSystem.Statistics stat) throws IOException { this.username = user; @@ -194,7 +228,7 @@ public void deleteObjects(List keysToDelete) throws IOException { int retry = 10; int tries = 0; List deleteFailed = keysToDelete; - while(CollectionUtils.isNotEmpty(deleteFailed)) { + while (CollectionUtils.isNotEmpty(deleteFailed)) { DeleteObjectsRequest deleteRequest = new DeleteObjectsRequest(bucketName); deleteRequest.setKeys(deleteFailed); // There are two modes to do batch delete: @@ -412,6 +446,28 @@ public void uploadObject(String key, File file) throws IOException { } } + /** + * Upload a file as an OSS object, using single upload. + * + * @param key object key. + * @param inputStream inputStream to upload. + * @throws IOException if failed to upload object. + */ + public void uploadObject(String key, InputStream inputStream) throws IOException { + ObjectMetadata meta = new ObjectMetadata(); + meta.setContentLength(inputStream.available()); + if (StringUtils.isNotEmpty(serverSideEncryptionAlgorithm)) { + meta.setServerSideEncryption(serverSideEncryptionAlgorithm); + } + try { + PutObjectResult result = ossClient.putObject(bucketName, key, inputStream, meta); + LOG.debug(result.getETag()); + statistics.incrementWriteOps(1); + } finally { + inputStream.close(); + } + } + /** * list objects. * @@ -484,7 +540,7 @@ public void purge(String prefix) throws IOException { statistics.incrementWriteOps(1); } - for (String dir: objects.getCommonPrefixes()) { + for (String dir : objects.getCommonPrefixes()) { deleteDirs(dir); } } catch (OSSException | ClientException e) { @@ -496,6 +552,7 @@ public RemoteIterator singleStatusRemoteIterator( final FileStatus fileStatus, final BlockLocation[] locations) { return new RemoteIterator() { private boolean hasNext = true; + @Override public boolean hasNext() throws IOException { return fileStatus != null && hasNext; @@ -610,7 +667,7 @@ public PartETag uploadPart(File file, String key, String uploadId, int idx) statistics.incrementWriteOps(1); return uploadResult.getPartETag(); } catch (Exception e) { - LOG.debug("Failed to upload "+ file.getPath() +", " + + LOG.debug("Failed to upload " + file.getPath() + ", " + "try again.", e); caught = e; } finally { @@ -659,6 +716,77 @@ public void abortMultipartUpload(String key, String uploadId) { ossClient.abortMultipartUpload(request); } + /** + * append file to oss file + * + * @param key object key. + * @param file local file + * @return next position + * @throws IOException if failed + */ + public Long appendObject(String key, File file) throws IOException { + return appendObject(key, file, getOssFileLength(key)); + } + + public Long appendObject(String key, InputStream inputStream) throws IOException { + return appendObject(key, inputStream, getOssFileLength(key)); + } + + /** + * append file to oss file + * + * @param key object key. 
+ * @param file local file + * @param position write position + * @return next position + * @throws IOException If the file length and position are not equal, throw PositionNotEqualToLength + */ + public Long appendObject(String key, File file, Long position) throws IOException { + File object = file.getAbsoluteFile(); + FileInputStream fis = new FileInputStream(object); + return appendObject(key, fis, position); + } + + /** + * append file to oss file + * + * @param key object key. + * @param inputStream input stream + * @param position write position + * @return next position + * @throws IOException If the file length and position are not equal, throw PositionNotEqualToLength + */ + public Long appendObject(String key, InputStream inputStream, Long position) throws IOException { + ObjectMetadata meta = new ObjectMetadata(); + if (StringUtils.isNotEmpty(serverSideEncryptionAlgorithm)) { + meta.setServerSideEncryption(serverSideEncryptionAlgorithm); + } + AppendObjectRequest appendObjectRequest = new AppendObjectRequest(bucketName, key, inputStream, meta); + appendObjectRequest.setPosition(position); + AppendObjectResult appendObjectResult; + try { + appendObjectResult = ossClient.appendObject(appendObjectRequest); + statistics.incrementWriteOps(1); + } finally { + inputStream.close(); + } + return appendObjectResult.getNextPosition(); + } + + /** + * get oss file length + * + * @param key object name + * @return return file length. If the file does not exist, return 0L + */ + public long getOssFileLength(String key) { + ObjectMetadata objectMetadata = getObjectMetadata(key); + if (objectMetadata != null) { + return objectMetadata.getContentLength(); + } + return 0L; + } + private static class PartNumberAscendComparator implements Comparator, Serializable { @Override diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/OutputStreamWithRateLimiter.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/OutputStreamWithRateLimiter.java new file mode 100644 index 000000000..89f31b0c0 --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/OutputStreamWithRateLimiter.java @@ -0,0 +1,64 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
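// A sketch of the new append helpers on OSSFileSystemStore; the key and local
// files are illustrative. getOssFileLength() returns 0L for a missing object,
// so the first append starts at position 0, and each call returns the next
// write position to feed into the following append.
String key = "columnar/pk_idx/0001.log";
long position = store.getOssFileLength(key);
position = store.appendObject(key, new File("/tmp/batch-0.log"), position);
position = store.appendObject(key, new File("/tmp/batch-1.log"), position);
// If position drifts from the real object length, OSS rejects the request
// with PositionNotEqualToLength, as the javadoc above notes.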
+ */ + +package com.alibaba.polardbx.common.oss.filesystem; + +import org.jetbrains.annotations.NotNull; + +import java.io.IOException; +import java.io.OutputStream; + +public class OutputStreamWithRateLimiter extends OutputStream { + final private FileSystemRateLimiter rateLimiter; + final private OutputStream out; + + public OutputStreamWithRateLimiter(OutputStream out, FileSystemRateLimiter rateLimiter) { + this.out = out; + this.rateLimiter = rateLimiter; + } + + @Override + public synchronized void write(byte @NotNull [] b) throws IOException { + rateLimiter.acquireWrite(b.length); + out.write(b); + } + + @Override + public synchronized void write(byte @NotNull [] b, int off, int len) throws IOException { + rateLimiter.acquireWrite(len); + out.write(b, off, len); + } + + @Override + public synchronized void flush() throws IOException { + out.flush(); + } + + @Override + public synchronized void close() throws IOException { + out.close(); + } + + @Override + public synchronized void write(int b) throws IOException { + rateLimiter.acquireWrite(1); + out.write(b); + } + + public OutputStream getWrappedStream() { + return out; + } +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/RateLimitable.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/RateLimitable.java new file mode 100644 index 000000000..907c940f5 --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/RateLimitable.java @@ -0,0 +1,21 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
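// The write-side wrapper mirrors the read-side one; a sketch with an
// illustrative path, reusing the limiter obtained earlier: permits are
// acquired before each write is forwarded to the underlying stream.
try (OutputStream out = new OutputStreamWithRateLimiter(fs.create(path), limiter)) {
    out.write(payload); // acquireWrite(payload.length) runs first
}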
+ */ + +package com.alibaba.polardbx.common.oss.filesystem; + +public interface RateLimitable { + FileSystemRateLimiter getRateLimiter(); +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/CacheConfig.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/CacheConfig.java index 0abd20ff6..820cdd951 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/CacheConfig.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/CacheConfig.java @@ -30,21 +30,24 @@ package com.alibaba.polardbx.common.oss.filesystem.cache; -import io.airlift.slice.DataSize; +import com.alibaba.polardbx.common.utils.thread.ThreadCpuStatUtil; import javax.annotation.Nullable; import java.net.URI; -import java.util.Optional; import static com.alibaba.polardbx.common.oss.filesystem.cache.CacheQuotaScope.GLOBAL; public class CacheConfig { + @Deprecated private boolean cachingEnabled; + @Deprecated private CacheType cacheType; + @Deprecated + private CacheQuotaScope cacheQuotaScope = GLOBAL; + private URI baseDirectory; private boolean validationEnabled; - private CacheQuotaScope cacheQuotaScope = GLOBAL; - private Optional defaultCacheQuota = Optional.empty(); + private int flushCacheThreadNum = ThreadCpuStatUtil.NUM_CORES; @Nullable public URI getBaseDirectory() { @@ -92,14 +95,11 @@ public CacheConfig setCacheQuotaScope(CacheQuotaScope cacheQuotaScope) { return this; } - public Optional getDefaultCacheQuota() { - return defaultCacheQuota; + public int getFlushCacheThreadNum() { + return flushCacheThreadNum; } - public CacheConfig setDefaultCacheQuota(DataSize defaultCacheQuota) { - if (defaultCacheQuota != null) { - this.defaultCacheQuota = Optional.of(defaultCacheQuota); - } - return this; + public void setFlushCacheThreadNum(int flushCacheThreadNum) { + this.flushCacheThreadNum = flushCacheThreadNum; } } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/CacheManager.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/CacheManager.java index 83b3765a9..f8985b600 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/CacheManager.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/CacheManager.java @@ -34,7 +34,6 @@ import javax.annotation.concurrent.ThreadSafe; import java.io.Closeable; -import java.util.Map; @ThreadSafe public interface CacheManager extends Closeable { @@ -61,8 +60,11 @@ public interface CacheManager extends Closeable { /** * Rebuild the cache + * * @param configs cache configurations. 
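// A configuration sketch for the reworked CacheConfig above (values are
// illustrative): the per-query quota knobs are deprecated, and the number of
// cache flush threads, defaulting to the CPU core count, is now set directly.
CacheConfig cacheConfig = new CacheConfig();
cacheConfig.setFlushCacheThreadNum(ThreadCpuStatUtil.NUM_CORES);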
*/ - void rebuildCache(Map configs); + void rebuildCache(FileMergeCacheConfig fileMergeCacheConfig); + + CacheQuota getMaxCacheQuota(); } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/CacheQuota.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/CacheQuota.java index 3ddb8e97c..e2c0d69a0 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/CacheQuota.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/CacheQuota.java @@ -41,7 +41,6 @@ public class CacheQuota { public static final CacheQuota NO_CACHE_CONSTRAINTS = new CacheQuota("NO_IDENTITY", Optional.empty()); - public static final CacheQuota DISABLE_CACHE = new CacheQuota("DISABLE_CACHE", Optional.of(new DataSize(0, DataSize.Unit.BYTE))); private final String identity; private final long identifier; diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/CacheResult.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/CacheResult.java index fca8ac0c6..96af8c0c8 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/CacheResult.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/CacheResult.java @@ -46,5 +46,10 @@ public enum CacheResult { /** * The data we're reading is not in cache and we don't have quota to write them to cache */ - CACHE_QUOTA_EXCEED + CACHE_QUOTA_EXCEED, + + /** + * The cache is unavailable. + */ + CACHE_IS_UNAVAILABLE } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/CacheStats.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/CacheStats.java index 2de9aeab0..c783647fb 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/CacheStats.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/CacheStats.java @@ -39,6 +39,7 @@ public class CacheStats { private final AtomicLong hit = new AtomicLong(); private final AtomicLong miss = new AtomicLong(); private final AtomicLong quotaExceed = new AtomicLong(); + private final AtomicLong unavailable = new AtomicLong(); public void incrementCacheHit() { hit.getAndIncrement(); @@ -48,6 +49,10 @@ public void incrementCacheMiss() { miss.getAndIncrement(); } + public void incrementCacheUnavailable() { + unavailable.getAndIncrement(); + } + public void incrementQuotaExceed() { quotaExceed.getAndIncrement(); } @@ -71,4 +76,27 @@ public long getCacheMiss() { public long getQuotaExceed() { return quotaExceed.get(); } + + public long getCacheUnavailable() { + return unavailable.get(); + } + + public void reset() { + inMemoryRetainedBytes.set(0); + hit.set(0); + miss.set(0); + quotaExceed.set(0); + unavailable.set(0); + } + + @Override + public String toString() { + return "CacheStats{" + + "inMemoryRetainedBytes=" + inMemoryRetainedBytes + + ", hit=" + hit + + ", miss=" + miss + + ", quotaExceed=" + quotaExceed + + ", unavailable=" + unavailable + + '}'; + } } \ No newline at end of file diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/FileMergeCacheConfig.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/FileMergeCacheConfig.java index b9361925e..8a27f0d59 100644 --- 
a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/FileMergeCacheConfig.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/FileMergeCacheConfig.java @@ -35,15 +35,19 @@ import static io.airlift.slice.DataSize.Unit.GIGABYTE; import static java.util.concurrent.TimeUnit.DAYS; -import static java.util.concurrent.TimeUnit.MILLISECONDS; public class FileMergeCacheConfig { - private int maxCachedEntries = 1_000; + private boolean enableCache = true; + private int maxCachedEntries = 2048; private Duration cacheTtl = new Duration(2, DAYS); private DataSize maxInMemoryCacheSize = new DataSize(2, GIGABYTE); + private DataSize maxInDiskCacheSize = new DataSize(100, GIGABYTE); - private int maxHotCachedEntries = 1_000; - private Duration hotCacheTtl = new Duration(3000, MILLISECONDS); + /** + * To use bytes cache in Local File Cache. + */ + private boolean useByteCache = false; + private double memoryRatioOfBytesCache = 0.3d; public int getMaxCachedEntries() { return maxCachedEntries; @@ -54,6 +58,24 @@ public FileMergeCacheConfig setMaxCachedEntries(int maxCachedEntries) { return this; } + public double getMemoryRatioOfBytesCache() { + return memoryRatioOfBytesCache; + } + + public FileMergeCacheConfig setMemoryRatioOfBytesCache(double memoryRatioOfBytesCache) { + this.memoryRatioOfBytesCache = memoryRatioOfBytesCache; + return this; + } + + public boolean isUseByteCache() { + return useByteCache; + } + + public FileMergeCacheConfig setUseByteCache(boolean useByteCache) { + this.useByteCache = useByteCache; + return this; + } + public DataSize getMaxInMemoryCacheSize() { return maxInMemoryCacheSize; } @@ -72,19 +94,19 @@ public FileMergeCacheConfig setCacheTtl(Duration cacheTtl) { return this; } - public int getMaxHotCachedEntries() { - return maxHotCachedEntries; + public DataSize getMaxInDiskCacheSize() { + return maxInDiskCacheSize; } - public void setMaxHotCachedEntries(int maxHotCachedEntries) { - this.maxHotCachedEntries = maxHotCachedEntries; + public void setMaxInDiskCacheSize(DataSize maxInDiskCacheSize) { + this.maxInDiskCacheSize = maxInDiskCacheSize; } - public Duration getHotCacheTtl() { - return hotCacheTtl; + public boolean isEnableCache() { + return enableCache; } - public void setHotCacheTtl(Duration hotCacheTtl) { - this.hotCacheTtl = hotCacheTtl; + public void setEnableCache(boolean enableCache) { + this.enableCache = enableCache; } } \ No newline at end of file diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/FileMergeCacheManager.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/FileMergeCacheManager.java index a01c48ff3..20893e8c3 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/FileMergeCacheManager.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/FileMergeCacheManager.java @@ -31,24 +31,23 @@ package com.alibaba.polardbx.common.oss.filesystem.cache; import com.alibaba.polardbx.common.Engine; -import com.alibaba.polardbx.common.properties.ConnectionParams; -import com.alibaba.polardbx.common.properties.ConnectionProperties; +import com.alibaba.polardbx.common.properties.FileConfig; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.common.utils.thread.NamedThreadFactory; -import 
com.alibaba.polardbx.common.utils.thread.ThreadCpuStatUtil; -import com.alibaba.polardbx.common.utils.time.parser.StringNumericParser; +import com.google.common.annotations.VisibleForTesting; import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; import com.google.common.cache.RemovalListener; import com.google.common.cache.RemovalNotification; +import com.google.common.cache.Weigher; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Range; import com.google.common.collect.RangeMap; import com.google.common.collect.TreeRangeMap; import io.airlift.slice.DataSize; -import io.airlift.slice.Duration; +import io.airlift.slice.SizeOf; import io.airlift.slice.Slice; import org.apache.hadoop.fs.Path; import org.eclipse.jetty.util.ConcurrentHashSet; @@ -56,15 +55,18 @@ import java.io.Closeable; import java.io.File; import java.io.FileInputStream; +import java.io.FileNotFoundException; import java.io.IOException; import java.io.RandomAccessFile; import java.math.BigInteger; +import java.nio.file.FileSystems; import java.nio.file.Files; -import java.nio.file.Paths; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; @@ -74,15 +76,14 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; -import static com.alibaba.polardbx.common.oss.filesystem.cache.CacheQuotaScope.GLOBAL; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.collect.Iterators.getOnlyElement; -import static io.airlift.slice.DataSize.Unit.GIGABYTE; import static io.airlift.slice.DataSize.Unit.MEGABYTE; import static java.lang.StrictMath.toIntExact; import static java.nio.file.StandardOpenOption.APPEND; @@ -90,17 +91,18 @@ import static java.util.Objects.requireNonNull; import static java.util.UUID.randomUUID; import static java.util.concurrent.Executors.newScheduledThreadPool; -import static java.util.concurrent.TimeUnit.DAYS; import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static java.util.concurrent.TimeUnit.SECONDS; public class FileMergeCacheManager implements CacheManager { - private static final Logger log = LoggerFactory.getLogger(FileMergeCacheManager.class); + private static final Logger log = LoggerFactory.getLogger("oss"); private static final String EXTENSION = ".cache"; private static final int FILE_MERGE_BUFFER_SIZE = toIntExact(new DataSize(8, MEGABYTE).toBytes()); + // for generation of packet. + private static final int CACHE_STATS_FIELD_COUNT = 13; + private final ThreadLocal buffers = ThreadLocal.withInitial(() -> new byte[FILE_MERGE_BUFFER_SIZE]); private final ExecutorService cacheFlushExecutor; @@ -113,7 +115,10 @@ public class FileMergeCacheManager implements CacheManager { // Path and its corresponding cacheScope identifier private Cache cache; - private final Cache hotCache; + // To cache the compressed bytes array of local cached file. 
+ private Cache compressedBytesCache; + private long maxSizeOfCompressedBytes; + private CacheStats compressedBytesCacheStats = new CacheStats(); // CacheScope identifier to its cached files mapping private final Map> cacheScopeFiles = new ConcurrentHashMap<>(); @@ -121,15 +126,16 @@ public class FileMergeCacheManager implements CacheManager { // stats private final CacheStats stats; - + private volatile boolean building = false; + private volatile boolean enableCache = true; // config private final Path baseDirectory; - private final long maxInflightBytes; + private long maxInflightBytes; + private CacheQuota cacheQuota; - private final CacheConfig cacheConfig; - private final FileMergeCacheConfig fileMergeCacheConfig; + // bytes cache + private boolean useBytesCache; - private static final String CACHE_FILE_PREFIX = "cache"; private static final String OSS_CACHE_FLUSHER_THREAD_NAME_FORMAT = "%s-cache-flusher"; private static final String OSS_CACHE_REMOVER_THREAD_NAME_FORMAT = "%s-cache-remover"; private static final String OSS_CACHE_SIZE_CALCULATOR_THREAD_NAME_FORMAT = "%s-cache-size-calculator"; @@ -141,30 +147,35 @@ public FileMergeCacheManager( ExecutorService cacheFlushExecutor, ExecutorService cacheRemovalExecutor, ScheduledExecutorService cacheSizeCalculateExecutor) { + this(cacheConfig, fileMergeCacheConfig, stats, cacheFlushExecutor, cacheRemovalExecutor, + cacheSizeCalculateExecutor, + Engine.OSS); + } + + public FileMergeCacheManager( + CacheConfig cacheConfig, + FileMergeCacheConfig fileMergeCacheConfig, + CacheStats stats, + ExecutorService cacheFlushExecutor, + ExecutorService cacheRemovalExecutor, + ScheduledExecutorService cacheSizeCalculateExecutor, + Engine engine) { requireNonNull(cacheConfig, "directory is null"); this.cacheFlushExecutor = cacheFlushExecutor; this.cacheRemovalExecutor = cacheRemovalExecutor; this.cacheSizeCalculateExecutor = cacheSizeCalculateExecutor; - this.cacheConfig = cacheConfig; - this.fileMergeCacheConfig = fileMergeCacheConfig; + this.stats = requireNonNull(stats, "stats is null"); + this.useBytesCache = fileMergeCacheConfig.isUseByteCache(); - this.cache = CacheBuilder.newBuilder() - .maximumSize(fileMergeCacheConfig.getMaxCachedEntries()) - .expireAfterAccess(fileMergeCacheConfig.getCacheTtl().toMillis(), MILLISECONDS) - .removalListener(new CacheRemovalListener()) - .recordStats() - .build(); + this.baseDirectory = new Path(new Path(cacheConfig.getBaseDirectory()), engine.name()); + checkArgument( + fileMergeCacheConfig.getMaxInMemoryCacheSize().toBytes() >= 0, "max In-flight Bytes is negative"); - this.hotCache = CacheBuilder.newBuilder() - .maximumSize(fileMergeCacheConfig.getMaxHotCachedEntries()) - .expireAfterAccess(fileMergeCacheConfig.getHotCacheTtl().toMillis(), MILLISECONDS) - .build(); + this.maxSizeOfCompressedBytes = + (long) (Runtime.getRuntime().maxMemory() * fileMergeCacheConfig.getMemoryRatioOfBytesCache()); - this.stats = requireNonNull(stats, "stats is null"); - this.baseDirectory = new Path(cacheConfig.getBaseDirectory()); - checkArgument(fileMergeCacheConfig.getMaxInMemoryCacheSize().toBytes() >= 0, "max In-flight Bytes is negative"); - this.maxInflightBytes = fileMergeCacheConfig.getMaxInMemoryCacheSize().toBytes(); + rebuildCache(fileMergeCacheConfig); File target = new File(baseDirectory.toUri()); if (!target.exists()) { @@ -184,6 +195,7 @@ public FileMergeCacheManager( Files.delete(file.toPath()); } catch (IOException e) { // ignore + log.debug(file.getPath(), e); } })); } @@ -203,6 +215,50 @@ public 
FileMergeCacheManager( TimeUnit.SECONDS); } + @Override + public void rebuildCache(FileMergeCacheConfig fileMergeCacheConfig) { + // clear old cache. + try { + this.building = true; + this.enableCache = fileMergeCacheConfig.isEnableCache(); + clear(); + + this.maxInflightBytes = fileMergeCacheConfig.getMaxInMemoryCacheSize().toBytes(); + + this.cacheQuota = new CacheQuota( + "NO_IDENTITY", Optional.of(fileMergeCacheConfig.getMaxInDiskCacheSize())); + + this.cache = CacheBuilder.newBuilder() + .maximumSize(fileMergeCacheConfig.getMaxCachedEntries()) + .expireAfterAccess(fileMergeCacheConfig.getCacheTtl().toMillis(), MILLISECONDS) + .removalListener(new CacheRemovalListener()) + .recordStats() + .build(); + + this.compressedBytesCache = CacheBuilder.newBuilder() + .maximumWeight(maxSizeOfCompressedBytes) + .weigher((Weigher) (key, value) -> + // The case of invalidating compressed bytes cache: + // The total size of compressed bytes array exceeds the given threshold. + + // calculate compressed bytes array size for cache weight. + LocalCacheFile.BASE_SIZE_IN_BYTES + (int) SizeOf.sizeOf(value) + ) + .removalListener(notification -> { + // decrement memory size when invalidate compressed bytes cache. + compressedBytesCacheStats.addInMemoryRetainedBytes( + -(LocalCacheFile.BASE_SIZE_IN_BYTES + (int) SizeOf.sizeOf(notification.getValue())) + ); + compressedBytesCacheStats.incrementQuotaExceed(); + } + ) + .build(); + + } finally { + this.building = false; + } + } + public void destroy() { // Wait util all cache files removed. cacheFlushExecutor.shutdown(); @@ -213,9 +269,9 @@ public void destroy() { @Override public CacheResult get(FileReadRequest request, byte[] buffer, int offset, CacheQuota cacheQuota) { - // try to hit hot cache - if (readHotCache(request, buffer, offset)) { - return CacheResult.HIT_HOT_CACHE; + if (building || !enableCache) { + stats.incrementCacheUnavailable(); + return CacheResult.CACHE_IS_UNAVAILABLE; } boolean result = read(request, buffer, offset); @@ -241,22 +297,6 @@ public CacheResult get(FileReadRequest request, byte[] buffer, int offset, Cache return CacheResult.MISS; } - private boolean readHotCache(FileReadRequest request, byte[] buffer, int offset) { - byte[] readBuffer; - if (request.getLength() > 0 - && offset == 0 - && (readBuffer = this.hotCache.getIfPresent(request)) != null) { - System.arraycopy( - readBuffer, - 0, - buffer, - 0, - request.getLength()); - return true; - } - return false; - } - private boolean ifExceedQuota(CacheQuota cacheQuota, FileReadRequest request) { DataSize cacheSize = DataSize .succinctBytes(cacheScopeSizeInBytes.getOrDefault(cacheQuota.getIdentifier(), 0L) + request.getLength()); @@ -272,6 +312,9 @@ private long getCacheScopeSizeInBytes(long cacheScopeIdentifier) { long bytes = 0; for (Path path : paths) { CacheRange cacheRange = persistedRanges.get(path); + if (cacheRange == null) { + continue; + } Lock readLock = cacheRange.getLock().readLock(); readLock.lock(); try { @@ -285,12 +328,24 @@ private long getCacheScopeSizeInBytes(long cacheScopeIdentifier) { return bytes; } + public List collectLocalFileStats() { + List cacheStatsResultsList = new ArrayList<>(); + for (CacheRange cacheRange : persistedRanges.values()) { + for (LocalCacheFile cacheFile : cacheRange.getRange().asDescendingMapOfRanges().values()) { + byte[][] results = new byte[2][]; + int pos = 0; + results[pos++] = + String.valueOf(cacheFile.getPath().getName()).getBytes(); + results[pos++] = String.valueOf(cacheFile.hit).getBytes(); + 
cacheStatsResultsList.add(results); + } + } + return cacheStatsResultsList; + } + @Override public void put(FileReadRequest key, Slice data, CacheQuota cacheQuota) { - // write hot cache - this.hotCache.put(key, data.getBytes()); - - if (stats.getInMemoryRetainedBytes() + data.length() >= maxInflightBytes) { + if (stats.getInMemoryRetainedBytes() + data.length() >= this.maxInflightBytes) { // cannot accept more requests return; } @@ -314,30 +369,18 @@ public void put(FileReadRequest key, Slice data, CacheQuota cacheQuota) { @Override public void clear() { - this.cache.invalidateAll(); - this.hotCache.invalidateAll(); - } - - public void rebuildCache(Map configs) { - // clear old cache. - clear(); - - Duration cacheTTL = Optional.ofNullable(configs.get(ConnectionProperties.OSS_FS_CACHE_TTL)) - .map(d -> new Duration(d, DAYS)) - .orElse(fileMergeCacheConfig.getCacheTtl()); - - Long maxEntries = Optional.ofNullable(configs.get(ConnectionProperties.OSS_FS_MAX_CACHED_ENTRIES)) - .orElse((long) fileMergeCacheConfig.getMaxCachedEntries()); - - this.fileMergeCacheConfig.setCacheTtl(cacheTTL); - this.fileMergeCacheConfig.setMaxCachedEntries(maxEntries.intValue()); - - this.cache = CacheBuilder.newBuilder() - .maximumSize(maxEntries) - .expireAfterAccess(cacheTTL.toMillis(), MILLISECONDS) - .removalListener(new CacheRemovalListener()) - .recordStats() - .build(); + if (this.cache != null) { + this.cache.invalidateAll(); + } + if (this.stats != null) { + this.stats.reset(); + } + if (this.compressedBytesCache != null) { + this.compressedBytesCache.invalidateAll(); + } + if (this.compressedBytesCacheStats != null) { + this.compressedBytesCacheStats.reset(); + } } private boolean read(FileReadRequest request, byte[] buffer, int offset) { @@ -353,6 +396,7 @@ private boolean read(FileReadRequest request, byte[] buffer, int offset) { } LocalCacheFile cacheFile; + Range range; Lock readLock = cacheRange.getLock().readLock(); readLock.lock(); try { @@ -364,7 +408,31 @@ private boolean read(FileReadRequest request, byte[] buffer, int offset) { return false; } - cacheFile = getOnlyElement(diskRanges.entrySet().iterator()).getValue(); + Map.Entry, LocalCacheFile> values = getOnlyElement(diskRanges.entrySet().iterator()); + cacheFile = values.getValue(); + range = values.getKey(); + + // Try to use cached bytes firstly. 
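// A sketch of the rebuilt cache lifecycle (names illustrative): rebuildCache()
// now takes a FileMergeCacheConfig directly, and while a rebuild is running,
// or when caching is disabled, get() returns CACHE_IS_UNAVAILABLE instead of
// blocking, so callers should fall back to the remote read path.
FileMergeCacheConfig newConfig = new FileMergeCacheConfig()
    .setUseByteCache(true)
    .setMemoryRatioOfBytesCache(0.2d);
cacheManager.rebuildCache(newConfig);

CacheResult result = cacheManager.get(request, buffer, 0, cacheQuota);
if (result == CacheResult.CACHE_IS_UNAVAILABLE || result == CacheResult.MISS) {
    // read directly from the backing file system instead
}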
+ if (useBytesCache) { + try { + byte[] cachedBytes = compressedBytesCache.getIfPresent(cacheFile); + if (cachedBytes != null) { + compressedBytesCacheStats.incrementCacheHit(); + + System.arraycopy(cachedBytes, + (int) (request.getOffset() - cacheFile.getOffset()), + buffer, offset, + request.getLength()); + return true; + } else { + compressedBytesCacheStats.incrementCacheMiss(); + } + } catch (Throwable t) { + // there might be a change the file has been deleted + return false; + } + } + } finally { readLock.unlock(); } @@ -372,19 +440,37 @@ private boolean read(FileReadRequest request, byte[] buffer, int offset) { try (RandomAccessFile file = new RandomAccessFile(new File(cacheFile.getPath().toUri()), "r")) { file.seek(request.getOffset() - cacheFile.getOffset()); file.readFully(buffer, offset, request.getLength()); + cacheFile.incrementCacheHit(); return true; + } catch (FileNotFoundException e) { + // there might be a change the file has been deleted + log.warn(String.format("No such file or directory %s", cacheFile.getPath().getName())); + deleteNotExistFile(cacheRange, range); + return false; } catch (IOException e) { - // there might be a chance the file has been deleted + // there might be a change the file has been deleted return false; } } + private void deleteNotExistFile(CacheRange cacheRange, Range range) { + Lock readLock = cacheRange.getLock().readLock(); + readLock.lock(); + try { + cacheRange.getRange().remove(range); + } finally { + readLock.unlock(); + } + } + private boolean write(FileReadRequest key, byte[] data, Path newFilePath) { Path targetFile = key.getPath(); persistedRanges.putIfAbsent(targetFile, new CacheRange()); - LocalCacheFile previousCacheFile; - LocalCacheFile followingCacheFile; + LocalCacheFile previousCacheFile = null; + Range previousRange = null; + LocalCacheFile followingCacheFile = null; + Range followingRange = null; CacheRange cacheRange = persistedRanges.get(targetFile); if (cacheRange == null) { @@ -396,17 +482,52 @@ private boolean write(FileReadRequest key, byte[] data, Path newFilePath) { readLock.lock(); try { RangeMap cache = cacheRange.getRange(); + while (true) { + // check if it can be merged with the previous or following range + Map.Entry, LocalCacheFile> preEntry = cache.getEntry(key.getOffset() - 1); + if (preEntry != null) { + previousRange = preEntry.getKey(); + previousCacheFile = preEntry.getValue(); + } + + Map.Entry, LocalCacheFile> followingEntry = + cache.getEntry(key.getOffset() + key.getLength()); + if (followingEntry != null) { + followingRange = followingEntry.getKey(); + followingCacheFile = followingEntry.getValue(); + } + boolean continueCheck = false; + // check if the file is not exist + if (previousCacheFile != null && + !Files.exists(FileSystems.getDefault().getPath(previousCacheFile.getPath().toUri().getPath()))) { + log.warn(String.format("No such file or directory %s", previousCacheFile.getPath().getName())); + deleteNotExistFile(cacheRange, previousRange); + previousCacheFile = null; + continueCheck = true; + } + if (followingCacheFile != null && !Files.exists( + FileSystems.getDefault().getPath(followingCacheFile.getPath().toUri().getPath()))) { + log.warn(String.format("No such file or directory %s", followingCacheFile.getPath().getName())); + deleteNotExistFile(cacheRange, followingRange); + followingCacheFile = null; + continueCheck = true; + } + if (!continueCheck) { + break; + } + } - // check if it can be merged with the previous or following range - previousCacheFile = cache.get(key.getOffset() - 
1); - followingCacheFile = cache.get(key.getOffset() + key.getLength()); } finally { readLock.unlock(); } if (previousCacheFile != null && cacheFileEquals(previousCacheFile, followingCacheFile)) { - log.debug(String - .format("%s found covered range %s", Thread.currentThread().getName(), previousCacheFile.getPath())); + if (log.isDebugEnabled()) { + log.debug(String + .format("%s found covered range %s", Thread.currentThread().getName(), + previousCacheFile.getPath())); + } + // this range has already been covered by someone else return true; } @@ -458,6 +579,7 @@ private boolean write(FileReadRequest key, byte[] data, Path newFilePath) { // use a flag so that file deletion can be done outside the lock boolean updated; Set<Path> cacheFilesToDelete = new HashSet<>(); + Set<LocalCacheFile> localCacheFileToDelete = new HashSet<>(); Lock writeLock = persistedRanges.get(targetFile).getLock().writeLock(); writeLock.lock(); @@ -475,15 +597,35 @@ private boolean write(FileReadRequest key, byte[] data, Path newFilePath) { updated = true; // remove all the files that can be covered by the current range - cacheFilesToDelete = - cache.subRangeMap(Range.closedOpen(key.getOffset(), key.getOffset() + key.getLength())) - .asMapOfRanges().values().stream() - .map(LocalCacheFile::getPath).collect(Collectors.toSet()); + localCacheFileToDelete = cache + .subRangeMap(Range.closedOpen(key.getOffset(), key.getOffset() + key.getLength())) + .asMapOfRanges().values().stream().collect(Collectors.toSet()); + + cacheFilesToDelete = localCacheFileToDelete.stream() + .map(LocalCacheFile::getPath).collect(Collectors.toSet()); // update the range + LocalCacheFile localCacheFile = new LocalCacheFile(newFileOffset, newFilePath); Range<Long> newRange = Range.closedOpen(newFileOffset, newFileOffset + newFileLength); cache.remove(newRange); - cache.put(newRange, new LocalCacheFile(newFileOffset, newFilePath)); + cache.put(newRange, localCacheFile); + + if (useBytesCache) { + // The cached bytes of this file fragment. + byte[] bytesCache = new byte[newFileLength]; + + // Store the compressed bytes of the file into the bytes cache. + try (RandomAccessFile file = new RandomAccessFile(new File(newFilePath.toUri()), "r")) { + file.readFully(bytesCache, 0, newFileLength); + + compressedBytesCache.put(localCacheFile, bytesCache); + compressedBytesCacheStats.addInMemoryRetainedBytes( + LocalCacheFile.BASE_SIZE_IN_BYTES + (int) SizeOf.sizeOf(bytesCache) + ); + } catch (Exception e) { + log.warn(e); + } + } } } finally { writeLock.unlock(); @@ -491,17 +633,23 @@ private boolean write(FileReadRequest key, byte[] data, Path newFilePath) { // no lock is needed for the following operation if (updated) { - // remove the the previous or following file as well + // remove the previous or following file as well if (previousCacheFile != null) { + localCacheFileToDelete.add(previousCacheFile); cacheFilesToDelete.add(previousCacheFile.getPath()); } if (followingCacheFile != null) { + localCacheFileToDelete.add(followingCacheFile); cacheFilesToDelete.add(followingCacheFile.getPath()); } } else { + localCacheFileToDelete = ImmutableSet.of(new LocalCacheFile(newFileOffset, newFilePath)); cacheFilesToDelete = ImmutableSet.of(newFilePath); } + // One case where the compressed bytes cache must be invalidated: + // the new range merged with, or replaced, previous or following files.
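// Editor's note (illustrative only, not part of this patch): deletions are tracked in two
// parallel sets because the two caches are keyed differently -- on-disk files by Path,
// in-memory fragments by LocalCacheFile. A minimal sketch of the pairing performed below:
//   for (LocalCacheFile f : localCacheFileToDelete) {
//       compressedBytesCache.invalidate(f); // drop the in-memory copy first
//   }
//   for (Path p : cacheFilesToDelete) {
//       tryDeleteFile(p);                   // then remove the file on disk
//   }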
+ localCacheFileToDelete.forEach(compressedBytesCache::invalidate); cacheFilesToDelete.forEach(FileMergeCacheManager::tryDeleteFile); return true; } @@ -540,6 +688,8 @@ private static void tryDeleteFile(Path path) { Files.delete(file.toPath()); } } catch (IOException e) { + log.warn(String.format("%s can't delete the file %s", Thread.currentThread().getName(), + path), e); // ignore } } @@ -560,14 +710,6 @@ public CacheStats getStats() { return stats; } - public CacheConfig getCacheConfig() { - return cacheConfig; - } - - public FileMergeCacheConfig getFileMergeCacheConfig() { - return fileMergeCacheConfig; - } - public long currentCacheEntries() { return this.cache.size(); } @@ -588,9 +730,34 @@ public void close() throws IOException { destroy(); } - private static class LocalCacheFile { + public byte[][] generatePacketOfBytesCache() { + byte[][] results = new byte[CACHE_STATS_FIELD_COUNT][]; + int pos = 0; + results[pos++] = "Compressed Bytes Cache".getBytes(); // ENGINE + results[pos++] = String.valueOf(compressedBytesCacheStats.getInMemoryRetainedBytes()).getBytes(); // CACHE_SIZE + results[pos++] = String.valueOf(compressedBytesCache.size()).getBytes(); // CACHE_ENTRIES + results[pos++] = String.valueOf(-1).getBytes(); // IN_FLIGHT_MEMORY_SIZE + results[pos++] = String.valueOf(compressedBytesCacheStats.getCacheHit()).getBytes(); // HIT COUNT + results[pos++] = String.valueOf(0).getBytes(); // HOT HIT COUNT + results[pos++] = String.valueOf(compressedBytesCacheStats.getCacheMiss()).getBytes(); // MISS COUNT + results[pos++] = String.valueOf(compressedBytesCacheStats.getQuotaExceed()).getBytes(); // QUOTA_EXCEED + results[pos++] = String.valueOf(-1).getBytes(); // UNAVAILABLE_NUM + results[pos++] = "".getBytes(); // CACHE_DICTIONARY + results[pos++] = String.valueOf(-1).getBytes(); // CACHE_TTL + results[pos++] = String.valueOf(-1).getBytes(); // MAX_CACHE_ENTRIES + + // MAX_CACHE_SIZE + results[pos] = new StringBuilder().append(maxSizeOfCompressedBytes).append(" BYTES").toString() + .getBytes(); + return results; + } + + public static class LocalCacheFile { + public static final int BASE_SIZE_IN_BYTES = 64; + private final long offset; // the original file offset private final Path path; // the cache location on disk + private final AtomicLong hit = new AtomicLong(); public LocalCacheFile(long offset, Path path) { this.offset = offset; @@ -605,6 +772,10 @@ public Path getPath() { return path; } + public void incrementCacheHit() { + hit.getAndIncrement(); + } + @Override public boolean equals(Object o) { if (this == o) { @@ -677,61 +848,80 @@ public void onRemoval(RemovalNotification notification) { // There is a chance of the files to be deleted are being read. // We may just fail the cache hit and do it in a simple way given the chance is low. for (LocalCacheFile file : files) { + + // Another case where the compressed bytes cache must be invalidated: + // the physical file backing this local cache entry is being removed.
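// Editor's note (illustrative only, not part of this patch): the same cascade expressed
// with bare Guava caches -- evicting a range entry must also evict its cached bytes:
//   Cache<LocalCacheFile, byte[]> bytes = CacheBuilder.newBuilder().build();
//   Cache<Range<Long>, LocalCacheFile> ranges = CacheBuilder.newBuilder()
//       .removalListener((RemovalNotification<Range<Long>, LocalCacheFile> n) ->
//           bytes.invalidate(n.getValue()))
//       .build();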
+ compressedBytesCache.invalidate(file); + try { Files.delete(new File(file.getPath().toUri()).toPath()); } catch (IOException e) { // ignore + log.debug(file.getPath().toUri().getPath(), e); } } }); } } - public synchronized static CacheManager createMergeCacheManager(Map globalVariables, Engine engine) - throws IOException { - CacheConfig cacheConfig = new CacheConfig(); - FileMergeCacheConfig fileMergeCacheConfig = new FileMergeCacheConfig(); + public synchronized static CacheManager createMergeCacheManager( + Engine engine, CacheConfig cacheConfig, FileMergeCacheConfig fileMergeCacheConfig) { CacheStats cacheStats = new CacheStats(); - Long cacheTTL = Optional.ofNullable(globalVariables.get(ConnectionProperties.OSS_FS_CACHE_TTL)) - .orElse(StringNumericParser.simplyParseLong(ConnectionParams.OSS_FS_CACHE_TTL.getDefault())); - Long maxCacheEntries = Optional.ofNullable(globalVariables.get(ConnectionProperties.OSS_FS_MAX_CACHED_ENTRIES)) - .orElse(StringNumericParser.simplyParseLong(ConnectionParams.OSS_FS_MAX_CACHED_ENTRIES.getDefault())); - - cacheConfig.setBaseDirectory( - Files.createTempDirectory(Paths.get("../spill/temp"), CACHE_FILE_PREFIX).toUri()); - cacheConfig.setCacheQuotaScope(GLOBAL); - cacheConfig.setCacheType(CacheType.FILE_MERGE); - - cacheConfig.setCachingEnabled(true); - cacheConfig.setValidationEnabled(false); - - fileMergeCacheConfig.setCacheTtl(new Duration(cacheTTL, DAYS)); - fileMergeCacheConfig.setMaxCachedEntries(maxCacheEntries.intValue()); - fileMergeCacheConfig.setMaxInMemoryCacheSize(new DataSize(2, GIGABYTE)); - - fileMergeCacheConfig.setHotCacheTtl(new Duration(3, SECONDS)); - fileMergeCacheConfig.setMaxHotCachedEntries(1000); - - final int cores = ThreadCpuStatUtil.NUM_CORES; ScheduledExecutorService cacheFlushExecutor = - newScheduledThreadPool(cores, + newScheduledThreadPool(cacheConfig.getFlushCacheThreadNum(), new NamedThreadFactory(String.format(OSS_CACHE_FLUSHER_THREAD_NAME_FORMAT, engine))); ScheduledExecutorService cacheRemovalExecutor = - newScheduledThreadPool(cores, + newScheduledThreadPool(4, new NamedThreadFactory(String.format(OSS_CACHE_REMOVER_THREAD_NAME_FORMAT, engine))); ScheduledExecutorService cacheSizeCalculateExecutor = - newScheduledThreadPool(cores, + newScheduledThreadPool(1, new NamedThreadFactory(String.format(OSS_CACHE_SIZE_CALCULATOR_THREAD_NAME_FORMAT, engine))); - CacheManager cacheManager = new FileMergeCacheManager( - cacheConfig, - fileMergeCacheConfig, - cacheStats, - cacheFlushExecutor, - cacheRemovalExecutor, - cacheSizeCalculateExecutor - ); - return cacheManager; + try { + return new FileMergeCacheManager( + cacheConfig, + fileMergeCacheConfig, + cacheStats, + cacheFlushExecutor, + cacheRemovalExecutor, + cacheSizeCalculateExecutor, + engine + ); + } catch (Throwable t) { + cacheFlushExecutor.shutdown(); + cacheRemovalExecutor.shutdown(); + cacheSizeCalculateExecutor.shutdown(); + throw t; + } + } + + public synchronized static CacheManager createMergeCacheManager(Engine engine) + throws IOException { + CacheConfig cacheConfig = FileConfig.getInstance().getCacheConfig(); + FileMergeCacheConfig fileMergeCacheConfig = FileConfig.getInstance().getMergeCacheConfig(); + return createMergeCacheManager(engine, cacheConfig, fileMergeCacheConfig); + } + + public Cache getCompressedBytesCache() { + return compressedBytesCache; + } + + public long getMaxSizeOfCompressedBytes() { + return maxSizeOfCompressedBytes; + } + + public CacheStats getCompressedBytesCacheStats() { + return compressedBytesCacheStats; + } + + @Override + 
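// Editor's note (illustrative only, not part of this patch): the accessor below replaces
// the removed per-stream enableCache flag; FileMergeCachingFileSystem.open() further down
// now passes cacheManager.getMaxCacheQuota() when it constructs the caching input stream.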
public CacheQuota getMaxCacheQuota() { + return cacheQuota; + } + + @VisibleForTesting + public Path getBaseDirectory() { + return baseDirectory; } } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/FileMergeCachingFileSystem.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/FileMergeCachingFileSystem.java index 3897cbfa0..2295f51b2 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/FileMergeCachingFileSystem.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/FileMergeCachingFileSystem.java @@ -44,7 +44,6 @@ public final class FileMergeCachingFileSystem extends CachingFileSystem { private final CacheManager cacheManager; private final boolean cacheValidationEnabled; - private final boolean enableCache; public FileMergeCachingFileSystem( URI uri, @@ -59,8 +58,6 @@ public FileMergeCachingFileSystem( this.cacheValidationEnabled = cacheValidationEnabled; setConf(configuration); - - this.enableCache = enableCache; } @Override @@ -69,7 +66,7 @@ public FSDataInputStream open(Path path) throws IOException { dataTier.open(path), cacheManager, path, - enableCache ? CacheQuota.NO_CACHE_CONSTRAINTS : CacheQuota.DISABLE_CACHE, + cacheManager.getMaxCacheQuota(), cacheValidationEnabled); } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/FileMergeCachingInputStream.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/FileMergeCachingInputStream.java index 337e2d85c..f36a27831 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/FileMergeCachingInputStream.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/oss/filesystem/cache/FileMergeCachingInputStream.java @@ -41,7 +41,7 @@ import static io.airlift.slice.Slices.wrappedBuffer; import static java.util.Objects.requireNonNull; public final class FileMergeCachingInputStream extends FSDataInputStream { private static final Logger LOGGER = LoggerFactory.getLogger(FileMergeCachingInputStream.class); private static final String LOG_FORMAT = "%s: [ %s, %s ] size: %s time: %s file: %s"; @@ -79,51 +80,66 @@ public void readFully(long position, byte[] buffer, int offset, int length) FileReadRequest key = new FileReadRequest(path, position, length); switch (cacheManager.get(key, buffer, offset, cacheQuota)) { case HIT_HOT_CACHE: - LOGGER.info(String.format( - LOG_FORMAT, - "HIT_HOT_CACHE", - position, - position + length, - length, - System.currentTimeMillis() - s, - key.getPath() - )); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format( + LOG_FORMAT, + "HIT_HOT_CACHE", + position, + position + length, + length, + System.currentTimeMillis() - s, + key.getPath() + )); + } + return; case HIT: - LOGGER.info(String.format( - LOG_FORMAT, - "HIT_CACHE", - position, - position + length, - length, - System.currentTimeMillis() - s, - key.getPath() + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format( + LOG_FORMAT, + "HIT_CACHE", + position, + position + length, + length, + System.currentTimeMillis() - s, + key.getPath() )); + } + break; case MISS: inputStream.readFully(position, buffer, offset, length); cacheManager.put(key, wrappedBuffer(buffer, offset, length), cacheQuota); - LOGGER.info(String.format( - LOG_FORMAT, - "OSS_READ", - position, - position + length, - length, - System.currentTimeMillis() - s, -
key.getPath() - )); + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format( + LOG_FORMAT, + "OSS_READ", + position, + position + length, + length, + System.currentTimeMillis() - s, + key.getPath() + )); + } + return; case CACHE_QUOTA_EXCEED: + case CACHE_IS_UNAVAILABLE: inputStream.readFully(position, buffer, offset, length); - LOGGER.info(String.format( - LOG_FORMAT, - "CACHE_EXCEED", - position, - position + length, - length, - System.currentTimeMillis() - s, - key.getPath() - )); + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(String.format( + LOG_FORMAT, + "CACHE_EXCEED", + position, + position + length, + length, + System.currentTimeMillis() - s, + key.getPath() + )); + } + return; } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/partition/MurmurHashUtils.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/partition/MurmurHashUtils.java index e5821b00b..9f8882e28 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/partition/MurmurHashUtils.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/partition/MurmurHashUtils.java @@ -33,20 +33,33 @@ private static long murmurHash3_128(byte[] data, int seed) { return longVal; } - private static final HashFunction zeroSeedMurmur3hashFunc = Hashing.murmur3_128(0); + private static int murmurHash3_32(byte[] data) { + HashCode hashCode = zeroSeedMurmur3hashFunc32.hashBytes(data); + int intVal = hashCode.asInt(); + return intVal; + } + + private static final HashFunction zeroSeedMurmur3hashFunc32 = Hashing.murmur3_32(0); + + private static final HashFunction zeroSeedMurmur3hashFunc128 = Hashing.murmur3_128(0); private static long murmurHash3_128(long data) { - HashCode hashCode = zeroSeedMurmur3hashFunc.hashLong(data); + HashCode hashCode = zeroSeedMurmur3hashFunc128.hashLong(data); long longVal = hashCode.asLong(); return longVal; } - public static long murmurHashWithZeroSeed(byte[] data) { + public static long murmurHash128WithZeroSeed(long data) { + return murmurHash3_128(data); + } + + public static long murmurHash128WithZeroSeed(byte[] data) { return murmurHash3_128(data, 0); } - public static long murmurHashWithZeroSeed(long data) { - return murmurHash3_128(data); + public static int murmurHash32WithZeroSeed(byte[] data) { + return murmurHash3_32(data); } + } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/privilege/ColumnPrivilegeVerifyItem.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/privilege/ColumnPrivilegeVerifyItem.java new file mode 100644 index 000000000..fa44ffbe6 --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/privilege/ColumnPrivilegeVerifyItem.java @@ -0,0 +1,35 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.common.privilege; + +import com.taobao.tddl.common.privilege.PrivilegePoint; + +/** + * @author pangzhaoxing + */ +public class ColumnPrivilegeVerifyItem extends PrivilegeVerifyItem { + private String column; + + public ColumnPrivilegeVerifyItem(String db, String table, String column, PrivilegePoint privilegePoint) { + super(db, table, privilegePoint); + this.column = column; + } + + public String getColumn() { + return column; + } +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/ConnectionParams.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/ConnectionParams.java index a76415498..64ad01c15 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/ConnectionParams.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/ConnectionParams.java @@ -69,6 +69,48 @@ public class ConnectionParams { false, false); + /** + * Whether DDL is allowed; defaults to false. + */ + public static final BooleanConfigParam ENABLE_DDL = new BooleanConfigParam(ConnectionProperties.ENABLE_DDL, + false, + false); + + /* + * Whether to enable two-phase DDL. + */ + public static final BooleanConfigParam ENABLE_DRDS_MULTI_PHASE_DDL = + new BooleanConfigParam(ConnectionProperties.ENABLE_DRDS_MULTI_PHASE_DDL, + true, + false); + + /* + * Whether to check the table before executing physical DDL. + */ + public static final BooleanConfigParam CHECK_TABLE_BEFORE_PHY_DDL = + new BooleanConfigParam(ConnectionProperties.CHECK_TABLE_BEFORE_PHY_DDL, + true, + false); + + /* + * Whether to check the physical connection count. + */ + public static final BooleanConfigParam CHECK_PHY_CONN_NUM = + new BooleanConfigParam(ConnectionProperties.CHECK_PHY_CONN_NUM, + true, + false); + + /* + * Final status of two-phase DDL; for debugging only. + */ + public static final StringConfigParam TWO_PHASE_DDL_FINAL_STATUS = + new StringConfigParam(ConnectionProperties.TWO_PHASE_DDL_FINAL_STATUS, + "FINISH", + false); + + /** + * Check if random physical table name is enabled. + */ public static final BooleanConfigParam ENABLE_RANDOM_PHY_TABLE_NAME = new BooleanConfigParam(ConnectionProperties.ENABLE_RANDOM_PHY_TABLE_NAME, true, @@ -316,11 +358,39 @@ public class ConnectionParams { public static final IntConfigParam PHYSICAL_DDL_MDL_WAITING_TIMEOUT = new IntConfigParam(ConnectionProperties.PHYSICAL_DDL_MDL_WAITING_TIMEOUT, - DdlConstants.MIN_PHYSICAL_DDL_MDL_WAITING_TIMEOUT, - DdlConstants.MAX_PHYSICAL_DDL_MDL_WAITING_TIMEOUT, - Integer.valueOf(DdlConstants.PHYSICAL_DDL_MDL_WAITING_TIMEOUT), + -1, //5 + Attribute.MAX_PHYSICAL_DDL_MDL_WAITING_TIMEOUT, //Integer.MAX_VALUE + -1, + false); + + /** + * Check if server should automatically recover leftover jobs during initialization. + */ + public static final BooleanConfigParam AUTOMATIC_DDL_JOB_RECOVERY = + new BooleanConfigParam(ConnectionProperties.AUTOMATIC_DDL_JOB_RECOVERY, + Attribute.DEFAULT_AUTOMATIC_DDL_JOB_RECOVERY, false); + /** + * Comma-separated string (e.g. "TASK1,TASK2") used to skip the execution of certain DDL tasks; + * only works for DDL tasks that handle this flag explicitly + */ + public static final StringConfigParam SKIP_DDL_TASKS = + new StringConfigParam(ConnectionProperties.SKIP_DDL_TASKS, + "", + true); + public static final StringConfigParam SKIP_DDL_TASKS_EXECUTE = + new StringConfigParam(ConnectionProperties.SKIP_DDL_TASKS_EXECUTE, + "", + true); + public static final StringConfigParam SKIP_DDL_TASKS_ROLLBACK = + new StringConfigParam(ConnectionProperties.SKIP_DDL_TASKS_ROLLBACK, + "", + true); + + /** + * Maximum number of table partitions per database.
+ */ public static final IntConfigParam MAX_TABLE_PARTITIONS_PER_DB = new IntConfigParam(ConnectionProperties.MAX_TABLE_PARTITIONS_PER_DB, DdlConstants.MIN_ALLOWED_TABLE_SHARDS_PER_DB, @@ -489,6 +559,14 @@ public class ConnectionParams { true, false); + /** + * Whether to compare JSON as plain strings when DML computes affected rows and when REPLACE/UPSERT determines whether a row changed + */ + public final static BooleanConfigParam DML_CHECK_JSON_BY_STRING_COMPARE = new BooleanConfigParam( + ConnectionProperties.DML_CHECK_JSON_BY_STRING_COMPARE, + true, + false); + /** * INSERT 中的 VALUES 出现列名时是否替换为插入值而不是默认值,以兼容 MySQL 行为;会对 INSERT 的 INPUT 按 VALUES 顺序排序 */ @@ -508,6 +586,50 @@ public class ConnectionParams { false ); + /** + * Deliberate delay injected while validating the CREATE TABLE statement. For testing only. + */ + public final static IntConfigParam MULTI_PHASE_WAIT_PREPARED_DELAY = new IntConfigParam( + ConnectionProperties.MULTI_PHASE_WAIT_PREPARED_DELAY, + 0, + 7200, + 0, + false + ); + + /** + * Deliberate delay injected while validating the CREATE TABLE statement. For testing only. + */ + public final static IntConfigParam MULTI_PHASE_WAIT_COMMIT_DELAY = new IntConfigParam( + ConnectionProperties.MULTI_PHASE_WAIT_COMMIT_DELAY, + 0, + 7200, + 0, + false + ); + + /** + * Deliberate delay injected while validating the CREATE TABLE statement. For testing only. + */ + public final static IntConfigParam MULTI_PHASE_COMMIT_DELAY = new IntConfigParam( + ConnectionProperties.MULTI_PHASE_COMMIT_DELAY, + 0, + 7200, + 0, + false + ); + + /** + * Deliberate delay injected while validating the CREATE TABLE statement. For testing only. + */ + public final static IntConfigParam MULTI_PHASE_PREPARE_DELAY = new IntConfigParam( + ConnectionProperties.MULTI_PHASE_PREPARE_DELAY, + 0, + 7200, + 0, + false + ); + /** * 在执行物理DDL语句时,主动延迟的时间,仅用于测试 */ @@ -520,13 +642,22 @@ public class ConnectionParams { ); /** - * 在执行物理DDL语句时,主动延迟的时间,仅用于测试 + * Skip CDC in CREATE TABLE statements */ public final static BooleanConfigParam CREATE_TABLE_SKIP_CDC = new BooleanConfigParam( ConnectionProperties.CREATE_TABLE_SKIP_CDC, false, false ); + + /** + * Proactively verify the logical column order during CHECK TABLE + */ + public final static BooleanConfigParam CHECK_LOGICAL_COLUMN_ORDER = new BooleanConfigParam( + ConnectionProperties.CHECK_LOGICAL_COLUMN_ORDER, + false, + false + ); /** * DML 执行时是否检查主键冲突 */ @@ -543,6 +674,16 @@ public class ConnectionParams { false, true); + /** + * Maximum amount of data, in MB, migrated by a single DDL job generated when rebalance assembles its tasks + */ + public final static LongConfigParam REBALANCE_MAX_UNIT_SIZE = new LongConfigParam( + ConnectionProperties.REBALANCE_MAX_UNIT_SIZE, + 0L, + Long.MAX_VALUE, + 0L, + true); + /** * 是否开启 Foreign Constraint Check */ @@ -572,6 +713,14 @@ public class ConnectionParams { true, true); + /** + * Whether to forbid DDL on tables that contain a CCI + */ + public final static BooleanConfigParam FORBID_DDL_WITH_CCI = new BooleanConfigParam( + ConnectionProperties.FORBID_DDL_WITH_CCI, + true, + true); + /** * 是否强制使用 Online Modify Column,即使列类型没有改变,或者不是支持的类型 */ @@ -581,18 +730,39 @@ public class ConnectionParams { false); /** - * 是否在有 GSI 的表上使用 Online Modify Column + * Whether the Online Modify Column backfill uses the RETURNING optimization + */ + public final static BooleanConfigParam OMC_BACK_FILL_USE_RETURNING = new BooleanConfigParam( + ConnectionProperties.OMC_BACK_FILL_USE_RETURNING, + true, + false); + + /** + * Whether to apply Online Modify Column automatically + */ + public final static BooleanConfigParam ENABLE_AUTO_OMC = new BooleanConfigParam( + ConnectionProperties.ENABLE_AUTO_OMC, + true, + false); + + /** + * Whether to force the use of Online Modify Column */ - public final static BooleanConfigParam OMC_ALTER_TABLE_WITH_GSI = new BooleanConfigParam( - ConnectionProperties.OMC_ALTER_TABLE_WITH_GSI, + public final static BooleanConfigParam FORCE_USING_OMC = new BooleanConfigParam( + ConnectionProperties.FORCE_USING_OMC, false, false); /** - * Online Modify Column 回填时是否使用
returning 优化 + * Whether OMC enables the changeset optimization */ - public final static BooleanConfigParam OMC_BACK_FILL_USE_RETURNING = new BooleanConfigParam( - ConnectionProperties.OMC_BACK_FILL_USE_RETURNING, + public final static BooleanConfigParam ENABLE_CHANGESET_FOR_OMC = new BooleanConfigParam( + ConnectionProperties.ENABLE_CHANGESET_FOR_OMC, + true, + false); + + public final static BooleanConfigParam ENABLE_BACKFILL_OPT_FOR_OMC = new BooleanConfigParam( + ConnectionProperties.ENABLE_BACKFILL_OPT_FOR_OMC, true, false); @@ -693,6 +863,14 @@ public class ConnectionParams { false, false); + /** + * Output MySQL-compatible indentation (two spaces) in SHOW CREATE TABLE results + */ + public static final BooleanConfigParam OUTPUT_MYSQL_INDENT = new BooleanConfigParam( + ConnectionProperties.OUTPUT_MYSQL_INDENT, + false, + false); + public static final BooleanConfigParam ALLOW_FULL_TABLE_SCAN = new BooleanConfigParam( ConnectionProperties.ALLOW_FULL_TABLE_SCAN, false, @@ -753,12 +931,29 @@ public class ConnectionParams { false, false); + public static final BooleanConfigParam ENABLE_XPLAN_FEEDBACK = new BooleanConfigParam( + ConnectionProperties.ENABLE_XPLAN_FEEDBACK, + true, + true); + + /** + * Socket Timeout + */ public static final LongConfigParam SOCKET_TIMEOUT = new LongConfigParam(ConnectionProperties.SOCKET_TIMEOUT, null, null, -1L, false); + /** + * DDL socket timeout; the default value is 7 days. + */ + public static final LongConfigParam MERGE_DDL_TIMEOUT = new LongConfigParam(ConnectionProperties.MERGE_DDL_TIMEOUT, + null, + null, + 3600 * 24 * 7 * 1000L, + false); + public static final BooleanConfigParam ENABLE_COMPATIBLE_DATETIME_ROUNDDOWN = new BooleanConfigParam( ConnectionProperties.ENABLE_COMPATIBLE_DATETIME_ROUNDDOWN, false, @@ -908,6 +1103,25 @@ public class ConnectionParams { TransactionAttribute.DEFAULT_TSO_HEARTBEAT_INTERVAL, false); + public static final IntConfigParam COLUMNAR_TSO_PURGE_INTERVAL = new IntConfigParam( + ConnectionProperties.COLUMNAR_TSO_PURGE_INTERVAL, + 100, + null, + TransactionAttribute.DEFAULT_COLUMNAR_TSO_PURGE_INTERVAL, // 1 min + true + ); + + public static final IntConfigParam COLUMNAR_TSO_UPDATE_INTERVAL = new IntConfigParam( + ConnectionProperties.COLUMNAR_TSO_UPDATE_INTERVAL, + 100, + null, + TransactionAttribute.DEFAULT_COLUMNAR_TSO_UPDATE_INTERVAL, // 3 seconds + true + ); + + /** + * Time window in which transaction-log purging starts (default 00:00-00:10) + */ public static final StringConfigParam PURGE_TRANS_START_TIME = new StringConfigParam( ConnectionProperties.PURGE_TRANS_START_TIME, TransactionAttribute.PURGE_TRANS_START_TIME, @@ -1039,6 +1253,11 @@ public static void addSupportedParam(ConfigParam param) { true, false); + public final static BooleanConfigParam BACKFILL_USE_RETURNING = new BooleanConfigParam( + ConnectionProperties.BACKFILL_USE_RETURNING, + true, + false); + /** * enable parallel physical table backfill */ @@ -1102,9 +1321,9 @@ public static void addSupportedParam(ConfigParam param) { true, false); - public static final IntConfigParam FASTCHECKER_RETRY_TIMES = - new IntConfigParam(ConnectionProperties.FASTCHECKER_RETRY_TIMES, - 1, 5, 3, false); + public static IntConfigParam FASTCHECKER_THREAD_POOL_SIZE = + new IntConfigParam(ConnectionProperties.FASTCHECKER_THREAD_POOL_SIZE, + 1, 10, 1, false); public static final IntConfigParam FASTCHECKER_BATCH_TIMEOUT_RETRY_TIMES = new IntConfigParam(ConnectionProperties.FASTCHECKER_BATCH_TIMEOUT_RETRY_TIMES, @@ -1118,6 +1337,9 @@ public static void addSupportedParam(ConfigParam param) { true, false); + /** + * Whether the scale-out feature is enabled; it can be disabled/enabled
from diamond/metadb + */ public static final BooleanConfigParam ENABLE_SCALE_OUT_FEATURE = new BooleanConfigParam(ConnectionProperties.ENABLE_SCALE_OUT_FEATURE, true, @@ -1301,6 +1523,42 @@ public static void addSupportedParam(ConfigParam param) { 64L, false); + /** + * Columnar default partitions + */ + public static final LongConfigParam COLUMNAR_DEFAULT_PARTITIONS = new LongConfigParam( + ConnectionProperties.COLUMNAR_DEFAULT_PARTITIONS, + 2L, + 16384L, + 16L, + false); + + /** + * Specify the 'before status' of ALTER INDEX VISIBLE, + * so that we can change cci status from CREATING to PUBLIC + */ + public static final StringConfigParam ALTER_CCI_STATUS_BEFORE = new StringConfigParam( + ConnectionProperties.ALTER_CCI_STATUS_BEFORE, + "", + false); + + /** + * Specify the 'after status' of ALTER INDEX VISIBLE, + * so that we can change cci status from CREATING to PUBLIC + */ + public static final StringConfigParam ALTER_CCI_STATUS_AFTER = new StringConfigParam( + ConnectionProperties.ALTER_CCI_STATUS_AFTER, + "", + false); + + /** + * Enable change index status with ALTER INDEX VISIBLE + */ + public static final BooleanConfigParam ALTER_CCI_STATUS = new BooleanConfigParam( + ConnectionProperties.ALTER_CCI_STATUS, + false, + false); + public static final BooleanConfigParam GSI_DEFAULT_CURRENT_TIMESTAMP = new BooleanConfigParam( ConnectionProperties.GSI_DEFAULT_CURRENT_TIMESTAMP, true, @@ -1464,28 +1722,6 @@ public static void addSupportedParam(ConfigParam param) { false ); - /** - * parallelism limit for CreateDatabase as fastchecker - */ - public static final IntConfigParam CREATE_DATABASE_AS_FASTCHECKER_PARALLELISM = new IntConfigParam( - ConnectionProperties.CREATE_DATABASE_AS_FASTCHECKER_PARALLELISM, - -1, - 128, - 4, - false - ); - - /** - * fastchecker retry times for create database as - */ - public static final IntConfigParam CREATE_DATABASE_AS_FASTCHECKER_RETRY_TIMES = new IntConfigParam( - ConnectionProperties.CREATE_DATABASE_AS_FASTCHECKER_RETRY_TIMES, - 1, - 5, - 2, - false - ); - public static final IntConfigParam CREATE_DATABASE_MAX_PARTITION_FOR_DEBUG = new IntConfigParam( ConnectionProperties.CREATE_DATABASE_MAX_PARTITION_FOR_DEBUG, 1, @@ -1494,16 +1730,6 @@ public static void addSupportedParam(ConfigParam param) { false ); - /** - * phy table lock timeout for fastchecker, unit: second - */ - public static final IntConfigParam FASTCHECKER_LOCK_TIMEOUT = new IntConfigParam( - ConnectionProperties.FASTCHECKER_LOCK_TIMEOUT, - 1, - Integer.MAX_VALUE, - 10, - false); - /** * if phy table rows count exceed this param, fastchecker will check by batch */ @@ -1549,14 +1775,33 @@ public static void addSupportedParam(ConfigParam param) { ); /** - * parallelism limit for GsiFastChecker + * import table */ - public static final IntConfigParam GSI_FASTCHECKER_PARALLELISM = new IntConfigParam( - ConnectionProperties.GSI_FASTCHECKER_PARALLELISM, - -1, - 128, + public static final BooleanConfigParam IMPORT_TABLE = new BooleanConfigParam( + ConnectionProperties.IMPORT_TABLE, + false, + true + ); + + public static final IntConfigParam IMPORT_TABLE_PARALLELISM = new IntConfigParam( + ConnectionProperties.IMPORT_TABLE_PARALLELISM, + 1, + 64, 4, - false); + true + ); + + public static final BooleanConfigParam REIMPORT_TABLE = new BooleanConfigParam( + ConnectionProperties.REIMPORT_TABLE, + false, + true + ); + + public static final BooleanConfigParam IMPORT_DATABASE = new BooleanConfigParam( + ConnectionProperties.IMPORT_DATABASE, + false, + true + ); public static final StringConfigParam 
GSI_BACKFILL_POSITION_MARK = new StringConfigParam( ConnectionProperties.GSI_BACKFILL_POSITION_MARK, @@ -1656,7 +1901,11 @@ public static void addSupportedParam(ConfigParam param) { ConnectionProperties.ENABLE_BACKGROUND_STATISTIC_COLLECTION, true, true); public static final IntConfigParam STATISTIC_VISIT_DN_TIMEOUT = new IntConfigParam( - ConnectionProperties.STATISTIC_VISIT_DN_TIMEOUT, 1, null, 60000, true); + ConnectionProperties.STATISTIC_VISIT_DN_TIMEOUT, 1, null, 600000, true); + + public static final IntConfigParam STATISTIC_IN_DEGRADATION_NUMBER = new IntConfigParam( + ConnectionProperties.STATISTIC_IN_DEGRADATION_NUMBER, 1, null, 100, true); + public static final StringConfigParam BACKGROUND_STATISTIC_COLLECTION_START_TIME = new StringConfigParam( ConnectionProperties.BACKGROUND_STATISTIC_COLLECTION_START_TIME, "02:00", true); @@ -1670,6 +1919,21 @@ public static void addSupportedParam(ConfigParam param) { public static final IntConfigParam BACKGROUND_STATISTIC_COLLECTION_EXPIRE_TIME = new IntConfigParam( ConnectionProperties.BACKGROUND_STATISTIC_COLLECTION_EXPIRE_TIME, 1, null, 3 * 24 * 60 * 60, true); + public static final BooleanConfigParam SKIP_PHYSICAL_ANALYZE = new BooleanConfigParam( + ConnectionProperties.SKIP_PHYSICAL_ANALYZE, false, true); + + public static final IntConfigParam STATISTIC_EXPIRE_TIME = new IntConfigParam( + ConnectionProperties.STATISTIC_EXPIRE_TIME, 1, null, 8 * 24 * 60 * 60, true); + + public static final LongConfigParam CACHELINE_INDICATE_UPDATE_TIME = new LongConfigParam( + ConnectionProperties.CACHELINE_INDICATE_UPDATE_TIME, 1L, null, 0L, true); + + public static final BooleanConfigParam ENABLE_CACHELINE_COMPENSATION = new BooleanConfigParam( + ConnectionProperties.ENABLE_CACHELINE_COMPENSATION, true, true); + + public static final StringConfigParam CACHELINE_COMPENSATION_BLACKLIST = new StringConfigParam( + ConnectionProperties.CACHELINE_COMPENSATION_BLACKLIST, "", true); + public static final FloatConfigParam SAMPLE_PERCENTAGE = new FloatConfigParam( ConnectionProperties.SAMPLE_PERCENTAGE, -1f, 100f, -1f, true); @@ -1717,6 +1981,12 @@ public static void addSupportedParam(ConfigParam param) { public static final BooleanConfigParam ENABLE_SEMI_HASH_JOIN = new BooleanConfigParam( ConnectionProperties.ENABLE_SEMI_HASH_JOIN, true, true); + public static final BooleanConfigParam ENABLE_REVERSE_SEMI_HASH_JOIN = new BooleanConfigParam( + ConnectionProperties.ENABLE_REVERSE_SEMI_HASH_JOIN, true, true); + + public static final BooleanConfigParam ENABLE_REVERSE_ANTI_HASH_JOIN = new BooleanConfigParam( + ConnectionProperties.ENABLE_REVERSE_ANTI_HASH_JOIN, true, true); + public static final BooleanConfigParam ENABLE_SEMI_BKA_JOIN = new BooleanConfigParam( ConnectionProperties.ENABLE_SEMI_BKA_JOIN, true, true); @@ -1744,6 +2014,9 @@ public static void addSupportedParam(ConfigParam param) { public static final IntConfigParam CBO_TOO_MANY_JOIN_LIMIT = new IntConfigParam( ConnectionProperties.CBO_TOO_MANY_JOIN_LIMIT, 0, null, 14, true); + public static final IntConfigParam COLUMNAR_CBO_TOO_MANY_JOIN_LIMIT = new IntConfigParam( + ConnectionProperties.COLUMNAR_CBO_TOO_MANY_JOIN_LIMIT, 0, null, 10, true); + public static final IntConfigParam CBO_LEFT_DEEP_TREE_JOIN_LIMIT = new IntConfigParam( ConnectionProperties.CBO_LEFT_DEEP_TREE_JOIN_LIMIT, 0, null, 7, true); @@ -1753,6 +2026,9 @@ public static void addSupportedParam(ConfigParam param) { public static final IntConfigParam CBO_BUSHY_TREE_JOIN_LIMIT = new IntConfigParam( 
ConnectionProperties.CBO_BUSHY_TREE_JOIN_LIMIT, 0, null, 3, true); + public static final BooleanConfigParam ENABLE_JOINAGG_TO_JOINAGGSEMIJOIN = new BooleanConfigParam( + ConnectionProperties.ENABLE_JOINAGG_TO_JOINAGGSEMIJOIN, true, true); + public static final IntConfigParam CBO_JOIN_TABLELOOKUP_TRANSPOSE_LIMIT = new IntConfigParam( ConnectionProperties.CBO_JOIN_TABLELOOKUP_TRANSPOSE_LIMIT, 0, null, 1, true); @@ -1765,9 +2041,15 @@ public static void addSupportedParam(ConfigParam param) { public static final BooleanConfigParam ENABLE_LV_SUBQUERY_UNWRAP = new BooleanConfigParam( ConnectionProperties.ENABLE_LV_SUBQUERY_UNWRAP, true, true); + public static final BooleanConfigParam EXPLAIN_PRUNING_DETAIL = new BooleanConfigParam( + ConnectionProperties.EXPLAIN_PRUNING_DETAIL, false, true); + public static final BooleanConfigParam ENABLE_FILTER_REORDER = new BooleanConfigParam( ConnectionProperties.ENABLE_FILTER_REORDER, true, true); + public static final BooleanConfigParam ENABLE_CONSTANT_FOLD = new BooleanConfigParam( + ConnectionProperties.ENABLE_CONSTANT_FOLD, true, true); + public static final BooleanConfigParam ENABLE_STATISTIC_FEEDBACK = new BooleanConfigParam( ConnectionProperties.ENABLE_STATISTIC_FEEDBACK, true, true); @@ -1777,6 +2059,12 @@ public static void addSupportedParam(ConfigParam param) { public static final BooleanConfigParam ENABLE_SORT_AGG = new BooleanConfigParam( ConnectionProperties.ENABLE_SORT_AGG, true, true); + public static final BooleanConfigParam PARTIAL_AGG_ONLY = new BooleanConfigParam( + ConnectionProperties.PARTIAL_AGG_ONLY, false, true); + + public static final IntConfigParam PARTIAL_AGG_SHARD = new IntConfigParam( + ConnectionProperties.PARTIAL_AGG_SHARD, 0, Integer.MAX_VALUE, 6, true); + public static final BooleanConfigParam ENABLE_PARTIAL_AGG = new BooleanConfigParam( ConnectionProperties.ENABLE_PARTIAL_AGG, true, true); @@ -1786,9 +2074,6 @@ public static void addSupportedParam(ConfigParam param) { public static final IntConfigParam PARTIAL_AGG_BUCKET_THRESHOLD = new IntConfigParam( ConnectionProperties.PARTIAL_AGG_BUCKET_THRESHOLD, 0, Integer.MAX_VALUE, 64, true); - public static final IntConfigParam AGG_MAX_HASH_TABLE_FACTOR = new IntConfigParam( - ConnectionProperties.AGG_MAX_HASH_TABLE_FACTOR, 1, 128, 1, true); - public static final IntConfigParam AGG_MIN_HASH_TABLE_FACTOR = new IntConfigParam( ConnectionProperties.AGG_MIN_HASH_TABLE_FACTOR, 1, 128, 1, true); @@ -1825,6 +2110,9 @@ public static void addSupportedParam(ConfigParam param) { public static final BooleanConfigParam ENABLE_PUSH_JOIN = new BooleanConfigParam( ConnectionProperties.ENABLE_PUSH_JOIN, true, true); + public static final BooleanConfigParam ENABLE_PUSH_CORRELATE = new BooleanConfigParam( + ConnectionProperties.ENABLE_PUSH_CORRELATE, true, true); + public static final BooleanConfigParam IGNORE_UN_PUSHABLE_FUNC_IN_JOIN = new BooleanConfigParam( ConnectionProperties.IGNORE_UN_PUSHABLE_FUNC_IN_JOIN, true, true); @@ -1867,6 +2155,9 @@ public static void addSupportedParam(ConfigParam param) { public static final BooleanConfigParam ENABLE_START_UP_COST = new BooleanConfigParam( ConnectionProperties.ENABLE_START_UP_COST, true, true); + public static final BooleanConfigParam ENABLE_MQ_CACHE_COST_BY_THREAD = new BooleanConfigParam( + ConnectionProperties.ENABLE_MQ_CACHE_COST_BY_THREAD, true, true); + public static final IntConfigParam CBO_START_UP_COST_JOIN_LIMIT = new IntConfigParam( ConnectionProperties.CBO_START_UP_COST_JOIN_LIMIT, 0, null, 5, true); @@ -1931,6 +2222,11 @@ public static 
void addSupportedParam(ConfigParam param) { true, true); + public static final BooleanConfigParam ENABLE_OPTIMIZE_RANDOM_EXCHANGE = + new BooleanConfigParam(ConnectionProperties.ENABLE_OPTIMIZE_RANDOM_EXCHANGE, + true, + true); + public static final BooleanConfigParam ENABLE_SPM_EVOLUTION_BY_TIME = new BooleanConfigParam(ConnectionProperties.ENABLE_SPM_EVOLUTION_BY_TIME, false, @@ -2098,6 +2394,12 @@ public static void addSupportedParam(ConfigParam param) { public static final BooleanConfigParam INSERT_SELECT_SELF_BY_PARALLEL = new BooleanConfigParam( ConnectionProperties.INSERT_SELECT_SELF_BY_PARALLEL, false, true); + /** + * Whether duplicate columns are allowed in INSERT + */ + public final static BooleanConfigParam INSERT_DUPLICATE_COLUMN = new BooleanConfigParam( + ConnectionProperties.INSERT_DUPLICATE_COLUMN, false, true); + /** * MODIFY_SELECT_MULTI策略时 逻辑任务执行 的线程个数 */ @@ -2184,11 +2486,6 @@ public static void addSupportedParam(ConfigParam param) { public static final IntConfigParam PUSH_CORRELATE_MATERIALIZED_LIMIT = new IntConfigParam( ConnectionProperties.PUSH_CORRELATE_MATERIALIZED_LIMIT, 1, 10000, 500, true); - public static final BooleanConfigParam WINDOW_FUNC_REORDER_JOIN = - new BooleanConfigParam(ConnectionProperties.WINDOW_FUNC_REORDER_JOIN, - false, - true); - public static final BooleanConfigParam ENABLE_MPP = new BooleanConfigParam( ConnectionProperties.ENABLE_MPP, false, true); @@ -2204,6 +2501,18 @@ public static void addSupportedParam(ConfigParam param) { public static final BooleanConfigParam MPP_PARALLELISM_AUTO_ENABLE = new BooleanConfigParam( ConnectionProperties.MPP_PARALLELISM_AUTO_ENABLE, false, true); + /** + * Show pipeline info when EXPLAIN PHYSICAL runs under MPP mode + */ + public static final BooleanConfigParam SHOW_PIPELINE_INFO_UNDER_MPP = new BooleanConfigParam( + ConnectionProperties.SHOW_PIPELINE_INFO_UNDER_MPP, true, true); + + public static final BooleanConfigParam ENABLE_TWO_CHOICE_SCHEDULE = new BooleanConfigParam( + ConnectionProperties.ENABLE_TWO_CHOICE_SCHEDULE, true, true); + + public static final BooleanConfigParam ENABLE_COLUMNAR_SCHEDULE = new BooleanConfigParam( + ConnectionProperties.ENABLE_COLUMNAR_SCHEDULE, false, true); + public static final BooleanConfigParam MPP_PRINT_ELAPSED_LONG_QUERY_ENABLED = new BooleanConfigParam( ConnectionProperties.MPP_PRINT_ELAPSED_LONG_QUERY_ENABLED, false, true); @@ -2217,6 +2526,9 @@ public static void addSupportedParam(ConfigParam param) { public static final IntConfigParam MPP_MAX_PARALLELISM = new IntConfigParam( ConnectionProperties.MPP_MAX_PARALLELISM, 0, 1024, -1, true); + public static final IntConfigParam PARALLELISM_FOR_EMPTY_TABLE = new IntConfigParam( + ConnectionProperties.PARALLELISM_FOR_EMPTY_TABLE, 0, 1024, -1, true); + public static final IntConfigParam MPP_QUERY_ROWS_PER_PARTITION = new IntConfigParam( ConnectionProperties.MPP_QUERY_ROWS_PER_PARTITION, 1, Integer.MAX_VALUE, 150000, true); @@ -2239,9 +2551,27 @@ public static void addSupportedParam(ConfigParam param) { public static final IntConfigParam MPP_PARALLELISM = new IntConfigParam( ConnectionProperties.MPP_PARALLELISM, 1, Integer.MAX_VALUE, -1, true); + public static final IntConfigParam MPP_NODE_SIZE = new IntConfigParam( + ConnectionProperties.MPP_NODE_SIZE, 1, Integer.MAX_VALUE, -1, true); + + public static final BooleanConfigParam MPP_NODE_RANDOM = new BooleanConfigParam( + ConnectionProperties.MPP_NODE_RANDOM, true, true + ); + + public static final BooleanConfigParam MPP_PREFER_LOCAL_NODE = new BooleanConfigParam( +
ConnectionProperties.MPP_PREFER_LOCAL_NODE, true, true + ); + + public static final BooleanConfigParam SCHEDULE_BY_PARTITION = new BooleanConfigParam( + ConnectionProperties.SCHEDULE_BY_PARTITION, false, true + ); + public static final IntConfigParam DATABASE_PARALLELISM = new IntConfigParam( ConnectionProperties.DATABASE_PARALLELISM, 0, 128, 0, true); + public static final IntConfigParam AGG_MAX_HASH_TABLE_FACTOR = new IntConfigParam( + ConnectionProperties.AGG_MAX_HASH_TABLE_FACTOR, 1, 128, -1, true); + public static final IntConfigParam POLARDBX_PARALLELISM = new IntConfigParam( ConnectionProperties.POLARDBX_PARALLELISM, 0, 128, -1, true); @@ -2296,7 +2626,20 @@ public static void addSupportedParam(ConfigParam param) { true, true); - public static final BooleanConfigParam ENABLE_INDEX_SKYLINE = new BooleanConfigParam( + /** + * Try to prune useless GSIs + */ + public static final BooleanConfigParam ENABLE_INDEX_SELECTION_PRUNE = new BooleanConfigParam( + ConnectionProperties.ENABLE_INDEX_SELECTION_PRUNE, + true, + true); + + public static final BooleanConfigParam ENABLE_PUSHDOWN_DISTINCT = new BooleanConfigParam( + ConnectionProperties.ENABLE_PUSHDOWN_DISTINCT, + true, + true); + + public static final BooleanConfigParam ENABLE_INDEX_SKYLINE = new BooleanConfigParam( ConnectionProperties.ENABLE_INDEX_SKYLINE, false, true); @@ -2311,6 +2654,21 @@ public static void addSupportedParam(ConfigParam param) { true, true); + /** + * Whether to use the plan cache for columnar plans + */ + public static final BooleanConfigParam ENABLE_COLUMNAR_PLAN_CACHE = new BooleanConfigParam( + ConnectionProperties.ENABLE_COLUMNAR_PLAN_CACHE, + false, + true); + + public static final BooleanConfigParam ENABLE_COLUMNAR_PULL_UP_PROJECT = new BooleanConfigParam( + ConnectionProperties.ENABLE_COLUMNAR_PULL_UP_PROJECT, + true, + true); + /** + * Batch size of the SELECT used for UPDATE or DELETE.
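* (Editor's note, an assumption based on the name: the SELECT that feeds an UPDATE or DELETE fetches rows in batches of this size.)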
+ */ public static final LongConfigParam UPDATE_DELETE_SELECT_BATCH_SIZE = new LongConfigParam( ConnectionProperties.UPDATE_DELETE_SELECT_BATCH_SIZE, 0L, @@ -2347,18 +2705,6 @@ public static void addSupportedParam(ConfigParam param) { true ); - public static final BooleanConfigParam ENABLE_JAVA_UDF = new BooleanConfigParam( - ConnectionProperties.ENABLE_JAVA_UDF, - true, - true - ); - - public static final BooleanConfigParam CHECK_INVALID_JAVA_UDF = new BooleanConfigParam( - ConnectionProperties.CHECK_INVALID_JAVA_UDF, - true, - true - ); - public static final LongConfigParam MAX_JAVA_UDF_NUM = new LongConfigParam( ConnectionProperties.MAX_JAVA_UDF_NUM, 0L, @@ -2425,6 +2771,24 @@ public static void addSupportedParam(ConfigParam param) { true ); + public static final BooleanConfigParam ENABLE_LOCAL_PARTITION_WISE_JOIN = new BooleanConfigParam( + ConnectionProperties.ENABLE_LOCAL_PARTITION_WISE_JOIN, true, true); + + public static final BooleanConfigParam LOCAL_PAIRWISE_PROBE_SEPARATE = new BooleanConfigParam( + ConnectionProperties.LOCAL_PAIRWISE_PROBE_SEPARATE, + false, + true + ); + + public static final BooleanConfigParam JOIN_KEEP_PARTITION = new BooleanConfigParam( + ConnectionProperties.JOIN_KEEP_PARTITION, + true, + true + ); + + /** + * Enable direct plan, default true + */ public static final BooleanConfigParam ENABLE_DIRECT_PLAN = new BooleanConfigParam( ConnectionProperties.ENABLE_DIRECT_PLAN, true, @@ -2526,6 +2890,14 @@ public static void addSupportedParam(ConfigParam param) { public static final BooleanConfigParam CONN_POOL_XPROTO_XPLAN = new BooleanConfigParam( ConnectionProperties.CONN_POOL_XPROTO_XPLAN, true, true); + public static final LongConfigParam XPLAN_MAX_SCAN_ROWS = new LongConfigParam( + ConnectionProperties.XPLAN_MAX_SCAN_ROWS, 0L, Long.MAX_VALUE, + 1000L, true); + + /** + * x-protocol xplan expand star + * Unit: bool + */ public static final BooleanConfigParam CONN_POOL_XPROTO_XPLAN_EXPEND_STAR = new BooleanConfigParam( ConnectionProperties.CONN_POOL_XPROTO_XPLAN_EXPEND_STAR, true, true); @@ -2568,12 +2940,15 @@ public static void addSupportedParam(ConfigParam param) { ConnectionProperties.CONN_POOL_XPROTO_PIPE_BUFFER_SIZE, 1L, Long.MAX_VALUE, 256 * 1024 * 1024L, true); + /** + * x-protocol max DN concurrent. (default 500) (default 2000 for columnar) + */ public static final LongConfigParam XPROTO_MAX_DN_CONCURRENT = new LongConfigParam( ConnectionProperties.XPROTO_MAX_DN_CONCURRENT, 1L, Long.MAX_VALUE, - 500L, true); + 2000L, true); /** - * x-protocol max wait connection per DN. (default 100) + * x-protocol max wait connection per DN.
(default 100) (default 200 for columnar) */ public static final LongConfigParam XPROTO_MAX_DN_WAIT_CONNECTION = new LongConfigParam( ConnectionProperties.XPROTO_MAX_DN_WAIT_CONNECTION, 1L, Long.MAX_VALUE, @@ -2658,9 +3033,18 @@ public static class ConnectionParamValues { public static final LongConfigParam WORKLOAD_OSS_NET_THRESHOLD = new LongConfigParam( ConnectionProperties.WORKLOAD_OSS_NET_THRESHOLD, 0L, null, 2L, true); + public static final LongConfigParam WORKLOAD_COLUMNAR_ROW_THRESHOLD = new LongConfigParam( + ConnectionProperties.WORKLOAD_COLUMNAR_ROW_THRESHOLD, 0L, null, 500000L, true); + public static final StringConfigParam WORKLOAD_TYPE = new StringConfigParam( ConnectionProperties.WORKLOAD_TYPE, null, true); + public static final BooleanConfigParam ENABLE_OSS_MOCK_COLUMNAR = new BooleanConfigParam( + ConnectionProperties.ENABLE_OSS_MOCK_COLUMNAR, false, true); + + public static final BooleanConfigParam ENABLE_COLUMNAR_OPTIMIZER = new BooleanConfigParam( + ConnectionProperties.ENABLE_COLUMNAR_OPTIMIZER, true, true); + public static final StringConfigParam EXECUTOR_MODE = new StringConfigParam( ConnectionProperties.EXECUTOR_MODE, "NONE", true); @@ -2691,7 +3075,7 @@ public static class ConnectionParamValues { public static final IntConfigParam IN_SUB_QUERY_THRESHOLD = new IntConfigParam( ConnectionProperties.IN_SUB_QUERY_THRESHOLD, 2, Integer.MAX_VALUE, - 8, true); + 100000, true); public static final BooleanConfigParam ENABLE_IN_SUB_QUERY_FOR_DML = new BooleanConfigParam( ConnectionProperties.ENABLE_IN_SUB_QUERY_FOR_DML, false, Boolean.TRUE); @@ -2699,6 +3083,12 @@ public static class ConnectionParamValues { public static final BooleanConfigParam ENABLE_RUNTIME_FILTER = new BooleanConfigParam( ConnectionProperties.ENABLE_RUNTIME_FILTER, true, true); + public static final BooleanConfigParam ENABLE_LOCAL_RUNTIME_FILTER = new BooleanConfigParam( + ConnectionProperties.ENABLE_LOCAL_RUNTIME_FILTER, false, true); + + public static final BooleanConfigParam CHECK_RUNTIME_FILTER_SAME_FRAGMENT = new BooleanConfigParam( + ConnectionProperties.CHECK_RUNTIME_FILTER_SAME_FRAGMENT, false, true); + public static final LongConfigParam BLOOM_FILTER_BROADCAST_NUM = new LongConfigParam( ConnectionProperties.BLOOM_FILTER_BROADCAST_NUM, -1L, Long.MAX_VALUE, 20L, true); @@ -2767,7 +3157,7 @@ public static class ConnectionParamValues { false); public static final BooleanConfigParam ENABLE_DRIVING_STREAM_SCAN = new BooleanConfigParam( - ConnectionProperties.ENABLE_DRIVING_STREAM_SCAN, true, true); + ConnectionProperties.ENABLE_DRIVING_STREAM_SCAN, false, true); public static final BooleanConfigParam ENABLE_SIMPLIFY_TRACE_SQL = new BooleanConfigParam( ConnectionProperties.ENABLE_SIMPLIFY_TRACE_SQL, false, true); @@ -2775,12 +3165,25 @@ public static class ConnectionParamValues { public static final StringConfigParam PARAMETRIC_SIMILARITY_ALGO = new StringConfigParam( ConnectionProperties.PARAMETRIC_SIMILARITY_ALGO, "COSINE", true); + // whether to use new topn or not + public static final BooleanConfigParam NEW_TOPN = new BooleanConfigParam( + ConnectionProperties.NEW_TOPN, true, true); + public static final IntConfigParam TOPN_SIZE = new IntConfigParam( ConnectionProperties.TOPN_SIZE, 0, Integer.MAX_VALUE, 15, true); + // topn size upper bound + public static final IntConfigParam NEW_TOPN_SIZE = new IntConfigParam( + ConnectionProperties.NEW_TOPN_SIZE, 0, Integer.MAX_VALUE, 10000, true); + public static final IntConfigParam TOPN_MIN_NUM = new IntConfigParam( ConnectionProperties.TOPN_MIN_NUM, 1, 
Integer.MAX_VALUE, 3, true); + // minimum number required to use the new TopN; -1 means it is calculated by formula + public static final IntConfigParam NEW_TOPN_MIN_NUM = new IntConfigParam( + ConnectionProperties.NEW_TOPN_MIN_NUM, -1, Integer.MAX_VALUE, -1, true); + + //HTAP FEEDBACK public static final IntConfigParam FEEDBACK_WORKLOAD_TP_THRESHOLD = new IntConfigParam( ConnectionProperties.FEEDBACK_WORKLOAD_TP_THRESHOLD, 1, Integer.MAX_VALUE, -1, true); @@ -2839,6 +3242,12 @@ public static class ConnectionParamValues { public static final IntConfigParam MAX_PARTITION_COLUMN_COUNT = new IntConfigParam( ConnectionProperties.MAX_PARTITION_COLUMN_COUNT, 1, Integer.MAX_VALUE, 5, true); + /** + * The max length of a partition name (including the name of the subpartition template) + */ + public static final IntConfigParam MAX_PARTITION_NAME_LENGTH = new IntConfigParam( + ConnectionProperties.MAX_PARTITION_NAME_LENGTH, 16, 32, 16, true); + /** * Label if auto use range-key subpart for index of auto-part table, default is false */ @@ -2847,6 +3256,14 @@ public static class ConnectionParamValues { false, true); + /** + * Label if KEY syntax is used for all local indexes in SHOW CREATE TABLE + */ + public static final BooleanConfigParam ENABLE_USE_KEY_FOR_ALL_LOCAL_INDEX = new BooleanConfigParam( + ConnectionProperties.ENABLE_USE_KEY_FOR_ALL_LOCAL_INDEX, + false, + true); + /** * Label if auto use range/list columns partitions for "part by range/list", default is true */ @@ -2983,12 +3400,35 @@ public static class ConnectionParamValues { 10, true); + /** + * whether to enable the tp_slow alert + */ + public static final BooleanConfigParam ENABLE_TP_SLOW_ALERT = new BooleanConfigParam( + ConnectionProperties.ENABLE_TP_SLOW_ALERT, + true, + true); + + public static final BooleanConfigParam ENABLE_ALERT_TEST_DEFAULT = new BooleanConfigParam( + ConnectionProperties.ENABLE_ALERT_TEST_DEFAULT, + true, + true); + /** * whether call alert when use in test, used for test only */ public static final BooleanConfigParam ENABLE_ALERT_TEST = new BooleanConfigParam( ConnectionProperties.ENABLE_ALERT_TEST, false, + true); + + public static final BooleanConfigParam ALERT_STATISTIC_INTERRUPT = new BooleanConfigParam( + ConnectionProperties.ALERT_STATISTIC_INTERRUPT, + false, + false); + + public static final BooleanConfigParam ALERT_STATISTIC_INCONSISTENT = new BooleanConfigParam( + ConnectionProperties.ALERT_STATISTIC_INCONSISTENT, + false, false); public static final BooleanConfigParam ENABLE_BRANCH_AND_BOUND_OPTIMIZATION = new BooleanConfigParam( @@ -2997,6 +3437,17 @@ public static class ConnectionParamValues { public static final BooleanConfigParam ENABLE_BROADCAST_JOIN = new BooleanConfigParam( ConnectionProperties.ENABLE_BROADCAST_JOIN, true, true); + public static final BooleanConfigParam ENABLE_PARTITION_WISE_JOIN = new BooleanConfigParam( + ConnectionProperties.ENABLE_PARTITION_WISE_JOIN, true, true); + public static final BooleanConfigParam ENABLE_BROADCAST_LEFT = new BooleanConfigParam( + ConnectionProperties.ENABLE_BROADCAST_LEFT, false, true); + + public static final BooleanConfigParam ENABLE_PARTITION_WISE_AGG = new BooleanConfigParam( + ConnectionProperties.ENABLE_PARTITION_WISE_AGG, true, true); + + public static final BooleanConfigParam ENABLE_PARTITION_WISE_WINDOW = new BooleanConfigParam( + ConnectionProperties.ENABLE_PARTITION_WISE_WINDOW, true, true); + public static final IntConfigParam BROADCAST_SHUFFLE_PARALLELISM = new IntConfigParam( ConnectionProperties.BROADCAST_SHUFFLE_PARALLELISM, 1, Integer.MAX_VALUE, 64, true); @@ -3013,7
+3464,11 @@ public static class ConnectionParamValues { ConnectionProperties.ADVISE_TYPE, null, true); public static final BooleanConfigParam ENABLE_HLL = new BooleanConfigParam( - ConnectionProperties.ENABLE_HLL, false, true); + ConnectionProperties.ENABLE_HLL, true, true); + + public static final IntConfigParam HLL_PARALLELISM = new IntConfigParam( + ConnectionProperties.HLL_PARALLELISM, + 1, 1024, 1, true); public static final BooleanConfigParam STRICT_ENUM_CONVERT = new BooleanConfigParam( ConnectionProperties.STRICT_ENUM_CONVERT, false, true); @@ -3054,6 +3509,15 @@ public static class ConnectionParamValues { public static final IntConfigParam AUTO_COLLECT_NDV_SKETCH = new IntConfigParam( ConnectionProperties.AUTO_COLLECT_NDV_SKETCH, 1, Integer.MAX_VALUE, 24, true); + public static final BooleanConfigParam ENABLE_NDV_USE_COLUMNAR = new BooleanConfigParam( + ConnectionProperties.ENABLE_NDV_USE_COLUMNAR, false, true); + + public static final BooleanConfigParam ENABLE_MPP_NDV_USE_COLUMNAR = new BooleanConfigParam( + ConnectionProperties.ENABLE_MPP_NDV_USE_COLUMNAR, false, true); + + /** + * expire time(sec) for ndv sketch info + */ public static final IntConfigParam STATISTIC_NDV_SKETCH_EXPIRE_TIME = new IntConfigParam( ConnectionProperties.STATISTIC_NDV_SKETCH_EXPIRE_TIME, 60, Integer.MAX_VALUE, 1000 * 60 * 60 * 24 * 7, true); @@ -3187,13 +3651,6 @@ public static class ConnectionParamValues { 1024L, false); - public static final IntConfigParam TABLEGROUP_REORG_FASTCHECKER_PARALLELISM = new IntConfigParam( - ConnectionProperties.TABLEGROUP_REORG_FASTCHECKER_PARALLELISM, - -1, - 128, - 4, - false); - public static final StringConfigParam TABLEGROUP_REORG_FINAL_TABLE_STATUS_DEBUG = new StringConfigParam(ConnectionProperties.TABLEGROUP_REORG_FINAL_TABLE_STATUS_DEBUG, "", @@ -3215,7 +3672,7 @@ public static class ConnectionParamValues { new IntConfigParam(ConnectionProperties.CHANGE_SET_APPLY_BATCH, 1, 10 * 1024, 128, false); public static final LongConfigParam CHANGE_SET_MEMORY_LIMIT = new LongConfigParam( - ConnectionProperties.CHANGE_SET_MEMORY_LIMIT, 1024L, 1024 * 1024 * 1024L, 8 * 1024 * 1024L, false); + ConnectionProperties.CHANGE_SET_MEMORY_LIMIT, 1024L, 16 * 1024 * 1024L, 1024 * 1024L, false); public static final BooleanConfigParam CN_ENABLE_CHANGESET = new BooleanConfigParam(ConnectionProperties.CN_ENABLE_CHANGESET, true, true); @@ -3306,27 +3763,6 @@ public static class ConnectionParamValues { public static final LongConfigParam OSS_FS_MAX_WRITE_RATE = new LongConfigParam( ConnectionProperties.OSS_FS_MAX_WRITE_RATE, -1L, Long.MAX_VALUE, -1L, true); - public static final BooleanConfigParam OSS_FS_VALIDATION_ENABLE = new BooleanConfigParam( - ConnectionProperties.OSS_FS_VALIDATION_ENABLE, false, true); - - /** - * Period of Expiration (TimeUnit: Day) - */ - public static final LongConfigParam OSS_FS_CACHE_TTL = new LongConfigParam( - ConnectionProperties.OSS_FS_CACHE_TTL, 1L, Long.MAX_VALUE, 7L, true); - - /** - * Max Entries in OSS Cache (An entry represents a remote file path) - */ - public static final LongConfigParam OSS_FS_MAX_CACHED_ENTRIES = new LongConfigParam( - ConnectionProperties.OSS_FS_MAX_CACHED_ENTRIES, 1L, Long.MAX_VALUE, 2048L, true); - - public static final LongConfigParam OSS_FS_HOT_CACHE_TTL = new LongConfigParam( - ConnectionProperties.OSS_FS_HOT_CACHE_TTL, 1 * 1000L, 2 * 24 * 3600 * 1000L, 3 * 1000L, true); - - public static final LongConfigParam OSS_FS_MAX_HOT_CACHED_ENTRIES = new LongConfigParam( - ConnectionProperties.OSS_FS_MAX_HOT_CACHED_ENTRIES, 1L, 4096L, 
2048L, true); - public static final LongConfigParam OSS_ORC_MAX_MERGE_DISTANCE = new LongConfigParam(ConnectionProperties.OSS_ORC_MAX_MERGE_DISTANCE, 0L, 2L * 1024 * 1024 * 1024, 64L * 1024, true); @@ -3373,17 +3809,38 @@ public static class ConnectionParamValues { public static final BooleanConfigParam ENABLE_OSS_ZERO_COPY = new BooleanConfigParam( ConnectionProperties.ENABLE_OSS_ZERO_COPY, false, true); + /** + * The compatibility flag should be read from the execution context rather than the param manager, + * to allow automatic switching between columnar and row mode + */ public static final BooleanConfigParam ENABLE_OSS_COMPATIBLE = new BooleanConfigParam( ConnectionProperties.ENABLE_OSS_COMPATIBLE, true, true); + /** + * Enable shuffle compatibility under partition-wise execution. + * Changing this value is dangerous!!! + */ + public static final BooleanConfigParam ENABLE_PAIRWISE_SHUFFLE_COMPATIBLE = new BooleanConfigParam( + ConnectionProperties.ENABLE_PAIRWISE_SHUFFLE_COMPATIBLE, true, true); + + /** + * For PolarDB-X version >= 5.4.19 + * Indicates whether the user has enabled the cold data archive feature + * 1: ON + * 0: OFF + * -1: IMPLICIT, fallback to check whether file_storage_info record exists or not + */ + public static final IntConfigParam COLD_DATA_STATUS = new IntConfigParam( + ConnectionProperties.COLD_DATA_STATUS, -1, 1, -1, false); + public static final BooleanConfigParam ENABLE_OSS_DELAY_MATERIALIZATION_ON_EXCHANGE = new BooleanConfigParam( ConnectionProperties.ENABLE_OSS_DELAY_MATERIALIZATION_ON_EXCHANGE, false, true); public static final BooleanConfigParam ENABLE_OSS_FILE_CONCURRENT_SPLIT_ROUND_ROBIN = new BooleanConfigParam( ConnectionProperties.ENABLE_OSS_FILE_CONCURRENT_SPLIT_ROUND_ROBIN, false, true); - public static final BooleanConfigParam ENABLE_REUSE_VECTOR = new BooleanConfigParam( - ConnectionProperties.ENABLE_REUSE_VECTOR, false, true); + public static final BooleanConfigParam ENABLE_COLUMNAR_DECIMAL64 = new BooleanConfigParam( + ConnectionProperties.ENABLE_COLUMNAR_DECIMAL64, true, true); public static final BooleanConfigParam ENABLE_DECIMAL_FAST_VEC = new BooleanConfigParam( ConnectionProperties.ENABLE_DECIMAL_FAST_VEC, false, true); @@ -3391,6 +3848,9 @@ public static class ConnectionParamValues { public static final BooleanConfigParam ENABLE_UNIQUE_HASH_KEY = new BooleanConfigParam( ConnectionProperties.ENABLE_UNIQUE_HASH_KEY, false, true); + public static final BooleanConfigParam ENABLE_PRUNE_EXCHANGE_PARTITION = new BooleanConfigParam( + ConnectionProperties.ENABLE_PRUNE_EXCHANGE_PARTITION, true, true); + public static final IntConfigParam BLOCK_BUILDER_CAPACITY = new IntConfigParam( ConnectionProperties.BLOCK_BUILDER_CAPACITY, 1, Integer.MAX_VALUE, 4, true); @@ -3438,19 +3898,10 @@ public static class ConnectionParamValues { true); /** - * the min size of IN expr that would be pruned + * the MAX size of IN expr being pruned */ - public static final IntConfigParam IN_PRUNE_SIZE = new IntConfigParam( - ConnectionProperties.IN_PRUNE_SIZE, 0, Integer.MAX_VALUE, 150, true); - - /** - * the batch size of IN expr being pruned - */ - public static final IntConfigParam IN_PRUNE_STEP_SIZE = new IntConfigParam( - ConnectionProperties.IN_PRUNE_STEP_SIZE, 1, Integer.MAX_VALUE, 10, true); - public static final IntConfigParam IN_PRUNE_MAX_TIME = new IntConfigParam( - ConnectionProperties.IN_PRUNE_MAX_TIME, 1, Integer.MAX_VALUE, 100, true); + ConnectionProperties.IN_PRUNE_MAX_TIME, 1, Integer.MAX_VALUE, 100000, true); public static final IntConfigParam MAX_IN_PRUNE_CACHE_SIZE = new IntConfigParam(
     public static final IntConfigParam MAX_IN_PRUNE_CACHE_SIZE = new IntConfigParam(
         ConnectionProperties.MAX_IN_PRUNE_CACHE_SIZE, 0, Integer.MAX_VALUE, 200, true);

@@ -3492,6 +3943,16 @@ public static class ConnectionParamValues {
         false,
         false);

+    public static final BooleanConfigParam OUTPUT_MYSQL_ERROR_CODE =
+        new BooleanConfigParam(ConnectionProperties.OUTPUT_MYSQL_ERROR_CODE,
+        false,
+        false);
+
+    public static final StringConfigParam MAPPING_TO_MYSQL_ERROR_CODE =
+        new StringConfigParam(ConnectionProperties.MAPPING_TO_MYSQL_ERROR_CODE,
+        "",
+        false);
+
     public static final StringConfigParam PURGE_OSS_FILE_CRON_EXPR = new StringConfigParam(
         ConnectionProperties.PURGE_OSS_FILE_CRON_EXPR, "0 0 1 ? * WED", true);

@@ -3549,9 +4010,6 @@ public static class ConnectionParamValues {
     public static final LongConfigParam DDL_PLAN_SCHEDULER_DELAY = new LongConfigParam(
         ConnectionProperties.DDL_PLAN_SCHEDULER_DELAY, 10L, 1800L, 60L, true);

-    public static final BooleanConfigParam ENABLE_FAST_MOCK = new BooleanConfigParam(
-        ConnectionProperties.ENABLE_FAST_MOCK, false, true);
-
     public static final IntConfigParam OPTIMIZE_TABLE_PARALLELISM = new IntConfigParam(
         ConnectionProperties.OPTIMIZE_TABLE_PARALLELISM, 1, 4096, 4096, true);

@@ -3659,7 +4117,7 @@ public static class ConnectionParamValues {
         ConnectionProperties.PREFETCH_EXECUTE_POLICY, -1, 3, -1, true);

     public static final BooleanConfigParam ENABLE_REPLICA = new BooleanConfigParam(
-        ConnectionProperties.ENABLE_REPLICA, false, true);
+        ConnectionProperties.ENABLE_REPLICA, true, true);

     /**
      * Whether enable async commit.
@@ -3724,6 +4182,224 @@ public static class ConnectionParamValues {
         100L,
         false);

+    public static final LongConfigParam MAX_CCI_COUNT =
+        new LongConfigParam(ConnectionProperties.MAX_CCI_COUNT,
+        0L,
+        Long.MAX_VALUE,
+        1L,
+        false);
+
+    public static final BooleanConfigParam ENABLE_CCI_ON_TABLE_WITH_IMPLICIT_PK = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_CCI_ON_TABLE_WITH_IMPLICIT_PK,
+        false,
+        true
+    );
+
+    public static final BooleanConfigParam SKIP_COLUMNAR_WAIT_TASK = new BooleanConfigParam(
+        ConnectionProperties.SKIP_COLUMNAR_WAIT_TASK,
+        false,
+        true
+    );
+
+    /**
+     * COLUMNAR_BITMAP_INDEX_MAX_SCAN_SIZE_FOR_PRUNING
+     */
+    public static final LongConfigParam COLUMNAR_BITMAP_INDEX_MAX_SCAN_SIZE_FOR_PRUNING = new LongConfigParam(
+        ConnectionProperties.COLUMNAR_BITMAP_INDEX_MAX_SCAN_SIZE_FOR_PRUNING,
+        -1L,
+        Long.MAX_VALUE,
+        1024L,
+        false);
+
+    /**
+     * To enable the columnar scan exec.
+     */
+    public static final BooleanConfigParam ENABLE_COLUMNAR_SCAN_EXEC = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_COLUMNAR_SCAN_EXEC,
+        true,
+        true
+    );
+    /**
+     * The maximum number of groups in a single scan work unit.
+     */
+    public static final IntConfigParam COLUMNAR_WORK_UNIT = new IntConfigParam(
+        ConnectionProperties.COLUMNAR_WORK_UNIT,
+        1,
+        Integer.MAX_VALUE,
+        10000,
+        true
+    );
+    /**
+     * The policy of table scan: IO_PRIORITY, FILTER_PRIORITY, IO_ON_DEMAND.
+     */
+    public static final IntConfigParam SCAN_POLICY = new IntConfigParam(
+        ConnectionProperties.SCAN_POLICY,
+        1,
+        3,
+        2,
+        true
+    );
+    /**
+     * To enable the block cache.
+     */
+    public static final BooleanConfigParam ENABLE_BLOCK_CACHE = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_BLOCK_CACHE,
+        true,
+        true
+    );
+
+    /**
+     * To enable the usage of the in-flight block cache.
+     */
+    public static final BooleanConfigParam ENABLE_USE_IN_FLIGHT_BLOCK_CACHE = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_USE_IN_FLIGHT_BLOCK_CACHE,
+        true,
+        true
+    );
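SCAN_POLICY above encodes the table-scan strategy as an int in [1, 3], with 2 as the default. A plausible decoding into the three named policies, assuming the order listed in the Javadoc maps positionally onto codes 1..3 (the enum below and its inline glosses are illustrative, not the executor's actual mapping):

```java
enum ScanPolicy {
    IO_PRIORITY(1),      // presumably: schedule IO eagerly, filter afterwards
    FILTER_PRIORITY(2),  // presumably: evaluate pushed-down filters first to skip IO
    IO_ON_DEMAND(3);     // presumably: load column chunks only when actually consumed

    private final int code;

    ScanPolicy(int code) {
        this.code = code;
    }

    /** Decodes the SCAN_POLICY connection parameter; 2 (FILTER_PRIORITY) is the documented default. */
    static ScanPolicy fromCode(int code) {
        for (ScanPolicy p : values()) {
            if (p.code == code) {
                return p;
            }
        }
        return FILTER_PRIORITY;
    }
}
```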
+
+    /**
+     * To enable the verbose metrics report.
+     */
+    public static final BooleanConfigParam ENABLE_VERBOSE_METRICS_REPORT = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_VERBOSE_METRICS_REPORT,
+        false,
+        true
+    );
+    public static final BooleanConfigParam ENABLE_COLUMNAR_METRICS = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_COLUMNAR_METRICS,
+        false,
+        true
+    );
+    /**
+     * To enable index pruning on ORC.
+     */
+    public static final BooleanConfigParam ENABLE_INDEX_PRUNING = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_INDEX_PRUNING,
+        true,
+        true
+    );
+
+    /**
+     * If disabled, a SliceBlock with a dictionary will be converted to
+     * a SliceBlock with values.
+     */
+    public static final BooleanConfigParam ENABLE_COLUMNAR_SLICE_DICT = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_COLUMNAR_SLICE_DICT,
+        true,
+        true
+    );
+
+    /**
+     * To enable cancellation of the stripe-loader's loading process.
+     */
+    public static final BooleanConfigParam ENABLE_CANCEL_STRIPE_LOADING = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_CANCEL_STRIPE_LOADING,
+        false,
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_LAZY_BLOCK_ACTIVE_LOADING = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_LAZY_BLOCK_ACTIVE_LOADING,
+        true,
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_COLUMN_READER_LOCK = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_COLUMN_READER_LOCK,
+        true,
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_VEC_ACCUMULATOR = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_VEC_ACCUMULATOR,
+        false,
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_VEC_BUILD_JOIN_ROW = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_VEC_BUILD_JOIN_ROW,
+        false,
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_VEC_JOIN = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_VEC_JOIN,
+        false,
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_LOCAL_EXCHANGE_BATCH = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_LOCAL_EXCHANGE_BATCH,
+        true,
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_JOIN_CONDITION_PRUNING = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_JOIN_CONDITION_PRUNING,
+        true,
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_REUSE_VECTOR = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_REUSE_VECTOR, false, true);
+
+    public static final BooleanConfigParam ENABLE_EXCHANGE_PARTITION_OPTIMIZATION = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_EXCHANGE_PARTITION_OPTIMIZATION,
+        true,
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_DRIVER_OBJECT_POOL = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_DRIVER_OBJECT_POOL,
+        false,
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_COLUMNAR_SCAN_SELECTION = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_COLUMNAR_SCAN_SELECTION,
+        false,
+        true
+    );
+
+    public static final FloatConfigParam BLOCK_CACHE_MEMORY_SIZE_FACTOR = new FloatConfigParam(
+        ConnectionProperties.BLOCK_CACHE_MEMORY_SIZE_FACTOR,
+        .1f,
+        .8f,
+        .6f,
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_BLOCK_BUILDER_BATCH_WRITING = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_BLOCK_BUILDER_BATCH_WRITING,
+        true,
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_SCAN_RANDOM_SHUFFLE = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_SCAN_RANDOM_SHUFFLE,
+        false,
+        true
+    );
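BLOCK_CACHE_MEMORY_SIZE_FACTOR above is a float in [0.1, 0.8] with default 0.6. Assuming the factor scales the block cache budget against some memory ceiling (the sketch below uses the JVM max heap purely for illustration; the real base might be a managed memory pool instead), the arithmetic is simple:

```java
final class BlockCacheBudget {
    /**
     * Computes a block cache budget as a fraction of the maximum heap,
     * e.g. factor 0.6 on an 8 GiB heap yields roughly 4.8 GiB.
     */
    static long blockCacheBudgetBytes(float factor) {
        if (factor < 0.1f || factor > 0.8f) {
            throw new IllegalArgumentException("factor must be within [0.1, 0.8]: " + factor);
        }
        long maxHeap = Runtime.getRuntime().maxMemory();
        return (long) (maxHeap * factor);
    }
}
```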
+
+    public static final IntConfigParam SCAN_RANDOM_SHUFFLE_THRESHOLD =
+        new IntConfigParam(ConnectionProperties.SCAN_RANDOM_SHUFFLE_THRESHOLD,
+        1,
+        Integer.MAX_VALUE,
+        3,
+        false);
+
+    public static final BooleanConfigParam ENABLE_AUTOMATIC_COLUMNAR_PARAMS = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_AUTOMATIC_COLUMNAR_PARAMS,
+        true,
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_FILE_STORAGE_DELTA_STATISTIC = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_FILE_STORAGE_DELTA_STATISTIC,
+        false,
+        true
+    );
+
     public static final BooleanConfigParam ENABLE_SIMPLIFY_SUBQUERY_SQL = new BooleanConfigParam(
         ConnectionProperties.ENABLE_SIMPLIFY_SUBQUERY_SQL,
         false,
         true);
@@ -3786,6 +4462,12 @@ public static class ConnectionParamValues {
         true
     );

+    public static final BooleanConfigParam BACKFILL_USING_BINARY = new BooleanConfigParam(
+        ConnectionProperties.BACKFILL_USING_BINARY,
+        true,
+        false
+    );
+
     public static final BooleanConfigParam ENABLE_ROLLBACK_TO_READY = new BooleanConfigParam(
         ConnectionProperties.ENABLE_ROLLBACK_TO_READY,
         true,
@@ -3800,9 +4482,6 @@ public static class ConnectionParamValues {
         true
     );

-    public static final BooleanConfigParam ENABLE_SINGLE_SHARD_WRITE = new BooleanConfigParam(
-        ConnectionProperties.ENABLE_SINGLE_SHARD_WRITE, true, true);
-
     public static final BooleanConfigParam CHECK_RESPONSE_IN_MEM = new BooleanConfigParam(
         ConnectionProperties.CHECK_RESPONSE_IN_MEM,
         true,
@@ -3820,4 +4499,370 @@ public static class ConnectionParamValues {
         true,
         true
     );
+
+    public static final IntConfigParam ZONEMAP_MAX_GROUP_SIZE = new IntConfigParam(
+        ConnectionProperties.ZONEMAP_MAX_GROUP_SIZE, 1, 100000, 5000, true);
+
+    public static final LongConfigParam PHYSICAL_BACKFILL_BATCH_SIZE = new LongConfigParam(
+        ConnectionProperties.PHYSICAL_BACKFILL_BATCH_SIZE,
+        1024L,
+        1024L * 1024 * 1024,
+        1024L * 64,
+        false);
+
+    public static final LongConfigParam PHYSICAL_BACKFILL_MIN_SUCCESS_BATCH_UPDATE = new LongConfigParam(
+        ConnectionProperties.PHYSICAL_BACKFILL_MIN_SUCCESS_BATCH_UPDATE,
+        1L,
+        Long.MAX_VALUE,
+        1000L,
+        false);
+
+    public static final LongConfigParam PHYSICAL_BACKFILL_MIN_WRITE_BATCH_PER_THREAD = new LongConfigParam(
+        ConnectionProperties.PHYSICAL_BACKFILL_MIN_WRITE_BATCH_PER_THREAD,
+        1L,
+        Long.MAX_VALUE,
+        100L,
+        false);
+
+    public static final LongConfigParam PHYSICAL_BACKFILL_PARALLELISM = new LongConfigParam(
+        ConnectionProperties.PHYSICAL_BACKFILL_PARALLELISM, 1L, Long.MAX_VALUE, 8L, true);
+
+    public static final BooleanConfigParam PHYSICAL_BACKFILL_ENABLE = new BooleanConfigParam(
+        ConnectionProperties.PHYSICAL_BACKFILL_ENABLE,
+        false,
+        true);
+
+    public static final BooleanConfigParam PHYSICAL_BACKFILL_FROM_FOLLOWER = new BooleanConfigParam(
+        ConnectionProperties.PHYSICAL_BACKFILL_FROM_FOLLOWER,
+        true,
+        true);
+
+    public static final LongConfigParam PHYSICAL_BACKFILL_MAX_RETRY_WAIT_FOLLOWER_TO_LSN = new LongConfigParam(
+        ConnectionProperties.PHYSICAL_BACKFILL_MAX_RETRY_WAIT_FOLLOWER_TO_LSN,
+        10L, Long.MAX_VALUE, 1200L, true);
+
+    //30 minutes
+    public static final LongConfigParam PHYSICAL_BACKFILL_MAX_SLAVE_LATENCY = new LongConfigParam(
+        ConnectionProperties.PHYSICAL_BACKFILL_MAX_SLAVE_LATENCY,
+        10L, Long.MAX_VALUE, 1800L, true);
+
+    //5s
+    public static final LongConfigParam PHYSICAL_BACKFILL_NET_SPEED_TEST_TIME = new LongConfigParam(
+        ConnectionProperties.PHYSICAL_BACKFILL_NET_SPEED_TEST_TIME,
+        10L, Long.MAX_VALUE, 5000L, true);
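PHYSICAL_BACKFILL_SPEED_LIMIT, defined just below, defaults to 250 * 1024 * 1024 bytes/s (the "250MB/s" comment) and its lower bound is -1, which by analogy with OSS_FS_MAX_WRITE_RATE presumably means "unlimited". A minimal pacing sketch for such a byte-rate limit; the `BackfillThrottle` class is illustrative, not the engine's actual rate limiter:

```java
final class BackfillThrottle {
    private final long bytesPerSecond; // negative means unlimited (assumed semantics of -1)
    private long windowStartNanos = System.nanoTime();
    private long bytesInWindow = 0;

    BackfillThrottle(long bytesPerSecond) {
        this.bytesPerSecond = bytesPerSecond;
    }

    /** Blocks just long enough to keep the average transfer rate under the limit. */
    synchronized void acquire(long batchBytes) throws InterruptedException {
        if (bytesPerSecond < 0) {
            return; // throttling disabled
        }
        bytesInWindow += batchBytes;
        long elapsedNanos = System.nanoTime() - windowStartNanos;
        long expectedNanos = bytesInWindow * 1_000_000_000L / bytesPerSecond;
        if (expectedNanos > elapsedNanos) {
            Thread.sleep((expectedNanos - elapsedNanos) / 1_000_000L);
        }
        if (elapsedNanos > 1_000_000_000L) { // reset the accounting window every second
            windowStartNanos = System.nanoTime();
            bytesInWindow = 0;
        }
    }
}
```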
+
+    public static final BooleanConfigParam IMPORT_TABLESPACE_TASK_EXEC_SERIALLY = new BooleanConfigParam(
+        ConnectionProperties.IMPORT_TABLESPACE_TASK_EXEC_SERIALLY,
+        false,
+        true);
+
+    public static final BooleanConfigParam PHYSICAL_BACKFILL_IGNORE_CFG = new BooleanConfigParam(
+        ConnectionProperties.PHYSICAL_BACKFILL_IGNORE_CFG,
+        false,
+        true);
+
+    //default 250MB/s
+    public static final LongConfigParam PHYSICAL_BACKFILL_SPEED_LIMIT = new LongConfigParam(
+        ConnectionProperties.PHYSICAL_BACKFILL_SPEED_LIMIT,
+        -1L, Long.MAX_VALUE, 250 * 1024 * 1024L, true);
+
+    public static final BooleanConfigParam PHYSICAL_BACKFILL_WAIT_LSN_WHEN_ROLLBACK = new BooleanConfigParam(
+        ConnectionProperties.PHYSICAL_BACKFILL_WAIT_LSN_WHEN_ROLLBACK,
+        true,
+        true);
+
+    public static final BooleanConfigParam PHYSICAL_BACKFILL_STORAGE_HEALTHY_CHECK = new BooleanConfigParam(
+        ConnectionProperties.PHYSICAL_BACKFILL_STORAGE_HEALTHY_CHECK,
+        true,
+        true);
+
+    public static final BooleanConfigParam PHYSICAL_BACKFILL_IMPORT_TABLESPACE_BY_LEADER = new BooleanConfigParam(
+        ConnectionProperties.PHYSICAL_BACKFILL_IMPORT_TABLESPACE_BY_LEADER,
+        true,
+        true);
+
+    public static final BooleanConfigParam PHYSICAL_BACKFILL_SPEED_TEST = new BooleanConfigParam(
+        ConnectionProperties.PHYSICAL_BACKFILL_SPEED_TEST,
+        false,
+        true);
+
+    /**
+     * whether the rebalance maintenance window is enabled
+     */
+    public static final BooleanConfigParam REBALANCE_MAINTENANCE_ENABLE = new BooleanConfigParam(
+        ConnectionProperties.REBALANCE_MAINTENANCE_ENABLE,
+        true,
+        true);
+
+    /**
+     * rebalance maintenance window start point
+     */
+    public static final StringConfigParam REBALANCE_MAINTENANCE_TIME_START =
+        new StringConfigParam(ConnectionProperties.REBALANCE_MAINTENANCE_TIME_START, "00:00", true);
+
+    /**
+     * rebalance maintenance window stop point
+     */
+    public static final StringConfigParam REBALANCE_MAINTENANCE_TIME_END =
+        new StringConfigParam(ConnectionProperties.REBALANCE_MAINTENANCE_TIME_END, "00:00", true);
+
+    public static final BooleanConfigParam CANCEL_REBALANCE_JOB_DUE_MAINTENANCE = new BooleanConfigParam(
+        ConnectionProperties.CANCEL_REBALANCE_JOB_DUE_MAINTENANCE,
+        false,
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_DEADLOCK_DETECTION_80 = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_DEADLOCK_DETECTION_80,
+        false,
+        true
+    );
+
+    public static final BooleanConfigParam IGNORE_TRANSACTION_POLICY_NO_TRANSACTION = new BooleanConfigParam(
+        ConnectionProperties.IGNORE_TRANSACTION_POLICY_NO_TRANSACTION,
+        false,
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_LOGICAL_TABLE_META = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_LOGICAL_TABLE_META,
+        false,
+        true
+    );
+
+    public static final StringConfigParam OPTIMIZER_TYPE = new StringConfigParam(
+        ConnectionProperties.OPTIMIZER_TYPE,
+        "",
+        true
+    );
+
+    public static final BooleanConfigParam MOCK_COLUMNAR_INDEX = new BooleanConfigParam(
+        ConnectionProperties.MOCK_COLUMNAR_INDEX,
+        false,
+        true
+    );
+
+    public static final StringConfigParam MCI_FORMAT = new StringConfigParam(
+        ConnectionProperties.MCI_FORMAT,
+        "orc",
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_COLUMNAR_AFTER_CBO_PLANNER = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_COLUMNAR_AFTER_CBO_PLANNER,
+        true,
+        true
+    );
+
+    public static final IntConfigParam PUSH_PROJECT_INPUT_REF_THRESHOLD = new IntConfigParam(
+        ConnectionProperties.PUSH_PROJECT_INPUT_REF_THRESHOLD,
+        1,
+        1024,
+        3,
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_ENCDB = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_ENCDB,
+        false,
+        true
+    );
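The rebalance maintenance window above is given as two HH:mm strings, both defaulting to "00:00". A window like 22:00 to 02:00 wraps past midnight, which is easy to get wrong; here is a wrap-aware sketch. Treating start == end as an empty window is an assumption of this sketch, not documented behavior:

```java
import java.time.LocalTime;

final class MaintenanceWindow {
    /**
     * Returns true when 'now' falls inside the daily window [start, end),
     * handling windows that wrap past midnight (e.g. 22:00 -> 02:00).
     * With the defaults ("00:00" to "00:00") the window is treated as empty.
     */
    static boolean inWindow(String start, String end, LocalTime now) {
        LocalTime s = LocalTime.parse(start);
        LocalTime e = LocalTime.parse(end);
        if (s.equals(e)) {
            return false; // degenerate window
        }
        if (s.isBefore(e)) {
            return !now.isBefore(s) && now.isBefore(e);
        }
        // wraps past midnight
        return !now.isBefore(s) || now.isBefore(e);
    }
}
```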
+
+    public static final BooleanConfigParam ENABLE_XXHASH_RF_IN_BUILD = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_XXHASH_RF_IN_BUILD,
+        true,
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_XXHASH_RF_IN_FILTER = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_XXHASH_RF_IN_FILTER,
+        true,
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_NEW_RF = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_NEW_RF,
+        false,
+        true
+    );
+
+    public static final LongConfigParam GLOBAL_RF_ROWS_UPPER_BOUND =
+        new LongConfigParam(ConnectionProperties.GLOBAL_RF_ROWS_UPPER_BOUND,
+        1000L,
+        Long.MAX_VALUE,
+        20000000L,
+        false);
+
+    public static final LongConfigParam GLOBAL_RF_ROWS_LOWER_BOUND =
+        new LongConfigParam(ConnectionProperties.GLOBAL_RF_ROWS_LOWER_BOUND,
+        1L,
+        Long.MAX_VALUE,
+        4096L,
+        false);
+
+    public static final BooleanConfigParam ENABLE_SKIP_COMPRESSION_IN_ORC = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_SKIP_COMPRESSION_IN_ORC,
+        false,
+        true
+    );
+
+    public static final BooleanConfigParam ONLY_CACHE_PRIMARY_KEY_IN_BLOCK_CACHE = new BooleanConfigParam(
+        ConnectionProperties.ONLY_CACHE_PRIMARY_KEY_IN_BLOCK_CACHE,
+        false,
+        true
+    );
+
+    public static final IntConfigParam NEW_RF_SAMPLE_COUNT = new IntConfigParam(
+        ConnectionProperties.NEW_RF_SAMPLE_COUNT,
+        1,
+        Integer.MAX_VALUE,
+        10,
+        true
+    );
+
+    public static final FloatConfigParam NEW_RF_FILTER_RATIO_THRESHOLD = new FloatConfigParam(
+        ConnectionProperties.NEW_RF_FILTER_RATIO_THRESHOLD,
+        0.05f,
+        1f,
+        0.25f,
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_LBAC = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_LBAC,
+        false,
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_VALUES_PUSHDOWN = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_VALUES_PUSHDOWN,
+        true,
+        false
+    );
+
+    /**
+     * CDC random token for DDL SQL
+     */
+    public final static StringConfigParam CDC_RANDOM_DDL_TOKEN = new StringConfigParam(
+        ConnectionProperties.CDC_RANDOM_DDL_TOKEN,
+        "",
+        false);
+
+    public static final BooleanConfigParam ENABLE_IMPLICIT_TABLE_GROUP = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_IMPLICIT_TABLE_GROUP,
+        true,
+        true
+    );
+
+    public static final BooleanConfigParam ALLOW_AUTO_CREATE_TABLEGROUP =
+        new BooleanConfigParam(ConnectionProperties.ALLOW_AUTO_CREATE_TABLEGROUP,
+        true,
+        false);
+
+    public static final StringConfigParam SUPER_WRITE = new StringConfigParam(
+        ConnectionProperties.SUPER_WRITE,
+        "false",
+        true
+    );
+
+    public static final BooleanConfigParam ENABLE_EXTRACT_STREAM_NAME_FROM_USER = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_EXTRACT_STREAM_NAME_FROM_USER,
+        true,
+        false);
+
+    public static final LongConfigParam SNAPSHOT_TS = new LongConfigParam(
+        ConnectionProperties.SNAPSHOT_TS,
+        Long.MIN_VALUE,
+        Long.MAX_VALUE,
+        -1L,
+        true
+    );
+
+    public static final BooleanConfigParam SKIP_CHECK_CCI_TASK = new BooleanConfigParam(
+        ConnectionProperties.SKIP_CHECK_CCI_TASK,
+        true,
+        true
+    );
+
+    public static final BooleanConfigParam FORCE_CCI_VISIBLE = new BooleanConfigParam(
+        ConnectionProperties.FORCE_CCI_VISIBLE,
+        false,
+        true
+    );
+
+    /**
+     * Make OSS table scan return only the deleted data in ORC files.
+     */
+    public static final BooleanConfigParam ENABLE_ORC_DELETED_SCAN = new BooleanConfigParam(
+        ConnectionProperties.ENABLE_ORC_DELETED_SCAN,
+        false,
+        true
+    );
+
+    /**
+     * Make table scan return ORC raw-type blocks: long blocks, double blocks, and byte-array blocks.
+ */ + public static final BooleanConfigParam ENABLE_ORC_RAW_TYPE_BLOCK = new BooleanConfigParam( + ConnectionProperties.ENABLE_ORC_RAW_TYPE_BLOCK, + false, + true + ); + + public static final StringConfigParam FORCE_READ_ORC_FILE = new StringConfigParam( + ConnectionProperties.FORCE_READ_ORC_FILE, + null, + true + ); + + public static final BooleanConfigParam READ_CSV_ONLY = new BooleanConfigParam( + ConnectionProperties.READ_CSV_ONLY, + false, + true + ); + + public static final BooleanConfigParam READ_ORC_ONLY = new BooleanConfigParam( + ConnectionProperties.READ_ORC_ONLY, + false, + true + ); + + public static final BooleanConfigParam ENABLE_FAST_CCI_CHECKER = new BooleanConfigParam( + ConnectionProperties.ENABLE_FAST_CCI_CHECKER, + true, + true + ); + + public static final BooleanConfigParam ENABLE_FAST_PARSE_ORC_RAW_TYPE = new BooleanConfigParam( + ConnectionProperties.ENABLE_FAST_PARSE_ORC_RAW_TYPE, + true, + true + ); + + public static final BooleanConfigParam FORCE_2PC_DURING_CCI_CHECK = new BooleanConfigParam( + ConnectionProperties.FORCE_2PC_DURING_CCI_CHECK, + false, + true + ); + + public static final BooleanConfigParam ENABLE_XA_TSO = new BooleanConfigParam( + ConnectionProperties.ENABLE_XA_TSO, + true, + true + ); + + public static final BooleanConfigParam ENABLE_AUTO_COMMIT_TSO = new BooleanConfigParam( + ConnectionProperties.ENABLE_AUTO_COMMIT_TSO, + true, + true + ); + + public static final BooleanConfigParam ENABLE_1PC_OPT = new BooleanConfigParam( + ConnectionProperties.ENABLE_1PC_OPT, + true, + true + ); + + public static final BooleanConfigParam ENABLE_SINGLE_SHARD_WRITE = new BooleanConfigParam( + ConnectionProperties.ENABLE_SINGLE_SHARD_WRITE, true, true); } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/ConnectionProperties.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/ConnectionProperties.java index 0aea802de..5c89c64dd 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/ConnectionProperties.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/ConnectionProperties.java @@ -51,6 +51,14 @@ public class ConnectionProperties { public static final String OPTIMIZER_CACHE_SIZE = "OPTIMIZER_CACHE_SIZE"; + /** + * 在 SHOW CREATE TABLE 结果中输出与 MySQL 兼容的缩进格式(两个空格) + */ + public static final String OUTPUT_MYSQL_INDENT = "OUTPUT_MYSQL_INDENT"; + + /** + * 是否允许全表扫描查询,默认为false + */ public static final String ALLOW_FULL_TABLE_SCAN = "ALLOW_FULL_TABLE_SCAN"; public static final String CHOOSE_STREAMING = "CHOOSE_STREAMING"; @@ -75,6 +83,9 @@ public class ConnectionProperties { public static final String SOCKET_TIMEOUT = "SOCKET_TIMEOUT"; + /** + * 事务策略 + */ public static final String TRANSACTION_POLICY = "TRANSACTION_POLICY"; public static final String SHARE_READ_VIEW = "SHARE_READ_VIEW"; @@ -168,6 +179,11 @@ public class ConnectionProperties { */ public static final String PRIMARY_KEY_CHECK = "PRIMARY_KEY_CHECK"; + /** + * Rebalance组装任务时生成的单个DDL job迁移对最大数据量,单位为MB. 
+ */ + public static final String REBALANCE_MAX_UNIT_SIZE = "REBALANCE_MAX_UNIT_SIZE"; + /** * 是否开启 Foreign Key */ @@ -191,6 +207,11 @@ public class ConnectionProperties { */ public static final String FOREIGN_KEY_CHECKS_FOR_UPDATE_DELETE = "FOREIGN_KEY_CHECKS_FOR_UPDATE_DELETE"; + /** + * 是否允许在包含 CCI 的表上执行 DDL + */ + public static final String FORBID_DDL_WITH_CCI = "FORBID_DDL_WITH_CCI"; + /** * 在 RelocateWriter 中是否通过 PartitionField 判断拆分键是否变化 */ @@ -215,6 +236,11 @@ public class ConnectionProperties { public static final String DML_SELECT_SAME_ROW_ONLY_COMPARE_PK_UK_SK = "DML_SELECT_SAME_ROW_ONLY_COMPARE_PK_UK_SK"; + /** + * 在 DML 的时候是否使用简单的字符串比较 JSON + */ + public static final String DML_CHECK_JSON_BY_STRING_COMPARE = "DML_CHECK_JSON_BY_STRING_COMPARE"; + /** * INSERT 中的 VALUES 出现列名时是否替换为插入值而不是默认值,以兼容 MySQL 行为;会对 INSERT 的 INPUT 按 VALUES 顺序排序 */ @@ -235,6 +261,31 @@ public class ConnectionProperties { */ public static final String CREATE_TABLE_SKIP_CDC = "CREATE_TABLE_SKIP_CDC"; + /** + * WAIT_PREPARED的延时时间 + */ + public static final String MULTI_PHASE_WAIT_PREPARED_DELAY = "MULTI_PHASE_WAIT_PREPARED_DELAY"; + + /** + * WAIT_COMMIT的延时时间 + */ + public static final String MULTI_PHASE_WAIT_COMMIT_DELAY = "MULTI_PHASE_WAIT_COMMIT_DELAY"; + + /** + * WAIT_COMMIT的延时时间 + */ + public static final String MULTI_PHASE_COMMIT_DELAY = "MULTI_PHASE_COMMIT_DELAY"; + + /** + * WAIT_COMMIT的延时时间 + */ + public static final String MULTI_PHASE_PREPARE_DELAY = "MULTI_PHASE_PREPARE_DELAY"; + + /** + * 校验逻辑列顺序 + */ + public static final String CHECK_LOGICAL_COLUMN_ORDER = "CHECK_LOGICAL_COLUMN_ORDER"; + /** * 是否强制使用 Online Modify Column,即使列类型没有改变,或者不是支持的类型 */ @@ -250,6 +301,20 @@ public class ConnectionProperties { */ public static final String OMC_BACK_FILL_USE_RETURNING = "OMC_BACK_FILL_USE_RETURNING"; + /** + * 是否自动采用 Online Modify Column + */ + public static final String ENABLE_AUTO_OMC = "ENABLE_AUTO_OMC"; + + /** + * 是否强制采用 Online Modify Column + */ + public static final String FORCE_USING_OMC = "FORCE_USING_OMC"; + + public static final String ENABLE_CHANGESET_FOR_OMC = "ENABLE_CHANGESET_FOR_OMC"; + + public static final String ENABLE_BACKFILL_OPT_FOR_OMC = "ENABLE_BACKFILL_OPT_FOR_OMC"; + /** * Online Modify Column / Add Generated Column 回填后是否进行检查 */ @@ -305,6 +370,25 @@ public class ConnectionProperties { */ public static final String ENABLE_DDL = "ENABLE_DDL"; + /* + * 是否开启两阶段DDL + */ + public static final String ENABLE_DRDS_MULTI_PHASE_DDL = "ENABLE_DRDS_MULTI_PHASE_DDL"; + + /* + * 是否在DDL之前执行check table + */ + public static final String CHECK_TABLE_BEFORE_PHY_DDL = "CHECK_TABLE_BEFORE_PHY_DDL"; + + /* + * 是否检查连接状态 + */ + public static final String CHECK_PHY_CONN_NUM = "CHECK_PHY_CONN_NUM"; + /* + * 两阶段DDL最终状态,仅用于调试 + */ + + public static final String TWO_PHASE_DDL_FINAL_STATUS = "TWO_PHASE_DDL_FINAL_STATUS"; /** * 是否开启DDL */ @@ -499,6 +583,19 @@ public class ConnectionProperties { public static final String TSO_HEARTBEAT_INTERVAL = "TSO_HEARTBEAT_INTERVAL"; + /** + * 列存 purge 间隔 + */ + public static final String COLUMNAR_TSO_PURGE_INTERVAL = "COLUMNAR_TSO_PURGE_INTERVAL"; + + /** + * 列存 tso 加载间隔 + */ + public static final String COLUMNAR_TSO_UPDATE_INTERVAL = "COLUMNAR_TSO_UPDATE_INTERVAL"; + + /** + * 默认事务清理开始时间(在该时间段内随机) + */ public static final String PURGE_TRANS_START_TIME = "PURGE_TRANS_START_TIME"; public static final String PURGE_TRANS_BATCH_SIZE = "PURGE_TRANS_BATCH_SIZE"; @@ -552,6 +649,11 @@ public class ConnectionProperties { */ public static final String INSERT_SELECT_SELF_BY_PARALLEL = 
"INSERT_SELECT_SELF_BY_PARALLEL"; + /** + * 是否允许 Insert 列重复 + */ + public static final String INSERT_DUPLICATE_COLUMN = "INSERT_DUPLICATE_COLUMN"; + /** * MODIFY_SELECT_MULTI策略时 逻辑任务执行 的线程个数 */ @@ -582,10 +684,6 @@ public class ConnectionProperties { public static final String ENABLE_UDF = "ENABLE_UDF"; - public static final String ENABLE_JAVA_UDF = "ENABLE_JAVA_UDF"; - - public static final String CHECK_INVALID_JAVA_UDF = "CHECK_INVALID_JAVA_UDF"; - public static final String MAX_JAVA_UDF_NUM = "MAX_JAVA_UDF_NUM"; public static final String FORCE_DROP_JAVA_UDF = "FORCE_DROP_JAVA_UDF"; @@ -623,6 +721,8 @@ public class ConnectionProperties { public static final String STATISTIC_CORRECTIONS = "STATISTIC_CORRECTIONS"; + public static final String STATISTIC_IN_DEGRADATION_NUMBER = "STATISTIC_IN_DEGRADATION_NUMBER"; + /** * background ttl expire end time default 05:00 */ @@ -636,6 +736,22 @@ public class ConnectionProperties { public static final String BACKGROUND_STATISTIC_COLLECTION_EXPIRE_TIME = "BACKGROUND_STATISTIC_COLLECTION_EXPIRE_TIME"; + public static final String SKIP_PHYSICAL_ANALYZE = "SKIP_PHYSICAL_ANALYZE"; + + /** + * statistic info expire time + */ + public static final String STATISTIC_EXPIRE_TIME = "STATISTIC_EXPIRE_TIME"; + + public static final String CACHELINE_INDICATE_UPDATE_TIME = "CACHELINE_INDICATE_UPDATE_TIME"; + + public static final String ENABLE_CACHELINE_COMPENSATION = "ENABLE_CACHELINE_COMPENSATION"; + + public static final String CACHELINE_COMPENSATION_BLACKLIST = "CACHELINE_COMPENSATION_BLACKLIST"; + + /** + * statistic sample rate + */ public static final String STATISTIC_SAMPLE_RATE = "STATISTIC_SAMPLE_RATE"; public static final String SAMPLE_PERCENTAGE = "SAMPLE_PERCENTAGE"; @@ -662,6 +778,9 @@ public class ConnectionProperties { public static final String ANALYZE_TABLE_SPEED_LIMITATION = "ANALYZE_TABLE_SPEED_LIMITATION"; + /** + * enable sort merge join default true + */ public static final String ENABLE_SORT_MERGE_JOIN = "ENABLE_SORT_MERGE_JOIN"; public static final String ENABLE_BKA_JOIN = "ENABLE_BKA_JOIN"; @@ -695,6 +814,17 @@ public class ConnectionProperties { public static final String ENABLE_SEMI_HASH_JOIN = "ENABLE_SEMI_HASH_JOIN"; + public static final String ENABLE_REVERSE_SEMI_HASH_JOIN = "ENABLE_REVERSE_SEMI_HASH_JOIN"; + + public static final String ENABLE_REVERSE_ANTI_HASH_JOIN = "ENABLE_REVERSE_ANTI_HASH_JOIN"; + + public static final String EARLY_MATCH_MARKED_TABLE = "EARLY_MATCH_MARKED_TABLE"; + + public static final String BLOOM_FILTER_IN_REVERSE_SEMI_JOIN = "BLOOM_FILTER_IN_REVERSE_SEMI_JOIN"; + + /** + * enable semi bka join default true + */ public static final String ENABLE_SEMI_BKA_JOIN = "ENABLE_SEMI_BKA_JOIN"; public static final String ENABLE_SEMI_SORT_MERGE_JOIN = "ENABLE_SEMI_SORT_MERGE_JOIN"; @@ -709,20 +839,34 @@ public class ConnectionProperties { public static final String CBO_TOO_MANY_JOIN_LIMIT = "CBO_TOO_MANY_JOIN_LIMIT"; + public static final String COLUMNAR_CBO_TOO_MANY_JOIN_LIMIT = "COLUMNAR_CBO_TOO_MANY_JOIN_LIMIT"; + + /** + * cbo search in left deep tree search space only when CBO_ZIG_ZAG_TREE_JOIN_LIMIT < join size <= CBO_LEFT_DEEP_TREE_JOIN_LIMIT + */ public static final String CBO_LEFT_DEEP_TREE_JOIN_LIMIT = "CBO_LEFT_DEEP_TREE_JOIN_LIMIT"; public static final String CBO_ZIG_ZAG_TREE_JOIN_LIMIT = "CBO_ZIG_ZAG_TREE_JOIN_LIMIT"; public static final String CBO_BUSHY_TREE_JOIN_LIMIT = "CBO_BUSHY_TREE_JOIN_LIMIT"; + public static final String ENABLE_JOINAGG_TO_JOINAGGSEMIJOIN = "ENABLE_JOINAGG_TO_JOINAGGSEMIJOIN"; + + 
/** + * enable the heuristic algorithm to reorder join when join size <= RBO_HEURISTIC_JOIN_REORDER_LIMIT + */ public static final String RBO_HEURISTIC_JOIN_REORDER_LIMIT = "RBO_HEURISTIC_JOIN_REORDER_LIMIT"; public static final String MYSQL_JOIN_REORDER_EXHAUSTIVE_DEPTH = "MYSQL_JOIN_REORDER_EXHAUSTIVE_DEPTH"; public static final String ENABLE_LV_SUBQUERY_UNWRAP = "ENABLE_LV_SUBQUERY_UNWRAP"; + public static final String EXPLAIN_PRUNING_DETAIL = "EXPLAIN_PRUNING_DETAIL"; + public static final String ENABLE_FILTER_REORDER = "ENABLE_FILTER_REORDER"; + public static final String ENABLE_CONSTANT_FOLD = "ENABLE_CONSTANT_FOLD"; + /** * enable semi join reorder default true */ @@ -736,6 +880,13 @@ public class ConnectionProperties { public static final String ENABLE_SORT_AGG = "ENABLE_SORT_AGG"; + public static final String PARTIAL_AGG_ONLY = "PARTIAL_AGG_ONLY"; + + public static final String PARTIAL_AGG_SHARD = "PARTIAL_AGG_SHARD"; + + /** + * enable partial agg default true + */ public static final String ENABLE_PARTIAL_AGG = "ENABLE_PARTIAL_AGG"; public static final String PARTIAL_AGG_SELECTIVITY_THRESHOLD = "PARTIAL_AGG_SELECTIVITY_THRESHOLD"; @@ -755,6 +906,8 @@ public class ConnectionProperties { */ public static final String ENABLE_PUSH_JOIN = "ENABLE_PUSH_JOIN"; + public static final String ENABLE_PUSH_CORRELATE = "ENABLE_PUSH_CORRELATE"; + /** * ignore un pushable function when join */ @@ -804,6 +957,11 @@ public class ConnectionProperties { public static final String ENABLE_START_UP_COST = "ENABLE_START_UP_COST"; + public static final String ENABLE_MQ_CACHE_COST_BY_THREAD = "ENABLE_MQ_CACHE_COST_BY_THREAD"; + + /** + * join hint + */ public static final String JOIN_HINT = "JOIN_HINT"; public static final String SQL_SIMPLE_MAX_LENGTH = "SQL_SIMPLE_MAX_LENGTH"; @@ -905,6 +1063,9 @@ public class ConnectionProperties { public static final String SCALEOUT_FASTCHECKER_PARALLELISM = "SCALEOUT_FASTCHECKER_PARALLELISM"; + /** + * number of error for check early fail. + */ public static final String SCALEOUT_EARLY_FAIL_NUMBER = "SCALEOUT_EARLY_FAIL_NUMBER"; public static final String SCALEOUT_BACKFILL_POSITION_MARK = "GSI_BACKFILL_POSITION_MARK"; @@ -932,6 +1093,8 @@ public class ConnectionProperties { */ public static final String BACKFILL_MAX_SAMPLE_SIZE = "BACKFILL_MAX_SAMPLE_SIZE"; + public static final String BACKFILL_USE_RETURNING = "BACKFILL_USE_RETURNING"; + /** * enable split physical table for backfill */ @@ -959,6 +1122,11 @@ public class ConnectionProperties { public static final String CHECK_GLOBAL_INDEX_USE_FASTCHECKER = "CHECK_GLOBAL_INDEX_USE_FASTCHECKER"; public static final String FASTCHECKER_RETRY_TIMES = "FASTCHECKER_RETRY_TIMES"; + /** + * fastChecker use thread pool to control parallelism + * each thread pool corresponds to a storage inst node + */ + public static final String FASTCHECKER_THREAD_POOL_SIZE = "FASTCHECKER_THREAD_POOL_SIZE"; /** * when fastchecker failed to calculate hash value because of timeout, @@ -966,12 +1134,6 @@ public class ConnectionProperties { */ public static final String FASTCHECKER_BATCH_TIMEOUT_RETRY_TIMES = "FASTCHECKER_BATCH_TIMEOUT_RETRY_TIMES"; - /** - * when fastchecker use xa check, it must acquire table lock on phyTables. - * this argument will limit the acquire_lock_timeout. 
- */ - public static final String FASTCHECKER_LOCK_TIMEOUT = "FASTCHECKER_LOCK_TIMEOUT"; - /** * if a physical table's row count exceed FASTCHECKER_BATCH_SIZE, we will start to check by batch */ @@ -997,12 +1159,36 @@ public class ConnectionProperties { */ public static final String GSI_FASTCHECKER_PARALLELISM = "GSI_FASTCHECKER_PARALLELISM"; + /** + * allow to push down dml for the non-gsi and non-broadcast table + * when shard groups has no scale-out group + */ public static final String SCALEOUT_DML_PUSHDOWN_OPTIMIZATION = "SCALEOUT_DML_PUSHDOWN_OPTIMIZATION"; public static final String SCALEOUT_DML_PUSHDOWN_BATCH_LIMIT = "SCALEOUT_DML_PUSHDOWN_BATCH_LIMIT"; public static final String ENABLE_SCALE_OUT_FEATURE = "ENABLE_SCALE_OUT_FEATURE"; + /** + * import table + */ + public static final String IMPORT_TABLE = "IMPORT_TABLE"; + + public static final String IMPORT_TABLE_PARALLELISM = "IMPORT_TABLE_PARALLELISM"; + + /** + * reimport table + */ + public static final String REIMPORT_TABLE = "REIMPORT_TABLE"; + + /** + * import database + */ + public static final String IMPORT_DATABASE = "IMPORT_DATABASE"; + + /** + * check whether enable all phy dml log during doing scale out + */ public static final String ENABLE_SCALE_OUT_ALL_PHY_DML_LOG = "ENABLE_SCALE_OUT_ALL_PHY_DML_LOG"; public static final String ENABLE_SCALE_OUT_GROUP_PHY_DML_LOG = "ENABLE_SCALE_OUT_GROUP_PHY_DML_LOG"; @@ -1052,6 +1238,31 @@ public class ConnectionProperties { public static final String AUTO_PARTITION_PARTITIONS = "AUTO_PARTITION_PARTITIONS"; + /** + * Columnar default partitions + */ + public static final String COLUMNAR_DEFAULT_PARTITIONS = "COLUMNAR_DEFAULT_PARTITIONS"; + + /** + * Specify the 'before status' of ALTER INDEX VISIBLE, + * so that we can change cci status from CREATING to PUBLIC + */ + public static final String ALTER_CCI_STATUS_BEFORE = "ALTER_CCI_STATUS_BEFORE"; + + /** + * Specify the 'after status' of ALTER INDEX VISIBLE, + * so that we can change cci status from CREATING to PUBLIC + */ + public static final String ALTER_CCI_STATUS_AFTER = "ALTER_CCI_STATUS_AFTER"; + + /** + * Enable change cci status with ALTER INDEX VISIBLE + */ + public static final String ALTER_CCI_STATUS = "ALTER_CCI_STATUS"; + + /** + * allow create table gsi on table with column default current_timestamp + */ public static final String GSI_DEFAULT_CURRENT_TIMESTAMP = "GSI_DEFAULT_CURRENT_TIMESTAMP"; public static final String GSI_ON_UPDATE_CURRENT_TIMESTAMP = "GSI_ON_UPDATE_CURRENT_TIMESTAMP"; @@ -1121,16 +1332,6 @@ public class ConnectionProperties { * whether use fastchecker for CTAS */ public static final String CREATE_DATABASE_AS_USE_FASTCHECKER = "CREATE_DATABASE_AS_USE_FASTCHECKER"; - /** - * fastchecker parallelism for CTAS - */ - public static final String CREATE_DATABASE_AS_FASTCHECKER_PARALLELISM = - "CREATE_DATABASE_AS_FASTCHECKER_PARALLELISM"; - /** - * fastchecker retry times for CTAS - */ - public static final String CREATE_DATABASE_AS_FASTCHECKER_RETRY_TIMES = - "CREATE_DATABASE_AS_FASTCHECKER_RETRY_TIMES"; public static final String CREATE_DATABASE_MAX_PARTITION_FOR_DEBUG = "CREATE_DATABASE_MAX_PARTITION_FOR_DEBUG"; /** @@ -1245,6 +1446,8 @@ public class ConnectionProperties { */ public static final String ENABLE_EXPRESSION_VECTORIZATION = "ENABLE_EXPRESSION_VECTORIZATION"; + public static final String ENABLE_OPTIMIZE_RANDOM_EXCHANGE = "ENABLE_OPTIMIZE_RANDOM_EXCHANGE"; + /** * Allow constant fold when binding the vectorized expression. 
*/ @@ -1372,6 +1575,22 @@ public class ConnectionProperties { */ public static final String PHYSICAL_DDL_MDL_WAITING_TIMEOUT = "PHYSICAL_DDL_MDL_WAITING_TIMEOUT"; + /** + * Check if server should automatically recover left jobs during initialization. + */ + public static final String AUTOMATIC_DDL_JOB_RECOVERY = "AUTOMATIC_DDL_JOB_RECOVERY"; + + /** + * Comma separated string(.e.g "TASK1,TASK2"), using for skip execution some ddl task; + * Only works for ddl tasks that handled this flag explicitly + */ + public static final String SKIP_DDL_TASKS = "SKIP_DDL_TASKS"; + public static final String SKIP_DDL_TASKS_EXECUTE = "SKIP_DDL_TASKS_EXECUTE"; + public static final String SKIP_DDL_TASKS_ROLLBACK = "SKIP_DDL_TASKS_ROLLBACK"; + + /** + * Maximum number of table partitions per database. + */ public static final String MAX_TABLE_PARTITIONS_PER_DB = "MAX_TABLE_PARTITIONS_PER_DB"; public static final String LOGICAL_DB_TIME_ZONE = "LOGICAL_DB_TIME_ZONE"; @@ -1402,11 +1621,6 @@ public class ConnectionProperties { */ public static final String PUSH_CORRELATE_MATERIALIZED_LIMIT = "PUSH_CORRELATE_MATERIALIZED_LIMIT"; - /** - * force window and join reorder - */ - public static final String WINDOW_FUNC_REORDER_JOIN = "WINDOW_FUNC_REORDER_JOIN"; - public static final String STATISTIC_COLLECTOR_FROM_RULE = "STATISTIC_COLLECTOR_FROM_RULE"; public static final String ENABLE_MPP = "ENABLE_MPP"; @@ -1437,12 +1651,22 @@ public class ConnectionProperties { public static final String MPP_PARALLELISM = "MPP_PARALLELISM"; - public static final String MPP_HTTP_SERVER_MAX_THREADS = "MPP_HTTP_SERVER_MAX_THREADS"; - public static final String MPP_HTTP_SERVER_MIN_THREADS = "MPP_HTTP_SERVER_MIN_THREADS"; - public static final String MPP_HTTP_CLIENT_MAX_THREADS = "MPP_HTTP_CLIENT_MAX_THREADS"; - public static final String MPP_HTTP_CLIENT_MIN_THREADS = "MPP_HTTP_CLIENT_MIN_THREADS"; - public static final String MPP_HTTP_MAX_REQUESTS_PER_DESTINATION = "MPP_HTTP_MAX_REQUESTS_PER_DESTINATION"; - public static final String MPP_HTTP_CLIENT_MAX_CONNECTIONS = "MPP_HTTP_CLIENT_MAX_CONNECTIONS"; + public static final String MPP_NODE_SIZE = "MPP_NODE_SIZE"; + + public static final String MPP_NODE_RANDOM = "MPP_NODE_RANDOM"; + + public static final String MPP_PREFER_LOCAL_NODE = "MPP_PREFER_LOCAL_NODE"; + + public static final String SCHEDULE_BY_PARTITION = "SCHEDULE_BY_PARTITION"; + + //-------------------------------------------- http rpc thread ----------------------------------------- + + public static final String MPP_HTTP_SERVER_MAX_THREADS = "MPP_HTTP_SERVER_MAX_THREADS"; //200 + public static final String MPP_HTTP_SERVER_MIN_THREADS = "MPP_HTTP_SERVER_MIN_THREADS"; //2 + public static final String MPP_HTTP_CLIENT_MAX_THREADS = "MPP_HTTP_CLIENT_MAX_THREADS"; //200 + public static final String MPP_HTTP_CLIENT_MIN_THREADS = "MPP_HTTP_CLIENT_MIN_THREADS"; //8 + public static final String MPP_HTTP_MAX_REQUESTS_PER_DESTINATION = "MPP_HTTP_MAX_REQUESTS_PER_DESTINATION"; //5000 + public static final String MPP_HTTP_CLIENT_MAX_CONNECTIONS = "MPP_HTTP_CLIENT_MAX_CONNECTIONS"; //250 public static final String MPP_HTTP_CLIENT_MAX_CONNECTIONS_PER_SERVER = "MPP_HTTP_CLIENT_MAX_CONNECTIONS_PER_SERVER"; @@ -1502,6 +1726,8 @@ public class ConnectionProperties { public static final String MPP_MAX_PARALLELISM = "MPP_MAX_PARALLELISM"; + public static final String PARALLELISM_FOR_EMPTY_TABLE = "PARALLELISM_FOR_EMPTY_TABLE"; + public static final String MPP_MIN_PARALLELISM = "MPP_MIN_PARALLELISM"; public static final String 
MPP_QUERY_ROWS_PER_PARTITION = "MPP_QUERY_ROWS_PER_PARTITION"; @@ -1512,6 +1738,12 @@ public class ConnectionProperties { public static final String MPP_PARALLELISM_AUTO_ENABLE = "MPP_PARALLELISM_AUTO_ENABLE"; + public static final String SHOW_PIPELINE_INFO_UNDER_MPP = "SHOW_PIPELINE_INFO_UNDER_MPP"; + + public static final String ENABLE_TWO_CHOICE_SCHEDULE = "ENABLE_TWO_CHOICE_SCHEDULE"; + + public static final String ENABLE_COLUMNAR_SCHEDULE = "ENABLE_COLUMNAR_SCHEDULE"; + public static final String MPP_QUERY_PHASED_EXEC_SCHEDULE_ENABLE = "MPP_QUERY_PHASED_EXEC_SCHEDULE_ENABLE"; public static final String MPP_SCHEDULE_MAX_SPLITS_PER_NODE = "MPP_SCHEDULE_MAX_SPLITS_PER_NODE"; @@ -1527,7 +1759,7 @@ public class ConnectionProperties { public static final String MPP_INFO_UPDATE_INTERVAL = "MPP_INFO_UPDATE_INTERVAL"; - public static final String MPP_OUTPUT_MAX_BUFFER_SIZE = "MPP_TASK_OUTPUT_MAX_BUFFER_SIZE"; + public static final String MPP_OUTPUT_MAX_BUFFER_SIZE = "MPP_OUTPUT_MAX_BUFFER_SIZE"; public static final String MPP_TASK_CLIENT_TIMEOUT = "MPP_TASK_CLIENT_TIMEOUT"; @@ -1564,100 +1796,67 @@ public class ConnectionProperties { public static final String MPP_QUERY_NEED_RESERVE = "MPP_QUERY_NEED_RESERVE"; public static final String ENABLE_MODIFY_SHARDING_COLUMN = "ENABLE_MODIFY_SHARDING_COLUMN"; - - @Override - protected Object clone() throws CloneNotSupportedException { - return super.clone(); - } - public static final String ENABLE_MODIFY_LIMIT_OFFSET_NOT_ZERO = "ENABLE_MODIFY_LIMIT_OFFSET_NOT_ZERO"; - /** * Allow multi update/delete cross db */ public static final String ENABLE_COMPLEX_DML_CROSS_DB = "ENABLE_COMPLEX_DML_CROSS_DB"; - public static final String COMPLEX_DML_WITH_TRX = "COMPLEX_DML_WITH_TRX"; + public static final String ENABLE_PUSHDOWN_DISTINCT = "ENABLE_PUSHDOWN_DISTINCT"; + /** + * Enable index selection + */ public static final String ENABLE_INDEX_SELECTION = "ENABLE_INDEX_SELECTION"; - public static final String ENABLE_INDEX_SKYLINE = "ENABLE_INDEX_SKYLINE"; + public static final String ENABLE_INDEX_SELECTION_PRUNE = "ENABLE_INDEX_SELECTION_PRUNE"; + public static final String ENABLE_INDEX_SKYLINE = "ENABLE_INDEX_SKYLINE"; public static final String ENABLE_MERGE_INDEX = "ENABLE_MERGE_INDEX"; - public static final String ENABLE_OSS_INDEX_SELECTION = "ENABLE_OSS_INDEX_SELECTION"; - + public static final String ENABLE_COLUMNAR_PLAN_CACHE = "ENABLE_COLUMNAR_PLAN_CACHE"; + public static final String ENABLE_COLUMNAR_PULL_UP_PROJECT = "ENABLE_COLUMNAR_PULL_UP_PROJECT"; /** * Enable index selection */ public static final String SWITCH_GROUP_ONLY = "SWITCH_GROUP_ONLY"; - public static final String PLAN = "PLAN"; - public static final String ENABLE_SQL_PROFILE_LOG = "ENABLE_SQL_PROFILE_LOG"; - public static final String ENABLE_CPU_PROFILE = "ENABLE_CPU_PROFILE"; - public static final String ENABLE_MEMORY_POOL = "ENABLE_MEMORY_POOL"; - public static final String PER_QUERY_MEMORY_LIMIT = "PER_QUERY_MEMORY_LIMIT"; - public static final String SCHEMA_MEMORY_LIMIT = "SCHEMA_MEMORY_LIMIT"; - public static final String GLOBAL_MEMORY_LIMIT = "GLOBAL_MEMORY_LIMIT"; - public static final String ENABLE_MEMORY_LIMITATION = "ENABLE_MEMORY_LIMITATION"; - public static final String ENABLE_POST_PLANNER = "ENABLE_POST_PLANNER"; - public static final String ENABLE_DIRECT_PLAN = "ENABLE_DIRECT_PLAN"; - public static final String MPP_MEMORY_REVOKING_THRESHOLD = "MPP_MEMORY_REVOKING_THRESHOLD"; - public static final String MPP_MEMORY_REVOKING_TARGET = "MPP_MEMORY_REVOKING_TARGET"; - public static 
final String MPP_NOTIFY_BLOCKED_QUERY_MEMORY = "MPP_NOTIFY_BLOCKED_QUERY_MEMORY"; - public static final String TP_LOW_MEMORY_PROPORTION = "TP_LOW_MEMORY_PROPORTION"; - public static final String TP_HIGH_MEMORY_PROPORTION = "TP_HIGH_MEMORY_PROPORTION"; - public static final String AP_LOW_MEMORY_PROPORTION = "AP_LOW_MEMORY_PROPORTION"; - public static final String AP_HIGH_MEMORY_PROPORTION = "AP_HIGH_MEMORY_PROPORTION"; - public static final String ENABLE_SPILL = "ENABLE_SPILL"; - public static final String MPP_MAX_SPILL_THREADS = "MPP_MAX_SPILL_THREADS"; - public static final String MPP_SPILL_PATHS = "MPP_SPILL_PATHS"; - public static final String MPP_MAX_SPILL_SPACE_THRESHOLD = "MPP_MAX_SPILL_SPACE_THRESHOLD"; + public static final String MAX_SPILL_SPACE_THRESHOLD = "MAX_SPILL_SPACE_THRESHOLD"; public static final String MPP_AVAILABLE_SPILL_SPACE_THRESHOLD = "MPP_AVAILABLE_SPILL_SPACE_THRESHOLD"; - public static final String MPP_MAX_QUERY_SPILL_SPACE_THRESHOLD = "MPP_MAX_QUERY_SPILL_SPACE_THRESHOLD"; + public static final String MAX_QUERY_SPILL_SPACE_THRESHOLD = "MAX_QUERY_SPILL_SPACE_THRESHOLD"; public static final String MPP_MAX_SPILL_FD_THRESHOLD = "MPP_MAX_SPILL_FD_THRESHOLD"; - public static final String HYBRID_HASH_JOIN_BUCKET_NUM = "HYBRID_HASH_JOIN_BUCKET_NUM"; - public static final String HYBRID_HASH_JOIN_RECURSIVE_BUCKET_NUM = "HYBRID_HASH_JOIN_RECURSIVE_BUCKET_NUM"; - public static final String HYBRID_HASH_JOIN_MAX_RECURSIVE_DEPTH = "HYBRID_HASH_JOIN_MAX_RECURSIVE_DEPTH"; - public static final String MPP_LESS_REVOKE_BYTES = "MPP_LESS_REVOKE_BYTES"; - public static final String MPP_ALLOCATOR_SIZE = "MPP_ALLOCATOR_SIZE"; - public static final String MPP_CLUSTER_NAME = "MPP_CLUSTER_NAME"; - public static final String ENABLE_PARAMETER_PLAN = "ENABLE_PARAMETER_PLAN"; - public static final String ENABLE_CROSS_VIEW_OPTIMIZE = "ENABLE_CROSS_VIEW_OPTIMIZE"; - public static final String MPP_GLOBAL_MEMORY_LIMIT_RATIO = "MPP_GLOBAL_MEMORY_LIMIT_RATIO"; - public static final String CONN_POOL_PROPERTIES = "CONN_POOL_PROPERTIES"; public static final String CONN_POOL_MIN_POOL_SIZE = "CONN_POOL_MIN_POOL_SIZE"; public static final String CONN_POOL_MAX_POOL_SIZE = "CONN_POOL_MAX_POOL_SIZE"; @@ -1679,6 +1878,7 @@ protected Object clone() throws CloneNotSupportedException { public static final String CONN_POOL_XPROTO_AUTH = "CONN_POOL_XPROTO_AUTH"; public static final String CONN_POOL_XPROTO_AUTO_COMMIT_OPTIMIZE = "CONN_POOL_XPROTO_AUTO_COMMIT_OPTIMIZE"; public static final String CONN_POOL_XPROTO_XPLAN = "CONN_POOL_XPROTO_XPLAN"; + public static final String XPLAN_MAX_SCAN_ROWS = "XPLAN_MAX_SCAN_ROWS"; public static final String CONN_POOL_XPROTO_XPLAN_EXPEND_STAR = "CONN_POOL_XPROTO_XPLAN_EXPEND_STAR"; public static final String CONN_POOL_XPROTO_XPLAN_TABLE_SCAN = "CONN_POOL_XPROTO_XPLAN_TABLE_SCAN"; public static final String CONN_POOL_XPROTO_TRX_LEAK_CHECK = "CONN_POOL_XPROTO_TRX_LEAK_CHECK"; @@ -1692,26 +1892,20 @@ protected Object clone() throws CloneNotSupportedException { public static final String CONN_POOL_XPROTO_MAX_PACKET_SIZE = "CONN_POOL_XPROTO_MAX_PACKET_SIZE"; public static final String CONN_POOL_XPROTO_QUERY_TOKEN = "CONN_POOL_XPROTO_QUERY_TOKEN"; public static final String CONN_POOL_XPROTO_PIPE_BUFFER_SIZE = "CONN_POOL_XPROTO_PIPE_BUFFER_SIZE"; - public static final String XPROTO_MAX_DN_CONCURRENT = "XPROTO_MAX_DN_CONCURRENT"; - public static final String XPROTO_MAX_DN_WAIT_CONNECTION = "XPROTO_MAX_DN_WAIT_CONNECTION"; - /** * X-Protocol always keep upper filter when use XPlan */ 
public static final String XPROTO_ALWAYS_KEEP_FILTER_ON_XPLAN_GET = "XPROTO_ALWAYS_KEEP_FILTER_ON_XPLAN_GET"; - /** * x-protocol probe timeout. */ public static final String XPROTO_PROBE_TIMEOUT = "XPROTO_PROBE_TIMEOUT"; - /** * Galaxy prepare config. */ public static final String XPROTO_GALAXY_PREPARE = "XPROTO_GALAXY_PREPARE"; - /** * X-Protocol / XRPC flow control pipe max size(in KB, 1024 means 1MB). */ @@ -1721,184 +1915,160 @@ protected Object clone() throws CloneNotSupportedException { * X-Protocol / XRPC TCP aging time in seconds. */ public static final String XPROTO_TCP_AGING = "XPROTO_TCP_AGING"; - /** * The storage inst list of all single groups when creating new database */ public static final String SINGLE_GROUP_STORAGE_INST_LIST = "SINGLE_GROUP_STORAGE_INST_LIST"; - public static final String SHARD_DB_COUNT_EACH_STORAGE_INST = "SHARD_DB_COUNT_EACH_STORAGE_INST"; - public static final String SHARD_DB_COUNT_EACH_STORAGE_INST_FOR_STMT = - "SHARD_DB_COUNT_EACH_STORAGE_INST_FOR_STMT"; - + public static final String SHARD_DB_COUNT_EACH_STORAGE_INST_FOR_STMT = "SHARD_DB_COUNT_EACH_STORAGE_INST_FOR_STMT"; public static final String MAX_LOGICAL_DB_COUNT = "MAX_LOGICAL_DB_COUNT"; - public static final String PASSWORD_RULE_CONFIG = "PASSWORD_RULE_CONFIG"; public static final String MAX_AUDIT_LOG_CLEAN_KEEP_DAYS = "MAX_AUDIT_LOG_CLEAN_KEEP_DAYS"; public static final String MAX_AUDIT_LOG_CLEAN_DELAY_DAYS = "MAX_AUDIT_LOG_CLEAN_DELAY_DAYS"; public static final String LOGIN_ERROR_MAX_COUNT_CONFIG = "LOGIN_ERROR_MAX_COUNT_CONFIG"; public static final String ENABLE_LOGIN_AUDIT_CONFIG = "ENABLE_LOGIN_AUDIT_CONFIG"; - public static final String ENABLE_FORBID_PUSH_DML_WITH_HINT = "ENABLE_FORBID_PUSH_DML_WITH_HINT"; - public static final String VARIABLE_EXPIRE_TIME = "VARIABLE_EXPIRE_TIME"; - public static final String MERGE_SORT_BUFFER_SIZE = "MERGE_SORT_BUFFER_SIZE"; - public static final String ENABLE_AGG_PRUNING = "ENABLE_AGG_PRUNING"; - public static final String WORKLOAD_CPU_THRESHOLD = "WORKLOAD_CPU_THRESHOLD"; - public static final String WORKLOAD_MEMORY_THRESHOLD = "WORKLOAD_MEMORY_THRESHOLD"; - public static final String WORKLOAD_IO_THRESHOLD = "WORKLOAD_IO_THRESHOLD"; - public static final String WORKLOAD_OSS_NET_THRESHOLD = "WORKLOAD_OSS_NET_THRESHOLD"; - + public static final String WORKLOAD_COLUMNAR_ROW_THRESHOLD = "WORKLOAD_COLUMNAR_ROW_THRESHOLD"; public static final String WORKLOAD_TYPE = "WORKLOAD_TYPE"; - public static final String EXECUTOR_MODE = "EXECUTOR_MODE"; + public static final String ENABLE_OSS_MOCK_COLUMNAR = "ENABLE_OSS_MOCK_COLUMNAR"; + public static final String ENABLE_COLUMNAR_OPTIMIZER = "ENABLE_COLUMNAR_OPTIMIZER"; + public static final String EXECUTOR_MODE = "EXECUTOR_MODE"; public static final String ENABLE_MASTER_MPP = "ENABLE_MASTER_MPP"; - public static final String ENABLE_TEMP_TABLE_JOIN = "ENABLE_TEMP_TABLE_JOIN"; - public static final String LOOKUP_IN_VALUE_LIMIT = "LOOKUP_IN_VALUE_LIMIT"; - public static final String LOOKUP_JOIN_BLOCK_SIZE_PER_SHARD = "LOOKUP_JOIN_BLOCK_SIZE_PER_SHARD"; - public static final String ENABLE_CONSISTENT_REPLICA_READ = "ENABLE_CONSISTENT_REPLICA_READ"; - public static final String EXPLAIN_LOGICALVIEW = "EXPLAIN_LOGICALVIEW"; - public static final String ENABLE_HTAP = "ENABLE_HTAP"; - public static final String IN_SUB_QUERY_THRESHOLD = "IN_SUB_QUERY_THRESHOLD"; - public static final String ENABLE_OR_OPT = "ENABLE_OR_OPT"; - public static final String ENABLE_IN_SUB_QUERY_FOR_DML = "ENABLE_IN_SUB_QUERY_FOR_DML"; + public static 
final String ENABLE_XPLAN_FEEDBACK = "ENABLE_XPLAN_FEEDBACK"; + public static final String ENABLE_IN_SUB_QUERY_FOR_DML = "ENABLE_IN_SUB_QUERY_FOR_DML"; public static final String ENABLE_RUNTIME_FILTER = "ENABLE_RUNTIME_FILTER"; - + public static final String ENABLE_LOCAL_RUNTIME_FILTER = "ENABLE_LOCAL_RUNTIME_FILTER"; + public static final String CHECK_RUNTIME_FILTER_SAME_FRAGMENT = "CHECK_RUNTIME_FILTER_SAME_FRAGMENT"; public static final String FORCE_ENABLE_RUNTIME_FILTER_COLUMNS = "FORCE_ENABLE_RUNTIME_FILTER_COLUMNS"; - public static final String FORCE_DISABLE_RUNTIME_FILTER_COLUMNS = "FORCE_DISABLE_RUNTIME_FILTER_COLUMNS"; - public static final String BLOOM_FILTER_BROADCAST_NUM = "BLOOM_FILTER_BROADCAST_NUM"; - public static final String BLOOM_FILTER_MAX_SIZE = "BLOOM_FILTER_MAX_SIZE"; - public static final String BLOOM_FILTER_RATIO = "BLOOM_FILTER_RATIO"; - public static final String RUNTIME_FILTER_PROBE_MIN_ROW_COUNT = "RUNTIME_FILTER_PROBE_MIN_ROW_COUNT"; - public static final String BLOOM_FILTER_GUESS_SIZE = "BLOOM_FILTER_GUESS_SIZE"; - public static final String BLOOM_FILTER_MIN_SIZE = "BLOOM_FILTER_MIN_SIZE"; - public static final String ENABLE_PUSH_RUNTIME_FILTER_SCAN = "ENABLE_PUSH_RUNTIME_FILTER_SCAN"; - public static final String WAIT_RUNTIME_FILTER_FOR_SCAN = "WAIT_RUNTIME_FILTER_FOR_SCAN"; - public static final String ENABLE_RUNTIME_FILTER_INTO_BUILD_SIDE = "ENABLE_RUNTIME_FILTER_INTO_BUILD_SIDE"; - public static final String ENABLE_RUNTIME_FILTER_XXHASH = "ENABLE_RUNTIME_FILTER_XXHASH"; - public static final String ENABLE_SPLIT_RUNTIME_FILTER = "ENABLE_SPLIT_RUNTIME_FILTER"; - public static final String ENABLE_OPTIMIZE_SCAN_WITH_RUNTIME_FILTER = "ENABLE_OPTIMIZE_SCAN_WITH_RUNTIME_FILTER"; - public static final String RUNTIME_FILTER_FPP = "RUNTIME_FILTER_FPP"; - public static final String STORAGE_SUPPORTS_BLOOM_FILTER = "STORAGE_SUPPORTS_BLOOM_FILTER"; - public static final String WAIT_BLOOM_FILTER_TIMEOUT_MS = "WAIT_BLOOM_FILTER_TIMEOUT_MS"; - public static final String RESUME_SCAN_STEP_SIZE = "RESUME_SCAN_STEP_SIZE"; - public static final String ENABLE_SPILL_OUTPUT = "ENABLE_SPILL_OUTPUT"; - public static final String SPILL_OUTPUT_MAX_BUFFER_SIZE = "SPILL_OUTPUT_MAX_BUFFER_SIZE"; - public static final String SUPPORT_READ_FOLLOWER_STRATEGY = "SUPPORT_READ_FOLLOWER_STRATEGY"; - public static final String ENABLE_BROADCAST_RANDOM_READ = "ENABLE_BROADCAST_RANDOM_READ"; - public static final String TABLEGROUP_DEBUG = "TABLEGROUP_DEBUG"; - - public static final String DDL_ON_PRIMARY_GSI_TYPE = "DDL_ON_PRIMARY_GSI_TYPE"; - - public static final String SLEEP_TIME_BEFORE_NOTIFY_DDL = "SLEEP_TIME_BEFORE_NOTIFY_DDL"; + public static final String ENABLE_LOCAL_PARTITION_WISE_JOIN = "ENABLE_LOCAL_PARTITION_WISE_JOIN"; - public static final String SHOW_IMPLICIT_ID = "SHOW_IMPLICIT_ID"; + public static final String LOCAL_PAIRWISE_PROBE_SEPARATE = "LOCAL_PAIRWISE_PROBE_SEPARATE"; - public static final String ENABLE_DRIVING_STREAM_SCAN = "ENABLE_DRIVING_STREAM_SCAN"; + public static final String JOIN_KEEP_PARTITION = "JOIN_KEEP_PARTITION"; + /** + * debug mode on alter tablegroup, which makes alter tablegroup status change slower etc. 
+ */ + public static final String TABLEGROUP_DEBUG = "TABLEGROUP_DEBUG"; + public static final String DDL_ON_PRIMARY_GSI_TYPE = "DDL_ON_PRIMARY_GSI_TYPE"; + public static final String SLEEP_TIME_BEFORE_NOTIFY_DDL = "SLEEP_TIME_BEFORE_NOTIFY_DDL"; + public static final String SHOW_IMPLICIT_ID = "SHOW_IMPLICIT_ID"; + public static final String SHOW_IMPLICIT_TABLE_GROUP = "SHOW_IMPLICIT_TABLE_GROUP"; + public static final String ENABLE_DRIVING_STREAM_SCAN = "ENABLE_DRIVING_STREAM_SCAN"; public static final String ENABLE_SIMPLIFY_TRACE_SQL = "ENABLE_SIMPLIFY_TRACE_SQL"; - public static final String CALCULATE_ACTUAL_SHARD_COUNT_FOR_COST = "CALCULATE_ACTUAL_SHARD_COUNT_FOR_COST"; - public static final String PARAMETRIC_SIMILARITY_ALGO = "PARAMETRIC_SIMILARITY_ALGO"; public static final String FEEDBACK_WORKLOAD_AP_THRESHOLD = "FEEDBACK_WORKLOAD_AP_THRESHOLD"; - + //HTAP FEEDBACK public static final String FEEDBACK_WORKLOAD_TP_THRESHOLD = "FEEDBACK_WORKLOAD_TP_THRESHOLD"; public static final String MASTER_READ_WEIGHT = "MASTER_READ_WEIGHT"; + //HTAP ROUTE public static final String STORAGE_DELAY_THRESHOLD = "STORAGE_DELAY_THRESHOLD"; - public static final String STORAGE_BUSY_THRESHOLD = "STORAGE_BUSY_THRESHOLD"; - /** * set the operation strategy when the slave delay * <0 means nothing, =1 change master, =2 throw exception */ public static final String DELAY_EXECUTION_STRATEGY = "DELAY_EXECUTION_STRATEGY"; - public static final String KEEP_DELAY_EXECUTION_STRATEGY = "KEEP_DELAY_EXECUTION_STRATEGY"; - public static final String USE_CDC_CON = "USE_CDC_CON"; + public static final String NEW_TOPN = "NEW_TOPN"; + /** * top record size */ public static final String TOPN_SIZE = "TOPN_SIZE"; + public static final String NEW_TOPN_SIZE = "NEW_TOPN_SIZE"; + + /** + * topn min num, only record the topn info if its count > TOPN_MIN_NUM + */ public static final String TOPN_MIN_NUM = "TOPN_MIN_NUM"; - public static final String SELECT_INTO_OUTFILE_STATISTICS_DUMP = "SELECT_INTO_OUTFILE_STATISTICS_DUMP"; + public static final String NEW_TOPN_MIN_NUM = "NEW_TOPN_MIN_NUM"; + /** + * Whether return the result of SELECT INTO OUTFILE STATISTICS + */ + public static final String SELECT_INTO_OUTFILE_STATISTICS_DUMP = "SELECT_INTO_OUTFILE_STATISTICS_DUMP"; /** * Whether ignore histogram of string column */ public static final String STATISTICS_DUMP_IGNORE_STRING = "STATISTICS_DUMP_IGNORE_STRING"; - /** * 是否开启 SELECT INTO OUTFILE 默认关闭 */ public static final String ENABLE_SELECT_INTO_OUTFILE = "ENABLE_SELECT_INTO_OUTFILE"; - public static final String SHOW_HASH_PARTITIONS_BY_RANGE = "SHOW_HASH_PARTITIONS_BY_RANGE"; - public static final String SHOW_TABLE_GROUP_NAME = "SHOW_TABLE_GROUP_NAME"; - public static final String MAX_PHYSICAL_PARTITION_COUNT = "MAX_PHYSICAL_PARTITION_COUNT"; - public static final String MAX_PARTITION_COLUMN_COUNT = "MAX_PARTITION_COLUMN_COUNT"; + /** + * The max length of partition name(included the name of subpartition template) + */ + public static final String MAX_PARTITION_NAME_LENGTH = "MAX_PARTITION_NAME_LENGTH"; + /** * Label if auto use range-key subpart for index of auto-part table, default is true */ public static final String ENABLE_AUTO_USE_RANGE_FOR_TIME_INDEX = "ENABLE_AUTO_USE_RANGE_FOR_TIME_INDEX"; + /** + * Label if auto use key syntax for all local index on show create table + */ + public static final String ENABLE_USE_KEY_FOR_ALL_LOCAL_INDEX = "ENABLE_USE_KEY_FOR_ALL_LOCAL_INDEX"; /** * Label if auto use range/list columns partitions for "part by range/list", default is true */ 
public static final String ENABLE_AUTO_USE_COLUMNS_PARTITION = "ENABLE_AUTO_USE_COLUMNS_PARTITION"; - /** * Balancer parameters */ @@ -1915,349 +2085,276 @@ protected Object clone() throws CloneNotSupportedException { * Allow move the single table with locality='balance_single_table=on' during scale-out/scale-in */ public static final String ALLOW_MOVING_BALANCED_SINGLE_TABLE = "ALLOW_MOVING_BALANCED_SINGLE_TABLE"; - /** * The default value of default_single when create auto-db without specify default_single option */ public static final String DATABASE_DEFAULT_SINGLE = "DATABASE_DEFAULT_SINGLE"; - /** * switch for partition pruning, only use for qatest and debug */ public static final String ENABLE_PARTITION_PRUNING = "ENABLE_PARTITION_PRUNING"; - public static final String ENABLE_AUTO_MERGE_INTERVALS_IN_PRUNING = "ENABLE_AUTO_MERGE_INTERVALS_IN_PRUNING"; - public static final String ENABLE_INTERVAL_ENUMERATION_IN_PRUNING = "ENABLE_INTERVAL_ENUMERATION_IN_PRUNING"; - public static final String PARTITION_PRUNING_STEP_COUNT_LIMIT = "PARTITION_PRUNING_STEP_COUNT_LIMIT"; - public static final String USE_FAST_SINGLE_POINT_INTERVAL_MERGING = "USE_FAST_SINGLE_POINT_INTERVAL_MERGING"; - public static final String ENABLE_CONST_EXPR_EVAL_CACHE = "ENABLE_CONST_EXPR_EVAL_CACHE"; - public static final String MAX_ENUMERABLE_INTERVAL_LENGTH = "MAX_ENUMERABLE_INTERVAL_LENGTH"; - /** * The max size of in value from the InSubQuery pruning */ public static final String MAX_IN_SUBQUERY_PRUNING_SIZE = "MAX_IN_SUBQUERY_PRUNING_SIZE"; - /** * Enable do pruning log in pruner.log */ public static final String ENABLE_LOG_PART_PRUNING = "ENABLE_LOG_PART_PRUNING"; - public static final String ENABLE_OPTIMIZER_ALERT = "ENABLE_OPTIMIZER_ALERT"; - public static final String ENABLE_OPTIMIZER_ALERT_LOG = "ENABLE_OPTIMIZER_ALERT_LOG"; - public static final String OPTIMIZER_ALERT_LOG_INTERVAL = "OPTIMIZER_ALERT_LOG_INTERVAL"; - public static final String ALERT_BKA_BASE = "ALERT_BKA_BASE"; - public static final String ALERT_TP_BASE = "ALERT_TP_BASE"; + public static final String ENABLE_TP_SLOW_ALERT = "ENABLE_TP_SLOW_ALERT"; + + public static final String ENABLE_TP_SLOW_ALERT_THRESHOLD = "ENABLE_TP_SLOW_ALERT_THRESHOLD"; + + public static final String ENABLE_ALERT_TEST_DEFAULT = "ENABLE_ALERT_TEST_DEFAULT"; + public static final String ENABLE_ALERT_TEST = "ENABLE_ALERT_TEST"; - public static final String ENABLE_BRANCH_AND_BOUND_OPTIMIZATION = "ENABLE_BRANCH_AND_BOUND_OPTIMIZATION"; + public static final String ALERT_STATISTIC_INTERRUPT = "ALERT_STATISTIC_INTERRUPT"; + public static final String ALERT_STATISTIC_INCONSISTENT = "ALERT_STATISTIC_INCONSISTENT"; + + public static final String ENABLE_BRANCH_AND_BOUND_OPTIMIZATION = "ENABLE_BRANCH_AND_BOUND_OPTIMIZATION"; public static final String ENABLE_BROADCAST_JOIN = "ENABLE_BROADCAST_JOIN"; - public static final String BROADCAST_SHUFFLE_COUNT = "BROADCAST_SHUFFLE_COUNT"; + public static final String ENABLE_PARTITION_WISE_JOIN = "ENABLE_PARTITION_WISE_JOIN"; + public static final String ENABLE_BROADCAST_LEFT = "ENABLE_BROADCAST_LEFT"; - public static final String BROADCAST_SHUFFLE_PARALLELISM = "BROADCAST_SHUFFLE_PARALLELISM"; + public static final String ENABLE_PARTITION_WISE_AGG = "ENABLE_PARTITION_WISE_AGG"; - public static final String ENABLE_PASS_THROUGH_TRAIT = "ENABLE_PASS_THROUGH_TRAIT"; + public static final String ENABLE_PARTITION_WISE_WINDOW = "ENABLE_PARTITION_WISE_WINDOW"; + public static final String BROADCAST_SHUFFLE_PARALLELISM = 
"BROADCAST_SHUFFLE_PARALLELISM"; + public static final String ENABLE_PASS_THROUGH_TRAIT = "ENABLE_PASS_THROUGH_TRAIT"; public static final String ENABLE_DERIVE_TRAIT = "ENABLE_DERIVE_TRAIT"; - public static final String ENABLE_SHUFFLE_BY_PARTIAL_KEY = "ENABLE_SHUFFLE_BY_PARTIAL_KEY"; - public static final String ADVISE_TYPE = "ADVISE_TYPE"; - public static final String ENABLE_HLL = "ENABLE_HLL"; - public static final String STRICT_ENUM_CONVERT = "STRICT_ENUM_CONVERT"; + public static final String HLL_PARALLELISM = "HLL_PARALLELISM"; + public static final String STRICT_ENUM_CONVERT = "STRICT_ENUM_CONVERT"; public static final String STRICT_YEAR_CONVERT = "STRICT_YEAR_CONVERT"; - /** * feedback minor tolerance value */ public static final String MINOR_TOLERANCE_VALUE = "MINOR_TOLERANCE_VALUE"; - /** * upper bound for baseline sync */ public static final String MAX_BASELINE_SYNC_PLAN_SIZE = "MAX_BASELINE_SYNC_PLAN_SIZE"; - public static final String SPM_OLD_PLAN_CHOOSE_COUNT_LEVEL = "SPM_OLD_PLAN_CHOOSE_COUNT_LEVEL"; - /** * bytes upper bound for baseline sync */ public static final String MAX_BASELINE_SYNC_BYTE_SIZE = "MAX_BASELINE_SYNC_BYTE_SIZE"; - /** * the period of storage ha task of each dn, unit:ms */ public static final String STORAGE_HA_TASK_PERIOD = "STORAGE_HA_TASK_PERIOD"; - public static final String STORAGE_HA_SOCKET_TIMEOUT = "STORAGE_HA_SOCKET_TIMEOUT"; - public static final String STORAGE_HA_CONNECT_TIMEOUT = "STORAGE_HA_CONNECT_TIMEOUT"; - public static final String ENABLE_HA_CHECK_TASK_LOG = "ENABLE_HA_CHECK_TASK_LOG"; - public static final String STATISTIC_NDV_SKETCH_EXPIRE_TIME = "STATISTIC_NDV_SKETCH_EXPIRE_TIME"; + public static final String ENABLE_NDV_USE_COLUMNAR = "ENABLE_NDV_USE_COLUMNAR"; + public static final String ENABLE_MPP_NDV_USE_COLUMNAR = "ENABLE_MPP_NDV_USE_COLUMNAR"; + /** + * ndv sketch expire time + */ + public static final String STATISTIC_NDV_SKETCH_EXPIRE_TIME = "STATISTIC_NDV_SKETCH_EXPIRE_TIME"; public static final String STATISTIC_NDV_SKETCH_QUERY_TIMEOUT = "STATISTIC_NDV_SKETCH_QUERY_TIMEOUT"; - public static final String STATISTIC_NDV_SKETCH_MAX_DIFFERENT_VALUE = "STATISTIC_NDV_SKETCH_MAX_DIFFERENT_VALUE"; - public static final String STATISTIC_NDV_SKETCH_MAX_DIFFERENT_RATIO = "STATISTIC_NDV_SKETCH_MAX_DIFFERENT_RATIO"; - public static final String STATISTIC_NDV_SKETCH_SAMPLE_RATE = "STATISTIC_NDV_SKETCH_SAMPLE_RATE"; - public static final String ENABLE_CHECK_STATISTICS_EXPIRE = "ENABLE_CHECK_STATISTICS_EXPIRE"; - public static final String INDEX_ADVISOR_CARDINALITY_BASE = "INDEX_ADVISOR_CARDINALITY_BASE"; - public static final String AUTO_COLLECT_NDV_SKETCH = "AUTO_COLLECT_NDV_SKETCH"; - public static final String CDC_STARTUP_MODE = "CDC_STARTUP_MODE"; /** * CDC模块是否开启metadata snapshot 能力 */ public static final String ENABLE_CDC_META_BUILD_SNAPSHOT = "ENABLE_CDC_META_BUILD_SNAPSHOT"; - public static final String SHARE_STORAGE_MODE = "SHARE_STORAGE_MODE"; - public static final String SHOW_ALL_PARAMS = "SHOW_ALL_PARAMS"; - public static final String ENABLE_SET_GLOBAL = "ENABLE_SET_GLOBAL"; - + public static final String COMPATIBLE_CHARSET_VARIABLES = "COMPATIBLE_CHARSET_VARIABLES"; public static final String ENABLE_PREEMPTIVE_MDL = "ENABLE_PREEMPTIVE_MDL"; - public static final String SHOW_STORAGE_POOL = "SHOW_STORAGE_POOL"; - public static final String SHOW_FULL_LOCALITY = "SHOW_FULL_LOCALITY"; public static final String PREEMPTIVE_MDL_INITWAIT = "PREEMPTIVE_MDL_INITWAIT"; public static final String PREEMPTIVE_MDL_INTERVAL = 
"PREEMPTIVE_MDL_INTERVAL"; - public static final String RENAME_PREEMPTIVE_MDL_INITWAIT = "RENAME_PREEMPTIVE_MDL_INITWAIT"; public static final String RENAME_PREEMPTIVE_MDL_INTERVAL = "RENAME_PREEMPTIVE_MDL_INTERVAL"; - public static final String TG_PREEMPTIVE_MDL_INITWAIT = "TG_PREEMPTIVE_MDL_INITWAIT"; public static final String TG_PREEMPTIVE_MDL_INTERVAL = "TG_PREEMPTIVE_MDL_INTERVAL"; - public static final String FORCE_READ_OUTSIDE_TX = "FORCE_READ_OUTSIDE_TX"; - public static final String SCHEDULER_SCAN_INTERVAL_SECONDS = "SCHEDULER_SCAN_INTERVAL_SECONDS"; public static final String SCHEDULER_CLEAN_UP_INTERVAL_HOURS = "SCHEDULER_CLEAN_UP_INTERVAL_HOURS"; public static final String SCHEDULER_RECORD_KEEP_HOURS = "SCHEDULER_RECORD_KEEP_HOURS"; public static final String SCHEDULER_MIN_WORKER_COUNT = "SCHEDULER_MIN_WORKER_COUNT"; public static final String SCHEDULER_MAX_WORKER_COUNT = "SCHEDULER_MAX_WORKER_COUNT"; - public static final String DEFAULT_LOCAL_PARTITION_SCHEDULE_CRON_EXPR = "DEFAULT_LOCAL_PARTITION_SCHEDULE_CRON_EXPR"; - /** * check target table after alter tablegroup's backfill */ public static final String TABLEGROUP_REORG_CHECK_AFTER_BACKFILL = "TABLEGROUP_REORG_CHECK_AFTER_BACKFILL"; - /** * TABLEGROUP_REORG_BACKFILL_USE_FASTCHECKER */ public static final String TABLEGROUP_REORG_BACKFILL_USE_FASTCHECKER = "TABLEGROUP_REORG_BACKFILL_USE_FASTCHECKER"; - public static final String TABLEGROUP_REORG_CHECK_BATCH_SIZE = "TABLEGROUP_REORG_CHECK_BATCH_SIZE"; - public static final String TABLEGROUP_REORG_CHECK_SPEED_LIMITATION = "TABLEGROUP_REORG_CHECK_SPEED_LIMITATION"; - public static final String TABLEGROUP_REORG_CHECK_SPEED_MIN = "TABLEGROUP_REORG_CHECK_SPEED_MIN"; - public static final String TABLEGROUP_REORG_CHECK_PARALLELISM = "TABLEGROUP_REORG_CHECK_PARALLELISM"; - public static final String TABLEGROUP_REORG_FASTCHECKER_PARALLELISM = "TABLEGROUP_REORG_FASTCHECKER_PARALLELISM"; /** * number of error for check early fail. */ public static final String TABLEGROUP_REORG_EARLY_FAIL_NUMBER = "TABLEGROUP_REORG_EARLY_FAIL_NUMBER"; - /** * set the table's final status for alter tablegroup debug purpose. 
*/ public static final String TABLEGROUP_REORG_FINAL_TABLE_STATUS_DEBUG = "TABLEGROUP_REORG_FINAL_TABLE_STATUS_DEBUG"; - public static final String INTERRUPT_DDL_WHILE_LOSING_LEADER = "INTERRUPT_DDL_WHILE_LOSING_LEADER"; - public static final String RECORD_SQL_COST = "RECORD_SQL_COST"; - public static final String ENABLE_LOGICALVIEW_COST = "ENABLE_LOGICALVIEW_COST"; - public static final String FORCE_RECREATE_GROUP_DATASOURCE = "FORCE_RECREATE_GROUP_DATASOURCE"; - public static final String ENABLE_PLAN_TYPE_DIGEST = "ENABLE_PLAN_TYPE_DIGEST"; - public static final String ENABLE_PLAN_TYPE_DIGEST_STRICT_MODE = "ENABLE_PLAN_TYPE_DIGEST_STRICT_MODE"; - /** * flag that if auto warming logical db */ public static final String ENABLE_LOGICAL_DB_WARMMING_UP = "ENABLE_LOGICAL_DB_WARMMING_UP"; - /** * pool size of auto-warming-logical-db-executor */ public static final String LOGICAL_DB_WARMMING_UP_EXECUTOR_POOL_SIZE = "LOGICAL_DB_WARMMING_UP_EXECUTOR_POOL_SIZE"; - public static final String FLASHBACK_RENAME = "FLASHBACK_RENAME"; - public static final String PURGE_FILE_STORAGE_TABLE = "PURGE_FILE_STORAGE_TABLE"; - /* ================ For OSS Table ORC File ================ */ - public static final String OSS_BACKFILL_PARALLELISM = "OSS_BACKFILL_PARALLELISM"; - + /* ================ For OSS Table ORC File ================ */ public static final String OSS_ORC_INDEX_STRIDE = "OSS_ORC_INDEX_STRIDE"; - public static final String OSS_BLOOM_FILTER_FPP = "OSS_BLOOM_FILTER_FPP"; - public static final String OSS_MAX_ROWS_PER_FILE = "OSS_MAX_ROWS_PER_FILE"; - public static final String OSS_REMOVE_TMP_FILES = "OSS_REMOVE_TMP_FILES"; - public static final String OSS_ORC_COMPRESSION = "OSS_ORC_COMPRESSION"; - - /* ================ For OSS Table File System ================ */ - public static final String OSS_FS_MAX_READ_RATE = "OSS_FS_MAX_READ_RATE"; + /* ================ For OSS Table File System ================ */ public static final String OSS_FS_MAX_WRITE_RATE = "OSS_FS_MAX_WRITE_RATE"; - public static final String OSS_FS_VALIDATION_ENABLE = "OSS_FS_VALIDATION_ENABLE"; - public static final String OSS_FS_CACHE_TTL = "OSS_FS_CACHE_TTL"; - public static final String OSS_FS_MAX_CACHED_ENTRIES = "OSS_FS_MAX_CACHED_ENTRIES"; - public static final String OSS_FS_HOT_CACHE_TTL = "OSS_FS_HOT_CACHE_TTL"; + public static final String OSS_FS_ENABLE_CACHED = "OSS_FS_ENABLE_CACHED"; - public static final String OSS_FS_MAX_HOT_CACHED_ENTRIES = "OSS_FS_MAX_HOT_CACHED_ENTRIES"; + public static final String OSS_FS_CACHED_FLUSH_THREAD_NUM = "OSS_FS_CACHED_FLUSH_THREAD_NUM"; - public static final String OSS_ORC_MAX_MERGE_DISTANCE = "OSS_ORC_MAX_MERGE_DISTANCE"; + public static final String OSS_FS_MAX_CACHED_GB = "OSS_FS_MAX_CACHED_GB"; - public static final String FILE_LIST = "FILE_LIST"; + public static final String OSS_FS_USE_BYTES_CACHE = "OSS_FS_USE_BYTES_CACHE"; - public static final String FILE_PATTERN = "FILE_PATTERN"; + public static final String OSS_FS_MEMORY_RATIO_OF_BYTES_CACHE = "OSS_FS_MEMORY_RATIO_OF_BYTES_CACHE"; + public static final String OSS_ORC_MAX_MERGE_DISTANCE = "OSS_ORC_MAX_MERGE_DISTANCE"; + public static final String FILE_LIST = "FILE_LIST"; + public static final String FILE_PATTERN = "FILE_PATTERN"; public static final String ENABLE_EXPIRE_FILE_STORAGE_PAUSE = "ENABLE_EXPIRE_FILE_STORAGE_PAUSE"; - public static final String ENABLE_CHECK_DDL_FILE_STORAGE = "ENABLE_CHECK_DDL_FILE_STORAGE"; - public static final String ENABLE_CHECK_DDL_BINDING_FILE_STORAGE = "ENABLE_CHECK_DDL_BINDING_FILE_STORAGE"; - 
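// Illustration (hedged; the table name is hypothetical): like every key in this class,
// the file-storage flags above are plain property names. A session can override one
// per statement with a cmd_extra hint, e.g.
//   /*+TDDL:cmd_extra(ENABLE_FILE_STORE_CHECK_TABLE=true)*/ CHECK TABLE archive_t;
// or, where the key supports it, instance-wide via SET GLOBAL.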
public static final String ENABLE_EXPIRE_FILE_STORAGE_TEST_PAUSE = "ENABLE_EXPIRE_FILE_STORAGE_TEST_PAUSE";
-
public static final String FILE_STORAGE_TASK_PARALLELISM = "FILE_STORAGE_TASK_PARALLELISM";
public static final String ENABLE_FILE_STORE_CHECK_TABLE = "ENABLE_FILE_STORE_CHECK_TABLE";
-
public static final String ENABLE_OSS_BUFFER_POOL = "ENABLE_OSS_BUFFER_POOL";
-
public static final String ENABLE_OSS_DELAY_MATERIALIZATION = "ENABLE_OSS_DELAY_MATERIALIZATION";
-
public static final String ENABLE_OSS_ZERO_COPY = "ENABLE_OSS_ZERO_COPY";
-
public static final String ENABLE_OSS_COMPATIBLE = "ENABLE_OSS_COMPATIBLE";
+ public static final String ENABLE_PAIRWISE_SHUFFLE_COMPATIBLE = "ENABLE_PAIRWISE_SHUFFLE_COMPATIBLE";
+
+ public static final String COLD_DATA_STATUS = "COLD_DATA_STATUS";
+
public static final String ENABLE_OSS_DELAY_MATERIALIZATION_ON_EXCHANGE = "ENABLE_OSS_DELAY_MATERIALIZATION_ON_EXCHANGE";
-
public static final String ENABLE_OSS_FILE_CONCURRENT_SPLIT_ROUND_ROBIN = "ENABLE_OSS_FILE_CONCURRENT_SPLIT_ROUND_ROBIN";
-
public static final String ENABLE_REUSE_VECTOR = "ENABLE_REUSE_VECTOR";
-
public static final String ENABLE_DECIMAL_FAST_VEC = "ENABLE_DECIMAL_FAST_VEC";
-
public static final String ENABLE_UNIQUE_HASH_KEY = "ENABLE_UNIQUE_HASH_KEY";
-
+ public static final String ENABLE_PRUNE_EXCHANGE_PARTITION = "ENABLE_PRUNE_EXCHANGE_PARTITION";
public static final String BLOCK_BUILDER_CAPACITY = "BLOCK_BUILDER_CAPACITY";
-
public static final String ENABLE_HASH_TABLE_BLOOM_FILTER = "ENABLE_HASH_TABLE_BLOOM_FILTER";
-
public static final String ENABLE_COMMON_SUB_EXPRESSION_TREE_ELIMINATE = "ENABLE_COMMON_SUB_EXPRESSION_TREE_ELIMINATE";
-
public static final String OSS_FILE_ORDER = "OSS_FILE_ORDER";
-
public static final String MAX_SESSION_PREPARED_STMT_COUNT = "MAX_SESSION_PREPARED_STMT_COUNT";
-
public static final String ALLOW_REPLACE_ARCHIVE_TABLE = "ALLOW_REPLACE_ARCHIVE_TABLE";
-
public static final String ALLOW_CREATE_TABLE_LIKE_FILE_STORE = "ALLOW_CREATE_TABLE_LIKE_FILE_STORE";
-
/**
 * whether collection of the partitions heatmap is enabled; dynamic, default: true
 */
public static final String ENABLE_PARTITIONS_HEATMAP_COLLECTION = "ENABLE_PARTITIONS_HEATMAP_COLLECTION";
-
/**
 * set the schemas and tables for partitions heatmap collection
 * e.g. 'schema_01#table1&table12,schema_02#table1' or 'schema_01,schema_02' or ''
 */
public static final String PARTITIONS_HEATMAP_COLLECTION_ONLY = "PARTITIONS_HEATMAP_COLLECTION_ONLY";
-
/**
 * if the number of partitions already collected exceeds PARTITIONS_HEATMAP_COLLECTION_MAX_SCAN, do not collect any more.
 */
public static final String PARTITIONS_HEATMAP_COLLECTION_MAX_SCAN = "PARTITIONS_HEATMAP_COLLECTION_MAX_SCAN";
-
/**
 * if the count for a single logical schema exceeds PARTITIONS_HEATMAP_COLLECTION_MAX_SINGLE_LOGIC_SCHEMA_COUNT, do not collect it.
 */
public static final String PARTITIONS_HEATMAP_COLLECTION_MAX_SINGLE_LOGIC_SCHEMA_COUNT = "PARTITIONS_HEATMAP_COLLECTION_MAX_SINGLE_LOGIC_SCHEMA_COUNT";
-
/**
 * if the number of partitions exceeds PARTITIONS_HEATMAP_COLLECTION_MAX_MERGE_NUM, merge them.
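 * (illustrative only: with a cap of 1600, a table exposing 6400 partitions would have its
 * heatmap entries merged roughly 4:1 before collection; the figures are hypothetical, not defaults.)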
 */
public static final String PARTITIONS_HEATMAP_COLLECTION_MAX_MERGE_NUM = "PARTITIONS_HEATMAP_COLLECTION_MAX_MERGE_NUM";
-
/**
 * extreme performance mode
 */
public static final String ENABLE_EXTREME_PERFORMANCE = "ENABLE_EXTREME_PERFORMANCE";
-
public static final String ENABLE_CLEAN_FAILED_PLAN = "ENABLE_CLEAN_FAILED_PLAN";
-
/**
 * the min size of an IN expr that would be pruned
 */
public static final String IN_PRUNE_SIZE = "IN_PRUNE_SIZE";
-
/**
 * the batch size in which IN exprs are pruned
 */
public static final String IN_PRUNE_STEP_SIZE = "IN_PRUNE_STEP_SIZE";
-
public static final String IN_PRUNE_MAX_TIME = "IN_PRUNE_MAX_TIME";
+ public static final String PRUNING_TIME_WARNING_THRESHOLD = "PRUNING_TIME_WARNING_THRESHOLD";
+
+ public static final String ENABLE_PRUNING_IN = "ENABLE_PRUNING_IN";
+
+ public static final String ENABLE_PRUNING_IN_DML = "ENABLE_PRUNING_IN_DML";
/**
 * the max number of pruning-info entries cached per logical view
 */
public static final String MAX_IN_PRUNE_CACHE_SIZE = "MAX_IN_PRUNE_CACHE_SIZE";
-
/**
 * the max number of tables for which pruning info is cached per logical view
 */
public static final String MAX_IN_PRUNE_CACHE_TABLE_SIZE = "MAX_IN_PRUNE_CACHE_TABLE_SIZE";
-
public static final String REBALANCE_TASK_PARALISM = "REBALANCE_TASK_PARALISM";
-
/**
 * params for statement summary
 */
@@ -2274,249 +2371,160 @@ protected Object clone() throws CloneNotSupportedException {
 * the max number of statement templates the statement summary supports.
 */
public static final String STATEMENTS_SUMMARY_MAX_SQL_TEMPLATE_COUNT = "STATEMENTS_SUMMARY_MAX_SQL_TEMPLATE_COUNT";
-
public static final String STATEMENTS_SUMMARY_RECORD_INTERNAL = "STATEMENTS_SUMMARY_RECORD_INTERNAL";
-
/**
 * only collect local data when it is false.
 */
public static final String ENABLE_REMOTE_SYNC_ACTION = "ENABLE_REMOTE_SYNC_ACTION";
-
/**
 * the max length of a sql sample stored in the statement summary.
 */
public static final String STATEMENTS_SUMMARY_MAX_SQL_LENGTH = "STATEMENTS_SUMMARY_MAX_SQL_LENGTH";
-
/**
 * the percent of queries being summarized.
 * when the percent is 0, only slow sql is summarized.
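 * (e.g. a value of 10 should sample roughly one query in ten, while slow sql keeps
 * being summarized regardless of the sampling rate.)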
 */
public static final String STATEMENTS_SUMMARY_PERCENT = "STATEMENTS_SUMMARY_PERCENT";
-
public static final String ENABLE_STORAGE_TRIGGER = "enable_storage_trigger";
-
public static final String ENABLE_TRANS_LOG = "ENABLE_TRANS_LOG";
-
public static final String PLAN_CACHE_EXPIRE_TIME = "PLAN_CACHE_EXPIRE_TIME";
-
public static final String SKIP_MOVE_DATABASE_VALIDATOR = "SKIP_MOVE_DATABASE_VALIDATOR";
-
public static final String ENABLE_MPP_FILE_STORE_BACKFILL = "ENABLE_MPP_FILE_STORE_BACKFILL";
-
public static final String PARTITION_NAME = "PARTITION_NAME";
-
public static final String FORBID_REMOTE_DDL_TASK = "FORBID_REMOTE_DDL_TASK";
-
public static final String ENABLE_STANDBY_BACKFILL = "ENABLE_STANDBY_BACKFILL";
-
public static final String PHYSICAL_DDL_IGNORED_ERROR_CODE = "PHYSICAL_DDL_IGNORED_ERROR_CODE";
-
public static final String DDL_PAUSE_DURING_EXCEPTION = "DDL_PAUSE_DURING_EXCEPTION";
+ public static final String OUTPUT_MYSQL_ERROR_CODE = "OUTPUT_MYSQL_ERROR_CODE";
- public static final String CHANGE_SET_REPLAY_TIMES = "CHANGE_SET_REPLAY_TIMES";
+ public static final String MAPPING_TO_MYSQL_ERROR_CODE = "MAPPING_TO_MYSQL_ERROR_CODE";
+ public static final String CHANGE_SET_REPLAY_TIMES = "CHANGE_SET_REPLAY_TIMES";
public static final String CHANGE_SET_APPLY_BATCH = "CHANGE_SET_APPLY_BATCH";
-
public static final String CHANGE_SET_MEMORY_LIMIT = "CHANGE_SET_MEMORY_LIMIT";
-
public static final String ENABLE_CHANGESET = "ENABLE_CHANGESET";
-
public static final String CN_ENABLE_CHANGESET = "CN_ENABLE_CHANGESET";
-
public static final String CHANGE_SET_APPLY_SPEED_LIMITATION = "CHANGE_SET_APPLY_SPEED_LIMITATION";
-
public static final String CHANGE_SET_APPLY_SPEED_MIN = "CHANGE_SET_APPLY_SPEED_MIN";
-
public static final String CHANGE_SET_APPLY_PARALLELISM = "CHANGE_SET_APPLY_PARALLELISM";
-
public static final String CHANGE_SET_APPLY_PHY_PARALLELISM = "CHANGE_SET_APPLY_PHY_PARALLELISM";
-
public static final String CHANGE_SET_APPLY_OPTIMIZATION = "CHANGE_SET_APPLY_OPTIMIZATION";
-
/**
 * for change set debug
 */
public static final String SKIP_CHANGE_SET_CHECKER = "SKIP_CHANGE_SET_CHECKER";
-
public static final String CHANGE_SET_CHECK_TWICE = "CHANGE_SET_CHECK_TWICE";
-
public static final String SKIP_CHANGE_SET = "SKIP_CHANGE_SET";
-
public static final String CHANGE_SET_DEBUG_MODE = "CHANGE_SET_DEBUG_MODE";
-
public static final String SKIP_CHANGE_SET_APPLY = "SKIP_CHANGE_SET_APPLY";
-
public static final String SKIP_CHANGE_SET_FETCH = "SKIP_CHANGE_SET_FETCH";
-
public static final String PURGE_OSS_FILE_CRON_EXPR = "PURGE_OSS_FILE_CRON_EXPR";
-
public static final String PURGE_OSS_FILE_BEFORE_DAY = "PURGE_OSS_FILE_BEFORE_DAY";
-
public static final String BACKUP_OSS_PERIOD = "BACKUP_OSS_PERIOD";
-
public static final String FILE_STORAGE_FILES_META_QUERY_PARALLELISM = "FILE_STORAGE_FILES_META_QUERY_PARALLELISM";
-
public static final String ENBALE_BIND_PARAM_TYPE = "ENBALE_BIND_PARAM_TYPE";
-
public static final String ENBALE_BIND_COLLATE = "ENBALE_BIND_COLLATE";
-
public static final String SKIP_TABLEGROUP_VALIDATOR = "SKIP_TABLEGROUP_VALIDATOR";
-
/**
 * Enable auto savepoint. If it is TRUE, failed DML statements will be rolled back automatically.
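 * (e.g. in a multi-statement transaction, an INSERT that fails is rolled back to the
 * implicit savepoint taken before it, leaving the earlier statements intact.)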
*/ public static final String ENABLE_AUTO_SAVEPOINT = "ENABLE_AUTO_SAVEPOINT"; - public static final String CURSOR_FETCH_CONN_MEMORY_LIMIT = "CURSOR_FETCH_CONN_MEMORY_LIMIT"; - public static final String FORCE_RESHARD = "FORCE_RESHARD"; - public static final String REMOVE_DDL_JOB_REDUNDANCY_RELATIONS = "REMOVE_DDL_JOB_REDUNDANCY_RELATIONS"; - public static final String TG_MDL_SEGMENT_SIZE = "TG_MDL_SEGMENT_SIZE"; - public static final String DB_MDL_SEGMENT_SIZE = "DB_MDL_SEGMENT_SIZE"; - - public static final String ENABLE_FAST_MOCK = "ENABLE_FAST_MOCK"; - public static final String ENABLE_TRIGGER_DIRECT_INFORMATION_SCHEMA_QUERY = "ENABLE_TRIGGER_DIRECT_INFORMATION_SCHEMA_QUERY"; - public static final String ENABLE_LOWER_CASE_TABLE_NAMES = "ENABLE_LOWER_CASE_TABLE_NAMES"; - /** * second when ddl plan scheduler wait for polling ddl plan record. */ public static final String DDL_PLAN_SCHEDULER_DELAY = "DDL_PLAN_SCHEDULER_DELAY"; - public static final String USE_PARAMETER_DELEGATE = "USE_PARAMETER_DELEGATE"; - public static final String ENABLE_NODE_HINT_REPLACE = "ENABLE_NODE_HINT_REPLACE"; - public static final String USE_JDK_DEFAULT_SER = "USE_JDK_DEFAULT_SER"; - public static final String OPTIMIZE_TABLE_PARALLELISM = "OPTIMIZE_TABLE_PARALLELISM"; - public static final String OPTIMIZE_TABLE_USE_DAL = "OPTIMIZE_TABLE_USE_DAL"; - /** * module conf */ public static final String MAINTENANCE_TIME_START = "MAINTENANCE_TIME_START"; - public static final String MAINTENANCE_TIME_END = "MAINTENANCE_TIME_END"; - public static final String ENABLE_MODULE_LOG = "ENABLE_MODULE_LOG"; - + public static final String ENABLE_COLUMNAR_DECIMAL64 = "ENABLE_COLUMNAR_DECIMAL64"; + public static final String ENABLE_XPROTO_RESULT_DECIMAL64 = "ENABLE_XPROTO_RESULT_DECIMAL64"; public static final String MAX_MODULE_LOG_PARAMS_SIZE = "MAX_MODULE_LOG_PARAMS_SIZE"; - public static final String MAX_MODULE_LOG_PARAM_SIZE = "MAX_MODULE_LOG_PARAM_SIZE"; - /** * speed limit for oss backfill procedure */ public static final String OSS_BACKFILL_SPEED_LIMITATION = "OSS_BACKFILL_SPEED_LIMITATION"; - /** * speed lower bound for oss backfill procedure */ public static final String OSS_BACKFILL_SPEED_MIN = "OSS_BACKFILL_SPEED_MIN"; - public static final String ONLY_MANUAL_TABLEGROUP_ALLOW = "ONLY_MANUAL_TABLEGROUP_ALLOW"; public static final String MANUAL_TABLEGROUP_NOT_ALLOW_AUTO_MATCH = "MANUAL_TABLEGROUP_NOT_ALLOW_AUTO_MATCH"; - public static final String ACQUIRE_CREATE_TABLE_GROUP_LOCK = "ACQUIRE_CREATE_TABLE_GROUP_LOCK"; public static final String ENABLE_DRUID_FOR_SYNC_CONN = "ENABLE_DRUID_FOR_SYNC_CONN"; public static final String PASSWORD_CHECK_PATTERN = "PASSWORD_CHECK_PATTERN"; - public static final String DEPRECATE_EOF = "DEPRECATE_EOF"; - public static final String ENABLE_AUTO_SPLIT_PARTITION = "ENABLE_AUTO_SPLIT_PARTITION"; - public static final String ENABLE_FORCE_PRIMARY_FOR_TSO = "ENABLE_FORCE_PRIMARY_FOR_TSO"; - public static final String ENABLE_FORCE_PRIMARY_FOR_FILTER = "ENABLE_FORCE_PRIMARY_FOR_FILTER"; - public static final String ENABLE_FORCE_PRIMARY_FOR_GROUP_BY = "ENABLE_FORCE_PRIMARY_FOR_GROUP_BY"; - /** * Whether rollback a branch of XA trx if its primary group is unknown. 
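 * (i.e. during recovery, when the branch's primary group cannot be determined, the
 * branch is rolled back rather than committed.)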
*/ public static final String ROLLBACK_UNKNOWN_PRIMARY_GROUP_XA_TRX = "ROLLBACK_UNKNOWN_PRIMARY_GROUP_XA_TRX"; - public static final String PREFETCH_EXECUTE_POLICY = "PREFETCH_EXECUTE_POLICY"; - public static final String MAX_RECURSIVE_TIME = "MAX_RECURSIVE_COUNT"; public static final String MAX_RECURSIVE_CTE_MEM_BYTES = "MAX_RECURSIVE_CTE_MEM_BYTES"; - public static final String ENABLE_REPLICA = "ENABLE_REPLICA"; - public static final String GROUPING_LSN_THREAD_NUM = "GROUPING_LSN_THREAD_NUM"; - public static final String GROUPING_LSN_TIMEOUT = "GROUPING_LSN_TIMEOUT"; - public static final String ENABLE_ASYNC_COMMIT = "ENABLE_ASYNC_COMMIT"; - public static final String ENABLE_TRANSACTION_RECOVER_TASK = "ENABLE_TRANSACTION_RECOVER_TASK"; - public static final String ASYNC_COMMIT_TASK_LIMIT = "ASYNC_COMMIT_TASK_LIMIT"; - public static final String ASYNC_COMMIT_PUSH_MAX_SEQ_ONLY_LEADER = "ASYNC_COMMIT_PUSH_MAX_SEQ_ONLY_LEADER"; - public static final String ASYNC_COMMIT_OMIT_PREPARE_TS = "ASYNC_COMMIT_OMIT_PREPARE_TS"; public static final String ENABLE_SINGLE_SHARD_WRITE = "ENABLE_SINGLE_SHARD_WRITE"; public static final String ENABLE_FOLLOWER_READ = "ENABLE_FOLLOWER_READ"; - public static final String CREATE_TABLE_WITH_CHARSET_COLLATE = "CREATE_TABLE_WITH_CHARSET_COLLATE"; - public static final String ENABLE_SIMPLIFY_SUBQUERY_SQL = "ENABLE_SIMPLIFY_SUBQUERY_SQL"; public static final String ENABLE_SIMPLIFY_SHARDING_SQL = "ENABLE_SIMPLIFY_SHARDING_SQL"; - public static final String MAX_PHYSICAL_SLOW_SQL_PARAMS_TO_PRINT = "MAX_PHYSICAL_SLOW_SQL_PARAMS_TO_PRINT"; + public static final String MAX_CCI_COUNT = "MAX_CCI_COUNT"; + public static final String ENABLE_CCI_ON_TABLE_WITH_IMPLICIT_PK = "ENABLE_CCI_ON_TABLE_WITH_IMPLICIT_PK"; - public static final String SERVER_ID = "SERVER_ID"; - + public static final String SERVER_ID = "SERVER_ID"; public static final String ENABLE_REMOTE_CONSUME_LOG = "ENABLE_REMOTE_CONSUME_LOG"; - public static final String REMOTE_CONSUME_LOG_BATCH_SIZE = "REMOTE_CONSUME_LOG_BATCH_SIZE"; - public static final String ENABLE_TRANSACTION_STATISTICS = "ENABLE_TRANSACTION_STATISTICS"; - public static final String SLOW_TRANS_THRESHOLD = "SLOW_TRANS_THRESHOLD"; - public static final String TRANSACTION_STATISTICS_TASK_INTERVAL = "TRANSACTION_STATISTICS_TASK_INTERVAL"; - public static final String IDLE_TRANSACTION_TIMEOUT = "IDLE_TRANSACTION_TIMEOUT"; - public static final String IDLE_WRITE_TRANSACTION_TIMEOUT = "IDLE_WRITE_TRANSACTION_TIMEOUT"; - public static final String IDLE_READONLY_TRANSACTION_TIMEOUT = "IDLE_READONLY_TRANSACTION_TIMEOUT"; - public static final String MAX_CACHED_SLOW_TRANS_STATS = "MAX_CACHED_SLOW_TRANS_STATS"; - public static final String ENABLE_TRX_IDLE_TIMEOUT_TASK = "ENABLE_TRX_IDLE_TIMEOUT_TASK"; - public static final String TRX_IDLE_TIMEOUT_TASK_INTERVAL = "TRX_IDLE_TIMEOUT_TASK_INTERVAL"; - + public static final String BACKFILL_USING_BINARY = "BACKFILL_USING_BINARY"; /** * -1 mean the learner only allow read, this is the default value; */ public static final String LEARNER_LEVEL = "LEARNER_LEVEL"; - public static final String PLAN_CACHE_SIZE = "PLAN_CACHE_SIZE"; - public static final String ENABLE_X_PROTO_OPT_FOR_AUTO_SP = "ENABLE_X_PROTO_OPT_FOR_AUTO_SP"; - public static final String SIM_CDC_FAILED = "SIM_CDC_FAILED"; public static final String SKIP_DDL_RESPONSE = "SKIP_DDL_RESPONSE"; - public static final String ENABLE_ROLLBACK_TO_READY = "ENABLE_ROLLBACK_TO_READY"; - public static final String TRX_LOG_CLEAN_PARALLELISM = 
"TRX_LOG_CLEAN_PARALLELISM"; public static final String CHECK_RESPONSE_IN_MEM = "CHECK_RESPONSE_IN_MEM"; @@ -2526,4 +2534,201 @@ protected Object clone() throws CloneNotSupportedException { public static final String PHYSICAL_DDL_TASK_RETRY = "PHYSICAL_DDL_TASK_RETRY"; public static final String ENABLE_2PC_OPT = "ENABLE_2PC_OPT"; + + public static final String SKIP_COLUMNAR_WAIT_TASK = "SKIP_COLUMNAR_WAIT_TASK"; + // columnar index + public static final String COLUMNAR_BITMAP_INDEX_MAX_SCAN_SIZE_FOR_PRUNING = + "COLUMNAR_BITMAP_INDEX_MAX_SCAN_SIZE_FOR_PRUNING"; + /** + * To enable the columnar scan exec. + */ + public static final String ENABLE_COLUMNAR_SCAN_EXEC = "ENABLE_COLUMNAR_SCAN_EXEC"; + /** + * The count of maximum groups in a scan work. + */ + public static final String COLUMNAR_WORK_UNIT = "COLUMNAR_WORK_UNIT"; + /** + * The policy of table scan: IO_PRIORITY, FILTER_PRIORITY, IO_ON_DEMAND. + */ + public static final String SCAN_POLICY = "SCAN_POLICY"; + /** + * To enable the block cache. + */ + public static final String ENABLE_BLOCK_CACHE = "ENABLE_BLOCK_CACHE"; + + public static final String ENABLE_USE_IN_FLIGHT_BLOCK_CACHE = "ENABLE_USE_IN_FLIGHT_BLOCK_CACHE"; + + /** + * To enable the verbose metrics report. + */ + public static final String ENABLE_VERBOSE_METRICS_REPORT = "ENABLE_VERBOSE_METRICS_REPORT"; + /** + * To enable the columnar metrics. + */ + public static final String ENABLE_COLUMNAR_METRICS = "ENABLE_COLUMNAR_METRICS"; + /** + * To enable the index pruning on orc. + */ + public static final String ENABLE_INDEX_PRUNING = "ENABLE_INDEX_PRUNING"; + /** + * To enable canceling the loading processing of stripe-loader. + */ + public static final String ENABLE_CANCEL_STRIPE_LOADING = "ENABLE_CANCEL_STRIPE_LOADING"; + + public static final String ENABLE_COLUMNAR_SLICE_DICT = "ENABLE_COLUMNAR_SLICE_DICT"; + + public static final String ENABLE_LAZY_BLOCK_ACTIVE_LOADING = "ENABLE_LAZY_BLOCK_ACTIVE_LOADING"; + public static final String ENABLE_COLUMN_READER_LOCK = "ENABLE_COLUMN_READER_LOCK"; + public static final String ENABLE_VEC_ACCUMULATOR = "ENABLE_VEC_ACCUMULATOR"; + public static final String ENABLE_LOCAL_EXCHANGE_BATCH = "ENABLE_LOCAL_EXCHANGE_BATCH"; + public static final String ENABLE_VEC_BUILD_JOIN_ROW = "ENABLE_VEC_BUILD_JOIN_ROW"; + public static final String ENABLE_VEC_JOIN = "ENABLE_VEC_JOIN"; + public static final String ENABLE_JOIN_CONDITION_PRUNING = "ENABLE_JOIN_CONDITION_PRUNING"; + public static final String ENABLE_EXCHANGE_PARTITION_OPTIMIZATION = "ENABLE_EXCHANGE_PARTITION_OPTIMIZATION"; + public static final String ENABLE_DRIVER_OBJECT_POOL = "ENABLE_DRIVER_OBJECT_POOL"; + public static final String ENABLE_COLUMNAR_SCAN_SELECTION = "ENABLE_COLUMNAR_SCAN_SELECTION"; + public static final String BLOCK_CACHE_MEMORY_SIZE_FACTOR = "BLOCK_CACHE_MEMORY_SIZE_FACTOR"; + public static final String ENABLE_BLOCK_BUILDER_BATCH_WRITING = "ENABLE_BLOCK_BUILDER_BATCH_WRITING"; + public static final String ENABLE_SCAN_RANDOM_SHUFFLE = "ENABLE_SCAN_RANDOM_SHUFFLE"; + + public static final String SCAN_RANDOM_SHUFFLE_THRESHOLD = "SCAN_RANDOM_SHUFFLE_THRESHOLD"; + + public static final String ENABLE_AUTOMATIC_COLUMNAR_PARAMS = "ENABLE_AUTOMATIC_COLUMNAR_PARAMS"; + + public static final String ENABLE_FILE_STORAGE_DELTA_STATISTIC = "ENABLE_FILE_STORAGE_DELTA_STATISTIC"; + + public static final String ZONEMAP_MAX_GROUP_SIZE = "ZONEMAP_MAX_GROUP_SIZE"; + public static final String PHYSICAL_BACKFILL_BATCH_SIZE = "PHYSICAL_BACKFILL_BATCH_SIZE"; + public static final String 
PHYSICAL_BACKFILL_MIN_SUCCESS_BATCH_UPDATE = + "PHYSICAL_BACKFILL_MIN_SUCCESS_BATCH_UPDATE"; + public static final String PHYSICAL_BACKFILL_MIN_WRITE_BATCH_PER_THREAD = + "PHYSICAL_BACKFILL_MIN_WRITE_BATCH_PER_THREAD"; + public static final String PHYSICAL_BACKFILL_PARALLELISM = "PHYSICAL_BACKFILL_PARALLELISM"; + public static final String PHYSICAL_BACKFILL_ENABLE = "PHYSICAL_BACKFILL_ENABLE"; + public static final String PHYSICAL_BACKFILL_FROM_FOLLOWER = "PHYSICAL_BACKFILL_FROM_FOLLOWER"; + public static final String PHYSICAL_BACKFILL_MAX_RETRY_WAIT_FOLLOWER_TO_LSN = + "PHYSICAL_BACKFILL_MAX_RETRY_WAIT_FOLLOWER_TO_LSN"; + public static final String PHYSICAL_BACKFILL_MAX_SLAVE_LATENCY = "PHYSICAL_BACKFILL_MAX_SLAVE_LATENCY"; + public static final String PHYSICAL_BACKFILL_NET_SPEED_TEST_TIME = "PHYSICAL_BACKFILL_NET_SPEED_TEST_TIME"; + public static final String IMPORT_TABLESPACE_TASK_EXEC_SERIALLY = "IMPORT_TABLESPACE_TASK_EXEC_SERIALLY"; + //this option is just for test only + public static final String PHYSICAL_BACKFILL_IGNORE_CFG = "PHYSICAL_BACKFILL_IGNORE_CFG"; + public static final String PHYSICAL_BACKFILL_SPEED_LIMIT = "PHYSICAL_BACKFILL_SPEED_LIMIT"; + public static final String PHYSICAL_BACKFILL_WAIT_LSN_WHEN_ROLLBACK = "PHYSICAL_BACKFILL_WAIT_LSN_WHEN_ROLLBACK"; + public static final String PHYSICAL_BACKFILL_STORAGE_HEALTHY_CHECK = "PHYSICAL_BACKFILL_STORAGE_HEALTHY_CHECK"; + + public static final String PHYSICAL_BACKFILL_IMPORT_TABLESPACE_BY_LEADER = + "PHYSICAL_BACKFILL_IMPORT_TABLESPACE_BY_LEADER"; + + public static final String PHYSICAL_BACKFILL_SPEED_TEST = + "PHYSICAL_BACKFILL_SPEED_TEST"; + + public static final String REBALANCE_MAINTENANCE_ENABLE = "REBALANCE_MAINTENANCE_ENABLE"; + public static final String REBALANCE_MAINTENANCE_TIME_START = "REBALANCE_MAINTENANCE_TIME_START"; + + public static final String REBALANCE_MAINTENANCE_TIME_END = "REBALANCE_MAINTENANCE_TIME_END"; + + public static final String CANCEL_REBALANCE_JOB_DUE_MAINTENANCE = "CANCEL_REBALANCE_JOB_DUE_MAINTENANCE"; + + public static final String ENABLE_DEADLOCK_DETECTION_80 = "ENABLE_DEADLOCK_DETECTION_80"; + public static final String MOCK_COLUMNAR_INDEX = "MOCK_COLUMNAR_INDEX"; + public static final String MCI_FORMAT = "MCI_FORMAT"; + public static final String ENABLE_LOGICAL_TABLE_META = "ENABLE_LOGICAL_TABLE_META"; + public static final String OPTIMIZER_TYPE = "OPTIMIZER_TYPE"; + public static final String ENABLE_COLUMNAR_AFTER_CBO_PLANNER = "ENABLE_COLUMNAR_AFTER_CBO_PLANNER"; + public static final String PUSH_PROJECT_INPUT_REF_THRESHOLD = "PUSH_PROJECT_INPUT_REF_THRESHOLD"; + + /** + * 0: legacy method + * 1: new method (A/B table) + */ + public static final String TRX_LOG_METHOD = "TRX_LOG_METHOD"; + + /** + * A/B table clean interval time, in minute. + * default: 30 min + */ + public static final String TRX_LOG_CLEAN_INTERVAL = "TRX_LOG_CLEAN_INTERVAL"; + + public static final String SKIP_LEGACY_LOG_TABLE_CLEAN = "SKIP_LEGACY_LOG_TABLE_CLEAN"; + + public static final String WARM_UP_DB_PARALLELISM = "WARM_UP_DB_PARALLELISM"; + + /** + * In seconds. 
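+ * (e.g. the runtime default of 60 in DynamicConfig#warmUpDbInterval gives a one-minute interval.)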
+ */ + public static final String WARM_UP_DB_INTERVAL = "WARM_UP_DB_INTERVAL"; + + public static final String ENABLE_XA_TSO = "ENABLE_XA_TSO"; + + public static final String ENABLE_AUTO_COMMIT_TSO = "ENABLE_AUTO_COMMIT_TSO"; + + public static final String MAX_CONNECTIONS = "MAX_CONNECTIONS"; + + public static final String ENABLE_ENCDB = "ENABLE_ENCDB"; + + public static final String ENABLE_TRX_EVENT_LOG = "ENABLE_TRX_EVENT_LOG"; + + public static final String ENABLE_TRX_DEBUG_MODE = "ENABLE_TRX_DEBUG_MODE"; + + public static final String IGNORE_TRANSACTION_POLICY_NO_TRANSACTION = "IGNORE_TRANSACTION_POLICY_NO_TRANSACTION"; + + public static final String ENABLE_XXHASH_RF_IN_BUILD = "ENABLE_XXHASH_RF_IN_BUILD"; + + public static final String ENABLE_XXHASH_RF_IN_FILTER = "ENABLE_XXHASH_RF_IN_FILTER"; + + public static final String ENABLE_NEW_RF = "ENABLE_NEW_RF"; + + public static final String GLOBAL_RF_ROWS_UPPER_BOUND = "GLOBAL_RF_ROWS_UPPER_BOUND"; + + public static final String GLOBAL_RF_ROWS_LOWER_BOUND = "GLOBAL_RF_ROWS_LOWER_BOUND"; + + public static final String ENABLE_SKIP_COMPRESSION_IN_ORC = "ENABLE_SKIP_COMPRESSION_IN_ORC"; + + public static final String ONLY_CACHE_PRIMARY_KEY_IN_BLOCK_CACHE = "ONLY_CACHE_PRIMARY_KEY_IN_BLOCK_CACHE"; + + public static final String NEW_RF_SAMPLE_COUNT = "NEW_RF_SAMPLE_COUNT"; + + public static final String NEW_RF_FILTER_RATIO_THRESHOLD = "NEW_RF_FILTER_RATIO_THRESHOLD"; + + public static final String ENABLE_LBAC = "ENABLE_LBAC"; + + public static final String ENABLE_VALUES_PUSHDOWN = "ENABLE_VALUES_PUSHDOWN"; + + public static final String ENABLE_SET_GLOBAL_SERVER_ID = "ENABLE_SET_GLOBAL_SERVER_ID"; + public static final String CDC_RANDOM_DDL_TOKEN = "CDC_RANDOM_DDL_TOKEN"; + public static final String ENABLE_IMPLICIT_TABLE_GROUP = "ENABLE_IMPLICIT_TABLE_GROUP"; + public static final String ALLOW_AUTO_CREATE_TABLEGROUP = "ALLOW_AUTO_CREATE_TABLEGROUP"; + public static final String INSTANCE_READ_ONLY = "INSTANCE_READ_ONLY"; + public static final String SUPER_WRITE = "SUPER_WRITE"; + public static final String ENABLE_EXTRACT_STREAM_NAME_FROM_USER = "ENABLE_EXTRACT_STREAM_NAME_FROM_USER"; + + public static final String SNAPSHOT_TS = "SNAPSHOT_TS"; + + public static final String SKIP_CHECK_CCI_TASK = "SKIP_CHECK_CCI_TASK"; + + public static final String ENABLE_1PC_OPT = "ENABLE_1PC_OPT"; + + // In milliseconds. 
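+ // (default 5 * 60 * 1000 in DynamicConfig#minSnapshotKeepTime, i.e. snapshots are kept for at least five minutes)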
+ public static final String MIN_SNAPSHOT_KEEP_TIME = "MIN_SNAPSHOT_KEEP_TIME"; + + public static final String FORCE_CCI_VISIBLE = "FORCE_CCI_VISIBLE"; + + public static final String ENABLE_ORC_DELETED_SCAN = "ENABLE_ORC_DELETED_SCAN"; + + public static final String ENABLE_ORC_RAW_TYPE_BLOCK = "ENABLE_ORC_RAW_TYPE_BLOCK"; + + public static final String FORCE_READ_ORC_FILE = "FORCE_READ_ORC_FILE"; + + public static final String READ_CSV_ONLY = "READ_CSV_ONLY"; + + public static final String READ_ORC_ONLY = "READ_ORC_ONLY"; + + public static final String ENABLE_FAST_CCI_CHECKER = "ENABLE_FAST_CCI_CHECKER"; + + public static final String ENABLE_FAST_PARSE_ORC_RAW_TYPE = "ENABLE_FAST_PARSE_ORC_RAW_TYPE"; + + public static final String FORBID_AUTO_COMMIT_TRX = "FORBID_AUTO_COMMIT_TRX"; + + public static final String FORCE_2PC_DURING_CCI_CHECK = "FORCE_2PC_DURING_CCI_CHECK"; } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/DynamicConfig.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/DynamicConfig.java index 7c3350e8e..2c170fbd7 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/DynamicConfig.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/DynamicConfig.java @@ -16,12 +16,23 @@ package com.alibaba.polardbx.common.properties; +import com.alibaba.fastjson.JSON; +import com.alibaba.fastjson.TypeReference; +import com.alibaba.fastjson.parser.Feature; +import com.alibaba.polardbx.common.TddlConstants; import com.alibaba.polardbx.common.constants.IsolationLevel; import com.alibaba.polardbx.common.statementsummary.StatementSummaryManager; +import com.alibaba.polardbx.common.utils.version.InstanceVersion; +import com.alibaba.polardbx.common.utils.TStringUtil; import com.alibaba.polardbx.config.ConfigDataMode; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; import java.util.regex.Pattern; /** @@ -85,6 +96,9 @@ public void loadValue(Logger logger, String key, String value) { case ConnectionProperties.OPTIMIZER_ALERT_LOG_INTERVAL: optimizerAlertLogInterval = parseValue(value, Long.class, 600000L); break; + case ConnectionProperties.ENABLE_TP_SLOW_ALERT_THRESHOLD: + tpSlowAlertThreshold = parseValue(value, Integer.class, 10); + break; case ConnectionProperties.STORAGE_BUSY_THRESHOLD: busyThreshold = parseValue(value, Integer.class, 100); break; @@ -125,6 +139,11 @@ public void loadValue(Logger logger, String key, String value) { learnerMode = tempLearnerMode; } break; + + case ConnectionProperties.BLOCK_CACHE_MEMORY_SIZE_FACTOR: + blockCacheMemoryFactor = parseValue(value, Float.class, 0.6f); + break; + case ConnectionProperties.PURGE_HISTORY_MS: { long tempPurgeHistoryMs = parseValue(value, Long.class, 600 * 1000L); if (tempPurgeHistoryMs > 0 && tempPurgeHistoryMs < purgeHistoryMs) { @@ -142,6 +161,9 @@ public void loadValue(Logger logger, String key, String value) { case ConnectionProperties.MAX_SESSION_PREPARED_STMT_COUNT: maxSessionPreparedStmtCount = parseValue(value, Integer.class, maxSessionPreparedStmtCountDefault); break; + case ConnectionProperties.STATISTIC_IN_DEGRADATION_NUMBER: + inDegradationNum = parseValue(value, Integer.class, 100); + break; case ConnectionProperties.ENABLE_AUTO_USE_RANGE_FOR_TIME_INDEX: enableAutoUseRangeForTimeIndex = parseValue(value, Boolean.class, true); @@ -191,6 +213,21 @@ public void 
loadValue(Logger logger, String key, String value) { case ConnectionProperties.FOREIGN_KEY_CHECKS: foreignKeyChecks = parseValue(value, Boolean.class, true); break; + case ConnectionProperties.ENABLE_XPROTO_RESULT_DECIMAL64: + enableXResultDecimal64 = parseValue(value, Boolean.class, false); + break; + case ConnectionProperties.ENABLE_COLUMNAR_DECIMAL64: + enableColumnarDecimal64 = parseValue(value, Boolean.class, true); + break; + case ConnectionProperties.MAX_CONNECTIONS: + maxConnections = parseValue(value, Integer.class, 20000); + break; + case ConnectionProperties.MAX_ALLOWED_PACKET: + maxAllowedPacket = parseValue(value, Integer.class, 16 * 1024 * 1024); + break; + case ConnectionProperties.PHYSICAL_DDL_MDL_WAITING_TIMEOUT: + phyiscalMdlWaitTimeout = parseValue(value, Integer.class, 15); + break; case ConnectionProperties.ENABLE_STATEMENTS_SUMMARY: int enableStatementsSummary = parseValue(value, Boolean.class, true) ? 1 : 0; StatementSummaryManager.getInstance().getConfig().setEnableStmtSummary(enableStatementsSummary); @@ -260,8 +297,98 @@ public void loadValue(Logger logger, String key, String value) { case ConnectionProperties.ENABLE_2PC_OPT: enable2pcOpt = parseValue(value, Boolean.class, false); break; + case ConnectionProperties.COMPATIBLE_CHARSET_VARIABLES: + compatibleCharsetVariables = parseValue(value, Boolean.class, false); + break; + case ConnectionProperties.VERSION_PREFIX: + String versionPrefix = parseValue(value, String.class, null); + InstanceVersion.reloadVersion(versionPrefix); + break; + case ConnectionProperties.TRX_LOG_METHOD: + trxLogMethod = parseValue(value, Integer.class, 0); + break; + case ConnectionProperties.TRX_LOG_CLEAN_INTERVAL: + trxLogCleanInterval = parseValue(value, Integer.class, 30); + break; + case ConnectionProperties.SKIP_LEGACY_LOG_TABLE_CLEAN: + skipLegacyLogTableClean = parseValue(value, Boolean.class, false); + break; + case ConnectionProperties.WARM_UP_DB_PARALLELISM: + warmUpDbParallelism = parseValue(value, Integer.class, 1); + break; + case ConnectionProperties.WARM_UP_DB_INTERVAL: + warmUpDbInterval = parseValue(value, Long.class, 60L); + break; + case ConnectionProperties.MAX_PARTITION_NAME_LENGTH: { + int newPartNameLength = parseValue(value, Integer.class, + Integer.valueOf(ConnectionParams.MAX_PARTITION_NAME_LENGTH.getDefault())); + /** + * For protect partition meta. The max allowed length of (sub)partition name in metadb is 64 + * , but subpartName = partName+subpartTempName, so the max allowed length of partition name + * should be 32. 
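+ * (e.g. a configured value of 48 is clamped to 32 by the check below, while a negative
+ * value falls back to the declared default.)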
+ */ + if (newPartNameLength > 32) { + newPartNameLength = 32; + } + if (newPartNameLength < 0) { + newPartNameLength = Integer.valueOf(ConnectionParams.MAX_PARTITION_NAME_LENGTH.getDefault()); + } + maxPartitionNameLength = newPartNameLength; + + } + break; + case ConnectionProperties.ENABLE_HLL: + enableHll = parseValue(value, Boolean.class, true); + break; + case ConnectionProperties.ENABLE_TRX_EVENT_LOG: + enableTrxEventLog = parseValue(value, Boolean.class, true); + break; + case ConnectionProperties.ENABLE_TRX_DEBUG_MODE: + enableTrxDebugMode = parseValue(value, Boolean.class, false); + break; + case ConnectionProperties.INSTANCE_READ_ONLY: + instanceReadOnly = parseValue(value, Boolean.class, false); + break; + case ConnectionProperties.MIN_SNAPSHOT_KEEP_TIME: + minSnapshotKeepTime = parseValue(value, Integer.class, 5 * 60 * 1000); + break; + case ConnectionProperties.FORBID_AUTO_COMMIT_TRX: + forbidAutoCommitTrx = parseValue(value, Boolean.class, false); + break; + case ConnectionProperties.MAPPING_TO_MYSQL_ERROR_CODE: + errorCodeMapping = initErrorCodeMapping(value); + break; + case ConnectionProperties.PRUNING_TIME_WARNING_THRESHOLD: + pruningTimeWarningThreshold = parseValue(value, Long.class, 500L); + break; + case ConnectionProperties.ENABLE_PRUNING_IN: + enablePruningIn = parseValue(value, Boolean.class, true); + break; + case ConnectionProperties.ENABLE_PRUNING_IN_DML: + enablePruningInDml = parseValue(value, Boolean.class, true); + break; + case ConnectionProperties.ENABLE_MQ_CACHE_COST_BY_THREAD: + enableMQCacheByThread = parseValue(value, Boolean.class, true); + break; + case ConnectionProperties.ENABLE_USE_KEY_FOR_ALL_LOCAL_INDEX: + enableUseKeyForAllLocalIndex = parseValue(value, Boolean.class, false); + break; + case TddlConstants.BLACK_LIST_CONF: + String blockLists = parseValue(value, String.class, ""); + List tempBlackList = new ArrayList<>(); + if (StringUtils.isNotBlank(blockLists)) { + String[] blockListArr = blockLists.split(","); + for (String blockList : blockListArr) { + if (StringUtils.isNotBlank(blockList)) { + tempBlackList.add(blockList.toLowerCase(Locale.ROOT)); + } + } + } + blackListConf = tempBlackList; + break; default: + FileConfig.getInstance().loadValue(logger, key, value); break; } } @@ -337,8 +464,24 @@ public int getXprotoTcpAging() { parseValue(ConnectionParams.AUTO_PARTITION_PARTITIONS.getDefault(), Long.class, 64L); private volatile long autoPartitionPartitions = autoPartitionPartitionsDefault; - public long getAutoPartitionPartitions() { - return autoPartitionPartitions; + private static final long autoPartitionCciPartitionsDefault = + parseValue(ConnectionParams.COLUMNAR_DEFAULT_PARTITIONS.getDefault(), Long.class, 64L); + private volatile long autoPartitionCciPartitions = autoPartitionCciPartitionsDefault; + + private static final float blockCacheMemoryFactorDefault = + parseValue(ConnectionParams.BLOCK_CACHE_MEMORY_SIZE_FACTOR.getDefault(), Float.class, 0.6f); + private volatile float blockCacheMemoryFactor = blockCacheMemoryFactorDefault; + + public float getBlockCacheMemoryFactor() { + return blockCacheMemoryFactor; + } + + public long getAutoPartitionPartitions(boolean isColumnar) { + return isColumnar ? 
autoPartitionCciPartitions : autoPartitionPartitions; + } + + public long getAutoPartitionCciPartitions() { + return autoPartitionCciPartitions; } private volatile int delayThreshold = 3; @@ -366,6 +509,12 @@ public long getOptimizerAlertLogInterval() { return optimizerAlertLogInterval; } + private volatile int tpSlowAlertThreshold = 10; + + public int getTpSlowAlertThreshold() { + return tpSlowAlertThreshold; + } + private volatile int busyThreshold = 100; public int getBusyThreshold() { @@ -476,12 +625,43 @@ public boolean useJdkDefaultSer() { return useJdkDefaultSer; } + private volatile boolean enableXResultDecimal64 = false; + + public boolean enableXResultDecimal64() { + return enableXResultDecimal64; + } + + private volatile boolean enableColumnarDecimal64 = true; + + public boolean enableColumnarDecimal64() { + return enableColumnarDecimal64; + } + private volatile boolean enableOrOpt = true; public boolean useOrOpt() { return enableOrOpt; } + private volatile boolean enableHll = true; + + public boolean enableHll() { + return enableHll; + } + + private volatile boolean enableMQCacheByThread = true; + + public boolean isEnableMQCacheByThread() { + return enableMQCacheByThread; + } + + private volatile int inDegradationNum = + parseValue(ConnectionParams.STATISTIC_IN_DEGRADATION_NUMBER.getDefault(), Integer.class, 100); + + public int getInDegradationNum() { + return inDegradationNum; + } + private static final int maxSessionPreparedStmtCountDefault = parseValue(ConnectionParams.MAX_SESSION_PREPARED_STMT_COUNT.getDefault(), Integer.class, 256); @@ -500,7 +680,8 @@ public boolean isEnableAutoUseRangeForTimeIndex() { } private static final String DEFAULT_PASSWORD_CHECK_PATTERN_STR = "^[0-9A-Za-z!@#$%^&*()_+=-]{6,32}$"; - private static final Pattern DEFAULT_PASSWORD_CHECK_PATTERN = Pattern.compile(DEFAULT_PASSWORD_CHECK_PATTERN_STR); + private static final Pattern DEFAULT_PASSWORD_CHECK_PATTERN = + Pattern.compile(DEFAULT_PASSWORD_CHECK_PATTERN_STR); private volatile Pattern passwordCheckPattern = DEFAULT_PASSWORD_CHECK_PATTERN; @@ -596,6 +777,12 @@ public boolean isDatabaseDefaultSingle() { return databaseDefaultSingle; } + private volatile boolean compatibleCharsetVariables = false; + + public boolean isCompatibleCharsetVariables() { + return compatibleCharsetVariables; + } + private volatile ConfigDataMode.LearnerMode learnerMode = ConfigDataMode.LearnerMode.ONLY_READ; public ConfigDataMode.LearnerMode learnerMode() { @@ -608,6 +795,191 @@ public boolean isEnable2pcOpt() { return enable2pcOpt; } + //--------------- the followed setting is for test ------------------- + private boolean supportSingleDbMultiTbs = false; + private boolean supportRemoveDdl = false; + private boolean supportDropAutoSeq = false; + private boolean allowSimpleSequence = false; + + public boolean isSupportSingleDbMultiTbs() { + return supportSingleDbMultiTbs; + } + + public void setSupportSingleDbMultiTbs(boolean supportSingleDbMultiTbs) { + this.supportSingleDbMultiTbs = supportSingleDbMultiTbs; + } + + public boolean isSupportRemoveDdl() { + return supportRemoveDdl; + } + + public void setSupportRemoveDdl(boolean supportRemoveDdl) { + this.supportRemoveDdl = supportRemoveDdl; + } + + public boolean isSupportDropAutoSeq() { + return supportDropAutoSeq; + } + + public void setSupportDropAutoSeq(boolean supportDropAutoSeq) { + this.supportDropAutoSeq = supportDropAutoSeq; + } + + public boolean isAllowSimpleSequence() { + return allowSimpleSequence; + } + + public void setAllowSimpleSequence(boolean 
allowSimpleSequence) { + this.allowSimpleSequence = allowSimpleSequence; + } + + private volatile int trxLogMethod = 0; + + public int getTrxLogMethod() { + return trxLogMethod; + } + + private volatile long trxLogCleanInterval = 30; + + public long getTrxLogCleanInterval() { + return trxLogCleanInterval; + } + + private volatile boolean skipLegacyLogTableClean = false; + + public boolean isSkipLegacyLogTableClean() { + return skipLegacyLogTableClean; + } + + private volatile int warmUpDbParallelism = 1; + + public int getWarmUpDbParallelism() { + return warmUpDbParallelism < 0 ? 1 : warmUpDbParallelism; + } + + private String columnarOssDirectory; + + public String getColumnarOssDirectory() { + return columnarOssDirectory; + } + + public void setColumnarOssDirectory(String columnarOssDirectory) { + this.columnarOssDirectory = columnarOssDirectory; + } + + /** + * Default 60s. + */ + private volatile long warmUpDbInterval = 60; + + public long getWarmUpDbInterval() { + return warmUpDbInterval; + } + + public int maxPartitionNameLength = Integer.valueOf(ConnectionParams.MAX_PARTITION_NAME_LENGTH.getDefault()); + + public int getMaxPartitionNameLength() { + return maxPartitionNameLength; + } + + private volatile int maxConnections = 20000; + + public int getMaxConnections() { + return maxConnections; + } + + private volatile int maxAllowedPacket = 16 * 1024 * 1024; + + public int getMaxAllowedPacket() { + return maxAllowedPacket; + } + + private volatile boolean enableTrxEventLog = true; + + public boolean isEnableTrxEventLog() { + return enableTrxEventLog; + } + + private volatile boolean enableTrxDebugMode = false; + + public boolean isEnableTrxDebugMode() { + return enableTrxDebugMode; + } + + private volatile int phyiscalMdlWaitTimeout = 15; + + public int getPhyiscalMdlWaitTimeout() { + return phyiscalMdlWaitTimeout; + } + + private volatile boolean instanceReadOnly = false; + + public boolean isInstanceReadOnly() { + return instanceReadOnly; + } + + // 5 min. 
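+ // (i.e. 300000 ms; updated at runtime through the MIN_SNAPSHOT_KEEP_TIME case in loadValue above)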
+ private volatile long minSnapshotKeepTime = 5 * 60 * 1000; + + public long getMinSnapshotKeepTime() { + return minSnapshotKeepTime; + } + + private volatile boolean forbidAutoCommitTrx = false; + + private volatile Map errorCodeMapping = new HashMap<>(); + + public boolean isForbidAutoCommitTrx() { + return forbidAutoCommitTrx; + } + + public Map getErrorCodeMapping() { + return errorCodeMapping; + } + + private Map initErrorCodeMapping(String mapping) { + if (TStringUtil.isNotBlank(mapping)) { + try { + return JSON.parseObject(mapping, new TypeReference>() { + }, Feature.IgnoreAutoType); + } catch (Exception ignored) { + } + } + return new HashMap<>(); + } + + private boolean enableUseKeyForAllLocalIndex = + Boolean.valueOf(ConnectionParams.ENABLE_USE_KEY_FOR_ALL_LOCAL_INDEX.getDefault()); + + public boolean isEnableUseKeyForAllLocalIndex() { + return enableUseKeyForAllLocalIndex; + } + + // pruning warning threshold in microsecond + private volatile long pruningTimeWarningThreshold = 500; + + public long getPruningTimeWarningThreshold() { + return pruningTimeWarningThreshold; + } + + private volatile boolean enablePruningIn = true; + + private volatile boolean enablePruningInDml = true; + + public boolean isEnablePruningIn() { + return enablePruningIn; + } + + public boolean isEnablePruningInDml() { + return enablePruningInDml; + } + + private volatile List blackListConf = new ArrayList<>(); + + public List getBlacklistConf() { + return blackListConf; + } + public static T parseValue(String value, Class type, T defaultValue) { if (value == null) { return defaultValue; diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/FileConfig.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/FileConfig.java new file mode 100644 index 000000000..dfe901eae --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/FileConfig.java @@ -0,0 +1,137 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.common.properties; + +import com.alibaba.polardbx.common.oss.filesystem.cache.CacheConfig; +import com.alibaba.polardbx.common.oss.filesystem.cache.FileMergeCacheConfig; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.common.utils.thread.ThreadCpuStatUtil; +import io.airlift.slice.DataSize; +import io.airlift.slice.Duration; + +import java.nio.file.Path; +import java.nio.file.Paths; + +import static com.alibaba.polardbx.common.properties.ConnectionProperties.MPP_SPILL_PATHS; +import static com.alibaba.polardbx.common.properties.DynamicConfig.parseValue; +import static io.airlift.slice.DataSize.Unit.GIGABYTE; +import static java.util.concurrent.TimeUnit.DAYS; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +public class FileConfig { + + private static final Logger log = LoggerFactory.getLogger(FileConfig.class); + + private static final String TEMP_DIR_NAME = "temp"; + + private static final String CACHE_DIR_NAME = "cache"; + + private Path rootPath; + + // root path of spillers + private Path spillerTempPath; + // root path of spillers + private Path spillerCachePath; + + private final SpillConfig spillConfig = new SpillConfig(); + private final CacheConfig cacheConfig = new CacheConfig(); + private final FileMergeCacheConfig mergeCacheConfig = new FileMergeCacheConfig(); + + private FileConfig() { + initDirs(Paths.get("../spill")); + } + + private void initDirs(Path spillerRootPath) { + this.rootPath = spillerRootPath; + // init temp paths + this.spillerTempPath = Paths.get(spillerRootPath.toFile().getAbsolutePath(), TEMP_DIR_NAME); + log.info("init SpillerManager with temp path:" + spillerTempPath.toFile().getAbsolutePath()); + this.spillerCachePath = Paths.get(spillerRootPath.toFile().getAbsolutePath(), CACHE_DIR_NAME); + log.info("init SpillerManager with cache path:" + spillerCachePath.toFile().getAbsolutePath()); + spillerRootPath.toFile().mkdirs(); + spillerTempPath.toFile().mkdirs(); + spillerCachePath.toFile().mkdirs(); + this.cacheConfig.setBaseDirectory(spillerCachePath.toUri()); + } + + public Path getRootPath() { + return rootPath; + } + + public Path getSpillerTempPath() { + return spillerTempPath; + } + + public CacheConfig getCacheConfig() { + return cacheConfig; + } + + public FileMergeCacheConfig getMergeCacheConfig() { + return mergeCacheConfig; + } + + public SpillConfig getSpillConfig() { + return spillConfig; + } + + public void loadValue(org.slf4j.Logger logger, String key, String value) { + if (key != null && value != null) { + switch (key.toUpperCase()) { + case ConnectionProperties.OSS_FS_CACHE_TTL: + mergeCacheConfig.setCacheTtl(new Duration(parseValue(value, Long.class, 2L), DAYS)); + break; + case ConnectionProperties.OSS_FS_MAX_CACHED_ENTRIES: + mergeCacheConfig.setMaxCachedEntries(parseValue(value, Integer.class, 2048)); + break; + case ConnectionProperties.OSS_FS_ENABLE_CACHED: + mergeCacheConfig.setEnableCache(parseValue(value, Boolean.class, true)); + break; + case ConnectionProperties.OSS_FS_MAX_CACHED_GB: + mergeCacheConfig.setMaxInDiskCacheSize(new DataSize(parseValue(value, Integer.class, 100), GIGABYTE)); + break; + case ConnectionProperties.OSS_FS_USE_BYTES_CACHE: + mergeCacheConfig.setUseByteCache(parseValue(value, Boolean.class, false)); + break; + case ConnectionProperties.OSS_FS_MEMORY_RATIO_OF_BYTES_CACHE: + mergeCacheConfig.setMemoryRatioOfBytesCache(parseValue(value, Double.class, 0.3d)); + 
break; + case ConnectionProperties.OSS_FS_VALIDATION_ENABLE: + cacheConfig.setValidationEnabled(parseValue(value, Boolean.class, false)); + break; + case ConnectionProperties.OSS_FS_CACHED_FLUSH_THREAD_NUM: + cacheConfig.setFlushCacheThreadNum(parseValue(value, Integer.class, ThreadCpuStatUtil.NUM_CORES)); + break; + case MPP_SPILL_PATHS: + initDirs(Paths.get(value)); + break; + default: + spillConfig.loadValue(logger, key, value); + break; + } + } + } + + private static final FileConfig instance = new FileConfig(); + + public static FileConfig getInstance() { + return instance; + } + +} + + diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/MppConfig.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/MppConfig.java index 4e2fa9a35..27871e17f 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/MppConfig.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/MppConfig.java @@ -16,7 +16,6 @@ package com.alibaba.polardbx.common.properties; -import com.alibaba.polardbx.common.utils.thread.ThreadCpuStatUtil; import com.google.common.base.Splitter; import com.google.common.collect.ImmutableList; import org.slf4j.Logger; @@ -29,7 +28,6 @@ import static com.alibaba.polardbx.common.properties.ConnectionProperties.MPP_ALLOCATOR_SIZE; import static com.alibaba.polardbx.common.properties.ConnectionProperties.MPP_AP_PRIORITY; -import static com.alibaba.polardbx.common.properties.ConnectionProperties.MPP_AVAILABLE_SPILL_SPACE_THRESHOLD; import static com.alibaba.polardbx.common.properties.ConnectionProperties.MPP_CLUSTER_NAME; import static com.alibaba.polardbx.common.properties.ConnectionProperties.MPP_CPU_CFS_MAX_QUOTA; import static com.alibaba.polardbx.common.properties.ConnectionProperties.MPP_CPU_CFS_MIN_QUOTA; @@ -55,10 +53,6 @@ import static com.alibaba.polardbx.common.properties.ConnectionProperties.MPP_LOW_PRIORITY_ENABLED; import static com.alibaba.polardbx.common.properties.ConnectionProperties.MPP_MAX_QUERY_EXPIRED_RESERVETION_TIME; import static com.alibaba.polardbx.common.properties.ConnectionProperties.MPP_MAX_QUERY_HISTORY; -import static com.alibaba.polardbx.common.properties.ConnectionProperties.MPP_MAX_QUERY_SPILL_SPACE_THRESHOLD; -import static com.alibaba.polardbx.common.properties.ConnectionProperties.MPP_MAX_SPILL_FD_THRESHOLD; -import static com.alibaba.polardbx.common.properties.ConnectionProperties.MPP_MAX_SPILL_SPACE_THRESHOLD; -import static com.alibaba.polardbx.common.properties.ConnectionProperties.MPP_MAX_SPILL_THREADS; import static com.alibaba.polardbx.common.properties.ConnectionProperties.MPP_MAX_WORKER_THREAD_SIZE; import static com.alibaba.polardbx.common.properties.ConnectionProperties.MPP_MEMORY_REVOKING_TARGET; import static com.alibaba.polardbx.common.properties.ConnectionProperties.MPP_MEMORY_REVOKING_THRESHOLD; @@ -268,9 +262,6 @@ public void loadValue(Logger logger, String key, String value) { httpClientMaxConnectionsPerServer = parseValue(value, Integer.class, DEFAULT_MPP_HTTP_CLIENT_MAX_CONNECTIONS_PER_SERVER); break; - case MPP_MAX_SPILL_THREADS: - maxSpillThreads = parseValue(value, Integer.class, DEFAULT_MAX_SPILL_THREADS); - break; case MPP_GLOBAL_MEMORY_LIMIT_RATIO: globalMemoryLimitRatio = parseValue(value, Double.class, DEFAULT_MPP_GLOBAL_MEMORY_LIMIT_RATIO); break; @@ -283,18 +274,7 @@ public void loadValue(Logger logger, String key, String value) { case MPP_CLUSTER_NAME: defaultCluster = parseValue(value, String.class, 
DEFAULT_MPP_CLUSTER_NAME); break; - case MPP_MAX_SPILL_FD_THRESHOLD: - maxSpillFdThreshold = parseValue(value, Integer.class, DEFAULT_MAX_SPILL_FD_THRESHOLD); - break; - case MPP_MAX_SPILL_SPACE_THRESHOLD: - maxSpillSpaceThreshold = parseValue(value, Double.class, DEFAULT_MAX_SPILL_SPACE_THRESHOLD); - break; - case MPP_AVAILABLE_SPILL_SPACE_THRESHOLD: - maxAvaliableSpaceThreshold = parseValue(value, Double.class, DEFAULT_AVALIABLE_SPACE_THRESHOLD); - break; - case MPP_MAX_QUERY_SPILL_SPACE_THRESHOLD: - maxQuerySpillSpaceThreshold = parseValue(value, Double.class, DEFAULT_MAX_QUERY_SPILL_SPACE_THRESHOLD); - break; + case MPP_SPILL_PATHS: List spillPathsSplit = ImmutableList.copyOf( Splitter.on(",").trimResults().omitEmptyStrings().split(value)); @@ -669,13 +649,6 @@ public int getTableScanDsMaxSize() { return tablescanDsMaxSize; } - private static final int DEFAULT_MAX_SPILL_THREADS = ThreadCpuStatUtil.NUM_CORES; - private int maxSpillThreads = DEFAULT_MAX_SPILL_THREADS; - - public int getMaxSpillThreads() { - return maxSpillThreads; - } - private static final double DEFAULT_MPP_GLOBAL_MEMORY_LIMIT_RATIO = 1.0; private double globalMemoryLimitRatio = DEFAULT_MPP_GLOBAL_MEMORY_LIMIT_RATIO; @@ -704,34 +677,6 @@ public String getDefaultCluster() { return defaultCluster; } - private static final int DEFAULT_MAX_SPILL_FD_THRESHOLD = 10000; - private int maxSpillFdThreshold = DEFAULT_MAX_SPILL_FD_THRESHOLD; - - public int getMaxSpillFdThreshold() { - return maxSpillFdThreshold; - } - - private static final double DEFAULT_MAX_SPILL_SPACE_THRESHOLD = 0.1; - private double maxSpillSpaceThreshold = DEFAULT_MAX_SPILL_SPACE_THRESHOLD; - - public double getMaxSpillSpaceThreshold() { - return maxSpillSpaceThreshold; - } - - private static final double DEFAULT_AVALIABLE_SPACE_THRESHOLD = 0.9; - private double maxAvaliableSpaceThreshold = DEFAULT_AVALIABLE_SPACE_THRESHOLD; - - public double getAvaliableSpillSpaceThreshold() { - return maxAvaliableSpaceThreshold; - } - - private static final double DEFAULT_MAX_QUERY_SPILL_SPACE_THRESHOLD = 0.3; - private double maxQuerySpillSpaceThreshold = DEFAULT_MAX_QUERY_SPILL_SPACE_THRESHOLD; - - public double getMaxQuerySpillSpaceThreshold() { - return maxQuerySpillSpaceThreshold; - } - private static final List DEFAULT_SPILL_PATHS = initDefaultPathList(); private static List initDefaultPathList() { diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/ParamManager.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/ParamManager.java index bc398f052..ee1ed66ba 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/ParamManager.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/ParamManager.java @@ -336,4 +336,17 @@ public static void setDurationVal(Map props, DurationConfigParam public Map getProps() { return props; } + + /** + * Get paramValue if exists, otherwise return {@param defaultValue} + * (instead of returning {@link ConfigParam#getDefault()}) + */ + public V getWithDefault(ConfigParam param, Class valueType, V defaultVal) { + Object v; + if ((v = getProps().get(param.getName())) != null) { + return DynamicConfig.parseValue(v.toString(), valueType, defaultVal); + } else { + return defaultVal; + } + } } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/SpillConfig.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/SpillConfig.java new file mode 100644 index 000000000..649fb8ae0 --- /dev/null +++ 
b/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/SpillConfig.java @@ -0,0 +1,86 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.common.properties; + +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.common.utils.thread.ThreadCpuStatUtil; +import io.airlift.slice.DataSize; + +import static com.alibaba.polardbx.common.properties.ConnectionProperties.MPP_AVAILABLE_SPILL_SPACE_THRESHOLD; +import static com.alibaba.polardbx.common.properties.ConnectionProperties.MAX_QUERY_SPILL_SPACE_THRESHOLD; +import static com.alibaba.polardbx.common.properties.ConnectionProperties.MPP_MAX_SPILL_FD_THRESHOLD; +import static com.alibaba.polardbx.common.properties.ConnectionProperties.MAX_SPILL_SPACE_THRESHOLD; +import static com.alibaba.polardbx.common.properties.ConnectionProperties.MPP_MAX_SPILL_THREADS; +import static com.alibaba.polardbx.common.properties.DynamicConfig.parseValue; +import static io.airlift.slice.DataSize.Unit.GIGABYTE; + +public class SpillConfig { + + private static final Logger log = LoggerFactory.getLogger(SpillConfig.class); + + private int maxSpillThreads = ThreadCpuStatUtil.NUM_CORES; + private int maxSpillFdThreshold = 10000; + private DataSize maxSpillSpaceThreshold = new DataSize(200, GIGABYTE); + private DataSize maxQuerySpillSpaceThreshold = new DataSize(100, GIGABYTE); + private double maxAvaliableSpaceThreshold = 0.9; + + public void loadValue(org.slf4j.Logger logger, String key, String value) { + if (key != null && value != null) { + switch (key.toUpperCase()) { + case MPP_MAX_SPILL_THREADS: + maxSpillThreads = parseValue(value, Integer.class, ThreadCpuStatUtil.NUM_CORES); + break; + case MPP_MAX_SPILL_FD_THRESHOLD: + maxSpillFdThreshold = parseValue(value, Integer.class, 10000); + break; + case MPP_AVAILABLE_SPILL_SPACE_THRESHOLD: + maxAvaliableSpaceThreshold = parseValue(value, Double.class, 0.9); + break; + case MAX_SPILL_SPACE_THRESHOLD: + maxSpillSpaceThreshold = new DataSize(parseValue(value, Long.class, 200L), GIGABYTE); + break; + case MAX_QUERY_SPILL_SPACE_THRESHOLD: + maxQuerySpillSpaceThreshold = new DataSize(parseValue(value, Long.class, 100L), GIGABYTE); + break; + } + } + } + + public double getAvaliableSpillSpaceThreshold() { + return maxAvaliableSpaceThreshold; + } + + public DataSize getMaxQuerySpillSpaceThreshold() { + return maxQuerySpillSpaceThreshold; + } + + public DataSize getMaxSpillSpaceThreshold() { + return maxSpillSpaceThreshold; + } + + public int getMaxSpillFdThreshold() { + return maxSpillFdThreshold; + } + + public int getMaxSpillThreads() { + return maxSpillThreads; + } + +} + + diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/SystemPropertiesHelper.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/SystemPropertiesHelper.java index 490d5d61d..ea3569545 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/SystemPropertiesHelper.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/properties/SystemPropertiesHelper.java @@ -16,16 +16,17 @@ package com.alibaba.polardbx.common.properties; +import com.google.common.collect.ImmutableSet; + import java.lang.reflect.Field; -import java.util.HashSet; -import java.util.Locale; +import java.util.ArrayList; +import java.util.List; import java.util.Properties; -import java.util.Set; public class SystemPropertiesHelper { public static final String INST_ROLE = "instRole"; - public static Set connectionProperties = null; + private static volatile ImmutableSet connectionProperties = null; public static void setPropertyValue(String propertyKey, Object propertyVal) { Properties properties = System.getProperties(); @@ -37,15 +38,22 @@ public static Object getPropertyValue(String propertyKey) { return properties.getProperty(propertyKey); } - public static Set getConnectionProperties() { + public static ImmutableSet getConnectionProperties() { if (connectionProperties == null) { - connectionProperties = new HashSet<>(); - for (Field field : ConnectionProperties.class.getDeclaredFields()) { - try { - String key = field.get(ConnectionProperties.class).toString(); - connectionProperties.add(key.toUpperCase(Locale.ROOT)); - } catch (IllegalAccessException ignored) { - + synchronized (SystemPropertiesHelper.class) { + if (connectionProperties == null) { + List propertyList = new ArrayList<>(); + for (Field field : ConnectionProperties.class.getDeclaredFields()) { + try { + String key = field.get(ConnectionProperties.class).toString(); + if (key != null) { + propertyList.add(key.toUpperCase()); + } + } catch (IllegalAccessException ignored) { + + } + } + connectionProperties = ImmutableSet.copyOf(propertyList); } } } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/trx/TrxLogTableConstants.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/trx/TrxLogTableConstants.java new file mode 100644 index 000000000..f5f24bd09 --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/trx/TrxLogTableConstants.java @@ -0,0 +1,222 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.common.trx; + +import com.alibaba.polardbx.common.constants.SystemTables; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.google.protobuf.ByteString; + +import java.security.MessageDigest; + +/** + * @author yaozhili + */ +public class TrxLogTableConstants { + public final static String SET_DISTRIBUTED_TRX_ID = "SET polarx_distributed_trx_id = %s"; + protected final static Logger logger = LoggerFactory.getLogger(TrxLogTableConstants.class); + + public static final String ALTER_GLOBAL_TX_TABLE_COMMIT_TS = + "ALTER TABLE `" + SystemTables.DRDS_GLOBAL_TX_LOG + "` " + + "ADD COLUMN `COMMIT_TS` BIGINT DEFAULT NULL, " + + "ADD COLUMN `PARTICIPANTS` BLOB DEFAULT NULL, " + + "ALGORITHM=INPLACE, LOCK=NONE"; + + public static final String ALTER_GLOBAL_TX_TABLE_TYPE_ENUMS = + "ALTER TABLE `" + SystemTables.DRDS_GLOBAL_TX_LOG + "` MODIFY COLUMN `TYPE` " + + "enum('TCC', 'XA', 'BED', 'TSO', 'HLC') NOT NULL"; + + public static final String GLOBAL_TX_TABLE_GET_PARTITIONS = + "SELECT `PARTITION_NAME`, `PARTITION_DESCRIPTION`, `TABLE_ROWS` FROM INFORMATION_SCHEMA.PARTITIONS\n" + + "WHERE TABLE_NAME = '" + SystemTables.DRDS_GLOBAL_TX_LOG + "'\n" + + "AND TABLE_SCHEMA = DATABASE()"; + + public static final String ALTER_GLOBAL_TX_TABLE_DROP_PARTITION_PREFIX = + "ALTER TABLE `" + SystemTables.DRDS_GLOBAL_TX_LOG + "` \n" + + "DROP PARTITION "; + + public static final String REDO_LOG_TABLE = SystemTables.DRDS_REDO_LOG; + + public static final String ALTER_REDO_LOG_TABLE = "ALTER TABLE `" + REDO_LOG_TABLE + "` " + + "ADD COLUMN `SCHEMA` VARCHAR(64) NULL AFTER `TXID`"; + + public static final String CREATE_REDO_LOG_TABLE = + "CREATE TABLE IF NOT EXISTS `" + REDO_LOG_TABLE + "` (\n" + + " `TXID` BIGINT NOT NULL,\n" + + " `SCHEMA` VARCHAR(64) NULL,\n" + + " `SEQ` INT(11) NOT NULL,\n" + + " `INFO` LONGTEXT NOT NULL,\n" + + " PRIMARY KEY (`TXID`, `SEQ`)\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4\n"; + + private static final String GLOBAL_TX_TABLE_MAX_PARTITION = "p_unlimited"; + + public static final String ALTER_GLOBAL_TX_TABLE_ADD_PARTITION = + "ALTER TABLE `" + SystemTables.DRDS_GLOBAL_TX_LOG + "` \n" + + "REORGANIZE PARTITION `" + GLOBAL_TX_TABLE_MAX_PARTITION + "` INTO \n" + + "(PARTITION `%s` VALUES LESS THAN (%d), PARTITION `" + GLOBAL_TX_TABLE_MAX_PARTITION + + "` VALUES LESS THAN MAXVALUE)"; + + public static final String ALTER_GLOBAL_TX_TABLE_ADD_MAX_PARTITION = + "ALTER TABLE `" + SystemTables.DRDS_GLOBAL_TX_LOG + "` \n" + + "PARTITION BY RANGE (`TXID`) (PARTITION `" + GLOBAL_TX_TABLE_MAX_PARTITION + + "` VALUES LESS THAN MAXVALUE)"; + + public static final String ALTER_GLOBAL_TX_TABLE_INIT_PARTITION = + "ALTER TABLE `" + SystemTables.DRDS_GLOBAL_TX_LOG + "` \n" + + "PARTITION BY RANGE (`TXID`) (PARTITION `" + GLOBAL_TX_TABLE_MAX_PARTITION + + "` VALUES LESS THAN MAXVALUE)"; + + public static final String CREATE_GLOBAL_TX_TABLE = + "CREATE TABLE IF NOT EXISTS `" + SystemTables.DRDS_GLOBAL_TX_LOG + "` (\n" + + " `TXID` BIGINT NOT NULL,\n" + + " `START_TIME` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,\n" + + " `TYPE` ENUM('TCC', 'XA', 'BED', 'TSO', 'HLC') NOT NULL,\n" + + " `STATE` ENUM('PREPARE', 'COMMIT', 'ROLLBACK', 'SUCCEED', 'ABORTED') NOT NULL,\n" + + " `RETRIES` INT(11) NOT NULL DEFAULT 0,\n" + + " `COMMIT_TS` BIGINT DEFAULT NULL,\n" + + " `PARTICIPANTS` BLOB DEFAULT NULL,\n" + + " `TIMEOUT` TIMESTAMP NULL,\n" + + " `SERVER_ADDR` VARCHAR(21) NOT NULL,\n" + + " `CONTEXT` TEXT 
NOT NULL,\n" + " `ERROR` TEXT NULL,\n" + " PRIMARY KEY (`TXID`)\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8\n" + "PARTITION BY RANGE (`TXID`) (PARTITION `%s` VALUES LESS THAN (%d), PARTITION `" + + GLOBAL_TX_TABLE_MAX_PARTITION + "` VALUES LESS THAN MAXVALUE)"; + + /** + * Trx Log Table V2. + * Column: TXID, COMMIT_TS, N_PARTICIPANTS + */ + public static final String CREATE_GLOBAL_TX_TABLE_V2 = + "CREATE TABLE IF NOT EXISTS " + SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE + + " (\n" + + " `TXID` BIGINT UNSIGNED NOT NULL,\n" + + " `TRX_SEQ` BIGINT UNSIGNED NOT NULL DEFAULT 18446744073709551615 COMMENT \"DEFAULT INVALID_SEQUENCE_NUMBER\",\n" + + " `N_PARTICIPANTS` INT UNSIGNED NOT NULL DEFAULT 0,\n" + + " PRIMARY KEY (`TXID`)\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4"; + + /** + * V2 tmp table. + */ + public static final String DROP_GLOBAL_TX_TABLE_V2_TMP = + "DROP TABLE IF EXISTS " + SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE_TMP; + public static final String CREATE_GLOBAL_TX_TABLE_V2_TMP = + "CREATE TABLE IF NOT EXISTS " + SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE_TMP + + " (\n" + + " `TXID` BIGINT UNSIGNED NOT NULL,\n" + + " `TRX_SEQ` BIGINT UNSIGNED NOT NULL DEFAULT 18446744073709551615 COMMENT \"DEFAULT INVALID_SEQUENCE_NUMBER\",\n" + + " `N_PARTICIPANTS` INT UNSIGNED NOT NULL DEFAULT 0,\n" + + " PRIMARY KEY (`TXID`)\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4"; + + /** + * Switch V2 table to ARCHIVE, tmp table to V2. + */ + public static final String SWITCH_GLOBAL_TX_TABLE_V2 = + "RENAME TABLE " + SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE + " TO " + + SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE_ARCHIVE + ", " + + SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE_TMP + " TO " + SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE; + + /** + * In case of unexpected situations, force renaming tmp to V2. + */ + public static final String FORCE_RENAME_GLOBAL_TX_TABLE_V2 = + "RENAME TABLE " + SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE_TMP + + " TO " + SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE; + + public static final String SHOW_ALL_GLOBAL_TX_TABLE_V2 = String.format( + "select table_name from information_schema.tables where table_schema = '%s' and table_name like '%s%%'", + SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE_DB, SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE_PREFIX); + + public static final String SELECT_MAX_TX_ID_IN_ARCHIVE = + "SELECT max(TXID) FROM " + SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE_ARCHIVE; + + /** + * Drop the archive table once it is no longer needed. + */ + public static final String DROP_GLOBAL_TX_TABLE_V2_ARCHIVE = + "DROP TABLE IF EXISTS " + SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE_ARCHIVE; + + /** + * Note: we want to use the same digest for all SQL statements across different physical DBs, so we treat the table name as a parameter. + */ + public static final String APPEND_TRX = + "INSERT INTO ? (`TXID`, `TYPE`, `STATE`, `SERVER_ADDR`, `CONTEXT`) VALUES (?, ?, ?, ?, ?)"; + + public static final String APPEND_TRX_WITH_TS = + "INSERT INTO ?
(`TXID`, `TYPE`, `STATE`, `SERVER_ADDR`, `CONTEXT`, `COMMIT_TS`) VALUES (?, ?, ?, ?, ?, ?)"; + + /** + * Column: TXID, START_TIME, TYPE, STATE, COMMIT_TS, N_PARTICIPANTS, SERVER_ADDR, EXTRA + */ + public static final String APPEND_TRX_V2 = + "INSERT INTO " + SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE + + " (`TXID`, `TRX_SEQ`) VALUES (?, ?)"; + + public static final String DELETE_ASYNC_COMMIT_TRX = + "DELETE FROM " + SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE + + " WHERE `TXID` = ?"; + + public static final String SELECT_BY_ID = + "SELECT `TYPE`, `STATE`, `SERVER_ADDR`, `CONTEXT`, `COMMIT_TS` FROM ? WHERE `TXID` = ?"; + + /** + * Column: TXID, `TRX_SEQ`, `N_PARTICIPANTS` + */ + public static final String SELECT_BY_ID_V2 = + "SELECT `TRX_SEQ`, `N_PARTICIPANTS` FROM " + SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE + " WHERE `TXID` = %s"; + + public static final String SELECT_BY_ID_V2_ARCHIVE = + "SELECT `TRX_SEQ`, `N_PARTICIPANTS` FROM " + SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE_ARCHIVE + + " WHERE `TXID` = %s"; + + public static final String SELECT_TABLE_ROWS_V2 = + "SELECT TABLE_ROWS FROM INFORMATION_SCHEMA.TABLES " + + "WHERE TABLE_SCHEMA = '" + SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE_DB + "'" + + " AND TABLE_NAME = '" + SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE_ARCHIVE_TABLE + "'"; + + public static final String EXISTS_GLOBAL_TX_TABLE_V2 = + "SELECT 1 FROM information_schema.tables WHERE table_schema = '" + + SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE_DB + + "' AND table_name = '" + SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE_ARCHIVE_TABLE + "'"; + + /** + * 18446744073709551611 is a magic snapshot sequence. Using it, prepared transactions become visible. + */ + public static final String RECOVER_TIMESTAMP_SQL = "SET innodb_snapshot_seq = 18446744073709551611"; + + public static ByteString APPEND_TRX_DIGEST; + public static ByteString APPEND_TRX_WITH_TS_DIGEST; + public static ByteString SELECT_BY_ID_DIGEST; + + static { + try { + final MessageDigest md5 = MessageDigest.getInstance("md5"); + TrxLogTableConstants.APPEND_TRX_DIGEST = + ByteString.copyFrom(md5.digest(TrxLogTableConstants.APPEND_TRX.getBytes())); + TrxLogTableConstants.APPEND_TRX_WITH_TS_DIGEST = + ByteString.copyFrom(md5.digest(TrxLogTableConstants.APPEND_TRX_WITH_TS.getBytes())); + TrxLogTableConstants.SELECT_BY_ID_DIGEST = + ByteString.copyFrom(md5.digest(TrxLogTableConstants.SELECT_BY_ID.getBytes())); + } catch (Exception e) { + logger.error(e); + } + } +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/type/TransactionType.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/type/TransactionType.java index f255b9eea..b76c5e543 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/type/TransactionType.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/type/TransactionType.java @@ -17,5 +17,13 @@ package com.alibaba.polardbx.common.type; public enum TransactionType { - XA, BED, TSO, TSO_RO, TSO_SSR, TSO_MPP, AUTO_COMMIT, ALLOW_READ, COBAR_STYLE, TSO_2PC_OPT + XA, + BED, + TSO, + TSO_RO, + TSO_SSR, + TSO_MPP, + AUTO_COMMIT, + ALLOW_READ, + COBAR_STYLE, + TSO_2PC_OPT } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/AsyncUtils.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/AsyncUtils.java index 1faf0e547..1804ad293 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/AsyncUtils.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/AsyncUtils.java @@ -20,6 +20,7 @@ import
com.alibaba.polardbx.common.utils.logger.LoggerFactory; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.concurrent.Future; @@ -27,7 +28,7 @@ public class AsyncUtils { private static final Logger logger = LoggerFactory.getLogger(AsyncUtils.class); - public static void waitAll(List futures) { + public static void waitAll(Collection futures) { List exceptions = new ArrayList<>(); for (Future future : futures) { diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/BigDecimalUtil.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/BigDecimalUtil.java index 8e6d1ea8c..2744edaa3 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/BigDecimalUtil.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/BigDecimalUtil.java @@ -16,7 +16,7 @@ package com.alibaba.polardbx.common.utils; -import org.apache.commons.lang3.math.NumberUtils; +import com.alibaba.polardbx.common.datatype.Decimal; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -28,7 +28,7 @@ public class BigDecimalUtil { private static final long BITS_PER_DIGIT = 3402; private static final int DIGIT_PER_INT = 9; private static final int DIGIT_PER_LONG = 18; - private static final int RADIX_10 = 1000000000; + private static final int RADIX_10 = 1_000_000_000; private static final BigInteger LONG_RADIX_10 = BigInteger.valueOf(0xde0b6b3a7640000L); private static final int MAX_MAG_LENGTH = Integer.MAX_VALUE / Integer.SIZE + 1; // (1 << 26) @@ -38,6 +38,8 @@ public class BigDecimalUtil { static final byte[] ZERO_BYTE = new byte[] {'0'}; private static final int SCHOENHAGE_BASE_CONVERSION_THRESHOLD = 20; + private static final byte[] MIN_INT128_BYTES = "-170141183460469231731687303715884105728".getBytes(); + /** * for XRowSet fastGetBytes only */ @@ -332,4 +334,154 @@ private static int[] trustedStripLeadingZeroInts(int val[]) { } return keep == 0 ? 
val : Arrays.copyOfRange(val, keep, vlen); } + + public static byte[] fastInt128ToBytes(long int128Low, long int128High) { + if (int128Low == 0 && int128High == 0) { + return ZERO_BYTE; + } + if (int128Low == 0 && int128High == Long.MIN_VALUE) { + return MIN_INT128_BYTES; + } + // will not overflow size of 39 + byte[] buffer = new byte[Decimal.MAX_128_BIT_PRECISION + 1]; + int bufCount = 0; + int nonZeroBufCount = 0; + + long tmpLow, tmpHigh; + if (int128High >= 0) { + tmpLow = int128Low; + tmpHigh = int128High; + } else { + tmpLow = ~int128Low + 1; + tmpHigh = ~int128High; + if (tmpLow == 0) { + tmpHigh += 1; + } + } + int x1 = (int) tmpLow; + int x2 = (int) (tmpLow >>> 32); + int x3 = (int) tmpHigh; + int x4 = (int) (tmpHigh >>> 32); + boolean skip; + long rightUnsigned = RADIX_10 & LONG_MASK; + while (!((x1 == 0) && (x2 == 0) && (x3 == 0) && (x4 == 0))) { + skip = true; + long quotient; + long remainder = 0; + if (x4 != 0) { + remainder = (x4 & LONG_MASK) + (remainder << 32); + quotient = remainder / rightUnsigned; + remainder %= rightUnsigned; + x4 = (int) quotient; + skip = false; + } + if (x3 != 0 || !skip) { + remainder = (x3 & LONG_MASK) + (remainder << 32); + quotient = remainder / rightUnsigned; + remainder %= rightUnsigned; + x3 = (int) quotient; + skip = false; + } + if (x2 != 0 || !skip) { + remainder = (x2 & LONG_MASK) + (remainder << 32); + quotient = remainder / rightUnsigned; + remainder %= rightUnsigned; + x2 = (int) quotient; + skip = false; + } + remainder = (x1 & LONG_MASK) + (remainder << 32); + quotient = remainder / rightUnsigned; + remainder %= rightUnsigned; + x1 = (int) quotient; + + for (int i = 0; i < DIGIT_PER_INT && bufCount < buffer.length; ++i) { + int digit = (((int) remainder) % 10); + remainder /= 10; + buffer[bufCount] = (byte) (digit + '0'); + ++bufCount; + if (digit != 0) { + nonZeroBufCount = bufCount; + } + } + } + + byte[] result; + if (int128High >= 0) { + result = new byte[nonZeroBufCount]; + for (int i = 0; i < nonZeroBufCount; ++i) { + result[i] = buffer[nonZeroBufCount - i - 1]; + } + } else { + result = new byte[nonZeroBufCount + 1]; + result[0] = '-'; + for (int i = 0; i < nonZeroBufCount; ++i) { + result[i + 1] = buffer[nonZeroBufCount - i - 1]; + } + } + + return result; + } + + /** + * Danger: ignoring overflow + */ + public static long decodeAsUnscaledLong(byte[] buf, int scale) { + int i = 0; + int len = buf.length; + boolean negative = false; + boolean inFracPart = false; + long limit = -Long.MAX_VALUE; + int fracPart = 0; + + if (len > 0) { + byte firstChar = buf[0]; + if (firstChar < '0') { // Possible leading "+" or "-" + if (firstChar == '-') { + negative = true; + limit = Long.MIN_VALUE; + } else if (firstChar != '+') { + throw new NumberFormatException(new String(buf)); + } + + if (len == 1) { // Cannot have lone "+" or "-" + throw new NumberFormatException(new String(buf)); + } + i++; + } + long multmin = limit / 10; + long result = 0; + while (i < len) { + byte b = buf[i++]; + if (b == '.') { + if (inFracPart) { + throw new NumberFormatException(new String(buf)); + } else { + inFracPart = true; + } + continue; + } + + int digit = b - '0'; + if (digit < 0 || result < multmin) { + throw new NumberFormatException(new String(buf)); + } + if (inFracPart) { + fracPart++; + } + + result *= 10; + if (result < limit + digit) { + throw new NumberFormatException(new String(buf)); + } + result -= digit; + } + while (fracPart < scale) { + result *= 10; + fracPart++; + } + return negative ? 
result : -result; + } else { + throw new NumberFormatException(new String(buf)); + } + } } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/GeneralUtil.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/GeneralUtil.java index 230d41fe1..39f122297 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/GeneralUtil.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/GeneralUtil.java @@ -20,7 +20,6 @@ import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.jdbc.ParameterContext; import com.alibaba.polardbx.common.jdbc.RawString; -import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import org.apache.commons.lang.BooleanUtils; @@ -41,13 +40,14 @@ import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.Set; +import java.util.concurrent.ThreadLocalRandom; import java.util.function.Supplier; -import java.util.regex.Pattern; public class GeneralUtil { - static Pattern pattern = Pattern.compile("\\d+$"); + private static String lsnErrorMessage = "Variable 'read_lsn' can't be set to the value of"; + private static String followDelayMessage = + "The follow exists delay, please use 'show storage' command to check latency"; public static boolean isEmpty(Map map) { return null == map || map.isEmpty(); @@ -249,6 +249,9 @@ public static RuntimeException nestedException(Throwable e) { return (TddlNestableRuntimeException) e; } + if (e.getMessage() != null && e.getMessage().contains(lsnErrorMessage)) { + return new TddlNestableRuntimeException(followDelayMessage); + } return new TddlNestableRuntimeException(e); } @@ -394,46 +397,6 @@ public static boolean startsWithIgnoreCaseAndWs(String searchIn, String searchFo public final static String LIMIT_KW = " LIMIT "; public final static String UNION_ALIAS = "__DRDS_ALIAS_T_"; - /** - * Use union all to reduce the amount of physical sql. - * - * @param num number of sub-queries - */ - public static String buildPhysicalQuery(int num, String sqlTemplateStr, String orderBy, String prefix, long limit) { - Preconditions.checkArgument(num > 0, "The number of tables must great than 0 when build UNION ALL sql"); - if (num == 1) { - if (StringUtils.isNotEmpty(prefix)) { - return prefix + sqlTemplateStr; - } else { - return sqlTemplateStr; - } - } - - StringBuilder builder = new StringBuilder(); - if (prefix != null) { - builder.append(prefix); - } - if (orderBy != null) { - builder.append("SELECT * FROM ("); - } - - builder.append("( ").append(sqlTemplateStr).append(" )"); - for (int i = 1; i < num; i++) { - builder.append(UNION_KW).append("( ").append(sqlTemplateStr).append(") "); - } - - // 最终生成的 UNION ALL SQL,需要在最外层添加 OrderBy - // 不能添加limit 和 offset, 有聚合函数的情况下会导致结果错误 - if (orderBy != null) { - builder.append(") ").append(UNION_ALIAS).append(" ").append(ORDERBY_KW).append(orderBy); - } - - if (limit > 0) { - builder.append(LIMIT_KW).append(limit); - } - return builder.toString(); - } - /** * Convert string value to boolean value. * TRUE/ON/1 will be converted to true. 
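Editorial note on the `GeneralUtil` hunk above: `nestedException(...)` now rewrites raw `read_lsn` errors from a lagging follower into a human-readable hint. Below is a minimal sketch of that behavior, assuming only what the hunk shows; the demo class and the literal error text are illustrative, not part of the patch.

```java
import com.alibaba.polardbx.common.utils.GeneralUtil;

import java.sql.SQLException;

// Hypothetical demo class, not part of the patch.
public class LsnErrorDemo {
    public static void main(String[] args) {
        // A raw follower error whose message contains the read_lsn marker.
        Throwable cause = new SQLException(
            "Variable 'read_lsn' can't be set to the value of '42'");
        // Per the hunk above, nestedException(...) detects the marker and
        // returns an exception carrying the follower-delay hint instead of
        // wrapping the raw message.
        RuntimeException wrapped = GeneralUtil.nestedException(cause);
        // Expected output:
        // The follow exists delay, please use 'show storage' command to check latency
        System.out.println(wrapped.getMessage());
    }
}
```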
@@ -544,6 +507,9 @@ public static Map decode(String statisticTraceInfo) throws IOExc line = line.trim(); if (line.startsWith("Catalog:")) { String actionLine = lineReader.readLine().trim(); + if (!actionLine.startsWith("Action:")) { + continue; + } line = removeIdxSuffix(line); String key = line + "\n" + actionLine; @@ -551,7 +517,7 @@ public static Map decode(String statisticTraceInfo) throws IOExc if (statisticResultLine.length() > "StatisticValue:".length()) { statisticResultLine = statisticResultLine.substring("StatisticValue:".length()); } - statisticTraceMap.put(key, statisticResultLine); + statisticTraceMap.put(key.toLowerCase(), statisticResultLine); } } return statisticTraceMap; @@ -602,27 +568,6 @@ public static void close(Closeable x) { } } - private static String buildFkReferenceName(Set existingSymbols, String prefix) { - StringBuilder indexName = new StringBuilder(prefix); - int tryTime = 0; - - while (existingSymbols.contains(indexName.toString().toUpperCase())) { - if (tryTime == 0) { - indexName.append("_").append(tryTime++); - continue; - } - int i = indexName.lastIndexOf("_"); - indexName.delete(i, indexName.length()); - indexName.append('_').append(tryTime++); - } - - String identifier = indexName.toString().toLowerCase(); - if (identifier.contains("`")) { - return "`" + identifier.replaceAll("`", "``") + "`"; - } - return "`" + identifier + "`"; - } - /** * remove the suffix of gsi name */ @@ -639,4 +584,16 @@ public static String removeIdxSuffix(String source) { return source; } } + + /** + * Returns a random string of the given length. + */ + public static String randomString(int length) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < length; i++) { + char ch = (char) (ThreadLocalRandom.current().nextInt('x' - 'a') + 'a'); + sb.append(ch); + } + return sb.toString(); + } } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/InstanceRole.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/InstanceRole.java index cab17a3a1..40c7d755a 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/InstanceRole.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/InstanceRole.java @@ -25,7 +25,17 @@ public enum InstanceRole { MASTER, /** - * Cloud traffic-offloading read-only instance + * Row-store read-only instance */ - SLAVE, + ROW_SLAVE, + + /** + * Column-store (columnar) read-only instance */ + COLUMNAR_SLAVE, + + /** + * Mock mode */ + FAST_MOCK } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/LockUtil.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/LockUtil.java index 7ab663a82..1015f54ba 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/LockUtil.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/LockUtil.java @@ -34,7 +34,7 @@ public class LockUtil { /** * Wrapped operations with specified lock_wait_timeout to avoid long MDL-wait. */ - public static void wrapWithLockWaitTimeout(Connection conn, int logWaitTimeout, Consumer consumer) + public static void wrapWithLockWaitTimeout(Connection conn, int lockWaitTimeout, Runnable task) throws SQLException { int originLockWaitTimeout = 0; try (Statement stmt = conn.createStatement(); @@ -43,13 +43,21 @@ public static void wrapWithLockWaitTimeout(Connection conn, int logWaitTimeout, originLockWaitTimeout = rs.getInt(1); } } - if (originLockWaitTimeout > 0) { + // Only decrease the lock wait timeout, not increase it.
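+ // (Editorial note, an inference rather than part of the patch: taking the
+ // smaller value lets this wrapper tighten lock_wait_timeout for the wrapped
+ // task while never loosening a stricter session setting; if the session is
+ // already at or below the requested timeout, control falls through to the
+ // error branch below.)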
+ if (originLockWaitTimeout > lockWaitTimeout) { try (Statement stmt = conn.createStatement()) { - stmt.execute("set lock_wait_timeout = " + logWaitTimeout); try { - consumer.accept(stmt); + stmt.execute("set lock_wait_timeout = " + lockWaitTimeout); + task.run(); } finally { - stmt.execute("set lock_wait_timeout = " + originLockWaitTimeout); + try { + stmt.execute("set lock_wait_timeout = " + originLockWaitTimeout); + } catch (Throwable t) { + if (conn instanceof IConnection) { + // Discard connection to prevent reuse. + ((IConnection) conn).discard(t); + } + } } } } else { @@ -57,4 +65,36 @@ public static void wrapWithLockWaitTimeout(Connection conn, int logWaitTimeout, "Get wrong lock_wait_timeout value: " + originLockWaitTimeout); } } + + public static void wrapWithInnodbLockWaitTimeout(Connection conn, int lockWaitTimeout, Runnable task) + throws SQLException { + int originLockWaitTimeout = 0; + try (Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("select @@innodb_lock_wait_timeout")) { + while (rs.next()) { + originLockWaitTimeout = rs.getInt(1); + } + } + // Only decrease the lock wait timeout, not increase it. + if (originLockWaitTimeout > lockWaitTimeout) { + try (Statement stmt = conn.createStatement()) { + try { + stmt.execute("set innodb_lock_wait_timeout = " + lockWaitTimeout); + task.run(); + } finally { + try { + stmt.execute("set innodb_lock_wait_timeout = " + originLockWaitTimeout); + } catch (Throwable t) { + if (conn instanceof IConnection) { + // Discard connection to prevent reuse. + ((IConnection) conn).discard(t); + } + } + } + } + } else { + throw new TddlRuntimeException(ErrorCode.ERR_TRANS_LOG, + "Get wrong innodb_lock_wait_timeout value: " + originLockWaitTimeout); + } + } } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/LoggerUtil.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/LoggerUtil.java index 76a7fe443..abb554610 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/LoggerUtil.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/LoggerUtil.java @@ -23,6 +23,8 @@ public class LoggerUtil { private static final Logger loggerSpm = LoggerFactory.getLogger("spm"); + // Statistics logger + public final static Logger statisticsLogger = LoggerFactory.getLogger("STATISTICS"); public static void buildMDC(String schemaName) { if (TStringUtil.isNotEmpty(schemaName)) { diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/MathUtils.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/MathUtils.java index f3d6cd798..b52d3e2e3 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/MathUtils.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/MathUtils.java @@ -34,4 +34,31 @@ public static long ceilMod(long x, long y) { return x - ceilDiv(x, y) * y; } + /** + * Overflow iff both arguments have the opposite sign of the result + */ + public static boolean longAddOverflow(long x, long y, long r) { + return ((x ^ r) & (y ^ r)) < 0; + } + + public static boolean longSubOverflow(long x, long y, long r) { + return ((x ^ y) & (x ^ r)) < 0; + } + + public static boolean longMultiplyOverflow(long x, long y, long r) { + long ax = Math.abs(x); + long ay = Math.abs(y); + if (((ax | ay) >>> 31 != 0)) { + // Some bits greater than 2^31 that might cause overflow + // Check the result using the divide operator + // and check for the special case of Long.MIN_VALUE * -1 + return ((y 
!= 0) && (r / y != x)) || + (x == Long.MIN_VALUE && y == -1); + } + return false; + } + + public static boolean isPowerOfTwo(int val) { + return (val & -val) == val; + } } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/PooledHttpHelper.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/PooledHttpHelper.java index 7bd1d18b6..eff6a7c57 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/PooledHttpHelper.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/PooledHttpHelper.java @@ -22,6 +22,7 @@ import org.apache.http.client.config.RequestConfig; import org.apache.http.client.entity.EntityBuilder; import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpDelete; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.protocol.HttpClientContext; diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/TStringUtil.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/TStringUtil.java index 0dd8c4790..5c0d3effb 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/TStringUtil.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/TStringUtil.java @@ -16,6 +16,7 @@ package com.alibaba.polardbx.common.utils; +import com.alibaba.polardbx.common.properties.DynamicConfig; import org.apache.commons.lang.StringUtils; import org.apache.commons.lang.math.NumberUtils; @@ -689,13 +690,22 @@ public static String addBacktick(String str) { return str; } + /** + * used for result set encoding + */ public static String javaEncoding(String encoding) { if (encoding.equalsIgnoreCase("utf8mb4")) { return "utf8"; } else if (encoding.equalsIgnoreCase("binary")) { - return "iso_8859_1"; + if (DynamicConfig.getInstance().isCompatibleCharsetVariables()) { + // compatible with MySQL's behavior + // resultSet encoding is binary, which means no conversion + return "utf8"; + } else { + // compatible with the old behavior + return "iso_8859_1"; + } } - return encoding; } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/XxhashUtils.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/XxhashUtils.java new file mode 100644 index 000000000..cbe9f0f2b --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/XxhashUtils.java @@ -0,0 +1,32 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.common.utils; + +public class XxhashUtils { + private static final long PRIME64_1 = 0x9E3779B185EBCA87L; + private static final long PRIME64_2 = 0xC2B2AE3D27D4EB4FL; + private static final long PRIME64_3 = 0x165667B19E3779F9L; + + public static long finalShuffle(long hash) { + hash ^= hash >>> 33; + hash *= PRIME64_2; + hash ^= hash >>> 29; + hash *= PRIME64_3; + hash ^= hash >>> 32; + return hash; + } +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/binlog/JsonConversion.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/binlog/JsonConversion.java new file mode 100644 index 000000000..0892360c6 --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/binlog/JsonConversion.java @@ -0,0 +1,522 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.common.utils.binlog; + +import com.alibaba.polardbx.common.type.MySQLStandardFieldType; + +import java.nio.charset.Charset; + +/** + * Converts the MySQL JSON binary representation into a readable string. + * + * @author agapple 2016-06-30 11:26:17 + * @since 1.0.22 + */ +public class JsonConversion { + + private static char[] digits = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}; + + // JSON TYPE + public static final int JSONB_TYPE_SMALL_OBJECT = 0x0; + public static final int JSONB_TYPE_LARGE_OBJECT = 0x1; + public static final int JSONB_TYPE_SMALL_ARRAY = 0x2; + public static final int JSONB_TYPE_LARGE_ARRAY = 0x3; + public static final int JSONB_TYPE_LITERAL = 0x4; + public static final int JSONB_TYPE_INT16 = 0x5; + public static final int JSONB_TYPE_UINT16 = 0x6; + public static final int JSONB_TYPE_INT32 = 0x7; + public static final int JSONB_TYPE_UINT32 = 0x8; + public static final int JSONB_TYPE_INT64 = 0x9; + public static final int JSONB_TYPE_UINT64 = 0xA; + public static final int JSONB_TYPE_DOUBLE = 0xB; + public static final int JSONB_TYPE_STRING = 0xC; + public static final int JSONB_TYPE_OPAQUE = 0xF; + public static final char JSONB_NULL_LITERAL = '\0'; + public static final char JSONB_TRUE_LITERAL = '\1'; + public static final char JSONB_FALSE_LITERAL = '\2'; + + /* + * The size of offset or size fields in the small and the large storage + * format for JSON objects and JSON arrays. + */ + public static final int SMALL_OFFSET_SIZE = 2; + public static final int LARGE_OFFSET_SIZE = 4; + + /* + * The size of key entries for objects when using the small storage format + * or the large storage format. In the small format it is 4 bytes (2 bytes + * for key length and 2 bytes for key offset). In the large format it is 6 + * (2 bytes for length, 4 bytes for offset). + */ + public static final int KEY_ENTRY_SIZE_SMALL = (2 + SMALL_OFFSET_SIZE); + public static final int KEY_ENTRY_SIZE_LARGE = (2 + LARGE_OFFSET_SIZE); + + /* + * The size of value entries for objects or arrays.
When using the small + * storage format, the entry size is 3 (1 byte for type, 2 bytes for + * offset). When using the large storage format, it is 5 (1 byte for type, 4 + * bytes for offset). + */ + public static final int VALUE_ENTRY_SIZE_SMALL = (1 + SMALL_OFFSET_SIZE); + public static final int VALUE_ENTRY_SIZE_LARGE = (1 + LARGE_OFFSET_SIZE); + + public static Json_Value parse_value(int type, LogBuffer buffer, long len, String charsetName) { + return parse_value(type, buffer, len, Charset.forName(charsetName)); + } + + public static Json_Value parse_value(int type, LogBuffer buffer, long len, Charset charset) { + buffer = buffer.duplicate(buffer.position(), (int) len); + switch (type) { + case JSONB_TYPE_SMALL_OBJECT: + return parse_array_or_object(Json_enum_type.OBJECT, buffer, len, false, charset); + case JSONB_TYPE_LARGE_OBJECT: + return parse_array_or_object(Json_enum_type.OBJECT, buffer, len, true, charset); + case JSONB_TYPE_SMALL_ARRAY: + return parse_array_or_object(Json_enum_type.ARRAY, buffer, len, false, charset); + case JSONB_TYPE_LARGE_ARRAY: + return parse_array_or_object(Json_enum_type.ARRAY, buffer, len, true, charset); + default: + return parse_scalar(type, buffer, len, charset); + } + } + + private static Json_Value parse_array_or_object(Json_enum_type type, LogBuffer buffer, long len, boolean large, + Charset charset) { + long offset_size = large ? LARGE_OFFSET_SIZE : SMALL_OFFSET_SIZE; + if (len < 2 * offset_size) { + throw new IllegalArgumentException("illegal json data"); + } + long element_count = read_offset_or_size(buffer, large); + long bytes = read_offset_or_size(buffer, large); + + if (bytes > len) { + throw new IllegalArgumentException("illegal json data"); + } + long header_size = 2 * offset_size; + if (type == Json_enum_type.OBJECT) { + header_size += element_count * (large ? KEY_ENTRY_SIZE_LARGE : KEY_ENTRY_SIZE_SMALL); + } + + header_size += element_count * (large ? VALUE_ENTRY_SIZE_LARGE : VALUE_ENTRY_SIZE_SMALL); + if (header_size > bytes) { + throw new IllegalArgumentException("illegal json data"); + } + return new Json_Value(type, buffer.rewind(), element_count, bytes, large); + } + + private static long read_offset_or_size(LogBuffer buffer, boolean large) { + return large ? 
buffer.getUint32() : buffer.getUint16(); + } + + private static Json_Value parse_scalar(int type, LogBuffer buffer, long len, Charset charset) { + switch (type) { + case JSONB_TYPE_LITERAL: + /* purecov: inspected */ + int data = buffer.getUint8(); + switch (data) { + case JSONB_NULL_LITERAL: + return new Json_Value(Json_enum_type.LITERAL_NULL); + case JSONB_TRUE_LITERAL: + return new Json_Value(Json_enum_type.LITERAL_TRUE); + case JSONB_FALSE_LITERAL: + return new Json_Value(Json_enum_type.LITERAL_FALSE); + default: + throw new IllegalArgumentException("illegal json data"); + } + case JSONB_TYPE_INT16: + return new Json_Value(Json_enum_type.INT, buffer.getInt16()); + case JSONB_TYPE_INT32: + return new Json_Value(Json_enum_type.INT, buffer.getInt32()); + case JSONB_TYPE_INT64: + return new Json_Value(Json_enum_type.INT, buffer.getLong64()); + case JSONB_TYPE_UINT16: + return new Json_Value(Json_enum_type.UINT, buffer.getUint16()); + case JSONB_TYPE_UINT32: + return new Json_Value(Json_enum_type.UINT, buffer.getUint32()); + case JSONB_TYPE_UINT64: + return new Json_Value(Json_enum_type.UINT, buffer.getUlong64()); + case JSONB_TYPE_DOUBLE: + return new Json_Value(Json_enum_type.DOUBLE, Double.valueOf(buffer.getDouble64())); + case JSONB_TYPE_STRING: + int max_bytes = (int) Math.min(len, 5); + long tlen = 0; + long str_len = 0; + long n = 0; + byte[] datas = buffer.getData(max_bytes); + for (int i = 0; i < max_bytes; i++) { + // get the next 7 bits of the length. + tlen |= (datas[i] & 0x7f) << (7 * i); + if ((datas[i] & 0x80) == 0) { + // The length shouldn't exceed 32 bits. + if (tlen > 4294967296L) { + throw new IllegalArgumentException("illegal json data"); + } + + // This was the last byte. Return successfully. + n = i + 1; + str_len = tlen; + break; + } + } + + if (len < n + str_len) { + throw new IllegalArgumentException("illegal json data"); + } + return new Json_Value(Json_enum_type.STRING, buffer.rewind() + .forward((int) n) + .getFixString((int) str_len, charset)); + case JSONB_TYPE_OPAQUE: + /* + * There should always be at least one byte, which tells the + * field type of the opaque value. + */ + // The type is encoded as a uint8 that maps to an + // enum_field_types. + int type_byte = buffer.getUint8(); + int position = buffer.position(); + // Then there's the length of the value. + int q_max_bytes = (int) Math.min(len - 1, 5); + long q_tlen = 0; + long q_str_len = 0; + long q_n = 0; + byte[] q_datas = buffer.getData(q_max_bytes); + for (int i = 0; i < q_max_bytes; i++) { + // get the next 7 bits of the length. + q_tlen |= (q_datas[i] & 0x7f) << (7 * i); + if ((q_datas[i] & 0x80) == 0) { + // The length shouldn't exceed 32 bits. + if (q_tlen > 4294967296L) { + throw new IllegalArgumentException("illegal json data"); + } + + // This was the last byte. Return successfully. 
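+ // (Editorial note: the length prefix is a little-endian base-128 varint;
+ // each byte contributes its low 7 bits, and a clear high bit ends it.)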
+ q_n = i + 1; + q_str_len = q_tlen; + break; + } + } + + if (q_str_len == 0 || len < q_n + q_str_len) { + throw new IllegalArgumentException("illegal json data"); + } + return new Json_Value(type_byte, buffer.position(position).forward((int) q_n), q_str_len); + default: + throw new IllegalArgumentException("illegal json data"); + } + } + + public static class Json_Value { + + Json_enum_type m_type; + int m_field_type; + LogBuffer m_data; + long m_element_count; + long m_length; + String m_string_value; + Number m_int_value; + double m_double_value; + boolean m_large; + + public Json_Value(Json_enum_type t) { + this.m_type = t; + } + + public Json_Value(Json_enum_type t, Number val) { + this.m_type = t; + if (t == Json_enum_type.DOUBLE) { + this.m_double_value = val.doubleValue(); + } else { + this.m_int_value = val; + } + } + + public Json_Value(Json_enum_type t, String value) { + this.m_type = t; + this.m_string_value = value; + } + + public Json_Value(int field_type, LogBuffer data, long bytes) { + this.m_type = Json_enum_type.OPAQUE; // field type not determined yet + this.m_field_type = field_type; + this.m_data = data; + this.m_length = bytes; + } + + public Json_Value(Json_enum_type t, LogBuffer data, long element_count, long bytes, boolean large) { + this.m_type = t; + this.m_data = data; + this.m_element_count = element_count; + this.m_length = bytes; + this.m_large = large; + } + + public String key(int i, Charset charset) { + m_data.rewind(); + int offset_size = m_large ? LARGE_OFFSET_SIZE : SMALL_OFFSET_SIZE; + int key_entry_size = m_large ? KEY_ENTRY_SIZE_LARGE : KEY_ENTRY_SIZE_SMALL; + int entry_offset = 2 * offset_size + key_entry_size * i; + // The offset of the key is the first part of the key + // entry. + m_data.forward(entry_offset); + long key_offset = read_offset_or_size(m_data, m_large); + // The length of the key is the second part of the + // entry, always two + // bytes. + long key_length = m_data.getUint16(); + return m_data.rewind().forward((int) key_offset).getFixString((int) key_length, charset); + } + + public Json_Value element(int i, Charset charset) { + m_data.rewind(); + int offset_size = m_large ? LARGE_OFFSET_SIZE : SMALL_OFFSET_SIZE; + int key_entry_size = m_large ? KEY_ENTRY_SIZE_LARGE : KEY_ENTRY_SIZE_SMALL; + int value_entry_size = m_large ?
VALUE_ENTRY_SIZE_LARGE : VALUE_ENTRY_SIZE_SMALL; + int first_entry_offset = 2 * offset_size; + if (m_type == Json_enum_type.OBJECT) { + first_entry_offset += m_element_count * key_entry_size; + } + int entry_offset = first_entry_offset + value_entry_size * i; + int type = m_data.forward(entry_offset).getUint8(); + if (type == JSONB_TYPE_INT16 || type == JSONB_TYPE_UINT16 || type == JSONB_TYPE_LITERAL + || (m_large && (type == JSONB_TYPE_INT32 || type == JSONB_TYPE_UINT32))) { + return parse_scalar(type, m_data, value_entry_size - 1, charset); + } + int value_offset = (int) read_offset_or_size(m_data, m_large); + return parse_value(type, m_data.rewind().forward(value_offset), (int) m_length - value_offset, charset); + } + + public StringBuilder toJsonString(StringBuilder buf, Charset charset) { + switch (m_type) { + case OBJECT: + buf.append("{"); + for (int i = 0; i < m_element_count; ++i) { + if (i > 0) { + buf.append(", "); + } + buf.append('"').append(key(i, charset)).append('"'); + buf.append(": "); + element(i, charset).toJsonString(buf, charset); + } + buf.append("}"); + break; + case ARRAY: + buf.append("["); + for (int i = 0; i < m_element_count; ++i) { + if (i > 0) { + buf.append(", "); + } + element(i, charset).toJsonString(buf, charset); + } + buf.append("]"); + break; + case DOUBLE: + buf.append(Double.valueOf(m_double_value).toString().toLowerCase()); + break; + case INT: + buf.append(m_int_value.toString().toLowerCase()); + break; + case UINT: + buf.append(m_int_value.toString().toLowerCase()); + break; + case LITERAL_FALSE: + buf.append("false"); + break; + case LITERAL_TRUE: + buf.append("true"); + break; + case LITERAL_NULL: + buf.append("null"); + break; + case OPAQUE: + String text = null; + if (m_field_type == MySQLStandardFieldType.MYSQL_TYPE_NEWDECIMAL.getId()) { + int precision = m_data.getInt8(); + int scale = m_data.getInt8(); + text = m_data.getDecimal(precision, scale).toPlainString(); + buf.append(text); + } else if (m_field_type == MySQLStandardFieldType.MYSQL_TYPE_TIME.getId()) { + long packed_value = m_data.getLong64(); + if (packed_value == 0) { + text = "00:00:00"; + } else { + long ultime = Math.abs(packed_value); + long intpart = ultime >> 24; + int frac = (int) (ultime % (1L << 24)); + // text = String.format("%s%02d:%02d:%02d", + // packed_value >= 0 ? "" : "-", + // (int) ((intpart >> 12) % (1 << 10)), + // (int) ((intpart >> 6) % (1 << 6)), + // (int) (intpart % (1 << 6))); + // text = text + "." 
+ usecondsToStr(frac, 6); + StringBuilder builder = new StringBuilder(17); + if (packed_value < 0) { + builder.append('-'); + } + + int d = (int) ((intpart >> 12) % (1 << 10)); + if (d > 100) { + builder.append(String.valueOf(d)); + } else { + appendNumber2(builder, d); + } + builder.append(':'); + appendNumber2(builder, (int) ((intpart >> 6) % (1 << 6))); + builder.append(':'); + appendNumber2(builder, (int) (intpart % (1 << 6))); + + builder.append('.').append(usecondsToStr(frac, 6)); + text = builder.toString(); + } + buf.append('"').append(text).append('"'); + } else if (m_field_type == MySQLStandardFieldType.MYSQL_TYPE_DATE.getId() + || m_field_type == MySQLStandardFieldType.MYSQL_TYPE_DATETIME.getId() + || m_field_type == MySQLStandardFieldType.MYSQL_TYPE_TIMESTAMP.getId()) { + long packed_value = m_data.getLong64(); + if (packed_value == 0) { + text = "0000-00-00 00:00:00"; + } else { + // The constructed timestamp only carries second precision + long ultime = Math.abs(packed_value); + long intpart = ultime >> 24; + int frac = (int) (ultime % (1L << 24)); + long ymd = intpart >> 17; + long ym = ymd >> 5; + long hms = intpart % (1 << 17); + // text = + // String.format("%04d-%02d-%02d %02d:%02d:%02d", + // (int) (ym / 13), + // (int) (ym % 13), + // (int) (ymd % (1 << 5)), + // (int) (hms >> 12), + // (int) ((hms >> 6) % (1 << 6)), + // (int) (hms % (1 << 6))); + StringBuilder builder = new StringBuilder(26); + appendNumber4(builder, (int) (ym / 13)); + builder.append('-'); + appendNumber2(builder, (int) (ym % 13)); + builder.append('-'); + appendNumber2(builder, (int) (ymd % (1 << 5))); + builder.append(' '); + appendNumber2(builder, (int) (hms >> 12)); + builder.append(':'); + appendNumber2(builder, (int) ((hms >> 6) % (1 << 6))); + builder.append(':'); + appendNumber2(builder, (int) (hms % (1 << 6))); + builder.append('.').append(usecondsToStr(frac, 6)); + text = builder.toString(); + } + buf.append('"').append(text).append('"'); + } else { + text = m_data.getFixString((int) m_length, charset); + buf.append('"').append(escapse(text)).append('"'); + } + + break; + case STRING: + buf.append('"').append(escapse(m_string_value)).append('"'); + break; + case ERROR: + throw new IllegalArgumentException("illegal json data"); + } + + return buf; + } + } + + private static StringBuilder escapse(String data) { + StringBuilder sb = new StringBuilder(data.length()); + int endIndex = data.length(); + for (int i = 0; i < endIndex; ++i) { + char c = data.charAt(i); + if (c == '"') { + sb.append("\\\""); + } else if (c == '\n') { + sb.append("\\n"); + } else if (c == '\r') { + sb.append("\\r"); + } else if (c == '\\') { + sb.append("\\\\"); + } else if (c == '\t') { + sb.append("\\t"); + } else if (c < 16) { + sb.append("\\u000"); + sb.append(Integer.toHexString(c)); + } else if (c < 32) { + sb.append("\\u00"); + sb.append(Integer.toHexString(c)); + } else if (c >= 0x7f && c <= 0xA0) { + sb.append("\\u00"); + sb.append(Integer.toHexString(c)); + } else { + sb.append(c); + } + } + return sb; + } + + public static enum Json_enum_type { + OBJECT, ARRAY, STRING, INT, UINT, DOUBLE, LITERAL_NULL, LITERAL_TRUE, LITERAL_FALSE, OPAQUE, ERROR + } + + private static void appendNumber2(StringBuilder builder, int d) { + if (d >= 10) { + builder.append(digits[(d / 10) % 10]).append(digits[d % 10]); + } else { + builder.append('0').append(digits[d]); + } + } + + private static void appendNumber3(StringBuilder builder, int d) { + if (d >= 100) { + builder.append(digits[d / 100]).append(digits[(d / 10) % 10]).append(digits[d % 10]); + } else { +
builder.append('0'); + appendNumber2(builder, d); + } + } + + private static void appendNumber4(StringBuilder builder, int d) { + if (d >= 1000) { + builder.append(digits[d / 1000]) + .append(digits[(d / 100) % 10]) + .append(digits[(d / 10) % 10]) + .append(digits[d % 10]); + } else { + builder.append('0'); + appendNumber3(builder, d); + } + } + + private static String usecondsToStr(int frac, int meta) { + String sec = String.valueOf(frac); + if (meta > 6) { + throw new IllegalArgumentException("unknown useconds meta : " + meta); + } + + if (sec.length() < 6) { + StringBuilder result = new StringBuilder(6); + int len = 6 - sec.length(); + for (; len > 0; len--) { + result.append('0'); + } + result.append(sec); + sec = result.toString(); + } + + return sec.substring(0, meta); + } +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/binlog/LogBuffer.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/binlog/LogBuffer.java new file mode 100644 index 000000000..1c90bbd0c --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/binlog/LogBuffer.java @@ -0,0 +1,1793 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.common.utils.binlog; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.BitSet; + +import org.apache.commons.compress.compressors.deflate.DeflateCompressorInputStream; +import org.apache.commons.io.IOUtils; + +/** + * TODO: Document Me!! + * + * @author Changyuan.lh + * @version 1.0 + */ +public class LogBuffer { + + static final BigDecimal DECIMAL_ZERO_1_SCALE = BigDecimal.valueOf(0, 1); + static final BigDecimal DECIMAL_ONE_1_SCALE = BigDecimal.valueOf(10, 1); + protected byte[] buffer; + + protected int origin, limit; + protected int position; + protected int semival; + + protected LogBuffer() { + } + + public LogBuffer(byte[] buffer, final int origin, final int limit) { + if (origin + limit > buffer.length) { + throw new IllegalArgumentException("capacity exceeded: " + (origin + limit)); + } + + this.buffer = buffer; + this.origin = origin; + this.position = origin; + this.limit = limit; + } + + /** + * Return n bytes in this buffer. + */ + public final LogBuffer duplicate(final int pos, final int len) { + if (pos + len > limit) { + throw new IllegalArgumentException("limit exceeded: " + (pos + len)); + } + + // XXX: Do a memory copy to avoid the buffer being modified. + final int off = origin + pos; + byte[] buf = Arrays.copyOfRange(buffer, off, off + len); + return new LogBuffer(buf, 0, len); + } + + /** + * Return next n bytes in this buffer.
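+ * The copy is backed by its own array, so changes to either buffer are not
+ * visible in the other.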
+ */ + public final LogBuffer duplicate(final int len) { + if (position + len > origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position + len - origin)); + } + + // XXX: Do momery copy avoid buffer modified. + final int end = position + len; + byte[] buf = Arrays.copyOfRange(buffer, position, end); + LogBuffer dupBuffer = new LogBuffer(buf, 0, len); + position = end; + return dupBuffer; + } + + /** + * Return next n bytes in this buffer. + */ + public final LogBuffer duplicate() { + // XXX: Do momery copy avoid buffer modified. + byte[] buf = Arrays.copyOfRange(buffer, origin, origin + limit); + return new LogBuffer(buf, 0, limit); + } + + /** + * Returns this buffer's capacity.
+ * + * @return The capacity of this buffer + */ + public final int capacity() { + return buffer.length; + } + + /** + * Returns this buffer's position.
+ * + * @return The position of this buffer + */ + public final int position() { + return position - origin; + } + + /** + * Sets this buffer's position. If the mark is defined and larger than the + * new position then it is discarded.
+ * + * @param newPosition The new position value; must be non-negative and no + * larger than the current limit + * @return This buffer + * @throws IllegalArgumentException If the preconditions on + * newPosition do not hold + */ + public final LogBuffer position(final int newPosition) { + if (newPosition > limit || newPosition < 0) { + throw new IllegalArgumentException("limit excceed: " + newPosition); + } + + this.position = origin + newPosition; + return this; + } + + /** + * Forwards this buffer's position. + * + * @param len The forward distance + * @return This buffer + */ + public final LogBuffer forward(final int len) { + if (position + len > origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position + len - origin)); + } + + this.position += len; + return this; + } + + /** + * Consume this buffer, moving origin and position. + * + * @param len The consume distance + * @return This buffer + */ + public final LogBuffer consume(final int len) { + if (limit > len) { + limit -= len; + origin += len; + position = origin; + return this; + } else if (limit == len) { + limit = 0; + origin = 0; + position = 0; + return this; + } else { + /* Should not happen. */ + throw new IllegalArgumentException("limit excceed: " + len); + } + } + + /** + * Rewinds this buffer. The position is set to zero. + * + * @return This buffer + */ + public final LogBuffer rewind() { + position = origin; + return this; + } + + /** + * Returns this buffer's limit.
+ * + * @return The limit of this buffer + */ + public final int limit() { + return limit; + } + + /** + * Sets this buffer's limit. If the position is larger than the new limit + * then it is set to the new limit. If the mark is defined and larger than + * the new limit then it is discarded.
+ * + * @param newLimit The new limit value; must be non-negative and no larger + * than this buffer's capacity + * @return This buffer + * @throws IllegalArgumentException If the preconditions on + * newLimit do not hold + */ + public final LogBuffer limit(int newLimit) { + if (origin + newLimit > buffer.length || newLimit < 0) { + throw new IllegalArgumentException("capacity excceed: " + + (origin + newLimit)); + } + + limit = newLimit; + return this; + } + + /** + * Returns the number of elements between the current position and the + * limit.
+ * + * @return The number of elements remaining in this buffer + */ + public final int remaining() { + return limit + origin - position; + } + + /** + * Tells whether there are any elements between the current position and the + * limit.
+ * + * @return true if, and only if, there is at least one element + * remaining in this buffer + */ + public final boolean hasRemaining() { + return position < limit + origin; + } + + /** + * Return 8-bit signed int from buffer. + */ + public final int getInt8(final int pos) { + if (pos >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + pos); + } + + return buffer[origin + pos]; + } + + /** + * Return next 8-bit signed int from buffer. + */ + public final int getInt8() { + if (position >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin)); + } + + return buffer[position++]; + } + + /** + * Return 8-bit unsigned int from buffer. + */ + public final int getUint8(final int pos) { + if (pos >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + pos); + } + + return 0xff & buffer[origin + pos]; + } + + /** + * Return next 8-bit unsigned int from buffer. + */ + public final int getUint8() { + if (position >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin)); + } + + return 0xff & buffer[position++]; + } + + /** + * Return 16-bit signed int from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - sint2korr + */ + public final int getInt16(final int pos) { + final int position = origin + pos; + + if (pos + 1 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + 1))); + } + + byte[] buf = buffer; + return (0xff & buf[position]) | ((buf[position + 1]) << 8); + } + + /** + * Return next 16-bit signed int from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - sint2korr + */ + public final int getInt16() { + if (position + 1 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 1)); + } + + byte[] buf = buffer; + return (0xff & buf[position++]) | ((buf[position++]) << 8); + } + + /** + * Return 16-bit unsigned int from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - uint2korr + */ + public final int getUint16(final int pos) { + final int position = origin + pos; + + if (pos + 1 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + 1))); + } + + byte[] buf = buffer; + return (0xff & buf[position]) | ((0xff & buf[position + 1]) << 8); + } + + /** + * Return next 16-bit unsigned int from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - uint2korr + */ + public final int getUint16() { + if (position + 1 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 1)); + } + + byte[] buf = buffer; + return (0xff & buf[position++]) | ((0xff & buf[position++]) << 8); + } + + /** + * Return 16-bit signed int from buffer. (big-endian) + * + * @see mysql-5.6.10/include/myisampack.h - mi_sint2korr + */ + public final int getBeInt16(final int pos) { + final int position = origin + pos; + if (pos + 1 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + 1))); + } + + byte[] buf = buffer; + return (0xff & buf[position + 1]) | ((buf[position]) << 8); + } + + /** + * Return next 16-bit signed int from buffer. 
(big-endian) + * + * @see mysql-5.1.60/include/my_global.h - mi_sint2korr + */ + public final int getBeInt16() { + if (position + 1 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 1)); + } + + byte[] buf = buffer; + return (buf[position++] << 8) | (0xff & buf[position++]); + } + + /** + * Return 16-bit unsigned int from buffer. (big-endian) + * + * @see mysql-5.6.10/include/myisampack.h - mi_usint2korr + */ + public final int getBeUint16(final int pos) { + final int position = origin + pos; + if (pos + 1 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + 1))); + } + + byte[] buf = buffer; + return (0xff & buf[position + 1]) | ((0xff & buf[position]) << 8); + } + + /** + * Return next 16-bit unsigned int from buffer. (big-endian) + * + * @see mysql-5.6.10/include/myisampack.h - mi_usint2korr + */ + public final int getBeUint16() { + if (position + 1 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 1)); + } + + byte[] buf = buffer; + return ((0xff & buf[position++]) << 8) | (0xff & buf[position++]); + } + + /** + * Return 24-bit signed int from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - sint3korr + */ + public final int getInt24(final int pos) { + final int position = origin + pos; + + if (pos + 2 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + 2))); + } + + byte[] buf = buffer; + return (0xff & buf[position]) | ((0xff & buf[position + 1]) << 8) | ((buf[position + 2]) << 16); + } + + /** + * Return next 24-bit signed int from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - sint3korr + */ + public final int getInt24() { + if (position + 2 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 2)); + } + + byte[] buf = buffer; + return (0xff & buf[position++]) | ((0xff & buf[position++]) << 8) | ((buf[position++]) << 16); + } + + /** + * Return 24-bit signed int from buffer. (big-endian) + * + * @see mysql-5.6.10/include/myisampack.h - mi_usint3korr + */ + public final int getBeInt24(final int pos) { + final int position = origin + pos; + + if (pos + 2 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + 2))); + } + + byte[] buf = buffer; + return (0xff & buf[position + 2]) | ((0xff & buf[position + 1]) << 8) | ((buf[position]) << 16); + } + + /** + * Return next 24-bit signed int from buffer. (big-endian) + * + * @see mysql-5.6.10/include/myisampack.h - mi_usint3korr + */ + public final int getBeInt24() { + if (position + 2 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 2)); + } + + byte[] buf = buffer; + return ((buf[position++]) << 16) | ((0xff & buf[position++]) << 8) | (0xff & buf[position++]); + } + + /** + * Return 24-bit unsigned int from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - uint3korr + */ + public final int getUint24(final int pos) { + final int position = origin + pos; + + if (pos + 2 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + 2))); + } + + byte[] buf = buffer; + return (0xff & buf[position]) | ((0xff & buf[position + 1]) << 8) | ((0xff & buf[position + 2]) << 16); + } + + /** + * Return next 24-bit unsigned int from buffer. 
(little-endian) + * + * @see mysql-5.1.60/include/my_global.h - uint3korr + */ + public final int getUint24() { + if (position + 2 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 2)); + } + + byte[] buf = buffer; + return (0xff & buf[position++]) | ((0xff & buf[position++]) << 8) | ((0xff & buf[position++]) << 16); + } + + /** + * Return 24-bit unsigned int from buffer. (big-endian) + * + * @see mysql-5.6.10/include/myisampack.h - mi_usint3korr + */ + public final int getBeUint24(final int pos) { + final int position = origin + pos; + + if (pos + 2 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + 2))); + } + + byte[] buf = buffer; + return (0xff & buf[position + 2]) | ((0xff & buf[position + 1]) << 8) | ((0xff & buf[position]) << 16); + } + + /** + * Return next 24-bit unsigned int from buffer. (big-endian) + * + * @see mysql-5.6.10/include/myisampack.h - mi_usint3korr + */ + public final int getBeUint24() { + if (position + 2 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 2)); + } + + byte[] buf = buffer; + return ((0xff & buf[position++]) << 16) | ((0xff & buf[position++]) << 8) | (0xff & buf[position++]); + } + + /** + * Return 32-bit signed int from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - sint4korr + */ + public final int getInt32(final int pos) { + final int position = origin + pos; + + if (pos + 3 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + 3))); + } + + byte[] buf = buffer; + return (0xff & buf[position]) | ((0xff & buf[position + 1]) << 8) | ((0xff & buf[position + 2]) << 16) + | ((buf[position + 3]) << 24); + } + + /** + * Return 32-bit signed int from buffer. (big-endian) + * + * @see mysql-5.6.10/include/myisampack.h - mi_sint4korr + */ + public final int getBeInt32(final int pos) { + final int position = origin + pos; + + if (pos + 3 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + 3))); + } + + byte[] buf = buffer; + return (0xff & buf[position + 3]) | ((0xff & buf[position + 2]) << 8) | ((0xff & buf[position + 1]) << 16) + | ((buf[position]) << 24); + } + + /** + * Return next 32-bit signed int from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - sint4korr + */ + public final int getInt32() { + if (position + 3 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 3)); + } + + byte[] buf = buffer; + return (0xff & buf[position++]) | ((0xff & buf[position++]) << 8) | ((0xff & buf[position++]) << 16) + | ((buf[position++]) << 24); + } + + /** + * Return next 32-bit signed int from buffer. (big-endian) + * + * @see mysql-5.6.10/include/myisampack.h - mi_sint4korr + */ + public final int getBeInt32() { + if (position + 3 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 3)); + } + + byte[] buf = buffer; + return ((buf[position++]) << 24) | ((0xff & buf[position++]) << 16) | ((0xff & buf[position++]) << 8) + | (0xff & buf[position++]); + } + + /** + * Return 32-bit unsigned int from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - uint4korr + */ + public final long getUint32(final int pos) { + final int position = origin + pos; + + if (pos + 3 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? 
pos : (pos + 3))); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position])) | ((long) (0xff & buf[position + 1]) << 8) + | ((long) (0xff & buf[position + 2]) << 16) | ((long) (0xff & buf[position + 3]) << 24); + } + + /** + * Return 32-bit unsigned int from buffer. (big-endian) + * + * @see mysql-5.6.10/include/myisampack.h - mi_usint4korr + */ + public final long getBeUint32(final int pos) { + final int position = origin + pos; + + if (pos + 3 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + 3))); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position + 3])) | ((long) (0xff & buf[position + 2]) << 8) + | ((long) (0xff & buf[position + 1]) << 16) | ((long) (0xff & buf[position]) << 24); + } + + /** + * Return next 32-bit unsigned int from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - uint4korr + */ + public final long getUint32() { + if (position + 3 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 3)); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position++])) | ((long) (0xff & buf[position++]) << 8) + | ((long) (0xff & buf[position++]) << 16) | ((long) (0xff & buf[position++]) << 24); + } + + /** + * Return next 32-bit unsigned int from buffer. (big-endian) + * + * @see mysql-5.6.10/include/myisampack.h - mi_uint4korr + */ + public final long getBeUint32() { + if (position + 3 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 3)); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position++]) << 24) | ((long) (0xff & buf[position++]) << 16) + | ((long) (0xff & buf[position++]) << 8) | ((long) (0xff & buf[position++])); + } + + /** + * Return 40-bit unsigned int from buffer. (little-endian) + */ + public final long getUlong40(final int pos) { + final int position = origin + pos; + + if (pos + 4 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + 4))); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position])) | ((long) (0xff & buf[position + 1]) << 8) + | ((long) (0xff & buf[position + 2]) << 16) | ((long) (0xff & buf[position + 3]) << 24) + | ((long) (0xff & buf[position + 4]) << 32); + } + + /** + * Return next 40-bit unsigned int from buffer. (little-endian) + */ + public final long getUlong40() { + if (position + 4 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 4)); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position++])) | ((long) (0xff & buf[position++]) << 8) + | ((long) (0xff & buf[position++]) << 16) | ((long) (0xff & buf[position++]) << 24) + | ((long) (0xff & buf[position++]) << 32); + } + + /** + * Return 40-bit unsigned int from buffer. (big-endian) + * + * @see mysql-5.6.10/include/myisampack.h - mi_uint5korr + */ + public final long getBeUlong40(final int pos) { + final int position = origin + pos; + + if (pos + 4 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + 4))); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position + 4])) | ((long) (0xff & buf[position + 3]) << 8) + | ((long) (0xff & buf[position + 2]) << 16) | ((long) (0xff & buf[position + 1]) << 24) + | ((long) (0xff & buf[position]) << 32); + } + + /** + * Return next 40-bit unsigned int from buffer. 
(big-endian) + * + * @see mysql-5.6.10/include/myisampack.h - mi_uint5korr + */ + public final long getBeUlong40() { + if (position + 4 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 4)); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position++]) << 32) | ((long) (0xff & buf[position++]) << 24) + | ((long) (0xff & buf[position++]) << 16) | ((long) (0xff & buf[position++]) << 8) + | ((long) (0xff & buf[position++])); + } + + /** + * Return 48-bit signed long from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - sint6korr + */ + public final long getLong48(final int pos) { + final int position = origin + pos; + + if (pos + 5 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + 5))); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position])) | ((long) (0xff & buf[position + 1]) << 8) + | ((long) (0xff & buf[position + 2]) << 16) | ((long) (0xff & buf[position + 3]) << 24) + | ((long) (0xff & buf[position + 4]) << 32) | ((long) (buf[position + 5]) << 40); + } + + /** + * Return 48-bit signed long from buffer. (big-endian) + * + * @see mysql-5.6.10/include/myisampack.h - mi_sint6korr + */ + public final long getBeLong48(final int pos) { + final int position = origin + pos; + + if (pos + 5 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + 5))); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position + 5])) | ((long) (0xff & buf[position + 4]) << 8) + | ((long) (0xff & buf[position + 3]) << 16) | ((long) (0xff & buf[position + 2]) << 24) + | ((long) (0xff & buf[position + 1]) << 32) | ((long) (buf[position]) << 40); + } + + /** + * Return next 48-bit signed long from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - sint6korr + */ + public final long getLong48() { + if (position + 5 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 5)); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position++])) | ((long) (0xff & buf[position++]) << 8) + | ((long) (0xff & buf[position++]) << 16) | ((long) (0xff & buf[position++]) << 24) + | ((long) (0xff & buf[position++]) << 32) | ((long) (buf[position++]) << 40); + } + + /** + * Return next 48-bit signed long from buffer. (Big-endian) + * + * @see mysql-5.6.10/include/myisampack.h - mi_sint6korr + */ + public final long getBeLong48() { + if (position + 5 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 5)); + } + + byte[] buf = buffer; + return ((long) (buf[position++]) << 40) | ((long) (0xff & buf[position++]) << 32) + | ((long) (0xff & buf[position++]) << 24) | ((long) (0xff & buf[position++]) << 16) + | ((long) (0xff & buf[position++]) << 8) | ((long) (0xff & buf[position++])); + } + + /** + * Return 48-bit unsigned long from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - uint6korr + */ + public final long getUlong48(final int pos) { + final int position = origin + pos; + + if (pos + 5 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? 
pos : (pos + 5))); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position])) | ((long) (0xff & buf[position + 1]) << 8) + | ((long) (0xff & buf[position + 2]) << 16) | ((long) (0xff & buf[position + 3]) << 24) + | ((long) (0xff & buf[position + 4]) << 32) | ((long) (0xff & buf[position + 5]) << 40); + } + + /** + * Return 48-bit unsigned long from buffer. (big-endian) + * + * @see mysql-5.6.10/include/myisampack.h - mi_uint6korr + */ + public final long getBeUlong48(final int pos) { + final int position = origin + pos; + + if (pos + 5 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + 5))); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position + 5])) | ((long) (0xff & buf[position + 4]) << 8) + | ((long) (0xff & buf[position + 3]) << 16) | ((long) (0xff & buf[position + 2]) << 24) + | ((long) (0xff & buf[position + 1]) << 32) | ((long) (0xff & buf[position]) << 40); + } + + /** + * Return next 48-bit unsigned long from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - uint6korr + */ + public final long getUlong48() { + if (position + 5 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 5)); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position++])) | ((long) (0xff & buf[position++]) << 8) + | ((long) (0xff & buf[position++]) << 16) | ((long) (0xff & buf[position++]) << 24) + | ((long) (0xff & buf[position++]) << 32) | ((long) (0xff & buf[position++]) << 40); + } + + /** + * Return next 48-bit unsigned long from buffer. (big-endian) + * + * @see mysql-5.6.10/include/myisampack.h - mi_uint6korr + */ + public final long getBeUlong48() { + if (position + 5 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 5)); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position++]) << 40) | ((long) (0xff & buf[position++]) << 32) + | ((long) (0xff & buf[position++]) << 24) | ((long) (0xff & buf[position++]) << 16) + | ((long) (0xff & buf[position++]) << 8) | ((long) (0xff & buf[position++])); + } + + /** + * Return 56-bit unsigned int from buffer. (little-endian) + */ + public final long getUlong56(final int pos) { + final int position = origin + pos; + + if (pos + 6 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + 6))); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position])) | ((long) (0xff & buf[position + 1]) << 8) + | ((long) (0xff & buf[position + 2]) << 16) | ((long) (0xff & buf[position + 3]) << 24) + | ((long) (0xff & buf[position + 4]) << 32) | ((long) (0xff & buf[position + 5]) << 40) + | ((long) (0xff & buf[position + 6]) << 48); + } + + /** + * Return next 56-bit unsigned int from buffer. (little-endian) + */ + public final long getUlong56() { + if (position + 6 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 6)); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position++])) | ((long) (0xff & buf[position++]) << 8) + | ((long) (0xff & buf[position++]) << 16) | ((long) (0xff & buf[position++]) << 24) + | ((long) (0xff & buf[position++]) << 32) | ((long) (0xff & buf[position++]) << 40) + | ((long) (0xff & buf[position++]) << 48); + } + + /** + * Return 56-bit unsigned int from buffer. 
(big-endian) + */ + public final long getBeUlong56(final int pos) { + final int position = origin + pos; + + if (pos + 6 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + 6))); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position + 6])) | ((long) (0xff & buf[position + 5]) << 8) + | ((long) (0xff & buf[position + 4]) << 16) | ((long) (0xff & buf[position + 3]) << 24) + | ((long) (0xff & buf[position + 2]) << 32) | ((long) (0xff & buf[position + 1]) << 40) + | ((long) (0xff & buf[position]) << 48); + } + + /** + * Return next 56-bit unsigned int from buffer. (big-endian) + */ + public final long getBeUlong56() { + if (position + 6 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 6)); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position++]) << 48) | ((long) (0xff & buf[position++]) << 40) + | ((long) (0xff & buf[position++]) << 32) | ((long) (0xff & buf[position++]) << 24) + | ((long) (0xff & buf[position++]) << 16) | ((long) (0xff & buf[position++]) << 8) + | ((long) (0xff & buf[position++])); + } + + /** + * Return 64-bit signed long from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - sint8korr + */ + public final long getLong64(final int pos) { + final int position = origin + pos; + + if (pos + 7 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + 7))); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position])) | ((long) (0xff & buf[position + 1]) << 8) + | ((long) (0xff & buf[position + 2]) << 16) | ((long) (0xff & buf[position + 3]) << 24) + | ((long) (0xff & buf[position + 4]) << 32) | ((long) (0xff & buf[position + 5]) << 40) + | ((long) (0xff & buf[position + 6]) << 48) | ((long) (buf[position + 7]) << 56); + } + + /** + * Return 64-bit signed long from buffer. (big-endian) + * + * @see mysql-5.6.10/include/myisampack.h - mi_sint8korr + */ + public final long getBeLong64(final int pos) { + final int position = origin + pos; + + if (pos + 7 >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + 7))); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position + 7])) | ((long) (0xff & buf[position + 6]) << 8) + | ((long) (0xff & buf[position + 5]) << 16) | ((long) (0xff & buf[position + 4]) << 24) + | ((long) (0xff & buf[position + 3]) << 32) | ((long) (0xff & buf[position + 2]) << 40) + | ((long) (0xff & buf[position + 1]) << 48) | ((long) (buf[position]) << 56); + } + + /** + * Return next 64-bit signed long from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - sint8korr + */ + public final long getLong64() { + if (position + 7 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 7)); + } + + byte[] buf = buffer; + return ((long) (0xff & buf[position++])) | ((long) (0xff & buf[position++]) << 8) + | ((long) (0xff & buf[position++]) << 16) | ((long) (0xff & buf[position++]) << 24) + | ((long) (0xff & buf[position++]) << 32) | ((long) (0xff & buf[position++]) << 40) + | ((long) (0xff & buf[position++]) << 48) | ((long) (buf[position++]) << 56); + } + + /** + * Return next 64-bit signed long from buffer. 
(big-endian) + * + * @see mysql-5.6.10/include/myisampack.h - mi_sint8korr + */ + public final long getBeLong64() { + if (position + 7 >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position - origin + 7)); + } + + byte[] buf = buffer; + return ((long) (buf[position++]) << 56) | ((long) (0xff & buf[position++]) << 48) + | ((long) (0xff & buf[position++]) << 40) | ((long) (0xff & buf[position++]) << 32) + | ((long) (0xff & buf[position++]) << 24) | ((long) (0xff & buf[position++]) << 16) + | ((long) (0xff & buf[position++]) << 8) | ((long) (0xff & buf[position++])); + } + + /* The max ulonglong - 0x ff ff ff ff ff ff ff ff */ + public static final BigInteger BIGINT_MAX_VALUE = new BigInteger("18446744073709551615"); + + /** + * Return 64-bit unsigned long from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - uint8korr + */ + public final BigInteger getUlong64(final int pos) { + final long long64 = getLong64(pos); + + return (long64 >= 0) ? BigInteger.valueOf(long64) : BIGINT_MAX_VALUE.add(BigInteger.valueOf(1 + long64)); + } + + /** + * Return 64-bit unsigned long from buffer. (big-endian) + * + * @see mysql-5.6.10/include/myisampack.h - mi_uint8korr + */ + public final BigInteger getBeUlong64(final int pos) { + final long long64 = getBeLong64(pos); + + return (long64 >= 0) ? BigInteger.valueOf(long64) : BIGINT_MAX_VALUE.add(BigInteger.valueOf(1 + long64)); + } + + /** + * Return next 64-bit unsigned long from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - uint8korr + */ + public final BigInteger getUlong64() { + final long long64 = getLong64(); + + return (long64 >= 0) ? BigInteger.valueOf(long64) : BIGINT_MAX_VALUE.add(BigInteger.valueOf(1 + long64)); + } + + /** + * Return next 64-bit unsigned long from buffer. (big-endian) + * + * @see mysql-5.6.10/include/myisampack.h - mi_uint8korr + */ + public final BigInteger getBeUlong64() { + final long long64 = getBeLong64(); + + return (long64 >= 0) ? BigInteger.valueOf(long64) : BIGINT_MAX_VALUE.add(BigInteger.valueOf(1 + long64)); + } + + /** + * Return 32-bit float from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - float4get + */ + public final float getFloat32(final int pos) { + return Float.intBitsToFloat(getInt32(pos)); + } + + /** + * Return next 32-bit float from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - float4get + */ + public final float getFloat32() { + return Float.intBitsToFloat(getInt32()); + } + + /** + * Return 64-bit double from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - float8get + */ + public final double getDouble64(final int pos) { + return Double.longBitsToDouble(getLong64(pos)); + } + + /** + * Return next 64-bit double from buffer. (little-endian) + * + * @see mysql-5.1.60/include/my_global.h - float8get + */ + public final double getDouble64() { + return Double.longBitsToDouble(getLong64()); + } + + public static final long NULL_LENGTH = ((long) ~0); + + /** + * Return packed number from buffer. (little-endian) A Packed Integer has + * the capacity of storing up to 8-byte integers, while small integers still + * can use 1, 3, or 4 bytes. The value of the first byte determines how to + * read the number, according to the following table. + *
+ * <ul>
+ * <li>0-250: The first byte is the number (in the range 0-250); no
+ * additional bytes are used.</li>
+ * <li>252: Two more bytes are used; the number is in the range
+ * 251-0xffff.</li>
+ * <li>253: Three more bytes are used; the number is in the range
+ * 0xffff-0xffffff.</li>
+ * <li>254: Eight more bytes are used; the number is in the range
+ * 0xffffff-0xffffffffffffffff.</li>
+ * </ul>
+ * That representation allows a first byte value of 251 to represent the SQL + * NULL value. + */ + public final long getPackedLong(final int pos) { + final int lead = getUint8(pos); + if (lead < 251) { + return lead; + } + + switch (lead) { + case 251: + return NULL_LENGTH; + case 252: + return getUint16(pos + 1); + case 253: + return getUint24(pos + 1); + default: /* Must be 254 when here */ + return getUint32(pos + 1); + } + } + + /** + * Return next packed number from buffer. (little-endian) + * + * @see LogBuffer#getPackedLong(int) + */ + public final long getPackedLong() { + final int lead = getUint8(); + if (lead < 251) { + return lead; + } + + switch (lead) { + case 251: + return NULL_LENGTH; + case 252: + return getUint16(); + case 253: + return getUint24(); + default: /* Must be 254 when here */ + final long value = getUint32(); + position += 4; /* ignore other */ + return value; + } + } + + /** + * Return fix length string from buffer. + */ + public final String getFixString(final int pos, final int len) { + return getFixString(pos, len, StandardCharsets.ISO_8859_1); + } + + /** + * Return next fix length string from buffer. + */ + public final String getFixString(final int len) { + return getFixString(len, StandardCharsets.ISO_8859_1); + } + + /** + * Return fix length string from buffer. + */ + public final String getFixString(final int pos, final int len, Charset charset) { + if (pos + len > limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + len))); + } + + final int from = origin + pos; + final int end = from + len; + byte[] buf = buffer; + int found = from; + for (; (found < end) && buf[found] != '\0'; found++) + /* empty loop */ + ; + + return new String(buf, from, found - from, charset); + } + + /** + * Return next fix length string from buffer. + */ + public final String getFixString(final int len, Charset charset) { + if (position + len > origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position + len - origin)); + } + + final int from = position; + final int end = from + len; + byte[] buf = buffer; + int found = from; + for (; (found < end) && buf[found] != '\0'; found++) + /* empty loop */ + ; + + String string = new String(buf, from, found - from, charset); + position += len; + return string; + } + + /** + * Return fix-length string from buffer without null-terminate checking. + * Fix bug #17 {@link https://github.com/AlibabaTech/canal/issues/17 } + */ + public final String getFullString(final int pos, final int len, Charset charset) { + if (pos + len > limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + len))); + } + + return new String(buffer, origin + pos, len, charset); + } + + /** + * Return next fix-length string from buffer without null-terminate + * checking. + * Fix bug #17 {@link https://github.com/AlibabaTech/canal/issues/17 } + */ + public final String getFullString(final int len, Charset charset) { + if (position + len > origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position + len - origin)); + } + + String string = new String(buffer, position, len, charset); + position += len; + return string; + } + + /** + * Return dynamic length string from buffer. 
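To make the lead-byte table above concrete, here is a short sketch of getPackedLong() on each case. The byte arrays are invented; the expected values follow directly from the decoding rules in the hunk.

```java
import com.alibaba.polardbx.common.utils.binlog.LogBuffer;

// Hypothetical demo, not part of the patch.
public class PackedLongDemo {
    public static void main(String[] args) {
        // Lead byte < 251: the value is the lead byte itself.
        LogBuffer one = new LogBuffer(new byte[] {(byte) 0xfa}, 0, 1);
        System.out.println(one.getPackedLong());                      // 250

        // Lead byte 252: the value is in the next two bytes, little-endian.
        LogBuffer two = new LogBuffer(new byte[] {(byte) 0xfc, 0x34, 0x12}, 0, 3);
        System.out.println(two.getPackedLong());                      // 0x1234 == 4660

        // Lead byte 251: the SQL NULL marker.
        LogBuffer nil = new LogBuffer(new byte[] {(byte) 0xfb}, 0, 1);
        System.out.println(nil.getPackedLong() == LogBuffer.NULL_LENGTH); // true
    }
}
```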
+ */ + public final String getString(final int pos) { + if (pos >= limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + pos); + } + + byte[] buf = buffer; + final int len = (0xff & buf[origin + pos]); + if (pos + len + 1 > limit) { + throw new IllegalArgumentException("limit excceed: " + (pos + len + 1)); + } + + return new String(buf, origin + pos + 1, len, StandardCharsets.ISO_8859_1); + } + + /** + * Return next dynamic length string from buffer. + */ + public final String getString() { + if (position >= origin + limit) { + throw new IllegalArgumentException("limit excceed: " + position); + } + + byte[] buf = buffer; + final int len = (0xff & buf[position]); + if (position + len + 1 > origin + limit) { + throw new IllegalArgumentException("limit excceed: " + + (position + len + 1 - origin)); + } + + String string = new String(buf, position + 1, len, StandardCharsets.ISO_8859_1); + position += len + 1; + return string; + } + + /** + * Return 16-bit signed int from buffer. (big-endian) + * + * @see mysql-5.1.60/include/myisampack.h - mi_sint2korr + */ + private static final int getInt16BE(byte[] buffer, final int pos) { + return ((buffer[pos]) << 8) | (0xff & buffer[pos + 1]); + } + + /** + * Return 24-bit signed int from buffer. (big-endian) + * + * @see mysql-5.1.60/include/myisampack.h - mi_sint3korr + */ + private static final int getInt24BE(byte[] buffer, final int pos) { + return (buffer[pos] << 16) | ((0xff & buffer[pos + 1]) << 8) | (0xff & buffer[pos + 2]); + } + + /** + * Return 32-bit signed int from buffer. (big-endian) + * + * @see mysql-5.1.60/include/myisampack.h - mi_sint4korr + */ + private static final int getInt32BE(byte[] buffer, final int pos) { + return (buffer[pos] << 24) | ((0xff & buffer[pos + 1]) << 16) | ((0xff & buffer[pos + 2]) << 8) + | (0xff & buffer[pos + 3]); + } + + /* decimal representation */ + public static final int DIG_PER_DEC1 = 9; + public static final int DIG_BASE = 1000000000; + public static final int DIG_MAX = DIG_BASE - 1; + public static final int dig2bytes[] = {0, 1, 1, 2, 2, 3, 3, 4, 4, 4}; + public static final int powers10[] = {1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000}; + + public static final int DIG_PER_INT32 = 9; + public static final int SIZE_OF_INT32 = 4; + + /** + * Return big decimal from buffer. + * + * @see mysql-5.1.60/strings/decimal.c - bin2decimal() + */ + public final BigDecimal getDecimal(final int pos, final int precision, final int scale) { + final int intg = precision - scale; + final int frac = scale; + final int intg0 = intg / DIG_PER_INT32; + final int frac0 = frac / DIG_PER_INT32; + final int intg0x = intg - intg0 * DIG_PER_INT32; + final int frac0x = frac - frac0 * DIG_PER_INT32; + + final int binSize = intg0 * SIZE_OF_INT32 + dig2bytes[intg0x] + frac0 * SIZE_OF_INT32 + dig2bytes[frac0x]; + if (pos + binSize > limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos < 0 ? pos : (pos + binSize))); + } + return getDecimal0(origin + pos, intg, frac, // NL + intg0, + frac0, + intg0x, + frac0x); + } + + /** + * Return next big decimal from buffer. 
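The binary DECIMAL layout is documented in the comment block a little further below; as a sanity check, the sample bytes from that comment can be fed straight through getDecimal(). Reading the sample as DECIMAL(19, 10) is my inference from its 9-byte binary size, not something the hunk states.

```java
import java.math.BigDecimal;
import com.alibaba.polardbx.common.utils.binlog.LogBuffer;

// Hypothetical check of the worked example documented below, not part of the patch.
public class DecimalDemo {
    public static void main(String[] args) {
        byte[] bin = {(byte) 0x80, 0x00, 0x00, 0x05, 0x1b, 0x38, (byte) 0xb0, 0x60, 0x00};
        LogBuffer buf = new LogBuffer(bin, 0, bin.length);
        // Precision 19 / scale 10 gives a 9-byte binary size matching the sample.
        BigDecimal d = buf.getDecimal(19, 10);
        System.out.println(d.compareTo(new BigDecimal("5.4567")) == 0); // true
    }
}
```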
+ * + * @see mysql-5.1.60/strings/decimal.c - bin2decimal() + */ + public final BigDecimal getDecimal(final int precision, final int scale) { + final int intg = precision - scale; + final int frac = scale; + final int intg0 = intg / DIG_PER_INT32; + final int frac0 = frac / DIG_PER_INT32; + final int intg0x = intg - intg0 * DIG_PER_INT32; + final int frac0x = frac - frac0 * DIG_PER_INT32; + + final int binSize = intg0 * SIZE_OF_INT32 + dig2bytes[intg0x] + frac0 * SIZE_OF_INT32 + dig2bytes[frac0x]; + if (position + binSize > origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position + binSize - origin)); + } + + BigDecimal decimal = getDecimal0(position, intg, frac, // NL + intg0, + frac0, + intg0x, + frac0x); + position += binSize; + return decimal; + } + + /** + * Return big decimal from buffer. + * + *
+     * Decimal representation in binlog seems to be as follows:
+     *
+     * 1st bit - sign such that set == +, unset == -
+     * every 4 bytes represent 9 digits in big-endian order, so that
+     * if you print the values of these quads as big-endian integers one after
+     * another, you get the whole number string representation in decimal. What
+     * remains is to put a sign and a decimal dot.
+     *
+     * 80 00 00 05 1b 38 b0 60 00 means:
+     *
+     *   0x80 - positive
+     *   0x00000005 - 5
+     *   0x1b38b060 - 456700000
+     *   0x00       - 0
+     *
+     * 54567000000 / 10^{10} = 5.4567
+     * 
+ * + * @see mysql-5.1.60/strings/decimal.c - bin2decimal() + * @see mysql-5.1.60/strings/decimal.c - decimal2string() + */ + private final BigDecimal getDecimal0(final int begin, final int intg, final int frac, final int intg0, + final int frac0, final int intg0x, final int frac0x) { + final int mask = ((buffer[begin] & 0x80) == 0x80) ? 0 : -1; + int from = begin; + + /* max string length */ + final int len = ((mask != 0) ? 1 : 0) + ((intg != 0) ? intg : 1) // NL + + ((frac != 0) ? 1 : 0) + frac; + char[] buf = new char[len]; + int pos = 0; + + if (mask != 0) /* decimal sign */ { + buf[pos++] = ('-'); + } + + final byte[] d_copy = buffer; + d_copy[begin] ^= 0x80; /* clear sign */ + int mark = pos; + + if (intg0x != 0) { + final int i = dig2bytes[intg0x]; + int x = 0; + switch (i) { + case 1: + x = d_copy[from] /* one byte */; + break; + case 2: + x = getInt16BE(d_copy, from); + break; + case 3: + x = getInt24BE(d_copy, from); + break; + case 4: + x = getInt32BE(d_copy, from); + break; + } + from += i; + x ^= mask; + if (x < 0 || x >= powers10[intg0x + 1]) { + throw new IllegalArgumentException("bad format, x exceed: " + x + ", " + powers10[intg0x + 1]); + } + if (x != 0 /* !digit || x != 0 */) { + for (int j = intg0x; j > 0; j--) { + final int divisor = powers10[j - 1]; + final int y = x / divisor; + if (mark < pos || y != 0) { + buf[pos++] = ((char) ('0' + y)); + } + x -= y * divisor; + } + } + } + + for (final int stop = from + intg0 * SIZE_OF_INT32; from < stop; from += SIZE_OF_INT32) { + int x = getInt32BE(d_copy, from); + x ^= mask; + if (x < 0 || x > DIG_MAX) { + throw new IllegalArgumentException("bad format, x exceed: " + x + ", " + DIG_MAX); + } + if (x != 0) { + if (mark < pos) { + for (int i = DIG_PER_DEC1; i > 0; i--) { + final int divisor = powers10[i - 1]; + final int y = x / divisor; + buf[pos++] = ((char) ('0' + y)); + x -= y * divisor; + } + } else { + for (int i = DIG_PER_DEC1; i > 0; i--) { + final int divisor = powers10[i - 1]; + final int y = x / divisor; + if (mark < pos || y != 0) { + buf[pos++] = ((char) ('0' + y)); + } + x -= y * divisor; + } + } + } else if (mark < pos) { + for (int i = DIG_PER_DEC1; i > 0; i--) { + buf[pos++] = ('0'); + } + } + } + + if (mark == pos) + /* fix 0.0 problem, only '.' may cause BigDecimal parsing exception. 
*/ { + buf[pos++] = ('0'); + } + + if (frac > 0) { + buf[pos++] = ('.'); + mark = pos; + + for (final int stop = from + frac0 * SIZE_OF_INT32; from < stop; from += SIZE_OF_INT32) { + int x = getInt32BE(d_copy, from); + x ^= mask; + if (x < 0 || x > DIG_MAX) { + throw new IllegalArgumentException("bad format, x exceed: " + x + ", " + DIG_MAX); + } + if (x != 0) { + for (int i = DIG_PER_DEC1; i > 0; i--) { + final int divisor = powers10[i - 1]; + final int y = x / divisor; + buf[pos++] = ((char) ('0' + y)); + x -= y * divisor; + } + } else { + for (int i = DIG_PER_DEC1; i > 0; i--) { + buf[pos++] = ('0'); + } + } + } + + if (frac0x != 0) { + final int i = dig2bytes[frac0x]; + int x = 0; + switch (i) { + case 1: + x = d_copy[from] /* one byte */; + break; + case 2: + x = getInt16BE(d_copy, from); + break; + case 3: + x = getInt24BE(d_copy, from); + break; + case 4: + x = getInt32BE(d_copy, from); + break; + } + x ^= mask; + if (x != 0) { + final int dig = DIG_PER_DEC1 - frac0x; + x *= powers10[dig]; + if (x < 0 || x > DIG_MAX) { + throw new IllegalArgumentException("bad format, x exceed: " + x + ", " + DIG_MAX); + } + for (int j = DIG_PER_DEC1; j > dig; j--) { + final int divisor = powers10[j - 1]; + final int y = x / divisor; + buf[pos++] = ((char) ('0' + y)); + x -= y * divisor; + } + } + } + + if (mark == pos) + /* make number more friendly */ { + buf[pos++] = ('0'); + } + } + + d_copy[begin] ^= 0x80; /* restore sign */ + // String decimal = String.valueOf(buf, 0, pos); + // return new BigDecimal(decimal); + return new BigDecimal(buf, 0, pos); + } + + /** + * Fill MY_BITMAP structure from buffer. + * + * @param len The length of MY_BITMAP in bits. + */ + public final void fillBitmap(BitSet bitmap, final int pos, final int len) { + if (pos + ((len + 7) / 8) > limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos + (len + 7) / 8)); + } + + fillBitmap0(bitmap, origin + pos, len); + } + + /** + * Fill next MY_BITMAP structure from buffer. + * + * @param len The length of MY_BITMAP in bits. + */ + public final void fillBitmap(BitSet bitmap, final int len) { + if (position + ((len + 7) / 8) > origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position + ((len + 7) / 8) - origin)); + } + + position = fillBitmap0(bitmap, position, len); + } + + /** + * Fill MY_BITMAP structure from buffer. + * + * @param len The length of MY_BITMAP in bits. + */ + private final int fillBitmap0(BitSet bitmap, int pos, final int len) { + final byte[] buf = buffer; + + for (int bit = 0; bit < len; bit += 8) { + int flag = ((int) buf[pos++]) & 0xff; + if (flag == 0) { + continue; + } + if ((flag & 0x01) != 0) { + bitmap.set(bit); + } + if ((flag & 0x02) != 0) { + bitmap.set(bit + 1); + } + if ((flag & 0x04) != 0) { + bitmap.set(bit + 2); + } + if ((flag & 0x08) != 0) { + bitmap.set(bit + 3); + } + if ((flag & 0x10) != 0) { + bitmap.set(bit + 4); + } + if ((flag & 0x20) != 0) { + bitmap.set(bit + 5); + } + if ((flag & 0x40) != 0) { + bitmap.set(bit + 6); + } + if ((flag & 0x80) != 0) { + bitmap.set(bit + 7); + } + } + return pos; + } + + /** + * Return MY_BITMAP structure from buffer. + * + * @param len The length of MY_BITMAP in bits. + */ + public final BitSet getBitmap(final int pos, final int len) { + BitSet bitmap = new BitSet(len); + fillBitmap(bitmap, pos, len); + return bitmap; + } + + /** + * Return next MY_BITMAP structure from buffer. + * + * @param len The length of MY_BITMAP in bits. 
+ */ + public final BitSet getBitmap(final int len) { + BitSet bitmap = new BitSet(len); + fillBitmap(bitmap, len); + return bitmap; + } + + /** + * Fill n bytes into output stream. + */ + public final void fillOutput(OutputStream out, final int pos, final int len) throws IOException { + if (pos + len > limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos + len)); + } + + out.write(buffer, origin + pos, len); + } + + /** + * Fill next n bytes into output stream. + */ + public final void fillOutput(OutputStream out, final int len) throws IOException { + if (position + len > origin + limit) { + throw new IllegalArgumentException("limit excceed: " + + (position + len - origin)); + } + + out.write(buffer, position, len); + position += len; + } + + /** + * Fill n bytes in this buffer. + */ + public final void fillBytes(final int pos, byte[] dest, final int destPos, final int len) { + if (pos + len > limit || pos < 0) { + throw new IllegalArgumentException("limit excceed: " + (pos + len)); + } + + System.arraycopy(buffer, origin + pos, dest, destPos, len); + } + + /** + * Fill next n bytes in this buffer. + */ + public final void fillBytes(byte[] dest, final int destPos, final int len) { + if (position + len > origin + limit) { + throw new IllegalArgumentException("limit excceed: " + (position + len - origin)); + } + + System.arraycopy(buffer, position, dest, destPos, len); + position += len; + } + + /** + * Return n-byte data from buffer. + */ + public final byte[] getData(final int pos, final int len) { + byte[] buf = new byte[len]; + fillBytes(pos, buf, 0, len); + return buf; + } + + /** + * Return next n-byte data from buffer. + */ + public final byte[] getData(final int len) { + byte[] buf = new byte[len]; + fillBytes(buf, 0, len); + return buf; + } + + /** + * Return all remaining data from buffer. + */ + public final byte[] getData() { + return getData(0, limit); + } + + /** + * mariadb compress log event Get the length of uncompress content. + * + * @return 0 means error. + */ + public final long getUncompressLong(int lenPad) { + long len = 0; + switch (lenPad) { + case 1: + len = getInt8(); + break; + case 2: + len = getBeUint16(); + break; + case 3: + len = getBeUint24(); + break; + case 4: + len = getBeUint32(); + break; + default: + len = 0; + break; + } + + return len; + } + + /** + * uncompress mariadb log event + */ + public LogBuffer uncompressBuf() { + int lenPad = getInt8(); + long len = getUncompressLong(lenPad & 0x07); + int alg = (lenPad & 0x70) >> 4; + LogBuffer buffer = null; + try { + switch (alg) { + case 0: + buffer = uncompressZlib(limit - position); + break; + default: + // bad algorithm + return this; + } + } catch (Exception e) { + throw new IllegalArgumentException("uncompress failed ", e); + } + + if (buffer.limit() != len) { + throw new IllegalArgumentException( + "uncompress lenght not match, expected : " + len + " , but actual : " + buffer.limit()); + } + return buffer; + } + + private LogBuffer uncompressZlib(int len) throws Exception { + if (position + len > limit || position < 0) { + throw new IllegalArgumentException("limit excceed: " + (position + len)); + } + + try (DeflateCompressorInputStream in = new DeflateCompressorInputStream( + new ByteArrayInputStream(buffer, position, position + len))) { + byte[] decodeBytes = IOUtils.toByteArray(in); + return new LogBuffer(decodeBytes, 0, decodeBytes.length); + } + } + + /** + * Return full hexdump from position. 
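For reference, a sketch of the output format the hexdump methods below produce: two hex digits per byte, underscore-separated. The input bytes are invented.

```java
import com.alibaba.polardbx.common.utils.binlog.LogBuffer;

// Hypothetical demo, not part of the patch.
public class HexdumpDemo {
    public static void main(String[] args) {
        LogBuffer buf = new LogBuffer(new byte[] {0x01, 0x2a, 0x7f}, 0, 3);
        System.out.println(buf.hexdump(0)); // 01_2a_7f
    }
}
```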
+ */
+    public final String hexdump(final int pos) {
+        if ((limit - pos) > 0) {
+            final int begin = origin + pos;
+            final int end = origin + limit;
+
+            byte[] buf = buffer;
+            StringBuilder dump = new StringBuilder();
+            // Mask to the low nibble: bytes are sign-extended in Java, so an
+            // unmasked (buf[i] >> 4) would print "fffffff..." for bytes >= 0x80.
+            dump.append(Integer.toHexString((buf[begin] >> 4) & 0xf));
+            dump.append(Integer.toHexString(buf[begin] & 0xf));
+            for (int i = begin + 1; i < end; i++) {
+                dump.append("_");
+                dump.append(Integer.toHexString((buf[i] >> 4) & 0xf));
+                dump.append(Integer.toHexString(buf[i] & 0xf));
+            }
+
+            return dump.toString();
+        }
+        return "";
+    }
+
+    /**
+     * Return hexdump from position, for len bytes.
+     */
+    public final String hexdump(final int pos, final int len) {
+        if ((limit - pos) > 0) {
+            final int begin = origin + pos;
+            final int end = Math.min(begin + len, origin + limit);
+
+            byte[] buf = buffer;
+            StringBuilder dump = new StringBuilder();
+            dump.append(Integer.toHexString((buf[begin] >> 4) & 0xf));
+            dump.append(Integer.toHexString(buf[begin] & 0xf));
+            for (int i = begin + 1; i < end; i++) {
+                dump.append("_");
+                dump.append(Integer.toHexString((buf[i] >> 4) & 0xf));
+                dump.append(Integer.toHexString(buf[i] & 0xf));
+            }
+
+            return dump.toString();
+        }
+        return "";
+    }
+
+}
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/bloomfilter/BlockLongBloomFilter.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/bloomfilter/BlockLongBloomFilter.java
new file mode 100644
index 000000000..05dc4399f
--- /dev/null
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/bloomfilter/BlockLongBloomFilter.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.common.utils.bloomfilter;
+
+/**
+ * A special kind of blocked Bloom filter. It sets 2 to 4 (usually 4) bits in
+ * two 64-bit words; 1 or 2 (usually 2) per word. It is faster than a regular
+ * Bloom filter, but needs slightly more space / has a slightly worse false
+ * positive rate.
+ */ +public class BlockLongBloomFilter implements RFBloomFilter { + public static final int BITS_PER_KEY = 8; + public static final long RANDOM_SEED = 2528582834704613611L; + + private final int buckets; + private final long seed; + private final long[] data; + + public BlockLongBloomFilter(int entryCount) { + this(entryCount, BITS_PER_KEY); + } + + public BlockLongBloomFilter(int entryCount, int bitsPerKey) { + entryCount = Math.max(1, entryCount); + this.seed = RANDOM_SEED; + long bits = (long) entryCount * bitsPerKey; + this.buckets = (int) bits / 64; + data = new long[buckets + 16 + 1]; + } + + public long getBitCount() { + return data.length * 64L; + } + + @Override + public void putLong(long key) { + long hash = hash64(key, seed); + int start = reduce((int) hash, buckets); + hash = hash ^ Long.rotateLeft(hash, 32); + long m1 = (1L << hash) | (1L << (hash >> 6)); + long m2 = (1L << (hash >> 12)) | (1L << (hash >> 18)); + + data[start] |= m1; + data[start + 1 + (int) (hash >>> 60)] |= m2; + } + + @Override + public boolean mightContainLong(long key) { + long hash = hash64(key, seed); + int start = reduce((int) hash, buckets); + hash = hash ^ Long.rotateLeft(hash, 32); + + long a = data[start]; + long b = data[start + 1 + (int) (hash >>> 60)]; + long m1 = (1L << hash) | (1L << (hash >> 6)); + long m2 = (1L << (hash >> 12)) | (1L << (hash >> 18)); + return ((m1 & a) == m1) && ((m2 & b) == m2); + } + + @Override + public long sizeInBytes() { + return data.length * Long.BYTES; + } + + @Override + public void merge(RFBloomFilter other) { + if (other instanceof BlockLongBloomFilter && this.data.length == ((BlockLongBloomFilter) other).data.length + && this.seed == ((BlockLongBloomFilter) other).seed + && this.buckets == ((BlockLongBloomFilter) other).buckets) { + + long[] array = ((BlockLongBloomFilter) other).data; + for (int i = 0; i < data.length; i++) { + data[i] |= array[i]; + } + + } else { + throw new UnsupportedOperationException(); + } + } + + @Override + public void putInt(int value) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean mightContainInt(int value) { + throw new UnsupportedOperationException(); + } + + private static long hash64(long x, long seed) { + x += seed; + x = (x ^ x >>> 33) * -49064778989728563L; + x = (x ^ x >>> 33) * -4265267296055464877L; + x ^= x >>> 33; + return x; + } + + private static int reduce(int hash, int n) { + return (int) (((long) hash & 4294967295L) * (long) n >>> 32); + } +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/bloomfilter/ConcurrentIntBloomFilter.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/bloomfilter/ConcurrentIntBloomFilter.java new file mode 100644 index 000000000..6d7bd32ea --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/bloomfilter/ConcurrentIntBloomFilter.java @@ -0,0 +1,174 @@ +/* + * Copyright (C) 2011 The Guava Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ + +package com.alibaba.polardbx.common.utils.bloomfilter; + +import com.alibaba.polardbx.common.utils.memory.SizeOf; +import com.google.common.math.LongMath; +import com.google.common.primitives.Ints; + +import java.math.RoundingMode; +import java.util.concurrent.atomic.AtomicLongArray; + +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; + +public final class ConcurrentIntBloomFilter implements RFBloomFilter { + + public static final double DEFAULT_FPP = 0.03; + private final LockFreeBitArray bits; + private final int numHashFunctions; + + private ConcurrentIntBloomFilter(LockFreeBitArray bits, int numHashFunctions) { + checkArgument(numHashFunctions > 0, "numHashFunctions (%s) must be > 0", numHashFunctions); + checkArgument(numHashFunctions < 32, "numHashFunctions (%s) must be < 32", numHashFunctions); + this.bits = checkNotNull(bits); + this.numHashFunctions = numHashFunctions; + } + + @Override + public void putInt(int value) { + int size = bits.bitSize(); + for (int i = 0; i < numHashFunctions; i++) { + bits.set(Integer.remainderUnsigned(hash(value, i), size)); + } + } + + @Override + public boolean mightContainInt(int value) { + int size = bits.bitSize(); + for (int i = 0; i < numHashFunctions; i++) { + if (!bits.get(Integer.remainderUnsigned(hash(value, i), size))) { + return false; + } + } + return true; + } + + @Override + public void putLong(long value) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean mightContainLong(long value) { + throw new UnsupportedOperationException(); + } + + private int hash(int value, int i) { + return Integer.rotateRight(value, i * 5); + } + + @Override + public long sizeInBytes() { + return bits.sizeInBytes(); + } + + @Override + public void merge(RFBloomFilter other) { + throw new UnsupportedOperationException(); + } + + public static long estimatedSizeInBytes(long expectedInsertions, double fpp) { + if (expectedInsertions == 0) { + expectedInsertions = 1; + } + int numBits = BloomFilterUtil.optimalNumOfBits(expectedInsertions, fpp); + int arraySize = Ints.checkedCast(LongMath.divide(numBits, 64, RoundingMode.CEILING)); + return SizeOf.sizeOfLongArray(arraySize); + } + + public static ConcurrentIntBloomFilter create(long expectedInsertions) { + return create(expectedInsertions, DEFAULT_FPP); // FYI, for 3%, we always get 5 hash functions + } + + public static ConcurrentIntBloomFilter create(long expectedInsertions, double fpp) { + checkArgument(expectedInsertions >= 0, "Expected insertions (%s) must be >= 0", expectedInsertions); + checkArgument(fpp > 0.0, "False positive probability (%s) must be > 0.0", fpp); + checkArgument(fpp < 1.0, "False positive probability (%s) must be < 1.0", fpp); + if (expectedInsertions == 0) { + expectedInsertions = 1; + } + /* + * TODO(user): Put a warning in the javadoc about tiny fpp values, since + * the resulting size is proportional to -log(p), but there is not much + * of a point after all, e.g. optimalM(1000, 0.0000000000000001) = 76680 + * which is less than 10kb. Who cares! 
+ */ + int numBits = BloomFilterUtil.optimalNumOfBits(expectedInsertions, fpp); + int numHashFunctions = BloomFilterUtil.optimalNumOfHashFunctions(expectedInsertions, numBits); + try { + return new ConcurrentIntBloomFilter(new LockFreeBitArray(numBits), numHashFunctions); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Could not create BloomFilter of " + numBits + " bits", e); + } + } + + /** + * Models a lock-free array of bits. + *

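+     * Bits live in an AtomicLongArray of 64-bit words; set() retries a compare-and-swap
+     * on the owning word until the bit is observed set, so writers never block each other.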
+ * We use this instead of java.util.BitSet because we need access to the + * array of longs and we need compare-and-swap. + */ + static final class LockFreeBitArray { + + private static final int LONG_ADDRESSABLE_BITS = 6; + + private final AtomicLongArray data; + + LockFreeBitArray(int bits) { + this(new long[Ints.checkedCast(LongMath.divide(bits, 64, RoundingMode.CEILING))]); + } + + LockFreeBitArray(long[] data) { + this.data = new AtomicLongArray(data); + } + + /** + * Returns true if the bit changed value. + */ + boolean set(long bitIndex) { + if (get(bitIndex)) { + return false; + } + + int longIndex = (int) (bitIndex >>> LONG_ADDRESSABLE_BITS); + long mask = 1L << bitIndex; // only cares about low 6 bits of bitIndex + + long oldValue; + long newValue; + do { + oldValue = data.get(longIndex); + newValue = oldValue | mask; + if (oldValue == newValue) { + return false; + } + } while (!data.compareAndSet(longIndex, oldValue, newValue)); + + return true; + } + + boolean get(long bitIndex) { + return (data.get((int) (bitIndex >>> 6)) & (1L << bitIndex)) != 0; + } + + int bitSize() { + return data.length() * Long.SIZE; + } + + long sizeInBytes() { + return data.length() * Long.BYTES; + } + } +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/bloomfilter/FastIntBloomFilter.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/bloomfilter/FastIntBloomFilter.java deleted file mode 100644 index 4bfdd8701..000000000 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/bloomfilter/FastIntBloomFilter.java +++ /dev/null @@ -1,153 +0,0 @@ - - -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.common.utils.bloomfilter; - -import com.google.common.math.LongMath; -import com.google.common.primitives.Ints; - -import java.math.RoundingMode; -import java.util.concurrent.atomic.AtomicLongArray; - -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkNotNull; - -public final class FastIntBloomFilter { - - private final LockFreeBitArray bits; - private final int numHashFunctions; - - private FastIntBloomFilter(LockFreeBitArray bits, int numHashFunctions) { - checkArgument(numHashFunctions > 0, "numHashFunctions (%s) must be > 0", numHashFunctions); - checkArgument(numHashFunctions < 32, "numHashFunctions (%s) must be < 32", numHashFunctions); - this.bits = checkNotNull(bits); - this.numHashFunctions = numHashFunctions; - } - - public void put(int value) { - int size = bits.bitSize(); - for (int i = 0; i < numHashFunctions; i++) { - bits.set(Integer.remainderUnsigned(hash(value, i), size)); - } - } - - public boolean mightContain(int value) { - int size = bits.bitSize(); - for (int i = 0; i < numHashFunctions; i++) { - if (!bits.get(Integer.remainderUnsigned(hash(value, i), size))) { - return false; - } - } - return true; - } - - private int hash(int value, int i) { - return Integer.rotateRight(value, i * 5); - } - - public long sizeInBytes() { - return bits.sizeInBytes(); - } - - public static FastIntBloomFilter create(long expectedInsertions) { - return create(expectedInsertions, 0.03); - } - - public static FastIntBloomFilter create(long expectedInsertions, double fpp) { - checkArgument(expectedInsertions >= 0, "Expected insertions (%s) must be >= 0", expectedInsertions); - checkArgument(fpp > 0.0, "False positive probability (%s) must be > 0.0", fpp); - checkArgument(fpp < 1.0, "False positive probability (%s) must be < 1.0", fpp); - if (expectedInsertions == 0) { - expectedInsertions = 1; - } - - int numBits = BloomFilterUtil.optimalNumOfBits(expectedInsertions, fpp); - int numHashFunctions = BloomFilterUtil.optimalNumOfHashFunctions(expectedInsertions, numBits); - try { - return new FastIntBloomFilter(new LockFreeBitArray(numBits), numHashFunctions); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException("Could not create BloomFilter of " + numBits + " bits", e); - } - } - - private static int optimalNumOfHashFunctions(long n, long m) { - - return Math.max(1, (int) Math.round((double) m / n * Math.log(2))); - } - - private static long optimalNumOfBits(long n, double p) { - if (p == 0) { - p = Double.MIN_VALUE; - } - return (long) (-n * Math.log(p) / (Math.log(2) * Math.log(2))); - } - - /** - * Models a lock-free array of bits. - *

- * We use this instead of java.util.BitSet because we need access to the - * array of longs and we need compare-and-swap. - */ - static final class LockFreeBitArray { - - private static final int LONG_ADDRESSABLE_BITS = 6; - - private final AtomicLongArray data; - - LockFreeBitArray(int bits) { - this(new long[Ints.checkedCast(LongMath.divide(bits, 64, RoundingMode.CEILING))]); - } - - LockFreeBitArray(long[] data) { - this.data = new AtomicLongArray(data); - } - - boolean set(long bitIndex) { - if (get(bitIndex)) { - return false; - } - - int longIndex = (int) (bitIndex >>> LONG_ADDRESSABLE_BITS); - long mask = 1L << bitIndex; - - long oldValue; - long newValue; - do { - oldValue = data.get(longIndex); - newValue = oldValue | mask; - if (oldValue == newValue) { - return false; - } - } while (!data.compareAndSet(longIndex, oldValue, newValue)); - - return true; - } - - boolean get(long bitIndex) { - return (data.get((int) (bitIndex >>> 6)) & (1L << bitIndex)) != 0; - } - - int bitSize() { - return data.length() * Long.SIZE; - } - - long sizeInBytes() { - return data.length() * Long.BYTES; - } - } -} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/bloomfilter/RFBloomFilter.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/bloomfilter/RFBloomFilter.java new file mode 100644 index 000000000..253e535b1 --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/bloomfilter/RFBloomFilter.java @@ -0,0 +1,39 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.common.utils.bloomfilter; + +public interface RFBloomFilter { + static BlockLongBloomFilter createBlockLongBloomFilter(int expectedEntries) { + return new BlockLongBloomFilter(expectedEntries); + } + + static ConcurrentIntBloomFilter createConcurrentIntBloomFilter(int expectedEntries, double fpp) { + return ConcurrentIntBloomFilter.create(expectedEntries, fpp); + } + + void putInt(int value); + + boolean mightContainInt(int value); + + void putLong(long value); + + boolean mightContainLong(long value); + + long sizeInBytes(); + + void merge(RFBloomFilter other); +} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/convertor/CommonAndRowValueConvertor.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/convertor/CommonAndRowValueConvertor.java new file mode 100644 index 000000000..e5909d9bb --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/convertor/CommonAndRowValueConvertor.java @@ -0,0 +1,35 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.common.utils.convertor;
+
+import com.alibaba.polardbx.common.datatype.RowValue;
+import org.apache.curator.shaded.com.google.common.collect.Lists;
+
+public class CommonAndRowValueConvertor {
+
+    public static class CommonToRowValue extends AbastactConvertor {
+
+        @Override
+        public Object convert(Object src, Class destClass) {
+            if (RowValue.class.isInstance(src)) {
+                return src;
+            } else {
+                return new RowValue(Lists.newArrayList(src));
+            }
+        }
+    }
+}
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/convertor/ConvertorHelper.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/convertor/ConvertorHelper.java
index 9e9f98c96..c7efce025 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/convertor/ConvertorHelper.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/convertor/ConvertorHelper.java
@@ -17,6 +17,7 @@ package com.alibaba.polardbx.common.utils.convertor;
 
 import com.alibaba.polardbx.common.datatype.Decimal;
+import com.alibaba.polardbx.common.datatype.RowValue;
 import com.alibaba.polardbx.common.jdbc.ZeroDate;
 import com.alibaba.polardbx.common.jdbc.ZeroTime;
 import com.alibaba.polardbx.common.jdbc.ZeroTimestamp;
@@ -51,6 +52,10 @@ public class ConvertorHelper {
     private static final Convertor collectionToCollection = new CollectionAndCollectionConvertor.CollectionToCollection();
+    // RowValue handling
+    public static final Convertor rowValueToCommon = new RowValueAndCommonConvertor.RowValueToCommon();
+    public static final Convertor commonToRowValue = new CommonAndRowValueConvertor.CommonToRowValue();
+
     // Enum handling
     public static final Convertor stringToEnum = new StringAndEnumConvertor.StringToEnum();
     public static final Convertor enumToString = new StringAndEnumConvertor.EnumToString();
     public static final Convertor sqlToDate = new SqlDateAndDateConvertor.SqlDateToDateConvertor();
@@ -109,6 +114,12 @@ public Convertor getConvertor(Class src, Class dest) {
         }
     }
 
+        if (convertor == null && dest == RowValue.class) {
+            convertor = commonToRowValue;
+        }
+
+        // Handle the Array|Collection mappings.
+        // If src or dest is an array type, look up the mapping registered under Array.class, because the default array handling is registered directly against Array.class.
         boolean isSrcArray = src.isArray();
         boolean isDestArray = dest.isArray();
         if (convertor == null && src.isArray() && dest.isArray()) {
@@ -135,6 +146,12 @@ public Convertor getConvertor(Class src, Class dest) {
         }
     }
 
+        // If the source class is RowValue.
+        if (convertor == null && src == RowValue.class) {
+            convertor = rowValueToCommon;
+        }
+
+        // If both src and dest are common types, apply the special common-to-common handling.
         if (convertor == null && commonTypes.containsKey(src) && commonTypes.containsKey(dest)) {
             convertor = commonToCommon;
         }
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/convertor/RowValueAndCommonConvertor.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/convertor/RowValueAndCommonConvertor.java
new file mode 100644
index 000000000..9c51a3b5c
--- /dev/null
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/convertor/RowValueAndCommonConvertor.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.common.utils.convertor;
+
+import com.alibaba.polardbx.common.datatype.RowValue;
+
+public class RowValueAndCommonConvertor {
+
+    public static class RowValueToCommon extends AbastactConvertor {
+
+        @Override
+        public Object convert(Object src, Class destClass) {
+            if (destClass == RowValue.class) {
+                return src;
+            }
+            if (RowValue.class.isInstance(src)) {
+                // A multi-valued row cannot be collapsed into a single scalar value.
+                if (((RowValue) src).getValues().size() > 1) {
+                    throw new ConvertorException("Unsupported convert: [" + src + "," + destClass.getName() + "]");
+                }
+                Object innerValue = ((RowValue) src).getValues().get(0);
+                if (innerValue == null) {
+                    return null;
+                }
+                Convertor innerConvert = ConvertorHelper.getInstance().getConvertor(
+                    innerValue.getClass(), destClass);
+
+                if (innerConvert != null) {
+                    return innerConvert.convert(innerValue, destClass);
+                }
+            }
+
+            throw new ConvertorException("Unsupported convert: [" + src + "," + destClass.getName() + "]");
+        }
+    }
+}
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/convertor/StringAndObjectConvertor.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/convertor/StringAndObjectConvertor.java
index 853ab4694..76fc06e3d 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/convertor/StringAndObjectConvertor.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/convertor/StringAndObjectConvertor.java
@@ -19,7 +19,6 @@
 import com.alibaba.polardbx.common.charset.CharsetName;
 
 import java.math.BigDecimal;
-import java.nio.charset.StandardCharsets;
 
 public class StringAndObjectConvertor {
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/time/MySQLTimeConverter.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/time/MySQLTimeConverter.java
index a50c2702b..5226e9b3e 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/time/MySQLTimeConverter.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/time/MySQLTimeConverter.java
@@ -101,6 +101,16 @@ public static MySQLTimeVal convertDatetimeToTimestamp(MysqlDateTime t, TimeParse
         return convertDatetimeToTimestampInternal(t, status, zoneId);
     }
 
+    public static MySQLTimeVal convertValidDatetimeToTimestamp(MysqlDateTime t, TimeParseStatus status, ZoneId zoneId) {
+        // check_date(TIME_NO_ZERO_IN_DATE)
+        boolean isNonZeroDate = t.getYear() != 0 || t.getMonth() != 0 || t.getDay() != 0;
+        boolean isInvalid = MySQLTimeTypeUtil.isDateInvalid(t, isNonZeroDate, FLAG_TIME_NO_ZERO_IN_DATE);
+        if (isInvalid) {
+            throw new AssertionError("Invalid timestamp value");
+        }
+        return convertDatetimeToTimestampInternal(t, status, zoneId);
+    }
+
     public static MySQLTimeVal convertDatetimeToTimestampWithoutCheck(MysqlDateTime t, TimeParseStatus status,
                                                                       ZoneId zoneId) {
         return convertDatetimeToTimestampInternal(t, status, zoneId);
diff --git 
a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/time/calculator/MySQLIntervalType.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/time/calculator/MySQLIntervalType.java index 7adebc915..cad287674 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/time/calculator/MySQLIntervalType.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/time/calculator/MySQLIntervalType.java @@ -63,6 +63,34 @@ public static MySQLIntervalType of(String intervalName) { .orElse(null); } + public static boolean isDate(MySQLIntervalType intervalType) { + switch (intervalType) { + case INTERVAL_YEAR: + case INTERVAL_YEAR_MONTH: + case INTERVAL_QUARTER: + case INTERVAL_MONTH: + case INTERVAL_WEEK: + case INTERVAL_DAY: + return true; + case INTERVAL_DAY_HOUR: + case INTERVAL_DAY_MINUTE: + case INTERVAL_DAY_SECOND: + case INTERVAL_HOUR: + case INTERVAL_HOUR_MINUTE: + case INTERVAL_HOUR_SECOND: + case INTERVAL_MINUTE: + case INTERVAL_MINUTE_SECOND: + case INTERVAL_SECOND: + case INTERVAL_MICROSECOND: + case INTERVAL_DAY_MICROSECOND: + case INTERVAL_HOUR_MICROSECOND: + case INTERVAL_MINUTE_MICROSECOND: + case INTERVAL_SECOND_MICROSECOND: + default: + return false; + } + } + private static String normalize(String intervalName) { if (intervalName == null) { return null; diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/time/core/MysqlDateTime.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/time/core/MysqlDateTime.java index 5da223f80..98bed3784 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/time/core/MysqlDateTime.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/time/core/MysqlDateTime.java @@ -62,6 +62,10 @@ public MysqlDateTime(long year, long month, long day, long hour, long minute, lo this.timezone = null; } + public static MysqlDateTime zeroDateTime() { + return new MysqlDateTime(); + } + @Override public MysqlDateTime clone() { MysqlDateTime t = new MysqlDateTime(); @@ -548,4 +552,17 @@ public TimeZone getTimezone() { public void setTimezone(TimeZone timezone) { this.timezone = timezone; } + + // clear all states. + public void reset() { + this.year = 0; + this.month = 0; + this.day = 0; + this.hour = 0; + this.minute = 0; + this.second = 0; + this.secondPart = 0; + this.isNeg = false; + this.timezone = null; + } } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/time/core/TimeStorage.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/time/core/TimeStorage.java index f04caa20c..d05be8a63 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/time/core/TimeStorage.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/time/core/TimeStorage.java @@ -134,7 +134,8 @@ public static long writeTimestamp(MysqlDateTime t) { return t.isNeg() ? -l : l; } - public static long writeTimestamp(long year, long month, long day, long hour, long minute, long second, long secondPart, boolean isNeg) { + public static long writeTimestamp(long year, long month, long day, long hour, long minute, long second, + long secondPart, boolean isNeg) { // | 64 - 42 | 41 - 25 | 24 - 1 | // | ymd | hms | nano | long ymd = ((year * 13 + month) << 5) | day; @@ -144,6 +145,32 @@ public static long writeTimestamp(long year, long month, long day, long hour, lo } + public static void readTimestamp(long l, MysqlDateTime t) { + // clear states in mysql datetime. 
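+        // Unpacks the layout produced by writeTimestamp:
+        // | 64 - 42 | 41 - 25 | 24 - 1 |
+        // |   ymd   |   hms   |  nano  |
+        // The low 24 bits store microseconds, hence the * 1000 below to restore nanoseconds.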
+        t.reset();
+        t.setNeg(l < 0);
+        l = Math.abs(l);
+
+        // for nano
+        t.setSecondPart((l % (1L << 24)) * 1000L);
+
+        // for year - month - day
+        long l2 = l >> 24;
+        long ymd = l2 >> 17;
+        long ym = ymd >> 5;
+        t.setDay(ymd % (1L << 5));
+        t.setMonth(ym % 13);
+        t.setYear(ym / 13);
+
+        // for hour - minute - second
+        long hms = l2 % (1L << 17);
+        t.setSecond(hms % (1L << 6));
+        t.setMinute((hms >> 6) % (1L << 6));
+        t.setHour(hms >> 12);
+
+        t.setSqlType(Types.TIMESTAMP);
+    }
+
     public static MysqlDateTime readTimestamp(long l) {
         MysqlDateTime t = new MysqlDateTime();
         t.setNeg(l < 0);
@@ -183,6 +210,20 @@ public static MysqlDateTime readDate(long l) {
         return t;
     }
 
+    public static void readDate(long l, MysqlDateTime t) {
+        readTimestamp(l, t);
+        t.setSqlType(Types.DATE);
+    }
+
+    /**
+     * Compresses the packed long into bytes, following InnoDB's storage method
+     * (mysql/sql-common/my_time.c, my_time_packed_to_binary).
+     *

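+     * The fractional-seconds part adds ceil(decimal / 2) bytes to a fixed 3-byte
+     * date/time part, which yields the lengths below: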
+ * decimal 0 -> len = 3 + * decimal 1,2 -> len = 4 + * decimal 3,4 -> len = 5 + * decimal 5,6 -> len = 6 + */ public static byte[] storeAsBinary(long l, int decimal) { Preconditions.checkArgument(decimal <= 6); diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/time/parser/StringNumericParser.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/time/parser/StringNumericParser.java index 5b799908c..20afb55e4 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/time/parser/StringNumericParser.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/time/parser/StringNumericParser.java @@ -18,6 +18,7 @@ import com.alibaba.polardbx.common.datatype.DecimalConverter; import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.utils.version.InstanceVersion; import com.google.common.base.Preconditions; import com.google.common.primitives.UnsignedLongs; import io.airlift.slice.Slice; @@ -502,11 +503,8 @@ public static void parseStringWithRound(byte[] bytes, final int startPos, final if (exponent <= (SIGNED_MAX_LONG - value) / 10) { exponent = exponent * 10 + value; } else { - - results[NUMERIC_INDEX] = isUnsigned ? UNSIGNED_LONG_MAX : - (isNeg ? SIGNED_MIN_LONG : SIGNED_MAX_LONG); - results[ERROR_INDEX] = MY_ERRNO_ERANGE; - results[POSITION_INDEX] = pos; + // too big + handleRetTooBig(results, isUnsigned, isNeg, pos); return; } } @@ -517,11 +515,8 @@ public static void parseStringWithRound(byte[] bytes, final int startPos, final if (shift == 0) { if (addOn != 0) { if (UnsignedLongs.compare(longResult, UNSIGNED_LONG_MAX) == 0) { - - results[NUMERIC_INDEX] = isUnsigned ? UNSIGNED_LONG_MAX : - (isNeg ? SIGNED_MIN_LONG : SIGNED_MAX_LONG); - results[ERROR_INDEX] = MY_ERRNO_ERANGE; - results[POSITION_INDEX] = pos; + // too big + handleRetTooBig(results, isUnsigned, isNeg, pos); return; } longResult++; @@ -557,21 +552,15 @@ public static void parseStringWithRound(byte[] bytes, final int startPos, final handleResult(isUnsigned, results, pos, isNeg, longResult); return; } - - results[NUMERIC_INDEX] = isUnsigned ? UNSIGNED_LONG_MAX : - (isNeg ? SIGNED_MIN_LONG : SIGNED_MAX_LONG); - results[ERROR_INDEX] = MY_ERRNO_ERANGE; - results[POSITION_INDEX] = pos; + // too big + handleRetTooBig(results, isUnsigned, isNeg, pos); return; } for (; shift > 0; shift--, longResult *= 10) { if (UnsignedLongs.compare(longResult, CUT_OFF) > 0) { - - results[NUMERIC_INDEX] = isUnsigned ? UNSIGNED_LONG_MAX : - (isNeg ? SIGNED_MIN_LONG : SIGNED_MAX_LONG); - results[ERROR_INDEX] = MY_ERRNO_ERANGE; - results[POSITION_INDEX] = pos; + // Overflow, number too big + handleRetTooBig(results, isUnsigned, isNeg, pos); return; } } @@ -580,6 +569,22 @@ public static void parseStringWithRound(byte[] bytes, final int startPos, final } + public static void handleRetTooBig(long[] results, boolean isUnsigned, boolean isNeg, int pos) { + long result; + if (InstanceVersion.isMYSQL80()) { + result = isUnsigned ? + (isNeg ? 0 : UNSIGNED_LONG_MAX) : + (isNeg ? SIGNED_MIN_LONG : SIGNED_MAX_LONG); + } else { + result = isUnsigned ? UNSIGNED_LONG_MAX : + (isNeg ? 
SIGNED_MIN_LONG : SIGNED_MAX_LONG);
+        }
+
+        results[NUMERIC_INDEX] = result;
+        results[ERROR_INDEX] = MY_ERRNO_ERANGE;
+        results[POSITION_INDEX] = pos;
+    }
+
     public static double parseStringToDouble(byte[] bytes, int offset, int len) {
         if (bytes == null) {
             return 0d;
diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/version/InstanceVersion.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/version/InstanceVersion.java
index 032987dda..916e05759 100644
--- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/version/InstanceVersion.java
+++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/version/InstanceVersion.java
@@ -16,58 +16,48 @@
 package com.alibaba.polardbx.common.utils.version;
 
+import com.alibaba.polardbx.common.utils.logger.Logger;
+import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
 import org.apache.commons.lang.StringUtils;
 
 public class InstanceVersion {
 
     public static final String systemVersion = "instanceVersion";
+    private static final Logger logger = LoggerFactory.getLogger(InstanceVersion.class);
     private static final String SERVER_ARGS = "serverArgs";
     private static final String VERSION_POSTFIX = "-PXC-" + Version.getVersion();
     private static final String regex = "\\d+\\.\\d+\\.\\d+";
-
+    private static final String VERSION_PREFIX_56 = "5.6.29";
+    private static final String VERSION_PREFIX_57 = "5.7.25";
+    private static final String VERSION_PREFIX_8 = "8.0.3";
+    private static final String DEFAULT_VERSION_PREFIX = VERSION_PREFIX_56;
     static volatile InstanceVersion instanceVersion = new InstanceVersion();
-    private String VERSION_PREFIX_5 = "5.6.29";
-    private String VERSION_PREFIX_8 = "8.0.3";
-    private String VERSION_PREFIX = VERSION_PREFIX_5;
-
+    /**
+     * This flag is only used for engine-version checks inside the kernel;
+     * it does not affect the version-number prefix.
+     */
     private static boolean MYSQL80 = false;
+    private String VERSION_PREFIX = DEFAULT_VERSION_PREFIX;
 
     public InstanceVersion() {
         initialVersion();
     }
 
-    private void initialVersion() {
-        try {
-            final String instanceVersion = System.getProperty(systemVersion);
-            if (!StringUtils.isEmpty(instanceVersion)) {
-                setVersionPrefix(instanceVersion);
-            } else {
-                String serverArgs = System.getProperty(SERVER_ARGS);
-                if (StringUtils.isNotEmpty(serverArgs)) {
-                    String[] args = StringUtils.split(serverArgs, ';');
-                    for (String arg : args) {
-                        String[] config = StringUtils.split(arg, '=');
-                        if (config.length == 2) {
-                            if (config[0].equals(systemVersion)) {
-                                setVersionPrefix(config[1]);
-                            }
-                        }
-                    }
-                }
-            }
-        } catch (Exception e) {
-
-        }
-    }
-
-    private void setVersionPrefix(String instanceVersion) {
-        if (instanceVersion.equals("8")) {
-            VERSION_PREFIX = VERSION_PREFIX_8;
-        } else if (instanceVersion.equals("5")) {
-            VERSION_PREFIX = VERSION_PREFIX_5;
+    public static String parseVersionPrefix(String instanceVersion) throws IllegalVersionException {
+        if (instanceVersion.equals("5") || instanceVersion.equals("56")
+            || instanceVersion.equals("5.6")) {
+            return VERSION_PREFIX_56;
+        } else if (instanceVersion.equals("57") || instanceVersion.equals("5.7")) {
+            return VERSION_PREFIX_57;
+        } else if (instanceVersion.equals("8") || instanceVersion.equals("80")
+            || instanceVersion.equals("8.0")) {
+            return VERSION_PREFIX_8;
+        } else if (instanceVersion.equalsIgnoreCase("default")) {
+            return DEFAULT_VERSION_PREFIX;
         } else if (instanceVersion.matches(regex)) {
-            VERSION_PREFIX = instanceVersion;
+            return instanceVersion;
         }
+        throw new IllegalVersionException();
     }
 
     public static void reloadVersion(String version) {
@@ -99,4 
+89,40 @@ public static void setMYSQL80(boolean MYSQL80) { InstanceVersion.MYSQL80 = MYSQL80; } + private void initialVersion() { + try { + final String instanceVersion = System.getProperty(systemVersion); + if (!StringUtils.isEmpty(instanceVersion)) { + setVersionPrefix(instanceVersion); + } else { + String serverArgs = System.getProperty(SERVER_ARGS); + if (StringUtils.isNotEmpty(serverArgs)) { + String[] args = StringUtils.split(serverArgs, ';'); + for (String arg : args) { + String[] config = StringUtils.split(arg, '='); + if (config.length == 2) { + if (config[0].equals(systemVersion)) { + setVersionPrefix(config[1]); + } + } + } + } + } + } catch (Exception e) { + logger.warn("failed to init InstanceVersion", e); + } + } + + private void setVersionPrefix(String instanceVersion) { + try { + VERSION_PREFIX = parseVersionPrefix(instanceVersion); + } catch (IllegalVersionException e) { + logger.error("Illegal version prefix: " + instanceVersion); + VERSION_PREFIX = DEFAULT_VERSION_PREFIX; + } + } + + public static class IllegalVersionException extends Exception { + + } } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/version/Version.java b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/version/Version.java index e48dc8d32..b2cc35ad4 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/version/Version.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/common/utils/version/Version.java @@ -34,6 +34,9 @@ public final class Version { private Version() { } + // TODO get product version from config property + public static final String PRODUCT_VERSION = "2.4.0"; + private static final Logger logger = LoggerFactory.getLogger(Version.class); private static final Package myPackage = VersionAnnotation.class.getPackage(); private static final VersionAnnotation va = myPackage.getAnnotation(VersionAnnotation.class); @@ -129,7 +132,7 @@ public static String getVersion(Class cls, String defaultVersion) { + defaultVersion); } else { String file = codeSource.getLocation().getFile(); - version = getVerionByPath(file); + version = getVersionByPath(file); } } @@ -159,7 +162,7 @@ public static boolean validVersion(String name, String path, String minVersion, if (url != null) { String file = url.getFile(); if (file != null && file.length() > 0) { - String version = getVerionByPath(file); + String version = getVersionByPath(file); if (checkVersionNecessary(version)) { Long ver = convertVersion(version); if (ver < minv) { @@ -237,7 +240,7 @@ public static boolean checkDuplicate(String path, boolean failOnError) { return false; } - public static String getVerionByPath(String file) { + public static String getVersionByPath(String file) { if (file != null && file.length() > 0 && StringUtils.contains(file, ".jar")) { int index = StringUtils.lastIndexOf(file, ".jar"); file = file.substring(0, index); diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/config/ConfigDataHandler.java b/polardbx-common/src/main/java/com/alibaba/polardbx/config/ConfigDataHandler.java deleted file mode 100644 index b593c1217..000000000 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/config/ConfigDataHandler.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.config; - -import com.alibaba.polardbx.common.model.lifecycle.Lifecycle; - -import java.util.List; -import java.util.concurrent.Executor; - - -public interface ConfigDataHandler extends Lifecycle { - - public static final long GET_DATA_TIMEOUT = 10 * 1000; - public static final String FIRST_SERVER_STRATEGY = "firstServer"; - public static final String FIRST_CACHE_THEN_SERVER_STRATEGY = "firstCache"; - - String getData(); - - String getData(long timeout, String strategy); - - String getNullableData(); - - String getNullableData(long timeout, String strategy); - - void addListener(ConfigDataListener configDataListener, Executor executor); - - void addListeners(List configDataListenerList, Executor executor); - - boolean publishSingle(String dataId, String content); - - void clearListeners(); - - boolean removeData(String dataId); -} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/config/ConfigDataMode.java b/polardbx-common/src/main/java/com/alibaba/polardbx/config/ConfigDataMode.java index 891a06ee5..67eb9a3c9 100644 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/config/ConfigDataMode.java +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/config/ConfigDataMode.java @@ -16,96 +16,58 @@ package com.alibaba.polardbx.config; -import com.alibaba.fastjson.parser.ParserConfig; import com.alibaba.polardbx.common.properties.DynamicConfig; import com.alibaba.polardbx.common.utils.InstanceRole; -import org.apache.commons.lang.BooleanUtils; +import com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang.StringUtils; public class ConfigDataMode { - public static final String CONFIG_MODE = "tddl.config.mode"; + public static final String INSTANCE_ROLE_VARIABLE = "POLARDBX_INSTANCE_ROLE"; - private static Mode mode; - - private static Mode configServerMode; - - private static boolean isQuotaEscape = true; - private static volatile long refreshConfigTimestamp = 0; - private static boolean supportRuleParameterNullValue = false; - private static String atomAddressMode = null; - private static boolean zeroDataTimeToString = false; - - private static boolean supportSingleDbMultiTbs = false; - private static boolean supportRemoveDdl = false; - private static boolean supportDropAutoSeq = false; - private static boolean allowSimpleSequence = false; - - // Default isolation level be set in `ServerLoader.configSystem` - private static int txIsolation; - private static String cluster; - - static { - enableFastJsonAutoType(); - loadConfigDataMode(); - } - - protected static void enableFastJsonAutoType() { - - try { - ParserConfig.getGlobalInstance().addAccept("com.alibaba.polardbx."); - ParserConfig.getGlobalInstance().addAccept("org.apache.calcite."); - } catch (Throwable e) { + private static InstanceRole instanceRole = InstanceRole.MASTER; - } - - } - - protected static void loadConfigDataMode() { - if (isFastMock()) { - return; - } - String m = System.getProperty(CONFIG_MODE, "auto"); - mode = Mode.nameOf(m); - if (mode == null) { - mode = Mode.AUTO; - } - - configServerMode = Mode.nameOf(m); - if 
(configServerMode == null) {
-            configServerMode = Mode.AUTO;
-        }
+    private static Mode mode;
 
-        if (mode != Mode.FAST_MOCK && System.getProperty("metaDbAddr") != null) {
-            if (!String.valueOf(System.getProperty("metaDbAddr")).isEmpty()) {
-                mode = Mode.GMS;
-                configServerMode = Mode.GMS;
+    public static InstanceRole getInstanceRole() {
+        return instanceRole;
+    }
+
+    public static void setInstanceRole(int instType) {
+        String fastMode = StringUtils.isNotEmpty(System.getProperty("tddl.config.mode")) ?
+            System.getProperty("tddl.config.mode") : System.getProperty("configMode");
+        if (StringUtils.isNotEmpty(fastMode)) {
+            // set the instance role by env.
+            if (fastMode.equalsIgnoreCase("FAST_MOCK")) {
+                // keep compatible with fast mode.
+                ConfigDataMode.instanceRole = InstanceRole.FAST_MOCK;
+                return;
+            } else if (fastMode.equalsIgnoreCase("COLUMNAR_SLAVE")) {
+                ConfigDataMode.instanceRole = InstanceRole.COLUMNAR_SLAVE;
+                return;
             }
         }
 
-        String singleDbMultiTbsSupported = System.getProperty("supportSingleDbMultiTbs");
-        supportSingleDbMultiTbs = BooleanUtils.toBoolean(singleDbMultiTbsSupported);
-
-        String removeDdlSupported = System.getProperty("supportRemoveDdl");
-        supportRemoveDdl = BooleanUtils.toBoolean(removeDdlSupported);
-
-        String dropAutoSeqSupported = System.getProperty("supportDropAutoSeq");
-        supportDropAutoSeq = BooleanUtils.toBoolean(dropAutoSeqSupported);
-
-        String simpleSequenceAllowed = System.getProperty("allowSimpleSequence");
-        allowSimpleSequence = BooleanUtils.toBoolean(simpleSequenceAllowed);
+        if (instType == 0 || instType == 3) {
+            ConfigDataMode.instanceRole = InstanceRole.MASTER;
+        } else if (instType == 4) {
+            ConfigDataMode.instanceRole = InstanceRole.COLUMNAR_SLAVE;
+        } else {
+            ConfigDataMode.instanceRole = InstanceRole.ROW_SLAVE;
+        }
     }
 
-    public static void reload() {
-        loadConfigDataMode();
+    @VisibleForTesting
+    public static void setInstanceRole(InstanceRole instanceRole) {
+        ConfigDataMode.instanceRole = instanceRole;
     }
 
+    /**
+     * The default mode is GMS; MOCK is kept only for the Planner unit tests.
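+     * The legacy AUTO, MANAGER and FAST_MOCK modes have been removed;
+     * FAST_MOCK is now expressed as an InstanceRole instead.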
+     */
     public enum Mode {
-        AUTO(null),
         MOCK("mock"),
-        MANAGER("manager"),
-        GMS("gms"),
-        FAST_MOCK("diamond");
+        GMS("gms");
 
         private String extensionName;
@@ -163,143 +125,66 @@ public static Mode getMode() {
         return mode;
     }
 
+    @VisibleForTesting
     public static void setMode(Mode mode) {
         ConfigDataMode.mode = mode;
     }
 
-    public static Mode getConfigServerMode() {
-        return configServerMode;
-    }
-
-    public static void setConfigServerMode(Mode mode) {
-        configServerMode = mode;
-    }
-
+    /**
+     * Whether the server runs in mock mode; mainly used for tests.
+     */
     public static boolean isMock() {
         return mode != null && mode == Mode.MOCK;
     }
 
-    public static boolean isFastMock() {
-        return mode != null && mode == Mode.FAST_MOCK;
-    }
-
     // ========= The DB type of Server =========
     public static boolean isPolarDbX() {
-        if (isFastMock()) {
+        if (isFastMock() || isMock()) {
             return false;
+        } else {
+            return mode == Mode.GMS;
         }
-        // PolarDbX load configs by GMS/MetaDB
-        return configServerMode == Mode.GMS;
-    }
-
-    public static boolean isDRDS() {
-        return !isPolarDbX();
     }
 
     // ========= The instance role of Server =========
     // Check master for all DB type
     public static boolean isMasterMode() {
-        return InstanceRoleManager.INSTANCE.getInstanceRole() == InstanceRole.MASTER || (
-            InstanceRoleManager.INSTANCE.getInstanceRole() == InstanceRole.SLAVE &&
+        return getInstanceRole() == InstanceRole.MASTER || (
+            getInstanceRole() == InstanceRole.ROW_SLAVE &&
                 DynamicConfig.getInstance().learnerMode().compareTo(LearnerMode.ALLOW_INIT_DML) > 0);
     }
 
-    public static boolean isSlaveMode() {
-        return InstanceRoleManager.INSTANCE.getInstanceRole() == InstanceRole.SLAVE && (
+    public static boolean isRowSlaveMode() {
+        return getInstanceRole() == InstanceRole.ROW_SLAVE && (
             DynamicConfig.getInstance().learnerMode().compareTo(LearnerMode.ALLOW_USE_DML) < 0);
     }
 
-    public static boolean needInitMasterModeResource() {
-        return InstanceRoleManager.INSTANCE.getInstanceRole() == InstanceRole.MASTER || (
-            InstanceRoleManager.INSTANCE.getInstanceRole() == InstanceRole.SLAVE &&
-                DynamicConfig.getInstance().learnerMode().compareTo(LearnerMode.ONLY_READ) > 0);
-    }
-
-    public static long getRefreshConfigTimestamp() {
-        return refreshConfigTimestamp;
+    public static boolean isColumnarMode() {
+        return getInstanceRole() == InstanceRole.COLUMNAR_SLAVE;
     }
 
-    public static void setRefreshConfigTimestamp(long refreshConfigTimestamp) {
-        ConfigDataMode.refreshConfigTimestamp = refreshConfigTimestamp;
-    }
-
-    public static boolean isSupportRuleParameterNullValue() {
-        return supportRuleParameterNullValue;
-    }
-
-    public static void setSupportRuleParameterNullValue(boolean supportRuleParameterNullValue) {
-        ConfigDataMode.supportRuleParameterNullValue = supportRuleParameterNullValue;
-    }
-
-    public static boolean isQuotaEscape() {
-        return isQuotaEscape;
-    }
-
-    public static void setQuotaEscape(boolean isQuotaEscape) {
-        ConfigDataMode.isQuotaEscape = isQuotaEscape;
-    }
-
-    public static boolean isZeroDataTimeToString() {
-        return zeroDataTimeToString;
-    }
-
-    public static void setZeroDataTimeToString(boolean zeroDataTimeToString) {
-        ConfigDataMode.zeroDataTimeToString = zeroDataTimeToString;
-    }
-
-    public static String getAtomAddressMode() {
-        return atomAddressMode;
-    }
-
-    public static void setAtomAddressMode(String atomAddressMode) {
-        ConfigDataMode.atomAddressMode = atomAddressMode;
-    }
-
-    public static String getCluster() {
-        return cluster;
-    }
-
-    public static void setCluster(String cluster) {
-        ConfigDataMode.cluster = cluster;
-    }
-
-    public static boolean 
isSupportSingleDbMultiTbs() { - return supportSingleDbMultiTbs; - } - - public static void setSupportSingleDbMultiTbs(boolean supportSingleDbMultiTbs) { - ConfigDataMode.supportSingleDbMultiTbs = supportSingleDbMultiTbs; - } - - public static boolean isSupportRemoveDdl() { - return supportRemoveDdl; - } - - public static void setSupportRemoveDdl(boolean supportRemoveDdl) { - ConfigDataMode.supportRemoveDdl = supportRemoveDdl; - } - - public static boolean isSupportDropAutoSeq() { - return supportDropAutoSeq; + public static boolean isFastMock() { + return getInstanceRole() == InstanceRole.FAST_MOCK; } - public static void setSupportDropAutoSeq(boolean supportDropAutoSeq) { - ConfigDataMode.supportDropAutoSeq = supportDropAutoSeq; + public static boolean needInitMasterModeResource() { + return getInstanceRole() == InstanceRole.MASTER || ( + getInstanceRole() == InstanceRole.ROW_SLAVE && + DynamicConfig.getInstance().learnerMode().compareTo(LearnerMode.ONLY_READ) > 0); } - public static boolean isAllowSimpleSequence() { - return allowSimpleSequence; + public static boolean needDNResource() { + return getInstanceRole() == InstanceRole.MASTER || + getInstanceRole() == InstanceRole.ROW_SLAVE; } - public static void setAllowSimpleSequence(boolean allowSimpleSequence) { - ConfigDataMode.allowSimpleSequence = allowSimpleSequence; + public static boolean needGMSResource() { + return getInstanceRole() != InstanceRole.FAST_MOCK; } - public static int getTxIsolation() { - return txIsolation; + public static boolean isReadOnlyMode() { + return getInstanceRole() == InstanceRole.COLUMNAR_SLAVE || + getInstanceRole() == InstanceRole.ROW_SLAVE; } - public static void setTxIsolation(int txIsolation) { - ConfigDataMode.txIsolation = txIsolation; - } } diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/config/impl/holder/AbstractConfigDataHolder.java b/polardbx-common/src/main/java/com/alibaba/polardbx/config/impl/holder/AbstractConfigDataHolder.java deleted file mode 100644 index d66fce409..000000000 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/config/impl/holder/AbstractConfigDataHolder.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.config.impl.holder; - -import com.alibaba.polardbx.common.model.lifecycle.AbstractLifecycle; - -public abstract class AbstractConfigDataHolder extends AbstractLifecycle implements ConfigDataHolder { - -} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/config/impl/holder/ConfigDataHolder.java b/polardbx-common/src/main/java/com/alibaba/polardbx/config/impl/holder/ConfigDataHolder.java deleted file mode 100644 index 520a34b3a..000000000 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/config/impl/holder/ConfigDataHolder.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.config.impl.holder; - -import com.alibaba.polardbx.common.model.lifecycle.Lifecycle; -import com.alibaba.polardbx.config.ConfigDataHandler; - -public interface ConfigDataHolder extends Lifecycle { - -} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/config/impl/holder/ConfigHolderFactory.java b/polardbx-common/src/main/java/com/alibaba/polardbx/config/impl/holder/ConfigHolderFactory.java deleted file mode 100644 index 638d50616..000000000 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/config/impl/holder/ConfigHolderFactory.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.config.impl.holder; - -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -public class ConfigHolderFactory { - - private static Map holderMap = new ConcurrentHashMap(); - - public static ConfigDataHolder getConfigDataHolder(String appName) { - return holderMap.get(appName); - } - - public static void addConfigDataHolder(String appName, ConfigDataHolder configDataHolder) { - holderMap.put(appName, configDataHolder); - } - - public static void removeConfigHoder(String appName) { - holderMap.remove(appName); - } - - public static boolean isInit(String appName) { - return appName != null && holderMap.containsKey(appName); - } - -} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/config/impl/mock/MockConfigHolder.java b/polardbx-common/src/main/java/com/alibaba/polardbx/config/impl/mock/MockConfigHolder.java deleted file mode 100644 index fc55dd91b..000000000 --- a/polardbx-common/src/main/java/com/alibaba/polardbx/config/impl/mock/MockConfigHolder.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.config.impl.mock; - -import com.alibaba.polardbx.config.impl.holder.AbstractConfigDataHolder; -import com.google.common.collect.Maps; -import com.alibaba.polardbx.common.exception.NotSupportException; -import com.alibaba.polardbx.common.utils.extension.Activate; - -import java.util.Collections; -import java.util.List; -import java.util.Map; - -@Activate(name = "mock", order = 1) -public class MockConfigHolder extends AbstractConfigDataHolder { - - public MockConfigHolder() { - - } -} diff --git a/polardbx-common/src/main/java/com/alibaba/polardbx/lbac/LBACException.java b/polardbx-common/src/main/java/com/alibaba/polardbx/lbac/LBACException.java new file mode 100644 index 000000000..f18dc4690 --- /dev/null +++ b/polardbx-common/src/main/java/com/alibaba/polardbx/lbac/LBACException.java @@ -0,0 +1,36 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.lbac; + +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; + +public class LBACException extends TddlRuntimeException { + + public LBACException(String... 
params) { + super(ErrorCode.ERR_LBAC, params); + } + + public LBACException(String param, Throwable e) { + super(ErrorCode.ERR_LBAC, param, e); + } + + public LBACException(Throwable e) { + super(ErrorCode.ERR_LBAC, e.getMessage(), e); + } + +} diff --git a/polardbx-common/src/main/java/com/taobao/tddl/common/privilege/AuthPlugin.java b/polardbx-common/src/main/java/com/taobao/tddl/common/privilege/AuthPlugin.java new file mode 100644 index 000000000..6d96a4b88 --- /dev/null +++ b/polardbx-common/src/main/java/com/taobao/tddl/common/privilege/AuthPlugin.java @@ -0,0 +1,53 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.taobao.tddl.common.privilege; + +import org.apache.commons.lang3.StringUtils; + +/** + * authentication plugin of user password + */ +public enum AuthPlugin { + + /** + * 1-round sha-1 + */ + POLARDBX_NATIVE_PASSWORD, + /** + * 2-round sha-1 + */ + MYSQL_NATIVE_PASSWORD; + + public static AuthPlugin lookupByName(String pluginName) { + if (StringUtils.isBlank(pluginName)) { + return POLARDBX_NATIVE_PASSWORD; + } + + switch (pluginName.toUpperCase()) { + case "POLARDBX_NATIVE_PASSWORD": + return POLARDBX_NATIVE_PASSWORD; + case "MYSQL_NATIVE_PASSWORD": + return MYSQL_NATIVE_PASSWORD; + default: + throw new RuntimeException("Unsupported auth_plugin: " + pluginName); + } + } + + public String toLowerCase() { + return name().toLowerCase(); + } +} diff --git a/polardbx-common/src/main/java/com/taobao/tddl/common/privilege/EncrptPassword.java b/polardbx-common/src/main/java/com/taobao/tddl/common/privilege/EncrptPassword.java index f7b73d21b..f10d0cdf6 100644 --- a/polardbx-common/src/main/java/com/taobao/tddl/common/privilege/EncrptPassword.java +++ b/polardbx-common/src/main/java/com/taobao/tddl/common/privilege/EncrptPassword.java @@ -16,12 +16,16 @@ package com.taobao.tddl.common.privilege; +import com.alibaba.polardbx.common.utils.encrypt.SecurityUtil; + import java.io.Serializable; +import java.security.NoSuchAlgorithmException; public class EncrptPassword implements Serializable { private static final long serialVersionUID = 6757582898782090114L; private boolean enc = true; + private AuthPlugin authPlugin = AuthPlugin.POLARDBX_NATIVE_PASSWORD; private String password = null; private EncryptAlgorithm encryptAlgorithm = EncryptAlgorithm.SHA1; @@ -29,8 +33,13 @@ public EncrptPassword() { } public EncrptPassword(String password, boolean enc) { + this(password, AuthPlugin.POLARDBX_NATIVE_PASSWORD, enc); + } + + public EncrptPassword(String password, AuthPlugin authPlugin, boolean enc) { this.password = password; this.enc = enc; + this.authPlugin = authPlugin; this.encryptAlgorithm = enc ? 
EncryptAlgorithm.SHA1 : EncryptAlgorithm.NONE;
     }
@@ -69,4 +78,31 @@ public EncryptAlgorithm getEncryptAlgorithm() {
     public void setEncryptAlgorithm(EncryptAlgorithm encryptAlgorithm) {
         this.encryptAlgorithm = encryptAlgorithm;
     }
+
+    public AuthPlugin getAuthPlugin() {
+        return authPlugin;
+    }
+
+    public byte[] getMysqlPassword() throws NoSuchAlgorithmException {
+        if (password == null) {
+            throw new NullPointerException();
+        }
+        byte[] mysqlUserPassword = null;
+        if (enc) {
+            switch (authPlugin) {
+                case MYSQL_NATIVE_PASSWORD:
+                    // The password is stored after two rounds of SHA-1 hashing.
+                    mysqlUserPassword = SecurityUtil.hexStr2Bytes(password);
+                    break;
+                case POLARDBX_NATIVE_PASSWORD:
+                default:
+                    // The password is stored after one round of SHA-1 hashing.
+                    mysqlUserPassword = SecurityUtil.sha1Pass(SecurityUtil.hexStr2Bytes(password));
+                    break;
+            }
+        } else {
+            mysqlUserPassword = SecurityUtil.calcMysqlUserPassword(password.getBytes());
+        }
+        return mysqlUserPassword;
+    }
 }
diff --git a/polardbx-common/src/main/java/io/airlift/slice/Preconditions.java b/polardbx-common/src/main/java/io/airlift/slice/Preconditions.java
index ddf321983..1618ecb32 100644
--- a/polardbx-common/src/main/java/io/airlift/slice/Preconditions.java
+++ b/polardbx-common/src/main/java/io/airlift/slice/Preconditions.java
@@ -45,6 +45,12 @@ public static void checkArgument(boolean expression, String errorMessage) {
         }
     }
 
+    public static void checkArgument(boolean expression) {
+        if (!expression) {
+            throw new IllegalArgumentException();
+        }
+    }
+
     public static int checkPositionIndex(int index, int size) {
         return checkPositionIndex(index, size, "index");
     }
diff --git a/polardbx-common/src/main/java/io/airlift/slice/Slice.java b/polardbx-common/src/main/java/io/airlift/slice/Slice.java
index e37e41072..909ce8d35 100644
--- a/polardbx-common/src/main/java/io/airlift/slice/Slice.java
+++ b/polardbx-common/src/main/java/io/airlift/slice/Slice.java
@@ -228,14 +228,14 @@ void resetSlice(byte[] base) {
     }
 
     void resetSlice(@Nullable Object base, long address, int size, int retainedSize, @Nullable Object reference) {
-        checkArgument(this != Slices.EMPTY_SLICE, "EmptySlice shouldn't resetSlice");
+        checkArgument(this != Slices.EMPTY_SLICE);
         if (address <= 0) {
             throw new IllegalArgumentException(format("Invalid address: %s", address));
         }
         if (size <= 0) {
             throw new IllegalArgumentException(format("Invalid size: %s", size));
         }
-        checkArgument((address + size) >= size, "Address + size is greater than 64 bits");
+        checkArgument((address + size) >= size);
 
         this.reference = reference;
         this.base = base;
diff --git a/polardbx-common/src/main/resources/META-INF/services/com.alibaba.polardbx.config.impl.holder.AbstractConfigDataHolder b/polardbx-common/src/main/resources/META-INF/services/com.alibaba.polardbx.config.impl.holder.AbstractConfigDataHolder
deleted file mode 100644
index ebeb7ac3c..000000000
--- a/polardbx-common/src/main/resources/META-INF/services/com.alibaba.polardbx.config.impl.holder.AbstractConfigDataHolder
+++ /dev/null
@@ -1 +0,0 @@
-com.alibaba.polardbx.config.impl.mock.MockConfigHolder
\ No newline at end of file
diff --git a/polardbx-common/src/main/resources/res/ErrorCode.properties b/polardbx-common/src/main/resources/res/ErrorCode.properties
index 9dc39ce82..88e5d02b5 100644
--- a/polardbx-common/src/main/resources/res/ErrorCode.properties
+++ b/polardbx-common/src/main/resources/res/ErrorCode.properties
@@ -97,6 +97,7 @@ ERR_GLOBAL_SECONDARY_INDEX_TRUNCATE_PRIMARY_TABLE=Does not support truncate tabl
 ERR_GLOBAL_SECONDARY_INDEX_ALLOW_ADD=Does not support create global secondary index on existing table.
 ERR_GLOBAL_SECONDARY_INDEX_BACKFILL_DUPLICATE_ENTRY=Duplicated entry ''{0}'' for key ''{1}''
 ERR_GLOBAL_SECONDARY_INDEX_CHECKER={0}
+ERR_COLUMNAR_INDEX_CHECKER={0}
 ERR_SERVER=server error by {0}
 ERR_RPC=rpc error by {0}
 ERR_NET_SEND=send packet failed by stream {0}
@@ -216,6 +217,7 @@ ERR_PENDING_DDL_JOBS_EXCEED_LIMIT=The current number of PENDING jobs has reached
 ERR_TABLE_PARTITIONS_EXCEED_LIMIT=The number of table partitions ''{0}'' exceeds the upper limit ''{1}''. Please specify less table partitions or adjust the value of the parameter MAX_TABLE_PARTITIONS_PER_DB.
 ERR_UNKNOWN_TZ=Unknown or incorrect time zone: {0}
 ERR_BASELINE=Baseline error: {0}
+ERR_PLAN_COST=Get plan cost error: {0}
 ERR_VIEW=View error: {0}
 ERR_DROP_DB_NOT_EXISTS=The database to be dropped doesn''t exist.
 ERR_DROP_DB_ILLEGAL_STATE=The database to be dropped is in illegal state. Please contact admin to check.
@@ -300,6 +302,14 @@ ERR_EXECUTE_ON_OSS={0}
 ERR_OSS_FORMAT={0}
 ERR_OSS_CONNECT={0}
 ERR_FILE_STORAGE_EXISTS={0}
+ERR_ARCHIVE_NOT_ENABLED={0}
+ERR_ARCHIVE_TABLE_EXISTS={0}
+ERR_BITMAP_ROW_GROUP_INDEX={0}
+ERR_LOAD_CSV_FILE={0}
+ERR_LOAD_DEL_FILE={0}
+ERR_LOAD_ORC_FILE={0}
+ERR_COLUMNAR_SNAPSHOT={0}
+ERR_COLUMNAR_SCHEMA={0}
 ERR_SET_AUTO_SAVEPOINT=Set auto savepoint fail
 ERR_REPARTITION_TABLE_WITH_GSI={0}
 ERR_REPARTITION_KEY={0}
@@ -334,11 +344,11 @@ ERR_GLOBAL_SECONDARY_TRUNCATE_PARTITION={0}
 ERR_CDC_GENERIC={0}
 ERR_REPLICATION_RESULT={0}
 ERR_REPLICA_NOT_SUPPORT={0}
+ERR_INSTANCE_READ_ONLY_OPTION_SET_FAILED=Set instance read only option ''{0}'' failed
 ERR_CREATE_SELECT_FUNCTION_ALIAS={0}
 ERR_CREATE_SELECT_UPDATE={0}
 ERR_CREATE_SELECT_WITH_GSI={0}
 ERR_CREATE_SELECT_WITH_OSS={0}
-
 ERR_DUPLICATE_NAME_FK_CONSTRAINT=Duplicate foreign key constraint name ''{0}''
 ERR_DROP_FK_CONSTRAINT=Can''t DROP ''{0}''; check that column/key exists
 ERR_DROP_COLUMN_FK_CONSTRAINT=Cannot drop column ''{0}'' needed in ''{1}''.''{2}'' foreign key constraint ''{3}''
@@ -358,3 +368,10 @@ ERR_ADD_FK_CHARSET_COLLATION=Cannot add foreign key constraint due to different
 ERR_FK_CONVERT_TO_CHARSET=Cannot convert to charset ''{0}'' on table ''{1}''.''{2}'' due to foreign key
 ERR_FK_REFERENCING_COLUMN_NOT_EXIST=Referencing column ''{0}'' in table ''{1}''.''{2}'' not exist
 ERR_CANT_CHANGE_TX_ISOLATION=Transaction characteristics can't be changed while a transaction is in progress
+ERR_DDL_WITH_CCI=Does not support current ddl [{0}] with CCI
+ERR_UNSUPPORTED_COLUMN_TYPE_WITH_CCI=The {0} of type {1} is not supported by CCI
+
+ERR_BINARY_PREDICATE={0}
+ERR_ENCDB={0}
+ERR_LBAC={0}
+ERR_INSTANCE_READ_ONLY_OPTION_NOT_SUPPORT=Server is running with the instance-read-only option so it cannot execute this statement
diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/EngineTest.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/EngineTest.java
new file mode 100644
index 000000000..5558bdfce
--- /dev/null
+++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/EngineTest.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.common;
+
+import junit.framework.TestCase;
+
+public class EngineTest extends TestCase {
+
+    public void testOf() {
+        assertEquals(Engine.OSS, Engine.of("OSS"));
+        assertEquals(Engine.INNODB, Engine.of(""));
+        boolean catchException = false;
+        try {
+            Engine.of("NON_EXIST_ENGINE");
+        } catch (Throwable t) {
+            catchException = true;
+            assertTrue(t.getMessage().equalsIgnoreCase("Unknown engine name:NON_EXIST_ENGINE"));
+        }
+        if (!catchException) {
+            throw new AssertionError("Engine.of() method should fail");
+        }
+    }
+
+    public void testHasCache() {
+        assertTrue(Engine.hasCache(Engine.OSS));
+        assertTrue(Engine.hasCache(Engine.EXTERNAL_DISK));
+        assertTrue(Engine.hasCache(Engine.NFS));
+        assertTrue(Engine.hasCache(Engine.S3));
+        assertTrue(Engine.hasCache(Engine.ABS));
+        assertFalse(Engine.hasCache(Engine.INNODB));
+        assertFalse(Engine.hasCache(null));
+    }
+
+    public void testIsFileStore() {
+        assertTrue(Engine.isFileStore(Engine.OSS));
+        assertTrue(Engine.isFileStore(Engine.EXTERNAL_DISK));
+        assertTrue(Engine.isFileStore(Engine.LOCAL_DISK));
+        assertTrue(Engine.isFileStore(Engine.NFS));
+        assertTrue(Engine.isFileStore(Engine.S3));
+        assertTrue(Engine.isFileStore(Engine.ABS));
+        assertFalse(Engine.isFileStore(Engine.INNODB));
+        assertFalse(Engine.isFileStore((Engine) null));
+    }
+
+    public void testSupportColumnar() {
+        assertTrue(Engine.supportColumnar(Engine.OSS));
+        assertTrue(Engine.supportColumnar(Engine.EXTERNAL_DISK));
+        assertTrue(Engine.supportColumnar(Engine.LOCAL_DISK));
+        assertTrue(Engine.supportColumnar(Engine.NFS));
+        assertTrue(Engine.supportColumnar(Engine.S3));
+        assertTrue(Engine.supportColumnar(Engine.ABS));
+        assertFalse(Engine.supportColumnar(Engine.INNODB));
+        assertFalse(Engine.supportColumnar((Engine) null));
+    }
+
+    public void testTestIsFileStore() {
+        assertTrue(Engine.isFileStore("OSS"));
+    }
+}
\ No newline at end of file
diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/RevisableOrderInvariantHashTest.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/RevisableOrderInvariantHashTest.java
new file mode 100644
index 000000000..1cee8fec6
--- /dev/null
+++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/RevisableOrderInvariantHashTest.java
@@ -0,0 +1,464 @@
+package com.alibaba.polardbx.common;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.security.SecureRandom;
+import java.text.NumberFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+public class RevisableOrderInvariantHashTest {
+    Random random = new SecureRandom();
+    NumberFormat numberFormat = NumberFormat.getNumberInstance();
+
+    /**
+     * Basic usage of RevisableOrderInvariantHash.
+     */
+    @Test
+    public void usageExamples() {
+        {
+            // order invariant.
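+            // hash({1, 2, 3}) == hash({3, 2, 1}): the result depends only on the
+            // multiset of values added, not on the order in which they are added.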
+            long a = new RevisableOrderInvariantHash().add(1).add(2).add(3).getResult();
+            long b = new RevisableOrderInvariantHash().add(3).add(2).add(1).getResult();
+            Assert.assertEquals(a, b);
+        }
+
+        {
+            // Revisable case 1.
+            long a = new RevisableOrderInvariantHash().add(2).add(3).getResult();
+            long b = new RevisableOrderInvariantHash().add(1).add(2).add(3).remove(1).getResult();
+            Assert.assertEquals(a, b);
+        }
+
+        {
+            // Revisable case 2.
+            // Suppose you have 2 arrays: a = {1, 2} and b = {3}
+            RevisableOrderInvariantHash a = new RevisableOrderInvariantHash().add(1).add(2);
+            RevisableOrderInvariantHash b = new RevisableOrderInvariantHash().add(3);
+            // And you have another array c = {1, 2, 3}
+            RevisableOrderInvariantHash c = new RevisableOrderInvariantHash().add(1).add(2).add(3);
+            // You expect hash(a, b) = hash(c)
+            // When adding the result of another hash, remember to remove one zero.
+            // When removing the result of another hash, remember to add one zero back (see the next case).
+            RevisableOrderInvariantHash ab = new RevisableOrderInvariantHash()
+                .add(a.getResult()).remove(0)
+                .add(b.getResult()).remove(0);
+            Assert.assertEquals(c.getResult(), ab.getResult());
+        }
+
+        {
+            // Revisable case 3.
+            // Suppose data in InnoDB are: a = {1, 2}
+            RevisableOrderInvariantHash a = new RevisableOrderInvariantHash().add(1).add(2);
+            // And data in ORC files (containing deleted data): b = {1, 2, 3, 4}
+            RevisableOrderInvariantHash b = new RevisableOrderInvariantHash().add(1).add(2).add(3).add(4);
+            // And data in the deleted-bitmap: c = {3, 4}
+            // We want to prove InnoDB = ORC - deleted-bitmap: a = (b - c)
+
+            // One way to do that is proving b + (-c) = a
+            RevisableOrderInvariantHash removeC = new RevisableOrderInvariantHash().remove(3).remove(4);
+            RevisableOrderInvariantHash bRemoveC = new RevisableOrderInvariantHash()
+                .add(b.getResult()).remove(0)
+                .add(removeC.getResult()).remove(0);
+            Assert.assertEquals(a.getResult(), bRemoveC.getResult());
+
+            // Another way is proving b - c = a
+            // When removing the result of another hash, remember to add one zero back.
+            RevisableOrderInvariantHash c = new RevisableOrderInvariantHash().add(3).add(4);
+            RevisableOrderInvariantHash bRemoveC2 = new RevisableOrderInvariantHash()
+                .add(b.getResult()).remove(0)
+                .remove(c.getResult()).add(0);
+            Assert.assertEquals(a.getResult(), bRemoveC2.getResult());
+        }
+    }
+
+    private long mod(long x) {
+        return x & ((1L << 63) - 1);
+    }
+
+    private long mod2(long x) {
+        return x & ((1L << 31) - 1);
+    }
+
+    private long mod(long a, long b) {
+        long result = a % b;
+        if (result < 0) {
+            result += b;
+        }
+        return result;
+    }
+
+    @Test
+    public void orderInvariantTest() {
+        {
+            // Simple test.
+            List<Long> arr0 = Arrays.asList(0L, 100L, 200L, 1L << 30, 1L << 31, 1L << 32, 1L << 33, 1L << 63 - 1, 100L);
+            Long hash0 = verifyRemoval(arr0);
+            Collections.shuffle(arr0, random);
+            Long hash1 = verifyRemoval(arr0);
+            Assert.assertEquals(hash0, hash1);
+            long before = System.nanoTime();
+            RevisableOrderInvariantHash hash = new RevisableOrderInvariantHash();
+            for (Long x : arr0) {
+                hash.add(x);
+            }
+            long duration = System.nanoTime() - before;
+            System.out.println("Simple test cost " + numberFormat.format(duration) + " ns, hash: " + hash.getResult());
+        }
+
+        {
+            // Randomly generate 1,000,000 positive long integers.
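+            // Shuffling must not change the result. The timings below compare add()
+            // with addNoMod(), which presumably skips per-element modulo reduction
+            // (printed as the "allow overflow" variant).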
+            List<Long> arr0 = Stream
+                .generate(() -> mod(random.nextLong()))
+                .limit(1000000)
+                .collect(Collectors.toList());
+            Long hash0 = verifyRemoval(arr0);
+            Collections.shuffle(arr0, random);
+            Long hash1 = verifyRemoval(arr0);
+            Assert.assertEquals(hash0, hash1);
+            RevisableOrderInvariantHash hash = new RevisableOrderInvariantHash();
+            long before = System.nanoTime();
+            for (Long x : arr0) {
+                hash.add(x);
+            }
+            long duration = System.nanoTime() - before;
+            System.out.println(
+                "Hashing 1,000,000 elements cost " + numberFormat.format(duration) + " ns, hash: " + hash.getResult());
+
+            hash.reset();
+            before = System.nanoTime();
+            for (Long x : arr0) {
+                hash.addNoMod(x);
+            }
+            duration = System.nanoTime() - before;
+            System.out.println(
+                "Hashing 1,000,000 elements (allow overflow) cost " + numberFormat.format(duration) + " ns, hash: "
+                    + hash.getResult());
+        }
+    }
+
+    @Test
+    public void removalTest() {
+        {
+            // Simple test.
+            List<Long> arr0 = Arrays.asList(0L, 100L, 200L, 1L << 30, 1L << 31, 1L << 32, 1L << 33, 1L << 63 - 1, 100L);
+            // Hash code after removing the i-th element.
+            List<Long> expected = new ArrayList<>();
+            RevisableOrderInvariantHash hash = new RevisableOrderInvariantHash();
+            for (int i = 0; i < arr0.size(); i++) {
+                hash.reset();
+                for (int j = 0; j < arr0.size(); j++) {
+                    if (i == j) {
+                        // Skip i-th element.
+                        continue;
+                    }
+                    hash.add(arr0.get(j));
+                }
+                expected.add(hash.getResult());
+            }
+
+            hash.reset();
+            for (Long x : arr0) {
+                hash.add(x);
+            }
+
+            Long all = hash.getResult();
+            for (int i = 0; i < arr0.size(); i++) {
+                Assert.assertEquals(hash.getResult(), all);
+                long x = arr0.get(i);
+                // remove x
+                Assert.assertEquals(hash.remove(x).getResult(), expected.get(i));
+                // add it back
+                Assert.assertEquals(hash.add(x).getResult(), all);
+            }
+        }
+
+        {
+            // Randomly generate 10,000 positive long integers.
+            List<Long> arr0 = Stream
+                .generate(() -> mod(random.nextLong()))
+                .limit(10000)
+                .collect(Collectors.toList());
+            // Hash code after removing the i-th element.
+            List<Long> expected = new ArrayList<>();
+            RevisableOrderInvariantHash hash = new RevisableOrderInvariantHash();
+            for (int i = 0; i < arr0.size(); i++) {
+                hash.reset();
+                for (int j = 0; j < arr0.size(); j++) {
+                    if (i == j) {
+                        // Skip i-th element.
+                        continue;
+                    }
+                    hash.add(arr0.get(j));
+                }
+                expected.add(hash.getResult());
+            }
+
+            hash.reset();
+            for (Long x : arr0) {
+                hash.add(x);
+            }
+
+            Long all = hash.getResult();
+            for (int i = 0; i < arr0.size(); i++) {
+                Assert.assertEquals(hash.getResult(), all);
+                long x = arr0.get(i);
+                // remove x
+                Assert.assertEquals(hash.remove(x).getResult(), expected.get(i));
+                // add it back
+                Assert.assertEquals(hash.add(x).getResult(), all);
+            }
+        }
+
+        {
+            // Randomly generate 1,000,000 positive long integers.
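+            // Interleave 10,000 extra values while adding the 1,000,000 values, then
+            // remove the extra values in shuffled order; the result must equal the
+            // hash of the 1,000,000 values alone.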
+            List<Long> arr0 = Stream
+                .generate(() -> mod(random.nextLong()))
+                .limit(1000000)
+                .collect(Collectors.toList());
+            List<Long> extra = Stream
+                .generate(() -> mod(random.nextLong()))
+                .limit(10000)
+                .collect(Collectors.toList());
+            // hash0 = arr0 + extra - extra
+            RevisableOrderInvariantHash hash0 = new RevisableOrderInvariantHash();
+            int i = 0;
+            for (Long x : arr0) {
+                hash0.add(x);
+                if (i < extra.size()) {
+                    hash0.add(extra.get(i));
+                    i++;
+                }
+            }
+            Collections.shuffle(extra);
+            for (Long x : extra) {
+                hash0.remove(x);
+            }
+            // hash1 = arr0
+            RevisableOrderInvariantHash hash1 = new RevisableOrderInvariantHash();
+            Collections.shuffle(arr0);
+            for (Long x : arr0) {
+                hash1.add(x);
+            }
+            Assert.assertEquals(hash0.getResult(), hash1.getResult());
+        }
+    }
+
+    @Test
+    public void testFastMod() {
+        long x = 100;
+        long m = 1L << 31;
+        Assert.assertEquals(x % m, RevisableOrderInvariantHash.mod(x));
+        x = -100;
+        Assert.assertEquals((x % m) + m, RevisableOrderInvariantHash.mod(x));
+        x = (1L << 32) + 333;
+        Assert.assertEquals(x % m, RevisableOrderInvariantHash.mod(x));
+        x = (-1L << 32) - 333;
+        Assert.assertEquals((x % m) + m, RevisableOrderInvariantHash.mod(x));
+        x = (1L << 62) + 333;
+        Assert.assertEquals(x % m, RevisableOrderInvariantHash.mod(x));
+        x = (-1L << 62) - 333;
+        Assert.assertEquals((x % m) + m, RevisableOrderInvariantHash.mod(x));
+        x = -1;
+        Assert.assertEquals((x % m) + m, RevisableOrderInvariantHash.mod(x));
+        x = (1L << 32);
+        Assert.assertEquals(x % m, RevisableOrderInvariantHash.mod(x));
+        x = (1L << 31);
+        Assert.assertEquals(x % m, RevisableOrderInvariantHash.mod(x));
+
+        int negative = 0;
+        for (int i = 0; i < 10_000_000; i++) {
+            x = random.nextLong();
+            long result = (x % m);
+            if (result >= 0) {
+                Assert.assertEquals(result, RevisableOrderInvariantHash.mod(x));
+            } else {
+                negative++;
+                Assert.assertEquals(result + m, RevisableOrderInvariantHash.mod(x));
+            }
+        }
+        System.out.println("Total negative: " + negative);
+    }
+
+    @Test
+    public void modularInverseTest() {
+        RevisableOrderInvariantHash.ModularInverseSolver solver =
+            new RevisableOrderInvariantHash.ModularInverseSolver();
+        // gcd(a, b) = 1 -> (a * inverse) mod b = 1
+        long a = 101, b = 8;
+        long inverse = solver.solve(a, b);
+        Assert.assertEquals(mod(a * inverse, b), 1L);
+        b = 1L << 31;
+        for (int i = 0; i < 10_000_000; i++) {
+            a = mod(random.nextLong(), b);
+            if ((a & 1) == 0) {
+                a++;
+            }
+            inverse = solver.solve(a, b);
+            if (mod(a * inverse, b) != 1) {
+                System.out.println("a: " + a + ", inverse: " + inverse);
+                throw new RuntimeException();
+            }
+        }
+    }
+
+    @Test
+    public void collisionTest() {
+        RevisableOrderInvariantHash hash = new RevisableOrderInvariantHash();
+        long m = 1L << 31;
+        for (int i = 0; i < 100_000; i++) {
+            for (int j = 0; j < 100; j++) {
+                // large x
+                long x = mod(random.nextLong(), m) + m;
+                hash.add(x);
+            }
+            long r0 = hash.getResult();
+
+            hash.reset();
+            for (int j = 0; j < 100; j++) {
+                // large x
+                long x = mod(random.nextLong(), m) + m;
+                hash.add(x);
+            }
+            long r1 = hash.getResult();
+            Assert.assertNotEquals(r0, r1);
+        }
+    }
+
+    @Test
+    public void accumulateAddAndRemoveTest() {
+        {
+            // Simple test.
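+            // Combining partial hashes, e.g. hash{a, b, c} == reset(hash{a, b}).add(hash{c}).remove(0):
+            // adding another hash's result stands in for adding its elements, at the
+            // cost of removing one extra zero (and vice versa for removal).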
+            long a = 1, b = (1L << 31) + 433, c = (1L << 32) + 233, d = 2077;
+            long ha = new RevisableOrderInvariantHash().add(a).getResult();
+            long hb = new RevisableOrderInvariantHash().add(b).getResult();
+            long hc = new RevisableOrderInvariantHash().add(c).getResult();
+            long hd = new RevisableOrderInvariantHash().add(d).getResult();
+            // a + b
+            long hab0 = new RevisableOrderInvariantHash().reset(ha).add(hb).remove(0).getResult();
+            long hab1 = new RevisableOrderInvariantHash().add(a).add(b).getResult();
+            Assert.assertEquals(hab0, hab1);
+            // a + b + c
+            long habc0 = new RevisableOrderInvariantHash().reset(ha).add(hb).remove(0).add(hc).remove(0).getResult();
+            long habc1 = new RevisableOrderInvariantHash().reset(hab0).add(hc).remove(0).getResult();
+            long habc2 = new RevisableOrderInvariantHash().reset(hc).add(hab0).remove(0).getResult();
+            long habc3 = new RevisableOrderInvariantHash().add(a).add(b).add(c).getResult();
+            Assert.assertEquals(habc0, habc1);
+            Assert.assertEquals(habc0, habc2);
+            Assert.assertEquals(habc0, habc3);
+            // a + b + c + d
+            long habcd0 = new RevisableOrderInvariantHash().add(a).add(b).add(c).add(d).getResult();
+            long hcd0 = new RevisableOrderInvariantHash().add(c).add(d).getResult();
+            long habcd1 = new RevisableOrderInvariantHash().reset(hcd0).add(hab0).remove(0).getResult();
+            long habcd2 = new RevisableOrderInvariantHash().reset(hab0).add(c).add(d).getResult();
+            long habcd3 = new RevisableOrderInvariantHash().add(ha).remove(0).add(hb).remove(0)
+                .add(hc).remove(0).add(hd).remove(0).getResult();
+            Assert.assertEquals(habcd0, habcd1);
+            Assert.assertEquals(habcd0, habcd2);
+            Assert.assertEquals(habcd0, habcd3);
+        }
+
+        {
+            // Accumulate add.
+            List<Long> arr0 = Stream
+                .generate(() -> mod(random.nextLong()))
+                .limit(1_000_000)
+                .collect(Collectors.toList());
+
+            // the i-th element in this array represents the hash of [0, i) in arr0.
+            List<Long> result0 = new ArrayList<>(Collections.nCopies(arr0.size(), 0L));
+            // the i-th element in this array represents the hash of [i, n-1] in arr0.
+            List<Long> result1 = new ArrayList<>(Collections.nCopies(arr0.size(), 0L));
+            // the hash code of all elements in arr0, hash calculated from first to last.
+            RevisableOrderInvariantHash hash0 = new RevisableOrderInvariantHash();
+            // the hash code of all elements in arr0, hash calculated from last to first.
+            RevisableOrderInvariantHash hash1 = new RevisableOrderInvariantHash();
+
+            for (int i = 0; i < arr0.size(); i++) {
+                result0.set(i, hash0.getResult());
+                hash0.add(arr0.get(i));
+            }
+
+            for (int i = arr0.size() - 1; i >= 0; i--) {
+                hash1.add(arr0.get(i));
+                result1.set(i, hash1.getResult());
+            }
+
+            Assert.assertEquals(hash0.getResult(), hash1.getResult());
+
+            for (int i = 0; i < arr0.size(); i++) {
+                RevisableOrderInvariantHash hashTmp = new RevisableOrderInvariantHash();
+                hashTmp.reset(result0.get(i)).add(result1.get(i)).remove(0);
+                Assert.assertEquals(hashTmp.getResult(), hash0.getResult());
+            }
+        }
+
+        {
+            // Accumulate remove.
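+            // While streaming over 'extra', maintain three running hashes: arr0 plus a
+            // growing prefix of extra (hash0), pure removal of that prefix (hash1), and
+            // pure addition of that prefix (hash2); each prefix must recombine to the
+            // hash of arr0 alone.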
+            List<Long> arr0 = Stream
+                .generate(() -> mod(random.nextLong()))
+                .limit(1_000_000)
+                .collect(Collectors.toList());
+
+            List<Long> extra = Stream
+                .generate(() -> mod(random.nextLong()))
+                .limit(1_000_000)
+                .collect(Collectors.toList());
+
+            // hash code of arr0 + elements [0, i] from extra
+            List<Long> result0 = new ArrayList<>(extra.size());
+            // hash code of removing elements [0, i] from extra
+            List<Long> result1 = new ArrayList<>(extra.size());
+            // hash code of adding elements [0, i] from extra
+            List<Long> result2 = new ArrayList<>(extra.size());
+            RevisableOrderInvariantHash hash0 = new RevisableOrderInvariantHash();
+
+            for (Long x : arr0) {
+                hash0.add(x);
+            }
+
+            long expected = hash0.getResult();
+
+            RevisableOrderInvariantHash hash1 = new RevisableOrderInvariantHash();
+            RevisableOrderInvariantHash hash2 = new RevisableOrderInvariantHash();
+            for (Long x : extra) {
+                hash0.add(x);
+                result0.add(hash0.getResult());
+                hash1.remove(x);
+                result1.add(hash1.getResult());
+                hash2.add(x);
+                result2.add(hash2.getResult());
+            }
+
+            for (int i = 0; i < extra.size(); i++) {
+                // result0 + result1 should equal expected
+                long acc0 =
+                    new RevisableOrderInvariantHash().reset(result0.get(i)).add(result1.get(i)).remove(0).getResult();
+                Assert.assertEquals(expected, acc0);
+                // result0 - result2 should equal expected
+                long acc1 =
+                    new RevisableOrderInvariantHash().reset(result0.get(i)).remove(result2.get(i)).add(0).getResult();
+                Assert.assertEquals(expected, acc1);
+            }
+        }
+    }
+
+    private Long verifyRemoval(Collection<Long> arr) {
+        RevisableOrderInvariantHash hash = new RevisableOrderInvariantHash();
+        for (Long x : arr) {
+            Long before = hash.getResult();
+            Long middle = hash.add(x).getResult();
+            Assert.assertEquals(before, hash.remove(x).getResult());
+            Assert.assertEquals(middle, hash.add(x).getResult());
+            Assert.assertNotEquals(before, middle);
+        }
+        return hash.getResult();
+    }
+}
diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/charset/CharsetNameTest.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/charset/CharsetNameTest.java
index 64cbb9a73..0a411d9bb 100644
--- a/polardbx-common/src/test/java/com/alibaba/polardbx/common/charset/CharsetNameTest.java
+++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/charset/CharsetNameTest.java
@@ -16,10 +16,12 @@
 package com.alibaba.polardbx.common.charset;
 
+import com.alibaba.polardbx.common.utils.version.InstanceVersion;
 import org.junit.Assert;
 import org.junit.Test;
 
 import static com.alibaba.polardbx.common.charset.CharsetName.*;
+import static com.alibaba.polardbx.common.charset.CharsetName.BINARY;
 import static com.alibaba.polardbx.common.charset.CollationName.*;
 
 public class CharsetNameTest {
@@ -132,4 +134,96 @@ public void testOf() {
     private void doTestOf(String charsetNameStr, CharsetName expected) {
         Assert.assertEquals(expected, CharsetName.of(charsetNameStr));
     }
+
+    @Test
+    public void testDefaultCollationMySQL57() {
+        InstanceVersion.setMYSQL80(false);
+        Assert.assertTrue(ARMSCII8.getDefaultCollationName() == ARMSCII8_GENERAL_CI);
+        Assert.assertTrue(ASCII.getDefaultCollationName() == ASCII_GENERAL_CI);
+        Assert.assertTrue(BIG5.getDefaultCollationName() == BIG5_CHINESE_CI);
+        Assert.assertTrue(BINARY.getDefaultCollationName() == CollationName.BINARY);
+        Assert.assertTrue(CP1250.getDefaultCollationName() == CP1250_GENERAL_CI);
+        Assert.assertTrue(CP1251.getDefaultCollationName() == CP1251_GENERAL_CI);
+        Assert.assertTrue(CP1256.getDefaultCollationName() == CP1256_GENERAL_CI);
+
Assert.assertTrue(CP1257.getDefaultCollationName() == CP1257_GENERAL_CI); + Assert.assertTrue(CP850.getDefaultCollationName() == CP850_GENERAL_CI); + Assert.assertTrue(CP852.getDefaultCollationName() == CP852_GENERAL_CI); + Assert.assertTrue(CP866.getDefaultCollationName() == CP866_GENERAL_CI); + Assert.assertTrue(CP932.getDefaultCollationName() == CP932_JAPANESE_CI); + Assert.assertTrue(DEC8.getDefaultCollationName() == DEC8_SWEDISH_CI); + Assert.assertTrue(EUCJPMS.getDefaultCollationName() == EUCJPMS_JAPANESE_CI); + Assert.assertTrue(EUCKR.getDefaultCollationName() == EUCKR_KOREAN_CI); + Assert.assertTrue(GB18030.getDefaultCollationName() == GB18030_CHINESE_CI); + Assert.assertTrue(GB2312.getDefaultCollationName() == GB2312_CHINESE_CI); + Assert.assertTrue(GBK.getDefaultCollationName() == GBK_CHINESE_CI); + Assert.assertTrue(GEOSTD8.getDefaultCollationName() == GEOSTD8_GENERAL_CI); + Assert.assertTrue(GREEK.getDefaultCollationName() == GREEK_GENERAL_CI); + Assert.assertTrue(HEBREW.getDefaultCollationName() == HEBREW_GENERAL_CI); + Assert.assertTrue(HP8.getDefaultCollationName() == HP8_ENGLISH_CI); + Assert.assertTrue(KEYBCS2.getDefaultCollationName() == KEYBCS2_GENERAL_CI); + Assert.assertTrue(KOI8R.getDefaultCollationName() == KOI8R_GENERAL_CI); + Assert.assertTrue(KOI8U.getDefaultCollationName() == KOI8U_GENERAL_CI); + Assert.assertTrue(LATIN1.getDefaultCollationName() == LATIN1_SWEDISH_CI); + Assert.assertTrue(LATIN2.getDefaultCollationName() == LATIN2_GENERAL_CI); + Assert.assertTrue(LATIN5.getDefaultCollationName() == LATIN5_TURKISH_CI); + Assert.assertTrue(LATIN7.getDefaultCollationName() == LATIN7_GENERAL_CI); + Assert.assertTrue(MACCE.getDefaultCollationName() == MACCE_GENERAL_CI); + Assert.assertTrue(MACROMAN.getDefaultCollationName() == MACROMAN_GENERAL_CI); + Assert.assertTrue(SJIS.getDefaultCollationName() == SJIS_JAPANESE_CI); + Assert.assertTrue(SWE7.getDefaultCollationName() == SWE7_SWEDISH_CI); + Assert.assertTrue(TIS620.getDefaultCollationName() == TIS620_THAI_CI); + Assert.assertTrue(UCS2.getDefaultCollationName() == UCS2_GENERAL_CI); + Assert.assertTrue(UJIS.getDefaultCollationName() == UJIS_JAPANESE_CI); + Assert.assertTrue(UTF16.getDefaultCollationName() == UTF16_GENERAL_CI); + Assert.assertTrue(UTF16LE.getDefaultCollationName() == UTF16LE_GENERAL_CI); + Assert.assertTrue(UTF32.getDefaultCollationName() == UTF32_GENERAL_CI); + Assert.assertTrue(UTF8MB3.getDefaultCollationName() == UTF8_GENERAL_CI); + Assert.assertTrue(UTF8MB4.getDefaultCollationName() == UTF8MB4_GENERAL_CI); + } + + @Test + public void testDefaultCollationMySQL80() { + InstanceVersion.setMYSQL80(true); + Assert.assertTrue(ARMSCII8.getDefaultCollationName() == ARMSCII8_GENERAL_CI); + Assert.assertTrue(ASCII.getDefaultCollationName() == ASCII_GENERAL_CI); + Assert.assertTrue(BIG5.getDefaultCollationName() == BIG5_CHINESE_CI); + Assert.assertTrue(BINARY.getDefaultCollationName() == CollationName.BINARY); + Assert.assertTrue(CP1250.getDefaultCollationName() == CP1250_GENERAL_CI); + Assert.assertTrue(CP1251.getDefaultCollationName() == CP1251_GENERAL_CI); + Assert.assertTrue(CP1256.getDefaultCollationName() == CP1256_GENERAL_CI); + Assert.assertTrue(CP1257.getDefaultCollationName() == CP1257_GENERAL_CI); + Assert.assertTrue(CP850.getDefaultCollationName() == CP850_GENERAL_CI); + Assert.assertTrue(CP852.getDefaultCollationName() == CP852_GENERAL_CI); + Assert.assertTrue(CP866.getDefaultCollationName() == CP866_GENERAL_CI); + Assert.assertTrue(CP932.getDefaultCollationName() == CP932_JAPANESE_CI); + 
Assert.assertTrue(DEC8.getDefaultCollationName() == DEC8_SWEDISH_CI); + Assert.assertTrue(EUCJPMS.getDefaultCollationName() == EUCJPMS_JAPANESE_CI); + Assert.assertTrue(EUCKR.getDefaultCollationName() == EUCKR_KOREAN_CI); + Assert.assertTrue(GB18030.getDefaultCollationName() == GB18030_CHINESE_CI); + Assert.assertTrue(GB2312.getDefaultCollationName() == GB2312_CHINESE_CI); + Assert.assertTrue(GBK.getDefaultCollationName() == GBK_CHINESE_CI); + Assert.assertTrue(GEOSTD8.getDefaultCollationName() == GEOSTD8_GENERAL_CI); + Assert.assertTrue(GREEK.getDefaultCollationName() == GREEK_GENERAL_CI); + Assert.assertTrue(HEBREW.getDefaultCollationName() == HEBREW_GENERAL_CI); + Assert.assertTrue(HP8.getDefaultCollationName() == HP8_ENGLISH_CI); + Assert.assertTrue(KEYBCS2.getDefaultCollationName() == KEYBCS2_GENERAL_CI); + Assert.assertTrue(KOI8R.getDefaultCollationName() == KOI8R_GENERAL_CI); + Assert.assertTrue(KOI8U.getDefaultCollationName() == KOI8U_GENERAL_CI); + Assert.assertTrue(LATIN1.getDefaultCollationName() == LATIN1_SWEDISH_CI); + Assert.assertTrue(LATIN2.getDefaultCollationName() == LATIN2_GENERAL_CI); + Assert.assertTrue(LATIN5.getDefaultCollationName() == LATIN5_TURKISH_CI); + Assert.assertTrue(LATIN7.getDefaultCollationName() == LATIN7_GENERAL_CI); + Assert.assertTrue(MACCE.getDefaultCollationName() == MACCE_GENERAL_CI); + Assert.assertTrue(MACROMAN.getDefaultCollationName() == MACROMAN_GENERAL_CI); + Assert.assertTrue(SJIS.getDefaultCollationName() == SJIS_JAPANESE_CI); + Assert.assertTrue(SWE7.getDefaultCollationName() == SWE7_SWEDISH_CI); + Assert.assertTrue(TIS620.getDefaultCollationName() == TIS620_THAI_CI); + Assert.assertTrue(UCS2.getDefaultCollationName() == UCS2_GENERAL_CI); + Assert.assertTrue(UJIS.getDefaultCollationName() == UJIS_JAPANESE_CI); + Assert.assertTrue(UTF16.getDefaultCollationName() == UTF16_GENERAL_CI); + Assert.assertTrue(UTF16LE.getDefaultCollationName() == UTF16LE_GENERAL_CI); + Assert.assertTrue(UTF32.getDefaultCollationName() == UTF32_GENERAL_CI); + Assert.assertTrue(UTF8MB3.getDefaultCollationName() == UTF8_GENERAL_CI); + Assert.assertTrue(UTF8MB4.getDefaultCollationName() == UTF8MB4_0900_AI_CI); + } } diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/charset/CharsetTest.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/charset/CharsetTest.java index 0d61c3898..f8ba7b5fc 100644 --- a/polardbx-common/src/test/java/com/alibaba/polardbx/common/charset/CharsetTest.java +++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/charset/CharsetTest.java @@ -71,4 +71,21 @@ public void test() { Assert.assertEquals(CharsetName.of(Charset.forName("BINARY")), CharsetName.BINARY); Assert.assertEquals(CharsetName.of(Charset.forName("ASCII")), CharsetName.ASCII); } + + @Test + public void testMixedCollation() { + Assert.assertSame(CollationName.getMixOfCollation0(CollationName.UTF8MB4_BIN, CollationName.UTF8MB4_GENERAL_CI), + CollationName.UTF8MB4_BIN); + Assert.assertSame(CollationName.getMixOfCollation0(CollationName.UTF8MB4_GENERAL_CI, CollationName.UTF8MB4_BIN), + CollationName.UTF8MB4_BIN); + + Assert.assertSame( + CollationName.getMixOfCollation0(CollationName.UTF8MB4_0900_AI_CI, CollationName.UTF8MB4_GENERAL_CI), + CollationName.UTF8MB4_0900_AI_CI); + Assert.assertSame( + CollationName.getMixOfCollation0(CollationName.UTF8MB4_0900_AI_CI, CollationName.UTF8MB4_UNICODE_CI), + CollationName.UTF8MB4_0900_AI_CI); + Assert.assertSame(CollationName.getMixOfCollation0(CollationName.UTF8MB4_0900_AI_CI, CollationName.UTF8MB4_BIN), 
+ CollationName.UTF8MB4_BIN); + } } diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/charset/MySQLCharsetDDLValidatorTest.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/charset/MySQLCharsetDDLValidatorTest.java index f0484b738..6702e1356 100644 --- a/polardbx-common/src/test/java/com/alibaba/polardbx/common/charset/MySQLCharsetDDLValidatorTest.java +++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/charset/MySQLCharsetDDLValidatorTest.java @@ -343,4 +343,61 @@ public void testCheckCharsetSupported() { Assert.assertFalse(MySQLCharsetDDLValidator.checkCharsetSupported("utf8mb4", "utf8mb4_estonian_ci", true)); Assert.assertFalse(MySQLCharsetDDLValidator.checkCharsetSupported(null, "utf8mb4_estonian_ci", true)); } + + @Test + public void testCheckCharsetSupportedMySQL80() { + Assert.assertTrue(MySQLCharsetDDLValidator.checkCharsetSupported("utf8", "utf8mb3_general_ci", true)); + Assert.assertTrue(MySQLCharsetDDLValidator.checkCharsetSupported("utf8", "utf8mb3_bin", true)); + Assert.assertTrue(MySQLCharsetDDLValidator.checkCharsetSupported("utf8", "utf8mb3_unicode_ci", true)); + Assert.assertTrue(MySQLCharsetDDLValidator.checkCharsetSupported("utf8", "utf8mb3_general_mysql500_ci", true)); + Assert.assertFalse(MySQLCharsetDDLValidator.checkCharsetSupported("utf8", "utf8mb3_bi", true)); + Assert.assertFalse(MySQLCharsetDDLValidator.checkCharsetSupported("utf8", "utf8mb3_icelandic_ci", true)); + + Assert.assertTrue(MySQLCharsetDDLValidator.checkCharsetSupported("utf8mb3", "utf8mb3_general_ci", true)); + Assert.assertTrue(MySQLCharsetDDLValidator.checkCharsetSupported("utf8mb3", "utf8mb3_bin", true)); + Assert.assertTrue(MySQLCharsetDDLValidator.checkCharsetSupported("utf8mb3", "utf8mb3_unicode_ci", true)); + Assert.assertTrue( + MySQLCharsetDDLValidator.checkCharsetSupported("utf8mb3", "utf8mb3_general_mysql500_ci", true)); + Assert.assertFalse(MySQLCharsetDDLValidator.checkCharsetSupported("utf8mb3", "utf8mb3_bi", true)); + Assert.assertFalse(MySQLCharsetDDLValidator.checkCharsetSupported("utf8mb3", "utf8mb3_icelandic_ci", true)); + + Assert.assertTrue(MySQLCharsetDDLValidator.checkCharsetSupported("utf8mb3", "utf8_general_ci", true)); + Assert.assertTrue(MySQLCharsetDDLValidator.checkCharsetSupported("utf8mb3", "utf8_bin", true)); + Assert.assertTrue(MySQLCharsetDDLValidator.checkCharsetSupported("utf8mb3", "utf8_unicode_ci", true)); + Assert.assertTrue(MySQLCharsetDDLValidator.checkCharsetSupported("utf8mb3", "utf8_general_mysql500_ci", true)); + Assert.assertFalse(MySQLCharsetDDLValidator.checkCharsetSupported("utf8mb3", "utf8_bi", true)); + Assert.assertFalse(MySQLCharsetDDLValidator.checkCharsetSupported("utf8mb3", "utf8_icelandic_ci", true)); + + } + + @Test + public void testCheckCollationMySQL80() { + Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_general_ci")); + Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_bin")); + Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_unicode_ci")); + Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_icelandic_ci")); + Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_latvian_ci")); + Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_romanian_ci")); + Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_slovenian_ci")); + Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_polish_ci")); + 
Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_estonian_ci"));
+        Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_spanish_ci"));
+        Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_swedish_ci"));
+        Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_turkish_ci"));
+        Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_czech_ci"));
+        Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_danish_ci"));
+        Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_lithuanian_ci"));
+        Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_slovak_ci"));
+        Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_spanish2_ci"));
+        Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_roman_ci"));
+        Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_persian_ci"));
+        Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_esperanto_ci"));
+        Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_hungarian_ci"));
+        Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_sinhala_ci"));
+        Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_german2_ci"));
+        Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_croatian_ci"));
+        Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_unicode_520_ci"));
+        Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_vietnamese_ci"));
+        Assert.assertTrue(MySQLCharsetDDLValidator.checkCollation("utf8mb3_general_mysql500_ci"));
+    }
 }
\ No newline at end of file
diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/datatype/Decimal128Test.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/datatype/Decimal128Test.java
new file mode 100644
index 000000000..eae5c42e6
--- /dev/null
+++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/datatype/Decimal128Test.java
@@ -0,0 +1,95 @@
+package com.alibaba.polardbx.common.datatype;
+
+import com.alibaba.polardbx.common.utils.BigDecimalUtil;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.math.BigInteger;
+
+public class Decimal128Test {
+
+    private static final String[] expectedResults = {
+        "0",
+        "92233720368512053380",
+        "922337203685120550243.23",
+        "0.01",
+        "0.000",
+        "-9991421.5",
+        "-9223372036851.1947090",
+        "-83.01034833166075237401",
+        "535654230258904491850165587133144.8496"
+    };
+
+    /**
+     * Decimal128 values are always produced from Decimal64 values during computation.
+     * The expected results here were computed by hand and are kept in the test cases for comparison.
+     */
+    private static final long[][] decimal128Bits = {
+        new long[] {0, 0},
+        new long[] {-35704700, 4},
+        new long[] {-35703055677L, 4999},
+        new long[] {1, 0},
+        new long[] {0, 0},
+        new long[] {-99914215, -1},
+        new long[] {35810990, -5},
+        new long[] {3222989799L, -450},
+        new long[] {2675267201812608688L, 290378740073877438L},
+    };
+
+    private static final int[] scales = {
+        0, 0, 2, 2, 3, 1, 7, 20, 4
+    };
+
+    @BeforeClass
+    public static void beforeClass() {
+        Assert.assertEquals(expectedResults.length, decimal128Bits.length);
+        Assert.assertEquals(expectedResults.length, scales.length);
+    }
+
+    @Test
+    public void testDecimal128ToDecimal() {
+        DecimalStructure buffer = new DecimalStructure();
+        DecimalStructure result = new DecimalStructure();
+        for (int i = 0; i < decimal128Bits.length; i++) {
+            long[] decimal128 = decimal128Bits[i];
+            FastDecimalUtils.setDecimal128WithScale(buffer, result, decimal128[0], decimal128[1], scales[i]);
+
Decimal decimal = new Decimal(result); + String resultStr = decimal.toString(); + Assert.assertEquals("Failed at round: " + i, expectedResults[i], resultStr); + } + } + + @Test + public void testDecimalToDecimal128() { + for (int i = 0; i < expectedResults.length; i++) { + try { + Decimal decimal = Decimal.fromString(expectedResults[i]); + long[] decimal128 = FastDecimalUtils.convertToDecimal128(decimal); + Assert.assertArrayEquals("Failed at round: " + i, decimal128Bits[i], decimal128); + } catch (Throwable e) { + if (e instanceof AssertionError) { + throw e; + } + Assert.fail("Failed at round: " + i + ", due to " + e.getMessage()); + } + } + } + + @Test + public void testFastInt128ToBytes() { + for (int i = 0; i < decimal128Bits.length; i++) { + long lowBits = decimal128Bits[i][0]; + long highBits = decimal128Bits[i][1]; + BigInteger highBitsInt = BigInteger.valueOf(highBits).shiftLeft(64); + BigInteger lowBitsInt = BigInteger.valueOf(lowBits & 0x7fffffffffffffffL); + if (lowBits < 0) { + lowBitsInt = lowBitsInt.setBit(63); + } + BigInteger targetBigInt = highBitsInt.add(lowBitsInt); + + byte[] bytes = BigDecimalUtil.fastInt128ToBytes(lowBits, highBits); + Assert.assertArrayEquals("Failed at round: " + i, targetBigInt.toString().getBytes(), bytes); + } + } +} diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/datatype/DecimalCalculatorTest.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/datatype/DecimalCalculatorTest.java index 86c9e8a9f..11270aff6 100644 --- a/polardbx-common/src/test/java/com/alibaba/polardbx/common/datatype/DecimalCalculatorTest.java +++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/datatype/DecimalCalculatorTest.java @@ -29,6 +29,42 @@ public class DecimalCalculatorTest { + private static final int TEST_SCALE_INCR = 5; + private static final Random R = new Random(); + private static final String NUMBER_STR = "0123456789"; + + private static byte[] generateDecimal() { + return generateDecimal(65); + } + + private static byte[] generateDecimal(int maxPrc) { + int precision = R.nextInt(maxPrc) + 1; + int scale = R.nextInt(precision) + 1; + if (precision == scale) { + scale--; + } + + boolean isNeg = R.nextInt() % 2 == 0; + + byte[] res = new byte[(scale == 0 ? precision : precision + 1) + (isNeg ? 1 : 0)]; + int i = 0; + if (isNeg) { + res[i++] = '-'; + } + res[i++] = (byte) NUMBER_STR.charAt(R.nextInt(9) + 1); + for (; i < precision - scale + (isNeg ? 1 : 0); i++) { + res[i] = (byte) NUMBER_STR.charAt(R.nextInt(10)); + } + if (scale == 0) { + return res; + } + res[i++] = '.'; + for (; i < precision + 1 + (isNeg ? 
1 : 0); i++) { + res[i] = (byte) NUMBER_STR.charAt(R.nextInt(10)); + } + return res; + } + @Test public void testDes() { IntStream.range(0, 1 << 5).forEach(i -> { @@ -184,16 +220,17 @@ public void testCmp() { @Test public void testHash() { doTestHash(100000992, "1.1", "1.1000", "1.1000000", "1.10000000000", "01.1", "0001.1", "001.1000000"); - doTestHash(-100000992,"-1.1", "-1.1000", "-1.1000000", "-1.10000000000", "-01.1", "-0001.1", "-001.1000000"); - doTestHash(100000031,".1", "0.1", "0.10", "000000.1", ".10000", "0000.10000", "000000000000000000.1"); + doTestHash(-100000992, "-1.1", "-1.1000", "-1.1000000", "-1.10000000000", "-01.1", "-0001.1", "-001.1000000"); + doTestHash(100000031, ".1", "0.1", "0.10", "000000.1", ".10000", "0000.10000", "000000000000000000.1"); doTestHash(1, "0", "0000", ".0", ".00000", "00000.00000", "-0", "-0000", "-.0", "-.00000", "-00000.00000"); - doTestHash(-344349087,".123456789123456789", ".1234567891234567890", ".12345678912345678900", ".123456789123456789000", + doTestHash(-344349087, ".123456789123456789", ".1234567891234567890", ".12345678912345678900", + ".123456789123456789000", ".1234567891234567890000", "0.123456789123456789", ".1234567891234567890000000000", "0000000.123456789123456789000"); - doTestHash(12376,"12345", "012345", "0012345", "0000012345", "0000000012345", "00000000000012345", "12345.", + doTestHash(12376, "12345", "012345", "0012345", "0000012345", "0000000012345", "00000000000012345", "12345.", "12345.00", "12345.000000000", "000012345.0000"); - doTestHash(12300031,"123E5", "12300000", "00123E5", "000000123E5", "12300000.00000000"); - doTestHash(230000992,"123E-2", "1.23", "00000001.23", "1.2300000000000000", "000000001.23000000000000"); + doTestHash(12300031, "123E5", "12300000", "00123E5", "000000123E5", "12300000.00000000"); + doTestHash(230000992, "123E-2", "1.23", "00000001.23", "1.2300000000000000", "000000001.23000000000000"); } @Test @@ -217,6 +254,28 @@ public void testHiveDecimal() { Assert.assertEquals(h.toString(), d.toString()); } + @Test + public void testSetLongWithScale() { + DecimalStructure bufferDec = new DecimalStructure(); + DecimalStructure resultDec = new DecimalStructure(); + final int[] scales = new int[] {0, 2, 8}; + final int COUNT = 256; + for (int scale : scales) { + for (int i = 0; i < COUNT; i++) { + long val = (i == 0) ? 0 : R.nextLong(); + + FastDecimalUtils.setLongWithScale(bufferDec, resultDec, val, scale); + Decimal resultDecimal = new Decimal(resultDec); + + Decimal targetUnscaled = Decimal.fromLong(val); + FastDecimalUtils.doShift(resultDecimal.getDecimalStructure(), scale); + Assert.assertEquals(String.format("scale=%d, val=%d", scale, val), 0, + FastDecimalUtils.compare(targetUnscaled.getDecimalStructure(), + resultDecimal.getDecimalStructure())); + } + } + } + private void doTestHash(int hashCode, String... 
decimalStrings) { int[] hs = new int[decimalStrings.length]; for (int i = 0; i < decimalStrings.length; i++) { @@ -242,8 +301,6 @@ private int hash(int[] array, int fromIndex, int toIndex) { return result; } - private static final int TEST_SCALE_INCR = 5; - private void doTestDiv(String dividend, String divisor, String quotient, int error) { DecimalStructure d1 = new DecimalStructure(); int error1 = DecimalConverter.parseString(dividend.getBytes(), d1, false); @@ -295,39 +352,4 @@ private void doTestCmp(String from, String to, int result) { Assert.assertEquals(result, myRes); } - - private static final Random R = new Random(); - private static final String NUMBER_STR = "0123456789"; - - private static byte[] generateDecimal() { - return generateDecimal(65); - } - - private static byte[] generateDecimal(int maxPrc) { - int precision = R.nextInt(maxPrc) + 1; - int scale = R.nextInt(precision) + 1; - if (precision == scale) { - scale--; - } - - boolean isNeg = R.nextInt() % 2 == 0; - - byte[] res = new byte[(scale == 0 ? precision : precision + 1) + (isNeg ? 1 : 0)]; - int i = 0; - if (isNeg) { - res[i++] = '-'; - } - res[i++] = (byte) NUMBER_STR.charAt(R.nextInt(9) + 1); - for (; i < precision - scale + (isNeg ? 1 : 0); i++) { - res[i] = (byte) NUMBER_STR.charAt(R.nextInt(10)); - } - if (scale == 0) { - return res; - } - res[i++] = '.'; - for (; i < precision + 1 + (isNeg ? 1 : 0); i++) { - res[i] = (byte) NUMBER_STR.charAt(R.nextInt(10)); - } - return res; - } } diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/datatype/DecimalConverterTest.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/datatype/DecimalConverterTest.java index 2f4d9d8ff..e5e8ff14e 100644 --- a/polardbx-common/src/test/java/com/alibaba/polardbx/common/datatype/DecimalConverterTest.java +++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/datatype/DecimalConverterTest.java @@ -48,7 +48,8 @@ public void testString() { doTestString("123E5", "12300000", E_DEC_OK); doTestString("123E-2", "1.23", E_DEC_OK); - doTestString("99999999999999999999999999999999999999999999999999999999999999999", "99999999999999999999999999999999999999999999999999999999999999999", E_DEC_OK); + doTestString("99999999999999999999999999999999999999999999999999999999999999999", + "99999999999999999999999999999999999999999999999999999999999999999", E_DEC_OK); } private void doTestBinary(String from, int precision, int scale, String to, int error) { @@ -210,4 +211,26 @@ public void testUnsignedToDecimal() { doTestUnsignedLongToDecimal(0L, "0", E_DEC_OK); doTestUnsignedLongToDecimal(Long.parseUnsignedLong("18446744073709551615"), "18446744073709551615", E_DEC_OK); } + + @Test + public void testGetUnscaledDecimal() { + doTestGetUnscaledDecimal(new byte[] {-125, -50, 50, -111, 69}, 10, 2, 6384500969L); + } + + private void doTestGetUnscaledDecimal(byte[] buffer, int precision, int scale, + long expectResult) { + long unscaledDecimal = DecimalConverter.getUnscaledDecimal(buffer, precision, scale); + Assert.assertEquals(expectResult, unscaledDecimal); + } + + @Test + public void testGetDecimal() { + doTestGetDecimal(new byte[] {-125, -50, 50, -111, 69}, 10, 2, Decimal.fromString("63845009.69")); + } + + private void doTestGetDecimal(byte[] buffer, int precision, int scale, + Decimal expectResult) { + Decimal decimal = DecimalConverter.getDecimal(buffer, precision, scale); + Assert.assertEquals(expectResult, decimal); + } } \ No newline at end of file diff --git 
a/polardbx-common/src/test/java/com/alibaba/polardbx/common/datatype/RawBytesDecimalUtilsTest.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/datatype/RawBytesDecimalUtilsTest.java index c021f579c..06b1d6bda 100644 --- a/polardbx-common/src/test/java/com/alibaba/polardbx/common/datatype/RawBytesDecimalUtilsTest.java +++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/datatype/RawBytesDecimalUtilsTest.java @@ -19,6 +19,8 @@ import org.junit.Assert; import org.junit.Test; +import java.util.Random; + public class RawBytesDecimalUtilsTest { private void doTestHash(int hashCode, String... decimalStrings) { @@ -78,4 +80,22 @@ public void testCmp() { doTestCmp("10.00004000", "00010.00004", 0); } + @Test + public void testDecimal64() { + Random random = new Random(System.currentTimeMillis()); + for (int i = 0; i < 1000; i++) { + long l = random.nextInt(100_000_000); + testDecimal64Hash(l, 0); + testDecimal64Hash(l, 2); + testDecimal64Hash(l, 5); + testDecimal64Hash(l, 8); + } + } + + private void testDecimal64Hash(long decimal64, int scale) { + Decimal decimal = new Decimal(decimal64, scale); + int expectHash = RawBytesDecimalUtils.hashCode(decimal.getMemorySegment()); + int actualHash = RawBytesDecimalUtils.hashCode(decimal64, scale); + Assert.assertEquals(expectHash, actualHash); + } } \ No newline at end of file diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/jdbc/PruneRawStringTest.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/jdbc/PruneRawStringTest.java new file mode 100644 index 000000000..31b364db3 --- /dev/null +++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/jdbc/PruneRawStringTest.java @@ -0,0 +1,59 @@ +package com.alibaba.polardbx.common.jdbc; + +import com.google.common.collect.ImmutableList; +import org.junit.Test; + +import java.util.stream.IntStream; + +/** + * @author fangwu + */ +public class PruneRawStringTest { + + @Test + public void testGetObj() { + PruneRawString pruneRawString = new PruneRawString(ImmutableList.copyOf(IntStream.range(0, 1000).iterator()), + PruneRawString.PRUNE_MODE.RANGE, 0, 1000, null); + assert pruneRawString.display().contains("NonPruneRaw"); + try { + pruneRawString.getObj(1000, -1); + } catch (Exception e) { + System.out.println(e.getMessage()); + assert e.getMessage().contains("PruneRawString error array index out of bounds"); + } + + pruneRawString = new PruneRawString(ImmutableList.copyOf(IntStream.range(0, 1000).iterator()), + PruneRawString.PRUNE_MODE.RANGE, 0, 999, null); + assert !pruneRawString.display().contains("NonPruneRaw"); + + pruneRawString = new PruneRawString(ImmutableList.copyOf(IntStream.range(0, 4096).iterator()), + PruneRawString.PRUNE_MODE.RANGE, 0, 4096, null); + assert pruneRawString.display().contains("NonPruneRaw"); + assert pruneRawString.display().endsWith("..."); + } + + @Test + public void testMerge() { + PruneRawString pruneRawString = new PruneRawString(ImmutableList.copyOf(IntStream.range(0, 1000).iterator()), + PruneRawString.PRUNE_MODE.RANGE, 0, 300, null); + PruneRawString pruneRawString2 = new PruneRawString(ImmutableList.copyOf(IntStream.range(0, 1000).iterator()), + PruneRawString.PRUNE_MODE.RANGE, 100, 500, null); + pruneRawString.merge(pruneRawString2); + + assert pruneRawString.pruneMode == PruneRawString.PRUNE_MODE.MULTI_INDEX; + assert pruneRawString2.pruneMode == PruneRawString.PRUNE_MODE.MULTI_INDEX; + assert pruneRawString.size() == 500; + PruneRawString pruneRawString3 = new 
PruneRawString(ImmutableList.copyOf(IntStream.range(0, 1000).iterator()), + PruneRawString.PRUNE_MODE.RANGE, 500, 1000, null); + pruneRawString.merge(pruneRawString3); + assert pruneRawString.pruneMode == PruneRawString.PRUNE_MODE.RANGE; + assert pruneRawString3.pruneMode == PruneRawString.PRUNE_MODE.MULTI_INDEX; + + PruneRawString pruneRawString4 = new PruneRawString(ImmutableList.copyOf(IntStream.range(0, 1000).iterator()), + PruneRawString.PRUNE_MODE.RANGE, 0, 1000, null); + pruneRawString4.merge(pruneRawString); + + assert pruneRawString4.pruneMode == PruneRawString.PRUNE_MODE.RANGE; + assert pruneRawString.pruneMode == PruneRawString.PRUNE_MODE.RANGE; + } +} diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/jdbc/RawStringTest.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/jdbc/RawStringTest.java index b0a01d23d..ac8323fdb 100644 --- a/polardbx-common/src/test/java/com/alibaba/polardbx/common/jdbc/RawStringTest.java +++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/jdbc/RawStringTest.java @@ -17,12 +17,14 @@ package com.alibaba.polardbx.common.jdbc; import com.alibaba.polardbx.common.utils.Assert; +import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import junit.framework.TestCase; import org.junit.Test; import java.util.BitSet; import java.util.List; +import java.util.stream.IntStream; /** * @author fangwu @@ -114,4 +116,15 @@ public void testPruneRawStringWithTypeConvert() { RawString newPr = pr.convertType(o -> o, -1); Assert.assertTrue("3,'s\\n'".equals(newPr.buildRawString())); } + + @Test + public void testPruneStep() { + RawString rawString = new RawString(ImmutableList.copyOf(IntStream.range(0, 1000).iterator())); + + for (int i = 0; i < 1000; i++) { + PruneRawString pruneRawString = rawString.pruneStep(i); + assert pruneRawString.size() == 1; + assert pruneRawString.getObj(0, -1) == rawString.getObj(i, -1); + } + } } \ No newline at end of file diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/jdbc/TableNameTest.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/jdbc/TableNameTest.java new file mode 100644 index 000000000..a9d95936a --- /dev/null +++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/jdbc/TableNameTest.java @@ -0,0 +1,72 @@ +package com.alibaba.polardbx.common.jdbc; + +import com.alibaba.polardbx.common.utils.Assert; +import org.junit.Test; + +import java.sql.SQLException; + +public class TableNameTest { + + @Test + public void testTableName() throws SQLException { + try { + TableName tableName = new TableName("t1"); + } catch (SQLException e) { + throw e; + } + } + + @Test + public void testTableNameWithBlank() throws SQLException { + try { + TableName tableName = new TableName("t1 t2"); + } catch (SQLException e) { + throw e; + } + } + + @Test + public void testTableNameErr() throws SQLException { + try { + TableName tableName = new TableName("t'1"); + } catch (SQLException e) { + Assert.assertTrue(e.getMessage().contains("tableName format error")); + } + } + + @Test + public void testTableNameErr2() throws SQLException { + try { + TableName tableName = new TableName("t\"1"); + } catch (SQLException e) { + Assert.assertTrue(e.getMessage().contains("tableName format error")); + } + } + + @Test + public void testTableNameErr3() throws SQLException { + try { + TableName tableName = new TableName("t\\1"); + } catch (SQLException e) { + Assert.assertTrue(e.getMessage().contains("tableName format error")); + } + } + + @Test + public 
void testTableNameErr4() throws SQLException { + try { + TableName tableName = new TableName("t\n1"); + } catch (SQLException e) { + Assert.assertTrue(e.getMessage().contains("tableName format error")); + } + } + + @Test + public void testTableNameErr5() throws SQLException { + try { + TableName tableName = new TableName(""); + } catch (SQLException e) { + Assert.assertTrue(e.getMessage().contains("tableName should not be empty")); + } + } +} diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/oss/filesystem/InputStreamWithRateLimiterTest.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/oss/filesystem/InputStreamWithRateLimiterTest.java new file mode 100644 index 000000000..9e32e65a1 --- /dev/null +++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/oss/filesystem/InputStreamWithRateLimiterTest.java @@ -0,0 +1,189 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.common.oss.filesystem; + +import com.alibaba.polardbx.common.mock.MockUtils; +import org.apache.hadoop.fs.PositionedReadable; +import org.apache.hadoop.fs.Seekable; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; + +@RunWith(MockitoJUnitRunner.class) +public class InputStreamWithRateLimiterTest { + + private FileSystemRateLimiter rateLimiter; + private InputStreamWithRateLimiter inputStreamWithRateLimiter; + private InputStream inputStream; + + @Before + public void setUp() { + rateLimiter = Mockito.mock(FileSystemRateLimiter.class); + inputStream = Mockito.mock(SeekableByteArrayInputStream.class); + inputStreamWithRateLimiter = new InputStreamWithRateLimiter(inputStream, rateLimiter); + } + + @Test + public void testNewStream() { + MockUtils.assertThrows( + IllegalArgumentException.class, + "In is not an instance of Seekable or PositionedReadable", + () -> { + new InputStreamWithRateLimiter(new ByteArrayInputStream(new byte[0]), rateLimiter); + } + ); + } + + @Test + public void testRead() throws IOException { + // Arrange + Mockito.when(inputStream.read()).thenReturn(1); + Mockito.when(inputStream.read(Mockito.any(byte[].class), Mockito.anyInt(), Mockito.anyInt())) + .thenReturn(2); + Mockito.when(inputStream.read(Mockito.any(byte[].class))).thenReturn(3); + Mockito.when( + ((PositionedReadable) inputStream).read(Mockito.anyLong(), Mockito.any(byte[].class), Mockito.anyInt(), + Mockito.anyInt())).thenReturn(4); + + int byteRead = inputStreamWithRateLimiter.read(); + Assert.assertEquals(1, byteRead); + Mockito.verify(rateLimiter).acquireRead(1); + Mockito.verify(inputStream).read(); + + byte[] buf = new byte[3]; + byte[] buf2 = new byte[6]; + byteRead = inputStreamWithRateLimiter.read(buf, 0, 2); + Assert.assertEquals(2, byteRead); + 
Mockito.verify(rateLimiter).acquireRead(2); + Mockito.verify(inputStream).read(buf, 0, 2); + + byteRead = inputStreamWithRateLimiter.read(buf); + Assert.assertEquals(3, byteRead); + Mockito.verify(rateLimiter).acquireRead(3); + Mockito.verify(inputStream).read(buf); + + byteRead = inputStreamWithRateLimiter.read(0, buf2, 0, 4); + Assert.assertEquals(4, byteRead); + Mockito.verify(rateLimiter).acquireRead(4); + Mockito.verify((PositionedReadable) inputStream).read(0, buf2, 0, 4); + + inputStreamWithRateLimiter.readFully(0, buf2, 0, 5); + Mockito.verify(rateLimiter).acquireRead(5); + Mockito.verify((PositionedReadable) inputStream).readFully(0, buf2, 0, 5); + + inputStreamWithRateLimiter.readFully(0, buf2); + Mockito.verify(rateLimiter).acquireRead(6); + Mockito.verify((PositionedReadable) inputStream).readFully(0, buf2); + } + + @Test + public void testForwardingMethods() throws IOException { + inputStreamWithRateLimiter.seek(1); + Mockito.verify(((Seekable) inputStream)).seek(1); + inputStreamWithRateLimiter.seekToNewSource(1); + Mockito.verify(((Seekable) inputStream)).seekToNewSource(1); + inputStreamWithRateLimiter.getPos(); + Mockito.verify(((Seekable) inputStream)).getPos(); + + inputStreamWithRateLimiter.skip(1); + Mockito.verify(inputStream).skip(1); + + inputStreamWithRateLimiter.available(); + Mockito.verify(inputStream).available(); + + inputStreamWithRateLimiter.mark(1); + Mockito.verify(inputStream).mark(1); + + inputStreamWithRateLimiter.reset(); + Mockito.verify(inputStream).reset(); + + inputStreamWithRateLimiter.markSupported(); + Mockito.verify(inputStream).markSupported(); + + inputStreamWithRateLimiter.close(); + Mockito.verify(inputStream).close(); + } + + @Test + public void testReadWhenRateLimiterThrowsIOException() throws IOException { + // Arrange + Mockito.doThrow(new IOException("Rate limit exceeded")).when(rateLimiter).acquireRead(1); + + // Act & Assert + MockUtils.assertThrows(IOException.class, "Rate limit exceeded", () -> inputStreamWithRateLimiter.read()); + + Mockito.verify(rateLimiter).acquireRead(1); + Mockito.verify(inputStream, Mockito.never()).read(); + } + + public class SeekableByteArrayInputStream extends ByteArrayInputStream implements Seekable, PositionedReadable { + + public SeekableByteArrayInputStream(byte[] buf) { + super(buf); + } + + @Override + public void seek(long pos) throws IOException { + if (pos < 0 || pos > count) { + throw new IOException("Position out of bounds"); + } + this.pos = (int) pos; + } + + @Override + public long getPos() throws IOException { + return pos; + } + + @Override + public boolean seekToNewSource(long targetPos) throws IOException { + return false; + } + + @Override + public int read(long position, byte[] buffer, int offset, int length) throws IOException { + if (position >= count) { + return -1; + } + if (position + length > count) { + length = count - (int) position; + } + System.arraycopy(buf, (int) position, buffer, offset, length); + return length; + } + + @Override + public void readFully(long position, byte[] buffer, int offset, int length) throws IOException { + int nread = read(position, buffer, offset, length); + if (nread < length) { + throw new IOException("Reached end of stream"); + } + } + + @Override + public void readFully(long position, byte[] buffer) throws IOException { + readFully(position, buffer, 0, buffer.length); + } + } +} \ No newline at end of file diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/oss/filesystem/OutputStreamWithRateLimiterTest.java 
b/polardbx-common/src/test/java/com/alibaba/polardbx/common/oss/filesystem/OutputStreamWithRateLimiterTest.java new file mode 100644 index 000000000..5f77eb863 --- /dev/null +++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/oss/filesystem/OutputStreamWithRateLimiterTest.java @@ -0,0 +1,78 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.common.oss.filesystem; + +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + +import java.io.IOException; +import java.io.OutputStream; + +@RunWith(MockitoJUnitRunner.class) +public class OutputStreamWithRateLimiterTest { + private OutputStream mockOutputStream; + private FileSystemRateLimiter mockRateLimiter; + private OutputStreamWithRateLimiter outputStreamWithRateLimiter; + + @Before + public void setUp() { + mockOutputStream = Mockito.mock(OutputStream.class); + mockRateLimiter = Mockito.mock(FileSystemRateLimiter.class); + outputStreamWithRateLimiter = new OutputStreamWithRateLimiter(mockOutputStream, mockRateLimiter); + } + + @Test + public void testWriteArray() throws IOException { + byte[] b = new byte[] {1, 2, 3, 4}; + outputStreamWithRateLimiter.write(b); + Mockito.verify(mockRateLimiter).acquireWrite(b.length); + Mockito.verify(mockOutputStream).write(b); + } + + @Test + public void testWriteArrayWithOffsetAndLength() throws IOException { + byte[] b = new byte[] {1, 2, 3, 4, 5, 6}; + int off = 2; + int len = 4; + outputStreamWithRateLimiter.write(b, off, len); + Mockito.verify(mockRateLimiter).acquireWrite(len); + Mockito.verify(mockOutputStream).write(b, off, len); + } + + @Test + public void testFlush() throws IOException { + outputStreamWithRateLimiter.flush(); + Mockito.verify(mockOutputStream).flush(); + } + + @Test + public void testClose() throws IOException { + outputStreamWithRateLimiter.close(); + Mockito.verify(mockOutputStream).close(); + } + + @Test + public void testWriteInt() throws IOException { + byte b = 1; + outputStreamWithRateLimiter.write(b); + Mockito.verify(mockRateLimiter).acquireWrite(1); + Mockito.verify(mockOutputStream).write(b); + } +} \ No newline at end of file diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/oss/filesystem/cache/CompressedBytesCacheTest.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/oss/filesystem/cache/CompressedBytesCacheTest.java new file mode 100644 index 000000000..9cf6c4d8f --- /dev/null +++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/oss/filesystem/cache/CompressedBytesCacheTest.java @@ -0,0 +1,223 @@ +package com.alibaba.polardbx.common.oss.filesystem.cache; + +import com.alibaba.polardbx.common.Engine; +import com.alibaba.polardbx.common.properties.ConnectionProperties; +import com.alibaba.polardbx.common.properties.FileConfig; +import com.google.common.cache.Cache; +import io.airlift.slice.SizeOf; +import 
org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.nio.file.Paths; +import java.util.Random; + +import static com.google.common.base.Verify.verify; + +public class CompressedBytesCacheTest { + public static final int CHAR_SIZE = 1024 * 1024; + public static final Random RANDOM = new Random(); + + private Configuration configuration; + private FileSystem localFileSystem; + + private final String RANDOM_STRING = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"; + private final File spillPath = Paths.get("./tmp/" + this.getClass().getSimpleName()).toAbsolutePath().toFile(); + private final File dataPath = Paths.get("./data/" + this.getClass().getSimpleName()).toAbsolutePath().toFile(); + + private final File dataFile1 = new File(dataPath, "data_1"); + private final File dataFile2 = new File(dataPath, "data_2"); + private final File dataFile3 = new File(dataPath, "data_3"); + private final File dataFile4 = new File(dataPath, "data_4"); + private final File dataFile5 = new File(dataPath, "data_5"); + private final File[] dataFiles = new File[] { + dataFile1, dataFile2, dataFile3, dataFile4, dataFile5 + }; + + public final char[] chars = new char[CHAR_SIZE]; + + private CacheManager cacheManager; + + @Before + public void initializeCacheManager() throws IOException { + if (!dataPath.exists()) { + dataPath.mkdirs(); + } + configuration = new Configuration(); + configuration.setBoolean("fs.file.impl.disable.cache", true); + + localFileSystem = FileSystem.get(new Path(dataPath.getAbsolutePath()).toUri(), configuration); + + Engine engine = Engine.LOCAL_DISK; + + FileConfig fileConfig = FileConfig.getInstance(); + fileConfig.loadValue(null, ConnectionProperties.OSS_FS_MAX_CACHED_ENTRIES, "4"); + fileConfig.loadValue(null, ConnectionProperties.OSS_FS_USE_BYTES_CACHE, "true"); + fileConfig.loadValue(null, ConnectionProperties.OSS_FS_MEMORY_RATIO_OF_BYTES_CACHE, "0.3"); + fileConfig.loadValue(null, ConnectionProperties.MPP_SPILL_PATHS, spillPath.getAbsolutePath()); + + CacheConfig cacheConfig = fileConfig.getCacheConfig(); + + FileMergeCacheConfig fileMergeCacheConfig = fileConfig.getMergeCacheConfig(); + + cacheManager = FileMergeCacheManager.createMergeCacheManager(engine, cacheConfig, fileMergeCacheConfig); + } + + @Before + public void initializeFiles() throws IOException { + for (File file : dataFiles) { + writeFile(file); + } + } + + @Test + public void testSingleFile() throws IOException, InterruptedException { + final int length = 1024; + final int position = 2048; + + doTestRead(dataFile1, length, position); + + long maxSizeOfCompressedBytes = + ((FileMergeCacheManager) cacheManager).getMaxSizeOfCompressedBytes(); + + Cache compressedBytesCache = + ((FileMergeCacheManager) cacheManager).getCompressedBytesCache(); + + CacheStats cacheStats = + ((FileMergeCacheManager) cacheManager).getCompressedBytesCacheStats(); + + // The writing cache action is asynchronous. 
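+ // A fixed sleep gives the flush executor time to finish before the assertions below run.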
+ Thread.sleep(2000); + + Assert.assertTrue(compressedBytesCache.size() == 1); + Assert.assertTrue(cacheStats.getInMemoryRetainedBytes() + == length + FileMergeCacheManager.LocalCacheFile.BASE_SIZE_IN_BYTES + 16); + } + + @Test + public void testEvict() throws IOException, InterruptedException { + + final int length = 1024; + + doTestRead(dataFile1, length, 1024 * 1); + doTestRead(dataFile2, length, 1024 * 2 + 100); + doTestRead(dataFile3, length, 1024 * 3 + 99); + doTestRead(dataFile4, length, 1024 * 4 + 88); + doTestRead(dataFile5, length, 1024 * 5 + 77); + + Cache compressedBytesCache = + ((FileMergeCacheManager) cacheManager).getCompressedBytesCache(); + + CacheStats cacheStats = + ((FileMergeCacheManager) cacheManager).getCompressedBytesCacheStats(); + + // The writing cache action is asynchronous. + Thread.sleep(2000); + + // One file has been evicted: OSS_FS_MAX_CACHED_ENTRIES is set to 4 above. + Assert.assertTrue(compressedBytesCache.size() == 4); + Assert.assertTrue(cacheStats.getInMemoryRetainedBytes() + == (length + FileMergeCacheManager.LocalCacheFile.BASE_SIZE_IN_BYTES + 16) * 4); + } + + @Test + public void testMerge() throws IOException, InterruptedException { + + final int length = 1024; + + doTestRead(dataFile1, length, 1024 * 1); // file1: [1024, 2048) + doTestRead(dataFile2, length, 1024 * 3 + 99); // file2: [3171, 4195) + + doTestRead(dataFile1, length, 1024 * 2 - 100); // file1: [1948, 2972) + doTestRead(dataFile2, length, 1024 * 4 - 88); // file2: [4008, 5032) + + doTestRead(dataFile3, length, 1024 * 5 + 77); // file3: [5197, 6221) + + Cache compressedBytesCache = + ((FileMergeCacheManager) cacheManager).getCompressedBytesCache(); + + CacheStats cacheStats = + ((FileMergeCacheManager) cacheManager).getCompressedBytesCacheStats(); + + // The writing cache action is asynchronous. + Thread.sleep(2000); + + // 3 files have been written. 
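+ // Overlapping ranges of the same file are merged into a single cache entry, so the five reads above collapse into three entries.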
+ Assert.assertTrue(compressedBytesCache.size() == 3); + + // data file 1 has been merged: [1024, 2972) + // data file 2 has been merged: [3171, 5032) + // data file 3 is single: [5197, 6221) + Assert.assertTrue(cacheStats.getInMemoryRetainedBytes() + == ((2972 - 1024) + FileMergeCacheManager.LocalCacheFile.BASE_SIZE_IN_BYTES + 16) + + ((5032 - 3171) + FileMergeCacheManager.LocalCacheFile.BASE_SIZE_IN_BYTES + 16) + + (length + FileMergeCacheManager.LocalCacheFile.BASE_SIZE_IN_BYTES + 16) + ); + } + + private void doTestRead(File file, int length, int position) throws IOException { + + Path filePath = new Path(file.getAbsolutePath()); + + FSDataInputStream fileInputStream = localFileSystem.open(filePath); + FileMergeCachingInputStream inputStream = new FileMergeCachingInputStream( + fileInputStream, + cacheManager, + filePath, + cacheManager.getMaxCacheQuota(), false + ); + + // read from cache manager + byte[] buffer = new byte[length]; + inputStream.readFully(position, buffer); + + // read from physical file + byte[] validationBuffer = new byte[length]; + fileInputStream.readFully(position, validationBuffer, 0, length); + + // verify the bytes + for (int i = 0; i < length; i++) { + verify(buffer[i] == validationBuffer[i], "corrupted buffer at position " + i); + } + } + + @After + public void deleteFiles() { + for (File file : dataFiles) { + deleteFile(file); + } + } + + private void writeFile(File dataFile) throws IOException { + if (!dataPath.exists()) { + dataPath.mkdirs(); + } + if (!dataFile.exists()) { + dataFile.createNewFile(); + } + + try (FileWriter fileWriter = new FileWriter(dataFile)) { + for (int i = 0; i < CHAR_SIZE; i++) { + chars[i] = RANDOM_STRING.charAt(RANDOM.nextInt(RANDOM_STRING.length())); + } + fileWriter.write(chars); + fileWriter.flush(); + } + } + + private void deleteFile(File dataFile) { + if (dataFile.exists()) { + dataFile.delete(); + } + Assert.assertTrue(!dataFile.exists()); + } + +} diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/oss/filesystem/cache/TestFileMergeCacheManager.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/oss/filesystem/cache/TestFileMergeCacheManager.java new file mode 100644 index 000000000..de87cde7a --- /dev/null +++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/oss/filesystem/cache/TestFileMergeCacheManager.java @@ -0,0 +1,368 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.alibaba.polardbx.common.oss.filesystem.cache; + +import com.alibaba.polardbx.common.utils.thread.NamedThreadFactory; +import com.google.common.io.MoreFiles; +import com.google.common.io.RecursiveDeleteOption; +import com.google.common.util.concurrent.SettableFuture; +import io.airlift.slice.DataSize; +import io.airlift.slice.Duration; +import org.apache.hadoop.fs.Path; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.net.URI; +import java.nio.file.Files; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.Random; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.atomic.AtomicReference; + +import static com.alibaba.polardbx.common.utils.Assert.assertTrue; +import static com.google.common.base.Preconditions.checkState; +import static io.airlift.slice.DataSize.Unit.KILOBYTE; +import static io.airlift.slice.Slices.wrappedBuffer; +import static java.lang.Integer.max; +import static java.lang.String.format; +import static java.nio.file.Files.createTempDirectory; +import static java.nio.file.StandardOpenOption.CREATE_NEW; +import static java.util.concurrent.Executors.newScheduledThreadPool; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.fail; + +public class TestFileMergeCacheManager { + public static final CacheQuota NO_CACHE_CONSTRAINTS = new CacheQuota("NO_IDENTITY", Optional.empty()); + private static final int DATA_LENGTH = (int) new DataSize(20, KILOBYTE).toBytes(); + private final byte[] data = new byte[DATA_LENGTH]; + private final ExecutorService flushExecutor = + newScheduledThreadPool(5, new NamedThreadFactory("test-cache-flusher-%s")); + private final ExecutorService removeExecutor = + newScheduledThreadPool(5, new NamedThreadFactory("test-cache-remover-%s")); + private final ScheduledExecutorService cacheSizeCalculator = + newScheduledThreadPool(1, new NamedThreadFactory("hive-cache-size-calculator-%s")); + + private URI cacheDirectory; + private URI fileDirectory; + private File dataFile; + + @Before + public void setup() + throws IOException { + new Random().nextBytes(data); + + this.cacheDirectory = createTempDirectory("cache").toUri(); + this.fileDirectory = createTempDirectory("file").toUri(); + this.dataFile = new File(fileDirectory.getPath() + "/data"); + + Files.write((new File(dataFile.toString())).toPath(), data, CREATE_NEW); + } + + @After + public void close() + throws IOException { + flushExecutor.shutdown(); + removeExecutor.shutdown(); + + checkState(cacheDirectory != null); + checkState(fileDirectory != null); + + Files.deleteIfExists(dataFile.toPath()); + MoreFiles.deleteRecursively(new File(cacheDirectory).toPath(), RecursiveDeleteOption.ALLOW_INSECURE); + + Files.deleteIfExists(new File(cacheDirectory).toPath()); + Files.deleteIfExists(new File(fileDirectory).toPath()); + + } + + @Test(timeout = 30_000) + public void testBasic() + throws InterruptedException, ExecutionException, IOException { + TestingCacheStats stats = new TestingCacheStats(); + CacheManager cacheManager = fileMergeCacheManager(stats); + byte[] buffer = new byte[1024]; + + // new read + 
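// readFully returns true on a cache HIT and false on a MISS (see the readFully helper below), so a cold read must miss. +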
assertFalse(readFully(cacheManager, NO_CACHE_CONSTRAINTS, 42, buffer, 0, 100)); + assertEquals(stats.getCacheMiss(), 1); + assertEquals(stats.getCacheHit(), 0); + stats.trigger(); + assertEquals(stats.getInMemoryRetainedBytes(), 0); + validateBuffer(data, 42, buffer, 0, 100); + + // within the range of the cache + assertTrue(readFully(cacheManager, NO_CACHE_CONSTRAINTS, 47, buffer, 0, 90)); + assertEquals(stats.getCacheMiss(), 1); + assertEquals(stats.getCacheHit(), 1); + assertEquals(stats.getInMemoryRetainedBytes(), 0); + validateBuffer(data, 47, buffer, 0, 90); + + // partially within the range of the cache + assertFalse(readFully(cacheManager, NO_CACHE_CONSTRAINTS, 52, buffer, 0, 100)); + assertEquals(stats.getCacheMiss(), 2); + assertEquals(stats.getCacheHit(), 1); + stats.trigger(); + assertEquals(stats.getInMemoryRetainedBytes(), 0); + validateBuffer(data, 52, buffer, 0, 100); + + // partially within the range of the cache + assertFalse(readFully(cacheManager, NO_CACHE_CONSTRAINTS, 32, buffer, 10, 50)); + assertEquals(stats.getCacheMiss(), 3); + assertEquals(stats.getCacheHit(), 1); + stats.trigger(); + assertEquals(stats.getInMemoryRetainedBytes(), 0); + validateBuffer(data, 32, buffer, 10, 50); + + // create a hole within two caches + assertFalse(readFully(cacheManager, NO_CACHE_CONSTRAINTS, 200, buffer, 40, 50)); + assertEquals(stats.getCacheMiss(), 4); + assertEquals(stats.getCacheHit(), 1); + stats.trigger(); + assertEquals(stats.getInMemoryRetainedBytes(), 0); + validateBuffer(data, 200, buffer, 40, 50); + + // use a range to cover the hole + assertFalse(readFully(cacheManager, NO_CACHE_CONSTRAINTS, 40, buffer, 400, 200)); + assertEquals(stats.getCacheMiss(), 5); + assertEquals(stats.getCacheHit(), 1); + stats.trigger(); + assertEquals(stats.getInMemoryRetainedBytes(), 0); + validateBuffer(data, 40, buffer, 400, 200); + } + + @Test(timeout = 30_000) + public void testSimpleStress() + throws InterruptedException, ExecutionException, IOException { + TestingCacheStats stats = new TestingCacheStats(); + CacheManager cacheManager = fileMergeCacheManager(stats); + byte[] buffer = new byte[1024]; + + // new read + assertFalse(readFully(cacheManager, NO_CACHE_CONSTRAINTS, 42, buffer, 0, 100)); + assertEquals(stats.getCacheMiss(), 1); + assertEquals(stats.getCacheHit(), 0); + stats.trigger(); + assertEquals(stats.getInMemoryRetainedBytes(), 0); + validateBuffer(data, 42, buffer, 0, 100); + + // outside the range of the cache + assertFalse(readFully(cacheManager, NO_CACHE_CONSTRAINTS, 110, buffer, 0, 90)); + assertEquals(stats.getCacheMiss(), 2); + assertEquals(stats.getCacheHit(), 0); + stats.trigger(); + assertEquals(stats.getInMemoryRetainedBytes(), 0); + validateBuffer(data, 110, buffer, 0, 90); + + // between the two cached ranges + assertTrue(readFully(cacheManager, NO_CACHE_CONSTRAINTS, 50, buffer, 0, 150)); + assertEquals(stats.getCacheMiss(), 2); + assertEquals(stats.getCacheHit(), 1); + assertEquals(stats.getInMemoryRetainedBytes(), 0); + validateBuffer(data, 50, buffer, 0, 150); + } + + @Test + public void testDeleteFile() + throws InterruptedException, ExecutionException, IOException { + TestingCacheStats stats = new TestingCacheStats(); + CacheManager cacheManager = fileMergeCacheManager(stats); + byte[] buffer = new byte[1024]; + + // new read + assertFalse(readFully(cacheManager, NO_CACHE_CONSTRAINTS, 42, buffer, 0, 100)); + assertEquals(stats.getCacheMiss(), 1); + assertEquals(stats.getCacheHit(), 0); + stats.trigger(); + assertEquals(stats.getInMemoryRetainedBytes(), 0); + 
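// Even on a miss the helper fills the caller's buffer straight from disk, so its contents can be validated immediately. +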
validateBuffer(data, 42, buffer, 0, 100); + + // delete the underlying cache files on disk + Path path = ((FileMergeCacheManager) cacheManager).getBaseDirectory(); + File[] files = new File(path.toUri()).listFiles(); + if (files != null) { + for (File file : files) { + Files.delete(file.toPath()); + } + } + + // retry: with the cache files gone, the read must fall back to a miss + assertFalse(readFully(cacheManager, NO_CACHE_CONSTRAINTS, 42, buffer, 0, 100)); + assertEquals(stats.getCacheMiss(), 2); + assertEquals(stats.getCacheHit(), 0); + stats.trigger(); + assertEquals(stats.getInMemoryRetainedBytes(), 0); + validateBuffer(data, 42, buffer, 0, 100); + + } + + @Test + public void testStress() + throws ExecutionException, InterruptedException { + CacheConfig cacheConfig = new CacheConfig().setBaseDirectory(cacheDirectory); + FileMergeCacheConfig fileMergeCacheConfig = + new FileMergeCacheConfig().setCacheTtl(new Duration(10, MILLISECONDS)); + + CacheManager cacheManager = fileMergeCacheManager(cacheConfig, fileMergeCacheConfig); + + stressTest(data, + (position, buffer, offset, length) -> readFully(cacheManager, NO_CACHE_CONSTRAINTS, position, buffer, + offset, length)); + } + + @Test(timeout = 30_000) + public void testQuota() + throws InterruptedException, ExecutionException, IOException { + TestingCacheStats stats = new TestingCacheStats(); + CacheManager cacheManager = fileMergeCacheManager(stats); + byte[] buffer = new byte[10240]; + + CacheQuota cacheQuota = new CacheQuota("test.table", Optional.of(DataSize.succinctDataSize(1, KILOBYTE))); + // read within the cache quota + assertFalse(readFully(cacheManager, cacheQuota, 42, buffer, 0, 100)); + assertEquals(stats.getCacheMiss(), 1); + assertEquals(stats.getCacheHit(), 0); + assertEquals(stats.getQuotaExceed(), 0); + stats.trigger(); + assertEquals(stats.getInMemoryRetainedBytes(), 0); + validateBuffer(data, 42, buffer, 0, 100); + + // read beyond cache quota + assertFalse(readFully(cacheManager, cacheQuota, 47, buffer, 0, 9000)); + assertEquals(stats.getCacheMiss(), 1); + assertEquals(stats.getCacheHit(), 0); + assertEquals(stats.getQuotaExceed(), 1); + assertEquals(stats.getInMemoryRetainedBytes(), 0); + validateBuffer(data, 47, buffer, 0, 90); + + // previously cached data is not evicted when the last read exceeds the quota + assertTrue(readFully(cacheManager, cacheQuota, 47, buffer, 0, 90)); + assertEquals(stats.getCacheMiss(), 1); + assertEquals(stats.getCacheHit(), 1); + assertEquals(stats.getQuotaExceed(), 1); + assertEquals(stats.getInMemoryRetainedBytes(), 0); + validateBuffer(data, 47, buffer, 0, 90); + } + + private CacheManager fileMergeCacheManager(CacheConfig cacheConfig, FileMergeCacheConfig fileMergeCacheConfig) { + return new FileMergeCacheManager(cacheConfig, fileMergeCacheConfig, new CacheStats(), flushExecutor, + removeExecutor, cacheSizeCalculator); + } + + private CacheManager fileMergeCacheManager(CacheStats cacheStats) { + CacheConfig cacheConfig = new CacheConfig(); + FileMergeCacheConfig fileMergeCacheConfig = new FileMergeCacheConfig(); + return new FileMergeCacheManager(cacheConfig.setBaseDirectory(cacheDirectory), fileMergeCacheConfig, cacheStats, + flushExecutor, removeExecutor, cacheSizeCalculator); + } + + private boolean readFully(CacheManager cacheManager, CacheQuota cacheQuota, long position, byte[] buffer, + int offset, int length) + throws IOException { + FileReadRequest key = new FileReadRequest(new Path(dataFile.getAbsolutePath()), position, length); + switch (cacheManager.get(key, buffer, offset, cacheQuota)) { + case HIT: + return true; + case MISS: + try (RandomAccessFile file = new
RandomAccessFile(dataFile.getAbsolutePath(), "r")) { + file.seek(position); + file.readFully(buffer, offset, length); + } + cacheManager.put(key, wrappedBuffer(buffer, offset, length), NO_CACHE_CONSTRAINTS); + return false; + case CACHE_QUOTA_EXCEED: + default: + return false; + } + } + + private static class TestingCacheStats + extends CacheStats { + private SettableFuture<?> trigger; + + public TestingCacheStats() { + this.trigger = SettableFuture.create(); + } + + @Override + public void addInMemoryRetainedBytes(long bytes) { + super.addInMemoryRetainedBytes(bytes); + if (bytes < 0) { + trigger.set(null); + } + } + + public void trigger() + throws InterruptedException, ExecutionException { + trigger.get(); + trigger = SettableFuture.create(); + } + } + + public static void validateBuffer(byte[] data, long position, byte[] buffer, int offset, int length) { + for (int i = 0; i < length; i++) { + assertEquals(format("corrupted buffer at position %s offset %s", position, i), + (Object) (buffer[i + offset]), (Object) (data[i + (int) position])); + } + } + + public static void stressTest(byte[] data, TestingReadOperation testingReadOperation) + throws ExecutionException, InterruptedException { + ExecutorService executor = newScheduledThreadPool(5); + List<Future<?>> futures = new ArrayList<>(); + AtomicReference<String> exception = new AtomicReference<>(); + + for (int i = 0; i < 5; i++) { + byte[] buffer = new byte[data.length]; + futures.add(executor.submit(() -> { + Random random = new Random(); + for (int j = 0; j < 200; j++) { + int position = random.nextInt(data.length - 1); + int length = random.nextInt(max((data.length - position) / 3, 1)); + int offset = random.nextInt(data.length - length); + + try { + testingReadOperation.invoke(position, buffer, offset, length); + } catch (IOException e) { + exception.compareAndSet(null, e.getMessage()); + return; + } + validateBuffer(data, position, buffer, offset, length); + } + })); + } + + for (Future<?> future : futures) { + future.get(); + } + + if (exception.get() != null) { + fail(exception.get()); + } + } + + public interface TestingReadOperation { + void invoke(long position, byte[] buffer, int offset, int length) throws IOException; + } +} diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/properties/ConnectionParamsTest.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/properties/ConnectionParamsTest.java new file mode 100644 index 000000000..17754fad1 --- /dev/null +++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/properties/ConnectionParamsTest.java @@ -0,0 +1,33 @@ +package com.alibaba.polardbx.common.properties; + +import org.junit.Test; + +import static org.junit.Assert.assertTrue; + +public class ConnectionParamsTest { + @Test + public void testENABLE_XA_TSO() { + // Assert that ENABLE_XA_TSO is an instance of BooleanConfigParam + assertTrue("ENABLE_XA_TSO should be an instance of BooleanConfigParam", + ConnectionParams.ENABLE_XA_TSO instanceof BooleanConfigParam); + + // Assert that the default value of ENABLE_XA_TSO is true + assertTrue("ENABLE_XA_TSO default value should be true", + Boolean.parseBoolean(ConnectionParams.ENABLE_XA_TSO.getDefault())); + + // Assert that ENABLE_XA_TSO is editable + assertTrue("ENABLE_XA_TSO should be editable", ConnectionParams.ENABLE_XA_TSO.isMutable()); + } + + @Test + public void testENABLE_AUTO_COMMIT_TSO() { + assertTrue("ENABLE_AUTO_COMMIT_TSO should be an instance of BooleanConfigParam", + ConnectionParams.ENABLE_AUTO_COMMIT_TSO instanceof
BooleanConfigParam); + + assertTrue("ENABLE_AUTO_COMMIT_TSO default value should be true", + Boolean.parseBoolean(ConnectionParams.ENABLE_AUTO_COMMIT_TSO.getDefault())); + + assertTrue("ENABLE_AUTO_COMMIT_TSO should be editable", + ConnectionParams.ENABLE_AUTO_COMMIT_TSO.isMutable()); + } +} diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/properties/DynamicConfigTest.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/properties/DynamicConfigTest.java new file mode 100644 index 000000000..09535f19b --- /dev/null +++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/properties/DynamicConfigTest.java @@ -0,0 +1,33 @@ +package com.alibaba.polardbx.common.properties; + +import com.alibaba.polardbx.common.TddlConstants; +import com.alibaba.polardbx.common.constants.ServerVariables; +import org.junit.Test; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +/** + * @author fangwu + */ +public class DynamicConfigTest { + @Test + public void testLoadInDegradationNum() { + assertTrue(DynamicConfig.getInstance().getInDegradationNum() == 100L); + DynamicConfig.getInstance().loadValue(null, ConnectionProperties.STATISTIC_IN_DEGRADATION_NUMBER, "1357"); + assertTrue(DynamicConfig.getInstance().getInDegradationNum() == 1357L); + } + + @Test + public void testBlackListConf() { + assertTrue(DynamicConfig.getInstance().getBlacklistConf().size() == 0); + DynamicConfig.getInstance().loadValue(null, TddlConstants.BLACK_LIST_CONF, ""); + assertTrue(DynamicConfig.getInstance().getBlacklistConf().size() == 0); + + DynamicConfig.getInstance().loadValue(null, TddlConstants.BLACK_LIST_CONF, "x1,y1"); + assertTrue(DynamicConfig.getInstance().getBlacklistConf().size() == 2); + assertTrue(ServerVariables.isGlobalBlackList("x1")); + assertTrue(ServerVariables.isGlobalBlackList("y1")); + assertFalse(ServerVariables.isGlobalBlackList("y1,x1")); + } +} diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/utils/GeneralUtilTest.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/utils/GeneralUtilTest.java new file mode 100644 index 000000000..e53af02e9 --- /dev/null +++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/utils/GeneralUtilTest.java @@ -0,0 +1,34 @@ +package com.alibaba.polardbx.common.utils; + +import org.junit.Test; + +import java.io.IOException; +import java.sql.SQLException; +import java.util.Map; + +public class GeneralUtilTest { + + @Test + public void testLsnException() { + SQLException sqlException = new SQLException("" + + "Fatal error when fetch data: Variable 'read_lsn' can't be set to the value of'2267571542"); + RuntimeException runtimeException = GeneralUtil.nestedException(sqlException); + Assert.assertTrue(runtimeException.getMessage().contains("show storage")); + } + + @Test + public void testDecode() throws IOException { + String traceInfo = "MULTI[22]\n" + + " Catalog:gdcams_tp,mk_run_meter_day_energy,data_time,2023-12-07 00:00:00_2023-12-08 00:00:00\n" + + " Action:datetimeTypeCompensation\n" + + " StatisticValue:81892745\n" + + " normal val:1\n" + + " compensation value 2023-12-04 00:00:00_2023-12-05 00:00:00:81892744"; + Map result = GeneralUtil.decode(traceInfo); + System.out.println(result); + Assert.assertTrue(result.size() == 1); + Assert.assertTrue( + result.get("catalog:gdcams_tp,mk_run_meter_day_energy,data_time,2023-12-07 00:00:00_2023-12-08 00:00:00\n" + + "action:datetimetypecompensation").equals("81892745")); + } +} diff --git 
a/polardbx-common/src/test/java/com/alibaba/polardbx/common/utils/MySQLUnicodeUtilsTest.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/utils/MySQLUnicodeUtilsTest.java new file mode 100644 index 000000000..4d890b987 --- /dev/null +++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/utils/MySQLUnicodeUtilsTest.java @@ -0,0 +1,18 @@ +package com.alibaba.polardbx.common.utils; + +import com.alibaba.polardbx.common.charset.MySQLUnicodeUtils; +import org.junit.Test; + +public class MySQLUnicodeUtilsTest { + + @Test + public void testUtf8ToLatin1() { + byte[] latin1 = {-128, 0, 0, 0, 0, 0, 0, 3, 0}; + int len = 9; + byte[] res = new byte[len]; + Assert.assertTrue(!MySQLUnicodeUtils.utf8ToLatin1(latin1, 0, len, res)); + byte[] utf8 = MySQLUnicodeUtils.latin1ToUtf8(latin1).getBytes(); + Assert.assertTrue(MySQLUnicodeUtils.utf8ToLatin1(utf8, 0, len, res)); + } + +} diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/utils/StringNumericParserTest.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/utils/StringNumericParserTest.java index 59330daa1..16f2e79a2 100644 --- a/polardbx-common/src/test/java/com/alibaba/polardbx/common/utils/StringNumericParserTest.java +++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/utils/StringNumericParserTest.java @@ -17,10 +17,12 @@ package com.alibaba.polardbx.common.utils; import com.alibaba.polardbx.common.utils.time.parser.StringNumericParser; +import com.alibaba.polardbx.common.utils.version.InstanceVersion; import org.junit.Test; import java.math.BigDecimal; import java.math.RoundingMode; +import java.util.Arrays; import java.util.Random; import java.util.stream.IntStream; @@ -28,6 +30,7 @@ public class StringNumericParserTest { private static final Random R = new Random(); private static final String NUMBER_STR = "0123456789"; private static final int MAX_PRECISION = 15; + private static byte[] generateDecimal() { int precision = R.nextInt(MAX_PRECISION) + 1; int scale = Math.min(R.nextInt(precision) + 1, 5); @@ -60,7 +63,8 @@ public void testParsingWithRound() { StringNumericParser.parseStringWithRound(bytes, 0, bytes.length, false, result); String actual = String.valueOf(result[StringNumericParser.NUMERIC_INDEX]); String expect = new BigDecimal(new String(bytes)).setScale(0, RoundingMode.HALF_UP).toPlainString(); - Assert.assertTrue(actual.equals(expect), "original bytes = " + new String(bytes) + ", actual = " + actual + ", expect = " + expect); + Assert.assertTrue(actual.equals(expect), + "original bytes = " + new String(bytes) + ", actual = " + actual + ", expect = " + expect); } ); } @@ -79,7 +83,8 @@ public void testParsingWithRoundMinus() { StringNumericParser.parseStringWithRound(newBytes, 0, newBytes.length, false, result); String actual = String.valueOf(result[StringNumericParser.NUMERIC_INDEX]); - String expect = new BigDecimal(new String(newBytes)).setScale(0, RoundingMode.HALF_UP).toPlainString(); + String expect = + new BigDecimal(new String(newBytes)).setScale(0, RoundingMode.HALF_UP).toPlainString(); Assert.assertTrue(actual.equals(expect), "actual = " + actual + ", expect = " + expect); } ); @@ -95,7 +100,8 @@ public void testParsingWithRoundUnsigned() { StringNumericParser.parseStringWithRound(bytes, 0, bytes.length, true, result); String actual = String.valueOf(result[StringNumericParser.NUMERIC_INDEX]); String expect = new BigDecimal(new String(bytes)).setScale(0, RoundingMode.HALF_UP).toPlainString(); - Assert.assertTrue(actual.equals(expect), "original bytes = " + 
new String(bytes) + ", actual = " + actual + ", expect = " + expect); + Assert.assertTrue(actual.equals(expect), + "original bytes = " + new String(bytes) + ", actual = " + actual + ", expect = " + expect); } ); } @@ -119,4 +125,43 @@ public void testParsingWithRoundMinusUnsigned() { } ); } + + @Test + public void testResultTooBig() { + long signedMinLong = -0x7fffffffffffffffL - 1; + long signedMaxLong = 0x7fffffffffffffffL; + long unsignedLongMax = 0xffffffffffffffffL; + + byte[] bigNegVal = "-184467440737095516150".getBytes(); + byte[] bigPosVal = "184467440737095516160".getBytes(); + + long[] results = new long[3]; + InstanceVersion.setMYSQL80(false); + StringNumericParser.parseStringWithRound(bigNegVal, 0, bigNegVal.length, true, results); + Assert.assertTrue(results[StringNumericParser.NUMERIC_INDEX] == unsignedLongMax); + + StringNumericParser.parseStringWithRound(bigNegVal, 0, bigNegVal.length, false, results); + Assert.assertTrue(results[StringNumericParser.NUMERIC_INDEX] == signedMinLong); + + InstanceVersion.setMYSQL80(true); + StringNumericParser.parseStringWithRound(bigNegVal, 0, bigNegVal.length, true, results); + Assert.assertTrue(results[StringNumericParser.NUMERIC_INDEX] == 0); + + StringNumericParser.parseStringWithRound(bigNegVal, 0, bigNegVal.length, false, results); + Assert.assertTrue(results[StringNumericParser.NUMERIC_INDEX] == signedMinLong); + + InstanceVersion.setMYSQL80(false); + StringNumericParser.parseStringWithRound(bigPosVal, 0, bigPosVal.length, true, results); + Assert.assertTrue(results[StringNumericParser.NUMERIC_INDEX] == unsignedLongMax); + + StringNumericParser.parseStringWithRound(bigPosVal, 0, bigPosVal.length, false, results); + Assert.assertTrue(results[StringNumericParser.NUMERIC_INDEX] == signedMaxLong); + + InstanceVersion.setMYSQL80(true); + StringNumericParser.parseStringWithRound(bigPosVal, 0, bigPosVal.length, true, results); + Assert.assertTrue(results[StringNumericParser.NUMERIC_INDEX] == unsignedLongMax); + + StringNumericParser.parseStringWithRound(bigPosVal, 0, bigPosVal.length, false, results); + Assert.assertTrue(results[StringNumericParser.NUMERIC_INDEX] == signedMaxLong); + } } diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/utils/VersionTest.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/utils/VersionTest.java index ad95531dd..c894bd355 100644 --- a/polardbx-common/src/test/java/com/alibaba/polardbx/common/utils/VersionTest.java +++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/utils/VersionTest.java @@ -37,9 +37,5 @@ public void testConvertVersion() { Assert.assertEquals(15181806l, l); l = Version.convertVersion("0.2.6-SNAPSHOT"); Assert.assertEquals(20600, l); - - String v = Version.getVerionByPath("yunos-yunying-strom-1.0.0-jar-with-dependencies.jar"); - System.out.println(v); - } } diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/utils/bloomfilter/BlockLongBloomFilterTest.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/utils/bloomfilter/BlockLongBloomFilterTest.java new file mode 100644 index 000000000..690fa838b --- /dev/null +++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/utils/bloomfilter/BlockLongBloomFilterTest.java @@ -0,0 +1,32 @@ +package com.alibaba.polardbx.common.utils.bloomfilter; + +import org.junit.Assert; +import org.junit.Test; + +public class BlockLongBloomFilterTest { + @Test + public void testMerge() { + BlockLongBloomFilter b1 = new BlockLongBloomFilter(3_000_000); + BlockLongBloomFilter b2 = new 
BlockLongBloomFilter(3_000_000); + BlockLongBloomFilter global = new BlockLongBloomFilter(3_000_000); + + b1.putLong(5657485L); + b1.putLong(892945435543543283L); + + b2.putLong(9542358545444L); + b2.putLong(18992578498L); + + global.merge(b1); + global.merge(b2); + + Assert.assertTrue(global.mightContainLong(5657485L)); + Assert.assertTrue(global.mightContainLong(892945435543543283L)); + Assert.assertTrue(global.mightContainLong(9542358545444L)); + Assert.assertTrue(global.mightContainLong(18992578498L)); + + Assert.assertFalse(global.mightContainLong(5657485L + 1)); + Assert.assertFalse(global.mightContainLong(892945435543543283L + 1)); + Assert.assertFalse(global.mightContainLong(9542358545444L + 1)); + Assert.assertFalse(global.mightContainLong(18992578498L + 1)); + } +} diff --git a/polardbx-common/src/test/java/com/alibaba/polardbx/common/utils/hash/Murmur3_128MethodTest.java b/polardbx-common/src/test/java/com/alibaba/polardbx/common/utils/hash/Murmur3_128MethodTest.java index a7916d275..39f11d362 100644 --- a/polardbx-common/src/test/java/com/alibaba/polardbx/common/utils/hash/Murmur3_128MethodTest.java +++ b/polardbx-common/src/test/java/com/alibaba/polardbx/common/utils/hash/Murmur3_128MethodTest.java @@ -163,7 +163,7 @@ public void testAllWithMurmurHashUtils() { private void equalWithGuavaHash(long data) { long originResult = zeroSeedMurmur3hashFunc.hashLong(data).asLong(); - long curResult = MurmurHashUtils.murmurHashWithZeroSeed(data); + long curResult = MurmurHashUtils.murmurHash128WithZeroSeed(data); Assert.assertEquals(originResult, curResult); } } diff --git a/polardbx-executor/pom.xml b/polardbx-executor/pom.xml index 843809d80..c704e309f 100644 --- a/polardbx-executor/pom.xml +++ b/polardbx-executor/pom.xml @@ -4,7 +4,7 @@ com.alibaba.polardbx polardbx - 5.4.18-SNAPSHOT + ${revision} ../pom.xml com.alibaba.polardbx @@ -14,12 +14,11 @@ 206 - 3.2.2 1.6.9 3.10.2 - 3.2.2 5.9.0 43.0 + 4.12 @@ -51,7 +50,6 @@ ${project.groupId} polardbx-calcite - ${project.version} commons-logging @@ -80,9 +78,16 @@ test + + nl.jqno.equalsverifier + equalsverifier + ${equalsverifier.version} + test + + org.mockito - mockito-core + mockito-inline ${mockito.verison} test @@ -108,6 +113,15 @@ com.google.guava guava + + + ${project.groupId} + polardbx-optimizer + ${project.version} + test + test-jar + + io.airlift bootstrap @@ -150,16 +164,21 @@ ${google.guice.version} + + io.github.java-diff-utils + java-diff-utils + ${google.diffutils.version} + javax.inject javax.inject ${javax.inject.version} - - - - - + + + + + @@ -290,6 +309,21 @@ cron-utils + + org.apache.arrow + arrow-vector + + + + org.apache.arrow + arrow-memory-netty + + + + io.dropwizard.metrics + metrics-core + + @@ -300,7 +334,9 @@ copy-fmpp-resources initialize - copy-resources + + copy-resources + ${project.build.directory}/codegen @@ -321,7 +357,9 @@ generate-fmpp ${codegen.phase} - generate + + generate + ${project.build.directory}/codegen/config.fmpp ${project.build.directory}/generated-sources diff --git a/polardbx-executor/src/main/codegen/templates/AndColumnColumn.ftl b/polardbx-executor/src/main/codegen/templates/AndColumnColumn.ftl index 7086942c0..204b5a32d 100644 --- a/polardbx-executor/src/main/codegen/templates/AndColumnColumn.ftl +++ b/polardbx-executor/src/main/codegen/templates/AndColumnColumn.ftl @@ -67,21 +67,21 @@ public class ${className} extends AbstractVectorizedExpression { <#list 1..(operator.operandCount) as i> - long[] array${i} = ((LongBlock) inputVec${i}).longArray(); - boolean[] nulls${i} = 
inputVec${i}.nulls(); - boolean input${i}HasNull = inputVec${i}.hasNull(); + long[] array${i} = (inputVec${i}.cast(LongBlock.class)).longArray(); + boolean[] nulls${i} = inputVec${i}.nulls(); + boolean input${i}HasNull = inputVec${i}.hasNull(); - long[] res = ((LongBlock) outputVectorSlot).longArray(); - boolean[] outputNulls = outputVectorSlot.nulls(); + long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray(); + boolean[] outputNulls = outputVectorSlot.nulls(); - boolean outputVectorHasNull = false; - if (isSelectionInUse) { - for (int i = 0; i < batchSize; i++) { - int j = sel[i]; + boolean outputVectorHasNull = false; + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; - <#list 1..(operator.operandCount) as c> + <#list 1..(operator.operandCount) as c> boolean null${c} = !input${c}HasNull ? false : nulls${c}[j]; diff --git a/polardbx-executor/src/main/codegen/templates/ArithmeticBinaryOperatorColumnColumn.ftl b/polardbx-executor/src/main/codegen/templates/ArithmeticBinaryOperatorColumnColumn.ftl index 05dbbcaec..9216695fc 100644 --- a/polardbx-executor/src/main/codegen/templates/ArithmeticBinaryOperatorColumnColumn.ftl +++ b/polardbx-executor/src/main/codegen/templates/ArithmeticBinaryOperatorColumnColumn.ftl @@ -28,29 +28,29 @@ public class ${className} extends AbstractVectorizedExpression { @Override public void eval(EvaluationContext ctx) { - super.evalChildren(ctx); - MutableChunk chunk = ctx.getPreAllocatedChunk(); - int batchSize = chunk.batchSize(); - boolean isSelectionInUse = chunk.isSelectionInUse(); - int[] sel = chunk.selection(); + super.evalChildren(ctx); + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); - RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); - RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - RandomAccessBlock rightInputVectorSlot = chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType()); + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); + RandomAccessBlock rightInputVectorSlot = chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType()); - ${type.inputType1}[] array1 = ((${type.inputVectorType1}) leftInputVectorSlot).${type.inputType1}Array(); - ${type.inputType2}[] array2 = ((${type.inputVectorType2}) rightInputVectorSlot).${type.inputType2}Array(); - ${type.outputType}[] res = ((${type.outputVectorType}) outputVectorSlot).${type.outputType}Array(); + ${type.inputType1}[] array1 = (leftInputVectorSlot.cast(${type.inputVectorType1}.class)).${type.inputType1}Array(); + ${type.inputType2}[] array2 = (rightInputVectorSlot.cast(${type.inputVectorType2}.class)).${type.inputType2}Array(); + ${type.outputType}[] res = (outputVectorSlot.cast(${type.outputVectorType}.class)).${type.outputType}Array(); - VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex(), children[1].getOutputIndex()); - boolean[] outputNulls = outputVectorSlot.nulls(); - if (isSelectionInUse) { - for (int i = 0; i < batchSize; i++) { - int j = sel[i]; - ${type.outputType} right = (${type.outputType})array2[j]; - <#if operator.classHeader == "Divide"> - if (right == 0) { - outputNulls[j] = true; + 
VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex(), children[1].getOutputIndex()); + boolean[] outputNulls = outputVectorSlot.nulls(); + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + ${type.outputType} right = (${type.outputType})array2[j]; + <#if operator.classHeader == "Divide"> + if (right == 0) { + outputNulls[j] = true; } <#if operator.classHeader == "Modulo"> diff --git a/polardbx-executor/src/main/codegen/templates/ArithmeticBinaryOperatorColumnConst.ftl b/polardbx-executor/src/main/codegen/templates/ArithmeticBinaryOperatorColumnConst.ftl index 48786cf03..92b216737 100644 --- a/polardbx-executor/src/main/codegen/templates/ArithmeticBinaryOperatorColumnConst.ftl +++ b/polardbx-executor/src/main/codegen/templates/ArithmeticBinaryOperatorColumnConst.ftl @@ -51,27 +51,27 @@ public class ${className} extends AbstractVectorizedExpression { } <#if (operator.classHeader == "Divide") || (operator.classHeader == "Modulo")> - if (right == 0) { - VectorizedExpressionUtils.setNulls(chunk, outputIndex); - return; - } + if (right == 0) { + VectorizedExpressionUtils.setNulls(chunk, outputIndex); + return; + } - RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); - RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - - ${type.inputType1}[] array1 = ((${type.inputVectorType1}) leftInputVectorSlot).${type.inputType1}Array(); - ${type.outputType}[] res = ((${type.outputVectorType}) outputVectorSlot).${type.outputType}Array(); - - VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); - if (isSelectionInUse) { - for (int i = 0; i < batchSize; i++) { - int j = sel[i]; - res[j] = ((${type.outputType})array1[j]) ${operator.op} ((${type.outputType})right); - } - } else { - for (int i = 0; i < batchSize; i++) { - res[i] = ((${type.outputType})array1[i]) ${operator.op} ((${type.outputType})right); + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); + + ${type.inputType1}[] array1 = (leftInputVectorSlot.cast(${type.inputVectorType1}.class)).${type.inputType1}Array(); + ${type.outputType}[] res = (outputVectorSlot.cast(${type.outputVectorType}.class)).${type.outputType}Array(); + + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + res[j] = ((${type.outputType})array1[j]) ${operator.op} ((${type.outputType})right); + } + } else { + for (int i = 0; i < batchSize; i++) { + res[i] = ((${type.outputType})array1[i]) ${operator.op} ((${type.outputType})right); } } diff --git a/polardbx-executor/src/main/codegen/templates/ArithmeticBinaryOperatorConstColumn.ftl b/polardbx-executor/src/main/codegen/templates/ArithmeticBinaryOperatorConstColumn.ftl index d6157ea70..12dd91e14 100644 --- a/polardbx-executor/src/main/codegen/templates/ArithmeticBinaryOperatorConstColumn.ftl +++ b/polardbx-executor/src/main/codegen/templates/ArithmeticBinaryOperatorConstColumn.ftl @@ -43,28 +43,28 @@ public class ${className} extends AbstractVectorizedExpression { MutableChunk chunk = ctx.getPreAllocatedChunk(); int batchSize = chunk.batchSize(); boolean isSelectionInUse = chunk.isSelectionInUse(); - int[] sel = chunk.selection(); + int[] sel = chunk.selection(); - if 
(leftIsNull) { - VectorizedExpressionUtils.setNulls(chunk, outputIndex); - return; - } + if (leftIsNull) { + VectorizedExpressionUtils.setNulls(chunk, outputIndex); + return; + } - RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); - RandomAccessBlock rightInputVectorSlot = chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType()); + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + RandomAccessBlock rightInputVectorSlot = chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType()); - ${type.inputType2}[] array2 = ((${type.inputVectorType2}) rightInputVectorSlot).${type.inputType2}Array(); - ${type.outputType}[] res = ((${type.outputVectorType}) outputVectorSlot).${type.outputType}Array(); + ${type.inputType2}[] array2 = (rightInputVectorSlot.cast(${type.inputVectorType2}.class)).${type.inputType2}Array(); + ${type.outputType}[] res = (outputVectorSlot.cast(${type.outputVectorType}.class)).${type.outputType}Array(); - VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[1].getOutputIndex()); - boolean[] outputNulls = outputVectorSlot.nulls(); - if (isSelectionInUse) { - for (int i = 0; i < batchSize; i++) { - int j = sel[i]; - ${type.outputType} right = (${type.outputType}) array2[j]; - <#if operator.classHeader == "Modulo"> - if (right == 0) { - outputNulls[j] = true; + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[1].getOutputIndex()); + boolean[] outputNulls = outputVectorSlot.nulls(); + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + ${type.outputType} right = (${type.outputType}) array2[j]; + <#if operator.classHeader == "Modulo"> + if (right == 0) { + outputNulls[j] = true; right = 1; } diff --git a/polardbx-executor/src/main/codegen/templates/BooleanTestOperator.ftl b/polardbx-executor/src/main/codegen/templates/BooleanTestOperator.ftl index 376b831cb..0ee883736 100644 --- a/polardbx-executor/src/main/codegen/templates/BooleanTestOperator.ftl +++ b/polardbx-executor/src/main/codegen/templates/BooleanTestOperator.ftl @@ -27,29 +27,29 @@ public class ${className} extends AbstractVectorizedExpression { } @Override - public void eval(EvaluationContext ctx) { - super.evalChildren(ctx); - MutableChunk chunk = ctx.getPreAllocatedChunk(); - int batchSize = chunk.batchSize(); - boolean isSelectionInUse = chunk.isSelectionInUse(); - int[] sel = chunk.selection(); + public void eval(EvaluationContext ctx) { + super.evalChildren(ctx); + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); - RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); - RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - ${type.inputType}[] inputArray = ((${type.inputVectorType}) inputVectorSlot).${type.inputType}Array(); - boolean[] inputNulls = inputVectorSlot.nulls(); - boolean inputHasNull = inputVectorSlot.hasNull(); - long[] res = ((LongBlock) outputVectorSlot).longArray(); - boolean[] outputNulls = outputVectorSlot.nulls(); - outputVectorSlot.setHasNull(inputVectorSlot.hasNull()); + ${type.inputType}[] inputArray = 
(inputVectorSlot.cast(${type.inputVectorType}.class)).${type.inputType}Array(); + boolean[] inputNulls = inputVectorSlot.nulls(); + boolean inputHasNull = inputVectorSlot.hasNull(); + long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray(); + boolean[] outputNulls = outputVectorSlot.nulls(); + outputVectorSlot.setHasNull(inputVectorSlot.hasNull()); - if (isSelectionInUse) { - for (int i = 0; i < batchSize; i++) { - int j = sel[i]; - outputNulls[j] = false; - boolean inputNull = !inputHasNull ? false : inputNulls[j]; - <#if operator.classHeader = "IsTrue"> + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + outputNulls[j] = false; + boolean inputNull = !inputHasNull ? false : inputNulls[j]; + <#if operator.classHeader = "IsTrue"> res[j] = inputNull ? 0 : (inputArray[j] != 0) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; <#if operator.classHeader = "IsFalse"> diff --git a/polardbx-executor/src/main/codegen/templates/CastNumericDecimal.ftl b/polardbx-executor/src/main/codegen/templates/CastNumericDecimal.ftl index c6501c062..a71647fa9 100644 --- a/polardbx-executor/src/main/codegen/templates/CastNumericDecimal.ftl +++ b/polardbx-executor/src/main/codegen/templates/CastNumericDecimal.ftl @@ -47,8 +47,8 @@ public class ${className} extends AbstractVectorizedExpression { RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - ${type.inputType}[] input = ((${type.inputVectorType}) inputVectorSlot).${type.inputType}Array(); - Slice output = ((DecimalBlock) outputVectorSlot).getMemorySegments(); + ${type.inputType}[] input = (inputVectorSlot.cast(${type.inputVectorType}.class)).${type.inputType}Array(); + Slice output = (outputVectorSlot.cast(DecimalBlock.class)).getMemorySegments(); DecimalStructure tmpDecimal = new DecimalStructure(); int precision = outputDataType.getPrecision(); @@ -86,6 +86,7 @@ public class ${className} extends AbstractVectorizedExpression { DecimalConverter.rescale(tmpDecimal, toValue, precision, scale, isUnsigned); } } + outputVectorSlot.cast(DecimalBlock.class).setFullState(); } } diff --git a/polardbx-executor/src/main/codegen/templates/CastNumericSigned.ftl b/polardbx-executor/src/main/codegen/templates/CastNumericSigned.ftl index 13659722c..3dd17b603 100644 --- a/polardbx-executor/src/main/codegen/templates/CastNumericSigned.ftl +++ b/polardbx-executor/src/main/codegen/templates/CastNumericSigned.ftl @@ -38,28 +38,28 @@ public class ${className} extends AbstractVectorizedExpression { @Override public void eval(EvaluationContext ctx) { - super.evalChildren(ctx); + super.evalChildren(ctx); - MutableChunk chunk = ctx.getPreAllocatedChunk(); - int batchSize = chunk.batchSize(); - boolean isSelectionInUse = chunk.isSelectionInUse(); - int[] sel = chunk.selection(); + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); - RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); - RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - ${type.inputType}[] input = ((${type.inputVectorType}) 
inputVectorSlot).${type.inputType}Array(); - long[] output = ((LongBlock) outputVectorSlot).longArray(); + ${type.inputType}[] input = (inputVectorSlot.cast(${type.inputVectorType}.class)).${type.inputType}Array(); + long[] output = (outputVectorSlot.cast(LongBlock.class)).longArray(); - // handle nulls - VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); + // handle nulls + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); - if (isSelectionInUse) { - for (int i = 0; i < batchSize; i++) { - int j = sel[i]; - <#if type.inputType == "float" || type.inputType == "double"> - output[j] = (long) Math.rint(input[j]); - + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + <#if type.inputType == "float" || type.inputType == "double"> + output[j] = (long) Math.rint(input[j]); + <#if type.inputType != "float" && type.inputType != "double"> output[j] = input[j]; diff --git a/polardbx-executor/src/main/codegen/templates/CastNumericUnsigned.ftl b/polardbx-executor/src/main/codegen/templates/CastNumericUnsigned.ftl index 0ffc7b412..a3723d03d 100644 --- a/polardbx-executor/src/main/codegen/templates/CastNumericUnsigned.ftl +++ b/polardbx-executor/src/main/codegen/templates/CastNumericUnsigned.ftl @@ -48,18 +48,18 @@ public class ${className} extends AbstractVectorizedExpression { RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - ${type.inputType}[] input = ((${type.inputVectorType}) inputVectorSlot).${type.inputType}Array(); - long[] output = ((ULongBlock) outputVectorSlot).longArray(); + ${type.inputType}[] input = (inputVectorSlot.cast(${type.inputVectorType}.class)).${type.inputType}Array(); + long[] output = (outputVectorSlot.cast(ULongBlock.class)).longArray(); // handle nulls VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); if (isSelectionInUse) { - for (int i = 0; i < batchSize; i++) { - int j = sel[i]; - <#if type.inputType == "float" || type.inputType == "double"> - output[j] = (long) Math.rint(input[j]); - + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + <#if type.inputType == "float" || type.inputType == "double"> + output[j] = (long) Math.rint(input[j]); + <#if type.inputType != "float" && type.inputType != "double"> output[j] = input[j]; diff --git a/polardbx-executor/src/main/codegen/templates/ComparisonBinaryDateColumnConst.ftl b/polardbx-executor/src/main/codegen/templates/ComparisonBinaryDateColumnConst.ftl index 10f3860a6..68bd568a5 100644 --- a/polardbx-executor/src/main/codegen/templates/ComparisonBinaryDateColumnConst.ftl +++ b/polardbx-executor/src/main/codegen/templates/ComparisonBinaryDateColumnConst.ftl @@ -52,28 +52,28 @@ public class ${className} extends AbstractVectorizedExpression { public void eval(EvaluationContext ctx) { children[0].eval(ctx); - MutableChunk chunk = ctx.getPreAllocatedChunk(); - int batchSize = chunk.batchSize(); - boolean isSelectionInUse = chunk.isSelectionInUse(); - int[] sel = chunk.selection(); - - RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); - RandomAccessBlock leftInputVectorSlot = - chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - - if (leftInputVectorSlot instanceof DateBlock) { - long[] array1 = ((DateBlock) leftInputVectorSlot).getPacked(); - long[] res = ((LongBlock) 
outputVectorSlot).longArray(); - - if (rightIsNull) { - boolean[] outputNulls = outputVectorSlot.nulls(); - if (isSelectionInUse) { - for (int i = 0; i < batchSize; i++) { - int j = sel[i]; - outputNulls[j] = true; - } - } else { - for (int i = 0; i < batchSize; i++) { + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); + + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + RandomAccessBlock leftInputVectorSlot = + chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); + + if (leftInputVectorSlot instanceof DateBlock) { + long[] array1 = (leftInputVectorSlot.cast(DateBlock.class)).getPacked(); + long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray(); + + if (rightIsNull) { + boolean[] outputNulls = outputVectorSlot.nulls(); + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + outputNulls[j] = true; + } + } else { + for (int i = 0; i < batchSize; i++) { outputNulls[i] = true; } } @@ -86,23 +86,23 @@ public class ${className} extends AbstractVectorizedExpression { } else { for (int i = 0; i < batchSize; i++) { res[i] = (array1[i] ${operator.op} right) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; - } - } - - VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); - } - } else if (leftInputVectorSlot instanceof ReferenceBlock) { - long[] res = ((LongBlock) outputVectorSlot).longArray(); - if (rightIsNull) { - boolean[] outputNulls = outputVectorSlot.nulls(); - if (isSelectionInUse) { - for (int i = 0; i < batchSize; i++) { - int j = sel[i]; - outputNulls[j] = true; - } - } else { - for (int i = 0; i < batchSize; i++) { - outputNulls[i] = true; + } + } + + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); + } + } else if (leftInputVectorSlot instanceof ReferenceBlock) { + long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray(); + if (rightIsNull) { + boolean[] outputNulls = outputVectorSlot.nulls(); + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + outputNulls[j] = true; + } + } else { + for (int i = 0; i < batchSize; i++) { + outputNulls[i] = true; } } } else { diff --git a/polardbx-executor/src/main/codegen/templates/ComparisonBinaryDatetimeColumnColumn.ftl b/polardbx-executor/src/main/codegen/templates/ComparisonBinaryDatetimeColumnColumn.ftl index 7d668fdff..ea7389406 100644 --- a/polardbx-executor/src/main/codegen/templates/ComparisonBinaryDatetimeColumnColumn.ftl +++ b/polardbx-executor/src/main/codegen/templates/ComparisonBinaryDatetimeColumnColumn.ftl @@ -41,42 +41,42 @@ public class ${className} extends AbstractVectorizedExpression { MutableChunk chunk = ctx.getPreAllocatedChunk(); int batchSize = chunk.batchSize(); - boolean isSelectionInUse = chunk.isSelectionInUse(); - int[] sel = chunk.selection(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); - RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); - RandomAccessBlock leftInputVectorSlot = + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - RandomAccessBlock rightInputVectorSlot = + RandomAccessBlock rightInputVectorSlot = chunk.slotIn(children[1].getOutputIndex(), 
diff --git a/polardbx-executor/src/main/codegen/templates/ComparisonBinaryDatetimeColumnColumn.ftl b/polardbx-executor/src/main/codegen/templates/ComparisonBinaryDatetimeColumnColumn.ftl
index 7d668fdff..ea7389406 100644
--- a/polardbx-executor/src/main/codegen/templates/ComparisonBinaryDatetimeColumnColumn.ftl
+++ b/polardbx-executor/src/main/codegen/templates/ComparisonBinaryDatetimeColumnColumn.ftl
@@ -41,42 +41,42 @@ public class ${className} extends AbstractVectorizedExpression {

         MutableChunk chunk = ctx.getPreAllocatedChunk();
         int batchSize = chunk.batchSize();
-        boolean isSelectionInUse = chunk.isSelectionInUse();
-        int[] sel = chunk.selection();
+        boolean isSelectionInUse = chunk.isSelectionInUse();
+        int[] sel = chunk.selection();

-        RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType);
-        RandomAccessBlock leftInputVectorSlot =
+        RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType);
+        RandomAccessBlock leftInputVectorSlot =
             chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType());
-        RandomAccessBlock rightInputVectorSlot =
+        RandomAccessBlock rightInputVectorSlot =
             chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType());

-        if (leftInputVectorSlot instanceof TimestampBlock && rightInputVectorSlot instanceof TimestampBlock) {
-            long[] array1 = ((TimestampBlock) leftInputVectorSlot).getPacked();
-            long[] array2 = ((TimestampBlock) rightInputVectorSlot).getPacked();
-            long[] res = ((LongBlock) outputVectorSlot).longArray();
+        if (leftInputVectorSlot instanceof TimestampBlock && rightInputVectorSlot instanceof TimestampBlock) {
+            long[] array1 = (leftInputVectorSlot.cast(TimestampBlock.class)).getPacked();
+            long[] array2 = (rightInputVectorSlot.cast(TimestampBlock.class)).getPacked();
+            long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray();

             if (isSelectionInUse) {
-                for (int i = 0; i < batchSize; i++) {
-                    int j = sel[i];
-                    res[j] = (array1[j] ${operator.op} array2[j]) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE;
+                for (int i = 0; i < batchSize; i++) {
+                    int j = sel[i];
+                    res[j] = (array1[j] ${operator.op} array2[j]) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE;
                 }
             } else {
                 for (int i = 0; i < batchSize; i++) {
                     res[i] = (array1[i] ${operator.op} array2[i]) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE;
-                }
+                }
             }

             VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex(),
-                children[1].getOutputIndex());
-        } else if (leftInputVectorSlot instanceof ReferenceBlock && rightInputVectorSlot instanceof ReferenceBlock) {
-            long[] res = ((LongBlock) outputVectorSlot).longArray();
+                children[1].getOutputIndex());
+        } else if (leftInputVectorSlot instanceof ReferenceBlock && rightInputVectorSlot instanceof ReferenceBlock) {
+            long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray();
             if (isSelectionInUse) {
-                for (int i = 0; i < batchSize; i++) {
-                    int j = sel[i];
-                    long lPack = VectorizedExpressionUtils.packedLong(leftInputVectorSlot, j);
-                    long rPack = VectorizedExpressionUtils.packedLong(rightInputVectorSlot, j);
+                for (int i = 0; i < batchSize; i++) {
+                    int j = sel[i];
+                    long lPack = VectorizedExpressionUtils.packedLong(leftInputVectorSlot, j);
+                    long rPack = VectorizedExpressionUtils.packedLong(rightInputVectorSlot, j);

-                    res[j] = lPack ${operator.op} rPack ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE;
+                    res[j] = lPack ${operator.op} rPack ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE;
                 }
             } else {
                 for (int i = 0; i < batchSize; i++) {
@@ -84,39 +84,39 @@ public class ${className} extends AbstractVectorizedExpression {
                     long rPack = VectorizedExpressionUtils.packedLong(rightInputVectorSlot, i);

                     res[i] = lPack ${operator.op} rPack ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE;
-                }
+                }
             }

             VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex(),
-                children[1].getOutputIndex());
-        } else if (leftInputVectorSlot instanceof TimestampBlock && rightInputVectorSlot instanceof ReferenceBlock) {
-            long[] res = ((LongBlock) outputVectorSlot).longArray();
-            long[] array1 = ((TimestampBlock) leftInputVectorSlot).getPacked();
+                children[1].getOutputIndex());
+        } else if (leftInputVectorSlot instanceof TimestampBlock && rightInputVectorSlot instanceof ReferenceBlock) {
+            long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray();
+            long[] array1 = (leftInputVectorSlot.cast(TimestampBlock.class)).getPacked();
             if (isSelectionInUse) {
-                for (int i = 0; i < batchSize; i++) {
-                    int j = sel[i];
-                    long rPack = VectorizedExpressionUtils.packedLong(rightInputVectorSlot, j);
-                    res[j] = (array1[j] ${operator.op} rPack) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE;
+                for (int i = 0; i < batchSize; i++) {
+                    int j = sel[i];
+                    long rPack = VectorizedExpressionUtils.packedLong(rightInputVectorSlot, j);
+                    res[j] = (array1[j] ${operator.op} rPack) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE;
                 }
             } else {
                 for (int i = 0; i < batchSize; i++) {
                     long rPack = VectorizedExpressionUtils.packedLong(rightInputVectorSlot, i);
                     res[i] = (array1[i] ${operator.op} rPack) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE;
-                }
+                }
             }

             VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex(),
-                children[1].getOutputIndex());
-        } else if (leftInputVectorSlot instanceof ReferenceBlock && rightInputVectorSlot instanceof TimestampBlock) {
-            long[] res = ((LongBlock) outputVectorSlot).longArray();
+                children[1].getOutputIndex());
+        } else if (leftInputVectorSlot instanceof ReferenceBlock && rightInputVectorSlot instanceof TimestampBlock) {
+            long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray();
             long[] array2 = ((TimestampBlock) rightInputVectorSlot).getPacked();
             if (isSelectionInUse) {
-                for (int i = 0; i < batchSize; i++) {
-                    int j = sel[i];
-                    long lPack = VectorizedExpressionUtils.packedLong(leftInputVectorSlot, j);
-                    res[j] = (lPack ${operator.op} array2[j]) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE;
+                for (int i = 0; i < batchSize; i++) {
+                    int j = sel[i];
+                    long lPack = VectorizedExpressionUtils.packedLong(leftInputVectorSlot, j);
+                    res[j] = (lPack ${operator.op} array2[j]) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE;
                 }
             } else {
                 for (int i = 0; i < batchSize; i++) {
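Note: comparing `getPacked()` longs with plain `<`, `>`, `==` is sound only because the datetime is packed into one long whose numeric order matches chronological order. A simplified, hypothetical packing that illustrates the idea (the real TimestampBlock layout follows MySQL's packed datetime format and differs in detail):

```java
// Sketch of an order-preserving datetime packing; not the actual encoding.
class PackedDatetimeDemo {
    static long pack(int year, int month, int day, int hour, int minute, int second) {
        long ymd = (year * 13L + month) * 32L + day;      // coarse fields first
        long hms = (hour * 60L + minute) * 60L + second;  // fine fields last
        return (ymd << 17) | hms;                         // higher bits = more significant
    }

    public static void main(String[] args) {
        long a = pack(2023, 5, 1, 12, 0, 0);
        long b = pack(2023, 5, 1, 12, 0, 1);
        System.out.println(a < b); // true: one second later packs to a larger long
    }
}
```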
diff --git a/polardbx-executor/src/main/codegen/templates/ComparisonBinaryDatetimeColumnConst.ftl b/polardbx-executor/src/main/codegen/templates/ComparisonBinaryDatetimeColumnConst.ftl
index fa5c6de86..5451099f6 100644
--- a/polardbx-executor/src/main/codegen/templates/ComparisonBinaryDatetimeColumnConst.ftl
+++ b/polardbx-executor/src/main/codegen/templates/ComparisonBinaryDatetimeColumnConst.ftl
@@ -54,28 +54,28 @@ public class ${className} extends AbstractVectorizedExpression {
     public void eval(EvaluationContext ctx) {
         children[0].eval(ctx);

-        MutableChunk chunk = ctx.getPreAllocatedChunk();
-        int batchSize = chunk.batchSize();
-        boolean isSelectionInUse = chunk.isSelectionInUse();
-        int[] sel = chunk.selection();
-
-        RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType);
-        RandomAccessBlock leftInputVectorSlot =
-            chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType());
-
-        if (leftInputVectorSlot instanceof TimestampBlock) {
-            long[] array1 = ((TimestampBlock) leftInputVectorSlot).getPacked();
-            long[] res = ((LongBlock) outputVectorSlot).longArray();
-
-            if (rightIsNull) {
-                boolean[] outputNulls = outputVectorSlot.nulls();
-                if (isSelectionInUse) {
-                    for (int i = 0; i < batchSize; i++) {
-                        int j = sel[i];
-                        outputNulls[j] = true;
-                    }
-                } else {
-                    for (int i = 0; i < batchSize; i++) {
+        MutableChunk chunk = ctx.getPreAllocatedChunk();
+        int batchSize = chunk.batchSize();
+        boolean isSelectionInUse = chunk.isSelectionInUse();
+        int[] sel = chunk.selection();
+
+        RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType);
+        RandomAccessBlock leftInputVectorSlot =
+            chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType());
+
+        if (leftInputVectorSlot instanceof TimestampBlock) {
+            long[] array1 = (leftInputVectorSlot.cast(TimestampBlock.class)).getPacked();
+            long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray();
+
+            if (rightIsNull) {
+                boolean[] outputNulls = outputVectorSlot.nulls();
+                if (isSelectionInUse) {
+                    for (int i = 0; i < batchSize; i++) {
+                        int j = sel[i];
+                        outputNulls[j] = true;
+                    }
+                } else {
+                    for (int i = 0; i < batchSize; i++) {
                         outputNulls[i] = true;
                     }
                 }
@@ -88,23 +88,23 @@ public class ${className} extends AbstractVectorizedExpression {
             } else {
                 for (int i = 0; i < batchSize; i++) {
                     res[i] = (array1[i] ${operator.op} right) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE;
-                }
-            }
-
-            VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex());
-        }
-        } else if (leftInputVectorSlot instanceof ReferenceBlock) {
-            long[] res = ((LongBlock) outputVectorSlot).longArray();
-            if (rightIsNull) {
-                boolean[] outputNulls = outputVectorSlot.nulls();
-                if (isSelectionInUse) {
-                    for (int i = 0; i < batchSize; i++) {
-                        int j = sel[i];
-                        outputNulls[j] = true;
-                    }
-                } else {
-                    for (int i = 0; i < batchSize; i++) {
-                        outputNulls[i] = true;
+                }
+            }
+
+            VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex());
+        }
+        } else if (leftInputVectorSlot instanceof ReferenceBlock) {
+            long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray();
+            if (rightIsNull) {
+                boolean[] outputNulls = outputVectorSlot.nulls();
+                if (isSelectionInUse) {
+                    for (int i = 0; i < batchSize; i++) {
+                        int j = sel[i];
+                        outputNulls[j] = true;
+                    }
+                } else {
+                    for (int i = 0; i < batchSize; i++) {
+                        outputNulls[i] = true;
                     }
                 }
             } else {
diff --git a/polardbx-executor/src/main/codegen/templates/ComparisonBinaryDecimalColumnConst.ftl b/polardbx-executor/src/main/codegen/templates/ComparisonBinaryDecimalColumnConst.ftl
index a2d21ba01..02278432b 100644
--- a/polardbx-executor/src/main/codegen/templates/ComparisonBinaryDecimalColumnConst.ftl
+++ b/polardbx-executor/src/main/codegen/templates/ComparisonBinaryDecimalColumnConst.ftl
@@ -51,43 +51,43 @@ public class ${className} extends AbstractVectorizedExpression {

     @Override
     public void eval(EvaluationContext ctx) {
-        children[0].eval(ctx);
-        MutableChunk chunk = ctx.getPreAllocatedChunk();
-        int batchSize = chunk.batchSize();
-        boolean isSelectionInUse = chunk.isSelectionInUse();
-        int[] sel = chunk.selection();
+        children[0].eval(ctx);
+        MutableChunk chunk = ctx.getPreAllocatedChunk();
+        int batchSize = chunk.batchSize();
+        boolean isSelectionInUse = chunk.isSelectionInUse();
+        int[] sel = chunk.selection();

-        RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType);
-        RandomAccessBlock leftInputVectorSlot =
-            chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType());
+        RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType);
+        RandomAccessBlock leftInputVectorSlot =
+            chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType());

-        long[] output = ((LongBlock) outputVectorSlot).longArray();
+        long[] output = (outputVectorSlot.cast(LongBlock.class)).longArray();

-        DecimalStructure leftDec;
-        DecimalStructure operand1Dec = operand1.getDecimalStructure();
+        DecimalStructure leftDec;
+        DecimalStructure operand1Dec = operand1.getDecimalStructure();

-        VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex());
+        VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex());

-        if (isSelectionInUse) {
-            for (int i = 0; i < batchSize; i++) {
-                int j = sel[i];
-                int fromIndex = j * DECIMAL_MEMORY_SIZE;
+        if (isSelectionInUse) {
+            for (int i = 0; i < batchSize; i++) {
+                int j = sel[i];
+                int fromIndex = j * DECIMAL_MEMORY_SIZE;

-                // fetch left decimal value
-                leftDec = new DecimalStructure(((DecimalBlock) leftInputVectorSlot).getRegion(j));
+                // fetch left decimal value
+                leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(j));

-                boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) ${operator.op} 0;
+                boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) ${operator.op} 0;

-                output[j] = b1 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE;
-            }
-        } else {
-            for (int i = 0; i < batchSize; i++) {
-                int fromIndex = i * DECIMAL_MEMORY_SIZE;
+                output[j] = b1 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE;
+            }
+        } else {
+            for (int i = 0; i < batchSize; i++) {
+                int fromIndex = i * DECIMAL_MEMORY_SIZE;

-                // fetch left decimal value
-                leftDec = new DecimalStructure(((DecimalBlock) leftInputVectorSlot).getRegion(i));
+                // fetch left decimal value
+                leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(i));

-                boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) ${operator.op} 0;
+                boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) ${operator.op} 0;

                 output[i] = b1 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE;
             }
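Note: the decimal template parses the constant operand once (`operand1Dec`) outside the loop and then runs `FastDecimalUtils.compare` per row. A rough equivalent with `java.math.BigDecimal` standing in for `DecimalStructure`:

```java
import java.math.BigDecimal;

class DecimalColumnConstDemo {
    public static void main(String[] args) {
        BigDecimal[] column = {new BigDecimal("1.10"), new BigDecimal("2.50")};
        BigDecimal constant = new BigDecimal("2.00");   // built once, outside the loop
        long[] out = new long[column.length];
        for (int i = 0; i < column.length; i++) {
            // compareTo(...) > 0 plays the role of FastDecimalUtils.compare(...) ${operator.op} 0
            out[i] = column[i].compareTo(constant) > 0 ? 1L : 0L;
        }
        System.out.println(java.util.Arrays.toString(out)); // [0, 1]
    }
}
```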
diff --git a/polardbx-executor/src/main/codegen/templates/ComparisonBinaryOperatorColumnColumn.ftl b/polardbx-executor/src/main/codegen/templates/ComparisonBinaryOperatorColumnColumn.ftl
index 3c58c1afe..64a5a4594 100644
--- a/polardbx-executor/src/main/codegen/templates/ComparisonBinaryOperatorColumnColumn.ftl
+++ b/polardbx-executor/src/main/codegen/templates/ComparisonBinaryOperatorColumnColumn.ftl
@@ -28,29 +28,29 @@ public class ${className} extends AbstractVectorizedExpression {

     @Override
     public void eval(EvaluationContext ctx) {
-        super.evalChildren(ctx);
-        MutableChunk chunk = ctx.getPreAllocatedChunk();
-        int batchSize = chunk.batchSize();
-        boolean isSelectionInUse = chunk.isSelectionInUse();
-        int[] sel = chunk.selection();
+        super.evalChildren(ctx);
+        MutableChunk chunk = ctx.getPreAllocatedChunk();
+        int batchSize = chunk.batchSize();
+        boolean isSelectionInUse = chunk.isSelectionInUse();
+        int[] sel = chunk.selection();

-        RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType);
-        RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType());
-        RandomAccessBlock rightInputVectorSlot = chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType());
+        RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType);
+        RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType());
+        RandomAccessBlock rightInputVectorSlot = chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType());

-        ${type.inputType1}[] array1 = ((${type.inputVectorType1}) leftInputVectorSlot).${type.inputType1}Array();
-        ${type.inputType2}[] array2 = ((${type.inputVectorType2}) rightInputVectorSlot).${type.inputType2}Array();
-        long[] res = ((LongBlock) outputVectorSlot).longArray();
+        ${type.inputType1}[] array1 = (leftInputVectorSlot.cast(${type.inputVectorType1}.class)).${type.inputType1}Array();
+        ${type.inputType2}[] array2 = (rightInputVectorSlot.cast(${type.inputVectorType2}.class)).${type.inputType2}Array();
+        long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray();

         <#if operator.classHeader != "SEQ">
-        if (isSelectionInUse) {
-            for (int i = 0; i < batchSize; i++) {
-                int j = sel[i];
-                res[j] = (array1[j] ${operator.op} array2[j]) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE;
-            }
-        } else {
-            for (int i = 0; i < batchSize; i++) {
-                res[i] = (array1[i] ${operator.op} array2[i]) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE;
+        if (isSelectionInUse) {
+            for (int i = 0; i < batchSize; i++) {
+                int j = sel[i];
+                res[j] = (array1[j] ${operator.op} array2[j]) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE;
+            }
+        } else {
+            for (int i = 0; i < batchSize; i++) {
+                res[i] = (array1[i] ${operator.op} array2[i]) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE;
             }
         }
diff --git a/polardbx-executor/src/main/codegen/templates/ComparisonBinaryOperatorColumnConst.ftl b/polardbx-executor/src/main/codegen/templates/ComparisonBinaryOperatorColumnConst.ftl
index c743f17b8..a3f175bdb 100644
--- a/polardbx-executor/src/main/codegen/templates/ComparisonBinaryOperatorColumnConst.ftl
+++ b/polardbx-executor/src/main/codegen/templates/ComparisonBinaryOperatorColumnConst.ftl
@@ -33,28 +33,28 @@ public class ${className} extends AbstractVectorizedExpression {

     @Override
     public void eval(EvaluationContext ctx) {
-        children[0].eval(ctx);
+        children[0].eval(ctx);

-        MutableChunk chunk = ctx.getPreAllocatedChunk();
-        int batchSize = chunk.batchSize();
-        boolean isSelectionInUse = chunk.isSelectionInUse();
-        int[] sel = chunk.selection();
+        MutableChunk chunk = ctx.getPreAllocatedChunk();
+        int batchSize = chunk.batchSize();
+        boolean isSelectionInUse = chunk.isSelectionInUse();
+        int[] sel = chunk.selection();

-        RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType);
-        RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType());
+        RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType);
+        RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType());

-        ${type.inputType1}[] array1 = ((${type.inputVectorType1}) leftInputVectorSlot).${type.inputType1}Array();
-        long[] res = ((LongBlock) outputVectorSlot).longArray();
+        ${type.inputType1}[] array1 = (leftInputVectorSlot.cast(${type.inputVectorType1}.class)).${type.inputType1}Array();
+        long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray();

         <#if operator.classHeader != 'SEQ'>
-        if (rightIsNull) {
-            boolean[] outputNulls = outputVectorSlot.nulls();
-            if (isSelectionInUse) {
-                for (int i = 0; i < batchSize; i++) {
-                    int j = sel[i];
-                    outputNulls[j] = true;
-                }
-            } else {
+        if (rightIsNull) {
+            boolean[] outputNulls = outputVectorSlot.nulls();
+            if (isSelectionInUse) {
+                for (int i = 0; i < batchSize; i++) {
+                    int j = sel[i];
+                    outputNulls[j] = true;
+                }
+            } else {
                 for (int i = 0; i < batchSize; i++) {
                     outputNulls[i] = true;
                 }
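Note: after FreeMarker substitutes `${type.*}` and `${operator.op}`, each template expansion is a plain branch-free comparison loop. Roughly what one generated class body looks like for `long < long` (illustrative only):

```java
class LessThanLongColLongColDemo {
    static void eval(long[] array1, long[] array2, long[] res, int batchSize) {
        for (int i = 0; i < batchSize; i++) {
            // 1L/0L stand in for LongBlock.TRUE_VALUE / LongBlock.FALSE_VALUE
            res[i] = (array1[i] < array2[i]) ? 1L : 0L;
        }
    }

    public static void main(String[] args) {
        long[] res = new long[2];
        eval(new long[] {1, 5}, new long[] {2, 4}, res, 2);
        System.out.println(java.util.Arrays.toString(res)); // [1, 0]
    }
}
```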
diff --git a/polardbx-executor/src/main/codegen/templates/ComparisonBinaryOperatorConstColumn.ftl b/polardbx-executor/src/main/codegen/templates/ComparisonBinaryOperatorConstColumn.ftl
index 62f8ac60e..1ad71e64a 100644
--- a/polardbx-executor/src/main/codegen/templates/ComparisonBinaryOperatorConstColumn.ftl
+++ b/polardbx-executor/src/main/codegen/templates/ComparisonBinaryOperatorConstColumn.ftl
@@ -43,17 +43,17 @@ public class ${className} extends AbstractVectorizedExpression {
         RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType);
         RandomAccessBlock rightInputVectorSlot = chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType());

-        ${type.inputType2}[] array2 = ((${type.inputVectorType2}) rightInputVectorSlot).${type.inputType2}Array();
-        long[] res = ((LongBlock) outputVectorSlot).longArray();
+        ${type.inputType2}[] array2 = (rightInputVectorSlot.cast(${type.inputVectorType2}.class)).${type.inputType2}Array();
+        long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray();

         <#if operator.classHeader != "SEQ">
-        if (leftIsNull) {
+        if (leftIsNull) {
             boolean[] outputNulls = outputVectorSlot.nulls();
             if (isSelectionInUse) {
-                for (int i=0; i < batchSize; i++) {
-                    int j = sel[i];
-                    outputNulls[j] = true;
-                }
+                for (int i=0; i < batchSize; i++) {
+                    int j = sel[i];
+                    outputNulls[j] = true;
+                }
             } else {
                 for (int i=0; i < batchSize; i++) {
                     outputNulls[i] = true;
diff --git a/polardbx-executor/src/main/codegen/templates/DecimalAddSubMulOperatorColumnColumn.ftl b/polardbx-executor/src/main/codegen/templates/DecimalAddSubMulOperatorColumnColumn.ftl
index 60637fb65..f9e157681 100644
--- a/polardbx-executor/src/main/codegen/templates/DecimalAddSubMulOperatorColumnColumn.ftl
+++ b/polardbx-executor/src/main/codegen/templates/DecimalAddSubMulOperatorColumnColumn.ftl
@@ -49,19 +49,19 @@ public class ${className} extends AbstractVectorizedExpression {

         <#if type.inputDataType1 == "Decimal">
         <#else>
-        ${type.inputType1}[] array1 = ((${type.inputVectorType1}) leftInputVectorSlot).${type.inputType1}Array();
+        ${type.inputType1}[] array1 = (leftInputVectorSlot.cast(${type.inputVectorType1}.class)).${type.inputType1}Array();
         </#if>

         <#if type.inputDataType2 == "Decimal">
         <#else>
-        ${type.inputType2}[] array2 = ((${type.inputVectorType2}) rightInputVectorSlot).${type.inputType2}Array();
+        ${type.inputType2}[] array2 = (rightInputVectorSlot.cast(${type.inputVectorType2}.class)).${type.inputType2}Array();
         </#if>

         <#if type.outputDataType == "Decimal">
-        Slice output = ((DecimalBlock) outputVectorSlot).getMemorySegments();
+        Slice output = (outputVectorSlot.cast(DecimalBlock.class)).getMemorySegments();
         <#else>
-        ${type.outputType}[] res = ((${type.outputVectorType}) outputVectorSlot).${type.outputType}Array();
+        ${type.outputType}[] res = (outputVectorSlot.cast(${type.outputVectorType}.class)).${type.outputType}Array();
         </#if>

         DecimalStructure leftDec = new DecimalStructure();
@@ -98,7 +98,7 @@ public class ${className} extends AbstractVectorizedExpression {
                     <#if type.inputDataType1 == "ULong">
                     DecimalConverter.unsignedlongToDecimal(array1[j], leftDec);
                     <#elseif type.inputDataType1 == "Decimal">
-                    leftDec = new DecimalStructure(((DecimalBlock) leftInputVectorSlot).getRegion(j));
+                    leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(j));
                     <#else>
                     DecimalConverter.longToDecimal(array1[j], leftDec, isLeftUnsigned);
                     </#if>
@@ -107,7 +107,7 @@ public class ${className} extends AbstractVectorizedExpression {
                     <#if type.inputDataType2 == "ULong">
                     DecimalConverter.unsignedlongToDecimal(array2[j], rightDec);
                     <#elseif type.inputDataType2 == "Decimal">
-                    rightDec = new DecimalStructure(((DecimalBlock) rightInputVectorSlot).getRegion(j));
+                    rightDec = new DecimalStructure((rightInputVectorSlot.cast(DecimalBlock.class)).getRegion(j));
                     <#else>
                     DecimalConverter.longToDecimal(array2[j], rightDec, isRightUnsigned);
                     </#if>
@@ -116,14 +116,14 @@ public class ${className} extends AbstractVectorizedExpression {
                     FastDecimalUtils.${operator.decimalOp}(leftDec, rightDec, toValue);
                     <#else>
                     <#if type.inputDataType1 == "Decimal">
-                    leftDec = new DecimalStructure(((DecimalBlock) leftInputVectorSlot).getRegion(j));
-                    double leftDouble = DecimalConverter.decimalToDouble(leftDec);
-                    res[j] = leftDouble ${operator.doubleOp} array2[j];
+                    leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(j));
+                    double leftDouble = DecimalConverter.decimalToDouble(leftDec);
+                    res[j] = leftDouble ${operator.doubleOp} array2[j];
                     </#if>
                     <#if type.inputDataType2 == "Decimal">
-                    rightDec = new DecimalStructure(((DecimalBlock) rightInputVectorSlot).getRegion(j));
-                    double rightDouble = DecimalConverter.decimalToDouble(rightDec);
-                    res[j] = array1[j] ${operator.doubleOp} rightDouble;
+                    rightDec = new DecimalStructure((rightInputVectorSlot.cast(DecimalBlock.class)).getRegion(j));
+                    double rightDouble = DecimalConverter.decimalToDouble(rightDec);
+                    res[j] = array1[j] ${operator.doubleOp} rightDouble;
                     </#if>
                 }
@@ -150,7 +150,7 @@ public class ${className} extends AbstractVectorizedExpression {
                 <#if type.inputDataType1 == "ULong">
                 DecimalConverter.unsignedlongToDecimal(array1[i], leftDec);
                 <#elseif type.inputDataType1 == "Decimal">
-                leftDec = new DecimalStructure(((DecimalBlock) leftInputVectorSlot).getRegion(i));
+                leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(i));
                 <#else>
                 DecimalConverter.longToDecimal(array1[i], leftDec, isLeftUnsigned);
                 </#if>
@@ -159,7 +159,7 @@ public class ${className} extends AbstractVectorizedExpression {
                 <#if type.inputDataType2 == "ULong">
                 DecimalConverter.unsignedlongToDecimal(array2[i], rightDec);
                 <#elseif type.inputDataType2 == "Decimal">
-                rightDec = new DecimalStructure(((DecimalBlock) rightInputVectorSlot).getRegion(i));
+                rightDec = new DecimalStructure((rightInputVectorSlot.cast(DecimalBlock.class)).getRegion(i));
                 <#else>
                 DecimalConverter.longToDecimal(array2[i], rightDec, isRightUnsigned);
                 </#if>
@@ -168,18 +168,21 @@ public class ${className} extends AbstractVectorizedExpression {
                 FastDecimalUtils.${operator.decimalOp}(leftDec, rightDec, toValue);
                 <#else>
                 <#if type.inputDataType1 == "Decimal">
-                leftDec = new DecimalStructure(((DecimalBlock) leftInputVectorSlot).getRegion(i));
-                double leftDouble = DecimalConverter.decimalToDouble(leftDec);
-                res[i] = leftDouble ${operator.doubleOp} array2[i];
+                leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(i));
+                double leftDouble = DecimalConverter.decimalToDouble(leftDec);
+                res[i] = leftDouble ${operator.doubleOp} array2[i];
                 </#if>
                 <#if type.inputDataType2 == "Decimal">
-                rightDec = new DecimalStructure(((DecimalBlock) rightInputVectorSlot).getRegion(i));
-                double rightDouble = DecimalConverter.decimalToDouble(rightDec);
-                res[i] = array1[i] ${operator.doubleOp} rightDouble;
+                rightDec = new DecimalStructure((rightInputVectorSlot.cast(DecimalBlock.class)).getRegion(i));
+                double rightDouble = DecimalConverter.decimalToDouble(rightDec);
+                res[i] = array1[i] ${operator.doubleOp} rightDouble;
                 </#if>
             }
         }

+        <#if type.outputDataType == "Decimal">
+        outputVectorSlot.cast(DecimalBlock.class).setFullState();
+        </#if>
     }
 }
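Note: the newly added `setFullState()` call after each Decimal-output loop appears to flag the block's storage state once per batch rather than per row, after the raw memory segments have been filled. A minimal sketch of that idea (hypothetical state names; the real DecimalBlock tracks more than this):

```java
class DecimalBlockStateDemo {
    enum State { UNSET, FULL }          // FULL: every slot's raw segment is initialized
    private State state = State.UNSET;

    void writeRowRaw(int position) {
        // ... write the raw decimal bytes at position * DECIMAL_MEMORY_SIZE ...
    }

    void setFullState() { state = State.FULL; }

    public static void main(String[] args) {
        DecimalBlockStateDemo block = new DecimalBlockStateDemo();
        for (int i = 0; i < 3; i++) {
            block.writeRowRaw(i);       // hot loop stays free of state bookkeeping
        }
        block.setFullState();           // single flag update after the batch
        System.out.println(block.state);
    }
}
```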
diff --git a/polardbx-executor/src/main/codegen/templates/DecimalAddSubMulOperatorColumnConst.ftl b/polardbx-executor/src/main/codegen/templates/DecimalAddSubMulOperatorColumnConst.ftl
index c65f4766d..b7b440a31 100644
--- a/polardbx-executor/src/main/codegen/templates/DecimalAddSubMulOperatorColumnConst.ftl
+++ b/polardbx-executor/src/main/codegen/templates/DecimalAddSubMulOperatorColumnConst.ftl
@@ -74,12 +74,12 @@ public class ${className} extends AbstractVectorizedExpression {

         <#if type.inputDataType1 == "Decimal">
         <#else>
-        ${type.inputType1}[] array1 = ((${type.inputVectorType1}) leftInputVectorSlot).${type.inputType1}Array();
+        ${type.inputType1}[] array1 = (leftInputVectorSlot.cast(${type.inputVectorType1}.class)).${type.inputType1}Array();
         </#if>

         <#if type.outputDataType == "Decimal">
-        Slice output = ((DecimalBlock) outputVectorSlot).getMemorySegments();
+        Slice output = (outputVectorSlot.cast(DecimalBlock.class)).getMemorySegments();
         <#else>
-        ${type.outputType}[] res = ((${type.outputVectorType}) outputVectorSlot).${type.outputType}Array();
+        ${type.outputType}[] res = (outputVectorSlot.cast(${type.outputVectorType}.class)).${type.outputType}Array();
         </#if>

         DecimalStructure leftDec = new DecimalStructure();
@@ -124,7 +124,7 @@ public class ${className} extends AbstractVectorizedExpression {
                 <#if type.inputDataType1 == "ULong">
                 DecimalConverter.unsignedlongToDecimal(array1[j], leftDec);
                 <#elseif type.inputDataType1 == "Decimal">
-                leftDec = new DecimalStructure(((DecimalBlock) leftInputVectorSlot).getRegion(j));
+                leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(j));
                 <#else>
                 DecimalConverter.longToDecimal(array1[j], leftDec, isLeftUnsigned);
                 </#if>
@@ -133,9 +133,9 @@ public class ${className} extends AbstractVectorizedExpression {
                 FastDecimalUtils.${operator.decimalOp}(leftDec, rightDec, toValue);
                 <#else>
                 <#if type.inputDataType1 == "Decimal">
-                leftDec = new DecimalStructure(((DecimalBlock) leftInputVectorSlot).getRegion(j));
-                double leftDouble = DecimalConverter.decimalToDouble(leftDec);
-                res[j] = leftDouble ${operator.doubleOp} right;
+                leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(j));
+                double leftDouble = DecimalConverter.decimalToDouble(leftDec);
+                res[j] = leftDouble ${operator.doubleOp} right;
                 </#if>
                 <#if type.inputDataType2 == "Decimal">
                 res[j] = array1[j] ${operator.doubleOp} rightDouble;
@@ -162,7 +162,7 @@ public class ${className} extends AbstractVectorizedExpression {
                 <#if type.inputDataType1 == "ULong">
                 DecimalConverter.unsignedlongToDecimal(array1[i], leftDec);
                 <#elseif type.inputDataType1 == "Decimal">
-                leftDec = new DecimalStructure(((DecimalBlock) leftInputVectorSlot).getRegion(i));
+                leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(i));
                 <#else>
                 DecimalConverter.longToDecimal(array1[i], leftDec, isLeftUnsigned);
                 </#if>
@@ -171,9 +171,9 @@ public class ${className} extends AbstractVectorizedExpression {
                 FastDecimalUtils.${operator.decimalOp}(leftDec, rightDec, toValue);
                 <#else>
                 <#if type.inputDataType1 == "Decimal">
-                leftDec = new DecimalStructure(((DecimalBlock) leftInputVectorSlot).getRegion(i));
-                double leftDouble = DecimalConverter.decimalToDouble(leftDec);
-                res[i] = leftDouble ${operator.doubleOp} right;
+                leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(i));
+                double leftDouble = DecimalConverter.decimalToDouble(leftDec);
+                res[i] = leftDouble ${operator.doubleOp} right;
                 </#if>
                 <#if type.inputDataType2 == "Decimal">
                 res[i] = array1[i] ${operator.doubleOp} rightDouble;
@@ -181,6 +181,9 @@ public class ${className} extends AbstractVectorizedExpression {
             }
         }

+        <#if type.outputDataType == "Decimal">
+        outputVectorSlot.cast(DecimalBlock.class).setFullState();
+        </#if>
     }
 }
diff --git a/polardbx-executor/src/main/codegen/templates/DecimalAddSubMulOperatorConstColumn.ftl b/polardbx-executor/src/main/codegen/templates/DecimalAddSubMulOperatorConstColumn.ftl
index 931c602ae..9ee15f2f9 100644
--- a/polardbx-executor/src/main/codegen/templates/DecimalAddSubMulOperatorConstColumn.ftl
+++ b/polardbx-executor/src/main/codegen/templates/DecimalAddSubMulOperatorConstColumn.ftl
@@ -74,13 +74,13 @@ public class ${className} extends AbstractVectorizedExpression {

         <#if type.inputDataType2 == "Decimal">
         <#else>
-        ${type.inputType2}[] array2 = ((${type.inputVectorType2}) rightInputVectorSlot).${type.inputType2}Array();
+        ${type.inputType2}[] array2 = (rightInputVectorSlot.cast(${type.inputVectorType2}.class)).${type.inputType2}Array();
         </#if>

         <#if type.outputDataType == "Decimal">
-        Slice output = ((DecimalBlock) outputVectorSlot).getMemorySegments();
+        Slice output = (outputVectorSlot.cast(DecimalBlock.class)).getMemorySegments();
         <#else>
-        ${type.outputType}[] res = ((${type.outputVectorType}) outputVectorSlot).${type.outputType}Array();
+        ${type.outputType}[] res = (outputVectorSlot.cast(${type.outputVectorType}.class)).${type.outputType}Array();
         </#if>

         DecimalStructure leftDec = new DecimalStructure();
@@ -125,7 +125,7 @@ public class ${className} extends AbstractVectorizedExpression {
                 <#if type.inputDataType2 == "ULong">
                 DecimalConverter.unsignedlongToDecimal(array2[j], rightDec);
                 <#elseif type.inputDataType2 == "Decimal">
-                rightDec = new DecimalStructure(((DecimalBlock) rightInputVectorSlot).getRegion(j));
+                rightDec = new DecimalStructure((rightInputVectorSlot.cast(DecimalBlock.class)).getRegion(j));
                 <#else>
                 DecimalConverter.longToDecimal(array2[j], rightDec, isRightUnsigned);
                 </#if>
@@ -137,9 +137,9 @@ public class ${className} extends AbstractVectorizedExpression {
                 res[j] = leftDouble ${operator.doubleOp} array2[j];
                 </#if>
                 <#if type.inputDataType2 == "Decimal">
-                rightDec = new DecimalStructure(((DecimalBlock) rightInputVectorSlot).getRegion(j));
-                double rightDouble = DecimalConverter.decimalToDouble(rightDec);
-                res[j] = left ${operator.doubleOp} rightDouble;
+                rightDec = new DecimalStructure((rightInputVectorSlot.cast(DecimalBlock.class)).getRegion(j));
+                double rightDouble = DecimalConverter.decimalToDouble(rightDec);
+                res[j] = left ${operator.doubleOp} rightDouble;
                 </#if>
             }
@@ -163,7 +163,7 @@ public class ${className} extends AbstractVectorizedExpression {
                 <#if type.inputDataType2 == "ULong">
                 DecimalConverter.unsignedlongToDecimal(array2[i], rightDec);
                 <#elseif type.inputDataType2 == "Decimal">
-                rightDec = new DecimalStructure(((DecimalBlock) rightInputVectorSlot).getRegion(i));
+                rightDec = new DecimalStructure((rightInputVectorSlot.cast(DecimalBlock.class)).getRegion(i));
                 <#else>
                 DecimalConverter.longToDecimal(array2[i], rightDec, isRightUnsigned);
                 </#if>
@@ -175,13 +175,16 @@ public class ${className} extends AbstractVectorizedExpression {
                 res[i] = leftDouble ${operator.doubleOp} array2[i];
                 </#if>
                 <#if type.inputDataType2 == "Decimal">
-                rightDec = new DecimalStructure(((DecimalBlock) rightInputVectorSlot).getRegion(i));
-                double rightDouble = DecimalConverter.decimalToDouble(rightDec);
-                res[i] = left ${operator.doubleOp} rightDouble;
+                rightDec = new DecimalStructure((rightInputVectorSlot.cast(DecimalBlock.class)).getRegion(i));
+                double rightDouble = DecimalConverter.decimalToDouble(rightDec);
+                res[i] = left ${operator.doubleOp} rightDouble;
                 </#if>
             }
         }

+        <#if type.outputDataType == "Decimal">
+        outputVectorSlot.cast(DecimalBlock.class).setFullState();
+        </#if>
     }
 }
diff --git a/polardbx-executor/src/main/codegen/templates/DivideOperatorColumnColumn.ftl b/polardbx-executor/src/main/codegen/templates/DivideOperatorColumnColumn.ftl
index bab937db2..4c71f7aaa 100644
--- a/polardbx-executor/src/main/codegen/templates/DivideOperatorColumnColumn.ftl
+++ b/polardbx-executor/src/main/codegen/templates/DivideOperatorColumnColumn.ftl
@@ -58,19 +58,19 @@ public class ${className} extends AbstractVectorizedExpression {

         <#if type.inputDataType1 == "Decimal">
         <#else>
-        ${type.inputType1}[] array1 = ((${type.inputVectorType1}) leftInputVectorSlot).${type.inputType1}Array();
+        ${type.inputType1}[] array1 = (leftInputVectorSlot.cast(${type.inputVectorType1}.class)).${type.inputType1}Array();
         </#if>

         <#if type.inputDataType2 == "Decimal">
         <#else>
-        ${type.inputType2}[] array2 = ((${type.inputVectorType2}) rightInputVectorSlot).${type.inputType2}Array();
+        ${type.inputType2}[] array2 = (rightInputVectorSlot.cast(${type.inputVectorType2}.class)).${type.inputType2}Array();
         </#if>

         <#if type.outputDataType == "Decimal">
-        Slice output = ((DecimalBlock) outputVectorSlot).getMemorySegments();
+        Slice output = (outputVectorSlot.cast(DecimalBlock.class)).getMemorySegments();
         <#else>
-        ${type.outputType}[] res = ((${type.outputVectorType}) outputVectorSlot).${type.outputType}Array();
+        ${type.outputType}[] res = (outputVectorSlot.cast(${type.outputVectorType}.class)).${type.outputType}Array();
         </#if>

         DecimalStructure leftDec = new DecimalStructure();
@@ -113,7 +113,7 @@ public class ${className} extends AbstractVectorizedExpression {
                     <#if type.inputDataType1 == "ULong">
                     DecimalConverter.unsignedlongToDecimal(array1[j], leftDec);
                     <#elseif type.inputDataType1 == "Decimal">
-                    leftDec = new DecimalStructure(((DecimalBlock) leftInputVectorSlot).getRegion(j));
+                    leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(j));
                     <#else>
                     DecimalConverter.longToDecimal(array1[j], leftDec, isLeftUnsigned);
                     </#if>
@@ -122,7 +122,7 @@ public class ${className} extends AbstractVectorizedExpression {
                     <#if type.inputDataType2 == "ULong">
                     DecimalConverter.unsignedlongToDecimal(array2[j], rightDec);
                     <#elseif type.inputDataType2 == "Decimal">
-                    rightDec = new DecimalStructure(((DecimalBlock) rightInputVectorSlot).getRegion(j));
+                    rightDec = new DecimalStructure((rightInputVectorSlot.cast(DecimalBlock.class)).getRegion(j));
                     <#else>
                     DecimalConverter.longToDecimal(array2[j], rightDec, isRightUnsigned);
                     </#if>
@@ -139,14 +139,14 @@ public class ${className} extends AbstractVectorizedExpression {
                     }
                     <#else>
                     <#if type.inputDataType1 == "Decimal">
-                    leftDec = new DecimalStructure(((DecimalBlock) leftInputVectorSlot).getRegion(j));
-                    double leftDouble = DecimalConverter.decimalToDouble(leftDec);
-                    res[j] = leftDouble / array2[j];
+                    leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(j));
+                    double leftDouble = DecimalConverter.decimalToDouble(leftDec);
+                    res[j] = leftDouble / array2[j];
                     </#if>
                     <#if type.inputDataType2 == "Decimal">
-                    rightDec = new DecimalStructure(((DecimalBlock) rightInputVectorSlot).getRegion(j));
-                    double rightDouble = DecimalConverter.decimalToDouble(rightDec);
-                    res[j] = array1[j] / rightDouble;
+                    rightDec = new DecimalStructure((rightInputVectorSlot.cast(DecimalBlock.class)).getRegion(j));
+                    double rightDouble = DecimalConverter.decimalToDouble(rightDec);
+                    res[j] = array1[j] / rightDouble;
                     </#if>
                 }
@@ -179,7 +179,7 @@ public class ${className} extends AbstractVectorizedExpression {
                 <#if type.inputDataType1 == "ULong">
                 DecimalConverter.unsignedlongToDecimal(array1[i], leftDec);
                 <#elseif type.inputDataType1 == "Decimal">
-                leftDec = new DecimalStructure(((DecimalBlock) leftInputVectorSlot).getRegion(i));
+                leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(i));
                 <#else>
                 DecimalConverter.longToDecimal(array1[i], leftDec, isLeftUnsigned);
                 </#if>
@@ -188,7 +188,7 @@ public class ${className} extends AbstractVectorizedExpression {
                 <#if type.inputDataType2 == "ULong">
                 DecimalConverter.unsignedlongToDecimal(array2[i], rightDec);
                 <#elseif type.inputDataType2 == "Decimal">
-                rightDec = new DecimalStructure(((DecimalBlock) rightInputVectorSlot).getRegion(i));
+                rightDec = new DecimalStructure((rightInputVectorSlot.cast(DecimalBlock.class)).getRegion(i));
                 <#else>
                 DecimalConverter.longToDecimal(array2[i], rightDec, isRightUnsigned);
                 </#if>
@@ -205,18 +205,21 @@ public class ${className} extends AbstractVectorizedExpression {
                 }
                 <#else>
                 <#if type.inputDataType1 == "Decimal">
-                leftDec = new DecimalStructure(((DecimalBlock) leftInputVectorSlot).getRegion(i));
-                double leftDouble = DecimalConverter.decimalToDouble(leftDec);
-                res[i] = leftDouble / array2[i];
+                leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(i));
+                double leftDouble = DecimalConverter.decimalToDouble(leftDec);
+                res[i] = leftDouble / array2[i];
                 </#if>
                 <#if type.inputDataType2 == "Decimal">
-                rightDec = new DecimalStructure(((DecimalBlock) rightInputVectorSlot).getRegion(i));
-                double rightDouble = DecimalConverter.decimalToDouble(rightDec);
-                res[i] = array1[i] / rightDouble;
+                rightDec = new DecimalStructure((rightInputVectorSlot.cast(DecimalBlock.class)).getRegion(i));
+                double rightDouble = DecimalConverter.decimalToDouble(rightDec);
+                res[i] = array1[i] / rightDouble;
                 </#if>
             }
         }

+        <#if type.outputDataType == "Decimal">
+        outputVectorSlot.cast(DecimalBlock.class).setFullState();
+        </#if>
     }
 }
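Note: in the mixed Decimal/floating branches above, the decimal side is converted once via `DecimalConverter.decimalToDouble(...)` and the arithmetic then runs entirely in `double`. A sketch with `BigDecimal` standing in for `DecimalStructure`:

```java
import java.math.BigDecimal;

class DecimalDoubleDivideDemo {
    public static void main(String[] args) {
        BigDecimal leftDec = new BigDecimal("10.50"); // decimal operand
        double[] array2 = {2.0, 4.0};                 // double column
        double[] res = new double[2];
        double leftDouble = leftDec.doubleValue();    // ~ decimalToDouble(leftDec)
        for (int i = 0; i < res.length; i++) {
            res[i] = leftDouble / array2[i];          // plain double division
        }
        System.out.println(java.util.Arrays.toString(res)); // [5.25, 2.625]
    }
}
```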
diff --git a/polardbx-executor/src/main/codegen/templates/DivideOperatorColumnConst.ftl b/polardbx-executor/src/main/codegen/templates/DivideOperatorColumnConst.ftl
index a79374cd1..015e2ccfe 100644
--- a/polardbx-executor/src/main/codegen/templates/DivideOperatorColumnConst.ftl
+++ b/polardbx-executor/src/main/codegen/templates/DivideOperatorColumnConst.ftl
@@ -83,12 +83,12 @@ public class ${className} extends AbstractVectorizedExpression {

         <#if type.inputDataType1 == "Decimal">
         <#else>
-        ${type.inputType1}[] array1 = ((${type.inputVectorType1}) leftInputVectorSlot).${type.inputType1}Array();
+        ${type.inputType1}[] array1 = (leftInputVectorSlot.cast(${type.inputVectorType1}.class)).${type.inputType1}Array();
         </#if>

         <#if type.outputDataType == "Decimal">
-        Slice output = ((DecimalBlock) outputVectorSlot).getMemorySegments();
+        Slice output = (outputVectorSlot.cast(DecimalBlock.class)).getMemorySegments();
         <#else>
-        ${type.outputType}[] res = ((${type.outputVectorType}) outputVectorSlot).${type.outputType}Array();
+        ${type.outputType}[] res = (outputVectorSlot.cast(${type.outputVectorType}.class)).${type.outputType}Array();
         </#if>

         DecimalStructure leftDec = new DecimalStructure();
@@ -134,7 +134,7 @@ public class ${className} extends AbstractVectorizedExpression {
                 <#if type.inputDataType1 == "ULong">
                 DecimalConverter.unsignedlongToDecimal(array1[j], leftDec);
                 <#elseif type.inputDataType1 == "Decimal">
-                leftDec = new DecimalStructure(((DecimalBlock) leftInputVectorSlot).getRegion(j));
+                leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(j));
                 <#else>
                 DecimalConverter.longToDecimal(array1[j], leftDec, isLeftUnsigned);
                 </#if>
@@ -151,9 +151,9 @@ public class ${className} extends AbstractVectorizedExpression {
                 }
                 <#else>
                 <#if type.inputDataType1 == "Decimal">
-                leftDec = new DecimalStructure(((DecimalBlock) leftInputVectorSlot).getRegion(j));
-                double leftDouble = DecimalConverter.decimalToDouble(leftDec);
-                res[j] = leftDouble / right;
+                leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(j));
+                double leftDouble = DecimalConverter.decimalToDouble(leftDec);
+                res[j] = leftDouble / right;
                 </#if>
                 <#if type.inputDataType2 == "Decimal">
                 res[j] = array1[j] / rightDouble;
@@ -181,7 +181,7 @@ public class ${className} extends AbstractVectorizedExpression {
                 <#if type.inputDataType1 == "ULong">
                 DecimalConverter.unsignedlongToDecimal(array1[i], leftDec);
                 <#elseif type.inputDataType1 == "Decimal">
-                leftDec = new DecimalStructure(((DecimalBlock) leftInputVectorSlot).getRegion(i));
+                leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(i));
                 <#else>
                 DecimalConverter.longToDecimal(array1[i], leftDec, isLeftUnsigned);
                 </#if>
@@ -198,9 +198,9 @@ public class ${className} extends AbstractVectorizedExpression {
                 }
                 <#else>
                 <#if type.inputDataType1 == "Decimal">
-                leftDec = new DecimalStructure(((DecimalBlock) leftInputVectorSlot).getRegion(i));
-                double leftDouble = DecimalConverter.decimalToDouble(leftDec);
-                res[i] = leftDouble / right;
+                leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(i));
+                double leftDouble = DecimalConverter.decimalToDouble(leftDec);
+                res[i] = leftDouble / right;
                 </#if>
                 <#if type.inputDataType2 == "Decimal">
                 res[i] = array1[i] / rightDouble;
@@ -208,6 +208,9 @@ public class ${className} extends AbstractVectorizedExpression {
             }
         }

+        <#if type.outputDataType == "Decimal">
+        outputVectorSlot.cast(DecimalBlock.class).setFullState();
+        </#if>
     }
 }
diff --git a/polardbx-executor/src/main/codegen/templates/DivideOperatorConstColumn.ftl b/polardbx-executor/src/main/codegen/templates/DivideOperatorConstColumn.ftl
index 002423a76..e5d324581 100644
--- a/polardbx-executor/src/main/codegen/templates/DivideOperatorConstColumn.ftl
+++ b/polardbx-executor/src/main/codegen/templates/DivideOperatorConstColumn.ftl
@@ -83,13 +83,13 @@ public class ${className} extends AbstractVectorizedExpression {

         <#if type.inputDataType2 == "Decimal">
         <#else>
-        ${type.inputType2}[] array2 = ((${type.inputVectorType2}) rightInputVectorSlot).${type.inputType2}Array();
+        ${type.inputType2}[] array2 = (rightInputVectorSlot.cast(${type.inputVectorType2}.class)).${type.inputType2}Array();
         </#if>

         <#if type.outputDataType == "Decimal">
-        Slice output = ((DecimalBlock) outputVectorSlot).getMemorySegments();
+        Slice output = (outputVectorSlot.cast(DecimalBlock.class)).getMemorySegments();
         <#else>
-        ${type.outputType}[] res = ((${type.outputVectorType}) outputVectorSlot).${type.outputType}Array();
+        ${type.outputType}[] res = (outputVectorSlot.cast(${type.outputVectorType}.class)).${type.outputType}Array();
         </#if>

         DecimalStructure leftDec = new DecimalStructure();
@@ -140,7 +140,7 @@ public class ${className} extends AbstractVectorizedExpression {
                 <#if type.inputDataType2 == "ULong">
                 DecimalConverter.unsignedlongToDecimal(array2[j], rightDec);
                 <#elseif type.inputDataType2 == "Decimal">
-                rightDec = new DecimalStructure( ((DecimalBlock) rightInputVectorSlot).getRegion(j));
+                rightDec = new DecimalStructure( (rightInputVectorSlot.cast(DecimalBlock.class)).getRegion(j));
                 <#else>
                 DecimalConverter.longToDecimal(array2[j], rightDec, isRightUnsigned);
                 </#if>
@@ -160,9 +160,9 @@ public class ${className} extends AbstractVectorizedExpression {
                 res[j] = leftDouble / array2[j];
                 </#if>
                 <#if type.inputDataType2 == "Decimal">
-                rightDec = new DecimalStructure( ((DecimalBlock) rightInputVectorSlot).getRegion(j));
-                double rightDouble = DecimalConverter.decimalToDouble(rightDec);
-                res[j] = left / rightDouble;
+                rightDec = new DecimalStructure( (rightInputVectorSlot.cast(DecimalBlock.class)).getRegion(j));
+                double rightDouble = DecimalConverter.decimalToDouble(rightDec);
+                res[j] = left / rightDouble;
                 </#if>
             }
@@ -192,7 +192,7 @@ public class ${className} extends AbstractVectorizedExpression {
                 <#if type.inputDataType2 == "ULong">
                 DecimalConverter.unsignedlongToDecimal(array2[i], rightDec);
                 <#elseif type.inputDataType2 == "Decimal">
-                rightDec = new DecimalStructure(((DecimalBlock) rightInputVectorSlot).getRegion(i));
+                rightDec = new DecimalStructure((rightInputVectorSlot.cast(DecimalBlock.class)).getRegion(i));
                 <#else>
                 DecimalConverter.longToDecimal(array2[i], rightDec, isRightUnsigned);
                 </#if>
@@ -212,13 +212,16 @@ public class ${className} extends AbstractVectorizedExpression {
                 res[i] = leftDouble / array2[i];
                 </#if>
                 <#if type.inputDataType2 == "Decimal">
-                rightDec = new DecimalStructure(((DecimalBlock) rightInputVectorSlot).getRegion(i));
-                double rightDouble = DecimalConverter.decimalToDouble(rightDec);
-                res[i] = left / rightDouble;
+                rightDec = new DecimalStructure((rightInputVectorSlot.cast(DecimalBlock.class)).getRegion(i));
+                double rightDouble = DecimalConverter.decimalToDouble(rightDec);
+                res[i] = left / rightDouble;
                 </#if>
             }
         }

+        <#if type.outputDataType == "Decimal">
+        outputVectorSlot.cast(DecimalBlock.class).setFullState();
+        </#if>
     }
 }
diff --git a/polardbx-executor/src/main/codegen/templates/FilterBooleanTestOperator.ftl b/polardbx-executor/src/main/codegen/templates/FilterBooleanTestOperator.ftl
index 2c45db729..ed786233a 100644
--- a/polardbx-executor/src/main/codegen/templates/FilterBooleanTestOperator.ftl
+++ b/polardbx-executor/src/main/codegen/templates/FilterBooleanTestOperator.ftl
@@ -27,26 +27,26 @@ public class ${className} extends AbstractVectorizedExpression {
         super(null, outputIndex, children);
     }

-    @Override
-    public void eval(EvaluationContext ctx) {
-        super.evalChildren(ctx);
-        MutableChunk chunk = ctx.getPreAllocatedChunk();
-        int batchSize = chunk.batchSize();
-        boolean isSelectionInUse = chunk.isSelectionInUse();
-        int[] sel = chunk.selection();
+    @Override
+    public void eval(EvaluationContext ctx) {
+        super.evalChildren(ctx);
+        MutableChunk chunk = ctx.getPreAllocatedChunk();
+        int batchSize = chunk.batchSize();
+        boolean isSelectionInUse = chunk.isSelectionInUse();
+        int[] sel = chunk.selection();

-        RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType());
-        ${type.inputType}[] inputArray = ((${type.inputVectorType}) inputVectorSlot).${type.inputType}Array();
+        RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType());
+        ${type.inputType}[] inputArray = (inputVectorSlot.cast(${type.inputVectorType}.class)).${type.inputType}Array();

-        boolean[] inputNulls = inputVectorSlot.nulls();
+        boolean[] inputNulls = inputVectorSlot.nulls();

-        int newSize = 0;
-        if (inputVectorSlot.hasNull()) {
-            if (isSelectionInUse) {
-                for (int i = 0; i < batchSize; i++) {
-                    int j = sel[i];
-                    boolean inputNull = inputNulls[j];
-                    <#if operator.classHeader = "IsTrue">
+        int newSize = 0;
+        if (inputVectorSlot.hasNull()) {
+            if (isSelectionInUse) {
+                for (int i = 0; i < batchSize; i++) {
+                    int j = sel[i];
+                    boolean inputNull = inputNulls[j];
+                    <#if operator.classHeader = "IsTrue">
                     if (!inputNull && (inputArray[j] != 0)) {
                     </#if>
                     <#if operator.classHeader = "IsNotTrue">
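Note: the filter templates above produce no output column; they compact the chunk's selection vector in place so that downstream operators only see surviving rows. A hedged sketch of the IS TRUE case with illustrative names:

```java
class IsTrueFilterDemo {
    // Returns the new batch size; sel[0..newSize) holds the surviving row ids.
    static int filterIsTrue(long[] input, boolean[] nulls, boolean hasNull,
                            int[] sel, boolean selInUse, int batchSize) {
        int newSize = 0;
        for (int i = 0; i < batchSize; i++) {
            int j = selInUse ? sel[i] : i;
            if ((!hasNull || !nulls[j]) && input[j] != 0) {
                sel[newSize++] = j;   // keep row j
            }
        }
        return newSize;
    }

    public static void main(String[] args) {
        long[] input = {1, 0, 1, 1};
        boolean[] nulls = {false, false, true, false};
        int[] sel = new int[4];
        int n = filterIsTrue(input, nulls, true, sel, false, 4);
        System.out.println(n + " rows pass"); // 2 rows pass (rows 0 and 3)
    }
}
```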
diff --git a/polardbx-executor/src/main/codegen/templates/FilterComparisonBinaryOperatorColumnColumn.ftl b/polardbx-executor/src/main/codegen/templates/FilterComparisonBinaryOperatorColumnColumn.ftl
index ac0ad9ab7..6d7ec2483 100644
--- a/polardbx-executor/src/main/codegen/templates/FilterComparisonBinaryOperatorColumnColumn.ftl
+++ b/polardbx-executor/src/main/codegen/templates/FilterComparisonBinaryOperatorColumnColumn.ftl
@@ -28,28 +28,28 @@ public class ${className} extends AbstractVectorizedExpression {
     }

     @Override
-    public void eval(EvaluationContext ctx) {
-        super.evalChildren(ctx);
-        MutableChunk chunk = ctx.getPreAllocatedChunk();
-        int batchSize = chunk.batchSize();
-        boolean isSelectionInUse = chunk.isSelectionInUse();
-        int[] sel = chunk.selection();
+    public void eval(EvaluationContext ctx) {
+        super.evalChildren(ctx);
+        MutableChunk chunk = ctx.getPreAllocatedChunk();
+        int batchSize = chunk.batchSize();
+        boolean isSelectionInUse = chunk.isSelectionInUse();
+        int[] sel = chunk.selection();

-        RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType());
-        RandomAccessBlock rightInputVectorSlot = chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType());
+        RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType());
+        RandomAccessBlock rightInputVectorSlot = chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType());

-        ${type.inputType1}[] array1 = ((${type.inputVectorType1}) leftInputVectorSlot).${type.inputType1}Array();
-        ${type.inputType2}[] array2 = ((${type.inputVectorType2}) rightInputVectorSlot).${type.inputType2}Array();
+        ${type.inputType1}[] array1 = (leftInputVectorSlot.cast(${type.inputVectorType1}.class)).${type.inputType1}Array();
+        ${type.inputType2}[] array2 = (rightInputVectorSlot.cast(${type.inputVectorType2}.class)).${type.inputType2}Array();

-        int newSize = 0;
+        int newSize = 0;

         <#if operator.classHeader != "SEQ">
-        newSize = VectorizedExpressionUtils.filterNulls(leftInputVectorSlot, isSelectionInUse, sel, batchSize);
-        if(newSize < batchSize) {
-            chunk.setBatchSize(newSize);
-            chunk.setSelectionInUse(true);
-            batchSize = newSize;
-            isSelectionInUse = true;
-        }
+        newSize = VectorizedExpressionUtils.filterNulls(leftInputVectorSlot, isSelectionInUse, sel, batchSize);
+        if(newSize < batchSize) {
+            chunk.setBatchSize(newSize);
+            chunk.setSelectionInUse(true);
+            batchSize = newSize;
+            isSelectionInUse = true;
+        }
         newSize = VectorizedExpressionUtils.filterNulls(rightInputVectorSlot, isSelectionInUse, sel, batchSize);
         if(newSize < batchSize) {
             chunk.setBatchSize(newSize);
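Note: the `filterNulls(...)` pre-pass above drops null rows from the selection before the actual comparison runs, so the compare loop never has to test nulls. Semantics inferred from the call site; the real utility may differ in detail:

```java
class FilterNullsDemo {
    static int filterNulls(boolean[] nulls, boolean selInUse, int[] sel, int batchSize) {
        int newSize = 0;
        for (int i = 0; i < batchSize; i++) {
            int j = selInUse ? sel[i] : i;
            if (!nulls[j]) {
                sel[newSize++] = j;    // survivors are compacted into sel
            }
        }
        return newSize;                // caller shrinks batchSize if smaller
    }

    public static void main(String[] args) {
        boolean[] nulls = {false, true, false};
        int[] sel = new int[3];
        int newSize = filterNulls(nulls, false, sel, 3);
        System.out.println(newSize);   // 2 -> selection is now in use
    }
}
```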
diff --git a/polardbx-executor/src/main/codegen/templates/FilterComparisonBinaryOperatorColumnConst.ftl b/polardbx-executor/src/main/codegen/templates/FilterComparisonBinaryOperatorColumnConst.ftl
index 78f3a324c..022fca6ce 100644
--- a/polardbx-executor/src/main/codegen/templates/FilterComparisonBinaryOperatorColumnConst.ftl
+++ b/polardbx-executor/src/main/codegen/templates/FilterComparisonBinaryOperatorColumnConst.ftl
@@ -33,27 +33,27 @@ public class ${className} extends AbstractVectorizedExpression {
     }

     @Override
-    public void eval(EvaluationContext ctx) {
-        children[0].eval(ctx);
+    public void eval(EvaluationContext ctx) {
+        children[0].eval(ctx);

-        MutableChunk chunk = ctx.getPreAllocatedChunk();
-        int batchSize = chunk.batchSize();
-        boolean isSelectionInUse = chunk.isSelectionInUse();
-        int[] sel = chunk.selection();
+        MutableChunk chunk = ctx.getPreAllocatedChunk();
+        int batchSize = chunk.batchSize();
+        boolean isSelectionInUse = chunk.isSelectionInUse();
+        int[] sel = chunk.selection();

-        RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType());
+        RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType());

-        ${type.inputType1}[] array1 = ((${type.inputVectorType1}) leftInputVectorSlot).${type.inputType1}Array();
+        ${type.inputType1}[] array1 = (leftInputVectorSlot.cast(${type.inputVectorType1}.class)).${type.inputType1}Array();

-        int newSize = 0;
+        int newSize = 0;

         <#if operator.classHeader != "SEQ">
-        newSize = VectorizedExpressionUtils.filterNulls(leftInputVectorSlot, isSelectionInUse, sel, batchSize);
-        if(newSize < batchSize) {
-            chunk.setBatchSize(newSize);
-            chunk.setSelectionInUse(true);
-            batchSize = newSize;
-            isSelectionInUse = true;
-        }
+        newSize = VectorizedExpressionUtils.filterNulls(leftInputVectorSlot, isSelectionInUse, sel, batchSize);
+        if(newSize < batchSize) {
+            chunk.setBatchSize(newSize);
+            chunk.setSelectionInUse(true);
+            batchSize = newSize;
+            isSelectionInUse = true;
+        }
         </#if>

         newSize = 0;
         if (isSelectionInUse) {
diff --git a/polardbx-executor/src/main/codegen/templates/FilterComparisonBinaryOperatorConstColumn.ftl b/polardbx-executor/src/main/codegen/templates/FilterComparisonBinaryOperatorConstColumn.ftl
index c0b65c7e6..323e706b8 100644
--- a/polardbx-executor/src/main/codegen/templates/FilterComparisonBinaryOperatorConstColumn.ftl
+++ b/polardbx-executor/src/main/codegen/templates/FilterComparisonBinaryOperatorConstColumn.ftl
@@ -33,27 +33,27 @@ public class ${className} extends AbstractVectorizedExpression {
     }

     @Override
-    public void eval(EvaluationContext ctx) {
-        children[1].eval(ctx);
+    public void eval(EvaluationContext ctx) {
+        children[1].eval(ctx);

-        MutableChunk chunk = ctx.getPreAllocatedChunk();
-        int batchSize = chunk.batchSize();
-        boolean isSelectionInUse = chunk.isSelectionInUse();
-        int[] sel = chunk.selection();
+        MutableChunk chunk = ctx.getPreAllocatedChunk();
+        int batchSize = chunk.batchSize();
+        boolean isSelectionInUse = chunk.isSelectionInUse();
+        int[] sel = chunk.selection();

-        RandomAccessBlock rightInputVectorSlot = chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType());
+        RandomAccessBlock rightInputVectorSlot = chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType());

-        ${type.inputType2}[] array2 = ((${type.inputVectorType2}) rightInputVectorSlot).${type.inputType2}Array();
+        ${type.inputType2}[] array2 = (rightInputVectorSlot.cast(${type.inputVectorType2}.class)).${type.inputType2}Array();

-        int newSize = 0;
+        int newSize = 0;

         <#if operator.classHeader != "SEQ">
-        newSize = VectorizedExpressionUtils.filterNulls(rightInputVectorSlot, isSelectionInUse, sel, batchSize);
-        if(newSize < batchSize) {
-            chunk.setBatchSize(newSize);
-            chunk.setSelectionInUse(true);
-            batchSize = newSize;
-            isSelectionInUse = true;
-        }
+        newSize = VectorizedExpressionUtils.filterNulls(rightInputVectorSlot, isSelectionInUse, sel, batchSize);
+        if(newSize < batchSize) {
+            chunk.setBatchSize(newSize);
+            chunk.setSelectionInUse(true);
+            batchSize = newSize;
+            isSelectionInUse = true;
+        }
         </#if>

         newSize = 0;
         if (isSelectionInUse) {
diff --git a/polardbx-executor/src/main/codegen/templates/FilterNullTestOperator.ftl b/polardbx-executor/src/main/codegen/templates/FilterNullTestOperator.ftl
index afcfdcf61..e34900940 100644
--- a/polardbx-executor/src/main/codegen/templates/FilterNullTestOperator.ftl
+++ b/polardbx-executor/src/main/codegen/templates/FilterNullTestOperator.ftl
@@ -27,27 +27,27 @@ public class ${className} extends AbstractVectorizedExpression {
         super(null, outputIndex, children);
     }

-    @Override
-    public void eval(EvaluationContext ctx) {
-        super.evalChildren(ctx);
-        MutableChunk chunk = ctx.getPreAllocatedChunk();
-        int batchSize = chunk.batchSize();
-        boolean isSelectionInUse = chunk.isSelectionInUse();
-        int[] sel = chunk.selection();
+    @Override
+    public void eval(EvaluationContext ctx) {
+        super.evalChildren(ctx);
+        MutableChunk chunk = ctx.getPreAllocatedChunk();
+        int batchSize = chunk.batchSize();
+        boolean isSelectionInUse = chunk.isSelectionInUse();
+        int[] sel = chunk.selection();

-        RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType());
+        RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType());

-        ${type.inputType}[] inputArray = ((${type.inputVectorType}) inputVectorSlot).${type.inputType}Array();
-        boolean[] inputNulls = inputVectorSlot.nulls();
+        ${type.inputType}[] inputArray = (inputVectorSlot.cast(${type.inputVectorType}.class)).${type.inputType}Array();
+        boolean[] inputNulls = inputVectorSlot.nulls();

-        int newSize = 0;
-        if (inputVectorSlot.hasNull()) {
-            if (isSelectionInUse) {
-                for (int i = 0; i < batchSize; i++) {
-                    int j = sel[i];
-                    <#if operator.classHeader = "IsNull">
-                    if (inputNulls[j]) {
-                    </#if>
+        int newSize = 0;
+        if (inputVectorSlot.hasNull()) {
+            if (isSelectionInUse) {
+                for (int i = 0; i < batchSize; i++) {
+                    int j = sel[i];
+                    <#if operator.classHeader = "IsNull">
+                    if (inputNulls[j]) {
+                    </#if>
                     <#if operator.classHeader = "IsNotNull">
                     if (!inputNulls[j]) {
diff --git a/polardbx-executor/src/main/codegen/templates/LogicalBinaryOperatorColumnColumn.ftl b/polardbx-executor/src/main/codegen/templates/LogicalBinaryOperatorColumnColumn.ftl
index f66fdcc11..0417671e0 100644
--- a/polardbx-executor/src/main/codegen/templates/LogicalBinaryOperatorColumnColumn.ftl
+++ b/polardbx-executor/src/main/codegen/templates/LogicalBinaryOperatorColumnColumn.ftl
@@ -28,33 +28,33 @@ public class ${className} extends AbstractVectorizedExpression {

     @Override
     public void eval(EvaluationContext ctx) {
-        super.evalChildren(ctx);
-        MutableChunk chunk = ctx.getPreAllocatedChunk();
-        int batchSize = chunk.batchSize();
-        boolean isSelectionInUse = chunk.isSelectionInUse();
-        int[] sel = chunk.selection();
+        super.evalChildren(ctx);
+        MutableChunk chunk = ctx.getPreAllocatedChunk();
+        int batchSize = chunk.batchSize();
+        boolean isSelectionInUse = chunk.isSelectionInUse();
+        int[] sel = chunk.selection();

-        RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType);
-        RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType());
-        RandomAccessBlock rightInputVectorSlot = chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType());
+        RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType);
+        RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType());
+        RandomAccessBlock rightInputVectorSlot = chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType());

-        ${type.inputType1}[] array1 = ((${type.inputVectorType1}) leftInputVectorSlot).${type.inputType1}Array();
-        boolean[] nulls1 = leftInputVectorSlot.nulls();
-        boolean leftInputHasNull = leftInputVectorSlot.hasNull();
-        ${type.inputType2}[] array2 = ((${type.inputVectorType2}) rightInputVectorSlot).${type.inputType2}Array();
-        boolean[] nulls2 = rightInputVectorSlot.nulls();
-        boolean rightInputHasNull = rightInputVectorSlot.hasNull();
-        long[] res = ((LongBlock) outputVectorSlot).longArray();
-        boolean[] outputNulls = outputVectorSlot.nulls();
-        outputVectorSlot.setHasNull(leftInputVectorSlot.hasNull() | rightInputVectorSlot.hasNull());
+        ${type.inputType1}[] array1 = (leftInputVectorSlot.cast(${type.inputVectorType1}.class)).${type.inputType1}Array();
+        boolean[] nulls1 = leftInputVectorSlot.nulls();
+        boolean leftInputHasNull = leftInputVectorSlot.hasNull();
+        ${type.inputType2}[] array2 = (rightInputVectorSlot.cast(${type.inputVectorType2}.class)).${type.inputType2}Array();
+        boolean[] nulls2 = rightInputVectorSlot.nulls();
+        boolean rightInputHasNull = rightInputVectorSlot.hasNull();
+        long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray();
+        boolean[] outputNulls = outputVectorSlot.nulls();
+        outputVectorSlot.setHasNull(leftInputVectorSlot.hasNull() | rightInputVectorSlot.hasNull());

-        if (isSelectionInUse) {
-            for (int i = 0; i < batchSize; i++) {
-                int j = sel[i];
-                boolean null1 = !leftInputHasNull ? false : nulls1[j];
-                boolean null2 = !rightInputHasNull ? false : nulls2[j];
-                boolean b1 = (array1[j] != 0);
-                boolean b2 = (array2[j] != 0);
+        if (isSelectionInUse) {
+            for (int i = 0; i < batchSize; i++) {
+                int j = sel[i];
+                boolean null1 = !leftInputHasNull ? false : nulls1[j];
+                boolean null2 = !rightInputHasNull ? false : nulls2[j];
+                boolean b1 = (array1[j] != 0);
+                boolean b2 = (array2[j] != 0);

                 <#if operator.classHeader = "And">
                 outputNulls[j] = (null1 && null2) || (null1 && b2) || (null2 && b1);
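Note: the AND template computes SQL three-valued logic without branches: the result is FALSE when either side is definitely false, TRUE when both are definitely true, and NULL otherwise. A tiny brute-force check of the null formula used above:

```java
class ThreeValuedAndDemo {
    public static void main(String[] args) {
        boolean[] bools = {false, true};
        for (boolean null1 : bools) for (boolean b1 : bools)
        for (boolean null2 : bools) for (boolean b2 : bools) {
            // null formula from the template
            boolean outNull = (null1 && null2) || (null1 && b2) || (null2 && b1);
            // value formula: FALSE dominates NULL, so only definite falses force FALSE
            boolean outVal = !((!null1 && !b1) || (!null2 && !b2));
            String lhs = null1 ? "NULL" : String.valueOf(b1);
            String rhs = null2 ? "NULL" : String.valueOf(b2);
            String res = outNull ? "NULL" : String.valueOf(outVal);
            System.out.println(lhs + " AND " + rhs + " = " + res);
        }
    }
}
```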
outputVectorSlot.nulls(); + outputVectorSlot.setHasNull(leftInputVectorSlot.hasNull() | rightIsNull); - if (isSelectionInUse) { - for (int i = 0; i < batchSize; i++) { - int j = sel[i]; - boolean null1 = !leftInputHasNull ? false : nulls1[j]; - boolean b1 = (array1[j] != 0); + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + boolean null1 = !leftInputHasNull ? false : nulls1[j]; + boolean b1 = (array1[j] != 0); - <#if operator.classHeader = "And"> + <#if operator.classHeader = "And"> outputNulls[j] = (null1 && rightIsNull) || (null1 && right) || (rightIsNull && b1); res[j] = ((!null1 && !b1) || (!rightIsNull && !right)) ? LongBlock.FALSE_VALUE : LongBlock.TRUE_VALUE; diff --git a/polardbx-executor/src/main/codegen/templates/LogicalBinaryOperatorConstColumn.ftl b/polardbx-executor/src/main/codegen/templates/LogicalBinaryOperatorConstColumn.ftl index 5e89b71a8..8ed82835b 100644 --- a/polardbx-executor/src/main/codegen/templates/LogicalBinaryOperatorConstColumn.ftl +++ b/polardbx-executor/src/main/codegen/templates/LogicalBinaryOperatorConstColumn.ftl @@ -44,20 +44,20 @@ public class ${className} extends AbstractVectorizedExpression { RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); RandomAccessBlock rightInputVectorSlot = chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType()); - ${type.inputType2}[] array2 = ((${type.inputVectorType2}) rightInputVectorSlot).${type.inputType2}Array(); + ${type.inputType2}[] array2 = (rightInputVectorSlot.cast(${type.inputVectorType2}.class)).${type.inputType2}Array(); boolean[] nulls2 = rightInputVectorSlot.nulls(); boolean rightInputHasNull = rightInputVectorSlot.hasNull(); - long[] res = ((LongBlock) outputVectorSlot).longArray(); + long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray(); boolean[] outputNulls = outputVectorSlot.nulls(); outputVectorSlot.setHasNull(leftIsNull | rightInputVectorSlot.hasNull()); if (isSelectionInUse) { - for (int i = 0; i < batchSize; i++) { - int j = sel[i]; - boolean null2 = !rightInputHasNull ? false : nulls2[j]; - boolean b2 = (array2[j] != 0); + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + boolean null2 = !rightInputHasNull ? false : nulls2[j]; + boolean b2 = (array2[j] != 0); - <#if operator.classHeader = "And"> + <#if operator.classHeader = "And"> outputNulls[j] = (leftIsNull && null2) || (leftIsNull && b2) || (null2 && left); res[j] = ((!leftIsNull && !left) || (!null2 && !b2)) ? 
LongBlock.FALSE_VALUE : LongBlock.TRUE_VALUE; diff --git a/polardbx-executor/src/main/codegen/templates/NotOperator.ftl b/polardbx-executor/src/main/codegen/templates/NotOperator.ftl index 362060fc7..46239addd 100644 --- a/polardbx-executor/src/main/codegen/templates/NotOperator.ftl +++ b/polardbx-executor/src/main/codegen/templates/NotOperator.ftl @@ -25,28 +25,28 @@ public class ${className} extends AbstractVectorizedExpression { } @Override - public void eval(EvaluationContext ctx) { - super.evalChildren(ctx); - MutableChunk chunk = ctx.getPreAllocatedChunk(); - int batchSize = chunk.batchSize(); - boolean isSelectionInUse = chunk.isSelectionInUse(); - int[] sel = chunk.selection(); - - RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); - RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - - ${type.inputType}[] inputArray = ((${type.inputVectorType}) inputVectorSlot).${type.inputType}Array(); - long[] res = ((LongBlock) outputVectorSlot).longArray(); - - if (isSelectionInUse) { - for (int i = 0; i < batchSize; i++) { - int j = sel[i]; - res[j] = (inputArray[j] == 0) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; - } - } else { - for (int i = 0; i < batchSize; i++) { - res[i] = (inputArray[i] == 0) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; - } + public void eval(EvaluationContext ctx) { + super.evalChildren(ctx); + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); + + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); + + ${type.inputType}[] inputArray = (inputVectorSlot.cast(${type.inputVectorType}.class)).${type.inputType}Array(); + long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray(); + + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + res[j] = (inputArray[j] == 0) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + res[i] = (inputArray[i] == 0) ? 
LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } } VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); } diff --git a/polardbx-executor/src/main/codegen/templates/NullTestOperator.ftl b/polardbx-executor/src/main/codegen/templates/NullTestOperator.ftl index 4eea1bdd7..a2f1183da 100644 --- a/polardbx-executor/src/main/codegen/templates/NullTestOperator.ftl +++ b/polardbx-executor/src/main/codegen/templates/NullTestOperator.ftl @@ -27,29 +27,29 @@ public class ${className} extends AbstractVectorizedExpression { } @Override - public void eval(EvaluationContext ctx) { - super.evalChildren(ctx); - MutableChunk chunk = ctx.getPreAllocatedChunk(); - int batchSize = chunk.batchSize(); - boolean isSelectionInUse = chunk.isSelectionInUse(); - int[] sel = chunk.selection(); + public void eval(EvaluationContext ctx) { + super.evalChildren(ctx); + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); - RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); - RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - ${type.inputType}[] inputArray = ((${type.inputVectorType}) inputVectorSlot).${type.inputType}Array(); - boolean[] inputNulls = inputVectorSlot.nulls(); - boolean inputHasNull = inputVectorSlot.hasNull(); - long[] res = ((LongBlock) outputVectorSlot).longArray(); - boolean[] outputNulls = outputVectorSlot.nulls(); - outputVectorSlot.setHasNull(inputVectorSlot.hasNull()); + ${type.inputType}[] inputArray = (inputVectorSlot.cast(${type.inputVectorType}.class)).${type.inputType}Array(); + boolean[] inputNulls = inputVectorSlot.nulls(); + boolean inputHasNull = inputVectorSlot.hasNull(); + long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray(); + boolean[] outputNulls = outputVectorSlot.nulls(); + outputVectorSlot.setHasNull(inputVectorSlot.hasNull()); - if (isSelectionInUse) { - for (int i = 0; i < batchSize; i++) { - int j = sel[i]; - outputNulls[j] = false; - boolean inputNull = !inputHasNull ? false : inputNulls[j]; - <#if operator.classHeader = "IsNull"> + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + outputNulls[j] = false; + boolean inputNull = !inputHasNull ? false : inputNulls[j]; + <#if operator.classHeader = "IsNull"> res[j] = inputNull ? 
LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; <#if operator.classHeader = "IsNotNull"> diff --git a/polardbx-executor/src/main/codegen/templates/OrColumnColumn.ftl b/polardbx-executor/src/main/codegen/templates/OrColumnColumn.ftl index 23e42e7a1..9fd67a1ba 100644 --- a/polardbx-executor/src/main/codegen/templates/OrColumnColumn.ftl +++ b/polardbx-executor/src/main/codegen/templates/OrColumnColumn.ftl @@ -67,21 +67,21 @@ public class ${className} extends AbstractVectorizedExpression { <#list 1..(operator.operandCount) as i> - long[] array${i} = ((LongBlock) inputVec${i}).longArray(); - boolean[] nulls${i} = inputVec${i}.nulls(); - boolean input${i}HasNull = inputVec${i}.hasNull(); + long[] array${i} = (inputVec${i}.cast(LongBlock.class)).longArray(); + boolean[] nulls${i} = inputVec${i}.nulls(); + boolean input${i}HasNull = inputVec${i}.hasNull(); - long[] res = ((LongBlock) outputVectorSlot).longArray(); - boolean[] outputNulls = outputVectorSlot.nulls(); + long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray(); + boolean[] outputNulls = outputVectorSlot.nulls(); - boolean outputVectorHasNull = false; - if (isSelectionInUse) { - for (int i = 0; i < batchSize; i++) { - int j = sel[i]; + boolean outputVectorHasNull = false; + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; - <#list 1..(operator.operandCount) as c> + <#list 1..(operator.operandCount) as c> boolean null${c} = !input${c}HasNull ? false : nulls${c}[j]; diff --git a/polardbx-executor/src/main/codegen/templates/UnaryMinusOperatorColumn.ftl b/polardbx-executor/src/main/codegen/templates/UnaryMinusOperatorColumn.ftl index 7d037e615..2ee407b56 100644 --- a/polardbx-executor/src/main/codegen/templates/UnaryMinusOperatorColumn.ftl +++ b/polardbx-executor/src/main/codegen/templates/UnaryMinusOperatorColumn.ftl @@ -25,28 +25,28 @@ public class ${className} extends AbstractVectorizedExpression { @Override public void eval(EvaluationContext ctx) { - super.evalChildren(ctx); - MutableChunk chunk = ctx.getPreAllocatedChunk(); - int batchSize = chunk.batchSize(); - boolean isSelectionInUse = chunk.isSelectionInUse(); - int[] sel = chunk.selection(); - - RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); - RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - RandomAccessBlock rightInputVectorSlot = chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType()); - - ${type.inputType1}[] array1 = ((${type.inputVectorType1}) leftInputVectorSlot).${type.inputType1}Array(); - ${type.outputType}[] res = ((${type.outputVectorType}) outputVectorSlot).${type.outputType}Array(); - - if (isSelectionInUse) { - for (int i = 0; i < batchSize; i++) { - int j = sel[i]; - res[j] = -1 * (${type.outputType})array1[j]; - } - } else { - for (int i = 0; i < batchSize; i++) { - res[i] = -1 * (${type.outputType})array1[i]; - } + super.evalChildren(ctx); + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); + + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); + RandomAccessBlock rightInputVectorSlot = chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType()); + + ${type.inputType1}[] array1 = 
(leftInputVectorSlot.cast(${type.inputVectorType1}.class)).${type.inputType1}Array(); + ${type.outputType}[] res = (outputVectorSlot.cast(${type.outputVectorType}.class)).${type.outputType}Array(); + + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + res[j] = -1 * (${type.outputType})array1[j]; + } + } else { + for (int i = 0; i < batchSize; i++) { + res[i] = -1 * (${type.outputType})array1[i]; + } } VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/atom/TAtomConnectionProxy.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/atom/TAtomConnectionProxy.java index 797bed962..ba767fadd 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/atom/TAtomConnectionProxy.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/atom/TAtomConnectionProxy.java @@ -213,7 +213,11 @@ private void setVariables(Map newVariables, boolean isGlobal) th if (!first) { query.append(", "); } - query.append("@").append(key).append("=").append("NULL"); + if (key.equalsIgnoreCase("sql_log_bin")) { + query.append(key).append("=").append("'ON'"); + } else { + query.append("@").append(key).append("=").append("NULL"); + } } if (!first) { // 需要确保SET指令是完整的, 而不是只有一个SET前缀. @@ -229,6 +233,10 @@ private void setVariables(Map newVariables, boolean isGlobal) th ps.close(); ps = null; if (!isGlobal) { + for (String key : serverVariablesNeedToRemove) { + sessionVariables.remove(key); + sessionVariablesChanged.remove(key); + } for (Entry e : tmpVariablesChanged.entrySet()) { sessionVariables.put(e.getKey(), e.getValue()); sessionVariablesChanged.put(e.getKey(), e.getValue()); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/atom/config/TAtomConfParser.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/atom/config/TAtomConfParser.java index 4cb0cd94b..e806ad27f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/atom/config/TAtomConfParser.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/atom/config/TAtomConfParser.java @@ -20,9 +20,7 @@ import com.alibaba.polardbx.common.utils.TStringUtil; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; -import com.alibaba.polardbx.config.ConfigDataMode; import org.apache.commons.lang.BooleanUtils; -import org.apache.commons.lang.StringUtils; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -105,41 +103,16 @@ public static TAtomDsConfDO parserTAtomDsConfDO(String globalConfStr, String app Properties globaProp = TAtomConfParser.parserConfStr2Properties(globalConfStr); if (!globaProp.isEmpty()) { String ipKey = TAtomConfParser.GLOBA_IP_KEY; - if (!StringUtils.isEmpty(ConfigDataMode.getAtomAddressMode())) { - String modeIpKey = ipKey + ConfigDataMode.getAtomAddressMode(); - String ip = TStringUtil.trim(globaProp.getProperty(modeIpKey)); - - if (TStringUtil.isBlank(ip)) { - ip = TStringUtil.trim(globaProp.getProperty(ipKey)); - } - if (TStringUtil.isNotBlank(ip)) { - pasObj.setIp(ip); - } - } else { - String ip = TStringUtil.trim(globaProp.getProperty(ipKey)); - if (TStringUtil.isNotBlank(ip)) { - pasObj.setIp(ip); - } + String ip = TStringUtil.trim(globaProp.getProperty(ipKey)); + if (TStringUtil.isNotBlank(ip)) { + pasObj.setIp(ip); } String portKey = TAtomConfParser.GLOBA_PORT_KEY; - if (!StringUtils.isEmpty(ConfigDataMode.getAtomAddressMode())) { - String modePortKey = portKey + 
ConfigDataMode.getAtomAddressMode(); - String port = TStringUtil.trim(globaProp.getProperty(modePortKey)); - - if (TStringUtil.isBlank(port)) { - port = TStringUtil.trim(globaProp.getProperty(portKey)); - } - if (TStringUtil.isNotBlank(port)) { - pasObj.setPort(port); - } - } else { - String port = TStringUtil.trim(globaProp.getProperty(portKey)); - if (TStringUtil.isNotBlank(port)) { - pasObj.setPort(port); - } + String port = TStringUtil.trim(globaProp.getProperty(portKey)); + if (TStringUtil.isNotBlank(port)) { + pasObj.setPort(port); } - String dbName = TStringUtil.trim(globaProp.getProperty(TAtomConfParser.GLOBA_DB_NAME_KEY)); if (TStringUtil.isNotBlank(dbName)) { pasObj.setDbName(dbName); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/atom/config/TAtomDsConfHandle.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/atom/config/TAtomDsConfHandle.java index 0ff4cd724..46d6db0ec 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/atom/config/TAtomDsConfHandle.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/atom/config/TAtomDsConfHandle.java @@ -107,7 +107,14 @@ public class TAtomDsConfHandle extends AbstractLifecycle implements Lifecycle { * 本地配置,优先于推送的动态配置 */ private TAtomDsConfDO localConf = new TAtomDsConfDO(); - + /** + * 用于监听解析新密码时存储配置 + */ + private Map freshPasswdMap = new ConcurrentHashMap(); + /** + * druid数据源通过init初始化 + */ + private volatile DruidDataSource druidDataSource = null; /** * Xproto数据源 */ @@ -119,8 +126,8 @@ public class TAtomDsConfHandle extends AbstractLifecycle implements Lifecycle { /** * 记录一下共享这个handle的数据源有哪些,最后一个引用关闭时才触发handle的关闭 */ - private final Map dataSourceReferences = Collections.synchronizedMap(new HashMap()); - private final TAtomDataSource atomDataSource; + private Map dataSourceReferences = Collections.synchronizedMap(new HashMap()); + private TAtomDataSource atomDataSource; /** *

@@ -180,22 +187,6 @@ public static void fillConnectionProperties(TAtomDsConfDO tAtomDsConfDO) throws
         if (!connectionProperties.containsKey(readOnlyPropagatesToServerKey)) {
             connectionProperties.put(readOnlyPropagatesToServerKey, "false");
         }
-
-        if (ConfigDataMode.isZeroDataTimeToString()) {
-            // whether zero-value datetime handling is enabled
-            String zeroDateTimeBehavior = "zeroDateTimeBehavior";
-            if (!connectionProperties.containsKey(zeroDateTimeBehavior)) {
-                connectionProperties.put(zeroDateTimeBehavior, "convertToNull");// return 0000-00-00 datetime values as null
-            }
-            String yearIsDateType = "yearIsDateType";
-            if (!connectionProperties.containsKey(yearIsDateType)) {
-                connectionProperties.put(yearIsDateType, "false");// return YEAR as a string, without converting it to DATE
-            }
-            String noDatetimeStringSync = "noDatetimeStringSync";
-            if (!connectionProperties.containsKey(noDatetimeStringSync)) {
-                connectionProperties.put(noDatetimeStringSync, "true");// return time types as strings, without time zone conversion
-            }
-        }
     }
 
     /**
@@ -428,7 +419,6 @@ public void doInit() {
                 "DBKey",
                 this.dbKey,
                 this.appName,
-                null,
                 this.unitName);
         }
 
@@ -437,10 +427,9 @@ public void doInit() {
                 "AppName",
                 this.dbKey,
                 this.appName,
-                null,
                 this.unitName);
         }
-        // To Be load By MetaDb
+
         TAtomDsConfDO newRunTimeConf = runTimeConf;
 
         lock.lock();
@@ -475,7 +464,6 @@ public void doInit() {
                         "userName",
                         this.dbKey,
                         this.appName,
-                        null,
                         this.unitName);
                 }
             }
@@ -677,6 +665,13 @@ public void addAtomAppConfigChangeListener(String listenerName, AtomAppConfigCha
 
     @Override
     protected void doDestroy() {
+        if (null != this.druidDataSource) {
+            logger.info("[DataSource Stop] Start!");
+            LoggerInit.TDDL_DYNAMIC_CONFIG.info("[DataSource Stop] Start! dbKey is " + this.dbKey);
+            this.druidDataSource.close();
+            logger.info("[DataSource Stop] End!");
+            LoggerInit.TDDL_DYNAMIC_CONFIG.info("[DataSource Stop] End! dbKey is " + this.dbKey);
+        }
         if (this.xDataSource != null) {
             logger.info("[XDataSource Stop] Start!");
             LoggerInit.TDDL_DYNAMIC_CONFIG.info("[XDataSource Stop] Start! dbKey is " + this.dbKey);
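The zeroDateTimeBehavior / yearIsDateType / noDatetimeStringSync defaults removed from fillConnectionProperties above are standard MySQL Connector/J (5.x-era) connection properties, so the same behavior can still be requested per connection. A minimal sketch via the JDBC URL (illustrative only, not part of this patch):

    // Equivalent per-connection settings on a Connector/J 5.x URL:
    String url = "jdbc:mysql://127.0.0.1:3306/test"
        + "?zeroDateTimeBehavior=convertToNull" // return 0000-00-00 values as NULL
        + "&yearIsDateType=false"               // return YEAR without converting to DATE
        + "&noDatetimeStringSync=true";         // return datetime strings without time zone conversion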
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/InsertSplitter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/InsertSplitter.java
index 9ebc00841..3f8c53fb2 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/InsertSplitter.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/InsertSplitter.java
@@ -16,14 +16,14 @@
 
 package com.alibaba.polardbx.executor;
 
-import com.alibaba.polardbx.druid.sql.dialect.mysql.parser.MySqlLexer;
-import com.alibaba.polardbx.druid.sql.parser.ByteString;
-import com.alibaba.polardbx.druid.sql.parser.Lexer;
-import com.alibaba.polardbx.druid.sql.parser.Token;
 import com.alibaba.polardbx.common.constants.BatchInsertAttribute;
 import com.alibaba.polardbx.common.jdbc.BatchInsertPolicy;
 import com.alibaba.polardbx.common.jdbc.Parameters;
 import com.alibaba.polardbx.common.properties.ConnectionParams;
+import com.alibaba.polardbx.druid.sql.dialect.mysql.parser.MySqlLexer;
+import com.alibaba.polardbx.druid.sql.parser.ByteString;
+import com.alibaba.polardbx.druid.sql.parser.Lexer;
+import com.alibaba.polardbx.druid.sql.parser.Token;
 import com.alibaba.polardbx.executor.cursor.MultiResultCursor;
 import com.alibaba.polardbx.executor.cursor.ResultCursor;
 import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor;
@@ -168,6 +168,9 @@ private ResultCursor executeSplit(ByteString sql, ExecutionContext executionCont
         executionContext.getConnection().setLastInsertId(lastInsertId);
         executionContext.getConnection().setReturnedLastInsertId(returnedLastInsertId);
 
+        // insert split does not go through parameterization, so the multi-line check must be added here
+        sql.setMultiLine(lexer.getLine() > 1);
+
         AffectRowCursor arc = new AffectRowCursor(affectRows);
         return new ResultCursor(arc);
     }
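Because this path bypasses the parameterizer that normally records whether a statement spans multiple lines, the flag is recomputed from the lexer. A minimal stand-in for the check, assuming only that "multi-line" means the statement text contains a line break (the real code asks the lexer how many lines it consumed):

    // Simplified equivalent of lexer.getLine() > 1:
    static boolean isMultiLine(String sql) {
        return sql.indexOf('\n') >= 0;
    }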
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/PlanExecutor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/PlanExecutor.java
index b8a925f80..5ca67f50a 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/PlanExecutor.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/PlanExecutor.java
@@ -18,6 +18,7 @@
 
 import com.alibaba.polardbx.common.model.lifecycle.AbstractLifecycle;
 import com.alibaba.polardbx.common.properties.ConnectionParams;
+import com.alibaba.polardbx.common.properties.ConnectionProperties;
 import com.alibaba.polardbx.common.utils.GeneralUtil;
 import com.alibaba.polardbx.executor.cursor.Cursor;
 import com.alibaba.polardbx.executor.cursor.ResultCursor;
@@ -26,6 +27,7 @@
 import com.alibaba.polardbx.executor.mpp.client.MppResultCursor;
 import com.alibaba.polardbx.executor.utils.ExecUtils;
 import com.alibaba.polardbx.executor.utils.ExplainExecutorUtil;
+import com.alibaba.polardbx.gms.config.impl.MetaDbInstConfigManager;
 import com.alibaba.polardbx.optimizer.PlannerContext;
 import com.alibaba.polardbx.optimizer.config.meta.CostModelWeight;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
@@ -45,8 +47,11 @@
 import org.apache.calcite.rel.logical.LogicalUnion;
 import org.apache.calcite.rel.metadata.RelMetadataQuery;
 
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Optional;
+import java.util.Properties;
 import java.util.stream.Collectors;
 
 public class PlanExecutor extends AbstractLifecycle {
@@ -61,7 +66,7 @@ public static ResultCursor execute(ExecutionPlan plan, ExecutionContext context)
 
         ResultCursor result;
         //record the workload
-        WorkloadUtil.getWorkloadType(context, plan);
+        WorkloadUtil.getAndSetWorkloadType(context, plan);
         try {
             if (enableProfileStat) {
                 context.getRuntimeStatistics().setPlanTree(plan.getPlan());
@@ -76,6 +81,10 @@ public static ResultCursor execute(ExecutionPlan plan, ExecutionContext context)
                     SqlConverter.getInstance(context.getSchemaName(), context).getCatalog()));
                 PlanManagerUtil.applyCache(plan.getPlan());
             }
+
+            // reset params for columnar mode.
+            resetParams(plan, context);
+
             if (plan.isExplain()) {
                 result = ExplainExecutorUtil.explain(plan, context, explain);
             } else {
@@ -93,6 +102,79 @@ public static ResultCursor execute(ExecutionPlan plan, ExecutionContext context)
         return result;
     }
 
+    private static void resetParams(ExecutionPlan plan, ExecutionContext context) {
+        // enable columnar schedule
+        if (plan.isUseColumnar()) {
+            context.putIntoHintCmds(ConnectionProperties.ENABLE_COLUMNAR_SCHEDULE, true);
+        }
+
+        // reset connection parameters by plan mode
+        boolean automaticColumnarParams =
+            context.getParamManager().getBoolean(ConnectionParams.ENABLE_AUTOMATIC_COLUMNAR_PARAMS);
+        if (plan.isUseColumnar() && automaticColumnarParams) {
+            Map<String, Object> columnarParams = getColumnarParams(context);
+            context.putAllHintCmds(columnarParams);
+        }
+    }
+
+    static Map<String, Object> getColumnarParams(ExecutionContext context) {
+        Map<String, Object> columnarParams = new HashMap<>();
+        // Basic connection params for queries over a columnar index in MPP mode.
+        if (ExecUtils.needPutIfAbsent(context, ConnectionProperties.OSS_FILE_CONCURRENT)) {
+            columnarParams.put(ConnectionProperties.OSS_FILE_CONCURRENT, true);
+        }
+
+        // Some parameters are only enabled in columnar mode because they may add
+        // noticeable base overhead (lots of array allocation).
+        if (ExecUtils.needPutIfAbsent(context, ConnectionProperties.ENABLE_VEC_JOIN)) {
+            columnarParams.put(ConnectionProperties.ENABLE_VEC_JOIN, true);
+        }
+        if (ExecUtils.needPutIfAbsent(context, ConnectionProperties.ENABLE_VEC_ACCUMULATOR)) {
+            columnarParams.put(ConnectionProperties.ENABLE_VEC_ACCUMULATOR, true);
+        }
+        if (ExecUtils.needPutIfAbsent(context, ConnectionProperties.ENABLE_VEC_BUILD_JOIN_ROW)) {
+            columnarParams.put(ConnectionProperties.ENABLE_VEC_BUILD_JOIN_ROW, true);
+        }
+
+        // The random shuffle incurs locking costs in the local buffer exec.
+        if (ExecUtils.needPutIfAbsent(context, ConnectionProperties.ENABLE_SCAN_RANDOM_SHUFFLE)) {
+            columnarParams.put(ConnectionProperties.ENABLE_SCAN_RANDOM_SHUFFLE, true);
+        }
+
+        // If the chunk size is less than the chunk limit, reusing vectors is useless.
+        if (ExecUtils.needPutIfAbsent(context, ConnectionProperties.ENABLE_REUSE_VECTOR)) {
+            columnarParams.put(ConnectionProperties.ENABLE_REUSE_VECTOR, true);
+        }
+
+        // The hash table bloom filter is not compatible with ENABLE_VEC_JOIN, so disable it.
+        if (ExecUtils.needPutIfAbsent(context, ConnectionProperties.ENABLE_HASH_TABLE_BLOOM_FILTER)) {
+            columnarParams.put(ConnectionProperties.ENABLE_HASH_TABLE_BLOOM_FILTER, false);
+        }
+
+        boolean enableOssCompatible = context.getParamManager().getBoolean(ConnectionParams.ENABLE_OSS_COMPATIBLE);
+
+        // Keeping OSS compatibility enabled here would cause severe performance regressions.
+        if (ExecUtils.needPutIfAbsent(context, ConnectionProperties.ENABLE_OSS_COMPATIBLE)) {
+            enableOssCompatible = false;
+            columnarParams.put(ConnectionProperties.ENABLE_OSS_COMPATIBLE, false);
+        }
+
+        // if oss compatible is enabled, disable slice block with dictionary for correctness
+        if (enableOssCompatible) {
+            // ENABLE_COLUMNAR_SLICE_DICT not set
+            if (ExecUtils.needPutIfAbsent(context, ConnectionProperties.ENABLE_COLUMNAR_SLICE_DICT)) {
+                columnarParams.put(ConnectionProperties.ENABLE_COLUMNAR_SLICE_DICT, false);
+            }
+        }
+
+        // enable new runtime filter in columnar query.
+        if (ExecUtils.needPutIfAbsent(context, ConnectionProperties.ENABLE_NEW_RF)) {
+            columnarParams.put(ConnectionProperties.ENABLE_NEW_RF, true);
+        }
+
+        return columnarParams;
+    }
+
     public static ResultCursor execByExecPlanNodeByOne(
         ExecutionPlan executionPlan, ExecutionContext ec) {
         try {
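Every automatic setting in getColumnarParams goes through the same guard: a columnar default is injected only when needPutIfAbsent reports that the property has no explicit user value, so hints and session variables always win over the defaults. A minimal sketch of that pattern, under the assumption that "absent" simply means "not set explicitly" (the hypothetical userSettings map stands in for the ParamManager/hint state):

    import java.util.HashMap;
    import java.util.Map;

    class ColumnarDefaults {
        static boolean needPutIfAbsent(Map<String, Object> userSettings, String key) {
            return !userSettings.containsKey(key); // never override an explicit user setting
        }

        static Map<String, Object> build(Map<String, Object> userSettings) {
            Map<String, Object> params = new HashMap<>();
            if (needPutIfAbsent(userSettings, "ENABLE_VEC_JOIN")) {
                params.put("ENABLE_VEC_JOIN", true); // only worth the base overhead on columnar plans
            }
            return params;
        }
    }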
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/TddlGroupExecutor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/TddlGroupExecutor.java
index 247868186..b80ee7bd9 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/TddlGroupExecutor.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/TddlGroupExecutor.java
@@ -16,8 +16,8 @@
 
 package com.alibaba.polardbx.executor;
 
-import com.alibaba.polardbx.group.jdbc.TGroupDataSource;
 import com.alibaba.polardbx.executor.spi.IRepository;
+import com.alibaba.polardbx.group.jdbc.TGroupDataSource;
 
 /**
 * Group executor implemented for TGroupDatasource
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/Xprotocol/XRowSet.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/Xprotocol/XRowSet.java
index 0d89382f1..5c8fce644 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/Xprotocol/XRowSet.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/Xprotocol/XRowSet.java
@@ -17,32 +17,46 @@
 package com.alibaba.polardbx.executor.Xprotocol;
 
 import com.alibaba.polardbx.common.CrcAccumulator;
-import com.alibaba.polardbx.common.datatype.UInt64;
-import com.alibaba.polardbx.optimizer.core.row.Row;
-import com.google.protobuf.ByteString;
-import com.google.protobuf.CodedInputStream;
-import com.mysql.cj.polarx.protobuf.PolarxResultset;
-import com.alibaba.polardbx.rpc.jdbc.CharsetMapping;
-import com.alibaba.polardbx.rpc.result.XResult;
-import com.alibaba.polardbx.rpc.result.XResultUtil;
 import com.alibaba.polardbx.common.datatype.Decimal;
+import com.alibaba.polardbx.common.datatype.DecimalConverter;
+import com.alibaba.polardbx.common.datatype.DecimalStructure;
+import com.alibaba.polardbx.common.datatype.UInt64;
+import com.alibaba.polardbx.common.datatype.UInt64Utils;
 import com.alibaba.polardbx.common.exception.TddlRuntimeException;
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.common.utils.BigDecimalUtil;
 import com.alibaba.polardbx.common.utils.GeneralUtil;
 import com.alibaba.polardbx.common.utils.Pair;
 import com.alibaba.polardbx.common.utils.TStringUtil;
+import com.alibaba.polardbx.common.utils.time.MySQLTimeConverter;
+import com.alibaba.polardbx.common.utils.time.core.MySQLTimeVal;
+import com.alibaba.polardbx.common.utils.time.core.MysqlDateTime;
+import com.alibaba.polardbx.common.utils.time.core.TimeStorage;
+import com.alibaba.polardbx.common.utils.time.parser.StringNumericParser;
+import com.alibaba.polardbx.common.utils.time.parser.StringTimeParser;
+import com.alibaba.polardbx.common.utils.time.parser.TimeParseStatus;
 import com.alibaba.polardbx.executor.chunk.BlockBuilder;
 import com.alibaba.polardbx.executor.chunk.IXRowChunk;
+import com.alibaba.polardbx.executor.operator.ResultSetCursorExec;
 import com.alibaba.polardbx.optimizer.config.table.ColumnMeta;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.CursorMeta;
+import com.alibaba.polardbx.optimizer.core.datatype.BigBitType;
 import com.alibaba.polardbx.optimizer.core.datatype.Blob;
 import com.alibaba.polardbx.optimizer.core.datatype.DataType;
 import com.alibaba.polardbx.optimizer.core.datatype.EnumType;
 import com.alibaba.polardbx.optimizer.core.datatype.YearType;
 import com.alibaba.polardbx.optimizer.core.expression.bean.EnumValue;
 import com.alibaba.polardbx.optimizer.core.row.AbstractRow;
+import com.alibaba.polardbx.rpc.jdbc.CharsetMapping;
+import com.alibaba.polardbx.rpc.result.XResult;
+import com.alibaba.polardbx.rpc.result.XResultUtil;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.CodedInputStream;
+import com.mysql.cj.polarx.protobuf.PolarxResultset;
 import io.airlift.slice.Slice;
 import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
+import org.apache.orc.impl.TypeUtils;
 
 import java.math.BigDecimal;
 import java.math.BigInteger;
@@ -52,6 +66,8 @@
 import java.sql.SQLException;
 import java.sql.Time;
 import java.sql.Timestamp;
+import java.sql.Types;
+import java.text.MessageFormat;
 import java.time.ZoneId;
 import java.util.List;
 import java.util.Optional;
@@ -71,6 +87,8 @@ public class XRowSet extends AbstractRow implements IXRowChunk {
 
     private final boolean legacy;
 
+    public static final ZoneId DEFAULT_TIME_ZONE = TimeZone.getTimeZone("GMT+08:00").toZoneId();
+
     public XRowSet(XResult result, CursorMeta cursorMeta, List<PolarxResultset.ColumnMetaData> metaData,
                    List<ByteString> row, boolean legacy) throws SQLException {
         super(cursorMeta);
@@ -95,14 +113,17 @@ public byte[] fastGetBytes(int index, String targetCharset) throws Exception {
         return XResultUtil.resultToBytes(metaData.get(index), row.get(index), targetCharset);
     }
 
-    public void fastParseToColumnVector(int index, String targetCharset, ColumnVector columnVector, int rowNumber, Optional<CrcAccumulator> accumulator)
+    public void fastParseToColumnVector(int index, String targetCharset, ColumnVector columnVector, int rowNumber,
+                                        Optional<CrcAccumulator> accumulator)
         throws Exception {
-        XResultUtil.resultToColumnVector(metaData.get(index), row.get(index), targetCharset, columnVector, rowNumber, false,
+        XResultUtil.resultToColumnVector(metaData.get(index), row.get(index), targetCharset, columnVector, rowNumber,
+            false,
             -1, -1, -1, null, null, null, accumulator);
     }
 
     public void fastParseToColumnVector(int index, String targetCharset, ColumnVector columnVector, int rowNumber,
-                                        ZoneId timezone, int scale, Optional<CrcAccumulator> accumulator) throws Exception {
+                                        ZoneId timezone, int scale, Optional<CrcAccumulator> accumulator)
+        throws Exception {
         XResultUtil.resultToColumnVector(metaData.get(index), row.get(index), targetCharset, columnVector, rowNumber,
             false, -1, scale, -1, timezone, null, null, accumulator);
     }
@@ -114,14 +135,16 @@ public void fastParseToColumnVector(int index, String targetCharset, ColumnVecto
     }
 
     public void fastParseToColumnVector(int index, String targetCharset, ColumnVector columnVector, int rowNumber,
-                                        boolean flipUnsigned, int precision, int scale, Optional<CrcAccumulator> accumulator) throws Exception {
+                                        boolean flipUnsigned, int precision, int scale,
+                                        Optional<CrcAccumulator> accumulator) throws Exception {
         XResultUtil.resultToColumnVector(metaData.get(index), row.get(index), targetCharset, columnVector, rowNumber,
             flipUnsigned, precision, scale, -1, null, null, null, accumulator);
     }
 
     public void fastParseToColumnVector(int index, String targetCharset, ColumnVector columnVector, int rowNumber,
                                         int length, ColumnVector redundantColumnVector,
-                                        BiFunction collationHandler, Optional<CrcAccumulator> accumulator) throws Exception {
+                                        BiFunction collationHandler,
+                                        Optional<CrcAccumulator> accumulator) throws Exception {
         XResultUtil
             .resultToColumnVector(metaData.get(index), row.get(index), targetCharset, columnVector, rowNumber, false,
                 -1, -1, length, null, redundantColumnVector, collationHandler, accumulator);
@@ -196,15 +219,6 @@ public void setObject(int index, Object value) {
         throw new UnsupportedOperationException();
     }
 
-    private static long bytesToLong(byte[] bytes) {
-        assert bytes.length <= 8;
-        long val = 0;
-        for (int i = 0; i < bytes.length; i++) {
-            val |= (bytes[i] & 0xFF) << ((bytes.length - i - 1) * 8);
-        }
-        return val;
-    }
-
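The removed helper also had a latent bug for inputs longer than four bytes: the left operand of the shift is an int, so a shift distance of 56 silently wraps modulo 32. The centralized replacement in ResultSetCursorExec presumably widens to long before shifting; a corrected sketch of the same big-endian packing:

    // Big-endian byte packing; the (long) cast keeps shift distances up to 56 valid.
    private static long bytesToLong(byte[] bytes) {
        assert bytes.length <= 8;
        long val = 0;
        for (int i = 0; i < bytes.length; i++) {
            val |= ((long) (bytes[i] & 0xFF)) << ((bytes.length - i - 1) * 8);
        }
        return val;
    }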
     @Override
     public void buildChunkRow(DataType[] dataTypes, BlockBuilder[] blockBuilders) {
         buildChunkRow(result, metaData, row, dataTypes, blockBuilders);
@@ -433,7 +447,7 @@ public static void buildChunkRow(XResult result, List
+        final List<ByteString> row = xResult.current().getRow();
+        final List<PolarxResultset.ColumnMetaData> metaData = xResult.getMetaData();
+        for (int i = 0; i < dataTypes.length; i++) {
+            final BlockBuilder builder = blockBuilders[i];
+            final PolarxResultset.ColumnMetaData meta = metaData.get(i);
+            final ByteString data = row.get(i);
+
+            if (0 == data.size()) {
+                builder.appendNull();
+                continue;
+            }
+            final byte[] rawBytes = data.toByteArray();
+            final CodedInputStream stream = data.newCodedInput();
+
+            // Convert data into orc raw type: Long, Double, or byte[]
+            switch (dataTypes[i].fieldType()) {
+            case MYSQL_TYPE_DATETIME:
+            case MYSQL_TYPE_DATETIME2:
+            case MYSQL_TYPE_TIMESTAMP:
+            case MYSQL_TYPE_TIMESTAMP2: {
+                long longTime = parseMysqlDateTime(meta, data, stream, Types.TIMESTAMP, context);
+                builder.writeLong(longTime);
+                break;
+            }
+
+            case MYSQL_TYPE_DATE:
+            case MYSQL_TYPE_NEWDATE: {
+                long longTime = parseMysqlDateTime(meta, data, stream, Types.DATE, context);
+                builder.writeLong(longTime);
+                break;
+            }
+
+            case MYSQL_TYPE_TIME: {
+                long longTime = parseMysqlDateTime(meta, data, stream, Types.TIME, context);
+                builder.writeLong(longTime);
+                break;
+            }
+
+            case MYSQL_TYPE_YEAR:
+            case MYSQL_TYPE_INT24:
+            case MYSQL_TYPE_LONG:
+            case MYSQL_TYPE_SHORT:
+            case MYSQL_TYPE_TINY: {
+                long year = XRowSet.getU64(meta.getType(), stream);
+                builder.writeLong(year);
+                break;
+            }
+
+            case MYSQL_TYPE_DECIMAL:
+            case MYSQL_TYPE_NEWDECIMAL: {
+                byte[] bytes = XResultUtil.resultToBytes(meta, data, "utf8");
+                if (TypeUtils.isDecimal64Precision(dataTypes[i].getPrecision())) {
+                    // Convert to Long for Decimal64
+                    long decimal64 = BigDecimalUtil.decodeAsUnscaledLong(bytes, dataTypes[i].getScale());
+                    builder.writeLong(decimal64);
+                } else {
+                    // Convert to byte[]
+                    DecimalStructure d = new DecimalStructure();
+                    DecimalConverter.parseString(bytes, 0, bytes.length, d, false);
+
+                    final int precision = dataTypes[i].getPrecision();
+                    final int scale = dataTypes[i].getScale();
+
+                    // NOTE: the binary decimal is handled as a latin1 string in the .orc file
+                    byte[] result = new byte[DecimalConverter.binarySize(precision, scale)];
+                    DecimalConverter.decimalToBin(d, result, precision, scale);
+                    builder.writeByteArray(result);
+                }
+                break;
+            }
+
+            case MYSQL_TYPE_LONGLONG: {
+                if (dataTypes[i].isUnsigned()) {
+                    // for bigint unsigned
+                    // fetch unsigned long value represented by bytes
+                    byte[] bytes = XResultUtil.resultToBytes(meta, data, "utf8");
+                    long[] parseResult = StringNumericParser.parseString(bytes);
+                    // check error occurs
+                    if (parseResult[StringNumericParser.ERROR_INDEX] != 0) {
+                        throw GeneralUtil.nestedException(MessageFormat.format(
+                            "failed to parse unsigned long value {0}.", new String(bytes)));
+                    }
+
+                    // use flip mask to ensure the sort-consistency of unsigned value.
+                    long parsedNumber = parseResult[StringNumericParser.NUMERIC_INDEX];
+                    builder.writeLong(parsedNumber ^ UInt64Utils.FLIP_MASK);
+                } else {
+                    long result = XRowSet.getU64(meta.getType(), stream);
+                    builder.writeLong(result);
+                }
+                break;
+            }
+
+            case MYSQL_TYPE_BIT: {
+                if (dataTypes[i] instanceof BigBitType) {
+                    byte[] bytes = XResultUtil.resultToBytes(meta, data, "utf8");
+                    builder.writeLong(ResultSetCursorExec.bytesToLong(bytes));
+                } else {
+                    long result = XRowSet.getU64(meta.getType(), stream);
+                    builder.writeLong(result);
+                }
+                break;
+            }
+
+            case MYSQL_TYPE_DOUBLE: {
+                final double val;
+                val = getDoubleVal(meta, rawBytes, stream);
+                builder.writeDouble(val);
+                break;
+            }
+
+            case MYSQL_TYPE_FLOAT: {
+                final float val = getFloatVal(meta, rawBytes, stream);
+                builder.writeDouble(val);
+                break;
+            }
+
+            case MYSQL_TYPE_VAR_STRING:
+            case MYSQL_TYPE_STRING:
+            case MYSQL_TYPE_SET:
+            case MYSQL_TYPE_BLOB:
+            case MYSQL_TYPE_ENUM:
+            case MYSQL_TYPE_JSON: {
+                byte[] bytes = XResultUtil.resultToBytes(meta, data, "utf8");
+                builder.writeByteArray(bytes);
+                break;
+            }
+
+            default:
+                throw new UnsupportedOperationException(dataTypes[i].fieldType().toString());
+            }
+        }
+    }
+
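The flip-mask step for unsigned BIGINT works because XOR-ing the sign bit maps the unsigned range [0, 2^64) monotonically onto the signed long range, so signed comparisons of the stored values agree with unsigned comparisons of the originals. A self-contained sketch, assuming UInt64Utils.FLIP_MASK is the sign bit 0x8000000000000000L:

    public class FlipMaskDemo {
        private static final long FLIP_MASK = 0x8000000000000000L; // assumed value

        public static void main(String[] args) {
            long big = Long.parseUnsignedLong("18446744073709551615"); // 2^64 - 1
            long small = 42L;
            boolean unsignedOrder = Long.compareUnsigned(small, big) < 0;
            boolean flippedOrder = (small ^ FLIP_MASK) < (big ^ FLIP_MASK);
            System.out.println(unsignedOrder == flippedOrder); // prints true
        }
    }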
+    private static float getFloatVal(PolarxResultset.ColumnMetaData meta, byte[] rawBytes, CodedInputStream stream)
+        throws Exception {
+        final float val;
+        switch (meta.getType()) {
+        case UINT:
+            val =
+                (new BigInteger(ByteBuffer.allocate(9).put((byte) 0).putLong(stream.readUInt64()).array()))
+                    .floatValue();
+            break;
+
+        case FLOAT:
+            val = stream.readFloat();
+            break;
+
+        case DOUBLE:
+            val = (float) stream.readDouble();
+            break;
+
+        case BYTES:
+            val = Float.parseFloat(new String(rawBytes, 0, rawBytes.length - 1));
+            break;
+
+        default:
+            val = XRowSet.getU64(meta.getType(), stream);
+        }
+        return val;
+    }
+
+    private static double getDoubleVal(PolarxResultset.ColumnMetaData meta, byte[] rawBytes, CodedInputStream stream)
+        throws Exception {
+        final double val;
+        switch (meta.getType()) {
+        case UINT:
+            val = (new BigInteger(ByteBuffer.allocate(9).put((byte) 0)
+                .putLong(stream.readUInt64()).array())).doubleValue();
+            break;
+
+        case FLOAT:
+            val = stream.readFloat();
+            break;
+
+        case DOUBLE:
+            val = stream.readDouble();
+            break;
+
+        case DECIMAL: {
+            byte scale = stream.readRawByte();
+            // we allocate an extra char for the sign
+            CharBuffer unscaledString = CharBuffer.allocate(2 * stream.getBytesUntilLimit());
+            unscaledString.position(1);
+            byte sign = 0;
+            // read until we encounter the sign bit
+            while (true) {
+                int b = 0xFF & stream.readRawByte();
+                if ((b >> 4) > 9) {
+                    sign = (byte) (b >> 4);
+                    break;
+                }
+                unscaledString.append((char) ((b >> 4) + '0'));
+                if ((b & 0x0f) > 9) {
+                    sign = (byte) (b & 0x0f);
+                    break;
+                }
+                unscaledString.append((char) ((b & 0x0f) + '0'));
+            }
+            if (stream.getBytesUntilLimit() > 0) {
+                throw new TddlRuntimeException(ErrorCode.ERR_X_PROTOCOL_RESULT,
+                    "Did not read all bytes while decoding decimal. Bytes left: " + stream
+                        .getBytesUntilLimit());
+            }
+            switch (sign) {
+            case 0xa:
+            case 0xc:
+            case 0xe:
+            case 0xf:
+                unscaledString.put(0, '+');
+                break;
+            case 0xb:
+            case 0xd:
+                unscaledString.put(0, '-');
+                break;
+            }
+            // the CharBuffer may be full or have one unused slot; keep only the characters written before toString()
+            int characters = unscaledString.position();
+            unscaledString.clear(); // reset position
+            BigInteger unscaled = new BigInteger(unscaledString.subSequence(0, characters).toString());
+            val = (new BigDecimal(unscaled, scale)).doubleValue();
+        }
+        break;
+
+        case BYTES:
+            val = Double.parseDouble(new String(rawBytes, 0, rawBytes.length - 1));
+            break;
+
+        default:
+            val = XRowSet.getU64(meta.getType(), stream);
+        }
+        return val;
+    }
+
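The DECIMAL branch above decodes the X Protocol wire format: one scale byte, then packed BCD digits, terminated by a sign nibble (0xa/0xc/0xe/0xf for '+', 0xb/0xd for '-'). A standalone sketch of the same decoding, for a value such as {0x02, 0x12, 0x3d} => -1.23:

    static java.math.BigDecimal decodeXDecimal(byte[] wire) {
        int scale = wire[0];
        StringBuilder digits = new StringBuilder("+"); // sign placeholder
        for (int i = 1; i < wire.length; i++) {
            int hi = (wire[i] >> 4) & 0x0f, lo = wire[i] & 0x0f;
            if (hi > 9) { if (hi == 0xb || hi == 0xd) digits.setCharAt(0, '-'); break; }
            digits.append((char) ('0' + hi));
            if (lo > 9) { if (lo == 0xb || lo == 0xd) digits.setCharAt(0, '-'); break; }
            digits.append((char) ('0' + lo));
        }
        return new java.math.BigDecimal(new java.math.BigInteger(digits.toString()), scale);
    }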
+    private static long parseMysqlDateTime(PolarxResultset.ColumnMetaData meta, ByteString data,
+                                           CodedInputStream stream, int type, ExecutionContext context)
+        throws Exception {
+        final long longTime;
+        if (context.isEnableFastParseOrcRawType()) {
+            switch (meta.getType()) {
+            case BYTES: {
+                byte[] bytes = stream.readRawBytes(data.size() - 1);
+                MysqlDateTime t = StringTimeParser.parseString(bytes, type);
+                longTime = TimeStorage.writeTime(t);
+                break;
+            }
+
+            case TIME: {
+                boolean negative = stream.readRawByte() > 0;
+                int hours = 0;
+                int minutes = 0;
+                int seconds = 0;
+                int nanos = 0;
+
+                if (!stream.isAtEnd()) {
+                    hours = (int) stream.readInt64();
+                    if (!stream.isAtEnd()) {
+                        minutes = (int) stream.readInt64();
+                        if (!stream.isAtEnd()) {
+                            seconds = (int) stream.readInt64();
+                            if (!stream.isAtEnd()) {
+                                nanos = 1000 * (int) stream.readInt64();
+                            }
+                        }
+                    }
+                }
+
+                // pack the time fields into the long-encoded mysql time representation.
+                longTime = TimeStorage.writeTime(hours, minutes, seconds, nanos, negative);
+                break;
+            }
+
+            case DATETIME: {
+                int year = (int) stream.readUInt64();
+                int month = (int) stream.readUInt64();
+                int day = (int) stream.readUInt64();
+                if (stream.getBytesUntilLimit() > 0) {
+                    int hours = 0;
+                    int minutes = 0;
+                    int seconds = 0;
+
+                    int nanos = 0;
+
+                    if (!stream.isAtEnd()) {
+                        hours = (int) stream.readInt64();
+                        if (!stream.isAtEnd()) {
+                            minutes = (int) stream.readInt64();
+                            if (!stream.isAtEnd()) {
+                                seconds = (int) stream.readInt64();
+                                if (!stream.isAtEnd()) {
+                                    nanos = 1000 * (int) stream.readInt64();
+                                }
+                            }
+                        }
+                    }
+
+                    switch (meta.getOriginalType()) {
+                    case MYSQL_TYPE_DATETIME:
+                    case MYSQL_TYPE_DATETIME2: {
+                        longTime = TimeStorage.writeTimestamp(year, month, day, hours, minutes, seconds, nanos, false);
+                        break;
+                    }
+
+                    case MYSQL_TYPE_TIMESTAMP:
+                    case MYSQL_TYPE_TIMESTAMP2: {
+                        MysqlDateTime mysqlDateTime =
+                            new MysqlDateTime(year, month, day, hours, minutes, seconds, nanos);
+                        TimeParseStatus timeParseStatus = new TimeParseStatus();
+                        MySQLTimeVal timeVal =
+                            MySQLTimeConverter.convertDatetimeToTimestampWithoutCheck(mysqlDateTime, timeParseStatus,
+                                DEFAULT_TIME_ZONE);
+                        if (timeVal == null) {
+                            // for invalid time values, fall back to zero.
+                            timeVal = new MySQLTimeVal();
+                        }
+                        longTime = XResultUtil.timeValToLong(timeVal);
+                        break;
+                    }
+                    default:
+                        throw new TddlRuntimeException(ErrorCode.ERR_X_PROTOCOL_RESULT, "Unsupported type: "
+                            + meta.getType().name() + " org_type: " + meta.getOriginalType().name());
+                    }
+                    break;
+                } else {
+                    switch (meta.getOriginalType()) {
+                    case MYSQL_TYPE_DATE:
+                    case MYSQL_TYPE_NEWDATE:
+                        longTime = TimeStorage.writeDate(year, month, day);
+                        break;
+                    case MYSQL_TYPE_DATETIME:
+                    case MYSQL_TYPE_DATETIME2:
+                        longTime = TimeStorage.writeTimestamp(year, month, day, 0, 0, 0, 0, false);
+                        break;
+                    case MYSQL_TYPE_TIMESTAMP:
+                    case MYSQL_TYPE_TIMESTAMP2:
+                        MysqlDateTime mysqlDateTime = new MysqlDateTime(year, month, day, 0, 0, 0, 0);
+                        TimeParseStatus timeParseStatus = new TimeParseStatus();
+                        MySQLTimeVal timeVal =
+                            MySQLTimeConverter.convertDatetimeToTimestampWithoutCheck(mysqlDateTime, timeParseStatus,
+                                DEFAULT_TIME_ZONE);
+                        if (timeVal == null) {
+                            // for invalid time values, fall back to zero.
+                            timeVal = new MySQLTimeVal();
+                        }
+                        longTime = XResultUtil.timeValToLong(timeVal);
+                        break;
+                    default:
+                        throw new TddlRuntimeException(ErrorCode.ERR_X_PROTOCOL_RESULT, "Unsupported type: "
+                            + meta.getType().name() + " org_type: " + meta.getOriginalType().name());
+
+                    }
+                    break;
+                }
+            }
+
+            default:
+                throw new TddlRuntimeException(ErrorCode.ERR_X_PROTOCOL_RESULT,
+                    "Unsupported type: " + meta.getType().name()
+                        + " org_type: " + meta.getOriginalType().name() + " convert to time");
+            }
+        } else {
+            MysqlDateTime t;
+            byte[] bytes = XResultUtil.resultToBytes(meta, data, "utf8");
+            t = StringTimeParser.parseString(bytes, type);
+            longTime = TimeStorage.writeTime(t);
+        }
+        return longTime;
+    }
+
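In the TIME branch the wire format is one sign byte followed by up to four optional varints (hour, minute, second, microseconds); trailing zero fields are simply omitted on the wire, which is why each read is guarded by isAtEnd(). A standalone sketch of the same decoding:

    import com.google.protobuf.CodedInputStream;

    class XTimeDemo {
        static String decode(byte[] wire) throws java.io.IOException {
            CodedInputStream in = CodedInputStream.newInstance(wire);
            boolean negative = in.readRawByte() > 0;
            long[] parts = new long[4]; // hour, minute, second, micros
            for (int i = 0; i < parts.length && !in.isAtEnd(); i++) {
                parts[i] = in.readInt64();
            }
            return String.format("%s%02d:%02d:%02d.%06d",
                negative ? "-" : "", parts[0], parts[1], parts[2], parts[3]);
        }
    }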
     @Override
     public long estimateSize() {
         if (row != null) {
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/AbstractAccumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/AbstractAccumulator.java
new file mode 100644
index 000000000..8e520c655
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/AbstractAccumulator.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.accumulator;
+
+import com.alibaba.polardbx.executor.chunk.Block;
+import com.alibaba.polardbx.executor.chunk.Chunk;
+
+/**
+ * Base class for Accumulator.
+ * <p>
+ * Override one of the three accumulate methods to make it work
+ *
+ * @author Eric Fu
+ */
+abstract class AbstractAccumulator implements Accumulator {
+
+    @Override
+    public final void accumulate(int groupId, Chunk inputChunk, int position) {
+        final int inputSize = getInputTypes().length;
+        if (inputSize == 0) {
+            accumulate(groupId);
+        } else if (inputSize == 1) {
+            accumulate(groupId, inputChunk.getBlock(0), position);
+        } else {
+            throw new UnsupportedOperationException(getClass().getName() + " has multiple arguments");
+        }
+    }
+
+    /**
+     * accumulate method with no arguments, e.g. COUNT(*)
+     */
+    void accumulate(int groupId) {
+        throw new AssertionError("not implemented");
+    }
+
+    /**
+     * accumulate method with one argument, e.g. SUM(x)
+     *
+     * @param position value position in block
+     */
+    void accumulate(int groupId, Block block, int position) {
+        throw new AssertionError("not implemented");
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/Accumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/Accumulator.java
new file mode 100644
index 000000000..7cc527030
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/Accumulator.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.accumulator;
+
+import com.alibaba.polardbx.executor.chunk.BlockBuilder;
+import com.alibaba.polardbx.executor.chunk.Chunk;
+import com.alibaba.polardbx.optimizer.core.datatype.DataType;
+
+public interface Accumulator {
+
+    /**
+     * Get expected input types. Returns null if any types are accepted
+     */
+    DataType[] getInputTypes();
+
+    /**
+     * Append a new group with initial value
+     */
+    void appendInitValue();
+
+    /**
+     * Accumulate a value into group
+     */
+    void accumulate(int groupId, Chunk inputChunk, int position);
+
+    default void accumulate(int groupId, Chunk inputChunk, int[] groupIdSelection, int selSize) {
+        // Fall back to normal processing if the method is not overridden.
+        for (int i = 0; i < selSize; i++) {
+            accumulate(groupId, inputChunk, groupIdSelection[i]);
+        }
+    }
+
+    default void accumulate(int groupId, Chunk inputChunk, int startIndexIncluded, int endIndexExcluded) {
+        // Fall back to normal processing if the method is not overridden.
+        for (int i = startIndexIncluded; i < endIndexExcluded; i++) {
+            accumulate(groupId, inputChunk, i);
+        }
+    }
+
+    default void accumulate(int[] groupIds, Chunk inputChunk, int positionCount) {
+        // Fall back to normal processing if the method is not overridden.
+        for (int position = 0; position < positionCount; position++) {
+            accumulate(groupIds[position], inputChunk, position);
+        }
+    }
+
+    // for group join
+    // the probe positions array may have repeated elements like {0, 0, 1, 1, 1, 2, 5, 5, 7 ...}
+    default void accumulate(int[] groupIds, Chunk inputChunk, int[] probePositions, int selSize) {
+        // Fall back to normal processing if the method is not overridden.
+        for (int i = 0; i < selSize; i++) {
+            int position = probePositions[i];
+            accumulate(groupIds[position], inputChunk, position);
+        }
+    }
+
+    /**
+     * Get the aggregated result
+     */
+    void writeResultTo(int groupId, BlockBuilder bb);
+
+    /**
+     * Estimate the memory consumption
+     */
+    long estimateSize();
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/AccumulatorBuilders.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/AccumulatorBuilders.java
index e69de29bb..8a4115a42 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/AccumulatorBuilders.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/AccumulatorBuilders.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.accumulator;
+
+import com.alibaba.polardbx.common.datatype.Decimal;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.datatype.DataType;
+import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator;
+import org.apache.calcite.sql.SqlKind;
+
+public abstract class AccumulatorBuilders {
+
+    public static Accumulator create(Aggregator aggregator, DataType aggValueType, DataType[] inputType, int capacity,
+                                     ExecutionContext context) {
+        Class clazz = aggValueType.getDataClass();
+        if (aggregator.getSqlKind() == SqlKind.COUNT) {
+            assert clazz == Long.class;
+            if (aggregator.getInputColumnIndexes().length == 0) {
+                return new CountRowsAccumulator(capacity);
+            } else {
+                return new CountAccumulator(capacity);
+            }
+        } else if (aggregator.getSqlKind() == SqlKind.SUM) {
+            if (clazz == Decimal.class) {
+                return new DecimalSumAccumulator(capacity, aggValueType);
+            } else if (clazz == Long.class) {
+                return new LongSumAccumulator(capacity);
+            } else if (clazz == Double.class) {
+                return new DoubleSumAccumulator(capacity);
+            }
+        } else if (aggregator.getSqlKind() == SqlKind.SUM0) {
+            assert clazz == Long.class;
+            return new LongSum0Accumulator(capacity);
+        } else if (aggregator.getSqlKind() == SqlKind.MIN || aggregator.getSqlKind() == SqlKind.MAX) {
+            final boolean isMin = aggregator.getSqlKind() == SqlKind.MIN;
+            if (clazz == Decimal.class) {
+                return new DecimalMaxMinAccumulator(capacity, isMin);
+            } else if (clazz == Double.class) {
+                return new DoubleMaxMinAccumulator(capacity, isMin);
+            } else if (clazz == Long.class) {
+                return new LongMaxMinAccumulator(capacity, isMin);
+            }
+        } else if (aggregator.getSqlKind() == SqlKind.AVG) {
+            if (clazz == Decimal.class) {
+                return new DecimalAvgAccumulator(capacity, context);
+            } else if (clazz == Double.class) {
+                return new DoubleAvgAccumulator(capacity);
+            }
+        } else if (aggregator.getSqlKind() == SqlKind.BIT_OR) {
+            if (clazz == Long.class) {
+                return new LongBitOrAccumulator(capacity);
+            }
+        } else if (aggregator.getSqlKind() == SqlKind.BIT_XOR) {
+            if (clazz == Long.class) {
+                return new LongBitXorAccumulator(capacity);
+            }
+        } else if (aggregator.getSqlKind() == SqlKind.__FIRST_VALUE) {
+            return new FirstValueAccumulator(aggValueType, context);
+        } else if (aggregator.getSqlKind() == SqlKind.HYPER_LOGLOG) {
+            return new HyperLogLogAccumulator(aggregator, inputType, capacity);
+        } else if (aggregator.getSqlKind() == SqlKind.PARTIAL_HYPER_LOGLOG) {
+            return new PartialHyperLogLogAccumulator(aggregator, inputType, capacity);
+        } else if (aggregator.getSqlKind() == SqlKind.FINAL_HYPER_LOGLOG) {
+            return new FinalHyperLogLogAccumulator(aggregator, inputType, capacity);
+        } else if (aggregator.getSqlKind() == SqlKind.CHECK_SUM) {
+            return new CheckSumAccumulator(aggregator, inputType, capacity);
+        } else if (aggregator.getSqlKind() == SqlKind.CHECK_SUM_MERGE) {
+            return new CheckSumMergeAccumulator(capacity);
+        } else if (aggregator.getSqlKind() == SqlKind.CHECK_SUM_V2) {
+            return new CheckSumV2Accumulator(aggregator, inputType, capacity);
+        } else if (aggregator.getSqlKind() == SqlKind.CHECK_SUM_V2_MERGE) {
+            return new CheckSumV2MergeAccumulator(capacity);
+        }
+        return new WrapAggregatorAccumulator(aggregator, inputType, aggValueType, capacity);
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/CheckSumAccumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/CheckSumAccumulator.java
index 0963ec2c4..40a140fc7 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/CheckSumAccumulator.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/CheckSumAccumulator.java
@@ -16,33 +16,37 @@
 
 package com.alibaba.polardbx.executor.accumulator;
 
+import com.alibaba.polardbx.common.IOrderInvariantHash;
 import com.alibaba.polardbx.common.OrderInvariantHasher;
 import com.alibaba.polardbx.executor.accumulator.state.NullableCheckSumGroupState;
-import com.alibaba.polardbx.executor.calc.AbstractAggregator;
 import com.alibaba.polardbx.executor.chunk.Block;
 import com.alibaba.polardbx.executor.chunk.BlockBuilder;
 import com.alibaba.polardbx.executor.chunk.Chunk;
 import com.alibaba.polardbx.optimizer.core.datatype.DataType;
-import java.util.zip.CRC32;
+import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator;
 
-import java.util.List;
+import java.util.zip.CRC32;
 
-public class CheckSumAccumulator extends AbstractAggregator {
-    private final List<DataType> inputTypes;
+public class CheckSumAccumulator implements Accumulator {
+    private final DataType[] inputTypes;
 
-    private NullableCheckSumGroupState groupState;
+    private final NullableCheckSumGroupState groupState;
 
     private final static byte SEPARATOR_TAG = (byte) 255;
     private final static byte NULL_TAG = (byte) 254;
 
-    public CheckSumAccumulator(int[] aggTargetIndexes, DataType outType, int filterArg, List<DataType> inputTypes) {
-        super(aggTargetIndexes, false, new DataType[] {outType}, outType, filterArg);
-        this.inputTypes = inputTypes;
+    public CheckSumAccumulator(Aggregator aggregator, DataType[] rowInputType, int capacity) {
+        int[] inputColumnIndexes = aggregator.getInputColumnIndexes();
+        this.inputTypes = new DataType[inputColumnIndexes.length];
+        for (int i = 0; i < inputTypes.length; i++) {
+            inputTypes[i] = rowInputType[inputColumnIndexes[i]];
+        }
+        this.groupState = new NullableCheckSumGroupState(capacity, OrderInvariantHasher.class);
     }
 
     @Override
-    public void open(int capacity) {
-        groupState = new NullableCheckSumGroupState(capacity);
+    public DataType[] getInputTypes() {
+        return inputTypes;
     }
 
     @Override
@@ -55,8 +59,8 @@ public void accumulate(int groupId, Chunk inputChunk, int position) {
 
         // get crc result
         CRC32 crc = new CRC32();
-        for (int i = 0; i < aggIndexInChunk.length; i++) {
-            Block inputBlock = inputChunk.getBlock(aggIndexInChunk[i]);
+        for (int i = 0; i < inputChunk.getBlockCount(); i++) {
+            Block inputBlock = inputChunk.getBlock(i);
             if (inputBlock.isNull(position)) {
                 crc.update(NULL_TAG);
             } else {
@@ -74,7 +78,7 @@ public void accumulate(int groupId, Chunk inputChunk, int position) {
             orderInvariantHasher.add(crcResult);
             groupState.set(groupId, orderInvariantHasher);
         } else {
-            OrderInvariantHasher orderInvariantHasher = groupState.getHasher(groupId);
+            IOrderInvariantHash orderInvariantHasher = groupState.getHasher(groupId);
             orderInvariantHasher.add(crcResult);
         }
     }
@@ -88,11 +92,6 @@ public void writeResultTo(int groupId, BlockBuilder bb) {
         }
     }
 
-    @Override
-    public void resetToInitValue(int groupId) {
-        this.groupState.set(groupId, null);
-    }
-
     @Override
     public long estimateSize() {
         return groupState.estimateSize();
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/CheckSumMergeAccumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/CheckSumMergeAccumulator.java
index de1a3c361..cf0330fe2
100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/CheckSumMergeAccumulator.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/CheckSumMergeAccumulator.java @@ -16,32 +16,31 @@ package com.alibaba.polardbx.executor.accumulator; +import com.alibaba.polardbx.common.IOrderInvariantHash; import com.alibaba.polardbx.common.OrderInvariantHasher; import com.alibaba.polardbx.executor.accumulator.state.NullableCheckSumGroupState; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; import com.alibaba.polardbx.executor.chunk.Block; import com.alibaba.polardbx.executor.chunk.BlockBuilder; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import java.util.List; +public class CheckSumMergeAccumulator implements Accumulator { -public class CheckSumMergeAccumulator extends AbstractAggregator { - private final List inputTypes; + private static final DataType[] INPUT_TYPES = new DataType[] {DataTypes.LongType}; - private NullableCheckSumGroupState groupState; + private final NullableCheckSumGroupState groupState; private final static byte SEPARATOR_TAG = (byte) 255; private final static byte NULL_TAG = (byte) 254; - public CheckSumMergeAccumulator(int index, DataType outType, int filterArg, List inputTypes) { - super(new int[] {index}, false, new DataType[] {outType}, outType, filterArg); - this.inputTypes = inputTypes; + public CheckSumMergeAccumulator(int capacity) { + this.groupState = new NullableCheckSumGroupState(capacity, OrderInvariantHasher.class); } @Override - public void open(int capacity) { - groupState = new NullableCheckSumGroupState(capacity); + public DataType[] getInputTypes() { + return INPUT_TYPES; } @Override @@ -63,7 +62,7 @@ public void accumulate(int groupId, Chunk inputChunk, int position) { orderInvariantHasher.add(toMerge); groupState.set(groupId, orderInvariantHasher); } else { - OrderInvariantHasher orderInvariantHasher = groupState.getHasher(groupId); + IOrderInvariantHash orderInvariantHasher = groupState.getHasher(groupId); orderInvariantHasher.add(toMerge); } } @@ -77,11 +76,6 @@ public void writeResultTo(int groupId, BlockBuilder bb) { } } - @Override - public void resetToInitValue(int groupId) { - this.groupState.set(groupId, null); - } - @Override public long estimateSize() { return groupState.estimateSize(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/CheckSumV2Accumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/CheckSumV2Accumulator.java new file mode 100644 index 000000000..2377b4d67 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/CheckSumV2Accumulator.java @@ -0,0 +1,104 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
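The checksum accumulators above hash each row with CRC32, tagging NULL columns and column boundaries with reserved bytes, then fold the per-row CRCs into an order-invariant state so row order cannot change the result. A sketch of the idea; the actual combine function of OrderInvariantHasher is internal to PolarDB-X, so plain addition stands in for it here purely to illustrate order invariance:

import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;

public class ChecksumSketch {
    static final byte SEPARATOR_TAG = (byte) 255;
    static final byte NULL_TAG = (byte) 254;

    // CRC of one row: tagged column bytes, separator after every column.
    static long rowCrc(String[] columns) {
        CRC32 crc = new CRC32();
        for (String col : columns) {
            if (col == null) {
                crc.update(NULL_TAG);
            } else {
                crc.update(col.getBytes(StandardCharsets.UTF_8));
            }
            crc.update(SEPARATOR_TAG);
        }
        return crc.getValue();
    }

    public static void main(String[] args) {
        String[][] rows = {{"1", null}, {"2", "x"}};
        long h1 = rowCrc(rows[0]) + rowCrc(rows[1]);
        long h2 = rowCrc(rows[1]) + rowCrc(rows[0]); // rows in reversed order
        System.out.println(h1 == h2);                // true: combine is commutative
    }
}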
+ */ + +package com.alibaba.polardbx.executor.accumulator; + +import com.alibaba.polardbx.common.CrcAccumulator; +import com.alibaba.polardbx.common.IOrderInvariantHash; +import com.alibaba.polardbx.common.RevisableOrderInvariantHash; +import com.alibaba.polardbx.executor.accumulator.state.NullableCheckSumGroupState; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; + +import java.util.zip.CRC32; + +/** + * @author yaozhili + */ +public class CheckSumV2Accumulator implements Accumulator { + private final DataType[] inputTypes; + + private final NullableCheckSumGroupState groupState; +// +// private final static byte SEPARATOR_TAG = (byte) 255; +// private final static byte NULL_TAG = (byte) 254; + + public CheckSumV2Accumulator(Aggregator aggregator, DataType[] rowInputType, int capacity) { + int[] inputColumnIndexes = aggregator.getInputColumnIndexes(); + this.inputTypes = new DataType[inputColumnIndexes.length]; + for (int i = 0; i < inputTypes.length; i++) { + inputTypes[i] = rowInputType[inputColumnIndexes[i]]; + } + this.groupState = new NullableCheckSumGroupState(capacity, RevisableOrderInvariantHash.class); + } + + @Override + public DataType[] getInputTypes() { + return inputTypes; + } + + @Override + public void appendInitValue() { + this.groupState.appendNull(); + } + + @Override + public void accumulate(int groupId, Chunk inputChunk, int position) { + // get crc result + CRC32 crc = new CRC32(); + + for (int i = 0; i < inputChunk.getBlockCount(); i++) { + Block inputBlock = inputChunk.getBlock(i); + if (inputBlock.isNull(position)) { + crc.update(CrcAccumulator.NULL_TAG); + } else { + // Must keep compatible with columnar writers. + int checksum = inputBlock.checksum(position); + crc.update(new byte[] { + (byte) (checksum >>> 24), (byte) (checksum >>> 16), (byte) (checksum >>> 8), (byte) checksum}); + } + crc.update(CrcAccumulator.SEPARATOR_TAG); + } + long crcResult = crc.getValue(); + + // write to group state + if (groupState.isNull(groupId)) { + RevisableOrderInvariantHash revisableOrderInvariantHash = new RevisableOrderInvariantHash(); + revisableOrderInvariantHash.add(crcResult); + groupState.set(groupId, revisableOrderInvariantHash); + } else { + IOrderInvariantHash revisableOrderInvariantHash = groupState.getHasher(groupId); + revisableOrderInvariantHash.add(crcResult); + } + } + + @Override + public void writeResultTo(int groupId, BlockBuilder bb) { + if (groupState.isNull(groupId)) { + bb.appendNull(); + } else { + bb.writeLong(groupState.get(groupId)); + } + } + + @Override + public long estimateSize() { + return groupState.estimateSize(); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/CheckSumV2MergeAccumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/CheckSumV2MergeAccumulator.java new file mode 100644 index 000000000..748a0f7c4 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/CheckSumV2MergeAccumulator.java @@ -0,0 +1,85 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
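CheckSumV2Accumulator above feeds each column's int checksum into CRC32 as four big-endian bytes so the result stays byte-compatible with the columnar writers. The manual shift sequence is equivalent to a big-endian ByteBuffer encoding, as this self-contained check demonstrates:

import java.nio.ByteBuffer;
import java.util.zip.CRC32;

public class BigEndianCheck {
    public static void main(String[] args) {
        int checksum = 0xCAFEBABE;

        // The shift sequence used in the patch.
        CRC32 a = new CRC32();
        a.update(new byte[] {
            (byte) (checksum >>> 24), (byte) (checksum >>> 16),
            (byte) (checksum >>> 8), (byte) checksum});

        // ByteBuffer is big-endian by default.
        CRC32 b = new CRC32();
        b.update(ByteBuffer.allocate(4).putInt(checksum).array());

        System.out.println(a.getValue() == b.getValue()); // true
    }
}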
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.accumulator; + +import com.alibaba.polardbx.common.RevisableOrderInvariantHash; +import com.alibaba.polardbx.executor.accumulator.state.NullableCheckSumGroupState; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +/** + * @author yaozhili + */ +public class CheckSumV2MergeAccumulator implements Accumulator { + + private static final DataType[] INPUT_TYPES = new DataType[] {DataTypes.LongType}; + + private final NullableCheckSumGroupState groupState; + + private final static byte SEPARATOR_TAG = (byte) 255; + private final static byte NULL_TAG = (byte) 254; + + public CheckSumV2MergeAccumulator(int capacity) { + this.groupState = new NullableCheckSumGroupState(capacity, RevisableOrderInvariantHash.class); + } + + @Override + public DataType[] getInputTypes() { + return INPUT_TYPES; + } + + @Override + public void appendInitValue() { + this.groupState.appendNull(); + } + + @Override + public void accumulate(int groupId, Chunk inputChunk, int position) { + Block inputBlock = inputChunk.getBlock(0); + if (inputBlock.isNull(position)) { + return; + } + long toMerge = inputBlock.getLong(position); + + // write to group state + if (groupState.isNull(groupId)) { + RevisableOrderInvariantHash hash = new RevisableOrderInvariantHash(); + hash.add(toMerge).remove(0); + groupState.set(groupId, hash); + } else { + RevisableOrderInvariantHash hash = (RevisableOrderInvariantHash) groupState.getHasher(groupId); + hash.add(toMerge).remove(0); + } + } + + @Override + public void writeResultTo(int groupId, BlockBuilder bb) { + if (groupState.isNull(groupId)) { + bb.appendNull(); + } else { + bb.writeLong(groupState.get(groupId)); + } + } + + @Override + public long estimateSize() { + return groupState.estimateSize(); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/CountAccumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/CountAccumulator.java new file mode 100644 index 000000000..2dee64efb --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/CountAccumulator.java @@ -0,0 +1,80 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
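The *_MERGE accumulators implement the second phase of a two-phase aggregation: each shard emits its partial checksum as a single LONG column, and the merge accumulator folds those partials into the final value. Because the combine is associative and commutative, merging partials gives the same answer as hashing all rows in one pass. A sketch with addition standing in for the internal RevisableOrderInvariantHash combine:

public class TwoPhaseChecksum {
    // Phase 1: per-shard partial (order-invariant combine of row hashes).
    static long partial(long[] rowHashes) {
        long acc = 0;
        for (long h : rowHashes) {
            acc += h;
        }
        return acc;
    }

    public static void main(String[] args) {
        long[] shard1 = {11, 22, 33};
        long[] shard2 = {44, 55};
        // Phase 2: the merge accumulator sees one LONG column of partials.
        long merged = partial(shard1) + partial(shard2);
        long direct = partial(new long[] {11, 22, 33, 44, 55});
        System.out.println(merged == direct); // true: merging partials == direct hash
    }
}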
+ */ + +package com.alibaba.polardbx.executor.accumulator; + +import com.alibaba.polardbx.executor.accumulator.state.NullableLongGroupState; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; + +public class CountAccumulator implements Accumulator { + + private final NullableLongGroupState state; + + public CountAccumulator(int capacity) { + this.state = new NullableLongGroupState(capacity); + } + + @Override + public void appendInitValue() { + state.append(0L); + } + + @Override + public void accumulate(int[] groupIds, Chunk inputChunk, int[] probePositions, int selSize) { + if (inputChunk.getBlockCount() == 1) { + inputChunk.getBlock(0).count(groupIds, probePositions, selSize, state); + } else { + Accumulator.super.accumulate(groupIds, inputChunk, probePositions, selSize); + } + } + + @Override + public void accumulate(int groupId, Chunk inputChunk, int position) { + assert inputChunk.getBlockCount() > 0; + boolean notNull = true; + for (int i = 0; i < inputChunk.getBlockCount(); i++) { + if (inputChunk.getBlock(i).isNull(position)) { + notNull = false; + break; + } + } + if (notNull) { + state.set(groupId, state.get(groupId) + 1); + } + } + + @Override + public DataType[] getInputTypes() { + // COUNT() accepts any input types + return null; + } + + @Override + public void writeResultTo(int groupId, BlockBuilder bb) { + if (state.isNull(groupId)) { + bb.appendNull(); + } else { + bb.writeLong(state.get(groupId)); + } + } + + @Override + public long estimateSize() { + return state.estimateSize(); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/CountRowsAccumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/CountRowsAccumulator.java new file mode 100644 index 000000000..fefe67419 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/CountRowsAccumulator.java @@ -0,0 +1,78 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
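CountAccumulator above implements MySQL's multi-argument COUNT semantics: a row contributes to the count only when every argument is non-null. A plain-array illustration of the same rule:

public class CountSemantics {
    public static void main(String[] args) {
        Object[][] rows = {
            {1, "x"},
            {null, "y"},   // not counted: first argument is NULL
            {3, null},     // not counted: second argument is NULL
            {4, "z"},
        };
        long count = 0;
        for (Object[] row : rows) {
            boolean notNull = true;
            for (Object col : row) {
                if (col == null) {
                    notNull = false;
                    break;
                }
            }
            if (notNull) {
                count++;
            }
        }
        System.out.println(count); // 2, i.e. COUNT(a, b) over these rows
    }
}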
+ */ + +package com.alibaba.polardbx.executor.accumulator; + +import com.alibaba.polardbx.executor.accumulator.state.LongGroupState; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; + +public class CountRowsAccumulator extends AbstractAccumulator { + + private static final DataType[] INPUT_TYPES = new DataType[] {}; + + private final LongGroupState state; + + CountRowsAccumulator(int capacity) { + this.state = new LongGroupState(capacity); + } + + @Override + public void appendInitValue() { + state.append(0L); + } + + @Override + public DataType[] getInputTypes() { + return INPUT_TYPES; + } + + @Override + public void accumulate(int groupId) { + state.set(groupId, state.get(groupId) + 1); + } + + @Override + public void accumulate(int groupId, Chunk inputChunk, int[] groupIdSelection, int selSize) { + final int accumulation = selSize; + state.set(groupId, state.get(groupId) + accumulation); + } + + @Override + public void accumulate(int groupId, Chunk inputChunk, int startIndexIncluded, int endIndexExcluded) { + final int accumulation = endIndexExcluded - startIndexIncluded; + state.set(groupId, state.get(groupId) + accumulation); + } + + @Override + public void accumulate(int[] groupIds, Chunk inputChunk, int positionCount) { + for (int i = 0; i < positionCount; i++) { + int groupId = groupIds[i]; + state.set(groupId, state.get(groupId) + 1); + } + } + + @Override + public void writeResultTo(int position, BlockBuilder bb) { + bb.writeLong(state.get(position)); + } + + @Override + public long estimateSize() { + return state.estimateSize(); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/DecimalAvgAccumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/DecimalAvgAccumulator.java new file mode 100644 index 000000000..6623f49b5 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/DecimalAvgAccumulator.java @@ -0,0 +1,124 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
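Because COUNT(*) never inspects column values, CountRowsAccumulator can fold a whole selection or index range into a single addition instead of looping, which is what its batch overloads above do. A quick equivalence check:

public class BatchCountCheck {
    public static void main(String[] args) {
        int startIncluded = 3, endExcluded = 10;

        long rowByRow = 0;
        for (int i = startIncluded; i < endExcluded; i++) {
            rowByRow += 1;               // the row-at-a-time fallback
        }
        long batched = endExcluded - startIncluded; // constant-time shortcut

        System.out.println(rowByRow == batched);    // true
    }
}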
+ */ + +package com.alibaba.polardbx.executor.accumulator; + +import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.datatype.DecimalRoundMod; +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.FastDecimalUtils; +import com.alibaba.polardbx.executor.accumulator.state.NullableDecimalLongGroupState; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +import java.util.Optional; + +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DEFAULT_DIV_PRECISION_INCREMENT; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DIV_PRECISION_INCREMENT; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.E_DEC_DIV_ZERO; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.MAX_DECIMAL_SCALE; + +public class DecimalAvgAccumulator extends AbstractAccumulator { + + private static final DataType[] INPUT_TYPES = new DataType[] {DataTypes.DecimalType}; + + private final NullableDecimalLongGroupState state; + + private Decimal cache; + + /** + * get div_precision_increment user variables from session. + */ + private final int divPrecisionIncr; + + DecimalAvgAccumulator(int capacity, ExecutionContext context) { + this.state = new NullableDecimalLongGroupState(capacity); + this.divPrecisionIncr = Optional.ofNullable(context) + .map(ExecutionContext::getServerVariables) + .map(m -> m.get(DIV_PRECISION_INCREMENT)) + .map(n -> ((Number) n).intValue()) + .map(i -> Math.min(i, MAX_DECIMAL_SCALE)) + .orElse(DEFAULT_DIV_PRECISION_INCREMENT); + this.cache = new Decimal(); + } + + public void appendInitValue() { + state.appendNull(); + } + + @Override + void accumulate(int groupId, Block block, int position) { + if (block.isNull(position)) { + return; + } + + final Decimal value = block.getDecimal(position); + if (state.isNull(groupId)) { + state.set(groupId, value.copy(), 1); + } else { + Decimal before = state.getDecimal(groupId); + + // avoid to allocate memory + before.add(value, cache); + Decimal sum = cache; + cache = before; + long count = state.getLong(groupId) + 1; + state.set(groupId, sum, count); + } + } + + @Override + public DataType[] getInputTypes() { + return INPUT_TYPES; + } + + @Override + public void writeResultTo(int groupId, BlockBuilder bb) { + if (state.isNull(groupId)) { + bb.appendNull(); + } else { + DecimalStructure rounded = new DecimalStructure(); + DecimalStructure unRounded = new DecimalStructure(); + + // fetch sum & count decimal value + Decimal sum = state.getDecimal(groupId); + Decimal count = Decimal.fromLong(state.getLong(groupId)); + + // do divide + int error = FastDecimalUtils.div(sum.getDecimalStructure(), count.getDecimalStructure(), unRounded, + divPrecisionIncr); + if (error == E_DEC_DIV_ZERO) { + // divide zero, set null + bb.appendNull(); + } else { + // do round + FastDecimalUtils.round(unRounded, rounded, divPrecisionIncr, DecimalRoundMod.HALF_UP); + Decimal avg = new Decimal(rounded); + bb.writeDecimal(avg); + } + } + } + + @Override + public long estimateSize() { + return state.estimateSize(); + } +} + + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/DecimalMaxMinAccumulator.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/DecimalMaxMinAccumulator.java new file mode 100644 index 000000000..628db1820 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/DecimalMaxMinAccumulator.java @@ -0,0 +1,83 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.accumulator; + +import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.datatype.FastDecimalUtils; +import com.alibaba.polardbx.executor.accumulator.state.NullableDecimalGroupState; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +public class DecimalMaxMinAccumulator extends AbstractAccumulator { + + private static final DataType[] INPUT_TYPES = new DataType[] {DataTypes.DecimalType}; + + private final NullableDecimalGroupState state; + private final boolean isMin; + + public DecimalMaxMinAccumulator(int capacity, boolean isMin) { + this.state = new NullableDecimalGroupState(capacity); + this.isMin = isMin; + } + + @Override + public void appendInitValue() { + state.appendNull(); + } + + @Override + void accumulate(int groupId, Block block, int position) { + if (block.isNull(position)) { + return; + } + + final Decimal value = block.getDecimal(position); + if (state.isNull(groupId)) { + state.set(groupId, value); + } else { + Decimal beforeValue = state.get(groupId); + + // compare the decimal & before decimal value, find the min/max value. + int cmpRes = FastDecimalUtils.compare(beforeValue.getDecimalStructure(), value.getDecimalStructure()); + Decimal afterValue = isMin ? + (cmpRes <= 0 ? beforeValue : value) : + (cmpRes >= 0 ? beforeValue : value); + state.set(groupId, afterValue); + } + } + + @Override + public DataType[] getInputTypes() { + return INPUT_TYPES; + } + + @Override + public void writeResultTo(int groupId, BlockBuilder bb) { + if (state.isNull(groupId)) { + bb.appendNull(); + } else { + bb.writeDecimal(state.get(groupId)); + } + } + + @Override + public long estimateSize() { + return state.estimateSize(); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/DecimalSumAccumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/DecimalSumAccumulator.java new file mode 100644 index 000000000..ba99795dd --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/DecimalSumAccumulator.java @@ -0,0 +1,573 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
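DecimalAvgAccumulator above keeps a (sum, count) pair per group and divides only when the result is written out, widening the scale by div_precision_increment and rounding half-up. A BigDecimal sketch of that output step; BigDecimal stands in for the engine's Decimal type, and 4 is MySQL's default for div_precision_increment:

import java.math.BigDecimal;
import java.math.RoundingMode;

public class DecimalAvgSketch {
    public static void main(String[] args) {
        int divPrecisionIncr = 4;                  // MySQL default
        BigDecimal sum = new BigDecimal("10.00");  // running sum, scale 2
        long count = 3;                            // running row count

        // Divide at output time, widening the scale, then round half-up.
        BigDecimal avg = sum.divide(BigDecimal.valueOf(count),
            sum.scale() + divPrecisionIncr, RoundingMode.HALF_UP);

        System.out.println(avg); // 3.333333
    }
}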
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.accumulator; + +import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.datatype.DecimalBox; +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.FastDecimalUtils; +import com.alibaba.polardbx.common.utils.MathUtils; +import com.alibaba.polardbx.executor.accumulator.state.DecimalBoxGroupState; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.chunk.DecimalBlock; +import com.alibaba.polardbx.executor.chunk.DecimalBlockBuilder; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DecimalType; +import com.google.common.annotations.VisibleForTesting; + +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.E_DEC_DEC128; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.E_DEC_DEC64; + +/** + * does not support mixed scale input + */ +public class DecimalSumAccumulator extends AbstractAccumulator { + private final DecimalStructure decimalStructure = new DecimalStructure(); + + private final DataType[] inputTypes; + private final DecimalBoxGroupState state; + private Decimal cache; + private int scale; + + /** + * low bits, high bits, error code + */ + private final long[] results = new long[3]; + + public DecimalSumAccumulator(int capacity, DataType inputType) { + this.cache = new Decimal(); + this.inputTypes = new DataType[1]; + this.inputTypes[0] = inputType; + this.scale = inputType.getScale(); + + this.state = new DecimalBoxGroupState(capacity, scale); + } + + @Override + public void appendInitValue() { + state.appendNull(); + } + + @Override + public void accumulate(int[] groupIds, Chunk inputChunk, int positionCount) { + doAccumulateV2(groupIds, inputChunk, positionCount); + } + + private void doAccumulateV2(int[] groupIds, Chunk inputChunk, int positionCount) { + Block inputBlock = inputChunk.getBlock(0); + DecimalBlock decimalBlock = inputBlock.cast(DecimalBlock.class); + + if (decimalBlock.isDecimal64()) { + rescale(decimalBlock.getScale()); + boolean[] nullArray = decimalBlock.mayHaveNull() ? 
decimalBlock.nulls() : null; + long[] decimalValueArray = decimalBlock.getDecimal64Values(); + int[] selection = decimalBlock.getSelection(); + + if (selection == null) { + if (nullArray == null) { + // CASE 1: no selection & no nulls + for (int position = 0; position < positionCount; position++) { + int groupId = groupIds[position]; + long decimal64Val = decimalValueArray[position]; + + accumulateDecimal64(groupId, decimal64Val); + } + } else { + // CASE 2: has no selection but has nulls + for (int position = 0; position < positionCount; position++) { + int groupId = groupIds[position]; + if (nullArray[position]) { + continue; + } + long decimal64Val = decimalValueArray[position]; + + accumulateDecimal64(groupId, decimal64Val); + } + } + } else { + if (nullArray == null) { + // CASE 3: has selection & no nulls + for (int position = 0; position < positionCount; position++) { + int groupId = groupIds[position]; + long decimal64Val = decimalValueArray[selection[position]]; + + accumulateDecimal64(groupId, decimal64Val); + } + } else { + // CASE 4: has selection & has nulls + for (int position = 0; position < positionCount; position++) { + int groupId = groupIds[position]; + if (nullArray[selection[position]]) { + continue; + } + long decimal64Val = decimalValueArray[selection[position]]; + + accumulateDecimal64(groupId, decimal64Val); + } + } + } + } else if (decimalBlock.isDecimal128()) { + rescale(decimalBlock.getScale()); + boolean[] nullArray = decimalBlock.mayHaveNull() ? decimalBlock.nulls() : null; + long[] decimal128LowValues = decimalBlock.getDecimal128LowValues(); + long[] decimal128HighValues = decimalBlock.getDecimal128HighValues(); + int[] selection = decimalBlock.getSelection(); + + if (selection == null) { + if (nullArray == null) { + // CASE 1: no selection & no nulls + for (int position = 0; position < positionCount; position++) { + int groupId = groupIds[position]; + long decimal128Low = decimal128LowValues[position]; + long decimal128High = decimal128HighValues[position]; + + accumulateDecimal128(groupId, decimal128Low, decimal128High); + } + } else { + // CASE 2: has no selection but has nulls + for (int position = 0; position < positionCount; position++) { + int groupId = groupIds[position]; + if (nullArray[position]) { + continue; + } + long decimal128Low = decimal128LowValues[position]; + long decimal128High = decimal128HighValues[position]; + + accumulateDecimal128(groupId, decimal128Low, decimal128High); + } + } + } else { + if (nullArray == null) { + // CASE 3: has selection & no nulls + for (int position = 0; position < positionCount; position++) { + int groupId = groupIds[position]; + long decimal128Low = decimal128LowValues[selection[position]]; + long decimal128High = decimal128HighValues[selection[position]]; + + accumulateDecimal128(groupId, decimal128Low, decimal128High); + } + } else { + // CASE 4: has selection & has nulls + for (int position = 0; position < positionCount; position++) { + int groupId = groupIds[position]; + if (nullArray[selection[position]]) { + continue; + } + long decimal128Low = decimal128LowValues[selection[position]]; + long decimal128High = decimal128HighValues[selection[position]]; + + accumulateDecimal128(groupId, decimal128Low, decimal128High); + } + } + } + } else { + // Fall back to row-by-row mode + for (int position = 0; position < positionCount; position++) { + accumulate(groupIds[position], inputBlock, position); + } + } + } + + @Override + public void accumulate(int groupId, Chunk inputChunk, int startIndexIncluded, int 
endIndexExcluded) { + Block inputBlock = inputChunk.getBlock(0); + DecimalBlock decimalBlock = inputBlock.cast(DecimalBlock.class); + + // Prepare result array and execute summary in vectorization mode. + results[0] = results[1] = results[2] = 0; + decimalBlock.cast(Block.class).sum(startIndexIncluded, endIndexExcluded, results); + + // Check sum result state and try to directly append sum result. + if (results[2] == E_DEC_DEC64) { + rescale(decimalBlock.getScale()); + long sumResult = results[0]; + accumulateDecimal64(groupId, sumResult); + } else if (results[2] == E_DEC_DEC128) { + rescale(decimalBlock.getScale()); + long decimal128Low = results[0]; + long decimal128High = results[1]; + accumulateDecimal128(groupId, decimal128Low, decimal128High); + } else { + // Fall back to row-by-row mode + for (int position = startIndexIncluded; position < endIndexExcluded; position++) { + accumulate(groupId, inputBlock, position); + } + } + } + + @Override + public void accumulate(int groupId, Chunk inputChunk, int[] groupIdSelection, int selSize) { + Block inputBlock = inputChunk.getBlock(0); + DecimalBlock decimalBlock = inputBlock.cast(DecimalBlock.class); + + // Prepare result array and execute summary in vectorization mode. + results[0] = results[1] = results[2] = 0; + decimalBlock.cast(Block.class).sum(groupIdSelection, selSize, results); + + // Check sum result state and try to directly append sum result. + if (results[2] == E_DEC_DEC64) { + rescale(decimalBlock.getScale()); + long sumResult = results[0]; + accumulateDecimal64(groupId, sumResult); + } else if (results[2] == E_DEC_DEC128) { + rescale(decimalBlock.getScale()); + long decimal128Low = results[0]; + long decimal128High = results[1]; + accumulateDecimal128(groupId, decimal128Low, decimal128High); + } else { + // Fall back to row-by-row mode + for (int i = 0; i < selSize; i++) { + int position = groupIdSelection[i]; + accumulate(groupId, inputBlock, position); + } + } + } + + @Override + public void accumulate(int groupId, Block block, int position) { + if (block.isNull(position)) { + return; + } + + DecimalBlock decimalBlock = block.cast(DecimalBlock.class); + if (decimalBlock.isDecimal64()) { + accumulateDecimal64(groupId, decimalBlock, position); + } else if (decimalBlock.isDecimal128()) { + accumulateDecimal128(groupId, decimalBlock, position); + } else { + accumulateDecimal(groupId, decimalBlock, position); + } + } + + private void accumulateDecimal64(int groupId, DecimalBlock decimalBlock, int position) { + rescale(decimalBlock.getScale()); + long decimal64Val = decimalBlock.getLong(position); + if (state.isNull(groupId)) { + state.set(groupId, decimal64Val); + } else if (state.isDecimal64(groupId)) { + long oldResult = state.getLong(groupId); + long addResult = decimal64Val + oldResult; + if (MathUtils.longAddOverflow(decimal64Val, oldResult, addResult)) { + // decimal64 overflow to decimal128 + accumulateDecimal64ToDecimal128(groupId, decimal64Val); + } else { + state.set(groupId, addResult); + } + } else if (state.isDecimal128(groupId)) { + accumulateDecimal64ToDecimal128(groupId, decimal64Val); + } else { + // already overflowed + // fall back to normal decimal add + normalAddDecimal64(groupId, decimal64Val); + } + } + + private void accumulateDecimal64ToDecimal128(int groupId, long decimal64) { + if (decimal64 >= 0) { + accumulateDecimal128(groupId, decimal64, 0); + } else { + accumulateDecimal128(groupId, decimal64, -1); + } + } + + private void accumulateDecimal128(int groupId, DecimalBlock decimalBlock, int position) { 
+ rescale(decimalBlock.getScale()); + long decimal128Low = decimalBlock.getDecimal128Low(position); + long decimal128High = decimalBlock.getDecimal128High(position); + accumulateDecimal128(groupId, decimal128Low, decimal128High); + } + + private void accumulateDecimal64(int groupId, long decimal64Val) { + if (state.isNull(groupId)) { + state.set(groupId, decimal64Val); + } else if (state.isDecimal64(groupId)) { + long oldResult = state.getLong(groupId); + long addResult = decimal64Val + oldResult; + if (MathUtils.longAddOverflow(decimal64Val, oldResult, addResult)) { + // decimal64 overflow to decimal128 + accumulateDecimal64ToDecimal128(groupId, decimal64Val); + } else { + state.set(groupId, addResult); + } + } else if (state.isDecimal128(groupId)) { + accumulateDecimal64ToDecimal128(groupId, decimal64Val); + } else { + // already overflowed + // fall back to normal decimal add + normalAddDecimal64(groupId, decimal64Val); + } + } + + private void accumulateDecimal128(int groupId, long decimal128Low, long decimal128High) { + if (state.isNull(groupId)) { + state.set(groupId, decimal128Low, decimal128High); + } else if (state.isDecimal64(groupId)) { + // convert state from decimal64 to decimal128 + long oldDecimal64 = state.getLong(groupId); + long valHigh = oldDecimal64 >= 0 ? 0 : -1; + decimal128High += valHigh; + long newDecimal128Low = oldDecimal64 + decimal128Low; + long carryOut = ((oldDecimal64 & decimal128Low) + | ((oldDecimal64 | decimal128Low) & (~newDecimal128Low))) >>> 63; + long newDecimal128High = decimal128High + carryOut; + if (MathUtils.longAddOverflow(carryOut, decimal128High, newDecimal128High)) { + // decimal128 result overflow + normalAddDecimal128(groupId, decimal128Low, decimal128High); + } else { + state.set(groupId, newDecimal128Low, newDecimal128High); + } + } else if (state.isDecimal128(groupId)) { + long oldDecimal128Low = state.getDecimal128Low(groupId); + long oldDecimal128High = state.getDecimal128High(groupId); + long newDecimal128High = oldDecimal128High + decimal128High; + if (MathUtils.longAddOverflow(oldDecimal128High, decimal128High, newDecimal128High)) { + // decimal128 result overflow + normalAddDecimal128(groupId, decimal128Low, decimal128High); + return; + } + + long newDecimal128Low = oldDecimal128Low + decimal128Low; + long carryOut = ((oldDecimal128Low & decimal128Low) + | ((oldDecimal128Low | decimal128Low) & (~newDecimal128Low))) >>> 63; + newDecimal128High += carryOut; + state.set(groupId, newDecimal128Low, newDecimal128High); + } else { + // already overflowed + // fall back to normal decimal add + normalAddDecimal128(groupId, decimal128Low, decimal128High); + } + } + + private void normalAddDecimal64(int groupId, long decimal64Val) { + Decimal value = new Decimal(decimal64Val, scale); + if (state.isNull(groupId)) { + // initialize the operand (not null) + state.set(groupId, value); + return; + } + Decimal beforeValue; + if (state.isNormalDecimal(groupId)) { + beforeValue = state.getDecimal(groupId); + } else if (state.isDecimal64(groupId)) { + DecimalStructure buffer = decimalStructure; + DecimalStructure result = new DecimalStructure(); + FastDecimalUtils.setDecimal128WithScale(buffer, result, + state.getDecimal128Low(groupId), state.getDecimal128High(groupId), scale); + beforeValue = new Decimal(result); + } else if (state.isDecimal128(groupId)) { + throw new UnsupportedOperationException(); + } else if (state.isDecimalBox(groupId)) { + beforeValue = state.getBox(groupId).getDecimalSum(); + } else { + throw new 
IllegalStateException("Expected Decimal64 state"); + } + + // avoid reset memory to 0 + FastDecimalUtils.add( + beforeValue.getDecimalStructure(), + value.getDecimalStructure(), + cache.getDecimalStructure(), + false); + + // swap variants to avoid allocating memory + Decimal afterValue = cache; + cache = beforeValue; + + state.set(groupId, afterValue); + } + + private void normalAddDecimal128(int groupId, long decimal128Low, long decimal128High) { + DecimalStructure buffer = decimalStructure; + DecimalStructure result = new DecimalStructure(); + FastDecimalUtils.setDecimal128WithScale(buffer, result, decimal128Low, decimal128High, scale); + Decimal value = new Decimal(result); + if (state.isNull(groupId)) { + // initialize the operand (not null) + state.set(groupId, value); + return; + } + Decimal beforeValue; + if (state.isNormalDecimal(groupId)) { + beforeValue = state.getDecimal(groupId); + } else if (state.isDecimal64(groupId)) { + beforeValue = new Decimal(state.getLong(groupId), scale); + } else if (state.isDecimal128(groupId)) { + DecimalStructure result2 = new DecimalStructure(); + FastDecimalUtils.setDecimal128WithScale(buffer, result2, + state.getDecimal128Low(groupId), state.getDecimal128High(groupId), scale); + beforeValue = new Decimal(result2); + } else if (state.isDecimalBox(groupId)) { + beforeValue = state.getBox(groupId).getDecimalSum(); + } else { + throw new IllegalStateException("Expected Decimal state: " + state.getFlag(groupId)); + } + + // avoid reset memory to 0 + FastDecimalUtils.add( + beforeValue.getDecimalStructure(), + value.getDecimalStructure(), + cache.getDecimalStructure(), + false); + + // swap variants to avoid allocating memory + Decimal afterValue = cache; + cache = beforeValue; + + state.set(groupId, afterValue); + } + + private void accumulateDecimal(int groupId, DecimalBlock decimalBlock, int position) { + boolean isSimple = decimalBlock.isSimple(); + if (state.isNormalDecimal(groupId)) { + // normalDecimal + ANY -> normalDecimal + normalAddDecimal(groupId, decimalBlock, position); + return; + } + + if (isSimple) { + // 1. best case: all decimal value in block is simple + if (state.isNull(groupId)) { + // null + decimalBox -> decimalBox + DecimalBox box = new DecimalBox(scale); + int a1 = decimalBlock.fastInt1(position); + int a2 = decimalBlock.fastInt2(position); + int b = decimalBlock.fastFrac(position); + box.add(a1, a2, b); + + state.set(groupId, box); + } else if (state.isDecimal64(groupId)) { + // decimal64 + decimalBox -> normalDecimal + state.toNormalDecimalGroupState(); + normalAddDecimal(groupId, decimalBlock, position); + } else if (state.isDecimal128(groupId)) { + // decimal128 + decimalBox -> normalDecimal + state.toNormalDecimalGroupState(); + normalAddDecimal(groupId, decimalBlock, position); + } else if (state.isDecimalBox(groupId)) { + // decimalBox + decimalBox -> decimalBox + DecimalBox box = state.getBox(groupId); + + int a1 = decimalBlock.fastInt1(position); + int a2 = decimalBlock.fastInt2(position); + int b = decimalBlock.fastFrac(position); + box.add(a1, a2, b); + } else { + throw new UnsupportedOperationException("Unsupported decimal group state: " + + state.getFlag(groupId)); + } + // state.isNormalDecimal(groupId) is already handled + } else { + // 2. 
bad case: a decimal value is not simple in the block + // change state to normal + state.toNormalDecimalGroupState(); + + // do normal add + normalAddDecimal(groupId, decimalBlock, position); + } + } + + private void normalAddDecimal(int groupId, DecimalBlock decimalBlock, int position) { + DecimalStructure decimalStructure = this.decimalStructure; + decimalBlock.getDecimalStructure(decimalStructure, position); + + if (state.isNull(groupId)) { + // initialize the operand (not null) + state.set(groupId, new Decimal(decimalStructure.copy())); + } else { + Decimal beforeValue = state.getDecimal(groupId); + + // avoid reset memory to 0 + FastDecimalUtils.add( + beforeValue.getDecimalStructure(), + decimalStructure, + cache.getDecimalStructure(), + false); + + // swap variants to avoid allocating memory + Decimal afterValue = cache; + cache = beforeValue; + + state.set(groupId, afterValue); + } + } + + private void rescale(int newScale) { + if (scale == newScale) { + return; + } + if (!((DecimalType) inputTypes[0]).isDefaultScale()) { + throw new IllegalStateException("Decimal sum agg input scale does not match in runtime"); + } + this.scale = newScale; + this.inputTypes[0] = new DecimalType(inputTypes[0].getPrecision(), newScale); + this.state.rescale(newScale); + } + + @Override + public DataType[] getInputTypes() { + return inputTypes; + } + + @Override + public void writeResultTo(int groupId, BlockBuilder bb) { + DecimalBlockBuilder decimalBlockBuilder = (DecimalBlockBuilder) bb; + if (decimalBlockBuilder.isUnset()) { + decimalBlockBuilder.setScale(scale); + } + if (state.isNull(groupId)) { + decimalBlockBuilder.appendNull(); + } else if (state.isDecimal64(groupId)) { + decimalBlockBuilder.writeLong(state.getLong(groupId)); + } else if (state.isDecimal128(groupId)) { + decimalBlockBuilder.writeDecimal128( + state.getDecimal128Low(groupId), state.getDecimal128High(groupId)); + } else { + decimalBlockBuilder.writeDecimal(state.getDecimal(groupId)); + } + } + + @Override + public long estimateSize() { + return state.estimateSize(); + } + + @VisibleForTesting + public boolean isOverflowDecimal64(int groupId) { + if (state.isNull(groupId)) { + return false; + } + return !state.isDecimal64(groupId); + } + + @VisibleForTesting + public boolean isOverflowDecimal128(int groupId) { + if (state.isNull(groupId)) { + return false; + } + return !state.isDecimal64(groupId) && !state.isDecimal128(groupId); + } + + @VisibleForTesting + public boolean isDecimalBox(int groupId) { + if (state.isNull(groupId)) { + return false; + } + return state.isDecimalBox(groupId); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/DoubleAvgAccumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/DoubleAvgAccumulator.java new file mode 100644 index 000000000..c9d4c8dbf --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/DoubleAvgAccumulator.java @@ -0,0 +1,83 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
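The decimal64/decimal128 fast paths above rest on two bit tricks: detecting signed 64-bit overflow of an addition, and computing the unsigned carry out of the low word when adding 128-bit values (the carryOut expression in accumulateDecimal128). Both can be cross-checked against BigInteger; the predicate shown for MathUtils.longAddOverflow is the standard sign-based test and is an assumption about that helper:

import java.math.BigInteger;

public class CarryCheck {
    // Assumed predicate for MathUtils.longAddOverflow: overflow occurred iff
    // both operands share a sign and the result's sign differs.
    static boolean addOverflows(long a, long b, long r) {
        return ((a ^ r) & (b ^ r)) < 0;
    }

    public static void main(String[] args) {
        System.out.println(addOverflows(Long.MAX_VALUE, 1, Long.MAX_VALUE + 1)); // true

        // Unsigned carry out of the low-word addition, exactly as in the patch.
        long lowA = -1L, lowB = 1L; // unsigned: 2^64 - 1 and 1
        long lowSum = lowA + lowB;
        long carry = ((lowA & lowB) | ((lowA | lowB) & (~lowSum))) >>> 63;
        System.out.println(carry); // 1

        // Cross-check against exact 128-bit arithmetic.
        BigInteger mask = BigInteger.ONE.shiftLeft(64).subtract(BigInteger.ONE);
        BigInteger exact = BigInteger.valueOf(lowA).and(mask)
            .add(BigInteger.valueOf(lowB).and(mask));
        System.out.println(exact.testBit(64)); // true: bit 64 is the carry
    }
}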
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.accumulator; + +import com.alibaba.polardbx.executor.accumulator.state.NullableDoubleLongGroupState; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +public class DoubleAvgAccumulator extends AbstractAccumulator { + + private static final DataType[] INPUT_TYPES = new DataType[] {DataTypes.DoubleType}; + + private final NullableDoubleLongGroupState state; + + DoubleAvgAccumulator(int capacity) { + this.state = new NullableDoubleLongGroupState(capacity); + } + + @Override + public void appendInitValue() { + state.appendNull(); + } + + @Override + void accumulate(int groupId, Block block, int position) { + if (block.isNull(position)) { + return; + } + + final double value = block.getDouble(position); + if (state.isNull(groupId)) { + state.set(groupId, value, 1); + } else { + double sum = state.getDouble(groupId) + value; + long count = state.getLong(groupId) + 1; + state.set(groupId, sum, count); + } + } + + @Override + public DataType[] getInputTypes() { + return INPUT_TYPES; + } + + @Override + public void writeResultTo(int groupId, BlockBuilder bb) { + if (state.isNull(groupId)) { + bb.appendNull(); + } else { + Double avg = (Double) DataTypes.DoubleType.getCalculator().divide( + state.getDouble(groupId), + state.getLong(groupId)); + if (avg == null) { + bb.appendNull(); + } else { + bb.writeDouble(avg); + } + } + } + + @Override + public long estimateSize() { + return state.estimateSize(); + } +} + + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/DoubleMaxMinAccumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/DoubleMaxMinAccumulator.java new file mode 100644 index 000000000..4ae1e465e --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/DoubleMaxMinAccumulator.java @@ -0,0 +1,77 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
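The double-typed accumulators need none of that overflow machinery: IEEE-754 addition saturates to infinity rather than wrapping, and AVG reduces to one floating-point division at output time. For example:

public class DoubleAvgSketch {
    public static void main(String[] args) {
        double sum = 0;
        long count = 0;
        for (double v : new double[] {1.5, 2.5, 4.0}) {
            sum += v;      // running sum
            count++;       // running count
        }
        System.out.println(sum / count); // 2.6666666666666665

        // No wraparound for doubles: overflow saturates to Infinity.
        System.out.println(Double.MAX_VALUE + Double.MAX_VALUE); // Infinity
    }
}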
+ */ + +package com.alibaba.polardbx.executor.accumulator; + +import com.alibaba.polardbx.executor.accumulator.state.NullableDoubleGroupState; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +public class DoubleMaxMinAccumulator extends AbstractAccumulator { + + private static final DataType[] INPUT_TYPES = new DataType[] {DataTypes.DoubleType}; + + private final NullableDoubleGroupState state; + private final boolean isMin; + + DoubleMaxMinAccumulator(int capacity, boolean isMin) { + this.state = new NullableDoubleGroupState(capacity); + this.isMin = isMin; + } + + @Override + public void appendInitValue() { + state.appendNull(); + } + + @Override + void accumulate(int groupId, Block block, int position) { + if (block.isNull(position)) { + return; + } + + final double value = block.getDouble(position); + if (state.isNull(groupId)) { + state.set(groupId, value); + } else { + double beforeValue = state.get(groupId); + double afterValue = isMin ? Math.min(beforeValue, value) : Math.max(beforeValue, value); + state.set(groupId, afterValue); + } + } + + @Override + public DataType[] getInputTypes() { + return INPUT_TYPES; + } + + @Override + public void writeResultTo(int groupId, BlockBuilder bb) { + if (state.isNull(groupId)) { + bb.appendNull(); + } else { + bb.writeDouble(state.get(groupId)); + } + } + + @Override + public long estimateSize() { + return state.estimateSize(); + } +} + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/DoubleSumAccumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/DoubleSumAccumulator.java new file mode 100644 index 000000000..ebde02e71 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/DoubleSumAccumulator.java @@ -0,0 +1,75 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.accumulator; + +import com.alibaba.polardbx.executor.accumulator.state.NullableDoubleGroupState; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +public class DoubleSumAccumulator extends AbstractAccumulator { + + private static final DataType[] INPUT_TYPES = new DataType[] {DataTypes.DoubleType}; + + private final NullableDoubleGroupState state; + + DoubleSumAccumulator(int capacity) { + this.state = new NullableDoubleGroupState(capacity); + } + + @Override + public void appendInitValue() { + state.appendNull(); + } + + @Override + void accumulate(int groupId, Block block, int position) { + if (block.isNull(position)) { + return; + } + + final double processValue = block.getDouble(position); + if (state.isNull(groupId)) { + state.set(groupId, processValue); + } else { + double beforeValue = state.get(groupId); + double afterValue = beforeValue + processValue; + state.set(groupId, afterValue); + } + } + + @Override + public DataType[] getInputTypes() { + return INPUT_TYPES; + } + + @Override + public void writeResultTo(int groupId, BlockBuilder bb) { + if (state.isNull(groupId)) { + bb.appendNull(); + } else { + bb.writeDouble(state.get(groupId)); + } + } + + @Override + public long estimateSize() { + return state.estimateSize(); + } +} + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/FinalHyperLogLogAccumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/FinalHyperLogLogAccumulator.java new file mode 100644 index 000000000..c776ae8db --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/FinalHyperLogLogAccumulator.java @@ -0,0 +1,101 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.executor.accumulator;
+
+import com.alibaba.polardbx.executor.accumulator.state.NullableHyperLogLogGroupState;
+import com.alibaba.polardbx.executor.chunk.Block;
+import com.alibaba.polardbx.executor.chunk.BlockBuilder;
+import com.alibaba.polardbx.executor.chunk.Chunk;
+import com.alibaba.polardbx.executor.statistic.ndv.HyperLogLogUtil;
+import com.alibaba.polardbx.optimizer.core.datatype.DataType;
+import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator;
+
+public class FinalHyperLogLogAccumulator implements Accumulator {
+    private final DataType[] inputTypes;
+
+    private final NullableHyperLogLogGroupState groupState;
+
+    public FinalHyperLogLogAccumulator(Aggregator aggregator, DataType[] rowInputType, int capacity) {
+        int[] inputColumnIndexes = aggregator.getInputColumnIndexes();
+        this.inputTypes = new DataType[inputColumnIndexes.length];
+        for (int i = 0; i < inputTypes.length; i++) {
+            inputTypes[i] = rowInputType[inputColumnIndexes[i]];
+        }
+        this.groupState = new NullableHyperLogLogGroupState(capacity);
+    }
+
+    @Override
+    public DataType[] getInputTypes() {
+        return inputTypes;
+    }
+
+    @Override
+    public void appendInitValue() {
+        this.groupState.append();
+    }
+
+    @Override
+    public void accumulate(int groupId, Chunk inputChunk, int startIndexIncluded, int endIndexExcluded) {
+        byte[] hll;
+        if (groupState.isNull(groupId)) {
+            hll = new byte[HyperLogLogUtil.HLL_REGBYTES_DE];
+            groupState.set(groupId, hll);
+        } else {
+            hll = groupState.getHll(groupId);
+        }
+
+        Block inputBlock = inputChunk.getBlock(0);
+
+        for (int i = startIndexIncluded; i < endIndexExcluded; i++) {
+            if (inputBlock.isNull(i)) {
+                // skip null partial states only; do not abort the rest of the batch
+                continue;
+            }
+            HyperLogLogUtil.merge(hll, inputBlock.getByteArray(i));
+        }
+    }
+
+    @Override
+    public void accumulate(int groupId, Chunk inputChunk, int position) {
+        Block inputBlock = inputChunk.getBlock(0);
+        if (inputBlock.isNull(position)) {
+            return;
+        }
+
+        byte[] hll;
+        if (groupState.isNull(groupId)) {
+            hll = new byte[HyperLogLogUtil.HLL_REGBYTES_DE];
+            groupState.set(groupId, hll);
+        } else {
+            hll = groupState.getHll(groupId);
+        }
+        HyperLogLogUtil.merge(hll, inputBlock.getByteArray(position));
+    }
+
+    @Override
+    public void writeResultTo(int groupId, BlockBuilder bb) {
+        if (groupState.isNull(groupId)) {
+            bb.writeLong(0);
+        } else {
+            bb.writeLong(groupState.get(groupId));
+        }
+    }
+
+    @Override
+    public long estimateSize() {
+        return groupState.estimateSize();
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/FirstValueAccumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/FirstValueAccumulator.java
new file mode 100644
index 000000000..7279780f6
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/FirstValueAccumulator.java
@@ -0,0 +1,183 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
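FinalHyperLogLogAccumulator folds pre-aggregated sketches together via HyperLogLogUtil.merge. Assuming the usual dense HyperLogLog layout (one max-leading-zero-run counter per register, which this sketch models as a plain byte[]), merging is a register-wise maximum; that property is what makes the aggregate decomposable into partial and final phases.

public class HllMergeModel {
    // Merge two HyperLogLog register arrays in place: each register keeps
    // the larger zero-run count observed by either side.
    static void merge(byte[] target, byte[] source) {
        for (int i = 0; i < target.length; i++) {
            if (source[i] > target[i]) {
                target[i] = source[i];
            }
        }
    }

    public static void main(String[] args) {
        byte[] a = {3, 0, 5, 1};
        byte[] b = {1, 4, 2, 1};
        merge(a, b);
        System.out.println(java.util.Arrays.toString(a)); // [3, 4, 5, 1]
    }
}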
+ */
+
+package com.alibaba.polardbx.executor.accumulator;
+
+import com.alibaba.polardbx.executor.chunk.Block;
+import com.alibaba.polardbx.executor.chunk.BlockBuilder;
+import com.alibaba.polardbx.executor.chunk.BlockBuilders;
+import com.alibaba.polardbx.executor.chunk.NullBlock;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.datatype.DataType;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class FirstValueAccumulator extends AbstractAccumulator {
+
+    private final DataType[] inputTypes;
+
+    private TypedBlockBuffer typedBlockBuffer;
+
+    private ExecutionContext context;
+
+    private static final int SEGMENT_SIZE = 1024;
+
+    /**
+     * Use fixScalarAggValue (the first non-null first value) to fix the append-only typedBlockBuffer.
+     *
+     * Example:
+     *
+     * [table t data]:
+     * ------------
+     * | id | age |
+     * -----+------
+     * |  1 |  9  |
+     * ------------
+     *
+     * [sql]:
+     * select max(id), age from t;
+     *
+     * Handle the special case where:
+     * 1. a partitioned table holds only one row of data,
+     * 2. the scalar agg contains a first value, and
+     * 3. a two-phase agg plan was generated.
+     *
+     * A two-phase scalar first-value agg may then produce the following unexpected result:
+     * ------------
+     * | id | age |
+     * -----+------
+     * |  1 | NULL|
+     * ------------
+     *
+ * so we need non-null first value to fix scalar agg + */ + private Object fixScalarAggValue; + + FirstValueAccumulator(DataType type, ExecutionContext context) { + this.inputTypes = new DataType[] {type}; + this.context = context; + this.typedBlockBuffer = new TypedBlockBuffer(type, SEGMENT_SIZE); + } + + @Override + public void appendInitValue() { + // delay append value to accumulate, because first_value can only append once + } + + @Override + public void accumulate(int groupId, Block block, int position) { + if (groupId == 0 && fixScalarAggValue == null && !block.isNull(position)) { + fixScalarAggValue = block.getObject(position); + } + if (groupId < typedBlockBuffer.size()) { + // pass + } else if (groupId == typedBlockBuffer.size()) { + // do append value here + typedBlockBuffer.appendValue(block, position); + } else { + throw new AssertionError("impossible case"); + } + } + + @Override + public void writeResultTo(int groupId, BlockBuilder bb) { + Object value; + if (typedBlockBuffer.size() == 0 && groupId == 0) { + /* + * This line handles a very special case: this IS a scalar agg and there IS NO input rows. + * In this case `appendInitValue()` was called but `accumulate()` was not, which leads to + * an empty buffer. We put a NULL here to make it behave correctly. + */ + typedBlockBuffer.appendValue(new NullBlock(1), 0); + value = typedBlockBuffer.get(groupId); + } else if (typedBlockBuffer.size() == 1) { + value = fixScalarAggValue; + } else { + value = typedBlockBuffer.get(groupId); + } + bb.writeObject(value); + } + + @Override + public DataType[] getInputTypes() { + return inputTypes; + } + + @Override + public long estimateSize() { + return typedBlockBuffer.estimateSize(); + } + + public class TypedBlockBuffer { + + private BlockBuilder blockBuilder; + private final int blockSize; + + private int currentSize; + private final List blocks = new ArrayList<>(); + private long estimateSize = 0; + + private TypedBlockBuffer(DataType dataType, int blockSize) { + this.blockBuilder = BlockBuilders.create(dataType, context); + this.blockSize = blockSize; + } + + public Object get(int position) { + return blockOf(position).getObject(offsetOf(position)); + } + + public void appendValue(Block block, int position) { + // Block fulfilled before appending + if (currentSize == blockSize) { + Block buildingBlock = getBuildingBlock(); + blocks.add(buildingBlock); + estimateSize += buildingBlock.estimateSize(); + blockBuilder = blockBuilder.newBlockBuilder(); + currentSize = 0; + } + + block.writePositionTo(position, blockBuilder); + currentSize++; + } + + private Block blockOf(int position) { + int chunkId = position / blockSize; + if (chunkId < blocks.size()) { + return blocks.get(chunkId); + } else { + return getBuildingBlock(); + } + } + + public int size() { + return currentSize + blocks.size() * blockSize; + } + + private int offsetOf(int position) { + return position % blockSize; + } + + private Block getBuildingBlock() { + return blockBuilder.build(); + } + + public long estimateSize() { + return estimateSize; + } + } +} + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/HyperLogLogAccumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/HyperLogLogAccumulator.java new file mode 100644 index 000000000..ebade7368 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/HyperLogLogAccumulator.java @@ -0,0 +1,106 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * 
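The javadoc above describes the two-phase scalar FIRST_VALUE hazard. Below is a compact model of the fix under that reading, with hypothetical names and plain Objects in place of Blocks: the final phase remembers the first non-null partial value rather than trusting the single buffered one.

import java.util.Arrays;
import java.util.List;

public class ScalarFirstValueModel {

    // Final phase of a two-phase scalar FIRST_VALUE: each partial task emits
    // its local first value (null for an empty partition). The append-only
    // buffer keeps only the first partial result, so a leading null from an
    // empty partition would wrongly become the answer; the remembered first
    // non-null value (the fixScalarAggValue analogue) repairs that.
    static Object finalFirstValue(List<Object> partialFirstValues) {
        Object buffered = null;
        Object firstNonNull = null;
        boolean appended = false;
        for (Object v : partialFirstValues) {
            if (!appended) {
                buffered = v; // first_value appends only once
                appended = true;
            }
            if (firstNonNull == null && v != null) {
                firstNonNull = v;
            }
        }
        return firstNonNull != null ? firstNonNull : buffered;
    }

    public static void main(String[] args) {
        // Partition 1 is empty, partition 2 holds the single row (id=1, age=9).
        System.out.println(finalFirstValue(Arrays.<Object>asList(null, 9))); // 9
    }
}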
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.accumulator; + +import com.alibaba.polardbx.executor.accumulator.state.NullableHyperLogLogGroupState; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.statistic.ndv.HyperLogLogUtil; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; +import com.aliyun.oss.common.utils.CRC64; + +public class HyperLogLogAccumulator implements Accumulator { + protected final DataType[] inputTypes; + + protected final NullableHyperLogLogGroupState groupState; + + protected final static byte SEPARATOR_TAG = (byte) 255; + protected final static byte NULL_TAG = (byte) 254; + + public HyperLogLogAccumulator(Aggregator aggregator, DataType[] rowInputType, int capacity) { + int[] inputColumnIndexes = aggregator.getInputColumnIndexes(); + this.inputTypes = new DataType[inputColumnIndexes.length]; + for (int i = 0; i < inputTypes.length; i++) { + inputTypes[i] = rowInputType[inputColumnIndexes[i]]; + } + this.groupState = new NullableHyperLogLogGroupState(capacity); + } + + @Override + public DataType[] getInputTypes() { + return inputTypes; + } + + @Override + public void appendInitValue() { + this.groupState.append(); + } + + @Override + public void accumulate(int groupId, Chunk inputChunk, int position) { + byte[] hll; + if (groupState.isNull(groupId)) { + hll = new byte[HyperLogLogUtil.HLL_REGBYTES_DE]; + groupState.set(groupId, hll); + } else { + hll = groupState.getHll(groupId); + } + + // only one column, no need to use crc + if (inputChunk.getBlockCount() == 1) { + HyperLogLogUtil.hllSet(hll, inputChunk.getBlock(0).hashCodeUseXxhash(position)); + return; + } + + // get crc result + //CRC32 crc = new CRC32(); + CRC64 crc = new CRC64(); + byte[] bytes = new byte[8]; + for (int i = 0; i < inputChunk.getBlockCount(); i++) { + Block inputBlock = inputChunk.getBlock(i); + if (inputBlock.isNull(position)) { + crc.update(NULL_TAG); + } else { + long hash = inputBlock.hashCodeUseXxhash(position); + + for (int j = 0; (j * 8) < 64; j++) { + bytes[j] = (byte) (hash >>> (j << 3)); + } + crc.update(bytes, 8); + } + crc.update(SEPARATOR_TAG); + } + long crcResult = crc.getValue(); + HyperLogLogUtil.hllSet(hll, crcResult); + } + + @Override + public void writeResultTo(int groupId, BlockBuilder bb) { + if (groupState.isNull(groupId)) { + bb.writeLong(0); + } else { + bb.writeLong(groupState.get(groupId)); + } + } + + @Override + public long estimateSize() { + return groupState.estimateSize(); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/LongBitOrAccumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/LongBitOrAccumulator.java new file mode 100644 index 000000000..857bf8922 --- /dev/null +++ 
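For multi-column inputs, HyperLogLogAccumulator reduces the per-column xxHash values to one row hash with a running checksum, writing NULL_TAG for NULL cells and SEPARATOR_TAG after every column so that differently-split rows cannot collide by concatenation. The same combining scheme in a self-contained sketch, with java.util.zip.CRC32 standing in for the CRC64 class the patch pulls from the OSS SDK:

import java.util.zip.CRC32;

public class RowHashCombineModel {
    static final int NULL_TAG = 254;
    static final int SEPARATOR_TAG = 255;

    // Combine per-column 64-bit hashes into one row hash.
    static long combine(long[] columnHashes, boolean[] isNull) {
        CRC32 crc = new CRC32();
        byte[] bytes = new byte[8];
        for (int i = 0; i < columnHashes.length; i++) {
            if (isNull[i]) {
                crc.update(NULL_TAG); // NULL cells contribute a fixed tag
            } else {
                long hash = columnHashes[i];
                for (int j = 0; j < 8; j++) {
                    bytes[j] = (byte) (hash >>> (j << 3)); // little-endian bytes
                }
                crc.update(bytes, 0, 8);
            }
            crc.update(SEPARATOR_TAG); // column boundary marker
        }
        return crc.getValue();
    }

    public static void main(String[] args) {
        long h = combine(new long[] {0x1234L, 0x5678L}, new boolean[] {false, true});
        System.out.println(Long.toHexString(h));
    }
}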
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/LongBitOrAccumulator.java @@ -0,0 +1,67 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.accumulator; + +import com.alibaba.polardbx.executor.accumulator.state.LongGroupState; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +public class LongBitOrAccumulator extends AbstractAccumulator { + + private static final DataType[] INPUT_TYPES = new DataType[] {DataTypes.LongType}; + + private final LongGroupState state; + + LongBitOrAccumulator(int capacity) { + this.state = new LongGroupState(capacity); + } + + @Override + public DataType[] getInputTypes() { + return INPUT_TYPES; + } + + @Override + public void appendInitValue() { + state.append(0L); + } + + @Override + public void accumulate(int groupId, Block block, int position) { + if (block.isNull(position)) { + return; + } + + long value = block.getLong(position); + long beforeValue = state.get(groupId); + long afterValue = beforeValue | value; + state.set(groupId, afterValue); + } + + @Override + public void writeResultTo(int groupId, BlockBuilder bb) { + bb.writeLong(state.get(groupId)); + } + + @Override + public long estimateSize() { + return state.estimateSize(); + } +} + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/LongBitXorAccumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/LongBitXorAccumulator.java new file mode 100644 index 000000000..3c15450c9 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/LongBitXorAccumulator.java @@ -0,0 +1,67 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
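LongBitOrAccumulator, like the BIT_XOR variant that follows, can use a non-nullable LongGroupState because 0 is the identity element of both | and ^: a group that never sees a non-null row simply keeps its initial 0, which matches MySQL's result for BIT_OR and BIT_XOR over an empty set. A quick check:

public class BitAggIdentityModel {
    public static void main(String[] args) {
        long[] rows = {0b0011L, 0b0101L};

        long orAgg = 0L;  // identity of |
        long xorAgg = 0L; // identity of ^
        for (long v : rows) {
            orAgg |= v;
            xorAgg ^= v;
        }
        System.out.println(Long.toBinaryString(orAgg));  // 111
        System.out.println(Long.toBinaryString(xorAgg)); // 110

        // With no input rows at all, both aggregates stay at their initial 0,
        // so the group state needs no null bitmap.
    }
}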
+ */ + +package com.alibaba.polardbx.executor.accumulator; + +import com.alibaba.polardbx.executor.accumulator.state.LongGroupState; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +public class LongBitXorAccumulator extends AbstractAccumulator { + + private static final DataType[] INPUT_TYPES = new DataType[] {DataTypes.LongType}; + + private final LongGroupState state; + + LongBitXorAccumulator(int capacity) { + this.state = new LongGroupState(capacity); + } + + @Override + public DataType[] getInputTypes() { + return INPUT_TYPES; + } + + @Override + public void appendInitValue() { + state.append(0L); + } + + @Override + public void accumulate(int groupId, Block block, int position) { + if (block.isNull(position)) { + return; + } + + long value = block.getLong(position); + long beforeValue = state.get(groupId); + long afterValue = beforeValue ^ value; + state.set(groupId, afterValue); + } + + @Override + public void writeResultTo(int groupId, BlockBuilder bb) { + bb.writeLong(state.get(groupId)); + } + + @Override + public long estimateSize() { + return state.estimateSize(); + } +} + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/LongMaxMinAccumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/LongMaxMinAccumulator.java new file mode 100644 index 000000000..22613b8bf --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/LongMaxMinAccumulator.java @@ -0,0 +1,78 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.accumulator; + +import com.alibaba.polardbx.executor.accumulator.state.NullableLongGroupState; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +public class LongMaxMinAccumulator extends AbstractAccumulator { + + private static final DataType[] INPUT_TYPES = new DataType[] {DataTypes.LongType}; + + private final NullableLongGroupState state; + private final boolean isMin; + + LongMaxMinAccumulator(int capacity, boolean isMin) { + this.state = new NullableLongGroupState(capacity); + this.isMin = isMin; + } + + @Override + public void appendInitValue() { + state.appendNull(); + } + + @Override + void accumulate(int groupId, Block block, int position) { + if (block.isNull(position)) { + return; + } + + final long value = block.getLong(position); + if (state.isNull(groupId)) { + state.set(groupId, value); + } else { + long beforeValue = state.get(groupId); + long afterValue = isMin ? 
Math.min(beforeValue, value) : Math.max(beforeValue, value); + state.set(groupId, afterValue); + } + } + + @Override + public DataType[] getInputTypes() { + return INPUT_TYPES; + } + + @Override + public void writeResultTo(int groupId, BlockBuilder bb) { + if (state.isNull(groupId)) { + bb.appendNull(); + } else { + bb.writeLong(state.get(groupId)); + } + } + + @Override + public long estimateSize() { + return state.estimateSize(); + } +} + + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/LongSum0Accumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/LongSum0Accumulator.java new file mode 100644 index 000000000..3b025b2e7 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/LongSum0Accumulator.java @@ -0,0 +1,66 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.accumulator; + +import com.alibaba.polardbx.executor.accumulator.state.LongGroupState; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +public class LongSum0Accumulator extends AbstractAccumulator { + + private static final DataType[] INPUT_TYPES = new DataType[] {DataTypes.LongType}; + + private final LongGroupState state; + + LongSum0Accumulator(int capacity) { + this.state = new LongGroupState(capacity); + } + + @Override + public DataType[] getInputTypes() { + return INPUT_TYPES; + } + + @Override + public void appendInitValue() { + state.append(0L); + } + + @Override + public void accumulate(int groupId, Block block, int position) { + if (block.isNull(position)) { + return; + } + + long value = block.getLong(position); + long beforeValue = state.get(groupId); + long afterValue = beforeValue + value; + state.set(groupId, afterValue); + } + + @Override + public void writeResultTo(int groupId, BlockBuilder bb) { + bb.writeLong(state.get(groupId)); + } + + @Override + public long estimateSize() { + return state.estimateSize(); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/LongSumAccumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/LongSumAccumulator.java new file mode 100644 index 000000000..ad4f5118e --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/LongSumAccumulator.java @@ -0,0 +1,74 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.accumulator; + +import com.alibaba.polardbx.executor.accumulator.state.NullableLongGroupState; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +public class LongSumAccumulator extends AbstractAccumulator { + + private static final DataType[] INPUT_TYPES = new DataType[] {DataTypes.LongType}; + + private final NullableLongGroupState state; + + LongSumAccumulator(int capacity) { + this.state = new NullableLongGroupState(capacity); + } + + @Override + public DataType[] getInputTypes() { + return INPUT_TYPES; + } + + @Override + public void appendInitValue() { + state.appendNull(); + } + + @Override + public void accumulate(int groupId, Block block, int position) { + if (block.isNull(position)) { + return; + } + + long value = block.getLong(position); + if (state.isNull(groupId)) { + state.set(groupId, value); + } else { + long beforeValue = state.get(groupId); + long afterValue = beforeValue + value; + state.set(groupId, afterValue); + } + } + + @Override + public void writeResultTo(int groupId, BlockBuilder bb) { + if (state.isNull(groupId)) { + bb.appendNull(); + } else { + bb.writeLong(state.get(groupId)); + } + } + + @Override + public long estimateSize() { + return state.estimateSize(); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/PartialHyperLogLogAccumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/PartialHyperLogLogAccumulator.java new file mode 100644 index 000000000..920271ad3 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/PartialHyperLogLogAccumulator.java @@ -0,0 +1,37 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
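LongSum0Accumulator and LongSumAccumulator differ only in null semantics: SUM0 starts every group at 0 and never yields NULL, while SUM keeps a null bitmap so a group with no non-null input yields NULL. The contrast in miniature:

import java.util.Arrays;
import java.util.List;

public class SumVsSum0Model {
    // SUM0: identity 0, the total is always defined.
    static long sum0(List<Long> values) {
        long acc = 0L;
        for (Long v : values) {
            if (v != null) {
                acc += v;
            }
        }
        return acc;
    }

    // SUM: NULL until the first non-null value arrives.
    static Long sum(List<Long> values) {
        Long acc = null;
        for (Long v : values) {
            if (v == null) {
                continue;
            }
            acc = (acc == null) ? v : acc + v;
        }
        return acc;
    }

    public static void main(String[] args) {
        List<Long> allNull = Arrays.asList((Long) null);
        System.out.println(sum0(allNull)); // 0
        System.out.println(sum(allNull));  // null
    }
}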
+ */ + +package com.alibaba.polardbx.executor.accumulator; + +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; + +public class PartialHyperLogLogAccumulator extends HyperLogLogAccumulator { + + public PartialHyperLogLogAccumulator(Aggregator aggregator, DataType[] rowInputType, int capacity) { + super(aggregator, rowInputType, capacity); + } + + @Override + public void writeResultTo(int groupId, BlockBuilder bb) { + if (groupState.isNull(groupId)) { + bb.appendNull(); + } else { + bb.writeByteArray(groupState.getHll(groupId)); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/WrapAggregatorAccumulator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/WrapAggregatorAccumulator.java new file mode 100644 index 000000000..23658408b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/WrapAggregatorAccumulator.java @@ -0,0 +1,81 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.accumulator; + +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; + +import java.util.ArrayList; +import java.util.List; + +public class WrapAggregatorAccumulator implements Accumulator { + + private Aggregator aggregator; + + private DataType[] inputTypes; + + private DataType aggValueType; + + private List aggregatorList; + + public WrapAggregatorAccumulator(Aggregator aggregator, DataType[] rowInputType, DataType aggValueType, + int capacity) { + this.aggregator = aggregator; + int[] inputColumnIndexes = aggregator.getInputColumnIndexes(); + this.inputTypes = new DataType[inputColumnIndexes.length]; + this.aggValueType = aggValueType; + for (int i = 0; i < inputTypes.length; i++) { + inputTypes[i] = rowInputType[inputColumnIndexes[i]]; + } + this.aggregatorList = new ArrayList<>(capacity); + } + + public DataType[] getInputTypes() { + return inputTypes; + } + + /** + * Append a new group with initial value + */ + public void appendInitValue() { + aggregatorList.add(aggregator.getNewForAccumulator()); + } + + /** + * Accumulate a value into group + */ + public void accumulate(int groupId, Chunk inputChunk, int position) { + aggregatorList.get(groupId).aggregate(inputChunk.rowAt(position)); + } + + /** + * Get the aggregated result + */ + public void writeResultTo(int groupId, BlockBuilder bb) { + bb.writeObject(aggValueType.convertFrom(aggregatorList.get(groupId).value())); + } + + /** + * Estimate the memory consumption + */ + public long estimateSize() { + return aggregatorList.size() * 64; + } + +} diff --git 
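WrapAggregatorAccumulator is the generic fallback: an aggregate function with no specialized columnar accumulator gets one row-oriented Aggregator instance per group and is fed row views instead of block positions, at the cost of per-group object overhead (which the hard-coded 64 bytes in estimateSize only roughly reflects). The shape of that adapter, modeled with a hypothetical RowAggregator interface in place of the engine's Aggregator:

import java.util.ArrayList;
import java.util.List;
import java.util.function.Supplier;

public class WrapAggregatorModel {
    // Hypothetical row-oriented aggregator interface.
    interface RowAggregator {
        void aggregate(Object row);
        Object value();
    }

    private final Supplier<RowAggregator> prototype;
    private final List<RowAggregator> perGroup = new ArrayList<>();

    WrapAggregatorModel(Supplier<RowAggregator> prototype) {
        this.prototype = prototype;
    }

    void appendInitValue() { // one fresh aggregator per group
        perGroup.add(prototype.get());
    }

    void accumulate(int groupId, Object row) {
        perGroup.get(groupId).aggregate(row);
    }

    Object result(int groupId) {
        return perGroup.get(groupId).value();
    }

    public static void main(String[] args) {
        WrapAggregatorModel m = new WrapAggregatorModel(() -> new RowAggregator() {
            long count;
            public void aggregate(Object row) { count++; }
            public Object value() { return count; }
        });
        m.appendInitValue();
        m.accumulate(0, "row1");
        m.accumulate(0, "row2");
        System.out.println(m.result(0)); // 2
    }
}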
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/BooleanSegmentArrayList.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/BooleanSegmentArrayList.java new file mode 100644 index 000000000..0aae76174 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/BooleanSegmentArrayList.java @@ -0,0 +1,77 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.accumulator.datastruct; + +import com.alibaba.polardbx.common.utils.MathUtils; +import com.google.common.base.Preconditions; +import org.openjdk.jol.info.ClassLayout; + +import java.util.ArrayList; +import java.util.List; + +/** + * Boolean Segmented Array List + * + * @author Eric Fu + */ +public class BooleanSegmentArrayList implements SegmentArrayList { + + private static final long INSTANCE_SIZE = ClassLayout.parseClass(BooleanSegmentArrayList.class).instanceSize(); + + private static final int SEGMENT_SIZE = 1024; + + private List arrays; + + private int size; + private int capacity; + + public BooleanSegmentArrayList(int capacity) { + this.arrays = new ArrayList<>(MathUtils.ceilDiv(capacity, SEGMENT_SIZE)); + this.size = 0; + this.capacity = arrays.size() * SEGMENT_SIZE; + } + + public void add(boolean value) { + if (size == capacity) { + grow(); + } + arrays.get(arrays.size() - 1)[size++ % SEGMENT_SIZE] = value; + } + + public void set(int index, boolean value) { + Preconditions.checkArgument(index < size); + arrays.get(index / SEGMENT_SIZE)[index % SEGMENT_SIZE] = value; + } + + public boolean get(int index) { + return arrays.get(index / SEGMENT_SIZE)[index % SEGMENT_SIZE]; + } + + private void grow() { + arrays.add(new boolean[SEGMENT_SIZE]); + capacity += SEGMENT_SIZE; + } + + public int size() { + return size; + } + + @Override + public long estimateSize() { + return INSTANCE_SIZE + (long) arrays.size() * SEGMENT_SIZE; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/ByteSegmentArrayList.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/ByteSegmentArrayList.java new file mode 100644 index 000000000..45795441b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/ByteSegmentArrayList.java @@ -0,0 +1,76 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
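Every *SegmentArrayList in this patch shares the same layout: fixed-size segments allocated lazily, with an element index mapped to (index / SEGMENT_SIZE, index % SEGMENT_SIZE). Compared with one contiguous array, growth never copies existing elements and never needs a single huge allocation. The pattern in isolation, shown for the long case:

import java.util.ArrayList;
import java.util.List;

public class LongSegmentListModel {
    private static final int SEGMENT_SIZE = 1024;

    private final List<long[]> segments = new ArrayList<>();
    private int size;
    private int capacity;

    void add(long value) {
        if (size == capacity) {
            // Growth allocates one new segment; old segments are untouched.
            segments.add(new long[SEGMENT_SIZE]);
            capacity += SEGMENT_SIZE;
        }
        segments.get(segments.size() - 1)[size++ % SEGMENT_SIZE] = value;
    }

    long get(int index) {
        return segments.get(index / SEGMENT_SIZE)[index % SEGMENT_SIZE];
    }

    public static void main(String[] args) {
        LongSegmentListModel list = new LongSegmentListModel();
        for (long i = 0; i < 3000; i++) {
            list.add(i);
        }
        System.out.println(list.get(2999)); // 2999, stored in the third segment
    }
}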
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.accumulator.datastruct; + +import com.alibaba.polardbx.common.utils.MathUtils; +import org.openjdk.jol.info.ClassLayout; + +import java.util.ArrayList; +import java.util.List; + +/** + * Byte Segmented Array List + * + * @author Eric Fu + */ +public class ByteSegmentArrayList implements SegmentArrayList { + + private static final long INSTANCE_SIZE = ClassLayout.parseClass(ByteSegmentArrayList.class).instanceSize(); + + private static final int SEGMENT_SIZE = 1024 * 128; + + private List arrays; + + private int size; + private int capacity; + + public ByteSegmentArrayList(int capacity) { + this.arrays = new ArrayList<>(MathUtils.ceilDiv(capacity, SEGMENT_SIZE)); + this.size = 0; + this.capacity = arrays.size() * SEGMENT_SIZE; + } + + public void add(byte value) { + if (size == capacity) { + grow(); + } + arrays.get(arrays.size() - 1)[size++ % SEGMENT_SIZE] = value; + } + + public void set(int index, byte value) { + assert index < size; + arrays.get(index / SEGMENT_SIZE)[index % SEGMENT_SIZE] = value; + } + + public byte get(int index) { + return arrays.get(index / SEGMENT_SIZE)[index % SEGMENT_SIZE]; + } + + private void grow() { + arrays.add(new byte[SEGMENT_SIZE]); + capacity += SEGMENT_SIZE; + } + + public int size() { + return size; + } + + @Override + public long estimateSize() { + return INSTANCE_SIZE + (long) arrays.size() * SEGMENT_SIZE * Byte.BYTES; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/CharSegmentArrayList.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/CharSegmentArrayList.java new file mode 100644 index 000000000..024a79acd --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/CharSegmentArrayList.java @@ -0,0 +1,76 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.accumulator.datastruct; + +import com.alibaba.polardbx.common.utils.MathUtils; +import org.openjdk.jol.info.ClassLayout; + +import java.util.ArrayList; +import java.util.List; + +/** + * Char Segmented Array List + * + * @author Eric Fu + */ +public class CharSegmentArrayList implements SegmentArrayList { + + private static final long INSTANCE_SIZE = ClassLayout.parseClass(CharSegmentArrayList.class).instanceSize(); + + private static final int SEGMENT_SIZE = 1024; + + private List arrays; + + private int size; + private int capacity; + + public CharSegmentArrayList(int capacity) { + this.arrays = new ArrayList<>(MathUtils.ceilDiv(capacity, SEGMENT_SIZE)); + this.size = 0; + this.capacity = arrays.size() * SEGMENT_SIZE; + } + + public void add(char value) { + if (size == capacity) { + grow(); + } + arrays.get(arrays.size() - 1)[size++ % SEGMENT_SIZE] = value; + } + + public void set(int index, char value) { + assert index < size; + arrays.get(index / SEGMENT_SIZE)[index % SEGMENT_SIZE] = value; + } + + public char get(int index) { + return arrays.get(index / SEGMENT_SIZE)[index % SEGMENT_SIZE]; + } + + private void grow() { + arrays.add(new char[SEGMENT_SIZE]); + capacity += SEGMENT_SIZE; + } + + public int size() { + return size; + } + + @Override + public long estimateSize() { + return INSTANCE_SIZE + (long) arrays.size() * SEGMENT_SIZE * Character.BYTES; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/DoubleSegmentArrayList.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/DoubleSegmentArrayList.java new file mode 100644 index 000000000..2a7434cfa --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/DoubleSegmentArrayList.java @@ -0,0 +1,76 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.accumulator.datastruct; + +import com.alibaba.polardbx.common.utils.MathUtils; +import org.openjdk.jol.info.ClassLayout; + +import java.util.ArrayList; +import java.util.List; + +/** + * Double Segmented Array List + * + * @author Eric Fu + */ +public class DoubleSegmentArrayList implements SegmentArrayList { + + private static final long INSTANCE_SIZE = ClassLayout.parseClass(DoubleSegmentArrayList.class).instanceSize(); + + private static final int SEGMENT_SIZE = 1024; + + private List arrays; + + private int size; + private int capacity; + + public DoubleSegmentArrayList(int capacity) { + this.arrays = new ArrayList<>(MathUtils.ceilDiv(capacity, SEGMENT_SIZE)); + this.size = 0; + this.capacity = arrays.size() * SEGMENT_SIZE; + } + + public void add(double value) { + if (size == capacity) { + grow(); + } + arrays.get(arrays.size() - 1)[size++ % SEGMENT_SIZE] = value; + } + + public void set(int index, double value) { + assert index < size; + arrays.get(index / SEGMENT_SIZE)[index % SEGMENT_SIZE] = value; + } + + public double get(int index) { + return arrays.get(index / SEGMENT_SIZE)[index % SEGMENT_SIZE]; + } + + private void grow() { + arrays.add(new double[SEGMENT_SIZE]); + capacity += SEGMENT_SIZE; + } + + public int size() { + return size; + } + + @Override + public long estimateSize() { + return INSTANCE_SIZE + (long) arrays.size() * SEGMENT_SIZE * Double.BYTES; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/FloatSegmentArrayList.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/FloatSegmentArrayList.java new file mode 100644 index 000000000..4ca5f7695 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/FloatSegmentArrayList.java @@ -0,0 +1,76 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.accumulator.datastruct; + +import com.alibaba.polardbx.common.utils.MathUtils; +import org.openjdk.jol.info.ClassLayout; + +import java.util.ArrayList; +import java.util.List; + +/** + * Float Segmented Array List + * + * @author Eric Fu + */ +public class FloatSegmentArrayList implements SegmentArrayList { + + private static final long INSTANCE_SIZE = ClassLayout.parseClass(FloatSegmentArrayList.class).instanceSize(); + + private static final int SEGMENT_SIZE = 1024; + + private List arrays; + + private int size; + private int capacity; + + public FloatSegmentArrayList(int capacity) { + this.arrays = new ArrayList<>(MathUtils.ceilDiv(capacity, SEGMENT_SIZE)); + this.size = 0; + this.capacity = arrays.size() * SEGMENT_SIZE; + } + + public void add(float value) { + if (size == capacity) { + grow(); + } + arrays.get(arrays.size() - 1)[size++ % SEGMENT_SIZE] = value; + } + + public void set(int index, float value) { + assert index < size; + arrays.get(index / SEGMENT_SIZE)[index % SEGMENT_SIZE] = value; + } + + public float get(int index) { + return arrays.get(index / SEGMENT_SIZE)[index % SEGMENT_SIZE]; + } + + private void grow() { + arrays.add(new float[SEGMENT_SIZE]); + capacity += SEGMENT_SIZE; + } + + public int size() { + return size; + } + + @Override + public long estimateSize() { + return INSTANCE_SIZE + (long) arrays.size() * SEGMENT_SIZE * Float.BYTES; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/IntegerSegmentArrayList.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/IntegerSegmentArrayList.java new file mode 100644 index 000000000..db76f5250 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/IntegerSegmentArrayList.java @@ -0,0 +1,76 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.accumulator.datastruct; + +import com.alibaba.polardbx.common.utils.MathUtils; +import org.openjdk.jol.info.ClassLayout; + +import java.util.ArrayList; +import java.util.List; + +/** + * Integer Segmented Array List + * + * @author Eric Fu + */ +public class IntegerSegmentArrayList implements SegmentArrayList { + + private static final long INSTANCE_SIZE = ClassLayout.parseClass(IntegerSegmentArrayList.class).instanceSize(); + + private static final int SEGMENT_SIZE = 1024; + + private List arrays; + + private int size; + private int capacity; + + public IntegerSegmentArrayList(int capacity) { + this.arrays = new ArrayList<>(MathUtils.ceilDiv(capacity, SEGMENT_SIZE)); + this.size = 0; + this.capacity = arrays.size() * SEGMENT_SIZE; + } + + public void add(int value) { + if (size == capacity) { + grow(); + } + arrays.get(arrays.size() - 1)[size++ % SEGMENT_SIZE] = value; + } + + public void set(int index, int value) { + assert index < size; + arrays.get(index / SEGMENT_SIZE)[index % SEGMENT_SIZE] = value; + } + + public int get(int index) { + return arrays.get(index / SEGMENT_SIZE)[index % SEGMENT_SIZE]; + } + + private void grow() { + arrays.add(new int[SEGMENT_SIZE]); + capacity += SEGMENT_SIZE; + } + + public int size() { + return size; + } + + @Override + public long estimateSize() { + return INSTANCE_SIZE + (long) arrays.size() * SEGMENT_SIZE * Integer.BYTES; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/LongSegmentArrayList.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/LongSegmentArrayList.java new file mode 100644 index 000000000..c29aa80b3 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/LongSegmentArrayList.java @@ -0,0 +1,76 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.accumulator.datastruct; + +import com.alibaba.polardbx.common.utils.MathUtils; +import org.openjdk.jol.info.ClassLayout; + +import java.util.ArrayList; +import java.util.List; + +/** + * Long Segmented Array List + * + * @author Eric Fu + */ +public class LongSegmentArrayList implements SegmentArrayList { + + private static final long INSTANCE_SIZE = ClassLayout.parseClass(LongSegmentArrayList.class).instanceSize(); + + private static final int SEGMENT_SIZE = 1024; + + private List arrays; + + private int size; + private int capacity; + + public LongSegmentArrayList(int capacity) { + this.arrays = new ArrayList<>(MathUtils.ceilDiv(capacity, SEGMENT_SIZE)); + this.size = 0; + this.capacity = arrays.size() * SEGMENT_SIZE; + } + + public void add(long value) { + if (size == capacity) { + grow(); + } + arrays.get(arrays.size() - 1)[size++ % SEGMENT_SIZE] = value; + } + + public void set(int index, long value) { + assert index < size; + arrays.get(index / SEGMENT_SIZE)[index % SEGMENT_SIZE] = value; + } + + public long get(int index) { + return arrays.get(index / SEGMENT_SIZE)[index % SEGMENT_SIZE]; + } + + private void grow() { + arrays.add(new long[SEGMENT_SIZE]); + capacity += SEGMENT_SIZE; + } + + public int size() { + return size; + } + + @Override + public long estimateSize() { + return INSTANCE_SIZE + (long) arrays.size() * SEGMENT_SIZE * Long.BYTES; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/ObjectSegmentArrayList.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/ObjectSegmentArrayList.java new file mode 100644 index 000000000..aa7e6a28d --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/ObjectSegmentArrayList.java @@ -0,0 +1,96 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.accumulator.datastruct; + +import com.alibaba.polardbx.common.utils.MathUtils; +import com.alibaba.polardbx.common.utils.memory.ObjectSizeUtils; +import org.openjdk.jol.info.ClassLayout; + +import java.lang.reflect.Array; +import java.util.ArrayList; +import java.util.List; + +/** + * Disaggregated stored object in 2-dimension big array. + * + * @param The type of object. + */ +public class ObjectSegmentArrayList implements SegmentArrayList { + private static final long INSTANCE_SIZE = ClassLayout.parseClass(ObjectSegmentArrayList.class).instanceSize(); + + private static final int SEGMENT_SIZE = 1024; + + /** + * two-dimension array + */ + private List arrays; + + /** + * Current size of objects in array list. + */ + private int size; + + /** + * The capacity of array list. + */ + private int capacity; + + /** + * Type of stored object. 
+ */ + private final Class clazz; + + public ObjectSegmentArrayList(int capacity, Class clazz) { + this.arrays = new ArrayList<>(MathUtils.ceilDiv(capacity, SEGMENT_SIZE)); + this.size = 0; + this.capacity = arrays.size() * SEGMENT_SIZE; + this.clazz = clazz; + } + + public void add(T value) { + if (size == capacity) { + grow(); + } + // value is nullable + arrays.get(arrays.size() - 1)[size++ % SEGMENT_SIZE] = value; + } + + public void set(int index, T value) { + assert index < size; + // value is nullable + arrays.get(index / SEGMENT_SIZE)[index % SEGMENT_SIZE] = value; + } + + public T get(int index) { + return arrays.get(index / SEGMENT_SIZE)[index % SEGMENT_SIZE]; + } + + private void grow() { + T[] array = (T[]) Array.newInstance(clazz, SEGMENT_SIZE); + arrays.add(array); + capacity += SEGMENT_SIZE; + } + + public int size() { + return size; + } + + @Override + public long estimateSize() { + return INSTANCE_SIZE + (long) arrays.size() * SEGMENT_SIZE * ObjectSizeUtils.REFERENCE_SIZE; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/SegmentArrayList.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/SegmentArrayList.java new file mode 100644 index 000000000..97b00c33c --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/SegmentArrayList.java @@ -0,0 +1,28 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.accumulator.datastruct; + +/** + * Interface of Segmented Array List + * + * @author Eric Fu + */ +public interface SegmentArrayList { + + long estimateSize(); + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/ShortSegmentArrayList.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/ShortSegmentArrayList.java new file mode 100644 index 000000000..df627d550 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/datastruct/ShortSegmentArrayList.java @@ -0,0 +1,76 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
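ObjectSegmentArrayList must allocate T[] segments at runtime, which type erasure forbids with plain generics; that is why it carries a Class token and calls java.lang.reflect.Array.newInstance in grow(). The idiom on its own:

import java.lang.reflect.Array;

public class TypedSegmentAllocation {
    // Allocate a T[] of the requested length from a Class token.
    // The cast is safe because Array.newInstance returns an array
    // whose component type is exactly clazz.
    @SuppressWarnings("unchecked")
    static <T> T[] newSegment(Class<T> clazz, int length) {
        return (T[]) Array.newInstance(clazz, length);
    }

    public static void main(String[] args) {
        String[] segment = newSegment(String.class, 4);
        segment[0] = "first";
        System.out.println(segment.length + " " + segment[0]); // 4 first
    }
}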
+ */ + +package com.alibaba.polardbx.executor.accumulator.datastruct; + +import com.alibaba.polardbx.common.utils.MathUtils; +import org.openjdk.jol.info.ClassLayout; + +import java.util.ArrayList; +import java.util.List; + +/** + * Short Segmented Array List + * + * @author Eric Fu + */ +public class ShortSegmentArrayList implements SegmentArrayList { + + private static final long INSTANCE_SIZE = ClassLayout.parseClass(ShortSegmentArrayList.class).instanceSize(); + + private static final int SEGMENT_SIZE = 1024; + + private List arrays; + + private int size; + private int capacity; + + public ShortSegmentArrayList(int capacity) { + this.arrays = new ArrayList<>(MathUtils.ceilDiv(capacity, SEGMENT_SIZE)); + this.size = 0; + this.capacity = arrays.size() * SEGMENT_SIZE; + } + + public void add(short value) { + if (size == capacity) { + grow(); + } + arrays.get(arrays.size() - 1)[size++ % SEGMENT_SIZE] = value; + } + + public void set(int index, short value) { + assert index < size; + arrays.get(index / SEGMENT_SIZE)[index % SEGMENT_SIZE] = value; + } + + public short get(int index) { + return arrays.get(index / SEGMENT_SIZE)[index % SEGMENT_SIZE]; + } + + private void grow() { + arrays.add(new short[SEGMENT_SIZE]); + capacity += SEGMENT_SIZE; + } + + public int size() { + return size; + } + + @Override + public long estimateSize() { + return INSTANCE_SIZE + (long) arrays.size() * SEGMENT_SIZE * Short.BYTES; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/DecimalBoxGroupState.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/DecimalBoxGroupState.java index c4f999090..3350d0e90 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/DecimalBoxGroupState.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/DecimalBoxGroupState.java @@ -18,76 +18,186 @@ import com.alibaba.polardbx.common.datatype.Decimal; import com.alibaba.polardbx.common.datatype.DecimalBox; -import com.alibaba.polardbx.optimizer.datastruct.BooleanSegmentArrayList; -import com.alibaba.polardbx.optimizer.datastruct.ObjectSegmentArrayList; -import com.alibaba.polardbx.optimizer.datastruct.ObjectWithClassSegmentArrayList; -import com.alibaba.polardbx.optimizer.state.GroupState; -import com.alibaba.polardbx.optimizer.state.NullableDecimalGroupState; +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.FastDecimalUtils; +import com.alibaba.polardbx.executor.accumulator.datastruct.LongSegmentArrayList; +import com.alibaba.polardbx.executor.accumulator.datastruct.ObjectSegmentArrayList; +import it.unimi.dsi.fastutil.bytes.ByteArrayList; import org.openjdk.jol.info.ClassLayout; +import javax.annotation.concurrent.NotThreadSafe; + +@NotThreadSafe public class DecimalBoxGroupState implements GroupState { - private static final long INSTANCE_SIZE = ClassLayout.parseClass(NullableDecimalGroupState.class).instanceSize(); + private static final byte IS_NULL = 0; + private static final byte IS_NORMAL_DECIMAL = 1; + private static final byte IS_DECIMAL_BOX = 2; + private static final byte IS_DECIMAL_64 = 3; + private static final byte IS_DECIMAL_128 = 4; - /** - * The null value bitmap. 
- */ - private final BooleanSegmentArrayList valueIsNull; + private static final long INSTANCE_SIZE = ClassLayout.parseClass(DecimalBoxGroupState.class).instanceSize(); + private final LongSegmentArrayList decimal64List; + private final LongSegmentArrayList decimal128HighList; /** * Disaggregated stored decimal objects. */ - private final ObjectWithClassSegmentArrayList decimals; + private final ObjectSegmentArrayList decimalBoxes; + private final ObjectSegmentArrayList decimals; + + private final ByteArrayList flags; private final int capacity; + private int scale; + + protected boolean isNormalDecimal = false; - public DecimalBoxGroupState(int capacity) { + public DecimalBoxGroupState(int capacity, int scale) { this.capacity = capacity; - this.valueIsNull = new BooleanSegmentArrayList(capacity); - this.decimals = new ObjectWithClassSegmentArrayList<>(capacity, DecimalBox.class); + this.scale = scale; + this.decimal64List = new LongSegmentArrayList(capacity); + this.decimal128HighList = new LongSegmentArrayList(capacity); + this.decimalBoxes = new ObjectSegmentArrayList<>(capacity, DecimalBox.class); + this.decimals = new ObjectSegmentArrayList<>(capacity, Decimal.class); + + this.flags = new ByteArrayList(capacity); } public void set(int groupId, DecimalBox value) { - valueIsNull.set(groupId, false); + decimalBoxes.set(groupId, value); + flags.set(groupId, IS_DECIMAL_BOX); + } + + public void set(int groupId, long value) { + decimal64List.set(groupId, value); + flags.set(groupId, IS_DECIMAL_64); + } + + public void set(int groupId, long decimal128Low, long decimal128High) { + decimal64List.set(groupId, decimal128Low); + decimal128HighList.set(groupId, decimal128High); + flags.set(groupId, IS_DECIMAL_128); + } + + public void set(int groupId, Decimal value) { decimals.set(groupId, value); + flags.set(groupId, IS_NORMAL_DECIMAL); } public void appendNull() { + decimal64List.add(0L); + decimal128HighList.add(0L); + decimalBoxes.add(null); decimals.add(null); - valueIsNull.add(true); + flags.add(IS_NULL); } public boolean isNull(int groupId) { - return valueIsNull.get(groupId); + return flags.getByte(groupId) == IS_NULL; } - public Decimal get(int groupId) { - return decimals.get(groupId).getDecimalSum(); + public boolean isDecimalBox(int groupId) { + return flags.getByte(groupId) == IS_DECIMAL_BOX; + } + + public boolean isDecimal64(int groupId) { + return flags.getByte(groupId) == IS_DECIMAL_64; + } + + public boolean isDecimal128(int groupId) { + return flags.getByte(groupId) == IS_DECIMAL_128; + } + + public boolean isNormalDecimal(int groupId) { + return flags.getByte(groupId) == IS_NORMAL_DECIMAL; + } + + public byte getFlag(int groupId) { + return flags.getByte(groupId); + } + + public long getLong(int groupId) { + return decimal64List.get(groupId); + } + + public long getDecimal128Low(int groupId) { + return decimal64List.get(groupId); + } + + public long getDecimal128High(int groupId) { + return decimal128HighList.get(groupId); + } + + public Decimal getDecimal(int groupId) { + switch (flags.getByte(groupId)) { + case IS_DECIMAL_128: + DecimalStructure buffer = new DecimalStructure(); + DecimalStructure result = new DecimalStructure(); + FastDecimalUtils.setDecimal128WithScale(buffer, result, + getDecimal128Low(groupId), getDecimal128High(groupId), scale); + return new Decimal(result); + case IS_DECIMAL_64: + return new Decimal(getLong(groupId), scale); + case IS_NORMAL_DECIMAL: + return decimals.get(groupId); + case IS_DECIMAL_BOX: + return 
decimalBoxes.get(groupId).getDecimalSum(); + default: + throw new IllegalStateException("Current flag: " + flags.getByte(groupId)); + } } public DecimalBox getBox(int groupId) { - return decimals.get(groupId); + return decimalBoxes.get(groupId); + } + + public boolean isNormalDecimal() { + return isNormalDecimal; } @Override public long estimateSize() { - long size = INSTANCE_SIZE + decimals.estimateSize() + valueIsNull.estimateSize(); + long size = INSTANCE_SIZE + decimalBoxes.estimateSize() + decimal64List.estimateSize() + + decimal128HighList.estimateSize() + flags.size(); return size; } - public NullableDecimalGroupState toDecimalGroupState() { - ObjectWithClassSegmentArrayList decimalValues = new ObjectWithClassSegmentArrayList<>(capacity, Decimal.class); - for (int i = 0; i < valueIsNull.size(); i++) { - if (!isNull(i)) { - DecimalBox box = this.decimals.get(i); - Decimal decimal = box.getDecimalSum(); - decimalValues.add(decimal); - } else { - decimalValues.add(null); + public void toNormalDecimalGroupState() { + if (isNormalDecimal) { + return; + } + this.isNormalDecimal = true; + for (int i = 0; i < flags.size(); i++) { + if (isDecimal64(i)) { + // long -> decimal + Decimal decimal = new Decimal(getLong(i), scale); + set(i, decimal); + } else if (isDecimal128(i)) { + DecimalStructure buffer = new DecimalStructure(); + DecimalStructure result = new DecimalStructure(); + FastDecimalUtils.setDecimal128WithScale(buffer, result, + getDecimal128Low(i), getDecimal128High(i), scale); + set(i, new Decimal(result)); + } else if (isDecimalBox(i)) { + // decimalBox -> decimal + Decimal decimal = getBox(i).getDecimalSum(); + set(i, decimal); } } + } - NullableDecimalGroupState decimalGroupState = new NullableDecimalGroupState(this.valueIsNull, decimalValues); - return decimalGroupState; + public void rescale(int newScale) { + if (this.scale == newScale) { + return; + } + + // do nothing just update scale + this.scale = newScale; + for (int i = 0; i < flags.size(); i++) { + if (isDecimalBox(i)) { + getBox(i).setScale(newScale); + } + } } } diff --git a/polardbx-optimizer/src/main/java/com/alibaba/polardbx/optimizer/state/GroupState.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/GroupState.java similarity index 92% rename from polardbx-optimizer/src/main/java/com/alibaba/polardbx/optimizer/state/GroupState.java rename to polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/GroupState.java index 808c4cad1..44e533500 100644 --- a/polardbx-optimizer/src/main/java/com/alibaba/polardbx/optimizer/state/GroupState.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/GroupState.java @@ -14,7 +14,7 @@ * limitations under the License. 
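// ---------------------------------------------------------------------------
// Editor's illustration (not part of the patch): DecimalBoxGroupState keeps
// one of five physical representations per group and only widens when needed.
// DECIMAL_128 splits the unscaled value across two longs; assuming the usual
// two's-complement split (signed high word, unsigned low word), a plain
// java.math decode of that layout looks like:
static java.math.BigDecimal decimal128ToBigDecimal(long low, long high, int scale) {
    java.math.BigInteger unscaled = java.math.BigInteger.valueOf(high)  // value = high * 2^64
        .shiftLeft(64)
        .add(new java.math.BigInteger(Long.toUnsignedString(low)));     //       + unsigned(low)
    return new java.math.BigDecimal(unscaled, scale);                   // e.g. low=10, high=0, scale=2 -> 0.10
}
// ---------------------------------------------------------------------------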
*/ -package com.alibaba.polardbx.optimizer.state; +package com.alibaba.polardbx.executor.accumulator.state; /** * State of groups @@ -22,5 +22,7 @@ * @author Eric Fu */ public interface GroupState { + long estimateSize(); + } diff --git a/polardbx-optimizer/src/main/java/com/alibaba/polardbx/optimizer/state/LongGroupState.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/LongGroupState.java similarity index 90% rename from polardbx-optimizer/src/main/java/com/alibaba/polardbx/optimizer/state/LongGroupState.java rename to polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/LongGroupState.java index 1a97aa9ac..1ca8d42cb 100644 --- a/polardbx-optimizer/src/main/java/com/alibaba/polardbx/optimizer/state/LongGroupState.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/LongGroupState.java @@ -14,9 +14,9 @@ * limitations under the License. */ -package com.alibaba.polardbx.optimizer.state; +package com.alibaba.polardbx.executor.accumulator.state; -import com.alibaba.polardbx.optimizer.datastruct.LongSegmentArrayList; +import com.alibaba.polardbx.executor.accumulator.datastruct.LongSegmentArrayList; import org.openjdk.jol.info.ClassLayout; /** diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableCheckSumGroupState.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableCheckSumGroupState.java index fde7e91ed..b657a34f5 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableCheckSumGroupState.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableCheckSumGroupState.java @@ -16,10 +16,9 @@ package com.alibaba.polardbx.executor.accumulator.state; -import com.alibaba.polardbx.common.OrderInvariantHasher; -import com.alibaba.polardbx.optimizer.datastruct.BooleanSegmentArrayList; -import com.alibaba.polardbx.optimizer.datastruct.ObjectWithClassSegmentArrayList; -import com.alibaba.polardbx.optimizer.state.GroupState; +import com.alibaba.polardbx.common.IOrderInvariantHash; +import com.alibaba.polardbx.executor.accumulator.datastruct.BooleanSegmentArrayList; +import com.alibaba.polardbx.executor.accumulator.datastruct.ObjectSegmentArrayList; import org.openjdk.jol.info.ClassLayout; public class NullableCheckSumGroupState implements GroupState { @@ -33,16 +32,17 @@ public class NullableCheckSumGroupState implements GroupState { /** * Disaggregated stored decimal objects. 
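// ---------------------------------------------------------------------------
// Editor's illustration (not part of the patch): GroupState now exposes
// estimateSize() so callers can account for per-group aggregation state
// memory. The states in this patch combine a JOL shallow instance size with
// hand-computed payload sizes, roughly like this hypothetical sketch:
class CountGroupState implements GroupState {
    private static final long INSTANCE_SIZE =
        org.openjdk.jol.info.ClassLayout.parseClass(CountGroupState.class).instanceSize();

    private final long[] counts = new long[1024];

    @Override
    public long estimateSize() {
        // shallow size of this object + the array payload it owns
        return INSTANCE_SIZE + (long) counts.length * Long.BYTES;
    }
}
// ---------------------------------------------------------------------------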
*/ - private final ObjectWithClassSegmentArrayList hasherList; + private final ObjectSegmentArrayList hasherList; private final int capacity; - public NullableCheckSumGroupState(int capacity) { + public NullableCheckSumGroupState(int capacity, Class clazz) { this.capacity = capacity; - this.valueIsNull = new BooleanSegmentArrayList(capacity); this.hasherList = new ObjectWithClassSegmentArrayList<>(capacity, OrderInvariantHasher.class); + this.valueIsNull = new BooleanSegmentArrayList(capacity); + this.hasherList = new ObjectSegmentArrayList(capacity, clazz); } - public void set(int groupId, OrderInvariantHasher value) { + public void set(int groupId, IOrderInvariantHash value) { valueIsNull.set(groupId, false); hasherList.set(groupId, value); } @@ -60,7 +60,7 @@ public Long get(int groupId) { return hasherList.get(groupId).getResult(); } - public OrderInvariantHasher getHasher(int groupId) { + public IOrderInvariantHash getHasher(int groupId) { return hasherList.get(groupId); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableDecimalGroupState.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableDecimalGroupState.java index e69de29bb..3e103b228 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableDecimalGroupState.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableDecimalGroupState.java @@ -0,0 +1,78 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.accumulator.state; + +import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.executor.accumulator.datastruct.BooleanSegmentArrayList; +import com.alibaba.polardbx.executor.accumulator.datastruct.ObjectSegmentArrayList; +import org.openjdk.jol.info.ClassLayout; + +/** + * Nullable Decimal Group State + * + * @author Eric Fu + */ +public class NullableDecimalGroupState implements GroupState { + + private static final long INSTANCE_SIZE = ClassLayout.parseClass(NullableDecimalGroupState.class).instanceSize(); + + /** + * The null value bitmap. + */ + private final BooleanSegmentArrayList valueIsNull; + + /** + * Disaggregated stored decimal objects. 
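// ---------------------------------------------------------------------------
// Editor's illustration (not part of the patch): the checksum state now
// depends on the IOrderInvariantHash interface instead of the concrete
// OrderInvariantHasher. The contract implied by the name is that the result
// must not depend on row arrival order, which any commutative combiner
// satisfies. A toy stand-in (not the PolarDB-X implementation):
class SumInvariantHash {
    private long result;

    void add(long rowHash) {
        result += rowHash; // addition is commutative, so row order cannot matter
    }

    long getResult() {
        return result;
    }
}
// ---------------------------------------------------------------------------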
+ */ + private final ObjectSegmentArrayList decimals; + + public NullableDecimalGroupState(int capacity) { + this.valueIsNull = new BooleanSegmentArrayList(capacity); + this.decimals = new ObjectSegmentArrayList(capacity, Decimal.class); + } + + public NullableDecimalGroupState( + BooleanSegmentArrayList valueIsNull, + ObjectSegmentArrayList decimals) { + this.valueIsNull = valueIsNull; + this.decimals = decimals; + } + + public void set(int groupId, Decimal value) { + valueIsNull.set(groupId, false); + decimals.set(groupId, value); + } + + public void appendNull() { + decimals.add(null); + valueIsNull.add(true); + } + + public boolean isNull(int groupId) { + return valueIsNull.get(groupId); + } + + public Decimal get(int groupId) { + return decimals.get(groupId); + } + + @Override + public long estimateSize() { + long size = INSTANCE_SIZE + decimals.estimateSize() + valueIsNull.estimateSize(); + return size; + } +} diff --git a/polardbx-optimizer/src/main/java/com/alibaba/polardbx/optimizer/state/NullableDecimalLongGroupState.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableDecimalLongGroupState.java similarity index 89% rename from polardbx-optimizer/src/main/java/com/alibaba/polardbx/optimizer/state/NullableDecimalLongGroupState.java rename to polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableDecimalLongGroupState.java index 93cdb2294..e2a44de46 100644 --- a/polardbx-optimizer/src/main/java/com/alibaba/polardbx/optimizer/state/NullableDecimalLongGroupState.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableDecimalLongGroupState.java @@ -14,10 +14,10 @@ * limitations under the License. */ -package com.alibaba.polardbx.optimizer.state; +package com.alibaba.polardbx.executor.accumulator.state; import com.alibaba.polardbx.common.datatype.Decimal; -import com.alibaba.polardbx.optimizer.datastruct.LongSegmentArrayList; +import com.alibaba.polardbx.executor.accumulator.datastruct.LongSegmentArrayList; import org.openjdk.jol.info.ClassLayout; /** @@ -48,11 +48,6 @@ public void appendNull() { longValues.add(0); } - @Override - public void setNull(int groupId) { - super.setNull(groupId); - } - @Override public boolean isNull(int groupId) { return super.isNull(groupId); diff --git a/polardbx-optimizer/src/main/java/com/alibaba/polardbx/optimizer/state/NullableDoubleGroupState.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableDoubleGroupState.java similarity index 86% rename from polardbx-optimizer/src/main/java/com/alibaba/polardbx/optimizer/state/NullableDoubleGroupState.java rename to polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableDoubleGroupState.java index 2d2e7e9ee..ebdf70c1f 100644 --- a/polardbx-optimizer/src/main/java/com/alibaba/polardbx/optimizer/state/NullableDoubleGroupState.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableDoubleGroupState.java @@ -14,10 +14,10 @@ * limitations under the License. 
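// ---------------------------------------------------------------------------
// Editor's illustration (not part of the patch): the Nullable*GroupState
// classes all pair a value list with a parallel null bitmap rather than
// boxing values. A hypothetical read helper shows the intended discipline:
static Decimal readOrNull(NullableDecimalGroupState state, int groupId) {
    // the bitmap, not the value list, is the source of truth for SQL NULL
    return state.isNull(groupId) ? null : state.get(groupId);
}
// ---------------------------------------------------------------------------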
*/ -package com.alibaba.polardbx.optimizer.state; +package com.alibaba.polardbx.executor.accumulator.state; -import com.alibaba.polardbx.optimizer.datastruct.BooleanSegmentArrayList; -import com.alibaba.polardbx.optimizer.datastruct.DoubleSegmentArrayList; +import com.alibaba.polardbx.executor.accumulator.datastruct.BooleanSegmentArrayList; +import com.alibaba.polardbx.executor.accumulator.datastruct.DoubleSegmentArrayList; import org.openjdk.jol.info.ClassLayout; /** @@ -42,10 +42,6 @@ public void set(int groupId, double value) { valueIsNull.set(groupId, false); } - public void setNull(int groupId) { - valueIsNull.set(groupId, true); - } - public void append(double value) { values.add(value); valueIsNull.add(false); diff --git a/polardbx-optimizer/src/main/java/com/alibaba/polardbx/optimizer/state/NullableDoubleLongGroupState.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableDoubleLongGroupState.java similarity index 86% rename from polardbx-optimizer/src/main/java/com/alibaba/polardbx/optimizer/state/NullableDoubleLongGroupState.java rename to polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableDoubleLongGroupState.java index c5b487c7a..03ece8af0 100644 --- a/polardbx-optimizer/src/main/java/com/alibaba/polardbx/optimizer/state/NullableDoubleLongGroupState.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableDoubleLongGroupState.java @@ -14,11 +14,11 @@ * limitations under the License. */ -package com.alibaba.polardbx.optimizer.state; +package com.alibaba.polardbx.executor.accumulator.state; -import com.alibaba.polardbx.optimizer.datastruct.BooleanSegmentArrayList; -import com.alibaba.polardbx.optimizer.datastruct.DoubleSegmentArrayList; -import com.alibaba.polardbx.optimizer.datastruct.LongSegmentArrayList; +import com.alibaba.polardbx.executor.accumulator.datastruct.BooleanSegmentArrayList; +import com.alibaba.polardbx.executor.accumulator.datastruct.DoubleSegmentArrayList; +import com.alibaba.polardbx.executor.accumulator.datastruct.LongSegmentArrayList; import org.openjdk.jol.info.ClassLayout; /** @@ -58,10 +58,6 @@ public void appendNull() { valueIsNull.add(true); } - public void setNull(int groupId) { - valueIsNull.set(groupId, true); - } - public boolean isNull(int groupId) { return valueIsNull.get(groupId); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableHyperLogLogGroupState.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableHyperLogLogGroupState.java new file mode 100644 index 000000000..49298ce5a --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableHyperLogLogGroupState.java @@ -0,0 +1,73 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
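// ---------------------------------------------------------------------------
// Editor's illustration (not part of the patch): NullableDoubleLongGroupState
// keeps a double and a long per group, which is the shape an AVG-style
// aggregate needs (running sum plus row count). Per group, conceptually:
static double averageOf(double[] rows) {
    double sum = 0;  // would live in the DoubleSegmentArrayList slot
    long count = 0;  // would live in the LongSegmentArrayList slot
    for (double v : rows) {
        sum += v;
        count++;
    }
    return count == 0 ? Double.NaN : sum / count; // no rows => NULL in SQL terms
}
// ---------------------------------------------------------------------------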
+ */ + +package com.alibaba.polardbx.executor.accumulator.state; + +import com.alibaba.polardbx.executor.accumulator.datastruct.BooleanSegmentArrayList; +import com.alibaba.polardbx.executor.accumulator.datastruct.ObjectSegmentArrayList; +import com.alibaba.polardbx.executor.statistic.ndv.HyperLogLogUtil; +import org.openjdk.jol.info.ClassLayout; + +public class NullableHyperLogLogGroupState implements GroupState { + private static final long INSTANCE_SIZE = + ClassLayout.parseClass(NullableHyperLogLogGroupState.class).instanceSize(); + + /** + * The null value bitmap. + */ + private final BooleanSegmentArrayList valueIsNull; + + /** + * Disaggregated stored decimal objects. + */ + private final ObjectSegmentArrayList hllList; + + private final int capacity; + + public NullableHyperLogLogGroupState(int capacity) { + this.capacity = capacity; + this.valueIsNull = new BooleanSegmentArrayList(capacity); + this.hllList = new ObjectSegmentArrayList(capacity, byte[].class); + } + + public void set(int groupId, byte[] value) { + valueIsNull.set(groupId, false); + hllList.set(groupId, value); + } + + public void append() { + hllList.add(null); + valueIsNull.add(true); + } + + public boolean isNull(int groupId) { + return valueIsNull.get(groupId); + } + + public Long get(int groupId) { + return HyperLogLogUtil.getCardinality(hllList.get(groupId)); + } + + public byte[] getHll(int groupId) { + return hllList.get(groupId); + } + + @Override + public long estimateSize() { + long size = INSTANCE_SIZE + hllList.estimateSize() + valueIsNull.estimateSize(); + return size; + } +} diff --git a/polardbx-optimizer/src/main/java/com/alibaba/polardbx/optimizer/state/NullableLongGroupState.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableLongGroupState.java similarity index 86% rename from polardbx-optimizer/src/main/java/com/alibaba/polardbx/optimizer/state/NullableLongGroupState.java rename to polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableLongGroupState.java index 994d15ed7..e25b7be8d 100644 --- a/polardbx-optimizer/src/main/java/com/alibaba/polardbx/optimizer/state/NullableLongGroupState.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/accumulator/state/NullableLongGroupState.java @@ -14,10 +14,10 @@ * limitations under the License. 
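// ---------------------------------------------------------------------------
// Editor's illustration (not part of the patch): the new HyperLogLog state
// stores one serialized sketch (byte[]) per group and only turns it into a
// cardinality estimate on read. A hypothetical read path:
static long approxNdv(NullableHyperLogLogGroupState state, int groupId) {
    // get() delegates to HyperLogLogUtil.getCardinality(byte[]) on the sketch
    return state.isNull(groupId) ? 0L : state.get(groupId);
}
// ---------------------------------------------------------------------------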
*/ -package com.alibaba.polardbx.optimizer.state; +package com.alibaba.polardbx.executor.accumulator.state; -import com.alibaba.polardbx.optimizer.datastruct.BooleanSegmentArrayList; -import com.alibaba.polardbx.optimizer.datastruct.LongSegmentArrayList; +import com.alibaba.polardbx.executor.accumulator.datastruct.BooleanSegmentArrayList; +import com.alibaba.polardbx.executor.accumulator.datastruct.LongSegmentArrayList; import org.openjdk.jol.info.ClassLayout; /** @@ -64,8 +64,4 @@ public long get(int groupId) { public long estimateSize() { return INSTANCE_SIZE + values.estimateSize() + valueIsNull.estimateSize(); } - - public void setNull(int groupId) { - valueIsNull.set(groupId, true); - } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/BigBitColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/BigBitColumnProvider.java index d5bfd4b4d..736e2ad70 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/BigBitColumnProvider.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/BigBitColumnProvider.java @@ -22,6 +22,7 @@ import com.alibaba.polardbx.executor.Xprotocol.XRowSet; import com.alibaba.polardbx.executor.chunk.BigIntegerBlockBuilder; import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.columnar.CSVRow; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.field.SessionProperties; import com.alibaba.polardbx.optimizer.core.row.Row; @@ -119,4 +120,16 @@ public void putRow(ColumnVector columnVector, int rowNumber, Row row, int column } } } + + @Override + public void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + return; + } + + byte[] bytes = row.getBytes(columnId); + long longVal = ColumnProvider.bigBitLongFromByte(bytes, bytes.length); + blockBuilder.writeBigInteger(BigInteger.valueOf(longVal)); + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/BinaryColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/BinaryColumnProvider.java index cc90ad253..ca60a0b8f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/BinaryColumnProvider.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/BinaryColumnProvider.java @@ -21,6 +21,8 @@ import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.executor.Xprotocol.XRowSet; import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.columnar.CSVRow; +import com.alibaba.polardbx.optimizer.core.datatype.BinaryType; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.field.SessionProperties; import com.alibaba.polardbx.optimizer.core.row.Row; @@ -28,8 +30,8 @@ import org.apache.hadoop.hive.ql.exec.vector.ColumnVector; import org.apache.orc.TypeDescription; -import java.nio.charset.StandardCharsets; import java.time.ZoneId; +import java.util.Arrays; import java.util.Optional; public class BinaryColumnProvider implements ColumnProvider { @@ -39,7 +41,8 @@ public TypeDescription orcType() { } @Override - public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startIndex, int endIndex, SessionProperties 
sessionProperties) { + public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startIndex, int endIndex, + SessionProperties sessionProperties) { BytesColumnVector bytesColumnVector = (BytesColumnVector) vector; for (int i = startIndex; i < endIndex; i++) { int idx = i; @@ -59,7 +62,8 @@ public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startI } @Override - public void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] selection, int selSize, SessionProperties sessionProperties) { + public void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] selection, int selSize, + SessionProperties sessionProperties) { BytesColumnVector bytesColumnVector = (BytesColumnVector) vector; for (int i = 0; i < selSize; i++) { int j = selection[i]; @@ -96,10 +100,12 @@ public void putBloomFilter(ColumnVector vector, OrcBloomFilter bf, int startInde } @Override - public void putRow(ColumnVector columnVector, int rowNumber, Row row, int columnId, DataType dataType, ZoneId timezone, Optional accumulator) { + public void putRow(ColumnVector columnVector, int rowNumber, Row row, int columnId, DataType dataType, + ZoneId timezone, Optional accumulator) { if (row instanceof XRowSet) { try { - ((XRowSet) row).fastParseToColumnVector(columnId, ColumnProviders.UTF_8, columnVector, rowNumber, accumulator); + ((XRowSet) row).fastParseToColumnVector(columnId, ColumnProviders.UTF_8, columnVector, rowNumber, + accumulator); } catch (Exception e) { throw GeneralUtil.nestedException(e); } @@ -118,4 +124,22 @@ public void putRow(ColumnVector columnVector, int rowNumber, Row row, int column } } } + + @Override + public void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + return; + } + + byte[] bytes = row.getBytes(columnId); + BinaryType binaryType = (BinaryType) dataType; + + if (binaryType.isFixedLength()) { + byte[] paddingBytes = ColumnProvider.convertToPaddingBytes(bytes, binaryType); + blockBuilder.writeByteArray(paddingBytes); + } else { + blockBuilder.writeByteArray(bytes); + } + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/BitColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/BitColumnProvider.java index eec2f34be..edff3880c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/BitColumnProvider.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/BitColumnProvider.java @@ -19,6 +19,8 @@ import com.alibaba.polardbx.common.CrcAccumulator; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.executor.Xprotocol.XRowSet; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.columnar.CSVRow; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.row.Row; import org.apache.hadoop.hive.ql.exec.vector.ColumnVector; @@ -53,4 +55,15 @@ public void putRow(ColumnVector columnVector, int rowNumber, Row row, int column } } } + + @Override + public void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + } else { + byte[] bytes = row.getBytes(columnId); + int intVal = ColumnProvider.intFromByte(bytes, bytes.length); + blockBuilder.writeInt(intVal); + } + } } diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/BlobColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/BlobColumnProvider.java index b78351b5d..6193eb8c2 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/BlobColumnProvider.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/BlobColumnProvider.java @@ -21,6 +21,7 @@ import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.executor.Xprotocol.XRowSet; import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.columnar.CSVRow; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.field.SessionProperties; import com.alibaba.polardbx.optimizer.core.row.Row; @@ -29,7 +30,6 @@ import org.apache.hadoop.hive.ql.exec.vector.ColumnVector; import org.apache.orc.TypeDescription; -import java.nio.charset.StandardCharsets; import java.sql.Blob; import java.sql.SQLException; import java.time.ZoneId; @@ -42,7 +42,8 @@ public TypeDescription orcType() { } @Override - public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startIndex, int endIndex, SessionProperties sessionProperties) { + public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startIndex, int endIndex, + SessionProperties sessionProperties) { BytesColumnVector bytesColumnVector = (BytesColumnVector) vector; for (int i = startIndex; i < endIndex; i++) { int idx = i; @@ -61,7 +62,8 @@ public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startI } @Override - public void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] selection, int selSize, SessionProperties sessionProperties) { + public void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] selection, int selSize, + SessionProperties sessionProperties) { BytesColumnVector bytesColumnVector = (BytesColumnVector) vector; for (int i = 0; i < selSize; i++) { int idx = selection[i]; @@ -96,10 +98,12 @@ public void putBloomFilter(ColumnVector vector, OrcBloomFilter bf, int startInde } @Override - public void putRow(ColumnVector columnVector, int rowNumber, Row row, int columnId, DataType dataType, ZoneId timezone, Optional accumulator) { + public void putRow(ColumnVector columnVector, int rowNumber, Row row, int columnId, DataType dataType, + ZoneId timezone, Optional accumulator) { if (row instanceof XRowSet) { try { - ((XRowSet) row).fastParseToColumnVector(columnId, ColumnProviders.UTF_8, columnVector, rowNumber, accumulator); + ((XRowSet) row).fastParseToColumnVector(columnId, ColumnProviders.UTF_8, columnVector, rowNumber, + accumulator); } catch (Exception e) { throw GeneralUtil.nestedException(e); } @@ -123,4 +127,15 @@ public void putRow(ColumnVector columnVector, int rowNumber, Row row, int column } } } + + @Override + public void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + return; + } + + byte[] bytes = row.getBytes(columnId); + blockBuilder.writeBlob(new com.alibaba.polardbx.optimizer.core.datatype.Blob(bytes)); + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/ColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/ColumnProvider.java index 03b339ab8..9bd1600ed 100644 --- 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/ColumnProvider.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/ColumnProvider.java @@ -20,11 +20,17 @@ import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.orc.OrcBloomFilter; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.binlog.JsonConversion; +import com.alibaba.polardbx.common.utils.binlog.LogBuffer; +import com.alibaba.polardbx.common.utils.time.core.TimeStorage; import com.alibaba.polardbx.executor.archive.pruning.OrcFilePruningResult; import com.alibaba.polardbx.executor.archive.pruning.OssAggPruner; import com.alibaba.polardbx.executor.archive.pruning.PruningResult; import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.columnar.CSVRow; import com.alibaba.polardbx.optimizer.config.table.StripeColumnMeta; +import com.alibaba.polardbx.optimizer.core.datatype.BinaryType; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.field.SessionProperties; import com.alibaba.polardbx.optimizer.core.row.Row; @@ -34,11 +40,15 @@ import org.apache.orc.TypeDescription; import org.apache.orc.sarg.PredicateLeaf; +import java.nio.charset.Charset; import java.time.ZoneId; +import java.util.Arrays; import java.util.Map; import java.util.Optional; public interface ColumnProvider { + long MYSQL_TIME_ZERO2 = 0x800000; + TypeDescription orcType(); /** @@ -48,7 +58,8 @@ public interface ColumnProvider { * @param endIndex end of columnVector, useful when selection is null */ default void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] selection, int selSize, int startIndex, - int endIndex, SessionProperties sessionProperties) { + int endIndex, + SessionProperties sessionProperties) { if (selection == null) { transform(vector, blockBuilder, startIndex, endIndex, sessionProperties); } else { @@ -79,6 +90,8 @@ default void putRow(ColumnVector columnVector, ColumnVector redundantColumnVecto putRow(columnVector, rowNumber, row, columnId, dataType, timezone, accumulator); } + void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType); + default PruningResult prune(PredicateLeaf predicateLeaf, ColumnStatistics columnStatistics, Map stripeColumnMetaMap) { return OrcFilePruningResult.PASS; @@ -101,4 +114,170 @@ default void fetchStatistics(ColumnStatistics columnStatistics, SqlKind aggKind, throw new TddlRuntimeException(ErrorCode.ERR_EXECUTE_ON_OSS, new UnsupportedOperationException(), "unsupported sum type."); } + + /** + * Fetch bits as integer from given positions. + * + * @param value source long value. + * @param bitOffset the offset of bit. + * @param numberOfBits the number of bit. + * @param payloadSize payload size. + * @return bits in integer format. 
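+     *
+     * <p>Editor's illustration (not part of the patch): these bit/byte helpers
+     * decode MySQL's packed binary layouts. For DATE (3 bytes little-endian;
+     * day = low 5 bits, month = next 4 bits, year = the remaining bits),
+     * 2024-03-15 round-trips as:
+     * <pre>
+     *     int value = (2024 << 9) | (3 << 5) | 15;  // 1036399, stored as bytes {0x6F, 0xD0, 0x0F}
+     *     int day   = value % 32;                   // 15
+     *     int month = (value >>> 5) % 16;           // 3
+     *     int year  = (value >>> 5) >> 4;           // 2024
+     * </pre>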
+     */
+    static int bitSlice(long value, int bitOffset, int numberOfBits, int payloadSize) {
+        long result = value >> payloadSize - (bitOffset + numberOfBits);
+        return (int) (result & ((1 << numberOfBits) - 1));
+    }
+
+    static int intFromByte(byte[] bytes, int size) {
+        int result = 0;
+        for (int i = 0; i < size; ++i) {
+            result |= (((int) (bytes[i] & 0xFF)) << (i << 3));
+        }
+
+        return result;
+    }
+
+    static long longFromByte(byte[] bytes, int size) {
+        long result = 0;
+        for (int i = 0; i < size; ++i) {
+            result |= (((long) (bytes[i] & 0xFF)) << (i << 3));
+        }
+
+        return result;
+    }
+
+    static long bigBitLongFromByte(byte[] bytes, int size) {
+        long result = 0;
+        for (int i = 0; i < size; ++i) {
+            result |= (((long) (bytes[i] & 0xFF)) << ((size - i - 1) << 3));
+        }
+
+        return result;
+    }
+
+    static long convertDateToLong(byte[] bytes) {
+        if (bytes.length != 3) {
+            throw GeneralUtil.nestedException("Bad format in row value");
+        }
+
+        // convert 3 bytes to an integer (little-endian)
+        int value = 0;
+        for (int i = 0; i < bytes.length; ++i) {
+            value |= (((int) (bytes[i] & 0xFF)) << (i << 3));
+        }
+
+        // CDC binary encoding: 5 bits (day) | 4 bits (month) | remaining bits (year)
+        // parse year, month, day from integer. (equivalent to MySQL storage format)
+        int day = value % 32;
+        value >>>= 5;
+        int month = value % 16;
+        int year = value >> 4;
+
+        // CN-side encoding: 5 bits (day) | month in range 0~13 |
+        // pack to orc format (equivalent to MySQL computation format)
+        return TimeStorage.writeDate(year, month, day);
+    }
+
+    static long convertDateTimeToLong(byte[] bytes, int scale) {
+        // parse datetime
+        long datetime = 0;
+        for (int i = 0; i < 5; i++) {
+            byte b = bytes[i];
+            datetime = (datetime << 8) | (b >= 0 ? (int) b : (b + 256));
+        }
+
+        // parse sign
+        int sign = ColumnProvider.bitSlice(datetime, 0, 1, 40);
+
+        // parse year month
+        int yearMonth = ColumnProvider.bitSlice(datetime, 1, 17, 40);
+
+        // parse year ~ second
+        int year = yearMonth / 13;
+        int month = yearMonth % 13;
+        int day = ColumnProvider.bitSlice(datetime, 18, 5, 40);
+        int hours = ColumnProvider.bitSlice(datetime, 23, 5, 40);
+        int minute = ColumnProvider.bitSlice(datetime, 28, 6, 40);
+        int second = ColumnProvider.bitSlice(datetime, 34, 6, 40);
+
+        // parse fsp (fractional seconds, stored in (scale + 1) / 2 bytes)
+        int micro = 0;
+        int length = (scale + 1) / 2;
+        if (length > 0) {
+            int fraction = 0;
+            for (int i = 5; i < (5 + length); i++) {
+                byte b = bytes[i];
+                fraction = (fraction << 8) | (b >= 0 ? (int) b : (b + 256));
+            }
+            micro = fraction * (int) Math.pow(100, 3 - length);
+        }
+
+        // pack to long
+        return TimeStorage.writeTimestamp(
+            year, month, day, hours, minute, second, micro * 1000L, sign == 0
+        );
+    }
+
+    static long convertTimeToLong(byte[] bytes, int scale) {
+        // parse time
+        long time = 0;
+        for (int i = 0; i < 3; i++) {
+            byte b = bytes[i];
+            time = (time << 8) | (b >= 0 ? (int) b : (b + 256));
+        }
+
+        int sign = ColumnProvider.bitSlice(time, 0, 1, 24);
+
+        // negative time value
+        if (sign == 0) {
+            time = MYSQL_TIME_ZERO2 - time;
+        }
+
+        int hour = ColumnProvider.bitSlice(time, 2, 10, 24);
+        int minute = ColumnProvider.bitSlice(time, 12, 6, 24);
+        int second = ColumnProvider.bitSlice(time, 18, 6, 24);
+
+        // parse fsp
+        int micro = 0;
+        int length = (scale + 1) / 2;
+        if (length > 0) {
+            int fraction = 0;
+            for (int i = 3; i < (3 + length); i++) {
+                byte b = bytes[i];
+                fraction = (fraction << 8) | (b >= 0 ?
(int) b : (b + 256)); + } + if (sign == 0 && fraction > 0) { + fraction = (1 << (length << 3)) - fraction; + second--; + } + micro = fraction * (int) Math.pow(100, 3 - length); + } + + // pack result to long + return TimeStorage.writeTime( + hour, minute, second, micro * 1000L, sign == 0 + ); + } + + static String convertToString(byte[] bytes, String charsetName) { + LogBuffer buffer = new LogBuffer(bytes, 0, bytes.length); + + Charset charset = Charset.forName(charsetName); + JsonConversion.Json_Value jsonValue = + JsonConversion.parse_value(buffer.getUint8(), buffer, bytes.length - 1, charset); + + StringBuilder builder = new StringBuilder(); + jsonValue.toJsonString(builder, charset); + return builder.toString(); + } + + static byte[] convertToPaddingBytes(byte[] bytes, BinaryType binaryType) { + byte[] paddingBytes = new byte[binaryType.length()]; + System.arraycopy(bytes, 0, paddingBytes, 0, Math.min(bytes.length, paddingBytes.length)); + if (bytes.length < paddingBytes.length) { + Arrays.fill(paddingBytes, bytes.length, paddingBytes.length, (byte) 0); + } + return paddingBytes; + } } \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/ColumnProviders.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/ColumnProviders.java index f267426fc..77cc662c7 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/ColumnProviders.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/ColumnProviders.java @@ -18,11 +18,12 @@ import com.alibaba.polardbx.common.charset.CollationName; import com.alibaba.polardbx.common.datatype.Decimal; -import com.alibaba.polardbx.optimizer.config.table.PolarDBXOrcSchema; import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; +import com.alibaba.polardbx.optimizer.config.table.PolarDBXOrcSchema; import com.alibaba.polardbx.optimizer.core.datatype.BigBitType; import com.alibaba.polardbx.optimizer.core.datatype.BinaryType; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.SetType; import java.nio.charset.Charset; import java.sql.Blob; @@ -50,10 +51,14 @@ public class ColumnProviders { public static final ColumnProvider LONG_COLUMN_PROVIDER = new LongColumnProvider(); + public static final ColumnProvider YEAR_COLUMN_PROVIDER = new YearColumnProvider(); + public static final ColumnProvider UNSIGNED_LONG_COLUMN_PROVIDER = new UnsignedLongColumnProvider(); public static final ColumnProvider INTEGER_COLUMN_PROVIDER = new IntegerColumnProvider(); + public static final ColumnProvider INTEGER_24_COLUMN_PROVIDER = new Integer24ColumnProvider(); + public static final ColumnProvider BIT_COLUMN_PROVIDER = new BitColumnProvider(); public static final ColumnProvider SHORT_COLUMN_PROVIDER = new ShortColumnProvider(); @@ -74,12 +79,16 @@ public class ColumnProviders { public static final ColumnProvider BLOB_COLUMN_PROVIDER = new BlobColumnProvider(); - public static final ColumnProvider STRING_COLUMN_PROVIDER = new StringColumnProvider(); + public static final ColumnProvider ENUM_COLUMN_PROVIDER = new EnumColumnProvider(); + + public static final ColumnProvider JSON_COLUMN_PROVIDER = new JsonColumnProvider(); public static final ColumnProvider BIG_BIT_COLUMN_PROVIDER = new BigBitColumnProvider(); public static final ColumnProvider BINARY_COLUMN_PROVIDER = new BinaryColumnProvider(); + public static final ColumnProvider SET_COLUMN_PROVIDER = new 
SetColumnProvider(); + public static List getColumnProviders(PolarDBXOrcSchema orcSchema) { return orcSchema.getColumnMetas().stream() .map(t -> ColumnProviders.getProvider(t)).collect(Collectors.toList()); @@ -123,7 +132,7 @@ public static ColumnProvider getProvider(DataType dataType) { // for year case MYSQL_TYPE_YEAR: - return LONG_COLUMN_PROVIDER; + return YEAR_COLUMN_PROVIDER; /* =========== Fixed-point Numeric ============ */ case MYSQL_TYPE_DECIMAL: @@ -154,7 +163,7 @@ public static ColumnProvider getProvider(DataType dataType) { return INTEGER_COLUMN_PROVIDER; } else { // for mediumint signed - return INTEGER_COLUMN_PROVIDER; + return INTEGER_24_COLUMN_PROVIDER; } case MYSQL_TYPE_SHORT: @@ -196,6 +205,8 @@ public static ColumnProvider getProvider(DataType dataType) { // for varchar/char if (dataType instanceof BinaryType) { return BINARY_COLUMN_PROVIDER; + } else if (dataType instanceof SetType) { + return SET_COLUMN_PROVIDER; } else { return VARCHAR_COLUMN_PROVIDERS.get(dataType.getCollationName()); } @@ -204,10 +215,11 @@ public static ColumnProvider getProvider(DataType dataType) { return BLOB_COLUMN_PROVIDER; case MYSQL_TYPE_ENUM: - case MYSQL_TYPE_JSON: // for enum - return STRING_COLUMN_PROVIDER; - + return ENUM_COLUMN_PROVIDER; + case MYSQL_TYPE_JSON: + // for json + return JSON_COLUMN_PROVIDER; default: return null; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/DateColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/DateColumnProvider.java index 203317bc4..7a51fde00 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/DateColumnProvider.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/DateColumnProvider.java @@ -17,9 +17,6 @@ package com.alibaba.polardbx.executor.archive.columns; import com.alibaba.polardbx.common.CrcAccumulator; -import com.alibaba.polardbx.common.charset.MySQLUnicodeUtils; -import com.alibaba.polardbx.common.datatype.DecimalConverter; -import com.alibaba.polardbx.common.datatype.DecimalStructure; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.orc.OrcBloomFilter; @@ -27,19 +24,17 @@ import com.alibaba.polardbx.common.utils.time.core.MysqlDateTime; import com.alibaba.polardbx.common.utils.time.core.TimeStorage; import com.alibaba.polardbx.common.utils.time.parser.StringTimeParser; -import com.alibaba.polardbx.common.utils.time.parser.TimeParserFlags; import com.alibaba.polardbx.executor.Xprotocol.XRowSet; import com.alibaba.polardbx.executor.archive.pruning.OssAggPruner; import com.alibaba.polardbx.executor.archive.pruning.OssOrcFilePruner; import com.alibaba.polardbx.executor.archive.pruning.PruningResult; import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.columnar.CSVRow; import com.alibaba.polardbx.optimizer.config.table.StripeColumnMeta; import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; import com.alibaba.polardbx.optimizer.core.field.SessionProperties; import com.alibaba.polardbx.optimizer.core.row.Row; import org.apache.calcite.sql.SqlKind; -import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector; import org.apache.hadoop.hive.ql.exec.vector.ColumnVector; import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; import 
org.apache.orc.ColumnStatistics; @@ -47,7 +42,6 @@ import org.apache.orc.TypeDescription; import org.apache.orc.sarg.PredicateLeaf; -import java.sql.Date; import java.sql.Types; import java.time.ZoneId; import java.util.Map; @@ -140,6 +134,19 @@ public void putRow(ColumnVector columnVector, int rowNumber, Row row, int column } } + @Override + public void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + return; + } + + byte[] bytes = row.getBytes(columnId); + long result = ColumnProvider.convertDateToLong(bytes); + + blockBuilder.writeDatetimeRawLong(result); + } + @Override public PruningResult prune(PredicateLeaf predicateLeaf, ColumnStatistics columnStatistics, Map stripeColumnMetaMap) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/DatetimeColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/DatetimeColumnProvider.java index bdb8aa1f9..cd36b4caa 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/DatetimeColumnProvider.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/DatetimeColumnProvider.java @@ -30,10 +30,10 @@ import com.alibaba.polardbx.executor.archive.pruning.OssOrcFilePruner; import com.alibaba.polardbx.executor.archive.pruning.PruningResult; import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.columnar.CSVRow; import com.alibaba.polardbx.optimizer.config.table.StripeColumnMeta; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; -import com.alibaba.polardbx.optimizer.core.datatype.DecimalType; import com.alibaba.polardbx.optimizer.core.field.SessionProperties; import com.alibaba.polardbx.optimizer.core.row.Row; import org.apache.calcite.sql.SqlKind; @@ -136,6 +136,20 @@ public void putRow(ColumnVector columnVector, int rowNumber, Row row, int column } + @Override + public void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + } else { + final int scale = dataType.getScale(); + byte[] bytes = row.getBytes(columnId); + + long result = ColumnProvider.convertDateTimeToLong(bytes, scale); + + blockBuilder.writeLong(result); + } + } + @Override public PruningResult prune(PredicateLeaf predicateLeaf, ColumnStatistics columnStatistics, Map stripeColumnMetaMap) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/DecimalColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/DecimalColumnProvider.java index 126075b02..135a4ec2d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/DecimalColumnProvider.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/DecimalColumnProvider.java @@ -32,20 +32,23 @@ import com.alibaba.polardbx.executor.archive.pruning.PruningResult; import com.alibaba.polardbx.executor.chunk.BlockBuilder; import com.alibaba.polardbx.executor.chunk.DecimalBlockBuilder; +import com.alibaba.polardbx.executor.columnar.CSVRow; import com.alibaba.polardbx.optimizer.config.table.StripeColumnMeta; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DecimalType; import 
com.alibaba.polardbx.optimizer.core.field.SessionProperties; import com.alibaba.polardbx.optimizer.core.row.Row; import org.apache.calcite.sql.SqlKind; import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector; import org.apache.hadoop.hive.ql.exec.vector.ColumnVector; +import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; import org.apache.orc.ColumnStatistics; +import org.apache.orc.IntegerColumnStatistics; import org.apache.orc.StringColumnStatistics; import org.apache.orc.TypeDescription; import org.apache.orc.sarg.PredicateLeaf; import java.math.BigDecimal; -import java.nio.charset.StandardCharsets; import java.time.ZoneId; import java.util.Map; import java.util.Optional; @@ -58,96 +61,188 @@ public TypeDescription orcType() { } @Override - public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startIndex, int endIndex, SessionProperties sessionProperties) { + public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startIndex, int endIndex, + SessionProperties sessionProperties) { blockBuilder.ensureCapacity(endIndex - startIndex); - BytesColumnVector bytesColumnVector = (BytesColumnVector) vector; - for (int i = startIndex; i < endIndex; i++) { - int idx = i; - if (vector.isRepeating) { - idx = 0; + if (vector instanceof LongColumnVector) { + long[] array = ((LongColumnVector) vector).vector; + for (int i = startIndex; i < endIndex; i++) { + int idx = i; + if (vector.isRepeating) { + idx = 0; + } + if (vector.isNull[idx]) { + blockBuilder.appendNull(); + } else { + blockBuilder.writeLong(array[idx]); + } } - if (vector.isNull[idx]) { - blockBuilder.appendNull(); - } else { - int pos = bytesColumnVector.start[idx]; - int len = bytesColumnVector.length[idx]; - byte[] tmp = new byte[len]; + } else if (vector instanceof BytesColumnVector) { + BytesColumnVector bytesColumnVector = (BytesColumnVector) vector; + for (int i = startIndex; i < endIndex; i++) { + int idx = i; + if (vector.isRepeating) { + idx = 0; + } + if (vector.isNull[idx]) { + blockBuilder.appendNull(); + } else { + int pos = bytesColumnVector.start[idx]; + int len = bytesColumnVector.length[idx]; + byte[] tmp = new byte[len]; - MySQLUnicodeUtils.utf8ToLatin1(bytesColumnVector.vector[idx], pos, pos + len, tmp); - ((DecimalBlockBuilder) blockBuilder).writeDecimalBin(tmp); + boolean isUtf8FromLatin1 = + MySQLUnicodeUtils.utf8ToLatin1(bytesColumnVector.vector[idx], pos, pos + len, tmp); + if (!isUtf8FromLatin1) { + // in columnar, decimals are stored already in latin1 encoding + System.arraycopy(bytesColumnVector.vector[idx], pos, tmp, 0, len); + } + ((DecimalBlockBuilder) blockBuilder).writeDecimalBin(tmp); + } } + } else { + throw new UnsupportedOperationException("Unsupported decimal vector type: " + vector.getClass().getName()); } + } @Override - public void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] selection, int selSize, SessionProperties sessionProperties) { + public void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] selection, int selSize, + SessionProperties sessionProperties) { blockBuilder.ensureCapacity(selSize); - BytesColumnVector bytesColumnVector = (BytesColumnVector) vector; - for (int i = 0; i < selSize; i++) { - int j = selection[i]; - int idx = j; - if (vector.isRepeating) { - idx = 0; - } - if (vector.isNull[idx]) { - blockBuilder.appendNull(); - } else { - int pos = bytesColumnVector.start[idx]; - int len = bytesColumnVector.length[idx]; - byte[] tmp = new byte[len]; + if (vector instanceof LongColumnVector) { + 
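+                // editor's note: this branch is the decimal64 fast path — the ORC
+                // vector already holds unscaled long values, so they are copied out
+                // as-is; the BytesColumnVector branch below instead decodes binary
+                // decimals (stored in latin1) into the DecimalBlockBuilder.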
long[] array = ((LongColumnVector) vector).vector; + for (int i = 0; i < selSize; i++) { + int j = selection[i]; + int idx = j; + if (vector.isRepeating) { + idx = 0; + } + if (vector.isNull[idx]) { + blockBuilder.appendNull(); + } else { + blockBuilder.writeLong(array[idx]); + } + } + } else if (vector instanceof BytesColumnVector) { + BytesColumnVector bytesColumnVector = (BytesColumnVector) vector; + for (int i = 0; i < selSize; i++) { + int j = selection[i]; + int idx = j; + if (vector.isRepeating) { + idx = 0; + } + if (vector.isNull[idx]) { + blockBuilder.appendNull(); + } else { + int pos = bytesColumnVector.start[idx]; + int len = bytesColumnVector.length[idx]; + byte[] tmp = new byte[len]; + boolean isUtf8FromLatin1 = + MySQLUnicodeUtils.utf8ToLatin1(bytesColumnVector.vector[idx], pos, pos + len, tmp); + if (!isUtf8FromLatin1) { + // in columnar, decimals are stored already in latin1 encoding + System.arraycopy(bytesColumnVector.vector[idx], pos, tmp, 0, len); + } - MySQLUnicodeUtils.utf8ToLatin1(bytesColumnVector.vector[idx], pos, pos + len, tmp); - ((DecimalBlockBuilder) blockBuilder).writeDecimalBin(tmp); + ((DecimalBlockBuilder) blockBuilder).writeDecimalBin(tmp); + } } + } else { + throw new UnsupportedOperationException("Unsupported decimal vector type: " + vector.getClass().getName()); } + } @Override public void putBloomFilter(ColumnVector vector, OrcBloomFilter bf, int startIndex, int endIndex) { - BytesColumnVector vec = (BytesColumnVector) vector; - for (int i = startIndex; i < endIndex; i++) { - int idx = i; - if (vector.isRepeating) { - idx = 0; - } - if (vector.isNull[idx]) { - bf.add(null); - } else { - bf.addBytes(vec.vector[idx], vec.start[idx], vec.length[idx]); + if (vector instanceof LongColumnVector) { + long[] array = ((LongColumnVector) vector).vector; + for (int i = startIndex; i < endIndex; i++) { + int idx = i; + if (vector.isRepeating) { + idx = 0; + } + if (vector.isNull[idx]) { + bf.add(null); + } else { + bf.addLong(array[idx]); + } } + } else if (vector instanceof BytesColumnVector) { + BytesColumnVector vec = (BytesColumnVector) vector; + for (int i = startIndex; i < endIndex; i++) { + int idx = i; + if (vector.isRepeating) { + idx = 0; + } + if (vector.isNull[idx]) { + bf.add(null); + } else { + bf.addBytes(vec.vector[idx], vec.start[idx], vec.length[idx]); + } + } + } else { + throw new UnsupportedOperationException("Unsupported decimal vector type: " + vector.getClass().getName()); } } @Override - public void putRow(ColumnVector columnVector, int rowNumber, Row row, int columnId, DataType dataType, ZoneId timezone, Optional accumulator) { + public void putRow(ColumnVector columnVector, int rowNumber, Row row, int columnId, DataType dataType, + ZoneId timezone, Optional accumulator) { if (row instanceof XRowSet) { try { - ((XRowSet) row).fastParseToColumnVector(columnId, ColumnProviders.UTF_8, columnVector, rowNumber, dataType.isUnsigned(), dataType.getPrecision(), dataType.getScale(), accumulator); + ((XRowSet) row).fastParseToColumnVector(columnId, ColumnProviders.UTF_8, columnVector, rowNumber, + dataType.isUnsigned(), dataType.getPrecision(), dataType.getScale(), accumulator); } catch (Exception e) { throw GeneralUtil.nestedException(e); } } else { BigDecimal bigDecimal = row.getBigDecimal(columnId); - if (bigDecimal == null) { - columnVector.isNull[rowNumber] = true; - columnVector.noNulls = false; - ((BytesColumnVector) columnVector).setRef(rowNumber, new byte[]{}, 0, 0); + if (columnVector instanceof BytesColumnVector) { + if (bigDecimal == 
null) { + columnVector.isNull[rowNumber] = true; + columnVector.noNulls = false; + ((BytesColumnVector) columnVector).setRef(rowNumber, new byte[] {}, 0, 0); - accumulator.ifPresent(CrcAccumulator::appendNull); - return; - } else { - DecimalStructure dec = Decimal.fromBigDecimal(bigDecimal).getDecimalStructure(); - byte[] result = new byte[DecimalConverter.binarySize(dataType.getPrecision(), dataType.getScale())]; - DecimalConverter.decimalToBin(dec, result, dataType.getPrecision(), dataType.getScale()); - ((BytesColumnVector) columnVector).setVal(rowNumber, MySQLUnicodeUtils.latin1ToUtf8(result).getBytes()); + accumulator.ifPresent(CrcAccumulator::appendNull); + } else { + DecimalStructure dec = Decimal.fromBigDecimal(bigDecimal).getDecimalStructure(); + byte[] result = new byte[DecimalConverter.binarySize(dataType.getPrecision(), dataType.getScale())]; + DecimalConverter.decimalToBin(dec, result, dataType.getPrecision(), dataType.getScale()); + ((BytesColumnVector) columnVector).setVal(rowNumber, + MySQLUnicodeUtils.latin1ToUtf8(result).getBytes()); + + accumulator.ifPresent( + a -> a.appendHash(RawBytesDecimalUtils.hashCode(dec.getDecimalMemorySegment()))); + } + } else if (columnVector instanceof LongColumnVector) { + if (bigDecimal == null) { + columnVector.isNull[rowNumber] = true; + columnVector.noNulls = false; + ((LongColumnVector) columnVector).vector[rowNumber] = 0; + + accumulator.ifPresent(CrcAccumulator::appendNull); + } else { + long decimal64 = bigDecimal.unscaledValue().longValue(); + ((LongColumnVector) columnVector).vector[rowNumber] = decimal64; - accumulator.ifPresent(a -> a.appendHash(RawBytesDecimalUtils.hashCode(dec.getDecimalMemorySegment()))); + // handle checksum + + Decimal decimal = new Decimal(decimal64, dataType.getScale()); + accumulator.ifPresent(a -> a.appendHash( + RawBytesDecimalUtils.hashCode(decimal.getMemorySegment()))); + } + } else { + throw new UnsupportedOperationException( + "Unsupported decimal vector type: " + columnVector.getClass().getName()); } } } @Override - public PruningResult prune(PredicateLeaf predicateLeaf, ColumnStatistics columnStatistics, Map stripeColumnMetaMap) { + public PruningResult prune(PredicateLeaf predicateLeaf, ColumnStatistics columnStatistics, + Map stripeColumnMetaMap) { return OssOrcFilePruner.pruneDecimal(predicateLeaf, columnStatistics, stripeColumnMetaMap); } @@ -158,30 +253,112 @@ public void pruneAgg(PredicateLeaf predicateLeaf, Map st } @Override - public void fetchStatistics(ColumnStatistics columnStatistics, SqlKind aggKind, BlockBuilder blockBuilder, DataType dataType, SessionProperties sessionProperties) { - StringColumnStatistics stringColumnStatistics = (StringColumnStatistics) columnStatistics; - if (stringColumnStatistics.getNumberOfValues() == 0) { - blockBuilder.appendNull(); - return; + public void fetchStatistics(ColumnStatistics columnStatistics, SqlKind aggKind, BlockBuilder blockBuilder, + DataType dataType, SessionProperties sessionProperties) { + if (columnStatistics instanceof IntegerColumnStatistics) { + IntegerColumnStatistics integerColumnStatistics = (IntegerColumnStatistics) columnStatistics; + if (integerColumnStatistics.getNumberOfValues() == 0) { + blockBuilder.appendNull(); + return; + } + switch (aggKind) { + case MAX: { + ColumnVector columnVector = TypeDescription.createLong().createRowBatch(1).cols[0]; + LongColumnVector longColumnVector = (LongColumnVector) columnVector; + longColumnVector.vector[0] = integerColumnStatistics.getMaximum(); + transform(columnVector, blockBuilder, 
0, 1, sessionProperties); + break; + } + + case MIN: { + ColumnVector columnVector = TypeDescription.createLong().createRowBatch(1).cols[0]; + LongColumnVector longColumnVector = (LongColumnVector) columnVector; + longColumnVector.vector[0] = integerColumnStatistics.getMinimum(); + transform(columnVector, blockBuilder, 0, 1, sessionProperties); + break; + } + + case SUM: { + // FIXME is long? + ColumnVector columnVector = TypeDescription.createVarchar().createRowBatch(1).cols[0]; + BytesColumnVector bytesColumnVector = (BytesColumnVector) columnVector; + long unscaledSum = integerColumnStatistics.getSum(); + Decimal decimal = new Decimal(unscaledSum, dataType.getScale()); + + // compact to bin (latin1 format) + byte[] result = new byte[DecimalConverter.binarySize(dataType.getPrecision(), dataType.getScale())]; + DecimalConverter.decimalToBin(decimal.getDecimalStructure(), result, dataType.getPrecision(), + dataType.getScale()); + + // convert latin1 to utf8 + bytesColumnVector.setVal(0, MySQLUnicodeUtils.latin1ToUtf8(result).getBytes()); + ColumnProviders.DECIMAL_COLUMN_PROVIDER.transform(bytesColumnVector, blockBuilder, 0, 1, + sessionProperties); + } + } + } else if (columnStatistics instanceof StringColumnStatistics) { + StringColumnStatistics stringColumnStatistics = (StringColumnStatistics) columnStatistics; + if (stringColumnStatistics.getNumberOfValues() == 0) { + blockBuilder.appendNull(); + return; + } + ColumnVector columnVector = TypeDescription.createVarchar().createRowBatch(1).cols[0]; + switch (aggKind) { + case MAX: { + BytesColumnVector bytesColumnVector = (BytesColumnVector) columnVector; + bytesColumnVector.setVal(0, stringColumnStatistics.getMaximum().getBytes()); + transform(bytesColumnVector, blockBuilder, 0, 1, sessionProperties); + break; + } + + case MIN: { + BytesColumnVector bytesColumnVector = (BytesColumnVector) columnVector; + bytesColumnVector.setVal(0, stringColumnStatistics.getMinimum().getBytes()); + transform(bytesColumnVector, blockBuilder, 0, 1, sessionProperties); + break; + } + + case SUM: + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTE_ON_OSS, new UnsupportedOperationException(), + "unsupported sum type."); + } + } else { + throw new UnsupportedOperationException( + "Unsupported decimal statistics: " + columnStatistics.getClass().getName()); } - ColumnVector columnVector = TypeDescription.createVarchar().createRowBatch(1).cols[0]; - switch (aggKind) { - case MAX: { - BytesColumnVector bytesColumnVector = (BytesColumnVector) columnVector; - bytesColumnVector.setVal(0, stringColumnStatistics.getMaximum().getBytes()); - transform(bytesColumnVector, blockBuilder, 0, 1, sessionProperties); - break; + + } + + @Override + public void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (((DecimalType) dataType).isDecimal64()) { + parseDecimal64(blockBuilder, row, columnId, dataType); + } else { + parseNormalDecimal(blockBuilder, row, columnId, dataType); } + } - case MIN: { - BytesColumnVector bytesColumnVector = (BytesColumnVector) columnVector; - bytesColumnVector.setVal(0, stringColumnStatistics.getMinimum().getBytes()); - transform(bytesColumnVector, blockBuilder, 0, 1, sessionProperties); - break; + private void parseDecimal64(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + } else { + byte[] bytes = row.getBytes(columnId); + int precision = dataType.getPrecision(); + int scale = dataType.getScale(); + long longVal = 
DecimalConverter.getUnscaledDecimal(bytes, precision, scale); + blockBuilder.writeLong(longVal); } + } - case SUM: - throw new TddlRuntimeException(ErrorCode.ERR_EXECUTE_ON_OSS, new UnsupportedOperationException(), "unsupported sum type."); + private void parseNormalDecimal(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + } else { + byte[] bytes = row.getBytes(columnId); + int precision = dataType.getPrecision(); + int scale = dataType.getScale(); + Decimal decimal = DecimalConverter.getDecimal(bytes, precision, scale); + blockBuilder.writeDecimal(decimal); } } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/DoubleColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/DoubleColumnProvider.java index 432dbffc8..4dc34b913 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/DoubleColumnProvider.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/DoubleColumnProvider.java @@ -17,30 +17,24 @@ package com.alibaba.polardbx.executor.archive.columns; import com.alibaba.polardbx.common.CrcAccumulator; -import com.alibaba.polardbx.common.datatype.Decimal; -import com.alibaba.polardbx.common.exception.TddlRuntimeException; -import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.orc.OrcBloomFilter; +import com.alibaba.polardbx.common.type.MySQLStandardFieldType; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.executor.Xprotocol.XRowSet; import com.alibaba.polardbx.executor.archive.pruning.OssAggPruner; import com.alibaba.polardbx.executor.archive.pruning.OssOrcFilePruner; import com.alibaba.polardbx.executor.archive.pruning.PruningResult; import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.DecimalBlockBuilder; -import com.alibaba.polardbx.executor.chunk.DoubleBlockBuilder; -import com.alibaba.polardbx.executor.operator.util.DataTypeUtils; +import com.alibaba.polardbx.executor.columnar.CSVRow; import com.alibaba.polardbx.optimizer.config.table.StripeColumnMeta; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.field.SessionProperties; import com.alibaba.polardbx.optimizer.core.row.Row; import org.apache.calcite.sql.SqlKind; -import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector; import org.apache.hadoop.hive.ql.exec.vector.ColumnVector; import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector; import org.apache.orc.ColumnStatistics; import org.apache.orc.DoubleColumnStatistics; -import org.apache.orc.StringColumnStatistics; import org.apache.orc.TypeDescription; import org.apache.orc.sarg.PredicateLeaf; @@ -55,7 +49,8 @@ public TypeDescription orcType() { } @Override - public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startIndex, int endIndex, SessionProperties sessionProperties) { + public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startIndex, int endIndex, + SessionProperties sessionProperties) { double[] array = ((DoubleColumnVector) vector).vector; for (int i = startIndex; i < endIndex; i++) { int idx = i; @@ -71,7 +66,8 @@ public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startI } @Override - public void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] selection, int 
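+ // parseRow below does no text parsing: the raw bytes already hold the IEEE-754 bit pattern, so the long + // assembled by ColumnProvider.longFromByte goes through Double.longBitsToDouble unchanged. For intuition: + // Double.longBitsToDouble(0x3FF0000000000000L) == 1.0d, and the float provider likewise relies on + // Float.intBitsToFloat(0x3F800000) == 1.0f.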
selSize, SessionProperties sessionProperties) { + public void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] selection, int selSize, + SessionProperties sessionProperties) { double[] array = ((DoubleColumnVector) vector).vector; for (int i = 0; i < selSize; i++) { int idx = selection[i]; @@ -103,10 +99,12 @@ public void putBloomFilter(ColumnVector vector, OrcBloomFilter bf, int startInde } @Override - public void putRow(ColumnVector columnVector, int rowNumber, Row row, int columnId, DataType dataType, ZoneId timezone, Optional accumulator) { + public void putRow(ColumnVector columnVector, int rowNumber, Row row, int columnId, DataType dataType, + ZoneId timezone, Optional accumulator) { if (row instanceof XRowSet) { try { - ((XRowSet) row).fastParseToColumnVector(columnId, ColumnProviders.UTF_8, columnVector, rowNumber, accumulator); + ((XRowSet) row).fastParseToColumnVector(columnId, ColumnProviders.UTF_8, columnVector, rowNumber, + accumulator); } catch (Exception e) { throw GeneralUtil.nestedException(e); } @@ -125,7 +123,24 @@ public void putRow(ColumnVector columnVector, int rowNumber, Row row, int column } @Override - public PruningResult prune(PredicateLeaf predicateLeaf, ColumnStatistics columnStatistics, Map stripeColumnMetaMap) { + public void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + return; + } + + byte[] bytes = row.getBytes(columnId); + blockBuilder.writeDouble(convertFromBinaryToDouble(dataType, bytes)); + } + + private double convertFromBinaryToDouble(DataType dataType, byte[] bytes) { + long result = ColumnProvider.longFromByte(bytes, bytes.length); + return Double.longBitsToDouble(result); + } + + @Override + public PruningResult prune(PredicateLeaf predicateLeaf, ColumnStatistics columnStatistics, + Map stripeColumnMetaMap) { return OssOrcFilePruner.pruneDouble(predicateLeaf, columnStatistics, stripeColumnMetaMap); } @@ -136,7 +151,8 @@ public void pruneAgg(PredicateLeaf predicateLeaf, Map st } @Override - public void fetchStatistics(ColumnStatistics columnStatistics, SqlKind aggKind, BlockBuilder blockBuilder, DataType dataType, SessionProperties sessionProperties) { + public void fetchStatistics(ColumnStatistics columnStatistics, SqlKind aggKind, BlockBuilder blockBuilder, + DataType dataType, SessionProperties sessionProperties) { DoubleColumnStatistics doubleColumnStatistics = (DoubleColumnStatistics) columnStatistics; if (doubleColumnStatistics.getNumberOfValues() == 0) { blockBuilder.appendNull(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/EnumColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/EnumColumnProvider.java new file mode 100644 index 000000000..3bf7a1fa1 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/EnumColumnProvider.java @@ -0,0 +1,36 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.archive.columns; + +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.columnar.CSVRow; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.EnumType; + +public class EnumColumnProvider extends StringColumnProvider { + @Override + public void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + return; + } + + byte[] bytes = row.getBytes(columnId); + int intVal = ColumnProvider.intFromByte(bytes, bytes.length); + blockBuilder.writeString(((EnumType) dataType).convertTo(intVal)); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/FloatColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/FloatColumnProvider.java index 5d08e26d5..5f7499f7b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/FloatColumnProvider.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/FloatColumnProvider.java @@ -17,9 +17,6 @@ package com.alibaba.polardbx.executor.archive.columns; import com.alibaba.polardbx.common.CrcAccumulator; -import com.alibaba.polardbx.common.datatype.Decimal; -import com.alibaba.polardbx.common.exception.TddlRuntimeException; -import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.orc.OrcBloomFilter; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.executor.Xprotocol.XRowSet; @@ -27,9 +24,7 @@ import com.alibaba.polardbx.executor.archive.pruning.OssOrcFilePruner; import com.alibaba.polardbx.executor.archive.pruning.PruningResult; import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.DecimalBlockBuilder; -import com.alibaba.polardbx.executor.chunk.DoubleBlockBuilder; -import com.alibaba.polardbx.executor.operator.util.DataTypeUtils; +import com.alibaba.polardbx.executor.columnar.CSVRow; import com.alibaba.polardbx.optimizer.config.table.StripeColumnMeta; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.field.SessionProperties; @@ -53,7 +48,8 @@ public TypeDescription orcType() { } @Override - public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startIndex, int endIndex, SessionProperties sessionProperties) { + public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startIndex, int endIndex, + SessionProperties sessionProperties) { double[] array = ((DoubleColumnVector) vector).vector; for (int i = startIndex; i < endIndex; i++) { int idx = i; @@ -69,7 +65,8 @@ public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startI } @Override - public void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] selection, int selSize, SessionProperties sessionProperties) { + public void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] selection, int selSize, + SessionProperties sessionProperties) { double[] array = ((DoubleColumnVector) vector).vector; for (int i = 0; i < selSize; i++) { int idx = selection[i]; @@ -101,10 +98,12 @@ public void putBloomFilter(ColumnVector vector, OrcBloomFilter bf, int startInde } @Override - public void 
putRow(ColumnVector columnVector, int rowNumber, Row row, int columnId, DataType dataType, ZoneId timezone, Optional accumulator) { + public void putRow(ColumnVector columnVector, int rowNumber, Row row, int columnId, DataType dataType, + ZoneId timezone, Optional accumulator) { if (row instanceof XRowSet) { try { - ((XRowSet) row).fastParseToColumnVector(columnId, ColumnProviders.UTF_8, columnVector, rowNumber, accumulator); + ((XRowSet) row).fastParseToColumnVector(columnId, ColumnProviders.UTF_8, columnVector, rowNumber, + accumulator); } catch (Exception e) { throw GeneralUtil.nestedException(e); } @@ -123,7 +122,24 @@ public void putRow(ColumnVector columnVector, int rowNumber, Row row, int column } @Override - public PruningResult prune(PredicateLeaf predicateLeaf, ColumnStatistics columnStatistics, Map stripeColumnMetaMap) { + public void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + return; + } + + byte[] bytes = row.getBytes(columnId); + blockBuilder.writeFloat(convertFromBinaryToFloat(dataType, bytes)); + } + + private float convertFromBinaryToFloat(DataType dataType, byte[] bytes) { + int result = ColumnProvider.intFromByte(bytes, bytes.length); + return Float.intBitsToFloat(result); + } + + @Override + public PruningResult prune(PredicateLeaf predicateLeaf, ColumnStatistics columnStatistics, + Map stripeColumnMetaMap) { return OssOrcFilePruner.pruneDouble(predicateLeaf, columnStatistics, stripeColumnMetaMap); } @@ -134,7 +150,8 @@ public void pruneAgg(PredicateLeaf predicateLeaf, Map st } @Override - public void fetchStatistics(ColumnStatistics columnStatistics, SqlKind aggKind, BlockBuilder blockBuilder, DataType dataType, SessionProperties sessionProperties) { + public void fetchStatistics(ColumnStatistics columnStatistics, SqlKind aggKind, BlockBuilder blockBuilder, + DataType dataType, SessionProperties sessionProperties) { DoubleColumnStatistics doubleColumnStatistics = (DoubleColumnStatistics) columnStatistics; if (doubleColumnStatistics.getNumberOfValues() == 0) { blockBuilder.appendNull(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/Integer24ColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/Integer24ColumnProvider.java new file mode 100644 index 000000000..bf4e2919b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/Integer24ColumnProvider.java @@ -0,0 +1,42 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.archive.columns; + +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.columnar.CSVRow; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; + +public class Integer24ColumnProvider extends IntegerColumnProvider { + + private static final int INT_24_SIGNED_BIT = 0x00800000; + private static final int INT_24_SIGNED_PAD = 0xFF000000; + + @Override + public void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + } else { + byte[] bytes = row.getBytes(columnId); + int intVal = ColumnProvider.intFromByte(bytes, bytes.length); + if ((intVal & INT_24_SIGNED_BIT) > 0) { + // For signed negative int_24, fill leading ones + intVal |= INT_24_SIGNED_PAD; + } + blockBuilder.writeInt(intVal); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/IntegerColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/IntegerColumnProvider.java index d35d3b5ca..ef078b16c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/IntegerColumnProvider.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/IntegerColumnProvider.java @@ -27,9 +27,9 @@ import com.alibaba.polardbx.executor.archive.pruning.OssOrcFilePruner; import com.alibaba.polardbx.executor.archive.pruning.PruningResult; import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.columnar.CSVRow; import com.alibaba.polardbx.optimizer.config.table.StripeColumnMeta; import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.LongType; import com.alibaba.polardbx.optimizer.core.field.SessionProperties; import com.alibaba.polardbx.optimizer.core.row.Row; import org.apache.calcite.sql.SqlKind; @@ -52,7 +52,8 @@ public TypeDescription orcType() { } @Override - public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startIndex, int endIndex, SessionProperties sessionProperties) { + public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startIndex, int endIndex, + SessionProperties sessionProperties) { long[] array = ((LongColumnVector) vector).vector; for (int i = startIndex; i < endIndex; i++) { int idx = i; @@ -68,7 +69,8 @@ public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startI } @Override - public void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] selection, int selSize, SessionProperties sessionProperties) { + public void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] selection, int selSize, + SessionProperties sessionProperties) { long[] array = ((LongColumnVector) vector).vector; for (int i = 0; i < selSize; i++) { int idx = selection[i]; @@ -100,10 +102,12 @@ public void putBloomFilter(ColumnVector vector, OrcBloomFilter bf, int startInde } @Override - public void putRow(ColumnVector columnVector, int rowNumber, Row row, int columnId, DataType dataType, ZoneId timezone, Optional accumulator) { + public void putRow(ColumnVector columnVector, int rowNumber, Row row, int columnId, DataType dataType, + ZoneId timezone, Optional accumulator) { if (row instanceof XRowSet) { try { - ((XRowSet) row).fastParseToColumnVector(columnId, ColumnProviders.UTF_8, columnVector, rowNumber, accumulator); + ((XRowSet) 
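+ // Sign extension in Integer24ColumnProvider.parseRow above, worked through: bytes {0xFF, 0xFF, 0xFF} + // give intVal = 0x00FFFFFF; the sign bit 0x00800000 is set, so intVal |= 0xFF000000 produces 0xFFFFFFFF, + // which is -1 as a Java int. Values with the sign bit clear pass through unchanged.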
row).fastParseToColumnVector(columnId, ColumnProviders.UTF_8, columnVector, rowNumber, + accumulator); } catch (Exception e) { throw GeneralUtil.nestedException(e); } @@ -122,7 +126,19 @@ public void putRow(ColumnVector columnVector, int rowNumber, Row row, int column } @Override - public PruningResult prune(PredicateLeaf predicateLeaf, ColumnStatistics columnStatistics, Map stripeColumnMetaMap) { + public void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + } else { + byte[] bytes = row.getBytes(columnId); + int intVal = ColumnProvider.intFromByte(bytes, bytes.length); + blockBuilder.writeInt(intVal); + } + } + + @Override + public PruningResult prune(PredicateLeaf predicateLeaf, ColumnStatistics columnStatistics, + Map stripeColumnMetaMap) { return OssOrcFilePruner.pruneLong(predicateLeaf, columnStatistics, stripeColumnMetaMap); } @@ -133,7 +149,8 @@ public void pruneAgg(PredicateLeaf predicateLeaf, Map st } @Override - public void fetchStatistics(ColumnStatistics columnStatistics, SqlKind aggKind, BlockBuilder blockBuilder, DataType dataType, SessionProperties sessionProperties) { + public void fetchStatistics(ColumnStatistics columnStatistics, SqlKind aggKind, BlockBuilder blockBuilder, + DataType dataType, SessionProperties sessionProperties) { IntegerColumnStatistics integerColumnStatistics = (IntegerColumnStatistics) columnStatistics; if (integerColumnStatistics.getNumberOfValues() == 0) { blockBuilder.appendNull(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/JsonColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/JsonColumnProvider.java new file mode 100644 index 000000000..bda45e15e --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/JsonColumnProvider.java @@ -0,0 +1,40 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.archive.columns; + +import com.alibaba.polardbx.common.utils.binlog.JsonConversion; +import com.alibaba.polardbx.common.utils.binlog.LogBuffer; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.columnar.CSVRow; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; + +import java.nio.charset.Charset; + +public class JsonColumnProvider extends StringColumnProvider { + @Override + public void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + return; + } + + byte[] bytes = row.getBytes(columnId); + String charsetName = dataType.getCharsetName().getJavaCharset(); + + blockBuilder.writeString(ColumnProvider.convertToString(bytes, charsetName)); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/LongColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/LongColumnProvider.java index 3de2368f2..043b4292c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/LongColumnProvider.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/LongColumnProvider.java @@ -27,6 +27,7 @@ import com.alibaba.polardbx.executor.archive.pruning.OssOrcFilePruner; import com.alibaba.polardbx.executor.archive.pruning.PruningResult; import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.columnar.CSVRow; import com.alibaba.polardbx.optimizer.config.table.StripeColumnMeta; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.field.SessionProperties; @@ -52,7 +53,8 @@ public TypeDescription orcType() { } @Override - public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startIndex, int endIndex, SessionProperties sessionProperties) { + public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startIndex, int endIndex, + SessionProperties sessionProperties) { long[] array = ((LongColumnVector) vector).vector; for (int i = startIndex; i < endIndex; i++) { int idx = i; @@ -68,7 +70,8 @@ public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startI } @Override - public void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] selection, int selSize, SessionProperties sessionProperties) { + public void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] selection, int selSize, + SessionProperties sessionProperties) { long[] array = ((LongColumnVector) vector).vector; for (int i = 0; i < selSize; i++) { int idx = selection[i]; @@ -100,10 +103,12 @@ public void putBloomFilter(ColumnVector vector, OrcBloomFilter bf, int startInde } @Override - public void putRow(ColumnVector columnVector, int rowNumber, Row row, int columnId, DataType dataType, ZoneId timezone, Optional accumulator) { + public void putRow(ColumnVector columnVector, int rowNumber, Row row, int columnId, DataType dataType, + ZoneId timezone, Optional accumulator) { if (row instanceof XRowSet) { try { - ((XRowSet) row).fastParseToColumnVector(columnId, ColumnProviders.UTF_8, columnVector, rowNumber, accumulator); + ((XRowSet) row).fastParseToColumnVector(columnId, ColumnProviders.UTF_8, columnVector, rowNumber, + accumulator); } catch (Exception e) { throw GeneralUtil.nestedException(e); } @@ -122,7 +127,19 @@ public void putRow(ColumnVector 
columnVector, int rowNumber, Row row, int column } @Override - public PruningResult prune(PredicateLeaf predicateLeaf, ColumnStatistics columnStatistics, Map stripeColumnMetaMap) { + public void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + } else { + byte[] bytes = row.getBytes(columnId); + long longVal = ColumnProvider.longFromByte(bytes, bytes.length); + blockBuilder.writeLong(longVal); + } + } + + @Override + public PruningResult prune(PredicateLeaf predicateLeaf, ColumnStatistics columnStatistics, + Map stripeColumnMetaMap) { return OssOrcFilePruner.pruneLong(predicateLeaf, columnStatistics, stripeColumnMetaMap); } @@ -133,7 +150,8 @@ public void pruneAgg(PredicateLeaf predicateLeaf, Map st } @Override - public void fetchStatistics(ColumnStatistics columnStatistics, SqlKind aggKind, BlockBuilder blockBuilder, DataType dataType, SessionProperties sessionProperties) { + public void fetchStatistics(ColumnStatistics columnStatistics, SqlKind aggKind, BlockBuilder blockBuilder, + DataType dataType, SessionProperties sessionProperties) { IntegerColumnStatistics integerColumnStatistics = (IntegerColumnStatistics) columnStatistics; if (integerColumnStatistics.getNumberOfValues() == 0) { blockBuilder.appendNull(); @@ -175,5 +193,4 @@ public void fetchStatistics(ColumnStatistics columnStatistics, SqlKind aggKind, } } - } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/SetColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/SetColumnProvider.java new file mode 100644 index 000000000..97ae7bb6b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/SetColumnProvider.java @@ -0,0 +1,46 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.archive.columns; + +import com.alibaba.polardbx.common.CrcAccumulator; +import com.alibaba.polardbx.common.orc.OrcBloomFilter; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.columnar.CSVRow; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.EnumType; +import com.alibaba.polardbx.optimizer.core.datatype.SetType; +import com.alibaba.polardbx.optimizer.core.field.SessionProperties; +import com.alibaba.polardbx.optimizer.core.row.Row; +import org.apache.hadoop.hive.ql.exec.vector.ColumnVector; +import org.apache.orc.TypeDescription; + +import java.time.ZoneId; +import java.util.Optional; + +public class SetColumnProvider extends StringColumnProvider { + @Override + public void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + return; + } + + byte[] bytes = row.getBytes(columnId); + long longVal = ColumnProvider.longFromByte(bytes, bytes.length); + blockBuilder.writeString(String.join(",", ((SetType) dataType).convertFromBinary(longVal))); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/ShortColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/ShortColumnProvider.java index 06ab875a0..7de2b6e50 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/ShortColumnProvider.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/ShortColumnProvider.java @@ -28,9 +28,9 @@ import com.alibaba.polardbx.executor.archive.pruning.PruningResult; import com.alibaba.polardbx.executor.chunk.BlockBuilder; import com.alibaba.polardbx.executor.chunk.ByteBlockBuilder; +import com.alibaba.polardbx.executor.columnar.CSVRow; import com.alibaba.polardbx.optimizer.config.table.StripeColumnMeta; import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.LongType; import com.alibaba.polardbx.optimizer.core.field.SessionProperties; import com.alibaba.polardbx.optimizer.core.row.Row; import org.apache.calcite.sql.SqlKind; @@ -53,7 +53,8 @@ public TypeDescription orcType() { } @Override - public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startIndex, int endIndex, SessionProperties sessionProperties) { + public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startIndex, int endIndex, + SessionProperties sessionProperties) { long[] array = ((LongColumnVector) vector).vector; for (int i = startIndex; i < endIndex; i++) { int idx = i; @@ -73,7 +74,8 @@ public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startI } @Override - public void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] selection, int selSize, SessionProperties sessionProperties) { + public void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] selection, int selSize, + SessionProperties sessionProperties) { long[] array = ((LongColumnVector) vector).vector; for (int i = 0; i < selSize; i++) { int idx = selection[i]; @@ -109,10 +111,12 @@ public void putBloomFilter(ColumnVector vector, OrcBloomFilter bf, int startInde } @Override - public void putRow(ColumnVector columnVector, int rowNumber, Row row, int columnId, DataType dataType, ZoneId timezone, Optional accumulator) { + public void 
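+ // SET columns arrive as a bitmask in which bit i marks the i-th member of the SET definition. + // Sketch for a hypothetical SET('a','b','c') with stored value 0b101: convertFromBinary returns ["a", "c"], + // and String.join(",", ...) renders "a,c", matching the text MySQL itself returns for the column.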
putRow(ColumnVector columnVector, int rowNumber, Row row, int columnId, DataType dataType, + ZoneId timezone, Optional accumulator) { if (row instanceof XRowSet) { try { - ((XRowSet) row).fastParseToColumnVector(columnId, ColumnProviders.UTF_8, columnVector, rowNumber, accumulator); + ((XRowSet) row).fastParseToColumnVector(columnId, ColumnProviders.UTF_8, columnVector, rowNumber, + accumulator); } catch (Exception e) { throw GeneralUtil.nestedException(e); } @@ -131,7 +135,25 @@ public void putRow(ColumnVector columnVector, int rowNumber, Row row, int column } @Override - public PruningResult prune(PredicateLeaf predicateLeaf, ColumnStatistics columnStatistics, Map stripeColumnMetaMap) { + public void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + return; + } + + byte[] bytes = row.getBytes(columnId); + int intVal = ColumnProvider.intFromByte(bytes, bytes.length); + + if (blockBuilder instanceof ByteBlockBuilder) { + blockBuilder.writeByte((byte) intVal); + } else { + blockBuilder.writeShort((short) intVal); + } + } + + @Override + public PruningResult prune(PredicateLeaf predicateLeaf, ColumnStatistics columnStatistics, + Map stripeColumnMetaMap) { return OssOrcFilePruner.pruneLong(predicateLeaf, columnStatistics, stripeColumnMetaMap); } @@ -142,7 +164,8 @@ public void pruneAgg(PredicateLeaf predicateLeaf, Map st } @Override - public void fetchStatistics(ColumnStatistics columnStatistics, SqlKind aggKind, BlockBuilder blockBuilder, DataType dataType, SessionProperties sessionProperties) { + public void fetchStatistics(ColumnStatistics columnStatistics, SqlKind aggKind, BlockBuilder blockBuilder, + DataType dataType, SessionProperties sessionProperties) { IntegerColumnStatistics integerColumnStatistics = (IntegerColumnStatistics) columnStatistics; if (integerColumnStatistics.getNumberOfValues() == 0) { blockBuilder.appendNull(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/StringColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/StringColumnProvider.java index 28deb90ee..336d7b55b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/StringColumnProvider.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/StringColumnProvider.java @@ -21,6 +21,7 @@ import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.executor.Xprotocol.XRowSet; import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.columnar.CSVRow; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.field.SessionProperties; import com.alibaba.polardbx.optimizer.core.row.Row; @@ -31,14 +32,15 @@ import java.time.ZoneId; import java.util.Optional; -class StringColumnProvider implements ColumnProvider { +abstract class StringColumnProvider implements ColumnProvider { @Override public TypeDescription orcType() { return TypeDescription.createVarchar(); } @Override - public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startIndex, int endIndex, SessionProperties sessionProperties) { + public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startIndex, int endIndex, + SessionProperties sessionProperties) { BytesColumnVector bytesColumnVector = (BytesColumnVector) vector; for (int i = startIndex; i < endIndex; i++) { int idx 
= i; @@ -60,7 +62,8 @@ public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startI } @Override - public void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] selection, int selSize, SessionProperties sessionProperties) { + public void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] selection, int selSize, + SessionProperties sessionProperties) { BytesColumnVector bytesColumnVector = (BytesColumnVector) vector; for (int i = 0; i < selSize; i++) { int idx = selection[i]; @@ -98,10 +101,12 @@ public void putBloomFilter(ColumnVector vector, OrcBloomFilter bf, int startInde } @Override - public void putRow(ColumnVector columnVector, int rowNumber, Row row, int columnId, DataType dataType, ZoneId timezone, Optional accumulator) { + public void putRow(ColumnVector columnVector, int rowNumber, Row row, int columnId, DataType dataType, + ZoneId timezone, Optional accumulator) { if (row instanceof XRowSet) { try { - ((XRowSet) row).fastParseToColumnVector(columnId, ColumnProviders.UTF_8, columnVector, rowNumber, accumulator); + ((XRowSet) row).fastParseToColumnVector(columnId, ColumnProviders.UTF_8, columnVector, rowNumber, + accumulator); } catch (Exception e) { throw GeneralUtil.nestedException(e); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/TimeColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/TimeColumnProvider.java index 0b26f42b0..9ef7ce4b4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/TimeColumnProvider.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/TimeColumnProvider.java @@ -17,9 +17,6 @@ package com.alibaba.polardbx.executor.archive.columns; import com.alibaba.polardbx.common.CrcAccumulator; -import com.alibaba.polardbx.common.charset.MySQLUnicodeUtils; -import com.alibaba.polardbx.common.datatype.DecimalConverter; -import com.alibaba.polardbx.common.datatype.DecimalStructure; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.orc.OrcBloomFilter; @@ -27,19 +24,17 @@ import com.alibaba.polardbx.common.utils.time.core.MysqlDateTime; import com.alibaba.polardbx.common.utils.time.core.TimeStorage; import com.alibaba.polardbx.common.utils.time.parser.StringTimeParser; -import com.alibaba.polardbx.common.utils.time.parser.TimeParserFlags; import com.alibaba.polardbx.executor.Xprotocol.XRowSet; import com.alibaba.polardbx.executor.archive.pruning.OssAggPruner; import com.alibaba.polardbx.executor.archive.pruning.OssOrcFilePruner; import com.alibaba.polardbx.executor.archive.pruning.PruningResult; import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.columnar.CSVRow; import com.alibaba.polardbx.optimizer.config.table.StripeColumnMeta; import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; import com.alibaba.polardbx.optimizer.core.field.SessionProperties; import com.alibaba.polardbx.optimizer.core.row.Row; import org.apache.calcite.sql.SqlKind; -import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector; import org.apache.hadoop.hive.ql.exec.vector.ColumnVector; import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; import org.apache.orc.ColumnStatistics; @@ -47,13 +42,13 @@ import org.apache.orc.TypeDescription; import 
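+ // TIME values reach parseRow in MySQL's TIME2 binary form (roughly: a sign bit plus packed + // hour/minute/second bitfields in three bytes, followed by up to (scale + 1) / 2 fraction bytes). + // ColumnProvider.convertTimeToLong is assumed here to decode that into the engine's packed long + // (TimeStorage layout), which writeDatetimeRawLong stores as-is.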
org.apache.orc.sarg.PredicateLeaf; -import java.sql.Time; import java.sql.Types; import java.time.ZoneId; import java.util.Map; import java.util.Optional; class TimeColumnProvider implements ColumnProvider { + @Override public TypeDescription orcType() { return TypeDescription.createLong(); @@ -138,6 +133,22 @@ public void putRow(ColumnVector columnVector, int rowNumber, Row row, int column } } + @Override + public void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + return; + } + + byte[] bytes = row.getBytes(columnId); + + final int scale = dataType.getScale(); + + long result = ColumnProvider.convertTimeToLong(bytes, scale); + + blockBuilder.writeDatetimeRawLong(result); + } + @Override public PruningResult prune(PredicateLeaf predicateLeaf, ColumnStatistics columnStatistics, Map stripeColumnMetaMap) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/TimestampColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/TimestampColumnProvider.java index 442b78428..d61f8820f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/TimestampColumnProvider.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/TimestampColumnProvider.java @@ -19,6 +19,7 @@ import com.alibaba.polardbx.common.CrcAccumulator; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.jdbc.ZeroTimestamp; import com.alibaba.polardbx.common.orc.OrcBloomFilter; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.time.MySQLTimeConverter; @@ -33,6 +34,7 @@ import com.alibaba.polardbx.executor.archive.pruning.PruningResult; import com.alibaba.polardbx.executor.chunk.BlockBuilder; import com.alibaba.polardbx.executor.chunk.TimestampBlockBuilder; +import com.alibaba.polardbx.executor.columnar.CSVRow; import com.alibaba.polardbx.optimizer.config.table.StripeColumnMeta; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; @@ -40,7 +42,6 @@ import com.alibaba.polardbx.optimizer.core.row.Row; import com.alibaba.polardbx.rpc.result.XResultUtil; import org.apache.calcite.sql.SqlKind; -import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector; import org.apache.hadoop.hive.ql.exec.vector.ColumnVector; import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; import org.apache.orc.ColumnStatistics; @@ -61,8 +62,6 @@ */ public class TimestampColumnProvider implements ColumnProvider { - public static MysqlDateTime ZERO_DATE_TIME = new MysqlDateTime(); - @Override public TypeDescription orcType() { return TypeDescription.createLong(); @@ -81,7 +80,7 @@ public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startI blockBuilder.appendNull(); } else { if (longColumnVector.vector[idx] == ZERO_TIMESTAMP_LONG_VAL) { - ((TimestampBlockBuilder) blockBuilder).writeMysqlDatetime(ZERO_DATE_TIME); + ((TimestampBlockBuilder) blockBuilder).writeMysqlDatetime(MysqlDateTime.zeroDateTime()); } else { MySQLTimeVal mySQLTimeVal = XResultUtil.longToTimeValue(longColumnVector.vector[idx]); MysqlDateTime mysqlDateTime = @@ -105,7 +104,7 @@ public void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] sele blockBuilder.appendNull(); } else { if 
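+ // Layout consumed by parseRow below: 4 big-endian bytes of epoch seconds, then (scale + 1) / 2 bytes + // holding the fractional digits as a big-endian integer; micro = fraction * 100^(3 - length) rescales it + // to microseconds. Worked example for TIMESTAMP(3): '.123' is stored as 1230 in length = 2 bytes, so + // micro = 1230 * 100 = 123000. A seconds value of 0 short-circuits to ZeroTimestamp for '0000-00-00 00:00:00'.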
(longColumnVector.vector[idx] == ZERO_TIMESTAMP_LONG_VAL) { - ((TimestampBlockBuilder) blockBuilder).writeMysqlDatetime(ZERO_DATE_TIME); + ((TimestampBlockBuilder) blockBuilder).writeMysqlDatetime(MysqlDateTime.zeroDateTime()); } else { MySQLTimeVal mySQLTimeVal = XResultUtil.longToTimeValue(longColumnVector.vector[idx]); MysqlDateTime mysqlDateTime = @@ -169,6 +168,47 @@ public void putRow(ColumnVector columnVector, int rowNumber, Row row, int column } } + @Override + public void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + return; + } + + byte[] bytes = row.getBytes(columnId); + final int scale = dataType.getScale(); + + // parse the seconds part: 4 big-endian bytes of epoch seconds + long second = 0; + for (int i = 0; i < 4; i++) { + byte b = bytes[i]; + second = (second << 8) | (b >= 0 ? (int) b : (b + 256)); + } + + // deal with '0000-00-00 00:00:00' + if (second == 0) { + blockBuilder.writeTimestamp(ZeroTimestamp.instance); + return; + } + + // parse the fractional seconds: (scale + 1) / 2 bytes, rescaled to microseconds below + int micro = 0; + int length = (scale + 1) / 2; + if (length > 0) { + int fraction = 0; + for (int i = 4; i < (4 + length); i++) { + byte b = bytes[i]; + fraction = (fraction << 8) | (b >= 0 ? (int) b : (b + 256)); + } + micro = fraction * (int) Math.pow(100, 3 - length); + } + + Timestamp ts = new Timestamp(second * 1000); + ts.setNanos(micro * 1000); + + blockBuilder.writeTimestamp(ts); + } + @Override public PruningResult prune(PredicateLeaf predicateLeaf, ColumnStatistics columnStatistics, Map stripeColumnMetaMap) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/UnsignedLongColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/UnsignedLongColumnProvider.java index c75f8a50d..e2e4bc619 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/UnsignedLongColumnProvider.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/UnsignedLongColumnProvider.java @@ -31,13 +31,13 @@ import com.alibaba.polardbx.executor.archive.pruning.OssOrcFilePruner; import com.alibaba.polardbx.executor.archive.pruning.PruningResult; import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.columnar.CSVRow; import com.alibaba.polardbx.optimizer.config.table.StripeColumnMeta; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.field.SessionProperties; import com.alibaba.polardbx.optimizer.core.row.Row; import com.google.common.base.Preconditions; import org.apache.calcite.sql.SqlKind; -import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector; import org.apache.hadoop.hive.ql.exec.vector.ColumnVector; import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; import org.apache.orc.ColumnStatistics; @@ -134,6 +134,18 @@ public void putRow(ColumnVector columnVector, int rowNumber, Row row, int column } } + @Override + public void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + return; + } + + byte[] bytes = row.getBytes(columnId); + long result = ColumnProvider.longFromByte(bytes, bytes.length); + blockBuilder.writeLong(result); + } + @Override public PruningResult prune(PredicateLeaf predicateLeaf, ColumnStatistics columnStatistics, Map stripeColumnMetaMap) { diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/VarcharColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/VarcharColumnProvider.java index b7e614888..101deb720 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/VarcharColumnProvider.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/VarcharColumnProvider.java @@ -17,6 +17,7 @@ package com.alibaba.polardbx.executor.archive.columns; import com.alibaba.polardbx.common.CrcAccumulator; +import com.alibaba.polardbx.common.charset.CharsetName; import com.alibaba.polardbx.common.charset.CollationName; import com.alibaba.polardbx.common.charset.MySQLUnicodeUtils; import com.alibaba.polardbx.common.orc.OrcBloomFilter; @@ -27,6 +28,7 @@ import com.alibaba.polardbx.executor.archive.pruning.PruningResult; import com.alibaba.polardbx.executor.chunk.BlockBuilder; import com.alibaba.polardbx.executor.chunk.SliceBlockBuilder; +import com.alibaba.polardbx.executor.columnar.CSVRow; import com.alibaba.polardbx.optimizer.config.table.StripeColumnMeta; import com.alibaba.polardbx.optimizer.config.table.charset.CharsetFactory; import com.alibaba.polardbx.optimizer.config.table.collation.CollationHandler; @@ -41,15 +43,19 @@ import org.apache.orc.TypeDescription; import org.apache.orc.sarg.PredicateLeaf; +import java.nio.charset.Charset; import java.time.ZoneId; import java.util.Map; import java.util.Optional; import java.util.function.BiFunction; class VarcharColumnProvider implements ColumnProvider { + private static final Charset DEFAULT_CHARSET = CharsetName.defaultCharset().toJavaCharset(); public final BiFunction collationHandlerFunction; + private final Charset sourceCharset; VarcharColumnProvider(CollationName collationName) { + sourceCharset = CollationName.getCharsetOf(collationName).toJavaCharset(); CollationHandler collationHandler = CharsetFactory.INSTANCE.createCollationHandler(collationName); collationHandlerFunction = (bytes, length) -> { @@ -65,7 +71,8 @@ public TypeDescription orcType() { } @Override - public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startIndex, int endIndex, SessionProperties sessionProperties) { + public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startIndex, int endIndex, + SessionProperties sessionProperties) { BytesColumnVector bytesColumnVector = (BytesColumnVector) vector; for (int i = startIndex; i < endIndex; i++) { int idx = i; @@ -85,7 +92,8 @@ public void transform(ColumnVector vector, BlockBuilder blockBuilder, int startI } @Override - public void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] selection, int selSize, SessionProperties sessionProperties) { + public void transform(ColumnVector vector, BlockBuilder blockBuilder, int[] selection, int selSize, + SessionProperties sessionProperties) { BytesColumnVector bytesColumnVector = (BytesColumnVector) vector; for (int i = 0; i < selSize; i++) { int j = selection[i]; @@ -122,10 +130,12 @@ public void putBloomFilter(ColumnVector vector, OrcBloomFilter bf, int startInde } @Override - public void putRow(ColumnVector columnVector, int rowNumber, Row row, int columnId, DataType dataType, ZoneId timezone, Optional accumulator) { + public void putRow(ColumnVector columnVector, int rowNumber, Row row, int columnId, DataType dataType, + ZoneId timezone, Optional accumulator) { if (row instanceof XRowSet) { try { - ((XRowSet) 
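+ // parseRow below re-encodes binlog bytes from the table's charset to UTF-8 only when they differ. + // Example (assuming a GBK-collated column): '中' arrives as {0xD6, 0xD0}; new String(bytes, sourceCharset) + // followed by .getBytes(DEFAULT_CHARSET) yields {0xE4, 0xB8, 0xAD}, which SliceBlockBuilder stores directly.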
row).fastParseToColumnVector(columnId, ColumnProviders.UTF_8, columnVector, rowNumber, accumulator); + ((XRowSet) row).fastParseToColumnVector(columnId, ColumnProviders.UTF_8, columnVector, rowNumber, + accumulator); } catch (Exception e) { throw GeneralUtil.nestedException(e); } @@ -163,12 +173,30 @@ public void putRow(ColumnVector columnVector, ColumnVector redundantColumnVector accumulator.ifPresent(CrcAccumulator::appendNull); } else { ((BytesColumnVector) columnVector).setVal(rowNumber, bytes); - ((BytesColumnVector) redundantColumnVector).setVal(rowNumber, this.collationHandlerFunction.apply(bytes, dataType.length())); + ((BytesColumnVector) redundantColumnVector).setVal(rowNumber, + this.collationHandlerFunction.apply(bytes, dataType.length())); accumulator.ifPresent(a -> a.appendBytes(bytes, 0, bytes.length)); } } } + + @Override + public void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + } else { + // The binlog carries the original bytes in the table's character set; + // re-encode them to UTF-8 unless the source charset is already UTF-8. + byte[] bytes = row.getBytes(columnId); + if (sourceCharset.equals(DEFAULT_CHARSET)) { + ((SliceBlockBuilder) blockBuilder).writeBytes(bytes); + } else { + ((SliceBlockBuilder) blockBuilder).writeBytes( + new String(bytes, sourceCharset).getBytes(DEFAULT_CHARSET)); + } + } + } + @Override public PruningResult prune(PredicateLeaf predicateLeaf, ColumnStatistics columnStatistics, Map stripeColumnMetaMap) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/YearColumnProvider.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/YearColumnProvider.java new file mode 100644 index 000000000..ebe8be9c8 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/columns/YearColumnProvider.java @@ -0,0 +1,34 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.archive.columns; + +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.columnar.CSVRow; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; + +public class YearColumnProvider extends LongColumnProvider { + @Override + public void parseRow(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) { + if (row.isNullAt(columnId)) { + blockBuilder.appendNull(); + } else { + byte[] bytes = row.getBytes(columnId); + long longVal = ColumnProvider.longFromByte(bytes, bytes.length); + blockBuilder.writeLong(longVal == 0 ? 
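+ // YEAR is stored in the binlog as an offset from 1900, with 0 reserved for the literal 0000: + // stored 124 decodes to 2024, stored 0 stays 0, hence the zero guard in this ternary.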
0 : longVal + 1900); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/predicate/OSSPredicateBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/predicate/OSSPredicateBuilder.java index c95f8a256..feafe35dd 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/predicate/OSSPredicateBuilder.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/predicate/OSSPredicateBuilder.java @@ -30,11 +30,9 @@ import com.alibaba.polardbx.common.utils.time.parser.TimeParseStatus; import com.alibaba.polardbx.common.utils.time.parser.TimeParserFlags; import com.alibaba.polardbx.executor.archive.pruning.OssOrcFilePruner; -import com.alibaba.polardbx.executor.archive.reader.TypeComparison; import com.alibaba.polardbx.executor.archive.reader.OSSColumnTransformer; -import com.alibaba.polardbx.executor.archive.reader.OSSColumnTransformerUtil; +import com.alibaba.polardbx.executor.archive.reader.TypeComparison; import com.alibaba.polardbx.executor.operator.util.minmaxfilter.MinMaxFilter; -import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; import com.alibaba.polardbx.optimizer.config.table.OSSOrcFileMeta; import com.alibaba.polardbx.optimizer.config.table.OrcMetaUtils; import com.alibaba.polardbx.optimizer.config.table.TableMeta; @@ -53,6 +51,7 @@ import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.fun.SqlRuntimeFilterFunction; import org.apache.calcite.sql.type.SqlTypeName; +import org.apache.orc.impl.TypeUtils; import org.apache.orc.sarg.PredicateLeaf; import org.apache.orc.sarg.SearchArgument; import org.apache.orc.sarg.SearchArgumentFactory; @@ -60,7 +59,6 @@ import java.nio.charset.StandardCharsets; import java.sql.Types; -import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -268,14 +266,21 @@ private boolean visitBinary(RexCall call, } return true; case DECIMAL: - if (value instanceof Number) { - byte[] bytes = OssOrcFilePruner.decimalToBin(Decimal.fromString(value.toString()).getDecimalStructure(), - dataType.getPrecision(), dataType.getScale()); - applier.apply(columnName, PredicateLeaf.Type.STRING, - new Object[] {new String(bytes, StandardCharsets.UTF_8)}); - } else { + if (!(value instanceof Number)) { builder.literal(SearchArgument.TruthValue.YES_NO_NULL); + return true; } + Decimal decimal = Decimal.fromString(value.toString()); + if (fileMeta.isEnableDecimal64() && TypeUtils.isDecimal64Precision(dataType.getPrecision())) { + // passing an approximate value for pruning + long longVal = decimal.unscaleInternal(dataType.getScale()); + applier.apply(columnName, PredicateLeaf.Type.LONG, new Object[] {longVal}); + return true; + } + byte[] bytes = OssOrcFilePruner.decimalToBin(decimal.getDecimalStructure(), + dataType.getPrecision(), dataType.getScale()); + applier.apply(columnName, PredicateLeaf.Type.STRING, + new Object[] {new String(bytes, StandardCharsets.UTF_8)}); return true; case TIMESTAMP: MysqlDateTime mysqlDateTime = DataTypeUtil.toMySQLDatetimeByFlags( @@ -417,19 +422,28 @@ private boolean visitBetween(RexCall call) { } return true; case DECIMAL: - if (value1 instanceof Number && value2 instanceof Number) { - byte[] bytes1 = - OssOrcFilePruner.decimalToBin(Decimal.fromString(value1.toString()).getDecimalStructure(), - dataType.getPrecision(), dataType.getScale()); - byte[] bytes2 = - OssOrcFilePruner.decimalToBin(Decimal.fromString(value2.toString()).getDecimalStructure(), - 
dataType.getPrecision(), dataType.getScale()); - - builder.between(columnName, PredicateLeaf.Type.STRING, new String(bytes1, StandardCharsets.UTF_8), - new String(bytes2, StandardCharsets.UTF_8)); - } else { + if (!(value1 instanceof Number && value2 instanceof Number)) { builder.literal(SearchArgument.TruthValue.YES_NO_NULL); + return true; } + Decimal decimal1 = Decimal.fromString(value1.toString()); + Decimal decimal2 = Decimal.fromString(value2.toString()); + if (fileMeta.isEnableDecimal64() && TypeUtils.isDecimal64Precision(dataType.getPrecision())) { + // passing approximate values with wider range for pruning + long longVal1 = decimal1.unscaleInternal(dataType.getScale()); + long longVal2 = decimal2.unscaleInternal(dataType.getScale()); + builder.between(columnName, PredicateLeaf.Type.LONG, longVal1, longVal2); + return true; + } + byte[] bytes1 = + OssOrcFilePruner.decimalToBin(decimal1.getDecimalStructure(), + dataType.getPrecision(), dataType.getScale()); + byte[] bytes2 = + OssOrcFilePruner.decimalToBin(decimal2.getDecimalStructure(), + dataType.getPrecision(), dataType.getScale()); + + builder.between(columnName, PredicateLeaf.Type.STRING, new String(bytes1, StandardCharsets.UTF_8), + new String(bytes2, StandardCharsets.UTF_8)); return true; case TIMESTAMP: MysqlDateTime mysqlDateTime1 = DataTypeUtil.toMySQLDatetimeByFlags( @@ -591,7 +605,7 @@ private boolean visitIn(RexCall call) { case INTEGER_UNSIGNED: case BIGINT: if (checkInClass(paramList, Number.class)) { - List newPara = Lists.newArrayList(); + List newPara = Lists.newArrayListWithCapacity(paramList.size()); for (Object obj : paramList) { newPara.add(((Number) obj).longValue()); } @@ -602,7 +616,7 @@ private boolean visitIn(RexCall call) { return true; case BIGINT_UNSIGNED: if (checkInClass(paramList, Number.class)) { - List newPara = Lists.newArrayList(); + List newPara = Lists.newArrayListWithCapacity(paramList.size()); for (Object obj : paramList) { newPara.add(((Number) obj).longValue() ^ UInt64Utils.FLIP_MASK); } @@ -614,7 +628,7 @@ private boolean visitIn(RexCall call) { case DOUBLE: case FLOAT: if (checkInClass(paramList, Number.class)) { - List newPara = Lists.newArrayList(); + List newPara = Lists.newArrayListWithCapacity(paramList.size()); for (Object obj : paramList) { newPara.add(((Number) obj).doubleValue()); } @@ -626,7 +640,7 @@ private boolean visitIn(RexCall call) { case VARCHAR: case CHAR: if (redundantColumn != null && preciseDataType != null && checkInClass(paramList, String.class)) { - List newPara = Lists.newArrayList(); + List newPara = Lists.newArrayListWithCapacity(paramList.size()); final SliceType sliceType = (SliceType) preciseDataType; for (Object obj : paramList) { newPara.add(makeSortKeyString(obj, sliceType)); @@ -637,21 +651,31 @@ private boolean visitIn(RexCall call) { } return true; case DECIMAL: - if (checkInClass(paramList, Number.class)) { - List newPara = Lists.newArrayList(); + if (!checkInClass(paramList, Number.class)) { + builder.literal(SearchArgument.TruthValue.YES_NO_NULL); + return true; + } + List newPara = Lists.newArrayListWithCapacity(paramList.size()); + if (fileMeta.isEnableDecimal64() && TypeUtils.isDecimal64Precision(dataType.getPrecision())) { for (Object obj : paramList) { - byte[] bytes = - OssOrcFilePruner.decimalToBin(Decimal.fromString(obj.toString()).getDecimalStructure(), - dataType.getPrecision(), dataType.getScale()); - newPara.add(new String(bytes, StandardCharsets.UTF_8)); + Decimal decimal = Decimal.fromString(obj.toString()); + long longVal1 = 
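+ // For decimal64 ORC files the SARG literal must be the unscaled long, mirroring what the writer put in + // the LongColumnVector: e.g. col IN (123.45) on a DECIMAL(10,2) column becomes the LONG literal 12345. + // Files without decimal64 support keep the STRING sort-key literals built just below.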
decimal.unscaleInternal(dataType.getScale()); + newPara.add(longVal1); } - buildIn(columnName, PredicateLeaf.Type.STRING, newPara); - } else { - builder.literal(SearchArgument.TruthValue.YES_NO_NULL); + buildIn(columnName, PredicateLeaf.Type.LONG, newPara); + return true; + } + + for (Object obj : paramList) { + byte[] bytes = + OssOrcFilePruner.decimalToBin(Decimal.fromString(obj.toString()).getDecimalStructure(), + dataType.getPrecision(), dataType.getScale()); + newPara.add(new String(bytes, StandardCharsets.UTF_8)); } + buildIn(columnName, PredicateLeaf.Type.STRING, newPara); return true; case TIMESTAMP: - List timestampLongValueParams = new ArrayList<>(); + List timestampLongValueParams = Lists.newArrayListWithCapacity(paramList.size()); for (Object obj : paramList) { MysqlDateTime mysqlDateTime = DataTypeUtil.toMySQLDatetimeByFlags( obj, @@ -674,7 +698,7 @@ private boolean visitIn(RexCall call) { return true; case DATE: case DATETIME: - List packedParams = new ArrayList<>(); + List packedParams = Lists.newArrayListWithCapacity(paramList.size()); for (Object obj : paramList) { MysqlDateTime mysqlDateTime = DataTypeUtil.toMySQLDatetimeByFlags( obj, @@ -689,7 +713,7 @@ private boolean visitIn(RexCall call) { buildIn(columnName, PredicateLeaf.Type.LONG, packedParams); return true; case TIME: - List packedTimeParams = new ArrayList<>(); + List packedTimeParams = Lists.newArrayListWithCapacity(paramList.size()); for (Object obj : paramList) { MysqlDateTime mysqlDateTime = DataTypeUtil.toMySQLDatetimeByFlags( obj, @@ -952,22 +976,37 @@ private boolean visitRuntimeFilter(RexCall call) { case DECIMAL: String minDecimalString = minMaxFilter.getMinString(); String maxDecimalString = minMaxFilter.getMaxString(); - if (minDecimalString != null && maxDecimalString != null) { - byte[] bytes1 = - OssOrcFilePruner.decimalToBin(Decimal.fromString(minDecimalString).getDecimalStructure(), - dataType.getPrecision(), dataType.getScale()); - byte[] bytes2 = - OssOrcFilePruner.decimalToBin(Decimal.fromString(minDecimalString).getDecimalStructure(), - dataType.getPrecision(), dataType.getScale()); - if (minDecimalString.equals(maxDecimalString)) { - builder.equals(columnName, PredicateLeaf.Type.STRING, - new String(bytes1, StandardCharsets.UTF_8)); + if (minDecimalString == null || maxDecimalString == null) { + builder.literal(SearchArgument.TruthValue.YES_NO_NULL); + return true; + } + Decimal minDecimal = Decimal.fromString(minDecimalString); + Decimal maxDecimal = Decimal.fromString(maxDecimalString); + if (fileMeta.isEnableDecimal64() && TypeUtils.isDecimal64Precision(dataType.getPrecision())) { + // passing approximate values with wider range for pruning + long minLongVal = minDecimal.unscaleInternal(dataType.getScale()); + long maxLongVal = maxDecimal.unscaleInternal(dataType.getScale()); + if (minLongVal == maxLongVal) { + builder.equals(columnName, PredicateLeaf.Type.LONG, minLongVal); } else { - builder.between(columnName, PredicateLeaf.Type.STRING, - new String(bytes1, StandardCharsets.UTF_8), new String(bytes2, StandardCharsets.UTF_8)); + builder.between(columnName, PredicateLeaf.Type.LONG, minLongVal, + maxLongVal); } + return true; + } + + byte[] bytes1 = + OssOrcFilePruner.decimalToBin(minDecimal.getDecimalStructure(), + dataType.getPrecision(), dataType.getScale()); + byte[] bytes2 = + OssOrcFilePruner.decimalToBin(maxDecimal.getDecimalStructure(), + dataType.getPrecision(), dataType.getScale()); + if (minDecimalString.equals(maxDecimalString)) { + builder.equals(columnName, 
PredicateLeaf.Type.STRING, + new String(bytes1, StandardCharsets.UTF_8)); } else { - builder.literal(SearchArgument.TruthValue.YES_NO_NULL); + builder.between(columnName, PredicateLeaf.Type.STRING, + new String(bytes1, StandardCharsets.UTF_8), new String(bytes2, StandardCharsets.UTF_8)); } return true; default: diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/pruning/OssOrcFilePruner.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/pruning/OssOrcFilePruner.java index 9360fc832..d1ca270b4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/pruning/OssOrcFilePruner.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/pruning/OssOrcFilePruner.java @@ -638,7 +638,21 @@ public static byte[] decimalToBin(DecimalStructure dec, int precision, int scale public static PruningResult pruneDecimal(PredicateLeaf predicateLeaf, ColumnStatistics columnStatistics, Map stripeColumnMetaMap) { - StringColumnStatistics stringColumnStatistics = ((StringColumnStatistics) columnStatistics); + if (columnStatistics instanceof IntegerColumnStatistics) { + return pruneDecimal64(predicateLeaf, (IntegerColumnStatistics) columnStatistics, stripeColumnMetaMap); + } + if (columnStatistics instanceof StringColumnStatistics) { + return pruneNormalDecimal(predicateLeaf, (StringColumnStatistics) columnStatistics, stripeColumnMetaMap); + } + + // unsupported column statistics + LOGGER.warn("Unsupported orc decimal column statistics: " + columnStatistics.getClass().getName()); + return OrcFilePruningResult.PASS; + } + + private static PruningResult pruneNormalDecimal(PredicateLeaf predicateLeaf, + StringColumnStatistics stringColumnStatistics, + Map stripeColumnMetaMap) { String statisticsMinimum = stringColumnStatistics.getMinimum(); String statisticsMaximum = stringColumnStatistics.getMaximum(); @@ -702,7 +716,7 @@ public static PruningResult pruneDecimal(PredicateLeaf predicateLeaf, ColumnStat ).collect(Collectors.toList()); return generatePruningResult(stripeColumnMetaList, stripeColumnMetaMap); } else if (predicateLeaf.getOperator() == PredicateLeaf.Operator.IS_NULL) { - boolean test = columnStatistics.hasNull(); + boolean test = stringColumnStatistics.hasNull(); if (!test) { return OrcFilePruningResult.SKIP; } else { @@ -805,6 +819,127 @@ public static PruningResult pruneDecimal(PredicateLeaf predicateLeaf, ColumnStat } } + /** + * the literal of predicateLeaf should be an unscaled decimal64 long value + */ + private static PruningResult pruneDecimal64(PredicateLeaf predicateLeaf, + IntegerColumnStatistics integerColumnStatistics, + Map stripeColumnMetaMap) { + if (predicateLeaf.getLiteralList() == null) { + Object literal = predicateLeaf.getLiteral(); + // a null literal is legal here: IS_NULL predicates carry no literal at all + if (literal != null && !(literal instanceof Long)) { + LOGGER.warn("Unsupported decimal64 prune value: " + literal + + ", type: " + literal.getClass().getName()); + return OrcFilePruningResult.PASS; + } + } else { + List literalList = predicateLeaf.getLiteralList(); + for (Object literal : literalList) { + if (!(literal instanceof Long)) { + LOGGER.warn("Unsupported decimal64 prune value in list: " + literal + + ", type: " + (literal == null ? "null" : literal.getClass().getName())); + return OrcFilePruningResult.PASS; + } + } + } + + // a decimal64 column stores DECIMAL values of precision <= 18 as unscaled longs + // (e.g. 123.45 at scale 2 is stored as 12345), so statistics and literals compare as plain longs + long statisticsMinimum = integerColumnStatistics.getMinimum(); + long statisticsMaximum = integerColumnStatistics.getMaximum(); + + if (predicateLeaf.getOperator() == PredicateLeaf.Operator.EQUALS) { + long predicateValue = (Long) 
predicateLeaf.getLiteral(); + + if (predicateValue > statisticsMaximum || predicateValue < statisticsMinimum) { + return OrcFilePruningResult.SKIP; + } + + List stripeColumnMetaList = stripeColumnMetaMap.values().stream().filter(x -> { + IntegerColumnStatistics statistics = (IntegerColumnStatistics) x.getColumnStatistics(); + return !(predicateValue > statistics.getMaximum() || predicateValue < statistics.getMinimum()); + }).filter(x -> x.getBloomFilter() == null || x.getBloomFilter().testLong(predicateValue)) + .collect(Collectors.toList()); + return generatePruningResult(stripeColumnMetaList, stripeColumnMetaMap); + } else if (predicateLeaf.getOperator() == PredicateLeaf.Operator.IS_NULL) { + boolean test = integerColumnStatistics.hasNull(); + if (!test) { + return OrcFilePruningResult.SKIP; + } else { + List stripeColumnMetaList = + stripeColumnMetaMap.values().stream() + .filter(x -> x.getColumnStatistics().hasNull()).collect(Collectors.toList()); + return generatePruningResult(stripeColumnMetaList, stripeColumnMetaMap); + } + } else if (predicateLeaf.getOperator() == PredicateLeaf.Operator.BETWEEN) { + List literalList = predicateLeaf.getLiteralList(); + long min = (Long) literalList.get(0); + long max = (Long) literalList.get(1); + + if (max < min) { + return OrcFilePruningResult.SKIP; + } + + if (min > statisticsMaximum || max < statisticsMinimum) { + return OrcFilePruningResult.SKIP; + } else { + List stripeColumnMetaList = stripeColumnMetaMap.values().stream().filter(x -> { + IntegerColumnStatistics statistics = (IntegerColumnStatistics) x.getColumnStatistics(); + return !(min > statistics.getMaximum() || max < statistics.getMinimum()); + }).collect(Collectors.toList()); + return generatePruningResult(stripeColumnMetaList, stripeColumnMetaMap); + } + } else if (predicateLeaf.getOperator() == PredicateLeaf.Operator.LESS_THAN) { + long value = (Long) predicateLeaf.getLiteral(); + + if (value <= statisticsMinimum) { + return OrcFilePruningResult.SKIP; + } else { + List stripeColumnMetaList = stripeColumnMetaMap.values().stream().filter(x -> { + IntegerColumnStatistics statistics = (IntegerColumnStatistics) x.getColumnStatistics(); + return !(value <= statistics.getMinimum()); + }).collect(Collectors.toList()); + return generatePruningResult(stripeColumnMetaList, stripeColumnMetaMap); + } + } else if (predicateLeaf.getOperator() == PredicateLeaf.Operator.LESS_THAN_EQUALS) { + long value = (Long) predicateLeaf.getLiteral(); + + if (value < statisticsMinimum) { + return OrcFilePruningResult.SKIP; + } else { + List stripeColumnMetaList = stripeColumnMetaMap.values().stream().filter(x -> { + IntegerColumnStatistics statistics = (IntegerColumnStatistics) x.getColumnStatistics(); + return !(value < statistics.getMinimum()); + }).collect(Collectors.toList()); + return generatePruningResult(stripeColumnMetaList, stripeColumnMetaMap); + } + } else if (predicateLeaf.getOperator() == PredicateLeaf.Operator.GREATER_THAN) { + long value = (Long) predicateLeaf.getLiteral(); + + if (value >= statisticsMaximum) { + return OrcFilePruningResult.SKIP; + } else { + List stripeColumnMetaList = stripeColumnMetaMap.values().stream().filter(x -> { + IntegerColumnStatistics statistics = (IntegerColumnStatistics) x.getColumnStatistics(); + return !(value >= statistics.getMaximum()); + }).collect(Collectors.toList()); + return generatePruningResult(stripeColumnMetaList, stripeColumnMetaMap); + } + } else if (predicateLeaf.getOperator() == PredicateLeaf.Operator.GREATER_THAN_EQUALS) { + long value = (Long) 
predicateLeaf.getLiteral(); + + if (value > statisticsMaximum) { + return OrcFilePruningResult.SKIP; + } else { + List stripeColumnMetaList = stripeColumnMetaMap.values().stream().filter(x -> { + IntegerColumnStatistics statistics = (IntegerColumnStatistics) x.getColumnStatistics(); + return !(value > statistics.getMaximum()); + }).collect(Collectors.toList()); + return generatePruningResult(stripeColumnMetaList, stripeColumnMetaMap); + } + } else { + // TODO: support more predicates for pruning + return OrcFilePruningResult.PASS; + } + } + private static PruningResult generatePruningResult(List stripeColumnMetaList, Map stripeColumnMetaMap) { if (stripeColumnMetaMap.isEmpty()) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/BatchReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/BatchReader.java index e9beb156e..51ff260cd 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/BatchReader.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/BatchReader.java @@ -17,7 +17,6 @@ package com.alibaba.polardbx.executor.archive.reader; import java.io.Closeable; -import java.io.IOException; public interface BatchReader extends Closeable { T readBatch(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/BufferPoolManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/BufferPoolManager.java index ea4efa6ba..24e5f57d4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/BufferPoolManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/BufferPoolManager.java @@ -130,60 +130,53 @@ public List getImpl(OSSOrcFileMeta fileMeta, String column, OSSReadOption return cache.get( new Key(fileMeta.getLogicalTableSchema(), fileMeta.getLogicalTableName(), fileMeta.getFileName(), column), () -> { - long stamp = FileSystemManager.readLockWithTimeOut(ossReadOption.getEngine()); - try { - FileSystem fileSystem = FileSystemManager.getFileSystemGroup(ossReadOption.getEngine()).getMaster(); + FileSystem fileSystem = FileSystemManager.getFileSystemGroup(ossReadOption.getEngine()).getMaster(); - String orcPath = FileSystemUtils.buildUri(fileSystem, fileMeta.getFileName()); + String orcPath = FileSystemUtils.buildUri(fileSystem, fileMeta.getFileName()); - Configuration configuration = new Configuration(false); - configuration.setLong(OrcConf.MAX_MERGE_DISTANCE.getAttribute(), - ossReadOption.getMaxMergeDistance()); + Configuration configuration = new Configuration(false); + configuration.setLong(OrcConf.MAX_MERGE_DISTANCE.getAttribute(), + ossReadOption.getMaxMergeDistance()); - Reader reader = OrcFile.createReader(new Path(URI.create(orcPath)), - OrcFile.readerOptions(configuration).filesystem(fileSystem).orcTail(fileMeta.getOrcTail())); + Reader reader = OrcFile.createReader(new Path(URI.create(orcPath)), + OrcFile.readerOptions(configuration).filesystem(fileSystem).orcTail(fileMeta.getOrcTail())); - ColumnMeta columnMeta = ossReadOption.getOssColumnTransformer().getTargetColumnMeta(column); + ColumnMeta columnMeta = ossReadOption.getOssColumnTransformer().getTargetColumnMeta(column); - String fieldId = fileMeta.getTableMeta(executionContext).getColumnFieldId(column); + String fieldId = fileMeta.getTableMeta(executionContext).getColumnFieldId(column); - Preconditions.checkArgument(fieldId != null, "fix this case"); - 
Integer columnIndex = fileMeta.getColumnNameToIdx(fieldId); - TypeDescription schema = TypeDescription.createStruct(); + Preconditions.checkArgument(fieldId != null, "fix this case"); + Integer columnIndex = fileMeta.getColumnNameToIdx(fieldId); + TypeDescription schema = TypeDescription.createStruct(); - schema.addField( - fileMeta.getTypeDescription().getFieldNames().get(columnIndex), - fileMeta.getTypeDescription().getChildren().get(columnIndex).clone()); + schema.addField( + fileMeta.getTypeDescription().getFieldNames().get(columnIndex), + fileMeta.getTypeDescription().getChildren().get(columnIndex).clone()); - // reader filter options - Reader.Options readerOptions = new Reader.Options(configuration) - .schema(schema); + // reader filter options + Reader.Options readerOptions = new Reader.Options(configuration) + .schema(schema); - RecordReader recordReader = reader.rows(readerOptions); + RecordReader recordReader = reader.rows(readerOptions); - ColumnProvider columnProvider = ColumnProviders.getProvider(columnMeta); + ColumnProvider columnProvider = ColumnProviders.getProvider(columnMeta); - SessionProperties sessionProperties = SessionProperties.fromExecutionContext(executionContext); + SessionProperties sessionProperties = SessionProperties.fromExecutionContext(executionContext); - VectorizedRowBatch buffer = schema.createRowBatch(1000); + VectorizedRowBatch buffer = schema.createRowBatch(1000); - List result = new ArrayList<>(); + List result = new ArrayList<>(); - while (recordReader.nextBatch(buffer)) { - if (buffer.size == 0) { - continue; - } - BlockBuilder blockBuilder = BlockBuilders.create(columnMeta.getDataType(), executionContext); - columnProvider.transform(buffer.cols[0], blockBuilder, 0, buffer.size, sessionProperties); - result.add(blockBuilder.build()); + while (recordReader.nextBatch(buffer)) { + if (buffer.size == 0) { + continue; } - - return result; - } catch (Throwable e) { - throw GeneralUtil.nestedException(e); - } finally { - FileSystemManager.unlockRead(ossReadOption.getEngine(), stamp); + BlockBuilder blockBuilder = BlockBuilders.create(columnMeta.getDataType(), executionContext); + columnProvider.transform(buffer.cols[0], blockBuilder, 0, buffer.size, sessionProperties); + result.add(blockBuilder.build()); } + + return result; }); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/ORCReadResult.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/ORCReadResult.java index 886efd28a..35b4995b5 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/ORCReadResult.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/ORCReadResult.java @@ -21,8 +21,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; -import java.util.Iterator; - public class ORCReadResult { private VectorizedRowBatch rowBatch; private DataType[] dataTypeList; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/ORCReaderTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/ORCReaderTask.java index 2113d74d1..2dc87fa16 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/ORCReaderTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/ORCReaderTask.java @@ -108,7 +108,6 @@ public ORCReaderTask(OSSReadOption ossReadOption, String tableFileName, FileMeta 
this.ossReadOption = ossReadOption; this.tableFileName = tableFileName; this.closed = new AtomicBoolean(false); - this.stamp = FileSystemManager.readLockWithTimeOut(ossReadOption.getEngine()); this.fileSystem = FileSystemManager.getFileSystemGroup(ossReadOption.getEngine()).getMaster(); String orcPath = FileSystemUtils.buildUri(this.fileSystem, tableFileName); this.ossFileUri = URI.create(orcPath); @@ -355,8 +354,6 @@ public synchronized void close() { this.chunkIterator = null; } catch (IOException e) { throw GeneralUtil.nestedException(e); - } finally { - FileSystemManager.unlockRead(ossReadOption.getEngine(), stamp); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/ORCReaderWithAggTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/ORCReaderWithAggTask.java index 4aac51cd0..dd51d1130 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/ORCReaderWithAggTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/ORCReaderWithAggTask.java @@ -212,7 +212,7 @@ private BlockBuilder fetchStatistics(SessionProperties sessionProperties, int co FileSystemGroup fileSystemGroup = FileSystemManager.getFileSystemGroup(fileMeta.getEngine()); Preconditions.checkArgument(fileSystemGroup != null); try { - if (!fileSystemGroup.exists(fileMeta.getFileName())) { + if (!fileSystemGroup.exists(fileMeta.getFileName(), context.getFinalPlan().isUseColumnar())) { throw new TddlRuntimeException(ErrorCode.ERR_EXECUTE_ON_OSS, "File " + fileMeta.getFileName() + " doesn't exist"); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/OSSColumnTransformer.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/OSSColumnTransformer.java index 57849fb5f..ffd8dd290 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/OSSColumnTransformer.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/OSSColumnTransformer.java @@ -32,6 +32,7 @@ import com.google.common.collect.Maps; import org.apache.hadoop.hive.ql.exec.vector.ColumnVector; import org.eclipse.jetty.util.StringUtil; +import org.jetbrains.annotations.Nullable; import java.sql.Timestamp; import java.util.ArrayList; @@ -66,18 +67,27 @@ public class OSSColumnTransformer { public OSSColumnTransformer(List columnMetas, List fileColumnMetas, List initColumnMetas, - List timestamps) { + List timestamps, + List locInOrc) { Preconditions.checkArgument(columnMetas.size() == fileColumnMetas.size(), "target and source should have the same size"); this.sourceColumnMetas = fileColumnMetas; this.initColumnMetas = initColumnMetas; this.targetColumnMetas = columnMetas; this.timestamps = timestamps; - this.locInOrc = new ArrayList<>(sourceColumnMetas.size()); - int cnt = 0; - for (ColumnMeta meta : sourceColumnMetas) { - locInOrc.add((meta == null) ? null : cnt++); + + if (locInOrc == null) { + this.locInOrc = new ArrayList<>(sourceColumnMetas.size()); + int cnt = 0; + for (ColumnMeta meta : sourceColumnMetas) { + this.locInOrc.add((meta == null) ? 
null : cnt++); + } + } else { + Preconditions.checkArgument(locInOrc.size() == fileColumnMetas.size(), + "orc location list and source list should have the same size"); + this.locInOrc = locInOrc; } + this.targetColumnMap = Maps.newTreeMap(String::compareToIgnoreCase); for (int i = 0; i < targetColumnMetas.size(); i++) { this.targetColumnMap.put(targetColumnMetas.get(i).getName().toLowerCase(), i); @@ -94,6 +104,10 @@ public OSSColumnTransformer(List columnMetas, } } + public int columnCount() { + return targetColumnMetas.size(); + } + String[] getTargetColumns() { String[] columns = new String[sourceColumnMetas.size()]; for (int i = 0; i < sourceColumnMetas.size(); i++) { @@ -152,10 +166,15 @@ public ColumnMeta getTargetColumnMeta(String column) { .filter(x -> x.getName().equals(column)).findFirst().get(); } + @Nullable public Integer getLocInOrc(int loc) { return locInOrc.get(loc); } + public List getLocInOrc() { + return locInOrc; + } + /** * fill in default value of target column. * diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/OSSPhysicalTableReadResult.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/OSSPhysicalTableReadResult.java index 1680ff843..f523011ed 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/OSSPhysicalTableReadResult.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/OSSPhysicalTableReadResult.java @@ -29,6 +29,7 @@ import com.alibaba.polardbx.executor.chunk.MutableChunk; import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; import com.alibaba.polardbx.executor.chunk.SliceBlock; +import com.alibaba.polardbx.executor.chunk.TimestampBlock; import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; import com.alibaba.polardbx.optimizer.config.table.FileMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; @@ -36,9 +37,10 @@ import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.datatype.SliceType; -import com.alibaba.polardbx.executor.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; import com.alibaba.polardbx.optimizer.core.field.SessionProperties; import com.alibaba.polardbx.optimizer.utils.CalciteUtils; +import com.alibaba.polardbx.optimizer.utils.TimestampUtils; import org.apache.calcite.rel.core.AggregateCall; import org.apache.calcite.rel.metadata.RelColumnOrigin; import org.apache.calcite.rel.type.RelDataType; @@ -47,6 +49,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.TimeZone; public class OSSPhysicalTableReadResult extends SimpleOSSPhysicalTableReadResult { private List orcReaderTaskList; @@ -57,6 +60,7 @@ public class OSSPhysicalTableReadResult extends SimpleOSSPhysicalTableReadResult private int taskIndex; private volatile boolean isFinished; + private TimeZone timeZone; public OSSPhysicalTableReadResult(OSSReadOption readOption, ExecutionContext executionContext, List aggCalls, List aggColumns, @@ -85,6 +89,7 @@ public OSSPhysicalTableReadResult(OSSReadOption readOption, ExecutionContext exe this.columnProviders = readOption.getOssColumnTransformer().getTargetColumnProvides(); this.sessionProperties = SessionProperties.fromExecutionContext(context); + this.timeZone = TimestampUtils.getTimeZone(context); } public void init() { @@ -120,7 +125,7 @@ public Chunk next(List> 
inProjectDataTypeList, BlockBuilder[] blockB preAllocatedChunk, filterBitmap, outProject, - context); + context, 0, null); if (result == null) { continue; } @@ -141,6 +146,9 @@ public Chunk nextChunkFromBufferPool(List> inProjectDataTypeList, Bl context.getParamManager().getBoolean(ConnectionParams.ENABLE_OSS_ZERO_COPY); boolean compatible = context.getParamManager().getBoolean(ConnectionParams.ENABLE_OSS_COMPATIBLE); + boolean useSelection = + context.getParamManager().getBoolean(ConnectionParams.ENABLE_COLUMNAR_SCAN_SELECTION); + long resultRows; RandomAccessBlock[] blocksForCompute = new RandomAccessBlock[filterBitmap.length]; @@ -210,29 +218,32 @@ public Chunk nextChunkFromBufferPool(List> inProjectDataTypeList, Bl // case 2. use block from cacheBlock blocks[i] = cachedBlock; } else if (delayMaterialization && cachedBlock instanceof DecimalBlock) { + // case 3. decimal block delay materialization - DecimalBlock decimalBlock = (DecimalBlock) cachedBlock; - blocks[i] = - new DecimalBlock(DataTypes.DecimalType, decimalBlock.getMemorySegments(), - decimalBlock.nulls(), decimalBlock.hasNull(), selSize, - selection, decimalBlock.getState()); + DecimalBlock decimalBlock = cachedBlock.cast(DecimalBlock.class); + blocks[i] = DecimalBlock.from(decimalBlock, selSize, selection, useSelection); + } else if (delayMaterialization && cachedBlock instanceof SliceBlock) { // case 4. slice block delay materialization - blocks[i] = new SliceBlock((SliceType) ((SliceBlock) cachedBlock).getType(), 0, selSize, - ((SliceBlock) cachedBlock).nulls(), ((SliceBlock) cachedBlock).offsets(), - ((SliceBlock) cachedBlock).data(), selection, compatible); + blocks[i] = SliceBlock.from(cachedBlock.cast(SliceBlock.class), selSize, selection, compatible, + useSelection); + } else if (delayMaterialization && cachedBlock instanceof DateBlock) { // case 5. date block delay materialization - DateBlock dateBlock = (DateBlock) cachedBlock; - blocks[i] = - new DateBlock(0, selSize, dateBlock.nulls(), dateBlock.getPacked(), dateBlock.getType(), - dateBlock.getTimezone(), selection); + DateBlock dateBlock = cachedBlock.cast(DateBlock.class); + blocks[i] = DateBlock.from(dateBlock, selSize, selection, useSelection); + + } else if (delayMaterialization && cachedBlock instanceof TimestampBlock) { + // timestamp block delay materialization + TimestampBlock timestampBlock = cachedBlock.cast(TimestampBlock.class); + blocks[i] = TimestampBlock.from(timestampBlock, selSize, selection, useSelection, timeZone); + } else if (delayMaterialization && cachedBlock instanceof IntegerBlock) { + // case 6. integer block delay materialization - IntegerBlock integerBlock = (IntegerBlock) cachedBlock; - blocks[i] = - new IntegerBlock(integerBlock.getType(), integerBlock.intArray(), integerBlock.nulls(), - integerBlock.hasNull(), selSize, selection); + IntegerBlock integerBlock = cachedBlock.cast(IntegerBlock.class); + blocks[i] = IntegerBlock.from(integerBlock, selSize, selection, useSelection); + } else { // case 7. 
normal for (int j = 0; j < selSize; j++) { @@ -270,7 +281,7 @@ public Chunk next(List> inProjectDataTypeList, BlockBuilder[] blockB } Chunk result = next(buffer, readOption.getOssColumnTransformer(), - inProjectDataTypeList, blockBuilders, context); + inProjectDataTypeList, blockBuilders, context, 0, null); if (result == null) { continue; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/OSSReadOption.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/OSSReadOption.java index b547aa634..c29b53ae1 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/OSSReadOption.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/OSSReadOption.java @@ -41,12 +41,13 @@ public class OSSReadOption { private List pruningResultList; private long maxMergeDistance; + private boolean isColumnarIndex; public OSSReadOption(TypeDescription readSchema, OSSColumnTransformer ossColumnMapping, SearchArgument searchArgument, String[] columns, String tableName, Engine engine, List tableFileList, List phyTableFileMetas, List pruningResultList, - long maxMergeDistance) { + long maxMergeDistance, boolean isColumnarIndex) { this.readSchema = readSchema; this.ossColumnTransformer = ossColumnMapping; this.searchArgument = searchArgument; @@ -57,6 +58,7 @@ public OSSReadOption(TypeDescription readSchema, OSSColumnTransformer ossColumnM this.phyTableFileMetas = phyTableFileMetas; this.pruningResultList = pruningResultList; this.maxMergeDistance = maxMergeDistance; + this.isColumnarIndex = isColumnarIndex; } public Engine getEngine() { @@ -127,6 +129,10 @@ public long getMaxMergeDistance() { return maxMergeDistance; } + public boolean isColumnarIndex() { + return isColumnarIndex; + } + @Override public String toString() { return String.format( diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/SimpleOSSPhysicalTableReadResult.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/SimpleOSSPhysicalTableReadResult.java index 5f44700c0..16ec7f65c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/SimpleOSSPhysicalTableReadResult.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/SimpleOSSPhysicalTableReadResult.java @@ -33,14 +33,16 @@ import com.alibaba.polardbx.executor.operator.util.AggregateUtils; import com.alibaba.polardbx.executor.vectorized.EvaluationContext; import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; import com.alibaba.polardbx.optimizer.core.field.SessionProperties; import com.alibaba.polardbx.optimizer.core.rel.OSSTableScan; import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; import com.alibaba.polardbx.optimizer.utils.CalciteUtils; +import com.google.common.base.Preconditions; import org.apache.calcite.rel.core.AggregateCall; import org.apache.calcite.rel.logical.LogicalAggregate; import org.apache.calcite.rel.type.RelDataType; @@ -62,6 +64,7 @@ public class SimpleOSSPhysicalTableReadResult { protected List 
aggCalls; protected ImmutableBitSet groupSet; protected RelDataType dataType; + protected boolean isColumnarIndex; public SimpleOSSPhysicalTableReadResult() { } @@ -72,6 +75,7 @@ public SimpleOSSPhysicalTableReadResult(List> dataTypeList, Executio dataTypeList.stream() .map(t -> ColumnProviders.getProvider(t)).collect(Collectors.toList()); + this.isColumnarIndex = ossTableScan.isColumnarIndex(); LogicalAggregate agg = ossTableScan.getAgg(); if (agg != null) { this.aggCalls = agg.getAggCallList(); @@ -85,6 +89,117 @@ public SimpleOSSPhysicalTableReadResult(List> dataTypeList, Executio this.sessionProperties = SessionProperties.fromExecutionContext(executionContext); } + public Chunk next(Chunk inputChunk, + List> inProjectDataTypeList, + VectorizedExpression condition, + MutableChunk preAllocatedChunk, + int[] filterBitmap, + int[] outProject, + ExecutionContext context, + int preSelSize, + int[] preSelection) { + + long resultRows; + RandomAccessBlock[] blocksForCompute = new RandomAccessBlock[filterBitmap.length]; + Chunk chunk = inputChunk; + if (chunk == null) { + return null; + } else { + resultRows = chunk.getPositionCount(); + } + + // make blocks for pre-filter + int inProjectCount = inProjectDataTypeList.size(); + for (int i = 0; i < inProjectCount; i++) { + if (filterBitmap[i] == 1) { + DataType dataType = inProjectDataTypeList.get(i); + BlockBuilder blockBuilder = BlockBuilders.create(dataType, context); + + Block cachedBlock = chunk.getBlock(i); + + for (int j = 0; j < chunk.getPositionCount(); j++) { + cachedBlock.writePositionTo(j, blockBuilder); + } + + blocksForCompute[i] = (RandomAccessBlock) blockBuilder.build(); + } + } + + // pre-filter + Pair sel = + preFilter(condition, preAllocatedChunk, filterBitmap, context, + (int) resultRows, blocksForCompute, inProjectCount); + + int selSize = sel.getKey(); + int[] selection = sel.getValue(); + if (selSize == 0) { + return null; + } + + if (preSelection != null && preSelSize >= 0) { + // intersect the two selection arrays + int[] intersection = new int[Math.min(preSelSize, selSize)]; + int intersectedSize = VectorizedExpressionUtils.intersect( + selection, selSize, preSelection, preSelSize, intersection + ); + + selection = intersection; + selSize = intersectedSize; + } + + // buffer to block builders + Block[] blocks = new Block[outProject.length]; + for (int i = 0; i < outProject.length; i++) { + DataType dataType = inProjectDataTypeList.get(outProject[i]); + BlockBuilder blockBuilder = BlockBuilders.create(dataType, context); + Block cachedBlock = chunk.getBlock(outProject[i]); + + // normal + for (int j = 0; j < selSize; j++) { + int idx = selection[j]; + cachedBlock.writePositionTo(idx, blockBuilder); + } + blocks[i] = blockBuilder.build(); + } + + return new Chunk(blocks); + }
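+ + /** + * Copy-only variant of next() for chunks that need no vectorized filter evaluation: it materializes the + * input chunk into the given block builders, optionally keeping only the row positions listed in preSelection. + * For example, for an 8-row chunk with preSelSize = 3 and preSelection = {0, 3, 7}, exactly rows 0, 3 and 7 + * are copied out; a null preSelection (or a negative preSelSize) copies every row. + */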
+ public Chunk next(Chunk inputChunk, + BlockBuilder[] blockBuilders, + ExecutionContext context, + int preSelSize, + int[] preSelection) { + final int columns = blockBuilders.length; + final Chunk chunk = inputChunk; + if (chunk == null) { + // nothing to read, go to the next orc read task. + return null; + } + + // buffer to block builders + Block[] blocks = new Block[columns]; + for (int i = 0; i < blockBuilders.length; i++) { + BlockBuilder blockBuilder = blockBuilders[i]; + + if (preSelSize >= 0 && preSelection != null) { + // Use pre-selection array to filter output block (only effective in columnar mode) + for (int j = 0; j < preSelSize; j++) { + int selected = preSelection[j]; + chunk.getBlock(i).writePositionTo(selected, blockBuilder); + } + } else { + for (int j = 0; j < chunk.getPositionCount(); j++) { + chunk.getBlock(i).writePositionTo(j, blockBuilder); + } + } + + blocks[i] = blockBuilder.build(); + } + return new Chunk(blocks); + } + public Chunk next(VectorizedRowBatch batch, OSSColumnTransformer ossColumnTransformer, List> inProjectDataTypeList, @@ -93,7 +208,9 @@ public Chunk next(VectorizedRowBatch batch, MutableChunk preAllocatedChunk, int[] filterBitmap, int[] outProject, - ExecutionContext context) { + ExecutionContext context, + int preSelSize, + int[] preSelection) { int blockCount = blockBuilders.length; final int resultRows = batch.size; RandomAccessBlock[] blocksForCompute = new RandomAccessBlock[filterBitmap.length]; @@ -133,6 +250,17 @@ public Chunk next(VectorizedRowBatch batch, return null; } + if (preSelection != null && preSelSize >= 0) { + // intersect the two selection arrays + int[] intersection = new int[Math.min(preSelSize, selSize)]; + int intersectedSize = VectorizedExpressionUtils.intersect( + selection, selSize, preSelection, preSelSize, intersection + ); + + selection = intersection; + selSize = intersectedSize; + } + // buffer to block builders if (!withAgg()) { Block[] blocks = new Block[blockBuilders.length]; @@ -179,7 +307,9 @@ public Chunk next(VectorizedRowBatch batch, OSSColumnTransformer ossColumnTransformer, List> inProjectDataTypeList, BlockBuilder[] blockBuilders, - ExecutionContext context) { + ExecutionContext context, + int preSelSize, + int[] preSelection) { final int resultRows = batch.size; // buffer to block builders Block[] blocks = new Block[blockBuilders.length]; @@ -187,17 +317,31 @@ public Chunk next(VectorizedRowBatch batch, DataType dataType = inProjectDataTypeList.get(i); BlockBuilder blockBuilder = BlockBuilders.create(dataType, context); - blocks[i] = transformDataType( - blockBuilder, - inProjectDataTypeList.get(i), - batch.cols, - i, - null, - 0, - resultRows, - context, - ossColumnTransformer - ); + if (preSelSize >= 0 && preSelection != null) { + blocks[i] = transformDataType( + blockBuilder, + inProjectDataTypeList.get(i), + batch.cols, + i, + preSelection, + preSelSize, + resultRows, + context, + ossColumnTransformer + ); + } else { + blocks[i] = transformDataType( + blockBuilder, + inProjectDataTypeList.get(i), + batch.cols, + i, + null, + 0, + resultRows, + context, + ossColumnTransformer + ); + } } if (withAgg()) { @@ -251,8 +395,9 @@ Block transformDataType(BlockBuilder targetBlockBuilder, context); } Integer colIdInOrc = ossColumnTransformer.getLocInOrc(colId); + Preconditions.checkArgument(colIdInOrc != null); + // a columnar index file carries one extra leading field, so user columns are read at colIdInOrc + 1 - ColumnVector columnVector = columnVectors[colIdInOrc]; + ColumnVector columnVector = isColumnarIndex ? 
columnVectors[colIdInOrc + 1] : columnVectors[colIdInOrc]; // same data type if (ossColumnCompare == TypeComparison.IS_EQUAL_YES) { this.columnProviders.get(colId).transform( @@ -298,13 +443,13 @@ protected Pair preFilter(VectorizedExpression condition, Mutable boolean[] nulls = filteredBlock.nulls(); boolean[] inputArray = null; if (filteredBlock instanceof LongBlock) { - long[] longInputArray = ((LongBlock) filteredBlock).longArray(); + long[] longInputArray = filteredBlock.cast(LongBlock.class).longArray(); inputArray = new boolean[longInputArray.length]; for (int i = 0; i < inputArray.length; i++) { inputArray[i] = longInputArray[i] == 1; } } else if (filteredBlock instanceof IntegerBlock) { - int[] intInputArray = ((IntegerBlock) filteredBlock).intArray(); + int[] intInputArray = filteredBlock.cast(IntegerBlock.class).intArray(); inputArray = new boolean[intInputArray.length]; for (int i = 0; i < inputArray.length; i++) { inputArray[i] = intInputArray[i] == 1; @@ -336,7 +481,7 @@ protected Chunk aggExec(Chunk chunk, List> inProjectDataTypeList, in int[] groups = AggregateUtils.convertBitSet(groupSet); MemoryAllocatorCtx memoryAllocator = context.getMemoryPool().getMemoryAllocatorCtx(); List aggregators = - AggregateUtils.convertAggregators(columns, CalciteUtils.getTypes(dataType), aggCalls, context, memoryAllocator); + AggregateUtils.convertAggregators(aggCalls, context, memoryAllocator); HashAggExec aggExec = new HashAggExec(columns, groups, aggregators, CalciteUtils.getTypes(dataType), 1, null, context); aggExec.openConsume(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/UnPushableORCReaderTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/UnPushableORCReaderTask.java index 16d3e0e2b..df52df1a4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/UnPushableORCReaderTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/reader/UnPushableORCReaderTask.java @@ -67,8 +67,6 @@ public class UnPushableORCReaderTask { protected final AtomicBoolean closed; - protected volatile long stamp; - protected ExecutionContext context; protected long startTime; @@ -80,7 +78,6 @@ public UnPushableORCReaderTask(OSSReadOption ossReadOption, String tableFileName this.ossReadOption = ossReadOption; this.tableFileName = tableFileName; this.closed = new AtomicBoolean(false); - this.stamp = FileSystemManager.readLockWithTimeOut(ossReadOption.getEngine()); this.fileSystem = FileSystemManager.getFileSystemGroup(ossReadOption.getEngine()).getMaster(); String orcPath = FileSystemUtils.buildUri(this.fileSystem, tableFileName); this.ossFileUri = URI.create(orcPath); @@ -246,8 +243,6 @@ public synchronized void close() { } } catch (IOException e) { throw GeneralUtil.nestedException(e); - } finally { - FileSystemManager.unlockRead(ossReadOption.getEngine(), stamp); } } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/writer/OSSBackFillExecutor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/writer/OSSBackFillExecutor.java index f4babcfed..26c96d13e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/writer/OSSBackFillExecutor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/writer/OSSBackFillExecutor.java @@ -50,15 +50,18 @@ public OSSBackFillExecutor(Engine sourceEngine, Engine targetEngine) { this.targetEngine = targetEngine; } - public int 
backFill2FileStore(String schemaName, String sourceTableName, String targetTableName, ExecutionContext baseEc, + public int backFill2FileStore(String schemaName, String sourceTableName, String targetTableName, + ExecutionContext baseEc, Map> sourcePhyTables, int indexStride, long parallelism, Map, OSSBackFillWriterTask> tasks, String designatedPhysicalPartition) { - return backFill2FileStore(schemaName, sourceTableName, targetTableName, baseEc, sourcePhyTables, indexStride, parallelism, + return backFill2FileStore(schemaName, sourceTableName, targetTableName, baseEc, sourcePhyTables, indexStride, + parallelism, tasks, designatedPhysicalPartition, false); } - public int backFill2FileStore(String schemaName, String sourceTableName, String targetTableName, ExecutionContext baseEc, + public int backFill2FileStore(String schemaName, String sourceTableName, String targetTableName, + ExecutionContext baseEc, Map> sourcePhyTables, int indexStride, long parallelism, Map, OSSBackFillWriterTask> tasks, String designatedPhysicalPartition, boolean supportPause) { @@ -74,9 +77,8 @@ public int backFill2FileStore(String schemaName, String sourceTableName, String // Init extractor and loader final OSSBackFillExtractor extractor = - OSSBackFillExtractor - .create(schemaName, sourceTableName, targetTableName, batchSize, speedMin, speedLimit, parallelism, sourcePhyTables, - baseEc, designatedPhysicalPartition, sourceEngine, targetEngine); + OSSBackFillExtractor.create(schemaName, sourceTableName, targetTableName, batchSize, speedMin, speedLimit, + parallelism, sourcePhyTables, baseEc, designatedPhysicalPartition, sourceEngine, targetEngine); final BatchConsumer batchConsumer = new OSSBackFillConsumer(tasks); @@ -94,7 +96,8 @@ public int backFill2FileStore(String schemaName, String sourceTableName, String return affectRows.get(); } - public int backFill2Innodb(String schemaName, String sourceTableName, String targetTableName, ExecutionContext baseEc, + public int backFill2Innodb(String schemaName, String sourceTableName, String targetTableName, + ExecutionContext baseEc, Map> sourcePhyTables, int indexStride, long parallelism, BiFunction, ExecutionContext, List> executeFunc, Map sourceTargetDbMap, String designateLogicalPart) { @@ -110,9 +113,8 @@ public int backFill2Innodb(String schemaName, String sourceTableName, String tar // Init extractor and loader final Extractor extractor = - OSSBackFillExtractor - .create(schemaName, sourceTableName, targetTableName, batchSize, speedMin, speedLimit, parallelism, sourcePhyTables, - baseEc, null, sourceEngine, targetEngine); + OSSBackFillExtractor.create(schemaName, sourceTableName, targetTableName, batchSize, speedMin, speedLimit, + parallelism, sourcePhyTables, baseEc, null, sourceEngine, targetEngine); final Loader loader = OSSBackFillLoader diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/writer/OSSBackFillExtractor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/writer/OSSBackFillExtractor.java index e3706de95..03d633b03 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/writer/OSSBackFillExtractor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/writer/OSSBackFillExtractor.java @@ -72,9 +72,9 @@ protected OSSBackFillExtractor(String schemaName, String sourceTableName, String Map> sourcePhyTables, Engine sourceEngine, Engine targetEngine) { - super(schemaName, sourceTableName, targetTableName, batchSize, speedMin, speedLimit, parallelism, 
- planSelectWithMax, - planSelectWithMin, planSelectWithMinAndMax, planSelectMaxPk, null, null, primaryKeysId); + super(schemaName, sourceTableName, targetTableName, batchSize, speedMin, speedLimit, parallelism, false, + null, planSelectWithMax, + planSelectWithMin, planSelectWithMinAndMax, planSelectMaxPk, null, primaryKeysId); this.sourcePhyTables = sourcePhyTables; this.sourceEngine = GeneralUtil.coalesce(sourceEngine, Engine.INNODB); this.targetEngine = GeneralUtil.coalesce(targetEngine, Engine.INNODB); @@ -136,6 +136,7 @@ private Cursor doExtract(PhyTableOperation extractPlan, ExecutionContext extract case LOCAL_DISK: case EXTERNAL_DISK: case NFS: + case ABS: RelNode fileStorePlan = OSSTableScan.fromPhysicalTableOperation(extractPlan, extractEc, this.sourceTableName, 1); @@ -155,6 +156,7 @@ private List> doConsume(PhyTableOperation extract case OSS: case EXTERNAL_DISK: case NFS: + case ABS: return consumeFileStore(extractPlan, extractEc, batchConsumer, extractCursor); case INNODB: default: @@ -168,7 +170,7 @@ private List> consumeInnodb(PhyTableOperation ext Cursor extractCursor) { final List> result; try { - result = com.alibaba.polardbx.executor.gsi.utils.Transformer.buildBatchParam(extractCursor); + result = com.alibaba.polardbx.executor.gsi.utils.Transformer.buildBatchParam(extractCursor, false, null); } finally { extractCursor.close(new ArrayList<>()); } @@ -242,6 +244,7 @@ protected List splitAndInitUpperBound(f case LOCAL_DISK: case EXTERNAL_DISK: case NFS: + case ABS: plan = OSSTableScan.fromPhysicalTableOperation(phyTableOperation, baseEc, this.sourceTableName, 1); break; case INNODB: diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/writer/OSSBackFillLoader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/writer/OSSBackFillLoader.java index e060bbfdb..0e92570b7 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/writer/OSSBackFillLoader.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/writer/OSSBackFillLoader.java @@ -58,7 +58,7 @@ protected OSSBackFillLoader(String schemaName, String tableName, SqlInsert inser BiFunction, ExecutionContext, List> executeFunc, Map sourceTargetGroupMap, String designateLogicalPart) { super(schemaName, tableName, insert, insertIgnore, checkerPlan, checkerPkMapping, checkerParamMapping, - executeFunc, false); + executeFunc, false, null); this.sourceTargetGroupMap = sourceTargetGroupMap; this.designateLogicalPart = designateLogicalPart; } @@ -164,8 +164,9 @@ public int executeInsert(SqlInsert sqlInsert, String schemaName, String tableNam String targetGroup = sourceTargetGroupMap.get(sourceDbIndex); assert targetGroup != null; return InsertIndexExecutor - .insertIntoTable(null, sqlInsert, tableMeta, targetGroup, phyTableName, schemaName, executionContext, executeFunc, + .insertIntoTable(null, sqlInsert, tableMeta, targetGroup, phyTableName, schemaName, executionContext, + executeFunc, false, - false, this.designateLogicalPart); + false, this.designateLogicalPart, false, null); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/writer/OSSBackFillWriterTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/writer/OSSBackFillWriterTask.java index bbe5f63de..be3880967 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/writer/OSSBackFillWriterTask.java +++ 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/archive/writer/OSSBackFillWriterTask.java @@ -63,6 +63,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.ql.exec.vector.ColumnVector; import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.orc.OrcConf; import org.apache.orc.OrcFile; import org.apache.orc.Reader; import org.apache.orc.StripeInformation; @@ -228,7 +229,7 @@ public OSSBackFillWriterTask(String logicalSchema, this.flushTaskList = new ArrayList<>(); int indexStride = (int) conf.getLong("orc.row.index.stride", 1000); - this.batch = schema.createRowBatch(indexStride); + this.batch = schema.createRowBatch(getRowBatchVersion(conf), indexStride); this.fpp = conf.getDouble("orc.bloom.filter.fpp", 0.01D); @@ -246,6 +247,12 @@ public OSSBackFillWriterTask(String logicalSchema, this.fileChecksum = new ArrayList<>(); } + private TypeDescription.RowBatchVersion getRowBatchVersion(Configuration conf) { + boolean enableDecimal64 = OrcConf.ENABLE_DECIMAL_64.getBoolean(conf); + return enableDecimal64 ? TypeDescription.RowBatchVersion.USE_DECIMAL64 : + TypeDescription.RowBatchVersion.ORIGINAL; + } + public OSSBackFillWriterTask(String logicalSchema, String logicalTable, String physicalSchema, @@ -728,7 +735,7 @@ private void storeIndexFileMeta(OSSKey metaKey, String localIndexFilePath, File } // upload to oss - FileSystemUtils.writeFile(localIndexFile, metaKey.toString(), this.engine); + FileSystemUtils.writeFile(localIndexFile, metaKey.toString(), this.engine, false); // change file size try (Connection metaDbConn = MetaDbUtil.getConnection()) { @@ -779,7 +786,7 @@ public void upload(MetaForCommit metaForCommit) { LOGGER.info("orc generation done: " + localFilePath); LOGGER.info("file size(in bytes): " + fileSize); - FileSystemUtils.writeFile(localFile, ossKey.toString(), this.engine); + FileSystemUtils.writeFile(localFile, ossKey.toString(), this.engine, false); LOGGER.info("file upload done: " + taskName); } catch (Exception e) { throw GeneralUtil.nestedException(e); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/backfill/BatchConsumer.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/backfill/BatchConsumer.java index 2e2e2d321..6fc13d06b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/backfill/BatchConsumer.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/backfill/BatchConsumer.java @@ -20,6 +20,7 @@ import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.mysql.cj.polarx.protobuf.PolarxPhysicalBackfill; import java.util.List; import java.util.Map; @@ -31,7 +32,16 @@ default void consume(List> batch, throw new UnsupportedOperationException(); } - default void consume(String sourcePhySchema, String sourcePhyTable, Cursor cursor, ExecutionContext context, List> mockResult) { + default void consume(String sourcePhySchema, String sourcePhyTable, Cursor cursor, ExecutionContext context, + List> mockResult) { + throw new UnsupportedOperationException(); + } + + default void consume(Pair targetDbAndGroup, + Pair targetFileAndDir, + List> targetHosts, + Pair userInfo, + PolarxPhysicalBackfill.TransferFileDataOperator transferFileData) { throw new UnsupportedOperationException(); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/backfill/Extractor.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/backfill/Extractor.java index 1134f49ea..55cad4300 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/backfill/Extractor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/backfill/Extractor.java @@ -70,6 +70,7 @@ import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.OptimizerHint; import org.apache.calcite.sql.SqlSelect; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.StringEscapeUtils; import org.apache.commons.lang.math.RandomUtils; import org.jetbrains.annotations.NotNull; @@ -85,6 +86,7 @@ import java.util.Objects; import java.util.Set; import java.util.TreeMap; +import java.util.TreeSet; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; import java.util.concurrent.Future; @@ -172,8 +174,6 @@ public class Extractor extends PhyOperationBuilderCommon { private final PhyTableOperation planSelectSample; - private final PhyTableOperation planSelectMinAndMaxSample; - private boolean needBuildSubBoundList = true; private Map>> backfillSubBoundList = new HashMap<>(); @@ -191,15 +191,16 @@ public class Extractor extends PhyOperationBuilderCommon { static private final Integer maxRandomInterval = 10000; + protected boolean useBinary; + protected final Set notConvertColumns = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); + protected Extractor(String schemaName, String sourceTableName, String targetTableName, long batchSize, - long speedMin, - long speedLimit, - long parallelism, + long speedMin, long speedLimit, long parallelism, boolean useBinary, + List modifyStringColumns, PhyTableOperation planSelectWithMax, PhyTableOperation planSelectWithMin, PhyTableOperation planSelectWithMinAndMax, PhyTableOperation planSelectMaxPk, PhyTableOperation planSelectSample, - PhyTableOperation planSelectMinAndMaxSample, List primaryKeysId) { this.schemaName = schemaName; this.sourceTableName = sourceTableName; @@ -209,12 +210,15 @@ protected Extractor(String schemaName, String sourceTableName, String targetTabl this.rateLimiter = speedLimit <= 0 ? 
null : RateLimiter.create(speedLimit); this.nowSpeedLimit = speedLimit; this.parallelism = parallelism; + this.useBinary = useBinary; + if (CollectionUtils.isNotEmpty(modifyStringColumns)) { + this.notConvertColumns.addAll(modifyStringColumns); + } this.planSelectWithMax = planSelectWithMax; this.planSelectWithMin = planSelectWithMin; this.planSelectWithMinAndMax = planSelectWithMinAndMax; this.planSelectMaxPk = planSelectMaxPk; this.planSelectSample = planSelectSample; - this.planSelectMinAndMaxSample = planSelectMinAndMaxSample; //this.primaryKeys = primaryKeys; this.primaryKeysId = primaryKeysId; this.primaryKeysIdMap = new HashMap<>(); @@ -291,7 +295,7 @@ protected List> executePhysicalPlan(final Executi (ec) -> { final Cursor cursor = ExecutorHelper.execute(plan, ec); try { - return Transformer.convertUpperBoundWithDefault(cursor, (columnMeta, i) -> { + return Transformer.convertUpperBoundWithDefault(cursor, useBinary, (columnMeta, i) -> { // Generate default parameter context for upper bound of empty source table ParameterMethod defaultMethod = ParameterMethod.setString; Object defaultValue = "0"; @@ -340,6 +344,17 @@ protected List splitAndInitUpperBound(f return initUpperBound(baseEc, ddlJobId, dbIndex, phyTable, primaryKeysId); } + // local partition tables are not supported yet + TableMeta tableMeta = baseEc.getSchemaManager(schemaName).getTable(sourceTableName); + if (tableMeta.getLocalPartitionDefinitionInfo() != null) { + return initUpperBound(baseEc, ddlJobId, dbIndex, phyTable, primaryKeysId); + } + + // tables without a primary key are not supported (e.g. ugsi) + if (!tableMeta.isHasPrimaryKey()) { + return initUpperBound(baseEc, ddlJobId, dbIndex, phyTable, primaryKeysId); + } + boolean enableInnodbBtreeSampling = OptimizerContext.getContext(schemaName).getParamManager() .getBoolean(ConnectionParams.ENABLE_INNODB_BTREE_SAMPLING); if (!enableInnodbBtreeSampling) { @@ -367,8 +382,7 @@ protected List splitAndInitUpperBound(f calSamplePercentage = samplePercentage; } - PhyTableOperation plan = - buildSamplePlanWithParam(dbIndex, phyTable, new ArrayList<>(), calSamplePercentage, false, false); + PhyTableOperation plan = buildSamplePlanWithParam(dbIndex, phyTable, calSamplePercentage); // Execute query final List> resultList = executePhysicalPlan(baseEc, plan); @@ -380,7 +394,7 @@ protected List splitAndInitUpperBound(f // step must not less than zero int step = resultList.size() / splitCount; if (step <= 0) { - return null; + return initUpperBound(baseEc, ddlJobId, dbIndex, phyTable, primaryKeysId); } int subStep = step / splitCount; @@ -507,9 +521,7 @@ protected List splitPhysicalBatch(final List>> subUpperBoundList = new ArrayList<>(); boolean notSplit = backfillObjects.get(0).extra.getSplitLevel() == null; if (notSplit) { - PhyTableOperation plan = buildSamplePlanWithParam(dbIndex, physicalTableName, - new ArrayList<>(), calSamplePercentage, false, false - ); + PhyTableOperation plan = buildSamplePlanWithParam(dbIndex, physicalTableName, calSamplePercentage); // Execute query final List> sampleResult = executePhysicalPlan(ec, plan); @@ -991,6 +1003,17 @@ && isNotEmpty(backfillObjects.get(0).extra)) { return; } + // local partition tables are not supported yet + TableMeta tableMeta = ec.getSchemaManager(schemaName).getTable(sourceTableName); + if (tableMeta.getLocalPartitionDefinitionInfo() != null) { + return; + } + + // tables without a primary key are not supported (e.g. 
ugsi) + if (!tableMeta.isHasPrimaryKey()) { + return; + } + List newBackfillObjects = splitPhysicalBatch(ec, remainingRows, dbIndex, phyTable, backfillObjects); @@ -1082,7 +1105,8 @@ protected List> extract(String dbIndex, String ph try { // Extract extractCursor = ExecutorHelper.execute(extractPlan, extractEc); - result = com.alibaba.polardbx.executor.gsi.utils.Transformer.buildBatchParam(extractCursor); + result = com.alibaba.polardbx.executor.gsi.utils.Transformer.buildBatchParam(extractCursor, useBinary, + notConvertColumns); } finally { if (extractCursor != null) { extractCursor.close(new ArrayList<>()); @@ -1210,29 +1234,14 @@ protected PhyTableOperation buildSelectPlanWithParam(String dbIndex, String phyT /** * Build plan for physical sample select. * - * @param params pk column value of last batch * @return built plan */ - protected PhyTableOperation buildSamplePlanWithParam(String dbIndex, String phyTable, - List params, float calSamplePercentage, - boolean withLowerBound, boolean withUpperBound) { + protected PhyTableOperation buildSamplePlanWithParam(String dbIndex, String phyTable, float calSamplePercentage) { Map planParams = new HashMap<>(); // Physical table is 1st parameter planParams.put(1, PlannerUtils.buildParameterContextForTableName(phyTable, 1)); - int nextParamIndex = 2; - - // Parameters for where(DNF) - if (withLowerBound && withUpperBound) { - for (ParameterContext param : params) { - planParams.put(nextParamIndex, - new ParameterContext(param.getParameterMethod(), - new Object[] {nextParamIndex, param.getArgs()[1]})); - nextParamIndex++; - } - } - - PhyTableOperation phyTableOperation = withLowerBound ? planSelectMinAndMaxSample : planSelectSample; + PhyTableOperation phyTableOperation = planSelectSample; SqlSelect sqlSelect = (SqlSelect) phyTableOperation.getNativeSqlNode().clone(); OptimizerHint optimizerHint = new OptimizerHint(); optimizerHint.addHint("+sample_percentage(" + calSamplePercentage + ")"); @@ -1271,6 +1280,7 @@ public static List getPrimaryKeys(TableMeta tableMeta, ExecutionContext public static class ExtractorInfo { TableMeta sourceTableMeta; List targetTableColumns; + List realTargetTableColumns; List primaryKeys; /** @@ -1287,10 +1297,14 @@ public static class ExtractorInfo { */ List primaryKeysId; - public ExtractorInfo(TableMeta sourceTableMeta, List targetTableColumns, List primaryKeys, + public ExtractorInfo(TableMeta sourceTableMeta, + List targetTableColumns, + List realTargetTableColumns, + List primaryKeys, List appearedKeysId) { this.sourceTableMeta = sourceTableMeta; this.targetTableColumns = targetTableColumns; + this.realTargetTableColumns = realTargetTableColumns; this.primaryKeys = primaryKeys; this.primaryKeysId = appearedKeysId; } @@ -1303,6 +1317,10 @@ public List getTargetTableColumns() { return targetTableColumns; } + public List getRealTargetTableColumns() { + return realTargetTableColumns; + } + public List getPrimaryKeys() { return primaryKeys; } @@ -1335,7 +1353,7 @@ public static ExtractorInfo buildExtractorInfo(ExecutionContext ec, final TableMeta targetTableMeta = sm.getTable(targetTableName); final List targetTableColumns; if (onlyReadColumns) { - targetTableColumns = targetTableMeta.getReadColumns() + targetTableColumns = targetTableMeta.getAllColumns() .stream() .filter(columnMeta -> !skipGeneratedColumn || !columnMeta.isGeneratedColumn()) .map(ColumnMeta::getName) @@ -1366,6 +1384,23 @@ public static ExtractorInfo buildExtractorInfo(ExecutionContext ec, } } - return new ExtractorInfo(sourceTableMeta, 
targetTableColumns, primaryKeys, appearedKeysId); + // online change column: find the correct columns on the source and target tables + List sourceTableColumnsAfterMapping = new ArrayList<>(targetTableColumns.size()); + List targetTableColumnsAfterMapping = new ArrayList<>(targetTableColumns.size()); + for (String columnName : targetTableColumns) { + ColumnMeta columnMeta = targetTableMeta.getColumn(columnName); + if (columnMeta.getMappingName() != null) { + if (!columnMeta.getMappingName().isEmpty()) { + sourceTableColumnsAfterMapping.add(columnMeta.getMappingName()); + targetTableColumnsAfterMapping.add(columnName); + } + } else { + sourceTableColumnsAfterMapping.add(columnName); + targetTableColumnsAfterMapping.add(columnName); + } + } + + return new ExtractorInfo(sourceTableMeta, sourceTableColumnsAfterMapping, targetTableColumnsAfterMapping, + primaryKeys, appearedKeysId); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/backfill/Loader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/backfill/Loader.java index 560704ed0..e853499bb 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/backfill/Loader.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/backfill/Loader.java @@ -21,10 +21,12 @@ import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.jdbc.ParameterContext; import com.alibaba.polardbx.common.jdbc.Parameters; +import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.executor.ExecutorHelper; import com.alibaba.polardbx.executor.common.ExecutorContext; +import com.alibaba.polardbx.executor.common.TopologyHandler; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.gsi.GsiUtils; import com.alibaba.polardbx.executor.spi.ITransactionManager; @@ -35,16 +37,20 @@ import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlInsert; +import java.sql.Connection; import java.text.MessageFormat; import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.function.BiFunction; import java.util.function.Supplier; import static com.alibaba.polardbx.common.exception.code.ErrorCode.ER_DUP_ENTRY; +import static com.alibaba.polardbx.executor.columns.ColumnBackfillExecutor.isAllDnUseXDataSource; import static com.alibaba.polardbx.executor.gsi.GsiUtils.SQLSTATE_DUP_ENTRY; /** @@ -71,12 +77,16 @@ public abstract class Loader extends PhyOperationBuilderCommon { private final int[] checkerPkMapping; private final ITransactionManager tm; protected final boolean mirrorCopy; + protected final String backfillReturning; + protected boolean conflictDetection; + protected final boolean usingBackfillReturning; protected final BiFunction, ExecutionContext, List> executeFunc; protected Loader(String schemaName, String tableName, SqlInsert insert, SqlInsert insertIgnore, ExecutionPlan checkerPlan, int[] checkerPkMapping, int[] checkerParamMapping, - BiFunction, ExecutionContext, List> executeFunc, boolean mirrorCopy) { + BiFunction, ExecutionContext, List> executeFunc, + boolean mirrorCopy, String backfillReturning) { this.schemaName = schemaName; this.tableName = tableName; this.sqlInsert = insert; @@ -87,6 +97,9 @@ protected Loader(String schemaName, String tableName, SqlInsert insert, SqlInser
this.executeFunc = executeFunc; this.tm = ExecutorContext.getContext(schemaName).getTransactionManager(); this.mirrorCopy = mirrorCopy; + this.backfillReturning = backfillReturning; + this.conflictDetection = !mirrorCopy; + this.usingBackfillReturning = backfillReturning != null && conflictDetection; } /** @@ -95,6 +108,48 @@ protected Loader(String schemaName, String tableName, SqlInsert insert, SqlInser public int fillIntoIndex(List> batchParams, Pair> baseEcAndIndexPair, Supplier checker) { + if (usingBackfillReturning) { + return fillIntoIndexWithReturning(batchParams, baseEcAndIndexPair, checker); + } else { + return fillIntoIndexWithInsert(batchParams, baseEcAndIndexPair, checker); + } + } + + public int fillIntoIndexWithReturning(List> batchParams, + Pair> baseEcAndIndexPair, + Supplier checker) { + if (batchParams.isEmpty()) { + return 0; + } + + baseEcAndIndexPair.getKey().setTxIsolation(Connection.TRANSACTION_READ_COMMITTED); + + return GsiUtils.wrapWithDistributedXATrx(tm, baseEcAndIndexPair.getKey(), (insertEc) -> { + int result = 0; + try { + // Batch insert + result = applyBatchWithReturning(batchParams, insertEc.copy(), baseEcAndIndexPair.getValue().getKey(), + baseEcAndIndexPair.getValue().getValue()); + + // Batch insert success, check lock exists + return checkBeforeCommit(checker, insertEc, result); + } catch (TddlNestableRuntimeException e) { + // Batch insert failed + SQLRecorderLogger.ddlLogger + .warn(MessageFormat.format( + "[{0}] Batch insert(returning) failed first row: {1} cause: {2}, phyTableName: {3}", + baseEcAndIndexPair.getKey().getTraceId(), + GsiUtils.rowToString(batchParams.get(0)), + e.getMessage(), baseEcAndIndexPair.getValue().getValue())); + + throw e; + } + }); + } + + public int fillIntoIndexWithInsert(List> batchParams, + Pair> baseEcAndIndexPair, + Supplier checker) { if (batchParams.isEmpty()) { return 0; } @@ -139,7 +194,7 @@ public int fillIntoIndex(List> batchParams, for (Map param : batchParams) { int single = applyRow(param, insertEc.copy(), baseEcAndIndexPair.getValue().getKey(), - baseEcAndIndexPair.getValue().getValue(), true); + baseEcAndIndexPair.getValue().getValue()); if (single < 1) { // Compare row @@ -240,7 +295,27 @@ private int applyBatch(List> batchParams, Executi newEc.setParams(parameters); - return executeInsert(sqlInsert, schemaName, tableName, newEc, sourceDbIndex, phyTableName); + SqlInsert insert = conflictDetection ? 
sqlInsert : sqlInsertIgnore; + + return executeInsert(insert, schemaName, tableName, newEc, sourceDbIndex, phyTableName); + } + + private int applyBatchWithReturning(List> batchParams, ExecutionContext newEc, + String sourceDbIndex, String phyTableName) { + // Construct params for each batch + String orgBackfillReturning = newEc.getBackfillReturning(); + try { + Parameters parameters = new Parameters(); + parameters.setBatchParams(batchParams); + + newEc.setParams(parameters); + + newEc.setBackfillReturning(backfillReturning); + + return executeInsert(sqlInsertIgnore, schemaName, tableName, newEc, sourceDbIndex, phyTableName); + } finally { + newEc.setBackfillReturning(orgBackfillReturning); + } } /** @@ -249,21 +324,54 @@ private int applyBatch(List> batchParams, Executi * @param param Parameter * @param newEc Copied ExecutionContext * @param sourceDbIndex the rows extract from which physicalDb - * @param ignore Use insert ignore * @return Affected rows */ private int applyRow(Map param, ExecutionContext newEc, String sourceDbIndex, - String phyTableName, - boolean ignore) { + String phyTableName) { Parameters parameters = new Parameters(); parameters.setParams(param); newEc.setParams(parameters); - return executeInsert(ignore ? sqlInsertIgnore : sqlInsert, schemaName, tableName, newEc, sourceDbIndex, - phyTableName); + return executeInsert(sqlInsertIgnore, schemaName, tableName, newEc, sourceDbIndex, phyTableName); } public abstract int executeInsert(SqlInsert sqlInsert, String schemaName, String tableName, ExecutionContext executionContext, String sourceDbIndex, String phyTableName); + + public static boolean canUseBackfillReturning(ExecutionContext ec, String schemaName) { + final ExecutorContext executorContext = ExecutorContext.getContext(schemaName); + final TopologyHandler topologyHandler = executorContext.getTopologyHandler(); + final boolean allDnUseXDataSource = isAllDnUseXDataSource(topologyHandler); + + return executorContext.getStorageInfoManager().supportsBackfillReturning() + && ec.getParamManager() + .getBoolean(ConnectionParams.BACKFILL_USE_RETURNING) && allDnUseXDataSource; + } + + public static int getReturningAffectRows(List> returningResult, + ExecutionContext newEc) { + List> orgParams = newEc.getParams().getBatchParameters(); + + if (returningResult.isEmpty()) { + return orgParams.size(); + } + + // Any returned row means the INSERT IGNORE skipped an existing key, so report it as a duplicate entry + for (Map baseParam : returningResult) { + final List pkParams = new ArrayList<>(); + + for (int i = 0; i < baseParam.size(); i++) { + final ParameterContext pkParam = baseParam.get(i + 1); + pkParams.add( + Optional.ofNullable(pkParam.getArgs()[1]).map(e -> e.toString().toLowerCase()).orElse("NULL")); + } + + throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_BACKFILL_DUPLICATE_ENTRY, + String.join("-", pkParams), + "PRIMARY"); + } + + return orgParams.size(); + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/backfill/Reporter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/backfill/Reporter.java index 704f2e182..41ec122e8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/backfill/Reporter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/backfill/Reporter.java @@ -19,8 +19,8 @@ import com.alibaba.polardbx.common.jdbc.ParameterContext; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.executor.ddl.newengine.DdlEngineStats; -import com.alibaba.polardbx.executor.gsi.GsiBackfillManager; import
com.alibaba.polardbx.executor.ddl.workqueue.BackFillThreadPool; +import com.alibaba.polardbx.executor.gsi.GsiBackfillManager; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.statistics.SQLRecorderLogger; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/backfill/Throttle.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/backfill/Throttle.java index d558b215b..14e8c09a4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/backfill/Throttle.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/backfill/Throttle.java @@ -20,7 +20,12 @@ import com.alibaba.polardbx.statistics.SQLRecorderLogger; import org.apache.commons.collections.set.SynchronizedSet; -import java.util.*; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Queue; +import java.util.Set; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; @@ -89,100 +94,102 @@ private enum State { private int cyclePeriod = 3; public Throttle(long minRate, long maxRate, String schema) { - synchronized (THROTTLE_INSTANCES){ + synchronized (THROTTLE_INSTANCES) { THROTTLE_INSTANCES.add(this); this.minRate = minRate; this.maxRate = maxRate; reset(); this.statsQueue = new ArrayDeque<>(); this.timerTaskExecutor - .scheduleWithFixedDelay(new Runnable() { - @Override - public void run() { - LoggerUtil.buildMDC(schema); - long totalTimeCost = 0; - lock.lock(); - try { - if (state == State.INIT) { - return; - } - - if (statsQueue.isEmpty()) { - reset(); - return; - } - - if (statsQueue.size() < 3) { - return; - } - - long period = System.currentTimeMillis() - startTimeLastCycle; - - if (period < cyclePeriod * 1000) { - return; - } - - actualRateLastCycle = rowsLastCycle / (period / 1000); - startTimeLastCycle = System.currentTimeMillis(); - rowsLastCycle = 0; - - for (FeedbackStats stats : statsQueue) { - totalTimeCost += stats.timeCost; - } - - double avgTimeCost = totalTimeCost / statsQueue.size(); - - statsQueue = new ArrayDeque<>(); - - if (baseTimeCost > avgTimeCost) { - baseTimeCost = avgTimeCost; - } - - double aimTimeCost = baseTimeCost * 2; - - if (avgTimeCost <= (aimTimeCost * 0.75) && actualRateLastCycle >= rate * 0.9) { - growthFactor++; - rate = rate + rate * 0.05 * (1 - avgTimeCost / aimTimeCost) * (Math.log(growthFactor) / Math - .log(2)); - degrowthFactor = 0; - } else if (avgTimeCost > aimTimeCost) { - degrowthFactor++; - rate = - rate - rate * 0.1 * (1 - aimTimeCost / avgTimeCost) * (Math.log(degrowthFactor) / Math - .log(2)); - growthFactor = 0; - } else if (actualRateLastCycle <= rate * 0.7) { - degrowthFactor++; - rate = - rate - rate * 0.1 * (Math.log(degrowthFactor) / Math - .log(2)); - growthFactor = 0; - } else { - growthFactor = 0; - degrowthFactor = 0; - } - - if (rate > Throttle.this.maxRate) { - rate = Throttle.this.maxRate; - } else if (rate < Throttle.this.minRate) { - rate = Throttle.this.minRate; - baseTimeCost = Long.MAX_VALUE; - } - - SQLRecorderLogger.ddlLogger.info( - "speed: " + (long) actualRateLastCycle + " rows/s, avg cost: " + (long) avgTimeCost - + ", baseTimeCost: " - + (long) baseTimeCost + ", rate: " + (long) rate - + " rows/s"); - - } catch (Throwable t) { - SQLRecorderLogger.ddlLogger.error(t); - } finally { - lock.unlock(); + .scheduleWithFixedDelay(new Runnable() { + @Override + public void run() { + 
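// The reformatted timer task below implements a log-damped feedback throttle:
// each cycle it computes the achieved rate (actualRateLastCycle) and the average
// physical-statement latency, takes aimTimeCost = 2 * baseTimeCost (twice the best
// latency observed so far) as the latency target, then raises the rate by ~5%
// scaled with log2(growthFactor) while latency stays below 75% of the target and
// throughput keeps within 90% of the configured rate, or lowers it by ~10% scaled
// with log2(degrowthFactor) when latency overshoots the target or throughput drops
// below 70% of the rate, finally clamping the result into [minRate, maxRate].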
LoggerUtil.buildMDC(schema); + long totalTimeCost = 0; + lock.lock(); + try { + if (state == State.INIT) { + return; } + if (statsQueue.isEmpty()) { + reset(); + return; + } + + if (statsQueue.size() < 3) { + return; + } + + long period = System.currentTimeMillis() - startTimeLastCycle; + + if (period < cyclePeriod * 1000) { + return; + } + + actualRateLastCycle = rowsLastCycle / (period / 1000); + startTimeLastCycle = System.currentTimeMillis(); + rowsLastCycle = 0; + + for (FeedbackStats stats : statsQueue) { + totalTimeCost += stats.timeCost; + } + + double avgTimeCost = totalTimeCost / statsQueue.size(); + + statsQueue = new ArrayDeque<>(); + + if (baseTimeCost > avgTimeCost) { + baseTimeCost = avgTimeCost; + } + + double aimTimeCost = baseTimeCost * 2; + + if (avgTimeCost <= (aimTimeCost * 0.75) && actualRateLastCycle >= rate * 0.9) { + growthFactor++; + rate = rate + rate * 0.05 * (1 - avgTimeCost / aimTimeCost) * (Math.log(growthFactor) + / Math + .log(2)); + degrowthFactor = 0; + } else if (avgTimeCost > aimTimeCost) { + degrowthFactor++; + rate = + rate - rate * 0.1 * (1 - aimTimeCost / avgTimeCost) * (Math.log(degrowthFactor) + / Math + .log(2)); + growthFactor = 0; + } else if (actualRateLastCycle <= rate * 0.7) { + degrowthFactor++; + rate = + rate - rate * 0.1 * (Math.log(degrowthFactor) / Math + .log(2)); + growthFactor = 0; + } else { + growthFactor = 0; + degrowthFactor = 0; + } + + if (rate > Throttle.this.maxRate) { + rate = Throttle.this.maxRate; + } else if (rate < Throttle.this.minRate) { + rate = Throttle.this.minRate; + baseTimeCost = Long.MAX_VALUE; + } + + SQLRecorderLogger.ddlLogger.info( + "speed: " + (long) actualRateLastCycle + " rows/s, avg cost: " + (long) avgTimeCost + + ", baseTimeCost: " + + (long) baseTimeCost + ", rate: " + (long) rate + + " rows/s"); + + } catch (Throwable t) { + SQLRecorderLogger.ddlLogger.error(t); + } finally { + lock.unlock(); } - }, 0, cyclePeriod, TimeUnit.SECONDS); + + } + }, 0, cyclePeriod, TimeUnit.SECONDS); } } @@ -200,7 +207,7 @@ public static long getTotalThrottleRate() { public void stop() { timerTaskExecutor.shutdown(); reset(); - synchronized (THROTTLE_INSTANCES){ + synchronized (THROTTLE_INSTANCES) { THROTTLE_INSTANCES.remove(this); } } @@ -277,15 +284,16 @@ public void setBackFillId(Long backFillId) { this.backFillId = backFillId; } - public static List getThrottleInfoList(){ - synchronized (THROTTLE_INSTANCES){ + public static List getThrottleInfoList() { + synchronized (THROTTLE_INSTANCES) { List result = new ArrayList<>(THROTTLE_INSTANCES.size()); - for(Throttle throttle: THROTTLE_INSTANCES){ - if(throttle.backFillId == null){ + for (Throttle throttle : THROTTLE_INSTANCES) { + if (throttle.backFillId == null) { // checkers do not have backFillId continue; } - ThrottleInfo row = new ThrottleInfo(throttle.backFillId, throttle.actualRateLastCycle, throttle.totalRows); + ThrottleInfo row = + new ThrottleInfo(throttle.backFillId, throttle.actualRateLastCycle, throttle.totalRows); result.add(row); } return result; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/BalanceOptions.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/BalanceOptions.java index 81feb0c1e..f8dd106d1 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/BalanceOptions.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/BalanceOptions.java @@ -55,7 +55,7 @@ public class BalanceOptions { /** * Size threshold for generating subjob units
(MB) */ - public Long maxTaskUnitSize = 1024L; + public Long maxTaskUnitSize = 1024 * 32L; /** * Whether shuffle data distribution randomly; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/Balancer.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/Balancer.java index fb3892393..5a03f996c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/Balancer.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/Balancer.java @@ -23,9 +23,15 @@ import com.alibaba.polardbx.common.utils.TStringUtil; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.thread.ExecutorUtil; +import com.alibaba.polardbx.executor.balancer.action.ActionSyncStoragePool; import com.alibaba.polardbx.executor.balancer.action.ActionUtils; import com.alibaba.polardbx.executor.balancer.action.BalanceAction; -import com.alibaba.polardbx.executor.balancer.policy.*; +import com.alibaba.polardbx.executor.balancer.policy.BalancePolicy; +import com.alibaba.polardbx.executor.balancer.policy.PolicyAutoSplitForPartitionBalance; +import com.alibaba.polardbx.executor.balancer.policy.PolicyDataBalance; +import com.alibaba.polardbx.executor.balancer.policy.PolicyDrainNode; +import com.alibaba.polardbx.executor.balancer.policy.PolicyMergePartition; +import com.alibaba.polardbx.executor.balancer.policy.PolicyPartitionBalance; import com.alibaba.polardbx.executor.balancer.splitpartition.PolicySplitPartition; import com.alibaba.polardbx.executor.balancer.stats.BalanceStats; import com.alibaba.polardbx.executor.balancer.stats.GroupStats; @@ -36,10 +42,12 @@ import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.gms.topology.DbInfoRecord; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.locality.StoragePoolManager; import com.alibaba.polardbx.statistics.SQLRecorderLogger; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Sets; import org.apache.calcite.sql.SqlRebalance; +import org.apache.commons.lang3.StringUtils; import org.joda.time.DateTime; import java.util.ArrayList; @@ -155,6 +163,7 @@ public List rebalanceCluster(ExecutionContext ec, BalanceOptions /** * Fast checker if the drain node can be deletable */ + StoragePoolManager storagePoolManager = StoragePoolManager.getInstance(); if (options.drainNode != null) { PolicyDrainNode.DrainNodeInfo drainNodeInfo = PolicyDrainNode.DrainNodeInfo.parse(options.drainNode); drainNodeInfo.validate(); @@ -174,16 +183,27 @@ public List rebalanceCluster(ExecutionContext ec, BalanceOptions throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "already in rebalance"); } + if (!storagePoolManager.isTriggered() && options.drainNode == null) { + // for expand node + ActionSyncStoragePool syncStoragePoolAction = new ActionSyncStoragePool(null); + actions.add(syncStoragePoolAction); + } for (BalancePolicy policy : policies) { actions.addAll(policy.applyToMultiDb(ec, stats, options, schemaList)); } + if (!storagePoolManager.isTriggered() && options.drainNode != null) { + // for drain node + ActionSyncStoragePool syncStoragePoolAction = + new ActionSyncStoragePool(PolicyDrainNode.DrainNodeInfo.parse(options.drainNode)); + actions.add(syncStoragePoolAction); + } return actions; } public List rebalanceTenant(ExecutionContext ec, String storagePoolName, BalanceOptions options) { DdlJobManager jobManager = new DdlJobManager(); - String name 
= ActionUtils.genRebalanceClusterName(); + String name = ActionUtils.genRebalanceTenantResourceName(storagePoolName); boolean ok = jobManager.getResourceManager().checkResource(Sets.newHashSet(), Sets.newHashSet(name)); if (!ok) { throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "already in rebalance"); @@ -194,6 +214,11 @@ public List rebalanceTenant(ExecutionContext ec, String storagePo /** * Fast checker if the drain node can be deletable */ + List actions = new ArrayList<>(); + // skip drain node validation + if (storagePoolName.equalsIgnoreCase(StoragePoolManager.RECYCLE_STORAGE_POOL_NAME)) { + return actions; + } if (options.drainNode != null) { PolicyDrainNode.DrainNodeInfo drainNodeInfo = PolicyDrainNode.DrainNodeInfo.parse(options.drainNode); drainNodeInfo.validate(); @@ -207,12 +232,6 @@ public List rebalanceTenant(ExecutionContext ec, String storagePo collectBalanceStatsOfDatabase(schema) ).collect(Collectors.toMap(BalanceStats::getSchema, x -> x)); - List actions = new ArrayList<>(); - - if (!jobManager.getResourceManager().checkResource(Sets.newHashSet(), Sets.newHashSet(name))) { - throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "already in rebalance"); - } - for (BalancePolicy policy : policies) { actions.addAll(policy.applyToMultiTenantDb(ec, stats, options, storagePoolName, schemaList)); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionDrainDatabase.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionDrainDatabase.java index 8f83a56eb..8ef85dd2f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionDrainDatabase.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionDrainDatabase.java @@ -20,17 +20,26 @@ import com.alibaba.polardbx.common.eventlogger.EventType; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.executor.balancer.policy.PolicyDrainNode; import com.alibaba.polardbx.executor.balancer.stats.BalanceStats; import com.alibaba.polardbx.executor.balancer.stats.GroupStats; import com.alibaba.polardbx.executor.balancer.stats.PartitionStat; +import com.alibaba.polardbx.executor.common.DbStatusManager; import com.alibaba.polardbx.executor.ddl.job.task.CostEstimableDdlTask; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; import com.alibaba.polardbx.gms.topology.DbGroupInfoRecord; import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.gms.topology.DbTopologyManager; +import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.config.table.ScaleOutPlanUtil; import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import java.util.*; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.TreeSet; import java.util.stream.Collectors; /** @@ -73,44 +82,132 @@ public ExecutableDdlJob toDdlJob(ExecutionContext ec) { long totalRows = 0L; long totalSize = 0L; try { - List groupDetailInfoRecordList = DbTopologyManager.getAllDbGroupInfoRecordByInstId(schema, drainNode); - List groupNames = groupDetailInfoRecordList.stream().map(e->e.groupName).collect(Collectors.toList()); + PolicyDrainNode.DrainNodeInfo drainNodeInfo = + PolicyDrainNode.DrainNodeInfo.parse(drainNode); + if (GeneralUtil.isNotEmpty(drainNodeInfo.getDnInstIdList())) { 
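// The block below estimates the cost of draining the listed storage nodes: DN ids are
// deduplicated case-insensitively, then for each draining DN the rows and disk size it
// hosts are summed, either from partition-group stats (new-partition databases, matched
// via the draining physical DBs) or from the per-group data-size map (sharding
// databases, matched via group names). The totals parameterize the cost info attached
// to the delegator DDL job at the end of toDdlJob.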
+ Set dnSet = new TreeSet<>(String::compareToIgnoreCase); + dnSet.addAll(drainNodeInfo.getDnInstIdList()); + for (String dnId : dnSet) { + List groupDetailInfoRecordList = + DbTopologyManager.getAllDbGroupInfoRecordByInstId(schema, dnId); + List groupNames = + groupDetailInfoRecordList.stream().map(e -> e.groupName).collect(Collectors.toList()); + + if (DbInfoManager.getInstance().isNewPartitionDb(schema)) { + Set drainingPhyDb = new HashSet<>(); + for (DbGroupInfoRecord groupInfo : groupDetailInfoRecordList) { + drainingPhyDb.add(groupInfo.phyDbName); + } + for (PartitionStat partitionStat : stats.getPartitionStats()) { + String phyDb = partitionStat.getPartitionGroupRecord().getPhy_db(); + if (drainingPhyDb.contains(phyDb)) { + totalRows += partitionStat.getPartitionRows(); + totalSize += partitionStat.getPartitionDiskSize(); + } + } + } else { + for (GroupStats.GroupsOfStorage groupsOfStorage : GeneralUtil.emptyIfNull(stats.getGroups())) { + if (groupsOfStorage == null || groupsOfStorage.getGroupDataSizeMap() == null) { + continue; + } + for (Map.Entry> entry : groupsOfStorage.groupDataSizeMap.entrySet()) { + if (groupNames.contains(entry.getKey())) { + totalRows += entry.getValue().getKey(); + totalSize += entry.getValue().getValue(); + } + } + } + } + } + } + } catch (Exception e) { + EventLogger.log(EventType.DDL_WARN, "calculate rebalance rows error. " + e.getMessage()); + } + + return ActionUtils.convertToDelegatorJob(schema, sql, + CostEstimableDdlTask.createCostInfo(totalRows, totalSize, null)); + } + + public String getSql() { + return sql; + } - if(DbInfoManager.getInstance().isNewPartitionDb(schema)){ + @Override + public Long getBackfillRows() { + long totalRows = 0L; + try { + List groupDetailInfoRecordList = + DbTopologyManager.getAllDbGroupInfoRecordByInstId(schema, drainNode); + List groupNames = + groupDetailInfoRecordList.stream().map(e -> e.groupName).collect(Collectors.toList()); + + if (DbInfoManager.getInstance().isNewPartitionDb(schema)) { Set drainingPhyDb = new HashSet<>(); - for(DbGroupInfoRecord groupInfo: groupDetailInfoRecordList){ + for (DbGroupInfoRecord groupInfo : groupDetailInfoRecordList) { drainingPhyDb.add(groupInfo.phyDbName); } - for(PartitionStat partitionStat: stats.getPartitionStats()){ + for (PartitionStat partitionStat : stats.getPartitionStats()) { String phyDb = partitionStat.getPartitionGroupRecord().getPhy_db(); - if(drainingPhyDb.contains(phyDb)){ + if (drainingPhyDb.contains(phyDb)) { totalRows += partitionStat.getPartitionRows(); - totalSize += partitionStat.getPartitionDiskSize(); } } - }else { - for (GroupStats.GroupsOfStorage groupsOfStorage: GeneralUtil.emptyIfNull(stats.getGroups())){ - if(groupsOfStorage==null || groupsOfStorage.getGroupDataSizeMap()==null){ + } else { + for (GroupStats.GroupsOfStorage groupsOfStorage : GeneralUtil.emptyIfNull(stats.getGroups())) { + if (groupsOfStorage == null || groupsOfStorage.getGroupDataSizeMap() == null) { continue; } - for(Map.Entry> entry: groupsOfStorage.groupDataSizeMap.entrySet()){ - if(groupNames.contains(entry.getKey())){ + for (Map.Entry> entry : groupsOfStorage.groupDataSizeMap.entrySet()) { + if (groupNames.contains(entry.getKey())) { totalRows += entry.getValue().getKey(); - totalSize += entry.getValue().getValue(); } } } } - }catch (Exception e){ + } catch (Exception e) { EventLogger.log(EventType.DDL_WARN, "calculate rebalance rows error. 
" + e.getMessage()); } - - return ActionUtils.convertToDelegatorJob(schema, sql, CostEstimableDdlTask.createCostInfo(totalRows, totalSize)); + return totalRows; } - public String getSql() { - return sql; + @Override + public Long getDiskSize() { + long totalSize = 0L; + try { + List groupDetailInfoRecordList = + DbTopologyManager.getAllDbGroupInfoRecordByInstId(schema, drainNode); + List groupNames = + groupDetailInfoRecordList.stream().map(e -> e.groupName).collect(Collectors.toList()); + + if (DbInfoManager.getInstance().isNewPartitionDb(schema)) { + Set drainingPhyDb = new HashSet<>(); + for (DbGroupInfoRecord groupInfo : groupDetailInfoRecordList) { + drainingPhyDb.add(groupInfo.phyDbName); + } + for (PartitionStat partitionStat : stats.getPartitionStats()) { + String phyDb = partitionStat.getPartitionGroupRecord().getPhy_db(); + if (drainingPhyDb.contains(phyDb)) { + totalSize += partitionStat.getPartitionDiskSize(); + } + } + } else { + for (GroupStats.GroupsOfStorage groupsOfStorage : GeneralUtil.emptyIfNull(stats.getGroups())) { + if (groupsOfStorage == null || groupsOfStorage.getGroupDataSizeMap() == null) { + continue; + } + for (Map.Entry> entry : groupsOfStorage.groupDataSizeMap.entrySet()) { + if (groupNames.contains(entry.getKey())) { + totalSize += entry.getValue().getValue(); + } + } + } + } + } catch (Exception e) { + EventLogger.log(EventType.DDL_WARN, "calculate rebalance rows error. " + e.getMessage()); + } + return totalSize; } + @Override public boolean equals(Object o) { if (this == o) { @@ -143,6 +240,6 @@ public int compareTo(ActionDrainDatabase o) { if (res != 0) { return res; } - return o.getSql().compareTo(sql); + return o.getSql().compareTo(sql); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionLockResource.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionLockResource.java index 03a25b5a7..298abbc46 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionLockResource.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionLockResource.java @@ -40,7 +40,7 @@ public class ActionLockResource implements BalanceAction { public ActionLockResource(String schema, Set exclusiveResourceSet) { this.schema = schema; this.exclusiveResourceSet = new HashSet<>(); - if(exclusiveResourceSet != null){ + if (exclusiveResourceSet != null) { this.exclusiveResourceSet.addAll(exclusiveResourceSet); } } @@ -57,7 +57,7 @@ public String getName() { @Override public String getStep() { - if(CollectionUtils.isEmpty(exclusiveResourceSet)){ + if (CollectionUtils.isEmpty(exclusiveResourceSet)) { return "Lock()"; } return "Lock(" + Joiner.on(",").join(exclusiveResourceSet) + ")"; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionMoveGroup.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionMoveGroup.java index e08be8065..3faf10c99 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionMoveGroup.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionMoveGroup.java @@ -16,6 +16,7 @@ package com.alibaba.polardbx.executor.balancer.action; +import com.alibaba.fastjson.annotation.JSONField; import com.alibaba.polardbx.common.eventlogger.EventLogger; import com.alibaba.polardbx.common.eventlogger.EventType; import 
com.alibaba.polardbx.common.properties.ConnectionParams; @@ -25,18 +26,24 @@ import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.executor.balancer.stats.BalanceStats; import com.alibaba.polardbx.executor.balancer.stats.GroupStats; +import com.alibaba.polardbx.executor.balancer.stats.PartitionGroupStat; +import com.alibaba.polardbx.executor.balancer.stats.TableGroupStat; import com.alibaba.polardbx.executor.ddl.job.task.CostEstimableDdlTask; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlExceptionAction; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; import com.alibaba.polardbx.gms.topology.DbInfoManager; +import com.alibaba.polardbx.optimizer.config.table.ScaleOutPlanUtil; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.statistics.SQLRecorderLogger; import com.google.common.collect.Lists; import org.apache.commons.lang3.StringUtils; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; /** * Action that move group between storage nodes @@ -138,7 +145,49 @@ public ExecutableDdlJob toDdlJob(ExecutionContext ec) { } return ActionUtils.convertToDelegatorJob(schema, sql, - CostEstimableDdlTask.createCostInfo(totalRows, totalSize)); + CostEstimableDdlTask.createCostInfo(totalRows, totalSize, (long) getLogicalTableCount())); + } + + @Override + public Long getBackfillRows() { + long totalRows = 0L; + if (!DbInfoManager.getInstance().isNewPartitionDb(schema)) { + for (GroupStats.GroupsOfStorage groupsOfStorage : GeneralUtil.emptyIfNull(stats.getGroups())) { + if (groupsOfStorage == null || groupsOfStorage.getGroupDataSizeMap() == null) { + continue; + } + for (Map.Entry> entry : groupsOfStorage.groupDataSizeMap.entrySet()) { + if (sourceGroups.contains(entry.getKey())) { + totalRows += entry.getValue().getKey(); + } + } + } + } + return totalRows; + } + + @Override + public Long getDiskSize() { + long totalSize = 0L; + if (!DbInfoManager.getInstance().isNewPartitionDb(schema)) { + for (GroupStats.GroupsOfStorage groupsOfStorage : GeneralUtil.emptyIfNull(stats.getGroups())) { + if (groupsOfStorage == null || groupsOfStorage.getGroupDataSizeMap() == null) { + continue; + } + for (Map.Entry> entry : groupsOfStorage.groupDataSizeMap.entrySet()) { + if (sourceGroups.contains(entry.getKey())) { + totalSize += entry.getValue().getValue(); + } + } + } + } + return totalSize; + } + + @Override + public double getLogicalTableCount() { + List tables = ScaleOutPlanUtil.getLogicalTables(schema); + return tables.size(); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionMoveGroups.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionMoveGroups.java index be03c1d0d..b389cf54c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionMoveGroups.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionMoveGroups.java @@ -101,6 +101,22 @@ public ExecutableDdlJob toDdlJob(ExecutionContext ec) { return job; } + @Override + public Long getBackfillRows() { + return actions.stream().map(o -> o.getBackfillRows()).mapToLong(o -> o).sum(); + } + + @Override + public Long getDiskSize() { + return actions.stream().map(o -> o.getDiskSize()).mapToLong(o -> o).sum(); + } + + @Override + public double getLogicalTableCount() { + List tables = 
ScaleOutPlanUtil.getLogicalTables(schema); + return tables.size(); + } + @Override public boolean equals(Object o) { if (this == o) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionMovePartition.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionMovePartition.java index 2081a997a..46bb97c02 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionMovePartition.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionMovePartition.java @@ -31,6 +31,7 @@ import com.alibaba.polardbx.executor.ddl.job.task.CostEstimableDdlTask; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; import com.alibaba.polardbx.gms.topology.DbTopologyManager; +import com.alibaba.polardbx.optimizer.config.table.ScaleOutPlanUtil; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.google.common.collect.Sets; import lombok.Getter; @@ -94,6 +95,29 @@ public Long getBackfillRows() { PartitionGroupStat::getDataRows).mapToLong(o -> o).sum(); } + @Override + public Long getDiskSize() { + Set partitionNameSet = new HashSet<>(this.partitionNames); + TableGroupStat tableGroupStat = stats.getTableGroupStats().stream() + .filter(o -> o.getTableGroupConfig().getTableGroupRecord().getTg_name().equals(this.tableGroupName)) + .collect( + Collectors.toList()).get(0); + return tableGroupStat.getPartitionGroups().stream() + .filter(o -> partitionNameSet.contains((o.pg == null) ? "" : o.pg.getPartition_name())) + .map( + PartitionGroupStat::getTotalDiskSize).mapToLong(o -> o).sum(); + } + + @Override + public double getLogicalTableCount() { + TableGroupStat tableGroupStat = stats.getTableGroupStats().stream() + .filter(o -> o.getTableGroupConfig().getTableGroupRecord().getTg_name().equals(this.tableGroupName)) + .collect( + Collectors.toList()).get(0); + + return tableGroupStat.getTableGroupConfig().getTableCount(); + } + public static List createMoveToGroups(String schema, List partitions, String toGroup, @@ -226,7 +250,7 @@ public ExecutableDdlJob toDdlJob(ExecutionContext ec) { EventLogger.log(EventType.DDL_WARN, "calculate rebalance rows error. 
" + e.getMessage()); } return ActionUtils.convertToDelegatorJob(schema, sql, - CostEstimableDdlTask.createCostInfo(totalRows, totalSize)); + CostEstimableDdlTask.createCostInfo(totalRows, totalSize, (long) getLogicalTableCount())); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionMovePartitions.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionMovePartitions.java index b0c0735d3..78ba57e93 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionMovePartitions.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionMovePartitions.java @@ -19,6 +19,7 @@ import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.executor.balancer.stats.TableGroupStat; import com.alibaba.polardbx.executor.ddl.job.task.shared.EmptyTask; import com.alibaba.polardbx.executor.ddl.newengine.dag.DirectedAcyclicGraph; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask; @@ -85,6 +86,25 @@ public Long getBackfillRows() { return backfillRows; } + @Override + public Long getDiskSize() { + Long diskSize = 0L; + for (String toGroup : actions.keySet()) { + diskSize += actions.get(toGroup).stream().map(o -> o.getDiskSize()).mapToLong(o -> o).sum(); + } + return diskSize; + } + + @Override + public double getLogicalTableCount() { + double tableCount = 0; + for (String toGroup : actions.keySet()) { + tableCount += actions.get(toGroup).get(0).getLogicalTableCount(); + } + + return Math.max(1.0, tableCount / actions.size()); + } + /* * Convert it to concurrent move-partition jobs like: * @@ -159,8 +179,8 @@ private Set getPrimaryTables(String tableGroupName) { TableGroupInfoManager tableGroupInfoManager = OptimizerContext.getContext(schema).getTableGroupInfoManager(); TableGroupConfig tableGroupConfig = tableGroupInfoManager.getTableGroupConfigByName(tableGroupName); SchemaManager schemaManager = OptimizerContext.getContext(schema).getLatestSchemaManager(); - for (TablePartRecordInfoContext tableInfo : tableGroupConfig.getAllTables()) { - TableMeta tableMeta = schemaManager.getTable(tableInfo.getTableName()); + for (String tableName : tableGroupConfig.getAllTables()) { + TableMeta tableMeta = schemaManager.getTable(tableName); String primaryTableName = tableMeta.getTableName(); if (tableMeta.isGsi()) { assert @@ -178,8 +198,8 @@ private Set getRelatedTableGroupNames(String tableGroup, TableGroupInfoM tableGroups.add(tableGroup); TableGroupConfig tableGroupConfig = tableGroupInfoManager.getTableGroupConfigByName(tableGroup); if (tableGroupConfig != null) { - for (TablePartRecordInfoContext tablePartCon : GeneralUtil.emptyIfNull(tableGroupConfig.getAllTables())) { - TableMeta tableMeta = executionContext.getSchemaManager(schema).getTable(tablePartCon.getTableName()); + for (String tableName : GeneralUtil.emptyIfNull(tableGroupConfig.getAllTables())) { + TableMeta tableMeta = executionContext.getSchemaManager(schema).getTable(tableName); if (tableMeta.isGsi()) { String primaryTableName = tableMeta.getGsiTableMetaBean().gsiMetaBean.tableName; tableMeta = OptimizerContext.getContext(schema).getLatestSchemaManager().getTable(primaryTableName); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionMoveTablePartition.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionMoveTablePartition.java index 74b934a9a..17dabc213 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionMoveTablePartition.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionMoveTablePartition.java @@ -25,9 +25,7 @@ import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.balancer.stats.BalanceStats; -import com.alibaba.polardbx.executor.balancer.stats.PartitionGroupStat; import com.alibaba.polardbx.executor.balancer.stats.PartitionStat; -import com.alibaba.polardbx.executor.balancer.stats.TableGroupStat; import com.alibaba.polardbx.executor.ddl.job.task.CostEstimableDdlTask; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; import com.alibaba.polardbx.gms.topology.DbTopologyManager; @@ -38,10 +36,8 @@ import java.util.ArrayList; import java.util.Arrays; -import java.util.HashSet; import java.util.List; import java.util.Objects; -import java.util.Set; import java.util.stream.Collectors; /** @@ -200,7 +196,7 @@ public ExecutableDdlJob toDdlJob(ExecutionContext ec) { EventLogger.log(EventType.DDL_WARN, "calculate rebalance rows error. " + e.getMessage()); } return ActionUtils.convertToDelegatorJob(schema, sql, - CostEstimableDdlTask.createCostInfo(totalRows, totalSize)); + CostEstimableDdlTask.createCostInfo(totalRows, totalSize, 1L)); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionSplitPartition.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionSplitPartition.java index 965a110cf..b933c742a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionSplitPartition.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionSplitPartition.java @@ -175,7 +175,7 @@ public ExecutableDdlJob toDdlJob(ExecutionContext ec) { } return ActionUtils.convertToDelegatorJob(schema, sql, - CostEstimableDdlTask.createCostInfo(totalRows, totalSize)); + CostEstimableDdlTask.createCostInfo(totalRows, totalSize, null)); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionSplitTablePartition.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionSplitTablePartition.java index bdf8e3456..1b59e8bf9 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionSplitTablePartition.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionSplitTablePartition.java @@ -161,7 +161,7 @@ public ExecutableDdlJob toDdlJob(ExecutionContext ec) { } return ActionUtils.convertToDelegatorJob(schema, sql, - CostEstimableDdlTask.createCostInfo(totalRows, totalSize)); + CostEstimableDdlTask.createCostInfo(totalRows, totalSize, null)); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionSyncStoragePool.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionSyncStoragePool.java new file mode 100644 index 000000000..ea988e2d1 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionSyncStoragePool.java @@ -0,0 +1,99 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding 
Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.balancer.action; + +import com.alibaba.fastjson.JSON; +import com.alibaba.polardbx.common.model.privilege.DbInfo; +import com.alibaba.polardbx.executor.balancer.policy.PolicyDrainNode; +import com.alibaba.polardbx.executor.balancer.serial.DataDistInfo; +import com.alibaba.polardbx.executor.ddl.job.task.rebalance.SyncStoragePoolTask; +import com.alibaba.polardbx.executor.ddl.job.task.rebalance.WriteDataDistLogTask; +import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; +import com.alibaba.polardbx.gms.topology.DbInfoManager; +import com.alibaba.polardbx.optimizer.config.schema.DefaultDbSchema; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; + +import java.util.Objects; + +/** + * Action that synchronizes storage pool membership after nodes are expanded or drained + * + * @author taokun + * @since 2021/10 + */ +public class ActionSyncStoragePool implements BalanceAction { + + private PolicyDrainNode.DrainNodeInfo drainNodeInfo; + + public ActionSyncStoragePool(PolicyDrainNode.DrainNodeInfo drainNodeInfo) { + if (drainNodeInfo != null) { + this.drainNodeInfo = drainNodeInfo; + } else { + this.drainNodeInfo = new PolicyDrainNode.DrainNodeInfo(); + } + } + + @Override + public String getSchema() { + return DefaultDbSchema.NAME; + } + + @Override + public String getName() { + return "ActionSyncStoragePool"; + } + + @Override + public String getStep() { + return "ActionSyncStoragePool"; + } + + @Override + public ExecutableDdlJob toDdlJob(ExecutionContext ec) { + ExecutableDdlJob job = new ExecutableDdlJob(); + SyncStoragePoolTask task = new SyncStoragePoolTask(drainNodeInfo.getDnInstIdList()); + job.addTask(task); + job.labelAsHead(task); + job.labelAsTail(task); + return job; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ActionSyncStoragePool)) { + return false; + } + ActionSyncStoragePool that = (ActionSyncStoragePool) o; + return Objects.equals(drainNodeInfo, that.drainNodeInfo); + } + + @Override + public int hashCode() { + return Objects.hash(drainNodeInfo); + } + + @Override + public String toString() { + return "ActionSyncStoragePool{" + + "schema='" + getSchema() + '\'' + + ", drainNodeInfo='" + JSON.toJSONString(drainNodeInfo) + '\'' + + '}'; + } +} \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionUtils.java index 4b32442e4..02444b229 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/ActionUtils.java @@ -18,7 +18,6 @@ import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; -import com.alibaba.polardbx.executor.ddl.job.MockDdlJob; import
com.alibaba.polardbx.executor.ddl.job.builder.AlterTableBuilder; import com.alibaba.polardbx.executor.ddl.job.builder.DdlPhyPlanBuilder; import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData; @@ -34,6 +33,7 @@ import com.alibaba.polardbx.executor.ddl.job.factory.oss.UnArchiveJobFactory; import com.alibaba.polardbx.executor.ddl.job.task.basic.SubJobTask; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; +import com.alibaba.polardbx.executor.physicalbackfill.PhysicalBackfillUtils; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.executor.utils.failpoint.FailPointKey; import com.alibaba.polardbx.gms.rebalance.RebalanceTarget; @@ -60,7 +60,6 @@ import org.apache.calcite.rel.ddl.UnArchive; import org.apache.calcite.sql.SqlDdl; import org.apache.calcite.sql.SqlNode; -import org.apache.commons.lang3.StringUtils; /** * @since 2021/03 @@ -132,7 +131,8 @@ private static ExecutableDdlJob convertJob(ExecutionContext ec, DdlContext ddlCo } else if (ddl instanceof AlterTableGroupMovePartition) { LogicalAlterTableGroupMovePartition movePartition = LogicalAlterTableGroupMovePartition.create(ddl); movePartition.setSchemaName(schema); - movePartition.preparedData(ec); + boolean usePhysicalBackfill = PhysicalBackfillUtils.isSupportForPhysicalBackfill(schema, ec); + movePartition.preparedData(ec, usePhysicalBackfill); ddlContext.setDdlType(movePartition.getDdlType()); return AlterTableGroupMovePartitionJobFactory.create(ddl, movePartition.getPreparedData(), ec); } else if (ddl instanceof UnArchive) { @@ -180,4 +180,8 @@ public static String genRebalanceResourceName(RebalanceTarget target, String nam public static String genRebalanceClusterName() { return LockUtil.genRebalanceClusterName(); } + + public static String genRebalanceTenantResourceName(String tenantName) { + return LockUtil.genRebalanceResourceName(RebalanceTarget.TENANT, tenantName); + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/BalanceAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/BalanceAction.java index d9388e3aa..ff5294b15 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/BalanceAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/BalanceAction.java @@ -51,6 +51,16 @@ public interface BalanceAction { @JSONField(deserialize = false, serialize = false) default Long getBackfillRows() { - return -1L; + return 0L; + } + + @JSONField(deserialize = false, serialize = false) + default Long getDiskSize() { + return 0L; + } + + @JSONField(deserialize = false, serialize = false) + default double getLogicalTableCount() { + return 0; } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/DropPhysicalDbTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/DropPhysicalDbTask.java index 714343587..2d5899150 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/DropPhysicalDbTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/action/DropPhysicalDbTask.java @@ -32,9 +32,7 @@ import com.alibaba.polardbx.gms.topology.DbGroupInfoManager; import com.alibaba.polardbx.gms.topology.DbGroupInfoRecord; import com.alibaba.polardbx.gms.topology.DbTopologyManager; -import com.alibaba.polardbx.optimizer.OptimizerContext; import 
com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.tablegroup.TableGroupInfoManager; import com.alibaba.polardbx.statistics.SQLRecorderLogger; import lombok.Getter; import org.apache.commons.collections.CollectionUtils; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/policy/PolicyDataBalance.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/policy/PolicyDataBalance.java index e31f86533..75e142bee 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/policy/PolicyDataBalance.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/policy/PolicyDataBalance.java @@ -58,6 +58,7 @@ import com.alibaba.polardbx.gms.rebalance.RebalanceTarget; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; import com.alibaba.polardbx.gms.rebalance.RebalanceTarget; +import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; import com.alibaba.polardbx.gms.tablegroup.TableGroupRecord; import com.alibaba.polardbx.gms.tablegroup.TableGroupUtils; import com.alibaba.polardbx.gms.topology.DbInfoManager; @@ -79,12 +80,22 @@ import java.sql.Connection; import java.sql.SQLException; -import java.util.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.TreeMap; import java.util.concurrent.ThreadLocalRandom; import java.util.stream.Collectors; import static com.alibaba.polardbx.executor.balancer.policy.PolicyPartitionBalance.MAX_TABLEGROUP_SOLVED_BY_LP; -import static com.alibaba.polardbx.executor.balancer.policy.PolicyUtils.getGroupDetails; /** * Move partitions between storage node if un-balanced. 
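The first hunk below makes sharding-db balancing storage-pool aware: once storage pools are in use (StoragePoolManager.isTriggered()), only groups hosted on data nodes of the default storage pool take part. A minimal sketch of that filter, reusing the calls visible in the diff (GroupsOfStorage and its storageInst field come from the surrounding code):

// Sketch of the storage-pool filter added in the hunk below; assumes the
// GroupsOfStorage element type and its storageInst field from the surrounding diff.
static List<GroupStats.GroupsOfStorage> keepDefaultPoolGroups(List<GroupStats.GroupsOfStorage> groupList) {
    StoragePoolManager spm = StoragePoolManager.getInstance();
    if (!spm.isTriggered()) {
        return groupList; // no storage pools configured: balance across all data nodes
    }
    List<String> defaultDns =
        spm.getStoragePoolInfo(StoragePoolManager.DEFAULT_STORAGE_POOL_NAME).getDnLists();
    return groupList.stream()
        .filter(g -> defaultDns.contains(g.storageInst))
        .collect(Collectors.toList());
}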
@@ -168,6 +179,13 @@ public List applyToShardingDb(ExecutionContext ec, if (CollectionUtils.isEmpty(groupList)) { return Collections.emptyList(); } + if (StoragePoolManager.getInstance().isTriggered()) { + List storageInsts = + StoragePoolManager.getInstance().getStoragePoolInfo(StoragePoolManager.DEFAULT_STORAGE_POOL_NAME) + .getDnLists(); + groupList = + groupList.stream().filter(o -> storageInsts.contains(o.storageInst)).collect(Collectors.toList()); + } List buckets = groupList.stream().map(BucketOfGroups::new).collect(Collectors.toList()); long totalStorageCount = buckets.size(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/policy/PolicyDrainNode.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/policy/PolicyDrainNode.java index 167716e29..95f4b4806 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/policy/PolicyDrainNode.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/policy/PolicyDrainNode.java @@ -118,6 +118,7 @@ import java.util.TreeSet; import java.util.stream.Collectors; +import static com.alibaba.polardbx.common.properties.ConnectionParams.REBALANCE_MAX_UNIT_SIZE; import static com.alibaba.polardbx.executor.balancer.policy.PolicyPartitionBalance.MAX_TABLEGROUP_SOLVED_BY_LP; import static com.alibaba.polardbx.executor.balancer.policy.PolicyUtils.getGroupDetails; @@ -215,6 +216,82 @@ public List applyToMultiDb(ExecutionContext ec, return moveDataActions; } + /** + * Apply to multiple tenant databases + */ + @Override + public List applyToMultiTenantDb(ExecutionContext ec, + Map stats, + BalanceOptions options, + String storagePoolName, + List schemaNameList) { + DrainNodeInfo drainNodeInfo = DrainNodeInfo.parse(options.drainNode); + drainNodeInfo.validate(); + + List dnInstIdList = drainNodeInfo.getDnInstIdList(); + List cnIpPortList = new ArrayList<>(); + cnIpPortList.addAll(drainNodeInfo.getCnROIpPortList()); + cnIpPortList.addAll(drainNodeInfo.getCnRWIpPortList()); + LOG.info(String.format("apply drain_node policy: schemas=%s options=%s stats=%s", + schemaNameList, options, stats)); + + boolean ignoreRollback = true; + BalanceAction updateStatusNotReady = + new ActionTaskAdapter( + DefaultDbSchema.NAME, + new UpdateNodeStatusTask( + DefaultDbSchema.NAME, + new ArrayList<>(dnInstIdList), + Collections.emptyList(), + StorageInfoRecord.STORAGE_STATUS_READY, + StorageInfoRecord.STORAGE_STATUS_NOT_READY, + ignoreRollback) + ); + + BalanceAction updateStatusRemoved = + new ActionTaskAdapter( + DefaultDbSchema.NAME, + new UpdateNodeStatusTask( + DefaultDbSchema.NAME, + new ArrayList<>(dnInstIdList), + new ArrayList<>(cnIpPortList), + StorageInfoRecord.STORAGE_STATUS_NOT_READY, + StorageInfoRecord.STORAGE_STATUS_REMOVED, + ignoreRollback)); + + BalanceAction drainCDC = + new ActionTaskAdapter(DefaultDbSchema.NAME, new DrainCDCTask(DefaultDbSchema.NAME, dnInstIdList)); + + String resName = ActionUtils.genRebalanceTenantResourceName(storagePoolName); + ActionLockResource lock = new ActionLockResource( + DefaultDbSchema.NAME, + com.google.common.collect.Sets.newHashSet(resName)); + + List moveDataActions = new ArrayList<>(); + moveDataActions.add(lock); + moveDataActions.add(updateStatusNotReady); + + SqlRebalance node = new SqlRebalance(SqlParserPos.ZERO); + node.setRebalanceDatabase(); + node.setPolicy(options.policy); + node.setDrainNode(options.drainNode); + node.setLogicalDdl(false); + node.setAsync(options.async); + node.setDebug(options.debug); +
node.setExplain(options.explain); + node.setMaxActions(options.maxActions); + node.setMaxPartitionSize((int) options.maxPartitionSize); + + List moveDataActionForBalance = schemaNameList.stream() + .flatMap(schema -> applyToTenantDb(ec, stats.get(schema), options, storagePoolName, schema).stream()) + .collect(Collectors.toList()); + moveDataActions.addAll(moveDataActionForBalance); + + moveDataActions.add(drainCDC); + moveDataActions.add(updateStatusRemoved); + return moveDataActions; + } + protected void doValidate(DrainNodeInfo drainNodeInfo) { drainNodeInfo.validate(); } @@ -239,6 +316,19 @@ public List applyToShardingDb(ExecutionContext ec, if (CollectionUtils.isEmpty(groupList)) { return Collections.emptyList(); } + if (StoragePoolManager.getInstance().isTriggered()) { + List storageInsts = + StoragePoolManager.getInstance().getStoragePoolInfo(StoragePoolManager.DEFAULT_STORAGE_POOL_NAME) + .getDnLists(); + groupList = + groupList.stream() + .filter(o -> storageInsts.contains(o.storageInst) || drainNodeInfo.containsDnInst(o.storageInst)) + .collect(Collectors.toList()); + dnDiskInfo = + dnDiskInfo.stream() + .filter(o -> storageInsts.contains(o.instance) || drainNodeInfo.containsDnInst(o.instance)) + .collect(Collectors.toList()); + } Map groupDataSizeMap = groupList.stream() .flatMap(x -> x.getGroupDataSizeMap().entrySet().stream()) .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getValue())); @@ -524,8 +614,7 @@ public List applyToTenantPartitionDb(ExecutionContext ec, Balance List toSyncTableGroup = new ArrayList<>(toSyncTableGroupSet); List> toSyncTables = tableGroupConfigList.stream() .filter(tableGroupConfig -> toSyncTableGroup.contains(tableGroupConfig.getTableGroupRecord().tg_name)).map( - tableGroupConfig -> tableGroupConfig.getTables().stream().map(o -> o.getTableName()) - .collect(Collectors.toList())).collect(Collectors.toList()); + tableGroupConfig -> tableGroupConfig.getTables()).collect(Collectors.toList()); // List actionMovePartitions = new ArrayList<>(); // prepare move action @@ -901,8 +990,7 @@ public List applyToPartitionDbDrainStoragePool(ExecutionContext e List toSyncTableGroup = new ArrayList<>(toMoveTableGroup); List> toSyncTables = tableGroupConfigList.stream() .filter(tableGroupConfig -> toSyncTableGroup.contains(tableGroupConfig.getTableGroupRecord().tg_name)).map( - tableGroupConfig -> tableGroupConfig.getTables().stream().map(o -> o.getTableName()) - .collect(Collectors.toList())).collect(Collectors.toList()); + tableGroupConfig -> tableGroupConfig.getTables()).collect(Collectors.toList()); List actionMovePartitions = new ArrayList<>(); // prepare move action @@ -1255,8 +1343,7 @@ public List applyToPartitionDbNew(ExecutionContext ec, BalanceOpt List toSyncTableGroup = new ArrayList<>(toSyncTableGroupSet); List> toSyncTables = tableGroupConfigList.stream() .filter(tableGroupConfig -> toSyncTableGroup.contains(tableGroupConfig.getTableGroupRecord().tg_name)).map( - tableGroupConfig -> tableGroupConfig.getTables().stream().map(o -> o.getTableName()) - .collect(Collectors.toList())).collect(Collectors.toList()); + tableGroupConfig -> tableGroupConfig.getTables()).collect(Collectors.toList()); List actionMovePartitions = new ArrayList<>(); // prepare move action @@ -1625,8 +1712,7 @@ public List applyToPartitionDb(ExecutionContext ec, List toSyncTableGroup = new ArrayList<>(toSyncTableGroupSet); List> toSyncTables = tableGroupConfigList.stream() .filter(tableGroupConfig -> 
toSyncTableGroup.contains(tableGroupConfig.getTableGroupRecord().tg_name)) - .map(tableGroupConfig -> tableGroupConfig.getTables().stream().map(o -> o.getTableName()) - .collect(Collectors.toList())) + .map(tableGroupConfig -> tableGroupConfig.getTables()) .collect(Collectors.toList()); List actionMovePartitions = new ArrayList<>(); // prepare move action @@ -1799,12 +1885,16 @@ public List applyToPartitionDb(ExecutionContext ec, // Map> movesGroupByTg = moves.stream().collect( // Collectors.groupingBy(o -> o.tgName, Collectors.mapping(o -> o, Collectors.toList())) // ); + long maxTaskUnitSize = ec.getParamManager().getLong(REBALANCE_MAX_UNIT_SIZE); + if (maxTaskUnitSize < 1024) { + maxTaskUnitSize = options.maxTaskUnitSize; + } List moveDataActions = new ArrayList<>(); for (int i = 0; i < moves.size(); ) { Long sumMoveSizes = 0L; int j = i; int nextI; - for (; j < moves.size() && sumMoveSizes <= options.maxTaskUnitSize * 1024 * 1024; j++) { + for (; j < moves.size() && sumMoveSizes <= maxTaskUnitSize * 1024 * 1024; j++) { sumMoveSizes += moves.get(j).dataSize; } nextI = j; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/policy/PolicyPartitionBalance.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/policy/PolicyPartitionBalance.java index d72b2555c..077162822 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/policy/PolicyPartitionBalance.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/policy/PolicyPartitionBalance.java @@ -24,10 +24,20 @@ import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.executor.balancer.BalanceOptions; -import com.alibaba.polardbx.executor.balancer.action.*; +import com.alibaba.polardbx.executor.balancer.action.ActionInitPartitionDb; +import com.alibaba.polardbx.executor.balancer.action.ActionLockResource; +import com.alibaba.polardbx.executor.balancer.action.ActionMoveGroup; +import com.alibaba.polardbx.executor.balancer.action.ActionMoveGroups; +import com.alibaba.polardbx.executor.balancer.action.ActionMovePartition; +import com.alibaba.polardbx.executor.balancer.action.ActionMovePartitions; +import com.alibaba.polardbx.executor.balancer.action.ActionMoveTablePartition; +import com.alibaba.polardbx.executor.balancer.action.ActionTaskAdapter; +import com.alibaba.polardbx.executor.balancer.action.ActionUtils; +import com.alibaba.polardbx.executor.balancer.action.ActionWriteDataDistLog; +import com.alibaba.polardbx.executor.balancer.action.BalanceAction; import com.alibaba.polardbx.executor.balancer.serial.DataDistInfo; -import com.alibaba.polardbx.executor.balancer.solver.Solution; import com.alibaba.polardbx.executor.balancer.solver.MixedModel; +import com.alibaba.polardbx.executor.balancer.solver.Solution; import com.alibaba.polardbx.executor.balancer.solver.SolverExample; import com.alibaba.polardbx.executor.balancer.stats.BalanceStats; import com.alibaba.polardbx.executor.balancer.stats.GroupStats; @@ -48,15 +58,28 @@ import com.alibaba.polardbx.gms.topology.GroupDetailInfoRecord; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.locality.LocalityInfoUtils; +import com.alibaba.polardbx.optimizer.locality.LocalityManager; import com.alibaba.polardbx.optimizer.locality.StoragePoolManager; import com.google.common.collect.Lists; import com.google.common.collect.Sets; import org.apache.calcite.sql.SqlRebalance; 
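A note on the REBALANCE_MAX_UNIT_SIZE hunk above: the connection parameter now overrides options.maxTaskUnitSize unless its value is below 1024, and the selected size (in megabytes) caps how many moves are folded into one work unit; the same loop reappears in PolicyPartitionBalance below. A minimal, self-contained sketch of that batching under simplified assumptions; the Move type and the batch()/effectiveUnitSizeMb() helpers are illustrative stand-ins, not code from this patch.

import java.util.ArrayList;
import java.util.List;

class MoveBatchSketch {
    static class Move {
        final long dataSize; // bytes
        Move(long dataSize) { this.dataSize = dataSize; }
    }

    static long effectiveUnitSizeMb(long paramValue, long optionsMaxTaskUnitSize) {
        // Mirrors the guard in the hunk: parameter values below 1024 fall back to the option.
        return paramValue < 1024 ? optionsMaxTaskUnitSize : paramValue;
    }

    static List<List<Move>> batch(List<Move> moves, long maxTaskUnitSizeMb) {
        long capBytes = maxTaskUnitSizeMb * 1024 * 1024;
        List<List<Move>> batches = new ArrayList<>();
        for (int i = 0; i < moves.size(); ) {
            long sum = 0L;
            int j = i;
            // Extend the batch while the accumulated size stays within the cap,
            // mirroring the sumMoveSizes loop in the hunk above.
            for (; j < moves.size() && sum <= capBytes; j++) {
                sum += moves.get(j).dataSize;
            }
            batches.add(new ArrayList<>(moves.subList(i, j)));
            i = j;
        }
        return batches;
    }
}

As in the patch, a batch may overshoot the cap by one move; the cap bounds when a batch is closed, not the exact batch size.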
import org.apache.commons.collections.CollectionUtils; -import java.util.*; +import java.util.AbstractMap; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.TreeMap; import java.util.stream.Collectors; +import static com.alibaba.polardbx.common.properties.ConnectionParams.REBALANCE_MAX_UNIT_SIZE; import static com.alibaba.polardbx.executor.balancer.policy.PolicyUtils.getGroupDetails; /** @@ -89,6 +112,13 @@ public List applyToShardingDb(ExecutionContext ec, BalanceOptions } List groupList = stats.getGroups(); + if (StoragePoolManager.getInstance().isTriggered()) { + List storageInsts = + StoragePoolManager.getInstance().getStoragePoolInfo(StoragePoolManager.DEFAULT_STORAGE_POOL_NAME) + .getDnLists(); + groupList = + groupList.stream().filter(o -> storageInsts.contains(o.storageInst)).collect(Collectors.toList()); + } if (CollectionUtils.isEmpty(groupList)) { return Collections.emptyList(); } @@ -760,11 +790,15 @@ public List applyToPartitionDb(ExecutionContext ec, // Map> movesGroupByTg = moves.stream().collect( // Collectors.groupingBy(o -> o.tgName, Collectors.mapping(o -> o, Collectors.toList())) // ); + long maxTaskUnitSize = ec.getParamManager().getLong(REBALANCE_MAX_UNIT_SIZE); + if (maxTaskUnitSize < 1024) { + maxTaskUnitSize = options.maxTaskUnitSize; + } for (int i = 0; i < moves.size(); ) { Long sumMoveSize = 0L; int j = i; int nextI; - for (; j < moves.size() && sumMoveSize <= options.maxTaskUnitSize * 1024 * 1024; j++) { + for (; j < moves.size() && sumMoveSize <= maxTaskUnitSize * 1024 * 1024; j++) { sumMoveSize += moves.get(j).dataSize; } nextI = j; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/policy/PolicyUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/policy/PolicyUtils.java index 5651a2c6c..aa9621be7 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/policy/PolicyUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/policy/PolicyUtils.java @@ -16,6 +16,8 @@ package com.alibaba.polardbx.executor.balancer.policy; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.gms.locality.LocalityDetailInfoRecord; import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; @@ -31,14 +33,16 @@ import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.locality.LocalityManager; +import com.alibaba.polardbx.optimizer.partition.PartitionByDefinition; import com.alibaba.polardbx.optimizer.partition.PartitionInfoManager; -import com.alibaba.polardbx.optimizer.partition.PartitionInfoManager; +import com.alibaba.polardbx.optimizer.partition.PartitionSpec; import java.sql.Connection; import java.sql.SQLException; import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.stream.Collectors; /** @@ -140,12 +144,34 @@ public static List getLocalityDetails(String schema, S TableGroupConfig tableGroupConfig = TableGroupUtils.getTableGroupInfoByGroupName(schema, tableGroup); List localityDetailInfoRecords = new ArrayList<>(); int 
rowNum = 0; - PartitionGroupRecord partitionGroupRecord = tableGroupConfig.getPartitionGroupByName(partitionGroup); - String locality = (partitionGroupRecord == null)?"":partitionGroupRecord.getLocality(); + String originalLocality = ""; + PartitionSpec partitionSpec = null; + if (tableGroupConfig.getTables().size() > 0) { + String tableName = tableGroupConfig.getTables().get(0); + PartitionByDefinition subPartitionByDefinition = + OptimizerContext.getContext(schema).getPartitionInfoManager().getPartitionInfo(tableName) + .getPartitionBy().getSubPartitionBy(); + if (subPartitionByDefinition != null) { + throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS, + String.format( + "invalid alter locality operation on partition! we don't support alter table group [%s] locality with secondary partition", + tableGroup)); + } + partitionSpec = + OptimizerContext.getContext(schema).getPartitionInfoManager().getPartitionInfo(tableName) + .getPartitionBy().getPartitionByPartName(partitionGroup); + } else { + throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS, + String.format( + "invalid alter locality operation on partition! table group [%s] contains no table", + tableGroup)); + } + String locality = Optional.ofNullable(partitionSpec.getLocality()).orElse(""); localityDetailInfoRecords.add(new LocalityDetailInfoRecord(rowNum++, LocalityDetailInfoRecord.LOCALITY_TYPE_PARTITIONGROUP, - partitionGroupRecord.id, - partitionGroupRecord.partition_name, + // If there is a secondary partition, we must change it ourselves. + tableGroupConfig.getPartitionGroupByName(partitionGroup).id, + partitionSpec.getName(), locality)); return localityDetailInfoRecords; } catch (SQLException e) { @@ -158,7 +184,7 @@ public static List getLocalityDetails(String schema, S TableGroupConfig tableGroupConfig = TableGroupUtils.getTableGroupInfoByGroupName(schema, tableGroup); TableInfoManager tableInfoManager = new TableInfoManager(); List tableNames = - tableGroupConfig.getTables().stream().map(table -> table.getTableName()).collect(Collectors.toList()); + tableGroupConfig.getTables(); PartitionInfoManager partitionInfoManager = OptimizerContext.getContext(schema).getPartitionInfoManager(); tableInfoManager.setConnection(conn); List tableInfoList = tableInfoManager.queryTables(schema); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/serial/DataDistInfo.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/serial/DataDistInfo.java index b8903337f..2e121474f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/serial/DataDistInfo.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/serial/DataDistInfo.java @@ -282,9 +282,7 @@ public TgDataDistInfo fromPartitionGroupStat(String tgName, List tableNames = new ArrayList<>(); if (!partitionGroupStats.isEmpty() || !partitionGroupStats.get(0).partitions.isEmpty()) { List collectTableNames = - partitionGroupStats.get(0).getFirstPartition().getTableGroupConfig().getTables().stream() - .map(o -> o.getTableName()).collect( - Collectors.toList()); + partitionGroupStats.get(0).getFirstPartition().getTableGroupConfig().getTables(); tableNames.addAll(collectTableNames); } return new TgDataDistInfo(tgName, pgDataDistInfo, tableNames); @@ -339,9 +337,7 @@ public TgDataDistInfo fromPartitionGroupStat(String tgName, List tableNames = - partitionGroupStats.get(0).getFirstPartition().getTableGroupConfig().getTables().stream() - .map(o -> o.getTableName()).collect( -
Collectors.toList()); + partitionGroupStats.get(0).getFirstPartition().getTableGroupConfig().getTables(); return new TgDataDistInfo(tgName, pgDataDistInfo, tableNames); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/solver/GreedyModel.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/solver/GreedyModel.java index 106a7de03..56ac72cf6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/solver/GreedyModel.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/solver/GreedyModel.java @@ -16,10 +16,9 @@ package com.alibaba.polardbx.executor.balancer.solver; -import org.apache.calcite.util.Pair; - -import com.alibaba.polardbx.executor.balancer.solver.SolverUtils.PartitionSet; import com.alibaba.polardbx.executor.balancer.solver.SolverUtils.PartitionCluster; +import com.alibaba.polardbx.executor.balancer.solver.SolverUtils.PartitionSet; +import org.apache.calcite.util.Pair; import java.util.ArrayList; import java.util.Arrays; @@ -32,7 +31,6 @@ import java.util.Set; import java.util.logging.Logger; import java.util.stream.Collectors; -import java.util.stream.IntStream; public class GreedyModel { public Logger logger = Logger.getLogger(String.valueOf(getClass())); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/solver/PartitionSplitInfo.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/solver/PartitionSplitInfo.java index a20792f47..d5498c7a4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/solver/PartitionSplitInfo.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/solver/PartitionSplitInfo.java @@ -26,8 +26,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.function.BinaryOperator; -import java.util.stream.Collectors; public class PartitionSplitInfo { public Map partitionGroupStatMap; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/solver/Solution.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/solver/Solution.java index 2772d4038..dbdbee07b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/solver/Solution.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/solver/Solution.java @@ -16,12 +16,6 @@ package com.alibaba.polardbx.executor.balancer.solver; -import com.alibaba.polardbx.optimizer.partition.pruning.SearchDatumInfo; -import org.apache.calcite.util.Pair; - -import java.util.List; -import java.util.Map; - public class Solution { public Boolean withValidSolve = false; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/solver/SolverExample.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/solver/SolverExample.java index a0d5711ac..ee347e75c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/solver/SolverExample.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/solver/SolverExample.java @@ -16,11 +16,7 @@ package com.alibaba.polardbx.executor.balancer.solver; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; import java.io.InputStream; -import java.util.Objects; import java.util.Scanner; public class SolverExample { diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/solver/SplitModel.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/solver/SplitModel.java index c1774215e..70bd7c350 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/solver/SplitModel.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/solver/SplitModel.java @@ -16,7 +16,6 @@ package com.alibaba.polardbx.executor.balancer.solver; -import com.alibaba.polardbx.optimizer.index.PartitionRuleSet; import org.apache.calcite.util.Pair; import org.apache.commons.lang.ArrayUtils; @@ -24,9 +23,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.Comparator; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.stream.Collectors; public class SplitModel { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/splitpartition/SplitNameBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/splitpartition/SplitNameBuilder.java index 8e22fe79b..64623321f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/splitpartition/SplitNameBuilder.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/splitpartition/SplitNameBuilder.java @@ -50,7 +50,7 @@ public SplitPoint build(SearchDatumInfo value) { /** * Generate partition name for merge-partition - * + *

* TODO(moyi) generate an elegant name * * @param toMerge partitions to merge diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/splitpartition/SplitPointUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/splitpartition/SplitPointUtils.java index 4a4a77552..3875ca19b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/splitpartition/SplitPointUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/splitpartition/SplitPointUtils.java @@ -16,7 +16,6 @@ package com.alibaba.polardbx.executor.balancer.splitpartition; -import com.alibaba.polardbx.common.exception.NotSupportException; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.jdbc.Parameters; @@ -30,10 +29,10 @@ import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.field.SessionProperties; import com.alibaba.polardbx.optimizer.partition.PartitionByDefinition; +import com.alibaba.polardbx.optimizer.partition.PartitionInfo; import com.alibaba.polardbx.optimizer.partition.boundspec.PartitionBoundVal; import com.alibaba.polardbx.optimizer.partition.boundspec.PartitionBoundValueKind; import com.alibaba.polardbx.optimizer.partition.common.PartitionStrategy; -import com.alibaba.polardbx.optimizer.partition.PartitionInfo; import com.alibaba.polardbx.optimizer.partition.datatype.PartitionField; import com.alibaba.polardbx.optimizer.partition.datatype.PartitionFieldBuilder; import com.alibaba.polardbx.optimizer.partition.datatype.function.PartitionIntFunction; @@ -44,9 +43,10 @@ import java.sql.SQLException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; -import java.util.*; import java.util.stream.Collectors; import static com.alibaba.polardbx.common.properties.ConnectionProperties.ENABLE_AUTO_SPLIT_PARTITION; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/splitpartition/StatisticsBasedSplitPointBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/splitpartition/StatisticsBasedSplitPointBuilder.java index 54b8acb9b..3a4057c1e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/splitpartition/StatisticsBasedSplitPointBuilder.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/splitpartition/StatisticsBasedSplitPointBuilder.java @@ -17,18 +17,19 @@ package com.alibaba.polardbx.executor.balancer.splitpartition; import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; +import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.balancer.BalanceOptions; import com.alibaba.polardbx.executor.balancer.stats.PartitionGroupStat; import com.alibaba.polardbx.executor.balancer.stats.PartitionStat; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; import com.alibaba.polardbx.gms.util.PartitionNameUtil; import com.alibaba.polardbx.optimizer.OptimizerContext; -import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import 
com.alibaba.polardbx.optimizer.core.datatype.IntegerType; +import com.alibaba.polardbx.optimizer.partition.PartitionInfo; +import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil; import com.alibaba.polardbx.optimizer.partition.PartitionSpec; import com.alibaba.polardbx.optimizer.partition.datatype.IntPartitionField; import com.alibaba.polardbx.optimizer.partition.datatype.PartitionField; @@ -36,11 +37,9 @@ import com.alibaba.polardbx.optimizer.partition.pruning.SearchDatumInfo; import com.alibaba.polardbx.optimizer.tablegroup.TableGroupInfoManager; import com.alibaba.polardbx.statistics.SQLRecorderLogger; -import com.google.common.collect.Lists; import org.apache.commons.collections.CollectionUtils; import java.util.ArrayList; -import java.util.HashSet; import java.util.List; import java.util.TreeSet; @@ -89,11 +88,18 @@ private List buildSplitPoint(PartitionStat partition, int expectedSp return result; } - final TableGroupInfoManager tableGroupInfoManager = - OptimizerContext.getContext(tableSchema).getTableGroupInfoManager(); + OptimizerContext oc = OptimizerContext.getContext(tableSchema); + final TableGroupInfoManager tableGroupInfoManager = oc.getTableGroupInfoManager(); + + PartitionInfo partitionInfo = oc.getPartitionInfoManager().getPartitionInfo(tableName); + List partNames = new ArrayList<>(); + List> subPartNamePairs = new ArrayList<>(); + PartitionInfoUtil.getPartitionName(partitionInfo, partNames, subPartNamePairs); + TableGroupConfig tableGroupConfig = tableGroupInfoManager.getTableGroupConfigByName(tableGroupName); List newPartitionNames = - PartitionNameUtil.autoGeneratePartitionNames(tableGroupConfig, splitBounds.size() + 1, + PartitionNameUtil.autoGeneratePartitionNames(tableGroupConfig.getTableGroupRecord(), partNames, + subPartNamePairs, splitBounds.size() + 1, new TreeSet<>(String::compareToIgnoreCase), false); if (newPartitionNames.size() != splitBounds.size() + 1) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/stats/BalanceStats.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/stats/BalanceStats.java index d2d0a8080..a9c594786 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/stats/BalanceStats.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/stats/BalanceStats.java @@ -24,7 +24,11 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import java.util.*; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Optional; +import java.util.Set; import java.util.stream.Collectors; /** diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/stats/PartitionStat.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/stats/PartitionStat.java index ceb0c19a7..b775d8046 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/stats/PartitionStat.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/stats/PartitionStat.java @@ -21,11 +21,11 @@ import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; import com.alibaba.polardbx.gms.tablegroup.TableGroupRecord; import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; -import com.alibaba.polardbx.optimizer.partition.boundspec.PartitionBoundVal; import com.alibaba.polardbx.optimizer.partition.PartitionByDefinition; import com.alibaba.polardbx.optimizer.partition.PartitionInfo; -import 
com.alibaba.polardbx.optimizer.partition.common.PartitionLocation; import com.alibaba.polardbx.optimizer.partition.PartitionSpec; +import com.alibaba.polardbx.optimizer.partition.boundspec.PartitionBoundVal; +import com.alibaba.polardbx.optimizer.partition.common.PartitionLocation; import com.alibaba.polardbx.optimizer.partition.common.PartitionStrategy; import com.alibaba.polardbx.optimizer.partition.pruning.SearchDatumInfo; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/stats/StatsUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/stats/StatsUtils.java index 6b4438afa..40dc0cf29 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/stats/StatsUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/stats/StatsUtils.java @@ -32,6 +32,7 @@ import com.alibaba.polardbx.gms.tablegroup.PartitionGroupRecord; import com.alibaba.polardbx.gms.tablegroup.TableGroupAccessor; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; +import com.alibaba.polardbx.gms.tablegroup.TableGroupDetailConfig; import com.alibaba.polardbx.gms.tablegroup.TableGroupRecord; import com.alibaba.polardbx.gms.tablegroup.TableGroupUtils; import com.alibaba.polardbx.gms.topology.GroupDetailInfoAccessor; @@ -85,6 +86,8 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; +import static com.alibaba.polardbx.gms.tablegroup.TableGroupRecord.TG_TYPE_COLUMNAR_TBL_TG; + /** * Maintain statistics of table-group * @@ -95,24 +98,6 @@ public class StatsUtils { private static final Logger logger = LoggerFactory.getLogger(StatsUtils.class); - public static List getTableGroupConfigs() { - List res = new ArrayList<>(); - - try (Connection connection = MetaDbUtil.getConnection()) { - TableGroupAccessor tableGroupAccessor = new TableGroupAccessor(); - tableGroupAccessor.setConnection(connection); - - List schemaNames = tableGroupAccessor.getDistinctSchemaNames(); - for (String schemaName : schemaNames) { - res.addAll(TableGroupUtils.getAllTableGroupInfoByDb(schemaName)); - } - } catch (SQLException e) { - MetaDbLogUtil.META_DB_LOG.error(e); - throw GeneralUtil.nestedException(e); - } - return res; - } - public static List getTableGroupConfigs(Set schemaNames) { List res = new ArrayList<>(); @@ -183,7 +168,7 @@ public static List getTableGroupConfigsWithFilter(List getTableGroupsStats(String targetSchema, String targetTableGroup, Boolean idle) { - List tableGroupConfigs = TableGroupUtils.getAllTableGroupInfoByDb(targetSchema); + List tableGroupConfigs = TableGroupUtils.getAllTableGroupDetailInfoByDb(targetSchema); tableGroupConfigs = tableGroupConfigs.stream() .filter(tgConfig -> tgConfig.getTableGroupRecord().getTg_name().equals(targetTableGroup)) .collect(Collectors.toList()); @@ -201,15 +186,18 @@ public static List getTableGroupsStats(String targetSchema, Stri String.format("got table-group stats for schema(%s) cost %dms: %s", targetSchema, elapsed, tablesStatInfo)); // iterate all table-groups - for (TableGroupConfig tableGroupConfig : tableGroupConfigs) { + for (TableGroupDetailConfig tableGroupConfig : tableGroupConfigs) { String schema = tableGroupConfig.getTableGroupRecord().schema; if (targetSchema != null && !targetSchema.equalsIgnoreCase(schema)) { continue; } + if (tableGroupConfig.getTableGroupRecord().tg_type == TG_TYPE_COLUMNAR_TBL_TG) { + continue; + } TableGroupStat tableGroupStat = new TableGroupStat(tableGroupConfig); // iterate all tables in a table-group - for 
(TablePartRecordInfoContext tableContext : tableGroupConfig.getAllTables()) { + for (TablePartRecordInfoContext tableContext : tableGroupConfig.getTablesPartRecordInfoContext()) { String table = tableContext.getTableName().toLowerCase(Locale.ROOT); List tablePartitionRecords = null; @@ -261,7 +249,7 @@ public static List getTableGroupsStats(String targetSchema, Stri * @return stats */ public static List getTableGroupsStats(String targetSchema, @Nullable String targetTable) { - List tableGroupConfigs = TableGroupUtils.getAllTableGroupInfoByDb(targetSchema); + List tableGroupConfigs = TableGroupUtils.getAllTableGroupDetailInfoByDb(targetSchema); List res = new ArrayList<>(); OptimizerContext oc = Objects.requireNonNull(OptimizerContext.getContext(targetSchema), targetSchema + " not exists"); @@ -277,15 +265,18 @@ public static List getTableGroupsStats(String targetSchema, @Nul JSON.toJSONString(tablesStatInfo))); // iterate all table-groups - for (TableGroupConfig tableGroupConfig : tableGroupConfigs) { + for (TableGroupDetailConfig tableGroupConfig : tableGroupConfigs) { String schema = tableGroupConfig.getTableGroupRecord().schema; if (targetSchema != null && !targetSchema.equalsIgnoreCase(schema)) { continue; } + if (tableGroupConfig.getTableGroupRecord().isColumnarTableGroup()) { + continue; + } TableGroupStat tableGroupStat = new TableGroupStat(tableGroupConfig); // iterate all tables in a table-group - for (TablePartRecordInfoContext tableContext : tableGroupConfig.getAllTables()) { + for (TablePartRecordInfoContext tableContext : tableGroupConfig.getTablesPartRecordInfoContext()) { String table = tableContext.getTableName().toLowerCase(Locale.ROOT); if (targetTable != null && !targetTable.equalsIgnoreCase(table)) { continue; @@ -445,11 +436,11 @@ public static List> queryGroupByPhyDb(String schema, String physica * @return > */ public static Map> queryTableGroupStats(String schema, - List tableGroups) { + List tableGroups) { Map> result = new HashMap<>(); Map phyTable2LogicalTableMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); - for (TableGroupConfig tg : tableGroups) { + for (TableGroupDetailConfig tg : tableGroups) { phyTable2LogicalTableMap.putAll(tg.phyToLogicalTables()); } @@ -646,10 +637,11 @@ public static Map>> queryTableSchemaStats(Set> rows = new ArrayList<>(); List> statisticRows = new ArrayList<>(); @@ -726,9 +718,10 @@ public static Map>> queryTableSchemaStatsForHea } String sql = - generateQueryPhyTablesStatsSQLForHeatmap(allPhyDbNames, indexTableNames, maxSingleLogicSchemaCount); + generateQueryPhyTablesStatsSQLForHeatmap(schemaName, allPhyDbNames, indexTableNames, + maxSingleLogicSchemaCount); - String countSql = generateQueryPhyTablesCountSQLForHeatmap(allPhyDbNames, indexTableNames); + String countSql = generateQueryPhyTablesCountSQLForHeatmap(schemaName, allPhyDbNames, indexTableNames); List> rows = new ArrayList<>(); for (String phyDbName : phyDbNames) { @@ -810,9 +803,11 @@ public static Map>> queryTableSchemaStaticsWith } String sql = - generateQueryPhyStaticsSQLForHeatmap(allPhyDbNames, indexTableNames, maxSingleLogicSchemaCount); + generateQueryPhyStaticsSQLForHeatmap(schemaName, allPhyDbNames, indexTableNames, + maxSingleLogicSchemaCount); - String countSql = generateQueryPhyTableStatisticsCountSQLForHeatmap(allPhyDbNames, indexTableNames); + String countSql = + generateQueryPhyTableStatisticsCountSQLForHeatmap(schemaName, allPhyDbNames, indexTableNames); List> rows = new ArrayList<>(); for (String phyDbName : phyDbNames) { @@ -853,47 +848,58 @@ public 
static Map>> queryTableGroupStatsForHeat Set indexTableNames, String tableLike, Map>> phyDbTablesInfoForHeatmap) { + TableGroupRecord tableGroupRecord = tableGroupConfig.getTableGroupRecord(); + String targetSchema = tableGroupRecord.getSchema(); + OptimizerContext oc = + Objects.requireNonNull(OptimizerContext.getContext(targetSchema), targetSchema + " not exists"); + PartitionInfoManager pm = oc.getPartitionInfoManager(); Map>> result = new HashMap<>(); for (PartitionGroupRecord partitionGroupRecord : tableGroupConfig.getPartitionGroupRecords()) { - for (TablePartRecordInfoContext tablePartRecordInfoContext : tableGroupConfig.getAllTables()) { + for (String logicalTableName : tableGroupConfig.getAllTables()) { // table name filter - String logicalTableName = tablePartRecordInfoContext.getTableName().toLowerCase(); + logicalTableName = logicalTableName.toLowerCase(); if (!isFilterTable(indexTableNames, tableLike, logicalTableName)) { continue; } - + PartitionInfo partitionInfo = pm.getPartitionInfo(logicalTableName); + if (partitionInfo == null) { + logger.warn(String.format( + "queryTableGroupStatsForHeatmap partitionInfo is null. logicalTableName=%s", + logicalTableName)); + continue; + } long partitionGroupId = partitionGroupRecord.id; - List tablePartitionRecords = - tablePartRecordInfoContext.getPartitionRecListByGroupId(partitionGroupId); - if (CollectionUtils.isEmpty(tablePartitionRecords)) { + PartitionSpec partitionSpec = + partitionInfo.getPartitionBy().getPhysicalPartitions().stream() + .filter(o -> o.getLocation().getPartitionGroupId().longValue() == partitionGroupId).findFirst() + .orElse(null); + if (partitionSpec == null) { logger.warn(String.format( - "queryTableGroupStatsForHeatmap tablePartitionRecords is null. logicalTableName=%s, partitionGroupId=%s", + "queryTableGroupStatsForHeatmap PartitionSpec is null. logicalTableName=%s, partitionGroupId=%s", logicalTableName, partitionGroupId)); continue; } - for (TablePartitionRecord tablePartitionRecord : tablePartitionRecords) { - String phyDbName = partitionGroupRecord.phy_db.toLowerCase(); - String phyTbName = tablePartitionRecord.phyTable.toLowerCase(); - List row; - try { - Map> phyTablesMap = phyDbTablesInfoForHeatmap.get(phyDbName); - if (phyTablesMap == null) { - row = getDefaultRowList(phyTbName, phyDbName); - } else { - row = phyTablesMap.get(phyTbName); - } - if (row == null) { - //row is null when phy table numbers is over max number. or not be accessed. - row = getDefaultRowList(phyTbName, phyDbName); - } - } catch (Exception ex) { - throw GeneralUtil.nestedException("Failed to get physical table info ", ex); + String phyDbName = partitionGroupRecord.phy_db.toLowerCase(); + String phyTbName = partitionSpec.getLocation().getPhyTableName().toLowerCase(); + List row; + try { + Map> phyTablesMap = phyDbTablesInfoForHeatmap.get(phyDbName); + if (phyTablesMap == null) { + row = getDefaultRowList(phyTbName, phyDbName); + } else { + row = phyTablesMap.get(phyTbName); } - - Map> table = - result.computeIfAbsent(logicalTableName, x -> new HashMap<>()); - table.put(phyTbName, row); + if (row == null) { + //row is null when phy table numbers is over max number. or not be accessed. 
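The heatmap rewrite above drops the per-table TablePartitionRecord scan and instead resolves the physical table through the PartitionSpec whose location carries the partition group id, logging a warning and skipping the table when no such partition exists. A small self-contained sketch of that lookup; PartitionSpecView and LocationView are illustrative stand-ins for the real PartitionSpec and PartitionLocation types, not code from this patch.

import java.util.List;
import java.util.Optional;

class HeatmapLookupSketch {
    static class LocationView {
        final long partitionGroupId;
        final String phyTableName;
        LocationView(long partitionGroupId, String phyTableName) {
            this.partitionGroupId = partitionGroupId;
            this.phyTableName = phyTableName;
        }
    }

    static class PartitionSpecView {
        final LocationView location;
        PartitionSpecView(LocationView location) { this.location = location; }
    }

    // Resolve the physical table of a partition group, or empty if no physical
    // partition points at it (the patch warns and continues in that case).
    static Optional<String> phyTableFor(List<PartitionSpecView> physicalPartitions, long partitionGroupId) {
        return physicalPartitions.stream()
            .filter(p -> p.location.partitionGroupId == partitionGroupId)
            .findFirst()
            .map(p -> p.location.phyTableName.toLowerCase());
    }
}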
+ row = getDefaultRowList(phyTbName, phyDbName); + } + } catch (Exception ex) { + throw GeneralUtil.nestedException("Failed to get physical table info ", ex); } + + Map> table = + result.computeIfAbsent(logicalTableName, x -> new HashMap<>()); + table.put(phyTbName, row); } } @@ -952,12 +958,12 @@ public static Map>> queryTableGroupStatI /** * scan each logical table of table group */ - for (TablePartRecordInfoContext tablePartRecordInfoContext : tableGroupConfig.getAllTables()) { + for (String logicalTableName : tableGroupConfig.getAllTables()) { /** * Filter unused tableName by tableLike, logicalTableName may be gsi or logTb */ - String logicalTableName = tablePartRecordInfoContext.getTableName().toLowerCase(); + logicalTableName = logicalTableName.toLowerCase(); if (!isFilterTable(indexTableNames, tableLike, logicalTableName)) { continue; } @@ -1316,10 +1322,12 @@ public static String genAvgTableRowLengthSQL(String phyDb, String phyTableName) /** * Build a SQL to collect stats of mysql table */ - private static String generateQueryPhyTablesStatsSQL(Set schemaNames, Set indexTableNames, + private static String generateQueryPhyTablesStatsSQL(String logicalSchema, Set schemaNames, + Set indexTableNames, String tableLike) { StringBuilder sb = new StringBuilder(); int schemaIndex = 0; + SchemaManager sm = OptimizerContext.getContext(logicalSchema).getLatestSchemaManager(); for (String schemaName : schemaNames) { if (schemaIndex != 0) { sb.append(" union all "); @@ -1343,7 +1351,15 @@ private static String generateQueryPhyTablesStatsSQL(Set schemaNames, Se sb.append(" and ("); schemaIndex = 0; for (String tableName : indexTableNames) { - String filter = "table_name like '" + tableName + "%'"; + String pattern; + try { + PartitionInfo partitionInfo = sm.getTable(tableName).getPartitionInfo(); + pattern = partitionInfo.getTableNamePattern(); + } catch (Exception ex) { + pattern = tableName; + } + + String filter = "table_name like '" + pattern + "%'"; if (schemaIndex != 0) { sb.append(" or "); } @@ -1362,8 +1378,10 @@ private static String generateQueryPhyTablesStatsSQL(Set schemaNames, Se return sb.toString(); } - private static String generateQueryPhyTablesCountSQLForHeatmap(Set schemaNames, + private static String generateQueryPhyTablesCountSQLForHeatmap(String logicalSchema, + Set schemaNames, Set indexTableNames) { + SchemaManager sm = OptimizerContext.getContext(logicalSchema).getLatestSchemaManager(); StringBuilder sb = new StringBuilder(); int schemaIndex = 0; for (String schemaName : schemaNames) { @@ -1384,7 +1402,15 @@ private static String generateQueryPhyTablesCountSQLForHeatmap(Set schem sb.append(" and ("); schemaIndex = 0; for (String tableName : indexTableNames) { - String filter = "table_name like '" + tableName + "%'"; + String pattern; + try { + PartitionInfo partitionInfo = sm.getTable(tableName).getPartitionInfo(); + pattern = partitionInfo.getTableNamePattern(); + } catch (Exception ex) { + pattern = tableName; + } + + String filter = "table_name like '" + pattern + "%'"; if (schemaIndex != 0) { sb.append(" or "); } @@ -1398,9 +1424,11 @@ private static String generateQueryPhyTablesCountSQLForHeatmap(Set schem return sb.toString(); } - private static String generateQueryPhyTableStatisticsCountSQLForHeatmap(Set schemaNames, + private static String generateQueryPhyTableStatisticsCountSQLForHeatmap(String logicalSchema, + Set schemaNames, Set indexTableNames) { StringBuilder sb = new StringBuilder(); + SchemaManager sm = 
OptimizerContext.getContext(logicalSchema).getLatestSchemaManager(); int schemaIndex = 0; for (String schemaName : schemaNames) { if (schemaIndex != 0) { @@ -1420,7 +1448,15 @@ private static String generateQueryPhyTableStatisticsCountSQLForHeatmap(Set schemaNames, Set indexTableNames, + private static String generateQueryPhyTablesStatsSQLForHeatmap(String logicalSchema, Set schemaNames, + Set indexTableNames, Integer maxSingleLogicSchemaCount) { StringBuilder sb = new StringBuilder(); int schemaIndex = 0; + SchemaManager sm = OptimizerContext.getContext(logicalSchema).getLatestSchemaManager(); int limit = maxSingleLogicSchemaCount / schemaNames.size(); for (String schemaName : schemaNames) { if (schemaIndex != 0) { @@ -1465,7 +1503,15 @@ private static String generateQueryPhyTablesStatsSQLForHeatmap(Set schem sb.append(" and ("); schemaIndex = 0; for (String tableName : indexTableNames) { - String filter = "t.table_name like '" + tableName + "%'"; + String pattern; + try { + PartitionInfo partitionInfo = sm.getTable(tableName).getPartitionInfo(); + pattern = partitionInfo.getTableNamePattern(); + } catch (Exception ex) { + pattern = tableName; + } + + String filter = "table_name like '" + pattern + "%'"; if (schemaIndex != 0) { sb.append(" or "); } @@ -1483,10 +1529,12 @@ private static String generateQueryPhyTablesStatsSQLForHeatmap(Set schem return sb.toString(); } - private static String generateQueryPhyStaticsSQLForHeatmap(Set schemaNames, Set indexTableNames, + private static String generateQueryPhyStaticsSQLForHeatmap(String logicalSchema, Set schemaNames, + Set indexTableNames, Integer maxSingleLogicSchemaCount) { StringBuilder sb = new StringBuilder(); int schemaIndex = 0; + SchemaManager sm = OptimizerContext.getContext(logicalSchema).getLatestSchemaManager(); int limit = maxSingleLogicSchemaCount / schemaNames.size(); for (String schemaName : schemaNames) { if (schemaIndex != 0) { @@ -1513,7 +1561,15 @@ private static String generateQueryPhyStaticsSQLForHeatmap(Set schemaNam sb.append(" and ("); schemaIndex = 0; for (String tableName : indexTableNames) { - String filter = "s.table_name like '" + tableName + "%'"; + String pattern; + try { + PartitionInfo partitionInfo = sm.getTable(tableName).getPartitionInfo(); + pattern = partitionInfo.getTableNamePattern(); + } catch (Exception ex) { + pattern = tableName; + } + + String filter = "table_name like '" + pattern + "%'"; if (schemaIndex != 0) { sb.append(" or "); } @@ -1534,10 +1590,12 @@ private static String generateQueryPhyStaticsSQLForHeatmap(Set schemaNam /** * Build a SQL to query information_schema.table_statistics */ - private static String generateQueryPhyTablesStatisticsSQL(Set schemaNames, Set indexTableNames, + private static String generateQueryPhyTablesStatisticsSQL(String logicalSchema, Set schemaNames, + Set indexTableNames, String tableLike) { StringBuilder sb = new StringBuilder(); int schemaIndex = 0; + SchemaManager sm = OptimizerContext.getContext(logicalSchema).getLatestSchemaManager(); for (String schemaName : schemaNames) { // select sb.append(schemaIndex == 0 ? 
"SELECT " : " UNION ALL SELECT "); @@ -1559,7 +1617,15 @@ private static String generateQueryPhyTablesStatisticsSQL(Set schemaName sb.append(" and ("); schemaIndex = 0; for (String tableName : indexTableNames) { - String filter = "table_name like '" + tableName + "%'"; + String pattern; + try { + PartitionInfo partitionInfo = sm.getTable(tableName).getPartitionInfo(); + pattern = partitionInfo.getTableNamePattern(); + } catch (Exception ex) { + pattern = tableName; + } + + String filter = "table_name like '" + pattern + "%'"; if (schemaIndex != 0) { sb.append(" or "); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/stats/TableGroupStat.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/stats/TableGroupStat.java index 2f8c6c5d4..3ece25620 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/stats/TableGroupStat.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/balancer/stats/TableGroupStat.java @@ -66,9 +66,8 @@ public List getPartitionGroups() { return new ArrayList<>(this.pgMap.values()); } - public List getAllTables() { + public List getAllTables() { return this.tableGroupConfig.getAllTables(); } - } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/AbstractAggregator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/AbstractAggregator.java deleted file mode 100644 index e2ca4eb9f..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/AbstractAggregator.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc; - -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -/** - * Created by chuanqin on 17/8/9. 
- */ -public abstract class AbstractAggregator implements Aggregator { - protected final int filterArg; - - /** - * Input type, may be not always same with - */ - protected DataType[] inputType; - - protected DataType returnType; - - /** - * Original target index - */ - protected int[] originTargetIndexes; - - /** - * Target index used to get block when accumulating - */ - protected int[] aggIndexInChunk; - - protected boolean isDistinct; - - public AbstractAggregator(int[] targetIndexes, boolean distinct, DataType[] inputType, DataType returnType, - int filterArg) { - this.originTargetIndexes = targetIndexes; - this.aggIndexInChunk = originTargetIndexes.clone(); - this.isDistinct = distinct; - this.inputType = inputType; - this.returnType = returnType; - this.filterArg = filterArg; - } - - public boolean isDistinct() { - return isDistinct; - } - - public int getFilterArg() { - return filterArg; - } - - public DataType[] getInputType() { - return inputType; - } - - public int[] getOriginTargetIndexes() { - return originTargetIndexes; - } - - public void setAggIndexInChunk(int[] aggIndexInChunk) { - this.aggIndexInChunk = aggIndexInChunk; - } - - /** - * not always same with aggTargetIndexes, see function: GroupConcat - */ - public int[] getInputColumnIndexes() { - return originTargetIndexes; - } -} \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/Aggregator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/Aggregator.java deleted file mode 100644 index 8901280ab..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/Aggregator.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc; - -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; - -public interface Aggregator { - /** - * @param groupId id of the target agg group, sortAgg and windowExec may use 0 - *

- * Accumulate a value into target group - */ - void accumulate(int groupId, Chunk inputChunk, int position); - - /** - * write aggregated result to the block builder - */ - void writeResultTo(int groupId, BlockBuilder bb); - - /** - * Append a new group with initial value, may be null, 0 or something else - */ - default void appendInitValue() { - } - - /** - * Reset the group to its initial value - */ - default void resetToInitValue(int groupId) { - } - - /** - * @param capacity estimated group size - *

- * Init the aggregator - */ - default void open(int capacity) { - } - - /** - * Estimate the memory consumption - */ - default long estimateSize() { - return 0; - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Avg.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Avg.java deleted file mode 100644 index 3a50471d0..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Avg.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.NullableObjectLongGroupState; - -/** - * Created by chuanqin on 17/8/9. - */ -public class Avg extends AbstractAggregator { - private NullableObjectLongGroupState groupState; - - public Avg(int targetIndexes, boolean distinct, DataType returnType, int filterArg) { - super(new int[] {targetIndexes}, distinct, null, returnType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new NullableObjectLongGroupState(capacity); - } - - @Override - public void accumulate(int groupId, Chunk inputChunk, int position) { - Block block = inputChunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final Object value = block.getObject(position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, returnType.convertFrom(value), 1); - } else { - Object oldValue = groupState.getObject(groupId); - groupState.set(groupId, returnType.getCalculator().add(oldValue, value), groupState.getLong(groupId) + 1); - } - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - if (groupState.isNull(groupId)) { - bb.appendNull(); - } else { - Object avg = returnType.getCalculator().divide( - groupState.getObject(groupId), - groupState.getLong(groupId)); - if (avg == null) { - bb.appendNull(); - } else { - bb.writeObject(avg); - } - } - } - - @Override - public void appendInitValue() { - groupState.appendNull(); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.setNull(groupId); - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/BitAnd.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/BitAnd.java deleted file mode 100644 index d1a781ce5..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/BitAnd.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright [2013-2021], 
Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.NullableObjectGroupState; - -/** - * Created by chuanqin on 17/12/7. - */ -public class BitAnd extends AbstractAggregator { - private NullableObjectGroupState groupState; - - public BitAnd(int index, DataType inputTypes, DataType outType, int filterArg) { - super(new int[] {index}, false, new DataType[] {inputTypes}, outType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new NullableObjectGroupState(capacity); - } - - @Override - public void appendInitValue() { - groupState.append(returnType.getMaxValue()); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.set(groupId, returnType.getMaxValue()); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - Object value = block.getObject(position); - Object beforeValue = groupState.get(groupId); - groupState.set(groupId, returnType.getCalculator().bitAnd(value, beforeValue)); - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - bb.writeObject(groupState.get(groupId)); - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/BitOr.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/BitOr.java deleted file mode 100644 index 896155a5a..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/BitOr.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.NullableObjectGroupState; - -/** - * Created by chuanqin on 17/12/7. - */ -public class BitOr extends AbstractAggregator { - - private NullableObjectGroupState groupState; - - public BitOr(int index, DataType inputTypes, DataType outType, int filterArg) { - super(new int[] {index}, false, new DataType[] {inputTypes}, outType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new NullableObjectGroupState(capacity); - } - - @Override - public void appendInitValue() { - groupState.append(returnType.convertFrom(0)); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.set(groupId, returnType.convertFrom(0)); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - Object value = block.getObject(position); - Object beforeValue = groupState.get(groupId); - groupState.set(groupId, returnType.getCalculator().bitOr(value, beforeValue)); - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - bb.writeObject(groupState.get(groupId)); - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/BitXor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/BitXor.java deleted file mode 100644 index fa45e774e..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/BitXor.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.NullableObjectGroupState; - -/** - * Created by chuanqin on 17/12/11. 
- */ -public class BitXor extends AbstractAggregator { - private NullableObjectGroupState groupState; - - public BitXor(int index, DataType inputTypes, DataType outType, int filterArg) { - super(new int[] {index}, false, new DataType[] {inputTypes}, outType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new NullableObjectGroupState(capacity); - } - - @Override - public void appendInitValue() { - groupState.append(returnType.convertFrom(0)); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.set(groupId, returnType.convertFrom(0)); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - Object value = block.getObject(position); - Object beforeValue = groupState.get(groupId); - groupState.set(groupId, returnType.getCalculator().bitXor(value, beforeValue)); - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - bb.writeObject(groupState.get(groupId)); - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Byte2ByteMax.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Byte2ByteMax.java deleted file mode 100644 index 77a5c32e0..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Byte2ByteMax.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
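
The three bit aggregators deleted above differ only in their seed: BitAnd initializes each group to `returnType.getMaxValue()` (an all-ones pattern, the identity of AND), while BitOr and BitXor initialize to `convertFrom(0)` (the identity of OR and XOR). Seeding with the identity keeps `accumulate` branch-free, since the first real value combines with the seed without a special case. A minimal sketch of the same folds over plain `long` values (illustrative names, not from the source):

```java
public final class BitAggIdentities {
    static long bitAnd(long[] values) {
        long acc = -1L;                  // all bits set: the identity of AND
        for (long v : values) {
            acc &= v;
        }
        return acc;
    }

    static long bitOr(long[] values) {
        long acc = 0L;                   // the identity of OR
        for (long v : values) {
            acc |= v;
        }
        return acc;
    }

    static long bitXor(long[] values) {
        long acc = 0L;                   // the identity of XOR
        for (long v : values) {
            acc ^= v;
        }
        return acc;
    }

    public static void main(String[] args) {
        long[] vs = {0b1100L, 0b1010L};
        System.out.println(bitAnd(vs)); // 8  (0b1000)
        System.out.println(bitOr(vs));  // 14 (0b1110)
        System.out.println(bitXor(vs)); // 6  (0b0110)
    }
}
```
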
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.NullableByteGroupState; - -public class Byte2ByteMax extends AbstractAggregator { - protected NullableByteGroupState groupState; - - public Byte2ByteMax(int index, DataType inputType, DataType outputType, int filterArg) { - super(new int[] {index}, false, new DataType[] {inputType}, outputType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new NullableByteGroupState(capacity); - } - - @Override - public void appendInitValue() { - groupState.appendNull(); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.setNull(groupId); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final byte value = block.getByte(position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, value); - } else { - byte beforeValue = groupState.get(groupId); - byte afterValue = value > beforeValue ? value : beforeValue; - groupState.set(groupId, afterValue); - } - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - if (groupState.isNull(groupId)) { - bb.appendNull(); - } else { - bb.writeByte(groupState.get(groupId)); - } - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Byte2ByteMin.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Byte2ByteMin.java deleted file mode 100644 index 68cc0f4e6..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Byte2ByteMin.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
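
Byte2ByteMax above (and the Byte2ByteMin subclass whose deletion follows) cannot use an identity seed such as `Byte.MIN_VALUE`, because SQL MAX over a group whose inputs are all NULL must return NULL. The state therefore starts as NULL and the first non-null input becomes the seed. A condensed sketch of that pattern, with a hypothetical stand-in for `NullableByteGroupState`:

```java
public final class NullableByteMax {
    private byte value;
    private boolean isNull = true;

    void accumulate(Byte input) {
        if (input == null) {
            return;                   // NULL inputs are skipped, not compared
        }
        if (isNull) {
            value = input;            // the first non-null value seeds the state
            isNull = false;
        } else if (input > value) {
            value = input;
        }
    }

    Byte result() {
        return isNull ? null : value; // an all-NULL group stays NULL
    }
}
```
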
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -public class Byte2ByteMin extends Byte2ByteMax { - public Byte2ByteMin(int index, DataType inputType, DataType outputType, int filterArg) { - super(index, inputType, outputType, filterArg); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final byte value = block.getByte(position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, value); - } else { - byte beforeValue = groupState.get(groupId); - byte afterValue = value < beforeValue ? value : beforeValue; - groupState.set(groupId, afterValue); - } - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Byte2DecimalAvg.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Byte2DecimalAvg.java deleted file mode 100644 index 8f7a42ca9..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Byte2DecimalAvg.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.Decimal; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -public class Byte2DecimalAvg extends SpecificType2DecimalAvg { - public Byte2DecimalAvg(int index, boolean isDistict, DataType inputType, DataType outputType, int filterArg) { - super(index, isDistict, inputType, outputType, filterArg); - } - - @Override - protected Decimal getDecimal(Block block, int position) { - return Decimal.fromLong(block.getByte(position)); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Byte2DecimalSum.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Byte2DecimalSum.java deleted file mode 100644 index b76d9594d..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Byte2DecimalSum.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -public class Byte2DecimalSum extends LittleNum2DecimalSum { - public Byte2DecimalSum(int targetIndexes, boolean distinct, DataType inputType, DataType outputType, - int filterArg) { - super(targetIndexes, distinct, inputType, outputType, filterArg); - } - - @Override - protected long getLong(Block block, int position) { - return block.getByte(position); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Byte2UInt64BitAnd.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Byte2UInt64BitAnd.java deleted file mode 100644 index 6b9b4c9fb..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Byte2UInt64BitAnd.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.UInt64; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.ULongType; - -public class Byte2UInt64BitAnd extends SpecificType2UInt64BitAnd { - public Byte2UInt64BitAnd(int index, DataType inputTypes, int filterArg) { - super(index, inputTypes, filterArg); - } - - @Override - protected UInt64 getUInt64(Block block, int position) { - return ULongType.instance.convertFrom(block.getByte(position)); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Byte2UInt64BitOr.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Byte2UInt64BitOr.java deleted file mode 100644 index 268b8358d..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Byte2UInt64BitOr.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.UInt64; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.ULongType; - -public class Byte2UInt64BitOr extends SpecificType2UInt64BitOr { - public Byte2UInt64BitOr(int index, DataType inputTypes, int filterArg) { - super(index, inputTypes, filterArg); - } - - @Override - protected UInt64 getUInt64(Block block, int position) { - return ULongType.instance.convertFrom(block.getByte(position)); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Byte2UInt64BitXor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Byte2UInt64BitXor.java deleted file mode 100644 index 96ef19afb..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Byte2UInt64BitXor.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.UInt64; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.ULongType; - -public class Byte2UInt64BitXor extends SpecificType2UInt64BitXor { - public Byte2UInt64BitXor(int index, DataType inputTypes, int filterArg) { - super(index, inputTypes, filterArg); - } - - @Override - protected UInt64 getUInt64(Block block, int position) { - return ULongType.instance.convertFrom(block.getByte(position)); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Count.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Count.java deleted file mode 100644 index 1abf08759..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Count.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
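
The Byte2UInt64BitAnd/BitOr/BitXor classes above are thin type adapters: the shared accumulation loop lives in the `SpecificType2UInt64Bit*` base classes (deleted elsewhere in this change, not visible in this hunk), and each subclass overrides only `getUInt64` to read its column type. A rough reconstruction of that template-method shape, under the assumption that the base classes follow the pattern the adapters imply:

```java
// Reconstruction, not the original source: the real base classes work on
// UInt64 values through ULongType rather than on raw longs.
abstract class SpecificTypeBitAnd {
    private long state = -1L;                      // AND identity

    final void accumulate(Object[] row, int column) {
        if (row[column] == null) {
            return;
        }
        state &= extractBits(row[column]);         // shared bit logic
    }

    long result() {
        return state;
    }

    // Concrete subclasses only know how to read their input type.
    protected abstract long extractBits(Object value);
}

final class ByteBitAnd extends SpecificTypeBitAnd {
    @Override
    protected long extractBits(Object value) {
        return ((Byte) value).longValue();         // widening; the real code converts via ULongType
    }
}
```
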
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.NullableLongGroupState; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; - -public class Count extends AbstractAggregator { - - private NullableLongGroupState groupState; - - public Count(int[] targetIndexes, boolean distinct, int filterArg) { - super(targetIndexes, distinct, null, DataTypes.LongType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new NullableLongGroupState(capacity); - } - - @Override - public void appendInitValue() { - groupState.append(0L); - } - - @Override - public void accumulate(int groupId, Chunk inputChunk, int position) { - assert inputChunk.getBlockCount() > 0; - boolean notNull = true; - for (int i = 0; i < aggIndexInChunk.length; i++) { - if (inputChunk.getBlock(aggIndexInChunk[i]).isNull(position)) { - notNull = false; - break; - } - } - if (notNull) { - groupState.set(groupId, groupState.get(groupId) + 1); - } - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - if (groupState.isNull(groupId)) { - bb.appendNull(); - } else { - bb.writeLong(groupState.get(groupId)); - } - } - - @Override - public void resetToInitValue(int groupId) { - groupState.set(groupId, 0L); - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/CountRow.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/CountRow.java deleted file mode 100644 index 164c445d6..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/CountRow.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.LongGroupState; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; - -public class CountRow extends AbstractAggregator { - private LongGroupState groupState; - - public CountRow(int[] targetIndexes, boolean distinct, int filterArg) { - super(targetIndexes, distinct, null, DataTypes.LongType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new LongGroupState(capacity); - } - - @Override - public void appendInitValue() { - groupState.append(0L); - } - - @Override - public void accumulate(int groupId, Chunk inputChunk, int position) { - groupState.set(groupId, groupState.get(groupId) + 1); - } - - @Override - public void writeResultTo(int position, BlockBuilder bb) { - bb.writeLong(groupState.get(position)); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.set(groupId, 0L); - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/CumeDist.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/CumeDist.java deleted file mode 100644 index 397ba1186..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/CumeDist.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
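
Count and CountRow above implement the two COUNT variants: Count increments a group's counter only when every argument column is non-NULL (`COUNT(expr, ...)`), while CountRow increments unconditionally (`COUNT(*)`), which is why CountRow can use the non-nullable `LongGroupState`. The same semantics condensed to plain arrays:

```java
public final class CountSemantics {
    static long countColumns(Object[][] rows) {
        long n = 0;
        for (Object[] row : rows) {
            boolean anyNull = false;
            for (Object v : row) {
                if (v == null) {
                    anyNull = true;   // COUNT(expr, ...) skips rows with any NULL argument
                    break;
                }
            }
            if (!anyNull) {
                n++;
            }
        }
        return n;
    }

    static long countRows(Object[][] rows) {
        return rows.length;           // COUNT(*) counts every row, NULLs included
    }

    public static void main(String[] args) {
        Object[][] rows = {{1}, {null}, {3}};
        System.out.println(countColumns(rows)); // 2
        System.out.println(countRows(rows));    // 3
    }
}
```
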
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.row.Row; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; - -public class CumeDist extends Rank { - protected HashMap, Long> rowKeysToRank = new HashMap<>(); - protected List bufferRows = new ArrayList<>(); - int rowIndex = 0; - - public CumeDist(int[] index, int filterArg) { - super(index, filterArg); - } - - @Override - public void accumulate(int groupId, Chunk inputChunk, int position) { - count++; - if (aggIndexInChunk.length > 0) { - Chunk.ChunkRow row = inputChunk.rowAt(position); - bufferRows.add(row); - List rankKey = getAggregateKey(row); - rowKeysToRank.put(rankKey, count); - } - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - if (rowIndex >= bufferRows.size()) { - return; - } - List rowKeys = getAggregateKey(bufferRows.get(rowIndex++)); - Long rank = rowKeysToRank.get(rowKeys); - if (rank == null) { - bb.writeDouble((double) 1.0); - } else { - bb.writeDouble((double) rank / count); - } - } - - @Override - public void resetToInitValue(int groupId) { - bufferRows = new ArrayList<>(); - rowKeysToRank = new HashMap<>(); - count = 0L; - rowIndex = 0; - } - - protected List getAggregateKey(Row row) { - if (row == null) { - return null; - } - if (aggIndexInChunk == null || aggIndexInChunk.length == 0) { - return null; - } - List lastRowValues = row.getValues(); - List aggTargetIndexValues = new ArrayList<>(); - for (int index : aggIndexInChunk) { - aggTargetIndexValues.add(lastRowValues.get(index)); - } - return aggTargetIndexValues; - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Decimal2DecimalAvg.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Decimal2DecimalAvg.java deleted file mode 100644 index 1cc3c3a20..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Decimal2DecimalAvg.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
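
CumeDist above buffers every row of the partition and records the running row count against each distinct ordering key, so the map ends up holding, per key, the number of rows at or before that key (assuming rows arrive in the window's ORDER BY order); `writeResultTo` then emits `rank / count` row by row. A standalone sketch with `int` keys:

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public final class CumeDistSketch {
    public static void main(String[] args) {
        int[] sorted = {10, 20, 20, 30};          // assumes ORDER BY is already applied
        Map<Integer, Long> lastCountPerKey = new HashMap<>();
        long count = 0;
        for (int key : sorted) {
            count++;
            lastCountPerKey.put(key, count);      // rows with value <= key
        }
        List<Double> result = new ArrayList<>();
        for (int key : sorted) {
            result.add((double) lastCountPerKey.get(key) / count);
        }
        System.out.println(result);               // [0.25, 0.75, 0.75, 1.0]
    }
}
```
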
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.Decimal; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -public class Decimal2DecimalAvg extends SpecificType2DecimalAvg { - public Decimal2DecimalAvg(int index, boolean isDistict, DataType inputType, DataType outputType, int filterArg) { - super(index, isDistict, inputType, outputType, filterArg); - } - - @Override - protected Decimal getDecimal(Block block, int position) { - return block.getDecimal(position); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Decimal2DecimalMax.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Decimal2DecimalMax.java deleted file mode 100644 index dcb8e3555..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Decimal2DecimalMax.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.Decimal; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.state.NullableDecimalGroupState; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -import java.util.Arrays; -import java.util.Collections; - -public class Decimal2DecimalMax extends AbstractAggregator { - - protected NullableDecimalGroupState groupState; - - public Decimal2DecimalMax(int index, DataType inputType, DataType outputType, int filterArg) { - super(new int[] {index}, false, new DataType[] {inputType}, outputType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new NullableDecimalGroupState(capacity); - } - - @Override - public void appendInitValue() { - groupState.appendNull(); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.set(groupId, null); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final Decimal value = block.getDecimal(position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, value); - } else { - Decimal beforeValue = groupState.get(groupId); - Decimal afterValue = Collections.max(Arrays.asList(beforeValue, value)); - groupState.set(groupId, afterValue); - } - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - if (groupState.isNull(groupId)) { - bb.appendNull(); - } else { - bb.writeDecimal(groupState.get(groupId)); - } - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} - diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Decimal2DecimalMin.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Decimal2DecimalMin.java deleted file mode 100644 index 0d8ec4ad2..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Decimal2DecimalMin.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.Decimal; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -import java.util.Arrays; -import java.util.Collections; - -public class Decimal2DecimalMin extends Decimal2DecimalMax { - - public Decimal2DecimalMin(int index, DataType inputType, DataType outputType, int filterArg) { - super(index, inputType, outputType, filterArg); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final Decimal value = block.getDecimal(position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, value); - } else { - Decimal beforeValue = groupState.get(groupId); - Decimal afterValue = Collections.min(Arrays.asList(beforeValue, value)); - groupState.set(groupId, afterValue); - } - } -} - diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Decimal2DecimalSum.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Decimal2DecimalSum.java deleted file mode 100644 index 36445e943..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Decimal2DecimalSum.java +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.Decimal; -import com.alibaba.polardbx.common.datatype.DecimalBox; -import com.alibaba.polardbx.common.datatype.FastDecimalUtils; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.executor.chunk.DecimalBlock; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.DecimalBoxGroupState; -import com.alibaba.polardbx.optimizer.state.GroupState; -import com.alibaba.polardbx.optimizer.state.NullableDecimalGroupState; - -public class Decimal2DecimalSum extends AbstractAggregator { - protected GroupState state; - - private Decimal cache; - - public Decimal2DecimalSum(int targetIndexes, boolean distinct, DataType inputType, DataType outputType, - int filterArg) { - super(new int[] {targetIndexes}, distinct, new DataType[] {inputType}, outputType, filterArg); - } - - @Override - public void open(int capacity) { - state = new NullableDecimalGroupState(capacity); - cache = new Decimal(); - } - - @Override - public void accumulate(int groupId, Chunk inputChunk, int position) { - Block block = inputChunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - DecimalBlock decimalBlock = (DecimalBlock) block; - boolean isSimple = decimalBlock.isSimple(); - - if (state instanceof DecimalBoxGroupState) { - DecimalBoxGroupState boxGroupState = (DecimalBoxGroupState) state; - - if (isSimple) { - // 1. best case: all decimal value in block is simple - if (boxGroupState.isNull(groupId)) { - DecimalBox box = new DecimalBox(); - int a1 = decimalBlock.fastInt1(position); - int a2 = decimalBlock.fastInt2(position); - int b = decimalBlock.fastFrac(position); - box.add(a1, a2, b); - - boxGroupState.set(groupId, box); - } else { - DecimalBox box = boxGroupState.getBox(groupId); - - int a1 = decimalBlock.fastInt1(position); - int a2 = decimalBlock.fastInt2(position); - int b = decimalBlock.fastFrac(position); - box.add(a1, a2, b); - } - } else { - // 2. bad case: a decimal value is not simple in the block - - // change state to - this.state = boxGroupState.toDecimalGroupState(); - - // do normal add - normalAdd(groupId, position, decimalBlock); - } - } else if (state instanceof NullableDecimalGroupState) { - // 3. 
normal case: - - normalAdd(groupId, position, decimalBlock); - } - } - - private void normalAdd(int groupId, int position, DecimalBlock decimalBlock) { - NullableDecimalGroupState decimalGroupState = (NullableDecimalGroupState) state; - Decimal value = decimalBlock.getDecimal(position); - if (decimalGroupState.isNull(groupId)) { - // initialize the operand (not null) - decimalGroupState.set(groupId, value.copy()); - } else { - Decimal beforeValue = decimalGroupState.get(groupId); - - // avoid reset memory to 0 - FastDecimalUtils.add( - beforeValue.getDecimalStructure(), - value.getDecimalStructure(), - cache.getDecimalStructure(), - false); - - // swap variants to avoid allocating memory - Decimal afterValue = cache; - cache = beforeValue; - - decimalGroupState.set(groupId, afterValue); - } - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - if (state instanceof DecimalBoxGroupState) { - if (((DecimalBoxGroupState)state).isNull(groupId)) { - bb.appendNull(); - } else { - bb.writeDecimal(((DecimalBoxGroupState)state).get(groupId)); - } - } else if (state instanceof NullableDecimalGroupState) { - if (((NullableDecimalGroupState)state).isNull(groupId)) { - bb.appendNull(); - } else { - bb.writeDecimal(((NullableDecimalGroupState)state).get(groupId)); - } - } - } - - @Override - public void appendInitValue() { - if (state instanceof DecimalBoxGroupState) { - ((DecimalBoxGroupState) state).appendNull(); - } else if (state instanceof NullableDecimalGroupState) { - ((NullableDecimalGroupState) state).appendNull(); - } - } - - @Override - public void resetToInitValue(int groupId) { - if (state instanceof DecimalBoxGroupState) { - ((DecimalBoxGroupState) state).setNull(groupId); - } else if (state instanceof NullableDecimalGroupState) { - ((NullableDecimalGroupState) state).setNull(groupId); - } - } - - @Override - public long estimateSize() { - return state.estimateSize(); - } -} \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Decimal2UInt64BitAnd.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Decimal2UInt64BitAnd.java deleted file mode 100644 index b20555398..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Decimal2UInt64BitAnd.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
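
Decimal2DecimalSum above is two-phase: it starts in `DecimalBoxGroupState`, accumulating "simple" decimals as fixed integer components read via `fastInt1`/`fastInt2`/`fastFrac`, and irreversibly downgrades to `NullableDecimalGroupState` with `FastDecimalUtils.add` on the first non-simple value; the cache-and-swap in `normalAdd` then avoids allocating a fresh `Decimal` per row. A sketch of the downgrade pattern, with `BigDecimal` standing in for the engine's `Decimal` and a scale check standing in for the block-level `isSimple` test:

```java
import java.math.BigDecimal;

public final class TwoPhaseDecimalSum {
    private long fastUnscaled = 0;        // fast path: fixed scale of 2, overflow handling omitted
    private BigDecimal slow = null;       // non-null once the state has downgraded

    void accumulate(BigDecimal v) {
        if (slow == null && v.scale() <= 2) {
            // fast path: plain integer arithmetic on the unscaled value
            fastUnscaled += v.setScale(2).unscaledValue().longValueExact();
        } else if (slow == null) {
            // first "non-simple" value: downgrade, carrying the running sum over
            slow = BigDecimal.valueOf(fastUnscaled, 2).add(v);
        } else {
            slow = slow.add(v);           // the downgrade is one-way, as in the deleted code
        }
    }

    BigDecimal result() {
        return slow != null ? slow : BigDecimal.valueOf(fastUnscaled, 2);
    }
}
```
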
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.UInt64; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.ULongType; - -public class Decimal2UInt64BitAnd extends SpecificType2UInt64BitAnd { - public Decimal2UInt64BitAnd(int index, DataType inputTypes, int filterArg) { - super(index, inputTypes, filterArg); - } - - @Override - protected UInt64 getUInt64(Block block, int position) { - return ULongType.instance.convertFrom(block.getDecimal(position)); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Decimal2UInt64BitOr.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Decimal2UInt64BitOr.java deleted file mode 100644 index 775abf0e3..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Decimal2UInt64BitOr.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.UInt64; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.ULongType; - -public class Decimal2UInt64BitOr extends SpecificType2UInt64BitOr { - public Decimal2UInt64BitOr(int index, DataType inputTypes, int filterArg) { - super(index, inputTypes, filterArg); - } - - @Override - protected UInt64 getUInt64(Block block, int position) { - return ULongType.instance.convertFrom(block.getDecimal(position)); - } -} - diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Decimal2UInt64BitXor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Decimal2UInt64BitXor.java deleted file mode 100644 index a8d24b260..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Decimal2UInt64BitXor.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.UInt64; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.ULongType; - -public class Decimal2UInt64BitXor extends SpecificType2UInt64BitXor { - public Decimal2UInt64BitXor(int index, DataType inputTypes, int filterArg) { - super(index, inputTypes, filterArg); - } - - @Override - protected UInt64 getUInt64(Block block, int position) { - return ULongType.instance.convertFrom(block.getDecimal(position)); - } -} \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/DenseRank.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/DenseRank.java deleted file mode 100644 index 0468ad9a4..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/DenseRank.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Chunk; - -public class DenseRank extends Rank { - - public DenseRank(int[] index, int filterArg) { - super(index, filterArg); - } - - @Override - public void accumulate(int groupId, Chunk inputChunk, int position) { - Chunk.ChunkRow row = inputChunk.rowAt(position); - if (!sameRank(lastRow, row)) { - rank++; - lastRow = row; - } - } -} - diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Double2DoubleAvg.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Double2DoubleAvg.java deleted file mode 100644 index 495452210..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Double2DoubleAvg.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
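
DenseRank above overrides only `accumulate`: it advances the rank by one whenever the ordering key changes rather than jumping, so ties leave no gaps. The `Rank` base class is deleted elsewhere in this change, but the standard contrast is: over the sorted values (10, 20, 20, 30), RANK yields 1, 2, 2, 4 while DENSE_RANK yields 1, 2, 2, 3. Side by side:

```java
public final class RankSketch {
    public static void main(String[] args) {
        int[] sorted = {10, 20, 20, 30};
        long rank = 0, denseRank = 0, rowNumber = 0;
        Integer last = null;
        for (int key : sorted) {
            rowNumber++;
            if (last == null || key != last) {
                rank = rowNumber;    // RANK jumps to the row number: gaps after ties
                denseRank++;         // DENSE_RANK advances by one: no gaps
                last = key;
            }
            System.out.println(key + ": rank=" + rank + ", denseRank=" + denseRank);
        }
    }
}
```
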
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -public class Double2DoubleAvg extends SpecificType2DoubleAvgV2 { - public Double2DoubleAvg(int index, boolean isDistict, DataType inputType, DataType outputType, int filterArg) { - super(index, isDistict, inputType, outputType, filterArg); - } - - @Override - protected double getDouble(Block block, int position) { - return block.getDouble(position); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Double2DoubleMax.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Double2DoubleMax.java deleted file mode 100644 index 6436382e1..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Double2DoubleMax.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.state.NullableDoubleGroupState; - -public class Double2DoubleMax extends AbstractAggregator { - - protected NullableDoubleGroupState groupState; - - public Double2DoubleMax(int index, DataType inputType, DataType outputType, int filterArg) { - super(new int[] {index}, false, new DataType[] {inputType}, outputType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new NullableDoubleGroupState(capacity); - } - - @Override - public void appendInitValue() { - groupState.appendNull(); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.setNull(groupId); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final double value = block.getDouble(position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, value); - } else { - double beforeValue = groupState.get(groupId); - double afterValue = Math.max(value, beforeValue); - groupState.set(groupId, afterValue); - } - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - if (groupState.isNull(groupId)) { - bb.appendNull(); - } else { - bb.writeDouble(groupState.get(groupId)); - } - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} - diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Double2DoubleMin.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Double2DoubleMin.java deleted 
file mode 100644 index fff777e54..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Double2DoubleMin.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -public class Double2DoubleMin extends Double2DoubleMax { - - public Double2DoubleMin(int index, DataType inputType, DataType outputType, int filterArg) { - super(index, inputType, outputType, filterArg); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final double value = block.getDouble(position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, value); - } else { - double beforeValue = groupState.get(groupId); - double afterValue = Math.min(value, beforeValue); - groupState.set(groupId, afterValue); - } - } -} - diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Double2DoubleSum.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Double2DoubleSum.java deleted file mode 100644 index b3f35ad79..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Double2DoubleSum.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -public final class Double2DoubleSum extends LittleNum2DoubleSum { - public Double2DoubleSum(int targetIndexes, boolean distinct, DataType inputType, DataType outputType, - int filterArg) { - super(targetIndexes, distinct, inputType, outputType, filterArg); - } - - @Override - protected double getDouble(Block block, int position) { - return block.getDouble(position); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/FirstValue.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/FirstValue.java deleted file mode 100644 index 4a86c398f..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/FirstValue.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; - -/** - * Return the first value of the window - * - * @author hongxi.chx - */ -public class FirstValue extends AbstractAggregator { - - private Object outputValue = null; - private boolean assigned = false; - - public FirstValue(int index, int filterArg) { - super(new int[] {index}, false, null, null, filterArg); - } - - @Override - public void accumulate(int groupId, Chunk inputChunk, int position) { - if (assigned) { - return; - } - assigned = true; - outputValue = inputChunk.getBlock(aggIndexInChunk[0]).getObject(position); - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - bb.writeObject(outputValue); - } - - @Override - public void resetToInitValue(int groupId) { - outputValue = null; - assigned = false; - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Float2DoubleAvg.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Float2DoubleAvg.java deleted file mode 100644 index 8dde1ee8f..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Float2DoubleAvg.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
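
FirstValue above latches the first value it sees behind an `assigned` flag, ignores every later row, and relies on `resetToInitValue` to clear the latch when the window frame moves; unlike the grouped aggregators it keeps a single scalar rather than per-group state. The latch in isolation:

```java
public final class FirstValueLatch {
    private Object value;
    private boolean assigned;

    void accumulate(Object v) {
        if (!assigned) {      // only the first value in the frame sticks
            value = v;
            assigned = true;
        }
    }

    Object result() {
        return value;         // null until something is accumulated
    }

    void reset() {            // invoked when the window frame moves on
        value = null;
        assigned = false;
    }
}
```
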
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -public class Float2DoubleAvg extends SpecificType2DoubleAvgV2 { - public Float2DoubleAvg(int index, boolean isDistict, DataType inputType, DataType outputType, int filterArg) { - super(index, isDistict, inputType, outputType, filterArg); - } - - @Override - protected double getDouble(Block block, int position) { - return block.getFloat(position); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Float2DoubleSum.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Float2DoubleSum.java deleted file mode 100644 index 7f7b380e0..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Float2DoubleSum.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -public final class Float2DoubleSum extends LittleNum2DoubleSum { - public Float2DoubleSum(int targetIndexes, boolean distinct, DataType inputType, DataType outputType, - int filterArg) { - super(targetIndexes, distinct, inputType, outputType, filterArg); - } - - @Override - protected double getDouble(Block block, int position) { - return block.getFloat(position); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Float2FloatMax.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Float2FloatMax.java deleted file mode 100644 index 03220e9a7..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Float2FloatMax.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.NullableFloatGroupState; - -public class Float2FloatMax extends AbstractAggregator { - - protected NullableFloatGroupState groupState; - - public Float2FloatMax(int index, DataType inputType, DataType outputType, int filterArg) { - super(new int[] {index}, false, new DataType[] {inputType}, outputType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new NullableFloatGroupState(capacity); - } - - @Override - public void appendInitValue() { - groupState.appendNull(); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.setNull(groupId); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final float value = block.getFloat(position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, value); - } else { - float beforeValue = groupState.get(groupId); - float afterValue = Math.max(value, beforeValue); - groupState.set(groupId, afterValue); - } - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - if (groupState.isNull(groupId)) { - bb.appendNull(); - } else { - bb.writeFloat(groupState.get(groupId)); - } - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Float2FloatMin.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Float2FloatMin.java deleted file mode 100644 index b9d768680..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Float2FloatMin.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -public class Float2FloatMin extends Float2FloatMax { - public Float2FloatMin(int index, DataType inputType, DataType outputType, int filterArg) { - super(index, inputType, outputType, filterArg); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final float value = block.getFloat(position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, value); - } else { - float beforeValue = groupState.get(groupId); - float afterValue = Math.min(value, beforeValue); - groupState.set(groupId, afterValue); - } - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/GroupConcat.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/GroupConcat.java deleted file mode 100644 index 75165ecac..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/GroupConcat.java +++ /dev/null @@ -1,300 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.utils.GeneralUtil; -import com.alibaba.polardbx.common.utils.TStringUtil; -import com.alibaba.polardbx.net.util.CharsetUtil; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.CursorMeta; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.core.row.Row; -import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; -import com.alibaba.polardbx.optimizer.utils.FunctionUtils; - -import java.io.UnsupportedEncodingException; -import java.nio.charset.StandardCharsets; -import java.sql.Timestamp; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Locale; -import java.util.Set; - -/** - * Created by zilin.zl on 18/10/17. 
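Note: Float2FloatMax/Float2FloatMin above keep one primitive slot per group plus a null flag, so the hot accumulate path never boxes; MIN only overrides the merge step. A compilable sketch of that state layout, with two parallel arrays approximating NullableFloatGroupState:

    import java.util.Arrays;

    // Approximation of NullableFloatGroupState: parallel value/null arrays.
    final class FloatGroupState {
        private final float[] values;
        private final boolean[] isNull;

        FloatGroupState(int capacity) {
            values = new float[capacity];
            isNull = new boolean[capacity];
            Arrays.fill(isNull, true); // every group starts as SQL NULL
        }

        void accumulateMax(int groupId, float value) {
            if (isNull[groupId]) {
                values[groupId] = value;   // first non-null value wins outright
                isNull[groupId] = false;
            } else {
                values[groupId] = Math.max(values[groupId], value);
            }
        }

        Float get(int groupId) {
            return isNull[groupId] ? null : values[groupId];
        }
    }

MIN is the same class with Math.min in the merge step, which is exactly how Float2FloatMin subclasses Float2FloatMax.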
- */ -public class GroupConcat extends AbstractAggregator { - - private HashMap stringBuilders = new HashMap<>(); - private String separator = ","; - private List aggOrderIndexList; - private HashMap> tempLists = new HashMap<>(); - private int maxLen = 1024; - private String encoding = "utf8"; - private Set groupHasAppendToString = new HashSet<>(); - private int[] inputColumnIndexes; - private List isAscList; - private MemoryAllocatorCtx memoryAllocator; - - public GroupConcat(int[] aggTargetIndexes, boolean isDistinct, String separator, - List aggOrderIndexList, List isAscList, int maxLen, String encoding, - MemoryAllocatorCtx allocator, int filterArg, DataType outputType) { - super(aggTargetIndexes, isDistinct, null, outputType, filterArg); - this.aggOrderIndexList = aggOrderIndexList; - this.memoryAllocator = allocator; - if (separator != null) { - this.separator = separator; - } - this.isAscList = isAscList; - this.maxLen = maxLen; - if (encoding != null && encoding.length() != 0) { - this.encoding = encoding; - } - - inputColumnIndexes = buildInputColumnIndexes(); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Chunk.ChunkRow row = chunk.rowAt(position); - Row arrayRow = FunctionUtils.fromIRowSetToArrayRowSet(row); - boolean containsNull = Arrays.stream(aggIndexInChunk).anyMatch(i -> arrayRow.getObject(i) == null); - if (containsNull) { - return; - } - if (needSort()) { - memoryAllocator.allocateReservedMemory(arrayRow); - ArrayList tempList = tempLists.computeIfAbsent(groupId, k -> new ArrayList<>()); - tempList.add(arrayRow); - } else { - appendToStringBuilder(groupId, row); - } - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - if (needSort()) { - sortTempList(groupId); - tempListAppendToStringBuilder(groupId); - } - StringBuilder stringBuilder = stringBuilders.get(groupId); - if (stringBuilder == null) { - bb.appendNull(); - return; - } - String result = stringBuilder.toString(); - if (result.length() > maxLen) { - bb.writeByteArray(result.substring(0, maxLen).getBytes()); - } else { - bb.writeByteArray(result.getBytes()); - } - } - - @Override - public void open(int capacity) { - resetToInitValue(0); - } - - @Override - public void resetToInitValue(int groupId) { - stringBuilders = new HashMap<>(); - groupHasAppendToString = new HashSet<>(); - for (ArrayList list : tempLists.values()) { - list.forEach(t -> memoryAllocator.releaseReservedMemory(t.estimateSize(), true)); - } - tempLists = new HashMap<>(); - } - - private boolean needSort() { - return aggOrderIndexList != null && aggOrderIndexList.size() != 0; - } - - private void sortTempList(int groupId) { - ArrayList tempList = tempLists.get(groupId); - if (tempList == null) { - return; - } - tempList.sort(new Comparator() { - - @Override - public int compare(Row r1, Row r2) { - if (r1 == null || r2 == null) { - GeneralUtil.nestedException("Memory is insufficient to execute this query"); - } - for (int j = 0; j < aggOrderIndexList.size(); j++) { - Integer i = aggOrderIndexList.get(j); - int v = isAscList.get(j) ? 
1 : -1; - if (r1.getObject(i) == null) { - return v; - } - if (r2.getObject(i) == null) { - return -v; - } - DataType dataType = DataTypeUtil.getTypeOfObject(r1.getObject(i)); - int result = dataType.compare(r1.getObject(i), r2.getObject(i)); - if (result == 0) { - continue; - } else { - return v * result; - } - } - return 0; // equal - } - }); - } - - private void tempListAppendToStringBuilder(int groupId) { - ArrayList tempList = tempLists.get(groupId); - if (tempList == null) { - return; - } - for (int i = 0; i < tempList.size(); i++) { - Row row = tempList.get(i); - if (row == null) { - GeneralUtil.nestedException("Memory is insufficient to execute this query"); - } - appendToStringBuilder(groupId, row); - } - } - - private void appendToStringBuilder(int groupId, Row row) { - StringBuilder stringBuilder = stringBuilders.get(groupId); - if (stringBuilder == null) { - stringBuilder = new StringBuilder(); - stringBuilders.put(groupId, stringBuilder); - } - if (stringBuilder.length() > maxLen) { - stringBuilder.setLength(maxLen); - return; - } - if (groupHasAppendToString.contains(groupId)) { - stringBuilder.append(separator); - } else { - groupHasAppendToString.add(groupId); - } - for (int targetIndex : aggIndexInChunk) { - Object o = row.getObject(targetIndex); - DataType dataType = DataTypeUtil.getTypeOfObject(o); - if (DataTypeUtil.equalsSemantically(dataType, DataTypes.BytesType)) { - CursorMeta meta = row.getParentCursorMeta(); - if (meta != null - && DataTypeUtil - .equalsSemantically(meta.getColumnMeta(targetIndex).getDataType(), DataTypes.BitType)) { - stringBuilder.append(row.getLong(targetIndex)); - } else { // Binary Type - stringBuilder.append(encodeByteArray(row.getBytes(targetIndex))); - } - } else if (DataTypeUtil.anyMatchSemantically(dataType, DataTypes.TimestampType, DataTypes.DatetimeType)) { - String timeStampString = timeStampToString(DataTypes.TimestampType.convertFrom(o)); - int size = timeStampString.length(); - String padding = "000000000"; - CursorMeta meta = row.getParentCursorMeta(); - DataType columnType = null; - if (meta != null && meta.getColumnMeta(targetIndex) != null) { - columnType = meta.getColumnMeta(targetIndex).getDataType(); - } - - if (DataTypeUtil.anyMatchSemantically(columnType, DataTypes.TimestampType, DataTypes.DatetimeType)) { - size = (int) meta.getColumnMeta(targetIndex).getField().getLength(); - } - timeStampString += padding; - stringBuilder.append(timeStampString.substring(0, size)); - } else if (DataTypeUtil.equalsSemantically(dataType, DataTypes.BooleanType)) { - stringBuilder.append(booleanToString(row.getBoolean(targetIndex))); - } else { - stringBuilder.append(DataTypes.StringType.convertFrom(o)); - } - } - } - - private String encodeByteArray(byte[] b) { - String s; - try { - s = new String(b, CharsetUtil.getJavaCharset(encoding)); - } catch (UnsupportedEncodingException e) { - s = new String(b, StandardCharsets.UTF_8); - } - return s; - } - - private String timeStampToString(Timestamp timestamp) { - SimpleDateFormat dateFormatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", Locale.US); //$NON-NLS-1$ - StringBuffer buf = new StringBuffer(); - buf.append(dateFormatter.format(timestamp)); - int nanos = timestamp.getNanos(); - buf.append('.'); - buf.append(TStringUtil.formatNanos(nanos, true)); - return buf.toString(); - } - - private String booleanToString(Boolean b) { - return b ? 
"1" : "0"; - } - - @Override - public int[] getInputColumnIndexes() { - return inputColumnIndexes; - } - - private int[] buildInputColumnIndexes() { - int[] inputColumnIndexes; - if (needSort()) { - inputColumnIndexes = new int[originTargetIndexes.length + aggOrderIndexList.size()]; - for (int i = 0; i < originTargetIndexes.length; i++) { - inputColumnIndexes[i] = originTargetIndexes[i]; - } - for (int i = 0; i < aggOrderIndexList.size(); i++) { - inputColumnIndexes[originTargetIndexes.length + i] = aggOrderIndexList.get(i); - } - } else { - inputColumnIndexes = originTargetIndexes; - } - return inputColumnIndexes; - } - - public static int[] toIntArray(List integers) { - int[] result = new int[integers.size()]; - for (int i = 0; i < integers.size(); i++) { - result[i] = integers.get(i); - } - return result; - } - - @Override - public void setAggIndexInChunk(int[] aggIndexInChunk) { - super.setAggIndexInChunk(aggIndexInChunk); - List newAggOrderIndexList; - if (needSort()) { - newAggOrderIndexList = new ArrayList<>(); - for (int i = 0; i < aggOrderIndexList.size(); i++) { - newAggOrderIndexList.add(aggIndexInChunk.length + i); - } - } else { - newAggOrderIndexList = null; - } - aggOrderIndexList = newAggOrderIndexList; - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Int2DecimalAvg.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Int2DecimalAvg.java deleted file mode 100644 index 40dd7d152..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Int2DecimalAvg.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.Decimal; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -public class Int2DecimalAvg extends SpecificType2DecimalAvg { - public Int2DecimalAvg(int index, boolean isDistict, DataType inputType, DataType outputType, int filterArg) { - super(index, isDistict, inputType, outputType, filterArg); - } - - @Override - protected Decimal getDecimal(Block block, int position) { - return Decimal.fromLong(block.getInt(position)); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Int2DecimalSum.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Int2DecimalSum.java deleted file mode 100644 index d9d6e6879..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Int2DecimalSum.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -public class Int2DecimalSum extends LittleNum2DecimalSum { - public Int2DecimalSum(int targetIndexes, boolean distinct, DataType inputType, DataType outputType, int filterArg) { - super(targetIndexes, distinct, inputType, outputType, filterArg); - } - - @Override - protected long getLong(Block block, int position) { - return block.getInt(position); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Int2IntMax.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Int2IntMax.java deleted file mode 100644 index 7a589fe88..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Int2IntMax.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.NullableIntegerGroupState; - -public class Int2IntMax extends AbstractAggregator { - - protected NullableIntegerGroupState groupState; - - public Int2IntMax(int index, DataType inputType, DataType outputType, int filterArg) { - super(new int[] {index}, false, new DataType[] {inputType}, outputType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new NullableIntegerGroupState(capacity); - } - - @Override - public void appendInitValue() { - groupState.appendNull(); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.setNull(groupId); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final int value = block.getInt(position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, value); - } else { - int beforeValue = groupState.get(groupId); - int afterValue = Math.max(value, beforeValue); - groupState.set(groupId, afterValue); - } - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - if (groupState.isNull(groupId)) { - bb.appendNull(); - } else { - bb.writeInt(groupState.get(groupId)); - } - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} - diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Int2IntMin.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Int2IntMin.java deleted file mode 100644 index 93cf3751e..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Int2IntMin.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -public class Int2IntMin extends Int2IntMax { - public Int2IntMin(int index, DataType inputType, DataType outputType, int filterArg) { - super(index, inputType, outputType, filterArg); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final int value = block.getInt(position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, value); - } else { - int beforeValue = groupState.get(groupId); - int afterValue = Math.min(value, beforeValue); - groupState.set(groupId, afterValue); - } - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Int2UInt64BitAnd.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Int2UInt64BitAnd.java deleted file mode 100644 index d7a8368cb..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Int2UInt64BitAnd.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.UInt64; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.ULongType; - -public class Int2UInt64BitAnd extends SpecificType2UInt64BitAnd { - public Int2UInt64BitAnd(int index, DataType inputTypes, int filterArg) { - super(index, inputTypes, filterArg); - } - - @Override - protected UInt64 getUInt64(Block block, int position) { - return ULongType.instance.convertFrom(block.getInt(position)); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Int2UInt64BitOr.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Int2UInt64BitOr.java deleted file mode 100644 index 139b26396..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Int2UInt64BitOr.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
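Note: Int2UInt64BitAnd and its OR/XOR siblings all reduce to "widen the signed input to an unsigned 64-bit value, then fold with one bit operator". Java longs can model this directly because the bit operators are sign-agnostic; only the initial value differs per aggregate. A sketch, assuming MySQL's empty-group results (all ones for BIT_AND, zero for BIT_OR/BIT_XOR) and sign-extension as the widening rule; the deleted classes delegate both to the SpecificType2UInt64Bit* bases and ULongType, which are not shown here:

    import java.util.function.LongBinaryOperator;

    final class BitAggregate {
        private long state;
        private final LongBinaryOperator op;

        private BitAggregate(long initialValue, LongBinaryOperator op) {
            this.state = initialValue;
            this.op = op;
        }

        static BitAggregate bitAnd() { return new BitAggregate(~0L, (a, b) -> a & b); }
        static BitAggregate bitOr()  { return new BitAggregate(0L,  (a, b) -> a | b); }
        static BitAggregate bitXor() { return new BitAggregate(0L,  (a, b) -> a ^ b); }

        void accumulate(int value) {
            // Sign-extend, then reinterpret the 64 bits as unsigned
            // (assumed to match what ULongType.convertFrom does).
            state = op.applyAsLong(state, (long) value);
        }

        String result() {
            return Long.toUnsignedString(state); // render as unsigned, like UInt64
        }
    }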
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.UInt64; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.ULongType; - -public class Int2UInt64BitOr extends SpecificType2UInt64BitOr { - public Int2UInt64BitOr(int index, DataType inputTypes, int filterArg) { - super(index, inputTypes, filterArg); - } - - @Override - protected UInt64 getUInt64(Block block, int position) { - return ULongType.instance.convertFrom(block.getInt(position)); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Int2UInt64BitXor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Int2UInt64BitXor.java deleted file mode 100644 index 8614bdeaa..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Int2UInt64BitXor.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.UInt64; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.ULongType; - -public class Int2UInt64BitXor extends SpecificType2UInt64BitXor { - public Int2UInt64BitXor(int index, DataType inputTypes, int filterArg) { - super(index, inputTypes, filterArg); - } - - @Override - protected UInt64 getUInt64(Block block, int position) { - return ULongType.instance.convertFrom(block.getInt(position)); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/InternalFirstValue.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/InternalFirstValue.java deleted file mode 100644 index 7da6548aa..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/InternalFirstValue.java +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.BlockBuilders; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.executor.chunk.NullBlock; -import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; - -import java.util.ArrayList; -import java.util.List; - -/** - * Created by chuanqin on 18/1/22. - */ -public class InternalFirstValue extends AbstractAggregator { - - private TypedBlockBuffer typedBlockBuffer; - - private ExecutionContext context; - - private static final int SEGMENT_SIZE = 1024; - - /** - * use fixScalarAggValue with not null first value to fix append only typedBlockBuffer - * - *
- * example:
- *
- * [table t data]:
- * ------------
- * | id | age |
- * -----+------
- * | 1 | 9 |
- * ------------
- *
- * [sql]:
- * select max(id), age from t;
- *
- * handle the special case:
- * 1. partition table with only one row data
- * 2. scalar agg with first value
- * 3. two phase agg were generated
- *
- * There will be possible to produce follow unexpected result for two phase scalar first value agg :
- * ------------
- * | id | age |
- * -----+------
- * | 1 | NULL|
- * ------------
- *
- * so we need non-null first value to fix scalar agg - */ - private Object fixScalarAggValue; - - public InternalFirstValue(int index, DataType outType, int filterArg, ExecutionContext context) { - super(new int[] {index}, false, new DataType[] {outType}, outType, filterArg); - this.context = context; - this.typedBlockBuffer = new TypedBlockBuffer(outType, SEGMENT_SIZE); - } - - @Override - public void open(int capacity) { - this.typedBlockBuffer = new TypedBlockBuffer(returnType, SEGMENT_SIZE); - } - - @Override - public void appendInitValue() { - // delay append value to accumulate, because first_value can only append once - } - - @Override - public void resetToInitValue(int groupId) { - this.typedBlockBuffer = new TypedBlockBuffer(returnType, SEGMENT_SIZE); - fixScalarAggValue = null; - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (groupId == 0 && fixScalarAggValue == null && !block.isNull(position)) { - fixScalarAggValue = block.getObject(position); - } - if (groupId < typedBlockBuffer.size()) { - // pass - } else if (groupId == typedBlockBuffer.size()) { - // do append value here - typedBlockBuffer.appendValue(block, position); - } else { - throw new AssertionError("impossible case"); - } - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - Object value; - if (typedBlockBuffer.size() == 0 && groupId == 0) { - /* - * This line handles a very special case: this IS a scalar agg and there IS NO input rows. - * In this case `appendInitValue()` was called but `accumulate()` was not, which leads to - * an empty buffer. We put a NULL here to make it behave correctly. - */ - typedBlockBuffer.appendValue(new NullBlock(1), 0); - value = typedBlockBuffer.get(groupId); - } else if (typedBlockBuffer.size() == 1) { - value = fixScalarAggValue; - } else { - value = typedBlockBuffer.get(groupId); - } - bb.writeObject(value); - } - - @Override - public long estimateSize() { - return typedBlockBuffer.estimateSize(); - } - - public class TypedBlockBuffer { - - private BlockBuilder blockBuilder; - private final int blockSize; - - private int currentSize; - private final List blocks = new ArrayList<>(); - private long estimateSize = 0; - - private TypedBlockBuffer(DataType dataType, int blockSize) { - this.blockBuilder = BlockBuilders.create(dataType, context); - this.blockSize = blockSize; - } - - public Object get(int position) { - return blockOf(position).getObject(offsetOf(position)); - } - - public void appendValue(Block block, int position) { - // Block fulfilled before appending - if (currentSize == blockSize) { - Block buildingBlock = getBuildingBlock(); - blocks.add(buildingBlock); - estimateSize += buildingBlock.estimateSize(); - blockBuilder = blockBuilder.newBlockBuilder(); - currentSize = 0; - } - - block.writePositionTo(position, blockBuilder); - currentSize++; - } - - private Block blockOf(int position) { - int chunkId = position / blockSize; - if (chunkId < blocks.size()) { - return blocks.get(chunkId); - } else { - return getBuildingBlock(); - } - } - - public int size() { - return currentSize + blocks.size() * blockSize; - } - - private int offsetOf(int position) { - return position % blockSize; - } - - private Block getBuildingBlock() { - return blockBuilder.build(); - } - - public long estimateSize() { - return estimateSize; - } - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Lag.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Lag.java deleted file mode 100644 index cff70c585..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Lag.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; - -import java.util.HashMap; - -public class Lag extends AbstractAggregator { - protected long offset = 1; - protected Object defaultValue; - protected HashMap indexToValue = new HashMap<>(); - protected long count = 0; - protected long index = 0; - - public Lag(int index, long offset, Object defaultValue, int filterArg) { - super(new int[] {index}, false, null, DataTypes.StringType, filterArg); - if (offset > 0) { - this.offset = offset; - } - this.defaultValue = defaultValue; - } - - @Override - public void accumulate(int groupId, Chunk inputChunk, int position) { - Chunk.ChunkRow row = inputChunk.rowAt(position); - count++; - indexToValue.put(count, row); - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - index++; - if (index - offset > 0) { - Object value = indexToValue.get(index - offset).getObject(aggIndexInChunk[0]); - if (value == null) { - bb.appendNull(); - } else { - bb.writeString(value.toString()); - } - } else if (defaultValue == null) { - bb.appendNull(); - } else { - bb.writeString(defaultValue.toString()); - } - } - - @Override - public void resetToInitValue(int groupId) { - indexToValue = new HashMap<>(); - count = 0L; - index = 0; - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/LastValue.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/LastValue.java deleted file mode 100644 index fc5076eb8..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/LastValue.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
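Note: InternalFirstValue's TypedBlockBuffer, deleted above, is an append-only columnar buffer: values accumulate in a BlockBuilder, and every SEGMENT_SIZE (1024) appends the builder is sealed into an immutable block, so get(position) is a segment lookup plus an offset. A generic sketch of that segmentation scheme, with ArrayList segments standing in for sealed blocks:

    import java.util.ArrayList;
    import java.util.List;

    // Append-only buffer that seals values into fixed-size segments,
    // mirroring TypedBlockBuffer's sealed-blocks + building-builder split.
    final class SegmentedBuffer<T> {
        private final int segmentSize;
        private final List<List<T>> sealed = new ArrayList<>();
        private List<T> building = new ArrayList<>();

        SegmentedBuffer(int segmentSize) {
            this.segmentSize = segmentSize;
        }

        void append(T value) {
            if (building.size() == segmentSize) {
                sealed.add(building);        // seal the full segment first
                building = new ArrayList<>();
            }
            building.add(value);
        }

        T get(int position) {
            int segment = position / segmentSize;
            int offset = position % segmentSize;
            List<T> source = segment < sealed.size() ? sealed.get(segment) : building;
            return source.get(offset);
        }

        int size() {
            return sealed.size() * segmentSize + building.size();
        }
    }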
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; - -/** - * Return the last value of the window - * - * @author hongxi.chx - */ -public class LastValue extends AbstractAggregator { - private Object outputValue; - - public LastValue(int index, int filterArg) { - super(new int[] {index}, false, null, null, filterArg); - } - - @Override - public void accumulate(int groupId, Chunk inputChunk, int position) { - outputValue = inputChunk.getBlock(aggIndexInChunk[0]).getObject(position); - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - bb.writeObject(outputValue); - } - - @Override - public void resetToInitValue(int groupId) { - outputValue = null; - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Lead.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Lead.java deleted file mode 100644 index 152eeadf7..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Lead.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.BlockBuilder; - -public class Lead extends Lag { - - public Lead(int index, long offset, Object defaultLagValue, int filterArg) { - super(index, offset, defaultLagValue, filterArg); - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - index++; - if (index + offset > count) { - if (defaultValue == null) { - bb.appendNull(); - } else { - bb.writeString(defaultValue.toString()); - } - } else { - Object value = indexToValue.get(index + offset).getObject(aggIndexInChunk[0]); - if (value == null) { - bb.appendNull(); - } else { - bb.writeString(value.toString()); - } - } - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/LittleNum2DecimalSum.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/LittleNum2DecimalSum.java deleted file mode 100644 index 5a2581ed6..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/LittleNum2DecimalSum.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
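Note: Lag and Lead above share one buffer of all partition rows in arrival order; they differ only in whether the probe looks offset rows behind or ahead, falling back to the default value when the probe leaves the partition. A compact 0-based sketch of both lookups (the deleted classes use a 1-based HashMap and a running cursor, but the arithmetic is the same):

    import java.util.ArrayList;
    import java.util.List;

    final class LagLead {
        private final List<String> rows = new ArrayList<>();

        void accumulate(String value) {
            rows.add(value); // buffer the whole partition, as the deleted Lag does
        }

        // LAG(value, offset, dflt): the value from `offset` rows before `index`.
        String lag(int index, int offset, String dflt) {
            int probe = index - offset;
            return probe >= 0 ? rows.get(probe) : dflt;
        }

        // LEAD(value, offset, dflt): the value from `offset` rows after `index`.
        String lead(int index, int offset, String dflt) {
            int probe = index + offset;
            return probe < rows.size() ? rows.get(probe) : dflt;
        }
    }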
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.Decimal; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.NullableLongGroupState; - -import java.util.HashMap; - -public abstract class LittleNum2DecimalSum extends AbstractAggregator { - private NullableLongGroupState partialGroupState; - private HashMap overflowToDecimal; - - public LittleNum2DecimalSum(int targetIndexes, boolean distinct, DataType inputType, DataType outputType, - int filterArg) { - super(new int[] {targetIndexes}, distinct, new DataType[] {inputType}, outputType, filterArg); - } - - @Override - public void open(int capacity) { - partialGroupState = new NullableLongGroupState(capacity); - overflowToDecimal = null; - } - - @Override - public void accumulate(int groupId, Chunk inputChunk, int position) { - Block block = inputChunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final long value = getLong(block, position); - if (partialGroupState.isNull(groupId)) { - partialGroupState.set(groupId, value); - } else { - long oldValue = partialGroupState.get(groupId); - long sumValue = oldValue + value; - // HD 2-12 Overflow iff both arguments have the opposite sign of the result - if (((oldValue ^ sumValue) & (value ^ sumValue)) < 0) { - if (overflowToDecimal == null) { - overflowToDecimal = new HashMap<>(); - } - Decimal previousSum = overflowToDecimal.getOrDefault(groupId, Decimal.ZERO); - overflowToDecimal.put(groupId, - previousSum.add(Decimal.fromLong(oldValue)).add(Decimal.fromLong(value))); - partialGroupState.set(groupId, 0L); - } else { - partialGroupState.set(groupId, sumValue); - } - } - } - - abstract long getLong(Block block, int position); - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - if (partialGroupState.isNull(groupId)) { - bb.appendNull(); - } else { - if (overflowToDecimal == null || !overflowToDecimal.containsKey(groupId)) { - bb.writeDecimal(Decimal.fromLong(partialGroupState.get(groupId))); - } else { - bb.writeDecimal(overflowToDecimal.get(groupId).add(Decimal.fromLong(partialGroupState.get(groupId)))); - } - } - } - - @Override - public void appendInitValue() { - partialGroupState.appendNull(); - } - - @Override - public void resetToInitValue(int groupId) { - partialGroupState.setNull(groupId); - if (overflowToDecimal != null) { - overflowToDecimal.remove(groupId); - } - } - - @Override - public long estimateSize() { - return partialGroupState.estimateSize() + (overflowToDecimal == null ? 
0 : - overflowToDecimal.size() * (Integer.BYTES + 29)); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/LittleNum2DoubleSum.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/LittleNum2DoubleSum.java deleted file mode 100644 index 533bbf3c7..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/LittleNum2DoubleSum.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.NullableDoubleGroupState; - -public abstract class LittleNum2DoubleSum extends AbstractAggregator { - private NullableDoubleGroupState groupState; - - public LittleNum2DoubleSum(int targetIndexes, boolean distinct, DataType inputType, DataType outputType, - int filterArg) { - super(new int[] {targetIndexes}, distinct, new DataType[] {inputType}, outputType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new NullableDoubleGroupState(capacity); - } - - @Override - public void accumulate(int groupId, Chunk inputChunk, int position) { - Block block = inputChunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final double value = getDouble(block, position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, value); - } else { - double oldValue = groupState.get(groupId); - groupState.set(groupId, value + oldValue); - } - } - - abstract double getDouble(Block block, int position); - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - if (groupState.isNull(groupId)) { - bb.appendNull(); - } else { - double sum = groupState.get(groupId); - bb.writeDouble(sum); - } - } - - @Override - public void appendInitValue() { - groupState.appendNull(); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.setNull(groupId); - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2DecimalAvg.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2DecimalAvg.java deleted file mode 100644 index e2441d023..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2DecimalAvg.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in 
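Note: the sum in LittleNum2DecimalSum above stays on a primitive long and only spills into a per-group Decimal map when addition overflows; the check ((oldValue ^ sum) & (value ^ sum)) < 0 is the classic test from Hacker's Delight 2-12: overflow occurred iff both operands have the sign opposite to the result. A standalone single-group demonstration, with BigDecimal standing in for the Decimal spill state:

    import java.math.BigDecimal;

    final class OverflowAwareSum {
        private long longSum;
        private BigDecimal spill = BigDecimal.ZERO; // stand-in for the Decimal side map

        void add(long value) {
            long sum = longSum + value;
            // HD 2-12: overflow iff both operands differ in sign from the result.
            if (((longSum ^ sum) & (value ^ sum)) < 0) {
                spill = spill.add(BigDecimal.valueOf(longSum)).add(BigDecimal.valueOf(value));
                longSum = 0L; // restart the fast path, exactly as the deleted class does
            } else {
                longSum = sum;
            }
        }

        BigDecimal result() {
            return spill.add(BigDecimal.valueOf(longSum));
        }

        public static void main(String[] args) {
            OverflowAwareSum s = new OverflowAwareSum();
            s.add(Long.MAX_VALUE);
            s.add(Long.MAX_VALUE); // overflows the long fast path
            System.out.println(s.result()); // 18446744073709551614
        }
    }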
compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.Decimal; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -public class Long2DecimalAvg extends SpecificType2DecimalAvg { - public Long2DecimalAvg(int index, boolean isDistict, DataType inputType, DataType outputType, int filterArg) { - super(index, isDistict, inputType, outputType, filterArg); - } - - @Override - protected Decimal getDecimal(Block block, int position) { - return Decimal.fromLong(block.getLong(position)); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2DecimalSum.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2DecimalSum.java deleted file mode 100644 index 06f0fe42f..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2DecimalSum.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -public class Long2DecimalSum extends LittleNum2DecimalSum { - public Long2DecimalSum(int targetIndexes, boolean distinct, DataType inputType, DataType outputType, - int filterArg) { - super(targetIndexes, distinct, inputType, outputType, filterArg); - } - - @Override - protected long getLong(Block block, int position) { - return block.getLong(position); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2LongMax.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2LongMax.java deleted file mode 100644 index 5f31186f5..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2LongMax.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.NullableLongGroupState; - -public class Long2LongMax extends AbstractAggregator { - protected NullableLongGroupState groupState; - - public Long2LongMax(int index, DataType inputType, DataType outputType, int filterArg) { - super(new int[] {index}, false, new DataType[] {inputType}, outputType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new NullableLongGroupState(capacity); - } - - @Override - public void appendInitValue() { - groupState.appendNull(); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.setNull(groupId); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final long value = block.getLong(position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, value); - } else { - long beforeValue = groupState.get(groupId); - long afterValue = Math.max(value, beforeValue); - groupState.set(groupId, afterValue); - } - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - if (groupState.isNull(groupId)) { - bb.appendNull(); - } else { - bb.writeLong(groupState.get(groupId)); - } - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2LongMin.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2LongMin.java deleted file mode 100644 index 67805e95b..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2LongMin.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -public class Long2LongMin extends Long2LongMax { - public Long2LongMin(int index, DataType inputType, DataType outputType, int filterArg) { - super(index, inputType, outputType, filterArg); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final long value = block.getLong(position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, value); - } else { - long beforeValue = groupState.get(groupId); - long afterValue = Math.min(value, beforeValue); - groupState.set(groupId, afterValue); - } - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2LongSum0.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2LongSum0.java deleted file mode 100644 index 4143190fd..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2LongSum0.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.LongGroupState; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -public class Long2LongSum0 extends AbstractAggregator { - private LongGroupState groupState; - - public Long2LongSum0(int targetIndexes, boolean distinct, DataType inputType, DataType outputType, int filterArg) { - super(new int[] {targetIndexes}, distinct, new DataType[] {inputType}, outputType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new LongGroupState(capacity); - } - - @Override - public void appendInitValue() { - groupState.append(0L); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - long value = block.getLong(position); - long beforeValue = groupState.get(groupId); - long afterValue = beforeValue + value; - groupState.set(groupId, afterValue); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.set(groupId, 0L); - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - bb.writeLong(groupState.get(groupId)); - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2UInt64BitAnd.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2UInt64BitAnd.java deleted file mode 100644 index c97688671..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2UInt64BitAnd.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
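Note: Long2LongSum0 above differs from a plain SUM only in its identity element: the group state starts at 0 instead of NULL, so an empty group returns 0. This appears to be the SUM0 variant Calcite-style planners emit where a NULL partial sum would be wrong. The whole distinction fits in a few lines:

    import java.util.Arrays;
    import java.util.OptionalLong;

    final class Sums {
        // SUM: a group with no non-null input yields SQL NULL.
        static OptionalLong sum(long[] values) {
            return values.length == 0
                ? OptionalLong.empty()
                : OptionalLong.of(Arrays.stream(values).sum());
        }

        // SUM0, as in the deleted Long2LongSum0: the state starts at 0,
        // so an empty group yields 0 rather than NULL.
        static long sum0(long[] values) {
            return Arrays.stream(values).sum();
        }
    }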
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.UInt64; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.ULongType; - -public class Long2UInt64BitAnd extends SpecificType2UInt64BitAnd { - public Long2UInt64BitAnd(int index, DataType inputTypes, int filterArg) { - super(index, inputTypes, filterArg); - } - - @Override - protected UInt64 getUInt64(Block block, int position) { - return ULongType.instance.convertFrom(block.getLong(position)); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2UInt64BitOr.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2UInt64BitOr.java deleted file mode 100644 index d3c802cbb..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2UInt64BitOr.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.UInt64; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.ULongType; - -public class Long2UInt64BitOr extends SpecificType2UInt64BitOr { - public Long2UInt64BitOr(int index, DataType inputTypes, int filterArg) { - super(index, inputTypes, filterArg); - } - - @Override - protected UInt64 getUInt64(Block block, int position) { - return ULongType.instance.convertFrom(block.getLong(position)); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2UInt64BitXor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2UInt64BitXor.java deleted file mode 100644 index 7e9dd5f52..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Long2UInt64BitXor.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.UInt64; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.ULongType; - -public class Long2UInt64BitXor extends SpecificType2UInt64BitXor { - public Long2UInt64BitXor(int index, DataType inputTypes, int filterArg) { - super(index, inputTypes, filterArg); - } - - @Override - protected UInt64 getUInt64(Block block, int position) { - return ULongType.instance.convertFrom(block.getLong(position)); - } -} \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Max.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Max.java deleted file mode 100644 index 230a8213b..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Max.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.NullableObjectGroupState; - -/** - * Created by chuanqin on 17/8/11. - */ -public class Max extends AbstractAggregator { - protected NullableObjectGroupState groupState; - - public Max(int targetIndexes, DataType inputType, DataType returnType, int filterArg) { - super(new int[] {targetIndexes}, false, new DataType[] {inputType}, returnType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new NullableObjectGroupState(capacity); - } - - @Override - public void accumulate(int groupId, Chunk inputChunk, int position) { - Block block = inputChunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final Object value = block.getObject(position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, value); - } else { - Object oldValue = groupState.get(groupId); - groupState.set(groupId, returnType.compare(oldValue, value) > 0 ? 
oldValue : value); - } - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - if (groupState.isNull(groupId)) { - bb.appendNull(); - } else { - Object max = groupState.get(groupId); - bb.writeObject(max); - } - } - - @Override - public void appendInitValue() { - groupState.appendNull(); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.setNull(groupId); - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Min.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Min.java deleted file mode 100644 index ebd32d013..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Min.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -/** - * Created by chuanqin on 17/8/11. - */ -public class Min extends Max { - - public Min(int targetIndexes, DataType inputType, DataType returnType, int filterArg) { - super(targetIndexes, inputType, returnType, filterArg); - } - - @Override - public void accumulate(int groupId, Chunk inputChunk, int position) { - Block block = inputChunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final Object value = block.getObject(position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, value); - } else { - Object oldValue = groupState.get(groupId); - groupState.set(groupId, returnType.compare(oldValue, value) < 0 ? oldValue : value); - } - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/NThValue.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/NThValue.java deleted file mode 100644 index 70bc90a5c..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/NThValue.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
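The Max/Min pair deleted above shares one pattern: a boxed per-group state plus a fold through DataType.compare, where Min only flips the comparison. A minimal standalone sketch of that fold, with a plain java.util.Comparator standing in for the engine's DataType (class and method names here are illustrative, not PolarDB-X API):

    import java.util.Comparator;

    // Standalone illustration of the comparator-driven max/min fold above.
    final class MinMaxSketch<T> {
        private final Comparator<T> cmp;
        private final boolean isMax;
        private T state; // null means "no non-null input seen yet"

        MinMaxSketch(Comparator<T> cmp, boolean isMax) {
            this.cmp = cmp;
            this.isMax = isMax;
        }

        void accumulate(T value) {
            if (value == null) {
                return; // NULL inputs are skipped, as in the aggregators above
            }
            if (state == null) {
                state = value;
            } else {
                int c = cmp.compare(state, value);
                // keep the old value when it already wins the comparison
                state = (isMax ? c > 0 : c < 0) ? state : value;
            }
        }

        T result() {
            return state; // null when every input was NULL
        }
    }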
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; - -public class NThValue extends AbstractAggregator { - private Object outputValue = null; - private long targetPosition; - private long currentPosition = 0; - - public NThValue(int index, long targetPosition, int filterArg) { - super(new int[] {index}, false, null, null, filterArg); - this.targetPosition = targetPosition; - } - - @Override - public void accumulate(int groupId, Chunk inputChunk, int position) { - if (currentPosition == targetPosition) { - return; - } - if (currentPosition < targetPosition) { - currentPosition++; - } - if (currentPosition == targetPosition) { - outputValue = inputChunk.getBlock(aggIndexInChunk[0]).getObject(position); - } - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - bb.writeObject(outputValue); - } - - @Override - public void resetToInitValue(int groupId) { - outputValue = null; - currentPosition = 0; - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/NTile.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/NTile.java deleted file mode 100644 index a0adbf686..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/NTile.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; - -/** - * Distribute the rows of the window partition into tile buckets of near-equal size (the NTILE window function) - * - * @author hongxi.chx - */ -public class NTile extends AbstractAggregator { - private long tile; - private long count; - private long currentPosition = 0; - private Long largerBucketNum = null; - private long elementInLargerBucket = 0; - private long elementInNormalBucket = 0; - - public NTile(long tile, int filterArg) { - super(new int[0], false, null, DataTypes.LongType, filterArg); - this.tile = tile; - } - - @Override - public void accumulate(int groupId, Chunk inputChunk, int position) { - count++; - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - currentPosition++; - if (largerBucketNum == null) { - largerBucketNum = count - tile * (count / tile); - elementInLargerBucket = largerBucketNum == 0 ? 
count / tile : count / tile + 1; - elementInNormalBucket = count / tile; - } - if ((currentPosition - 1) < largerBucketNum * elementInLargerBucket) { - bb.writeLong((currentPosition - 1) / elementInLargerBucket + 1); - } else { - long restInNormalBucket = currentPosition - 1 - (largerBucketNum * elementInLargerBucket); - bb.writeLong(restInNormalBucket / elementInNormalBucket + largerBucketNum + 1); - } - } - - @Override - public void resetToInitValue(int groupId) { - count = 0L; - currentPosition = 0; - largerBucketNum = null; - elementInLargerBucket = 0; - elementInNormalBucket = 0; - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/OtherType2UInt64BitAnd.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/OtherType2UInt64BitAnd.java deleted file mode 100644 index d1ff6c99d..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/OtherType2UInt64BitAnd.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.UInt64; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.ULongType; - -public class OtherType2UInt64BitAnd extends SpecificType2UInt64BitAnd { - public OtherType2UInt64BitAnd(int index, DataType inputTypes, int filterArg) { - super(index, inputTypes, filterArg); - } - - @Override - protected UInt64 getUInt64(Block block, int position) { - return ULongType.instance.convertFrom(block.getObject(position)); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/OtherType2UInt64BitOr.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/OtherType2UInt64BitOr.java deleted file mode 100644 index 52fedf257..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/OtherType2UInt64BitOr.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
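The bucket arithmetic inside the NTile deletion above is the standard NTILE split: with count rows and tile buckets, the first count % tile buckets receive one extra row. A self-contained restatement of that math in plain Java (NTileMath and bucket are illustrative names, not part of the executor):

    // First (count % tile) buckets hold ceil(count/tile) rows; the rest hold floor(count/tile).
    // Assumes tile >= 1; rowIndex is 0-based; the returned bucket id is 1-based,
    // matching writeResultTo above (which passes currentPosition - 1).
    final class NTileMath {
        static long bucket(long rowIndex, long count, long tile) {
            long largerBuckets = count % tile;          // == count - tile * (count / tile)
            long largeSize = largerBuckets == 0 ? count / tile : count / tile + 1;
            long normalSize = count / tile;
            if (rowIndex < largerBuckets * largeSize) {
                return rowIndex / largeSize + 1;
            }
            long rest = rowIndex - largerBuckets * largeSize;
            return rest / normalSize + largerBuckets + 1;
        }
    }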
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.UInt64; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.ULongType; - -public class OtherType2UInt64BitOr extends SpecificType2UInt64BitOr { - public OtherType2UInt64BitOr(int index, DataType inputTypes, int filterArg) { - super(index, inputTypes, filterArg); - } - - @Override - protected UInt64 getUInt64(Block block, int position) { - return ULongType.instance.convertFrom(block.getObject(position)); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/OtherType2UInt64BitXor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/OtherType2UInt64BitXor.java deleted file mode 100644 index 984ac18f4..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/OtherType2UInt64BitXor.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.UInt64; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.ULongType; - -public class OtherType2UInt64BitXor extends SpecificType2UInt64BitXor { - public OtherType2UInt64BitXor(int index, DataType inputTypes, int filterArg) { - super(index, inputTypes, filterArg); - } - - @Override - protected UInt64 getUInt64(Block block, int position) { - return ULongType.instance.convertFrom(block.getObject(position)); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/PercentRank.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/PercentRank.java deleted file mode 100644 index 51705e94a..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/PercentRank.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; - -import java.util.List; - -// RANK does not support DISTINCT; its semantics simply do not allow it -public class PercentRank extends CumeDist { - - public PercentRank(int[] index, int filterArg) { - super(index, filterArg); - } - - @Override - public void accumulate(int groupId, Chunk inputChunk, int position) { - count++; - if (aggIndexInChunk.length > 0) { - Chunk.ChunkRow row = inputChunk.rowAt(position); - bufferRows.add(row); - if (!sameRank(lastRow, row)) { - List rankKey = getAggregateKey(row); - lastRow = row; - rowKeysToRank.put(rankKey, count); - } - } - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - if (rowIndex >= bufferRows.size()) { - return; - } - List rowKeys = getAggregateKey(bufferRows.get(rowIndex++)); - Long rank = rowKeysToRank.get(rowKeys); - if (rank == null || rank <= 1) { - bb.writeDouble((double) 0.0); - } else { - bb.writeDouble((double) (rank - 1) / (count - 1)); - } - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Rank.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Rank.java deleted file mode 100644 index b189c4e3b..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Rank.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.core.row.Row; - -import java.util.List; -import java.util.Objects; - -/** - * RANK does not support DISTINCT; its semantics simply do not allow it - */ -public class Rank extends AbstractAggregator { - protected long rank = 0; - protected Long count = 0L; - protected Row lastRow = null; - - public Rank(int[] index, int filterArg) { - super(index != null && index.length > 0 && index[0] >= 0 ? 
index : new int[0], false, null, null, filterArg); - } - - @Override - public void accumulate(int groupId, Chunk inputChunk, int position) { - Chunk.ChunkRow row = inputChunk.rowAt(position); - count++; - if (!sameRank(lastRow, row)) { - rank = count; - lastRow = row; - } - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - bb.writeLong(rank); - } - - @Override - public void resetToInitValue(int groupId) { - count = 0L; - rank = 0L; - lastRow = null; - } - - protected boolean sameRank(Row lastRow, Row row) { - if (lastRow == null) { - return row == null; - } - List lastRowValues = lastRow.getValues(); - List rowValues = row.getValues(); - for (int index : aggIndexInChunk) { - Object o1 = lastRowValues.get(index); - Object o2 = rowValues.get(index); - if (!(Objects.equals(o1, o2))) { - return false; - } - } - return true; - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/RowNumber.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/RowNumber.java deleted file mode 100644 index f01c83342..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/RowNumber.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; - -public class RowNumber extends AbstractAggregator { - private long number = 0; - - public RowNumber(int filterArg) { - super(new int[0], false, null, DataTypes.LongType, filterArg); - } - - @Override - public void accumulate(int groupId, Chunk inputChunk, int position) { - number++; - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - bb.writeLong(number); - } - - @Override - public void resetToInitValue(int groupId) { - number = 0; - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Short2DecimalAvg.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Short2DecimalAvg.java deleted file mode 100644 index 566746f4d..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Short2DecimalAvg.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.Decimal; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -public class Short2DecimalAvg extends SpecificType2DecimalAvg { - public Short2DecimalAvg(int index, boolean isDistinct, DataType inputType, DataType outputType, int filterArg) { - super(index, isDistinct, inputType, outputType, filterArg); - } - - @Override - protected Decimal getDecimal(Block block, int position) { - return Decimal.fromLong(block.getShort(position)); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Short2DecimalSum.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Short2DecimalSum.java deleted file mode 100644 index df489c79e..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Short2DecimalSum.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -public class Short2DecimalSum extends LittleNum2DecimalSum { - public Short2DecimalSum(int targetIndexes, boolean distinct, DataType inputType, DataType outputType, - int filterArg) { - super(targetIndexes, distinct, inputType, outputType, filterArg); - } - - @Override - protected long getLong(Block block, int position) { - return block.getShort(position); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Short2ShortMax.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Short2ShortMax.java deleted file mode 100644 index 6408c978c..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Short2ShortMax.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.NullableShortGroupState; - -public class Short2ShortMax extends AbstractAggregator { - protected NullableShortGroupState groupState; - - public Short2ShortMax(int index, DataType inputType, DataType outputType, int filterArg) { - super(new int[] {index}, false, new DataType[] {inputType}, outputType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new NullableShortGroupState(capacity); - } - - @Override - public void appendInitValue() { - groupState.appendNull(); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.setNull(groupId); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final short value = block.getShort(position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, value); - } else { - short beforeValue = groupState.get(groupId); - short afterValue = value > beforeValue ? value : beforeValue; - groupState.set(groupId, afterValue); - } - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - if (groupState.isNull(groupId)) { - bb.appendNull(); - } else { - bb.writeShort(groupState.get(groupId)); - } - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Short2ShortMin.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Short2ShortMin.java deleted file mode 100644 index 1c4f7aa80..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Short2ShortMin.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -public class Short2ShortMin extends Short2ShortMax { - public Short2ShortMin(int index, DataType inputType, DataType outputType, int filterArg) { - super(index, inputType, outputType, filterArg); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final short value = block.getShort(position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, value); - } else { - short beforeValue = groupState.get(groupId); - short afterValue = value < beforeValue ? value : beforeValue; - groupState.set(groupId, afterValue); - } - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Short2UInt64BitAnd.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Short2UInt64BitAnd.java deleted file mode 100644 index 173e9d6de..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Short2UInt64BitAnd.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.UInt64; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.ULongType; - -public class Short2UInt64BitAnd extends SpecificType2UInt64BitAnd { - public Short2UInt64BitAnd(int index, DataType inputTypes, int filterArg) { - super(index, inputTypes, filterArg); - } - - @Override - protected UInt64 getUInt64(Block block, int position) { - return ULongType.instance.convertFrom(block.getShort(position)); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Short2UInt64BitOr.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Short2UInt64BitOr.java deleted file mode 100644 index 234ba1b45..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Short2UInt64BitOr.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.UInt64; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.ULongType; - -public class Short2UInt64BitOr extends SpecificType2UInt64BitOr { - public Short2UInt64BitOr(int index, DataType inputTypes, int filterArg) { - super(index, inputTypes, filterArg); - } - - @Override - protected UInt64 getUInt64(Block block, int position) { - return ULongType.instance.convertFrom(block.getShort(position)); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Short2UInt64BitXor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Short2UInt64BitXor.java deleted file mode 100644 index 21cf03170..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Short2UInt64BitXor.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.UInt64; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.ULongType; - -public class Short2UInt64BitXor extends SpecificType2UInt64BitXor { - public Short2UInt64BitXor(int index, DataType inputTypes, int filterArg) { - super(index, inputTypes, filterArg); - } - - @Override - protected UInt64 getUInt64(Block block, int position) { - return ULongType.instance.convertFrom(block.getShort(position)); - } -} \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/SingleValue.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/SingleValue.java deleted file mode 100644 index 4f5c2a08d..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/SingleValue.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
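Across the BIT_AND/BIT_OR/BIT_XOR family deleted above, the per-type subclasses differ only in how they read a value, and the three base classes differ only in the identity element and the fold: BIT_AND starts from UInt64.MAX_UINT64 (all ones), BIT_OR and BIT_XOR from zero. A compact sketch over raw Java longs, which carry the same 64 payload bits as the engine's UInt64 (the enum below is illustrative, not project API):

    // Identity elements and folds for the 64-bit aggregates above, on raw longs.
    // Java's &, |, ^ on long match unsigned 64-bit semantics bit-for-bit.
    enum BitAgg {
        AND(-1L) { long fold(long s, long v) { return s & v; } },  // -1L == 0xFFFF_FFFF_FFFF_FFFF, all ones
        OR(0L)   { long fold(long s, long v) { return s | v; } },
        XOR(0L)  { long fold(long s, long v) { return s ^ v; } };

        final long identity;
        BitAgg(long identity) { this.identity = identity; }
        abstract long fold(long state, long value);
    }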
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.utils.GeneralUtil; -import com.alibaba.polardbx.config.ConfigDataMode; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.datastruct.BooleanSegmentArrayList; -import com.alibaba.polardbx.optimizer.datastruct.ObjectSegmentArrayList; - -/** - * Created by chuanqin on 18/1/22. - */ -public class SingleValue extends AbstractAggregator { - private BooleanSegmentArrayList valueHasAssigned; - private ObjectSegmentArrayList values; - - public SingleValue(int targetIndex, int filterArg) { - super(new int[] {targetIndex}, false, null, null, filterArg); - } - - @Override - public void open(int capacity) { - values = new ObjectSegmentArrayList(capacity); - valueHasAssigned = new BooleanSegmentArrayList(capacity); - } - - @Override - public void appendInitValue() { - valueHasAssigned.add(false); - values.add(null); - } - - @Override - public void resetToInitValue(int groupId) { - valueHasAssigned.set(groupId, false); - values.set(groupId, null); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - if (!valueHasAssigned.get(groupId)) { - valueHasAssigned.set(groupId, true); - values.set(groupId, chunk.getBlock(aggIndexInChunk[0]).getObject(position)); - } else { - if (ConfigDataMode.isFastMock()) { - return; - } - GeneralUtil.nestedException("Subquery returns more than 1 row"); - } - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - bb.writeObject(values.get(groupId)); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/SpecificType2DecimalAvg.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/SpecificType2DecimalAvg.java deleted file mode 100644 index 8d6a8ebc0..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/SpecificType2DecimalAvg.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
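SingleValue, deleted above, is the scalar-subquery guard: it latches the first value seen for a group and fails with "Subquery returns more than 1 row" on any further row (fast-mock mode excepted). A minimal restatement of that contract in plain Java, using IllegalStateException in place of GeneralUtil.nestedException (names here are illustrative):

    // Minimal restatement of the one-row-per-group contract enforced above.
    final class SingleValueSketch {
        private boolean assigned;
        private Object value;

        void accumulate(Object v) {
            if (assigned) {
                throw new IllegalStateException("Subquery returns more than 1 row");
            }
            assigned = true;
            value = v; // the latched value may legitimately be null
        }

        Object result() {
            return value;
        }
    }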
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.Decimal; -import com.alibaba.polardbx.common.datatype.DecimalRoundMod; -import com.alibaba.polardbx.common.datatype.DecimalStructure; -import com.alibaba.polardbx.common.datatype.FastDecimalUtils; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.NullableDecimalLongGroupState; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; - -import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DEFAULT_DIV_PRECISION_INCREMENT; -import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.E_DEC_DIV_ZERO; - -public abstract class SpecificType2DecimalAvg extends AbstractAggregator { - - private NullableDecimalLongGroupState state; - - private Decimal cache; - - /** - * Get the div_precision_increment user variable from the session. - */ - private int divPrecisionIncr; - - public SpecificType2DecimalAvg(int index, boolean isDistinct, DataType inputType, DataType outputType, - int filterArg) { - super(new int[] {index}, isDistinct, new DataType[] {inputType}, outputType, filterArg); - } - - @Override - public void open(int capacity) { - this.state = new NullableDecimalLongGroupState(capacity); - this.divPrecisionIncr = DEFAULT_DIV_PRECISION_INCREMENT; - this.cache = new Decimal(); - } - - @Override - public void appendInitValue() { - state.appendNull(); - } - - @Override - public void resetToInitValue(int groupId) { - state.setNull(groupId); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final Decimal value = getDecimal(block, position); - if (state.isNull(groupId)) { - state.set(groupId, value.copy(), 1); - } else { - Decimal before = state.getDecimal(groupId); - - // avoid allocating memory - before.add(value, cache); - Decimal sum = cache; - cache = before; - long count = state.getLong(groupId) + 1; - state.set(groupId, sum, count); - } - } - - abstract Decimal getDecimal(Block block, int position); - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - if (state.isNull(groupId)) { - bb.appendNull(); - } else { - DecimalStructure rounded = new DecimalStructure(); - DecimalStructure unRounded = new DecimalStructure(); - - // fetch sum & count decimal value - Decimal sum = state.getDecimal(groupId); - Decimal count = Decimal.fromLong(state.getLong(groupId)); - - // do divide - int error = FastDecimalUtils - .div(sum.getDecimalStructure(), count.getDecimalStructure(), unRounded, divPrecisionIncr); - if (error == E_DEC_DIV_ZERO) { - // division by zero, set null - bb.appendNull(); - } else { - // do round - FastDecimalUtils.round(unRounded, rounded, divPrecisionIncr, DecimalRoundMod.HALF_UP); - Decimal avg = new Decimal(rounded); - bb.writeDecimal(avg); - } - } - } - - @Override - public long estimateSize() { - return state.estimateSize(); - } -} - - diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/SpecificType2DoubleAvgV2.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/SpecificType2DoubleAvgV2.java deleted file mode 100644 index b4d55c9c4..000000000 --- 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/SpecificType2DoubleAvgV2.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.NullableDoubleLongGroupState; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; - -public abstract class SpecificType2DoubleAvgV2 extends AbstractAggregator { - private NullableDoubleLongGroupState groupState; - - public SpecificType2DoubleAvgV2(int index, boolean isDistinct, DataType inputType, DataType outputType, - int filterArg) { - super(new int[] {index}, isDistinct, new DataType[] {inputType}, outputType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new NullableDoubleLongGroupState(capacity); - } - - @Override - public void appendInitValue() { - groupState.appendNull(); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.setNull(groupId); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final double value = getDouble(block, position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, value, 1); - } else { - double sum = groupState.getDouble(groupId) + value; - long count = groupState.getLong(groupId) + 1; - groupState.set(groupId, sum, count); - } - } - - abstract double getDouble(Block block, int position); - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - if (groupState.isNull(groupId)) { - bb.appendNull(); - } else { - Double avg = (Double) DataTypes.DoubleType.getCalculator().divide( - groupState.getDouble(groupId), - groupState.getLong(groupId)); - if (avg == null) { - bb.appendNull(); - } else { - bb.writeDouble(avg); - } - } - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} - - - diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/SpecificType2UInt64BitAnd.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/SpecificType2UInt64BitAnd.java deleted file mode 100644 index 2938bd8da..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/SpecificType2UInt64BitAnd.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
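The two AVG kernels deleted above keep a (sum, count) pair per group and differ only in arithmetic: SpecificType2DecimalAvg divides with div_precision_increment extra digits and HALF_UP rounding, while SpecificType2DoubleAvgV2 divides doubles. A hedged sketch of the decimal finalization using java.math.BigDecimal in place of the engine's Decimal (the constant value 4 mirrors MySQL's default div_precision_increment and is illustrative here):

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    // Standalone sketch of the decimal AVG finalization above:
    // avg = round(sum / count) at the input scale plus div_precision_increment.
    final class DecimalAvgSketch {
        static final int DIV_PRECISION_INCREMENT = 4; // MySQL's default; illustrative here

        static BigDecimal finish(BigDecimal sum, long count) {
            if (count == 0) {
                return null; // division by zero yields SQL NULL, as in writeResultTo above
            }
            int scale = sum.scale() + DIV_PRECISION_INCREMENT;
            return sum.divide(BigDecimal.valueOf(count), scale, RoundingMode.HALF_UP);
        }
    }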
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.UInt64; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.NullableUInt64GroupState; - -public abstract class SpecificType2UInt64BitAnd extends AbstractAggregator { - private NullableUInt64GroupState groupState; - - public SpecificType2UInt64BitAnd(int index, DataType inputTypes, int filterArg) { - super(new int[] {index}, false, new DataType[] {inputTypes}, DataTypes.ULongType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new NullableUInt64GroupState(capacity); - } - - @Override - public void appendInitValue() { - groupState.append(UInt64.MAX_UINT64); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.set(groupId, UInt64.MAX_UINT64); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - UInt64 value = getUInt64(block, position); - UInt64 beforeValue = groupState.get(groupId); - groupState.set(groupId, beforeValue.bitAnd(value)); - } - - abstract UInt64 getUInt64(Block block, int position); - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - bb.writeObject(groupState.get(groupId)); - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/SpecificType2UInt64BitOr.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/SpecificType2UInt64BitOr.java deleted file mode 100644 index 76f9eedb0..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/SpecificType2UInt64BitOr.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.UInt64; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.NullableUInt64GroupState; - -public abstract class SpecificType2UInt64BitOr extends AbstractAggregator { - private NullableUInt64GroupState groupState; - - public SpecificType2UInt64BitOr(int index, DataType inputTypes, int filterArg) { - super(new int[] {index}, false, new DataType[] {inputTypes}, DataTypes.ULongType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new NullableUInt64GroupState(capacity); - } - - @Override - public void appendInitValue() { - groupState.append(UInt64.UINT64_ZERO); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.set(groupId, UInt64.UINT64_ZERO); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - UInt64 value = getUInt64(block, position); - UInt64 beforeValue = groupState.get(groupId); - groupState.set(groupId, beforeValue.bitOr(value)); - } - - abstract UInt64 getUInt64(Block block, int position); - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - bb.writeObject(groupState.get(groupId)); - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/SpecificType2UInt64BitXor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/SpecificType2UInt64BitXor.java deleted file mode 100644 index 05f0e4275..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/SpecificType2UInt64BitXor.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.common.datatype.UInt64; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.NullableUInt64GroupState; - -public abstract class SpecificType2UInt64BitXor extends AbstractAggregator { - private NullableUInt64GroupState groupState; - - public SpecificType2UInt64BitXor(int index, DataType inputTypes, int filterArg) { - super(new int[] {index}, false, new DataType[] {inputTypes}, DataTypes.ULongType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new NullableUInt64GroupState(capacity); - } - - @Override - public void appendInitValue() { - groupState.append(UInt64.UINT64_ZERO); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.set(groupId, UInt64.UINT64_ZERO); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - UInt64 value = getUInt64(block, position); - UInt64 beforeValue = groupState.get(groupId); - groupState.set(groupId, beforeValue.bitXor(value)); - } - - abstract UInt64 getUInt64(Block block, int position); - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - bb.writeObject(groupState.get(groupId)); - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Sum.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Sum.java deleted file mode 100644 index c75b1a9c5..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Sum.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.NullableObjectGroupState; - -/** - * Created by chuanqin on 17/8/10. 
- */ -public class Sum extends AbstractAggregator { - private NullableObjectGroupState groupState; - - public Sum(int targetIndexes, boolean distinct, DataType returnType, int filterArg) { - super(new int[] {targetIndexes}, distinct, null, returnType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new NullableObjectGroupState(capacity); - } - - @Override - public void accumulate(int groupId, Chunk inputChunk, int position) { - Block block = inputChunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final Object value = block.getObject(position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, returnType.convertFrom(value)); - } else { - Object oldValue = groupState.get(groupId); - groupState.set(groupId, returnType.getCalculator().add(oldValue, value)); - } - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - if (groupState.isNull(groupId)) { - bb.appendNull(); - } else { - Object sum = groupState.get(groupId); - bb.writeObject(sum); - } - } - - @Override - public void appendInitValue() { - groupState.appendNull(); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.setNull(groupId); - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Sum0.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Sum0.java deleted file mode 100644 index 89cdf2dd3..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/Sum0.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.NullableObjectGroupState; - -public class Sum0 extends AbstractAggregator { - private NullableObjectGroupState groupState; - - public Sum0(int targetIndexes, boolean distinct, DataType returnType, int filterArg) { - super(new int[] {targetIndexes}, distinct, null, returnType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new NullableObjectGroupState(capacity); - } - - @Override - public void accumulate(int groupId, Chunk inputChunk, int position) { - Block block = inputChunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final Object value = block.getObject(position); - Object oldValue = groupState.get(groupId); - groupState.set(groupId, returnType.getCalculator().add(oldValue, value)); - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - Object sum = groupState.get(groupId); - bb.writeObject(sum); - } - - @Override - public void appendInitValue() { - groupState.append(returnType.convertFrom(0)); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.set(groupId, returnType.convertFrom(0)); - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/WrapedLong2WarpedLongMax.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/WrapedLong2WarpedLongMax.java deleted file mode 100644 index d353fdabf..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/WrapedLong2WarpedLongMax.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.optimizer.state.NullableLongGroupState; - -public class WrapedLong2WarpedLongMax extends AbstractAggregator { - protected NullableLongGroupState groupState; - - public WrapedLong2WarpedLongMax(int index, DataType inputType, DataType outputType, int filterArg) { - super(new int[] {index}, false, new DataType[] {inputType}, outputType, filterArg); - } - - @Override - public void open(int capacity) { - groupState = new NullableLongGroupState(capacity); - } - - @Override - public void appendInitValue() { - groupState.appendNull(); - } - - @Override - public void resetToInitValue(int groupId) { - groupState.setNull(groupId); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final long value = block.getPackedLong(position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, value); - } else { - long beforeValue = groupState.get(groupId); - long afterValue = Math.max(value, beforeValue); - groupState.set(groupId, afterValue); - } - } - - @Override - public void writeResultTo(int groupId, BlockBuilder bb) { - if (groupState.isNull(groupId)) { - bb.appendNull(); - } else { - bb.writeDatetimeRawLong(groupState.get(groupId)); - } - } - - @Override - public long estimateSize() { - return groupState.estimateSize(); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/WrapedLong2WarpedLongMin.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/WrapedLong2WarpedLongMin.java deleted file mode 100644 index d99afa96e..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/calc/aggfunctions/WrapedLong2WarpedLongMin.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.calc.aggfunctions; - -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; - -public class WrapedLong2WarpedLongMin extends WrapedLong2WarpedLongMax { - public WrapedLong2WarpedLongMin(int index, DataType inputType, DataType outputType, int filterArg) { - super(index, inputType, outputType, filterArg); - } - - @Override - public void accumulate(int groupId, Chunk chunk, int position) { - Block block = chunk.getBlock(aggIndexInChunk[0]); - if (block.isNull(position)) { - return; - } - - final long value = block.getPackedLong(position); - if (groupState.isNull(groupId)) { - groupState.set(groupId, value); - } else { - long beforeValue = groupState.get(groupId); - long afterValue = Math.min(value, beforeValue); - groupState.set(groupId, afterValue); - } - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/changeset/ChangeSetManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/changeset/ChangeSetManager.java index 323e74d6c..0e5d2b505 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/changeset/ChangeSetManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/changeset/ChangeSetManager.java @@ -33,10 +33,13 @@ import com.alibaba.polardbx.executor.ddl.newengine.cross.CrossEngineValidator; import com.alibaba.polardbx.executor.ddl.workqueue.ChangeSetThreadPool; import com.alibaba.polardbx.executor.gsi.GsiUtils; +import com.alibaba.polardbx.executor.handler.HandlerCommon; import com.alibaba.polardbx.executor.spi.ITransactionManager; import com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils; -import com.alibaba.polardbx.executor.ddl.workqueue.PriorityFIFOTask; import com.alibaba.polardbx.executor.ddl.workqueue.BackFillThreadPool; +import com.alibaba.polardbx.executor.ddl.workqueue.PriorityFIFOTask; +import com.alibaba.polardbx.executor.gsi.GsiUtils; +import com.alibaba.polardbx.executor.spi.ITransactionManager; import com.alibaba.polardbx.executor.utils.ExecUtils; import com.alibaba.polardbx.executor.utils.OrderByOption; import com.alibaba.polardbx.gms.util.GroupInfoUtil; @@ -45,9 +48,12 @@ import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager; import com.alibaba.polardbx.optimizer.config.table.GlobalIndexMeta; import com.alibaba.polardbx.optimizer.config.table.SchemaManager; +import com.alibaba.polardbx.optimizer.config.table.TableColumnUtils; import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.CursorMeta; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; import com.alibaba.polardbx.optimizer.core.rel.PhyTableOperation; import com.alibaba.polardbx.optimizer.core.row.ArrayRow; import com.alibaba.polardbx.optimizer.core.row.Row; @@ -58,6 +64,7 @@ import com.google.common.util.concurrent.RateLimiter; import lombok.Data; import org.apache.calcite.rel.RelFieldCollation; +import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.util.Pair; import org.apache.commons.collections.MapUtils; @@ -90,6 +97,10 @@ public class ChangeSetManager { private final ChangeSetMetaManager changeSetMetaManager; + private List sourceTableColumns; + private List targetTableColumns; + private List notUsingBinaryStringColumns; + // speed ctl 
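+ // Speed control: the Guava RateLimiter caps the instantaneous replay rate,
+ // and the backfill Throttle (constructed with speedMin / speedLimit during
+ // catch-up) paces how fast fetched change sets are applied.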
private volatile RateLimiter rateLimiter; private com.alibaba.polardbx.executor.backfill.Throttle throttle; @@ -143,6 +154,14 @@ public void logicalTableChangeSetCatchUp(String logicalTableName, String indexNa return; } + originEc = HandlerCommon.setChangeSetApplySqlMode(originEc.copy()); + final boolean useBinary = originEc.getParamManager().getBoolean(ConnectionParams.BACKFILL_USING_BINARY); + if (!useBinary) { + HandlerCommon.upgradeEncoding(originEc, schemaName, logicalTableName); + } + + prepareColumns(originEc, logicalTableName, indexName); + changeSetMetaManager.getChangeSetReporter().loadChangeSetMeta(changeSetId); if (status == ChangeSetCatchUpStatus.ABSENT) { @@ -157,20 +176,23 @@ public void logicalTableChangeSetCatchUp(String logicalTableName, String indexNa this.throttle = new com.alibaba.polardbx.executor.backfill.Throttle(speedMin, speedLimit, schemaName); List futures = new ArrayList<>(16); + // interrupted flag shared across subtasks; set on the first failure + AtomicReference interrupted = new AtomicReference<>(false); SQLRecorderLogger.ddlLogger.warn( MessageFormat.format("[{0}] ChangeSet Fetch job: {1} start with {2} phyTable(s).", originEc.getTraceId(), changeSetId, sourcePhyTableNames.size())); AtomicReference excep = new AtomicReference<>(null); + ExecutionContext finalOriginEc = originEc; sourcePhyTableNames.forEach((sourceGroupName, phyTableNames) -> { for (String phyTableName : phyTableNames) { FutureTask task = new FutureTask<>( () -> migrateTableCatchup( - schemaName, logicalTableName, + schemaName, logicalTableName, indexName, sourceGroupName, phyTableName, targetTableLocations, - taskType, status, originEc + taskType, status, finalOriginEc, interrupted ), null); futures.add(task); ChangeSetThreadPool.getInstance() @@ -192,15 +214,11 @@ public void logicalTableChangeSetCatchUp(String logicalTableName, String indexNa try { future.get(); } catch (Exception e) { - futures.forEach(f -> { - try { - f.cancel(true); - } catch (Throwable ignore) { - } - }); if (null == excep.get()) { excep.set(e); } + + interrupted.set(true); } } @@ -342,11 +360,12 @@ public void migrateTableCopyBaseline(String schema, String logicalTableName, * sourceGroup1[physicalTableP1] --> targetGroup2[physicalTableP2] & targetGroup3[physicalTableP3] * eg: WUMU_P00000[tbl_xxxx_00000] --> WUMU_P00001[tbl_xxxx_00001] & WUMU_P00002[tbl_xxxx_00002] */ - public void migrateTableCatchup(String schema, String logicalTableName, + public void migrateTableCatchup(String schema, String logicalTableName, String indexName, String sourceGroup, String sourceTable, Map targetTableLocations, ComplexTaskMetaManager.ComplexTaskType taskType, - ChangeSetCatchUpStatus status, ExecutionContext originEc) { + ChangeSetCatchUpStatus status, ExecutionContext originEc, + AtomicReference interrupted) { LOG.info(String.format("migrate table catch up %s.%s", schema, logicalTableName)); ExecutionContext ec = originEc.copy(); @@ -359,7 +378,7 @@ public void migrateTableCatchup(String schema, String logicalTableName, final String targetPhysicalDb = GroupInfoUtil.buildPhysicalDbNameFromGroupName(groupAndPhyTable.getKey()); ChangeSetMeta meta = new ChangeSetMeta( - schema, logicalTableName, null, + schema, logicalTableName, indexName, sourceGroup, groupAndPhyTable.getKey(), physicalDb, targetPhysicalDb, sourceTable, groupAndPhyTable.getValue(), @@ -375,17 +394,23 @@ public void migrateTableCatchup(String schema, String logicalTableName, ChangeSetData data = new ChangeSetData(meta); ChangeSetTask task = new ChangeSetTask(data, params); + // check whether this phyTable's catch-up has already finished + if
(changeSetMetaManager.getChangeSetReporter().isFinished(sourceGroup, sourceTable)) { + LOG.info(String.format("move table task %s is already completed", task)); + return; + } + try { changeSetMetaManager.getChangeSetReporter().updateCatchUpStart(sourceGroup, sourceTable); if (status.isWriteOnly()) { task.setTaskStatus(ChangeSetTaskStatus.CATCHUP_WO); - catchUpOnce(ec, task, status); + catchUpOnce(ec, task, status, interrupted); } else if (status.isDeleteOnly()) { task.setTaskStatus(ChangeSetTaskStatus.CATCHUP_DO); - catchUpOnce(ec, task, status); + catchUpOnce(ec, task, status, interrupted); } else { task.setTaskStatus(ChangeSetTaskStatus.CATCHUP); - catchUpByChangeSet(ec, task, status); + catchUpByChangeSet(ec, task, status, interrupted); } if (status == ChangeSetCatchUpStatus.WRITE_ONLY_FINAL) { task.setTaskStatus(ChangeSetTaskStatus.FINISH); @@ -398,7 +423,7 @@ public void migrateTableCatchup(String schema, String logicalTableName, throw e; } - LOG.info(String.format("finish move table task %s", task)); + LOG.info(String.format("finish catch up task %s", task)); } /** @@ -486,14 +511,15 @@ private void copyBaseline(ExecutionContext executionContext, ChangeSetTask task, } - private void catchUpByChangeSet(ExecutionContext ec, ChangeSetTask task, ChangeSetCatchUpStatus status) { + private void catchUpByChangeSet(ExecutionContext ec, ChangeSetTask task, ChangeSetCatchUpStatus status, + AtomicReference interrupted) { LOG.info(String.format("changeset %s start catchup", task)); int count = 0; int replayTimes = task.getParams().getReplayTimes(); while (count != replayTimes) { - if (!catchUpOnce(ec, task, status)) { + if (!catchUpOnce(ec, task, status, interrupted)) { break; } count++; @@ -502,7 +528,8 @@ private void catchUpByChangeSet(ExecutionContext ec, ChangeSetTask task, ChangeS LOG.info(String.format("changeset %s finish catchup, times : %d", task, count)); } - private boolean catchUpOnce(ExecutionContext ec, ChangeSetTask task, ChangeSetCatchUpStatus status) { + private boolean catchUpOnce(ExecutionContext ec, ChangeSetTask task, ChangeSetCatchUpStatus status, + AtomicReference interrupted) { int count = 0; boolean res = true; int changeSetTimes = getFetchChangeSetTimes(ec, task); @@ -519,7 +546,7 @@ private boolean catchUpOnce(ExecutionContext ec, ChangeSetTask task, ChangeSetCa replayChangeSet(ec, task, !status.isAbsent()); // interrupt - if (CrossEngineValidator.isJobInterrupted(ec) || Thread.currentThread().isInterrupted()) { + if (CrossEngineValidator.isJobInterrupted(ec) || interrupted.get()) { long jobId = ec.getDdlJobId(); throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "The job '" + jobId + "' has been cancelled"); @@ -697,6 +724,39 @@ private void waitApplyFinish(List futures) { futures.clear(); } + private void prepareColumns(ExecutionContext ec, String tableName, String indexName) { + final SchemaManager sm = ec.getSchemaManager(); + final TableMeta baseTableMeta = sm.getTable(tableName); + final TableMeta targetTableMeta = indexName == null ? 
sm.getTable(tableName) : sm.getTable(indexName); + + Map columnMultiWriteMapping = + TableColumnUtils.getColumnMultiWriteMapping(targetTableMeta.getTableColumnMeta()); + boolean isModify = MapUtils.isNotEmpty(columnMultiWriteMapping); + + List targetTableColumns = new ArrayList<>(); + List sourceTableColumns = new ArrayList<>(); + List notUsingBinaryStringColumns = new ArrayList<>(); + for (ColumnMeta columnMeta : baseTableMeta.getWriteColumns()) { + String columnName = columnMeta.getName(); + if (targetTableMeta.containsColumn(columnName)) { + if (isModify && columnMultiWriteMapping.get(columnName.toLowerCase()) != null) { + targetTableColumns.add(columnMultiWriteMapping.get(columnName.toLowerCase())); + DataType dataType = columnMeta.getDataType(); + if (DataTypeUtil.isStringType(dataType)) { + notUsingBinaryStringColumns.add(columnName); + } + } else { + targetTableColumns.add(columnName); + } + sourceTableColumns.add(columnName); + } + } + + setSourceTableColumns(sourceTableColumns); + setTargetTableColumns(targetTableColumns); + setNotUsingBinaryStringColumns(notUsingBinaryStringColumns); + } + public PhyTableOperation genPhySelectPlan(ExecutionContext ec, ChangeSetTask task, List rowPks, boolean lock) { final ChangeSetMeta meta = task.getData().getMeta(); @@ -705,6 +765,8 @@ public PhyTableOperation genPhySelectPlan(ExecutionContext ec, ChangeSetTask tas meta.getSourceTableName(), meta.getSourceGroup(), meta.getSourcePhysicalTable(), + sourceTableColumns, + notUsingBinaryStringColumns, rowPks, ec, lock @@ -718,7 +780,7 @@ public Parameters selectRowByPksFromSourceTable(ExecutionContext ec, ChangeSetTa start = System.currentTimeMillis(); - Parameters parameters = ChangeSetUtils.executePhySelectPlan(selectPlan, ec); + Parameters parameters = ChangeSetUtils.executePhySelectPlan(selectPlan, notUsingBinaryStringColumns, ec); end = System.currentTimeMillis(); @@ -772,8 +834,10 @@ public void replaceRowToTargetTable(ExecutionContext ec, ChangeSetTask task, Par PhyTableOperation replacePlan = ChangeSetUtils.buildReplace( meta.getSchemaName(), meta.getSourceTableName(), + meta.getTargetTableName(), targetGroup, targetPhyTable, + targetTableColumns, parameters, ec ); @@ -1240,12 +1304,26 @@ public ChangeSetTask(ChangeSetData data, ChangeSetParams params) { @Override public String toString() { ChangeSetMeta meta = getData().getMeta(); - return String.format("ChangeSetTask{Table=%s, taskStatus=%s, phyTableName:%s}", - meta.getChangeSetName(), taskStatus, meta.getSourcePhysicalTable()); + return String.format( + "ChangeSetTask{Table=%s, targetTable=%s, taskStatus=%s, sourcePhyTableName=%s, taskType=%s}", + meta.getChangeSetName(), meta.getTargetTableName(), taskStatus, meta.getSourcePhysicalTable(), + meta.getTaskType()); } } public static Long getChangeSetId() { return ID_GENERATOR.nextId(); } + + public void setSourceTableColumns(List sourceTableColumns) { + this.sourceTableColumns = sourceTableColumns; + } + + public void setTargetTableColumns(List targetTableColumns) { + this.targetTableColumns = targetTableColumns; + } + + public void setNotUsingBinaryStringColumns(List notUsingBinaryStringColumns) { + this.notUsingBinaryStringColumns = notUsingBinaryStringColumns; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/changeset/ChangeSetMetaManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/changeset/ChangeSetMetaManager.java index 96c9c73c9..fea5a4e0c 100644 --- 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/changeset/ChangeSetMetaManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/changeset/ChangeSetMetaManager.java @@ -26,7 +26,6 @@ import com.alibaba.polardbx.common.utils.thread.ExecutorUtil; import com.alibaba.polardbx.common.utils.thread.NamedThreadFactory; import com.alibaba.polardbx.config.ConfigDataMode; -import com.alibaba.polardbx.executor.ddl.engine.AsyncDDLCache; import com.alibaba.polardbx.executor.gsi.GsiBackfillManager; import com.alibaba.polardbx.gms.metadb.GmsSystemTables; import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; @@ -52,7 +51,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ThreadPoolExecutor; @@ -139,11 +137,7 @@ public void deregister(@NotNull final String schemaName) { public ChangeSetMetaManager(String schema) { this.schema = schema; this.changeSetReporter = new ChangeSetReporter(this); - if (ConfigDataMode.isPolarDbX()) { - this.dataSource = MetaDbDataSource.getInstance().getDataSource(); - } else { - this.dataSource = AsyncDDLCache.getDataSource(schema); - } + this.dataSource = MetaDbDataSource.getInstance().getDataSource(); } public ChangeSetReporter getChangeSetReporter() { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/changeset/ChangeSetReporter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/changeset/ChangeSetReporter.java index 367486320..385ba66d9 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/changeset/ChangeSetReporter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/changeset/ChangeSetReporter.java @@ -109,4 +109,14 @@ public boolean needReCatchUp(String sourceGroup, String phyTableName) { return StringUtils.equalsIgnoreCase(record.getMessage(), ChangeSetMetaManager.START_CATCHUP); } + + public boolean isFinished(String sourceGroup, String phyTableName) { + + ChangeSetMetaManager.ChangeSetObjectRecord record = changeSetBean.getRecord( + sourceGroup, + phyTableName + ); + + return record.getStatus() == ChangeSetMetaManager.ChangeSetStatus.SUCCESS.getValue(); + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/AbstractBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/AbstractBlock.java index 20f231378..97016b71d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/AbstractBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/AbstractBlock.java @@ -16,6 +16,7 @@ package com.alibaba.polardbx.executor.chunk; +import com.alibaba.polardbx.executor.operator.util.DriverObjectPool; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.google.common.base.Preconditions; @@ -24,7 +25,7 @@ /** * Abstract random accessible data block. 
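* Provides the shared null-array bookkeeping and size accounting for concrete
* blocks; object-pool recycling hooks are introduced below.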
*/ -abstract class AbstractBlock implements Block, RandomAccessBlock { +public abstract class AbstractBlock implements Block, RandomAccessBlock { final int arrayOffset; int positionCount; @@ -44,10 +45,17 @@ abstract class AbstractBlock implements Block, RandomAccessBlock { protected boolean hasNull; protected String digest; + protected DriverObjectPool.Recycler recycler; + AbstractBlock(int arrayOffset, int positionCount, boolean[] valueIsNull) { + this(null, arrayOffset, positionCount, valueIsNull); + } + + AbstractBlock(DataType dataType, int arrayOffset, int positionCount, boolean[] valueIsNull) { Preconditions.checkArgument(positionCount >= 0); Preconditions.checkArgument(arrayOffset >= 0); + this.dataType = dataType; this.positionCount = positionCount; this.arrayOffset = arrayOffset; this.isNull = valueIsNull; @@ -66,6 +74,34 @@ protected AbstractBlock(DataType dataType, int positionCount, boolean[] isNull, this.arrayOffset = 0; } + @Override + public void destroyNulls(boolean force) { + if (force) { + this.isNull = null; + this.hasNull = false; + return; + } + + boolean hasNull = false; + for (int i = 0; i < positionCount; i++) { + hasNull |= isNull(i); + } + if (!hasNull) { + this.isNull = null; + this.hasNull = false; + } + } + + @Override + public void setRecycler(DriverObjectPool.Recycler recycler) { + this.recycler = recycler; + } + + @Override + public boolean isRecyclable() { + return recycler != null; + } + @Override public long estimateSize() { return estimatedSize; @@ -165,7 +201,7 @@ public void shallowCopyTo(RandomAccessBlock another) { protected void digest() { this.digest = new StringBuilder() .append("{class = ").append(getClass().getSimpleName()) - .append(", datatype = ").append(dataType.toString()) + .append(", datatype = ").append(dataType == null ? 
"null" : dataType.toString()) .append(", size = ").append(positionCount) .append("}") .toString(); @@ -211,5 +247,5 @@ public String toString() { * this method should be called to update block size info, including @estimatedSize and @elementUsedBytes * suggest adding sequence: [instantSize] + isNullSize + valueSize + [offsetSize] */ - abstract void updateSizeInfo(); + public abstract void updateSizeInfo(); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/AbstractBlockBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/AbstractBlockBuilder.java index 63933ae37..0b1b327eb 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/AbstractBlockBuilder.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/AbstractBlockBuilder.java @@ -16,17 +16,15 @@ package com.alibaba.polardbx.executor.chunk; -import it.unimi.dsi.fastutil.booleans.BooleanArrayList; - public abstract class AbstractBlockBuilder implements BlockBuilder { - public final int initialCapacity; - public final BooleanArrayList valueIsNull; + final int initialCapacity; + final protected BatchedArrayList.BatchBooleanArrayList valueIsNull; protected boolean containsNull; - AbstractBlockBuilder(int initialCapacity) { + public AbstractBlockBuilder(int initialCapacity) { this.initialCapacity = initialCapacity; - this.valueIsNull = new BooleanArrayList(initialCapacity); + this.valueIsNull = new BatchedArrayList.BatchBooleanArrayList(initialCapacity); this.containsNull = false; } @@ -46,7 +44,7 @@ public void ensureCapacity(int capacity) { valueIsNull.ensureCapacity(capacity); } - void appendNullInternal() { + protected void appendNullInternal() { valueIsNull.add(true); containsNull = true; } @@ -60,7 +58,7 @@ public boolean mayHaveNull() { return containsNull; } - void checkReadablePosition(int position) { + protected void checkReadablePosition(int position) { if (position < 0 || position >= getPositionCount()) { throw new IllegalArgumentException("position is not valid"); } @@ -81,7 +79,13 @@ public final long estimateSize() { throw new UnsupportedOperationException(); } - int getCapacity() { + protected int getCapacity() { return valueIsNull.elements().length; } + + @Override + public final long hashCodeUseXxhash(int pos) { + throw new UnsupportedOperationException( + "Block builder not support hash code calculated by xxhash, you should convert it to block first"); + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/AbstractCommonBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/AbstractCommonBlock.java index de023b543..ec648ce2c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/AbstractCommonBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/AbstractCommonBlock.java @@ -25,12 +25,16 @@ protected AbstractCommonBlock(DataType dataType, int positionCount, boolean[] is super(dataType, positionCount, isNull, hasNull); } + protected AbstractCommonBlock(DataType dataType, int positionCount) { + super(dataType, positionCount); + } + @Override public void copySelected(boolean selectedInUse, int[] sel, int size, RandomAccessBlock output) { if (!(output instanceof ReferenceBlock)) { GeneralUtil.nestedException("cannot copy contents to " + output == null ? 
null : output.toString()); } - ReferenceBlock refBlock = (ReferenceBlock) output; + ReferenceBlock refBlock = output.cast(ReferenceBlock.class); refBlock.setHasNull(this.hasNull); Object[] outputArray = refBlock.objectArray(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BatchedArrayList.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BatchedArrayList.java new file mode 100644 index 000000000..daafa58c6 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BatchedArrayList.java @@ -0,0 +1,83 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.chunk; + +import it.unimi.dsi.fastutil.booleans.BooleanArrayList; +import it.unimi.dsi.fastutil.booleans.BooleanArrays; +import it.unimi.dsi.fastutil.ints.IntArrayList; +import it.unimi.dsi.fastutil.ints.IntArrays; +import it.unimi.dsi.fastutil.longs.LongArrayList; +import it.unimi.dsi.fastutil.longs.LongArrays; + +public interface BatchedArrayList { + + void add(T array, int[] selection, int offsetInSelection, int positionCount); + + class BatchLongArrayList extends LongArrayList implements BatchedArrayList { + public BatchLongArrayList(int capacity) { + super(capacity); + } + + @Override + public void add(long[] array, int[] selection, int offsetInSelection, int positionCount) { + // grow to prevent that array index out of bound. + this.a = LongArrays.ensureCapacity(this.a, this.size + positionCount, this.size); + for (int i = 0; i < positionCount; i++) { + int j = selection[i + offsetInSelection]; + this.a[this.size++] = array[j]; + } + } + } + + class BatchIntArrayList extends IntArrayList implements BatchedArrayList { + public BatchIntArrayList(int capacity) { + super(capacity); + } + + @Override + public void add(int[] array, int[] selection, int offsetInSelection, int positionCount) { + // grow to prevent that array index out of bound. 
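+ // Worked example: with array = {10, 20, 30, 40}, selection = {3, 1},
+ // offsetInSelection = 0 and positionCount = 2, this appends 40 then 20:
+ // ensureCapacity grows the backing array once, then each selected index
+ // is dereferenced in order and written at this.size++.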
+ this.a = IntArrays.ensureCapacity(this.a, this.size + positionCount, this.size); + for (int i = 0; i < positionCount; i++) { + int j = selection[i + offsetInSelection]; + this.a[this.size++] = array[j]; + } + } + } + + class BatchBooleanArrayList extends BooleanArrayList implements BatchedArrayList { + public BatchBooleanArrayList(int capacity) { + super(capacity); + } + + @Override + public void add(boolean[] array, int[] selection, int offsetInSelection, int positionCount) { + this.a = BooleanArrays.ensureCapacity(this.a, this.size + positionCount, this.size); + for (int i = 0; i < positionCount; i++) { + int j = selection[i + offsetInSelection]; + this.a[this.size++] = array[j]; + } + } + + public void add(boolean booleanValue, int positionCount) { + this.a = BooleanArrays.ensureCapacity(this.a, this.size + positionCount, this.size); + for (int i = 0; i < positionCount; i++) { + this.a[this.size++] = booleanValue; + } + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BigIntegerBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BigIntegerBlock.java index cfb450ead..05b1de08f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BigIntegerBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BigIntegerBlock.java @@ -16,6 +16,10 @@ package com.alibaba.polardbx.executor.chunk; +import com.alibaba.polardbx.common.utils.XxhashUtils; +import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; +import com.alibaba.polardbx.executor.utils.ExecUtils; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; @@ -44,6 +48,10 @@ public class BigIntegerBlock extends AbstractCommonBlock { private final byte[] data; + public BigIntegerBlock(int positionCount) { + this(0, positionCount, new boolean[positionCount], new byte[positionCount * BigIntegerBlock.LENGTH]); + } + public BigIntegerBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, byte[] data) { super(DataTypes.ULongType, positionCount, valueIsNull, valueIsNull != null); this.data = data; @@ -56,6 +64,28 @@ public BigIntegerBlock(int positionCount, boolean[] valueIsNull, byte[] data, bo updateSizeInfo(); } + public static BigIntegerBlock from(BigIntegerBlock other, int selSize, int[] selection) { + return new BigIntegerBlock(0, + selSize, + BlockUtils.copyNullArray(other.isNull, selection, selSize), + other.copyDataArray(selSize, selection)); + } + + public byte[] copyDataArray(int selSize, int[] selection) { + if (data == null) { + return null; + } + if (selection == null) { + return Arrays.copyOf(data, selSize * BigIntegerBlock.LENGTH); + } else { + byte[] target = new byte[selSize * BigIntegerBlock.LENGTH]; + for (int i = 0; i < selSize; i++) { + System.arraycopy(data, beginOffset(selection[i]), target, beginOffset(i), LENGTH); + } + return target; + } + } + @Override public BigInteger getBigInteger(int position) { checkReadablePosition(position); @@ -107,6 +137,15 @@ public int hashCode(int position) { return getBigInteger(position).hashCode(); } + @Override + public long hashCodeUseXxhash(int pos) { + if (isNull(pos)) { + return NULL_HASH_CODE; + } else { + return XxhashUtils.finalShuffle(getBigInteger(pos).longValue()); + } + } + @Override public boolean equals(int position, Block other, int otherPosition) { boolean n1 = isNull(position); @@ -117,7 +156,7 @@ 
public boolean equals(int position, Block other, int otherPosition) { return false; } if (other instanceof BigIntegerBlock) { - return equals(position, (BigIntegerBlock) other, otherPosition); + return equals(position, other.cast(BigIntegerBlock.class), otherPosition); } else if (other instanceof BigIntegerBlockBuilder) { return equals(position, (BigIntegerBlockBuilder) other, otherPosition); } else { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BigIntegerBlockBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BigIntegerBlockBuilder.java index df4799382..703304442 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BigIntegerBlockBuilder.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BigIntegerBlockBuilder.java @@ -30,7 +30,7 @@ public class BigIntegerBlockBuilder extends AbstractBlockBuilder { final ByteArrayList data; - private static byte[] emptyPacket = new byte[BigIntegerBlock.LENGTH]; + private static final byte[] EMPTY_PACKET = new byte[BigIntegerBlock.LENGTH]; public BigIntegerBlockBuilder(int capacity) { super(capacity); @@ -57,7 +57,7 @@ public void writeBigInteger(BigInteger value) { @Override public void appendNull() { appendNullInternal(); - data.addElements(data.size(), emptyPacket); + data.addElements(data.size(), EMPTY_PACKET); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BigIntegerBlockEncoding.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BigIntegerBlockEncoding.java index ee8ab1c59..6512c6f43 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BigIntegerBlockEncoding.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BigIntegerBlockEncoding.java @@ -50,7 +50,7 @@ public void writeBlock(SliceOutput sliceOutput, Block block) { encodeNullsAsBits(sliceOutput, block); - BigIntegerBlock bigIntegerBlock = (BigIntegerBlock) block; + BigIntegerBlock bigIntegerBlock = block.cast(BigIntegerBlock.class); byte[] data = bigIntegerBlock.getData(); sliceOutput.writeBytes(data, 0, positionCount * BigIntegerBlock.LENGTH); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlobBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlobBlock.java index 866b2708f..3bf6c4bf4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlobBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlobBlock.java @@ -17,10 +17,13 @@ package com.alibaba.polardbx.executor.chunk; import com.alibaba.polardbx.optimizer.core.datatype.BlobType; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import io.airlift.slice.XxHash64; import java.sql.Blob; import java.sql.SQLException; +import java.util.Arrays; import static com.alibaba.polardbx.common.CrcAccumulator.NULL_TAG; @@ -30,10 +33,39 @@ */ public class BlobBlock extends ReferenceBlock { + public BlobBlock(int positionCount) { + super(0, positionCount, new boolean[positionCount], + new Blob[positionCount], DataTypes.BlobType); + } + public BlobBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, Object[] values) { super(arrayOffset, positionCount, valueIsNull, values, DataTypes.BlobType); } + public BlobBlock(DataType dataType, int arrayOffset, int positionCount, boolean[] valueIsNull, Object[] values) 
{ + super(arrayOffset, positionCount, valueIsNull, values, dataType); + } + + public static BlobBlock from(BlobBlock other, int selSize, int[] selection) { + if (selection == null) { + // case 1: direct copy + return new BlobBlock(other.dataType, other.arrayOffset, selSize, + BlockUtils.copyNullArray(other.isNull, null, selSize), + Arrays.copyOf(other.values, other.values.length)); + } else { + // case 2: copy selected + boolean[] targetNulls = BlockUtils.copyNullArray(other.isNull, selection, selSize); + Object[] targetValues = new Object[selSize]; + + for (int position = 0; position < selSize; position++) { + targetValues[position] = other.values[selection[position]]; + } + + return new BlobBlock(other.dataType, other.arrayOffset, selSize, + targetNulls, targetValues); + } + } + @Override public Blob getBlob(int position) { return getReference(position); @@ -44,6 +76,9 @@ public Blob getObject(int position) { return isNull(position) ? null : getBlob(position); } + /** + * TODO dict + */ @Override public boolean equals(int position, Block otherBlock, int otherPosition) { boolean n1 = isNull(position); @@ -68,6 +103,23 @@ public int hashCode(int position) { return ((BlobType) DataTypes.BlobType).hashcode(getBlob(position)); } + @Override + public long hashCodeUseXxhash(int pos) { + if (isNull(pos)) { + return NULL_HASH_CODE; + } + Blob blob = getBlob(pos); + + com.alibaba.polardbx.optimizer.core.datatype.Blob value = + (com.alibaba.polardbx.optimizer.core.datatype.Blob) (DataTypes.BlobType).convertFrom(blob); + + if (value == null) { + return NULL_HASH_CODE; + } + + return XxHash64.hash(value.getSlice()); + } + @Override public int checksum(int position) { if (isNull(position)) { @@ -84,4 +136,8 @@ public int checksum(int position) { return NULL_TAG; } } + + public Blob[] blobArray() { + return (Blob[]) values; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/Block.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/Block.java index fb3956500..1929d8811 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/Block.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/Block.java @@ -17,7 +17,13 @@ package com.alibaba.polardbx.executor.chunk; import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.utils.bloomfilter.RFBloomFilter; import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; +import com.alibaba.polardbx.executor.accumulator.state.NullableLongGroupState; +import com.alibaba.polardbx.executor.operator.scan.impl.DictionaryMapping; +import com.alibaba.polardbx.executor.operator.util.DriverObjectPool; +import com.alibaba.polardbx.executor.operator.util.TypedList; +import com.google.common.base.Preconditions; import java.math.BigInteger; import java.sql.Blob; @@ -25,11 +31,14 @@ import java.sql.Date; import java.sql.Time; import java.sql.Timestamp; +import java.util.BitSet; /** * Block stores data in columnar format */ -public interface Block { +public interface Block extends CastableBlock { + public static final int NULL_HASH_CODE = 0; + /** * Is the specified position null? 
* @@ -116,6 +125,17 @@ default int hashCode(int position) { return getObject(position).hashCode(); } + long hashCodeUseXxhash(int pos); + + /** + * calculate of hash code when exchange is under partition wise + * for most type, it's equals to com.alibaba.polardbx.executor.chunk.Block#hashCodeUseXxhash(int) + * but for slice block, it's differ + */ + default long hashCodeUnderPairWise(int pos, boolean enableCompatible) { + return hashCodeUseXxhash(pos); + } + default int checksum(int position) { return hashCode(position); } @@ -132,6 +152,13 @@ default int[] hashCodeVector() { return results; } + default void hashCodeVector(int[] results, int positionCount) { + Preconditions.checkArgument(positionCount <= getPositionCount()); + for (int position = 0; position < positionCount; position++) { + results[position] = hashCode(position); + } + } + /** * Similar with
{@code Object.equals}
. Feel free to override it * if there is more efficient implementation. @@ -175,6 +202,13 @@ default boolean equals(int position, Block other, int otherPosition) { */ void writePositionTo(int position, BlockBuilder blockBuilder); + default void writePositionTo(int[] selection, final int offsetInSelection, final int positionCount, + BlockBuilder blockBuilder) { + for (int i = 0; i < positionCount; i++) { + writePositionTo(selection[i + offsetInSelection], blockBuilder); + } + } + /** * Returns the logical size of this block in memory. */ @@ -198,4 +232,439 @@ default void addToHasher(IStreamingHasher sink, int position) { default Object getObjectForCmp(int position) { return getObject(position); } + + default void collectNulls(int positionOffset, int positionCount, BitSet nullBitmap, int targetOffset) { + throw new UnsupportedOperationException(); + } + + /** + * Copy memory from block into given target array. + * + * @param positionOffset position offset in block. + * @param positionCount position count to copy in block. + * @param targetArray the target array to store the copied value. + * @param targetOffset the offset in target array. + * @param dictionaryMapping To maintain the dictionary mapping relation if this block has dictionary. + */ + default void copyToIntArray(int positionOffset, int positionCount, int[] targetArray, int targetOffset, + DictionaryMapping dictionaryMapping) { + throw new UnsupportedOperationException(); + } + + /** + * Copy memory from block into given target array. + * + * @param positionOffset position offset in block. + * @param positionCount position count to copy in block. + * @param targetArray the target array to store the copied value. + * @param targetOffset the offset in target array. + */ + default void copyToLongArray(int positionOffset, int positionCount, long[] targetArray, int targetOffset) { + throw new UnsupportedOperationException(); + } + + /** + * Performs a numerical sum over the elements in this block. + * + * @param groupSelected positions of selected elements. + * @param selSize count of selected elements. + * @param results store the sum results. + * results[0] = sum result number + * results[1] = sum state (E_DEC_OK, E_DEC_OVERFLOW, E_DEC_TRUNCATED) + */ + default void sum(int[] groupSelected, int selSize, long[] results) { + throw new UnsupportedOperationException(); + } + + /** + * Performs a numerical sum over the elements in this block. + * + * @param startIndexIncluded (included) start position of elements to perform sum. + * @param endIndexExcluded (excluded) end position of elements to perform sum. + * @param results store the sum results. 
+ * results[0] = sum result number for decimal64 or decimal128-low + * results[1] = sum result number for decimal128-high + * results[2] = sum state (E_DEC_DEC64, E_DEC_DEC128, E_DEC_TRUNCATED) + */ + default void sum(int startIndexIncluded, int endIndexExcluded, long[] results) { + throw new UnsupportedOperationException(); + } + + default void sum(int startIndexIncluded, int endIndexExcluded, long[] sumResultArray, int[] sumStatusArray, + int[] normalizedGroupIds) { + throw new UnsupportedOperationException(); + } + + default void appendTypedHashTable(TypedList typedList, int sourceIndex, int startIndexIncluded, + int endIndexExcluded) { + throw new UnsupportedOperationException(); + } + + default void count(int[] groupIds, int[] probePositions, int selSize, NullableLongGroupState state) { + for (int i = 0; i < selSize; i++) { + int position = probePositions[i]; + int groupId = groupIds[position]; + + if (!isNull(position)) { + state.set(groupId, state.get(groupId) + 1); + } + } + } + + default void recycle() { + } + + default boolean isRecyclable() { + return false; + } + + default void setRecycler(DriverObjectPool.Recycler recycler) { + } + + /** + * Try to remove null array if there is no TRUE value in it. + * + * @param force force to remove null array without check. + */ + default void destroyNulls(boolean force) { + // default: do nothing + } + + default void addIntToBloomFilter(final int totalPartitionCount, RFBloomFilter[] RFBloomFilters) { + final int positionCount = getPositionCount(); + for (int pos = 0; pos < positionCount; pos++) { + + // calc physical partition id. + long hashVal = hashCodeUseXxhash(pos); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + + // put hash code. + RFBloomFilters[partition].putInt(hashCode(pos)); + } + } + + default void addIntToBloomFilter(RFBloomFilter RFBloomFilter) { + final int positionCount = getPositionCount(); + for (int pos = 0; pos < positionCount; pos++) { + // put hash code. + RFBloomFilter.putInt(hashCode(pos)); + } + } + + default int mightContainsInt(RFBloomFilter RFBloomFilter, boolean[] bitmap) { + return mightContainsInt(RFBloomFilter, bitmap, false); + } + + default int mightContainsInt(RFBloomFilter RFBloomFilter, boolean[] bitmap, boolean isConjunctive) { + int hitCount = 0; + final int positionCount = getPositionCount(); + if (isConjunctive) { + + for (int pos = 0; pos < positionCount; pos++) { + + // Base on the original status in bitmap. + if (bitmap[pos]) { + int hashCode = hashCode(pos); + bitmap[pos] &= RFBloomFilter.mightContainInt(hashCode); + if (bitmap[pos]) { + hitCount++; + } + } + } + + } else { + + for (int pos = 0; pos < positionCount; pos++) { + int hashCode = hashCode(pos); + bitmap[pos] = RFBloomFilter.mightContainInt(hashCode); + if (bitmap[pos]) { + hitCount++; + } + } + + } + + return hitCount; + + } + + default int mightContainsInt(final int totalPartitionCount, RFBloomFilter[] RFBloomFilters, + boolean[] bitmap, boolean isPartitionConsistent) { + int hitCount = 0; + final int positionCount = getPositionCount(); + + if (isPartitionConsistent) { + // Find the consistent partition number. 
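+ // Fast path: every row of this block is known to live in the same physical
+ // partition, so the routing hash is computed once from position 0 and a
+ // single bloom filter is probed for all positions; the else-branch below
+ // re-routes per row instead.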
+ long hashVal = hashCodeUseXxhash(0); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + for (int pos = 0; pos < positionCount; pos++) { + int hashCode = hashCode(pos); + bitmap[pos] = rfBloomFilter.mightContainInt(hashCode); + if (bitmap[pos]) { + hitCount++; + } + } + } else { + + for (int pos = 0; pos < positionCount; pos++) { + long hashVal = hashCodeUseXxhash(pos); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + int hashCode = hashCode(pos); + bitmap[pos] = rfBloomFilter.mightContainInt(hashCode); + if (bitmap[pos]) { + hitCount++; + } + } + } + + return hitCount; + } + + default int mightContainsInt(final int totalPartitionCount, RFBloomFilter[] RFBloomFilters, + boolean[] bitmap, boolean isPartitionConsistent, boolean isConjunctive) { + int hitCount = 0; + final int positionCount = getPositionCount(); + + if (isConjunctive) { + + if (isPartitionConsistent) { + // Find the consistent partition number. + long hashVal = hashCodeUseXxhash(0); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + for (int pos = 0; pos < positionCount; pos++) { + + if (bitmap[pos]) { + int hashCode = hashCode(pos); + bitmap[pos] &= rfBloomFilter.mightContainInt(hashCode); + if (bitmap[pos]) { + hitCount++; + } + } + + } + } else { + + for (int pos = 0; pos < positionCount; pos++) { + + if (bitmap[pos]) { + long hashVal = hashCodeUseXxhash(pos); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + int hashCode = hashCode(pos); + bitmap[pos] &= rfBloomFilter.mightContainInt(hashCode); + if (bitmap[pos]) { + hitCount++; + } + + } + + } + } + + } else { + + if (isPartitionConsistent) { + // Find the consistent partition number. + long hashVal = hashCodeUseXxhash(0); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + for (int pos = 0; pos < positionCount; pos++) { + int hashCode = hashCode(pos); + bitmap[pos] = rfBloomFilter.mightContainInt(hashCode); + if (bitmap[pos]) { + hitCount++; + } + } + } else { + + for (int pos = 0; pos < positionCount; pos++) { + long hashVal = hashCodeUseXxhash(pos); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + int hashCode = hashCode(pos); + bitmap[pos] = rfBloomFilter.mightContainInt(hashCode); + if (bitmap[pos]) { + hitCount++; + } + } + } + + } + + return hitCount; + } + + default void addLongToBloomFilter(final int totalPartitionCount, RFBloomFilter[] RFBloomFilters) { + final int positionCount = getPositionCount(); + for (int pos = 0; pos < positionCount; pos++) { + + // calc physical partition id. + long hashVal = hashCodeUseXxhash(pos); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + + // put hash code. + RFBloomFilters[partition].putLong(getLong(pos)); + } + } + + default void addLongToBloomFilter(RFBloomFilter RFBloomFilter) { + final int positionCount = getPositionCount(); + for (int pos = 0; pos < positionCount; pos++) { + // put hash code. 
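+ // Note the asymmetry with addIntToBloomFilter above: the long path feeds
+ // the raw value from getLong(pos), while the int path inserts hashCode(pos).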
+ RFBloomFilter.putLong(getLong(pos)); + } + } + + default int mightContainsLong(RFBloomFilter RFBloomFilter, boolean[] bitmap) { + return mightContainsLong(RFBloomFilter, bitmap, false); + } + + default int mightContainsLong(RFBloomFilter RFBloomFilter, boolean[] bitmap, boolean isConjunctive) { + int hitCount = 0; + final int positionCount = getPositionCount(); + if (isConjunctive) { + + for (int pos = 0; pos < positionCount; pos++) { + + // Base on the original status in bitmap. + if (bitmap[pos]) { + bitmap[pos] &= RFBloomFilter.mightContainLong(getLong(pos)); + if (bitmap[pos]) { + hitCount++; + } + } + } + + } else { + + for (int pos = 0; pos < positionCount; pos++) { + bitmap[pos] = RFBloomFilter.mightContainLong(getLong(pos)); + if (bitmap[pos]) { + hitCount++; + } + } + + } + + return hitCount; + + } + + default int mightContainsLong(final int totalPartitionCount, RFBloomFilter[] RFBloomFilters, + boolean[] bitmap, boolean isPartitionConsistent) { + int hitCount = 0; + final int positionCount = getPositionCount(); + + if (isPartitionConsistent) { + // Find the consistent partition number. + long hashVal = hashCodeUseXxhash(0); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + for (int pos = 0; pos < positionCount; pos++) { + bitmap[pos] = rfBloomFilter.mightContainLong(getLong(pos)); + if (bitmap[pos]) { + hitCount++; + } + } + } else { + + for (int pos = 0; pos < positionCount; pos++) { + long hashVal = hashCodeUseXxhash(pos); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + bitmap[pos] = rfBloomFilter.mightContainLong(getLong(pos)); + if (bitmap[pos]) { + hitCount++; + } + } + } + + return hitCount; + } + + default int mightContainsLong(final int totalPartitionCount, RFBloomFilter[] RFBloomFilters, + boolean[] bitmap, boolean isPartitionConsistent, boolean isConjunctive) { + int hitCount = 0; + final int positionCount = getPositionCount(); + + if (isConjunctive) { + + if (isPartitionConsistent) { + // Find the consistent partition number. + long hashVal = hashCodeUseXxhash(0); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + for (int pos = 0; pos < positionCount; pos++) { + + if (bitmap[pos]) { + bitmap[pos] &= rfBloomFilter.mightContainLong(getLong(pos)); + if (bitmap[pos]) { + hitCount++; + } + } + + } + } else { + + for (int pos = 0; pos < positionCount; pos++) { + + if (bitmap[pos]) { + long hashVal = hashCodeUseXxhash(pos); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + bitmap[pos] &= rfBloomFilter.mightContainLong(getLong(pos)); + if (bitmap[pos]) { + hitCount++; + } + + } + + } + } + + } else { + + if (isPartitionConsistent) { + // Find the consistent partition number. 
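+ // Conjunctive mode ANDs each probe into the caller's bitmap, so positions
+ // already rejected by an earlier runtime filter are skipped entirely.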
+ long hashVal = hashCodeUseXxhash(0); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + for (int pos = 0; pos < positionCount; pos++) { + bitmap[pos] = rfBloomFilter.mightContainLong(getLong(pos)); + if (bitmap[pos]) { + hitCount++; + } + } + } else { + + for (int pos = 0; pos < positionCount; pos++) { + long hashVal = hashCodeUseXxhash(pos); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + bitmap[pos] = rfBloomFilter.mightContainLong(getLong(pos)); + if (bitmap[pos]) { + hitCount++; + } + } + } + + } + + return hitCount; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlockBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlockBuilder.java index df48e2ecd..36ba400ac 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlockBuilder.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlockBuilder.java @@ -17,6 +17,8 @@ package com.alibaba.polardbx.executor.chunk; import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.executor.mpp.operator.DriverContext; +import com.alibaba.polardbx.executor.operator.util.ObjectPools; import java.math.BigInteger; import java.sql.Blob; @@ -126,4 +128,8 @@ default void ensureCapacity(int capacity) { * Creates a new block builder of the same type with current block size as initial capacity */ BlockBuilder newBlockBuilder(); + + default BlockBuilder newBlockBuilder(ObjectPools objectPools, int chunkLimit) { + return newBlockBuilder(); + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlockBuilders.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlockBuilders.java index 26ebc3a4d..2dc97c762 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlockBuilders.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlockBuilders.java @@ -18,12 +18,16 @@ import com.alibaba.polardbx.common.datatype.Decimal; import com.alibaba.polardbx.common.datatype.UInt64; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.executor.operator.util.ObjectPools; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.DateType; import com.alibaba.polardbx.optimizer.core.datatype.EnumType; import io.airlift.slice.Slice; +import org.apache.orc.impl.TypeUtils; import java.math.BigInteger; import java.sql.Blob; @@ -41,10 +45,16 @@ public abstract class BlockBuilders { private static final int INITIAL_BLOCK_LEN = 4; // initial/max chunk size should be power of 2 - private static final int EXPECTED_STRING_LEN = 20; - private static final int EXPECTED_BYTE_ARRAY_LEN = 50; + public static final int EXPECTED_STRING_LEN = 20; + public static final int EXPECTED_BYTE_ARRAY_LEN = 50; public static BlockBuilder create(DataType type, ExecutionContext context) { + if (context.isEnableOrcRawTypeBlock()) { + // Special block builder for raw orc block builder. + // Only create Long/Double/ByteArrary block. + // Normal table/oss scan should not get there. 
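+ // Raw ORC mode folds every supported MySQL field type into one of three
+ // physical builders (long, double, byte[]); see createRawBlockBuilder below.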
+ return createRawBlockBuilder(type, context.getBlockBuilderCapacity()); + } return create(type, context, context.getBlockBuilderCapacity()); } @@ -95,4 +105,81 @@ public static BlockBuilder create(DataType type, ExecutionContext context, int i throw new AssertionError("data block not implemented"); } + public static BlockBuilder create(DataType type, ExecutionContext context, int initialCapacity, + ObjectPools objectPools) { + // Very special cases e.g. compound type + if (type == null) { + return new ObjectBlockBuilder(initialCapacity); + } + + int chunkLimit = context.getParamManager().getInt(ConnectionParams.CHUNK_SIZE); + + Class clazz = type.getDataClass(); + if (clazz == Integer.class) { + return new IntegerBlockBuilder(initialCapacity, chunkLimit, objectPools.getIntArrayPool()); + } else if (clazz == Long.class) { + return new LongBlockBuilder(initialCapacity, chunkLimit, objectPools.getLongArrayPool()); + } else if (clazz == Date.class) { + return new DateBlockBuilder(initialCapacity, new DateType(), context, objectPools.getLongArrayPool()); + } else { + return create(type, context, initialCapacity); + } + } + + public static BlockBuilder createRawBlockBuilder(DataType type, int initialCapacity) { + // Very special cases e.g. compound type + if (type == null) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + "Found null type when creating block builder for orc raw type"); + } + + switch (type.fieldType()) { + case MYSQL_TYPE_LONGLONG: + case MYSQL_TYPE_SHORT: + case MYSQL_TYPE_INT24: + case MYSQL_TYPE_TINY: + case MYSQL_TYPE_LONG: + case MYSQL_TYPE_YEAR: + case MYSQL_TYPE_DATETIME: + case MYSQL_TYPE_DATETIME2: + case MYSQL_TYPE_TIMESTAMP: + case MYSQL_TYPE_TIMESTAMP2: + case MYSQL_TYPE_DATE: + case MYSQL_TYPE_NEWDATE: + case MYSQL_TYPE_TIME: + case MYSQL_TYPE_TIME2: + case MYSQL_TYPE_BIT: { + // Long block builder. + return new LongBlockBuilder(initialCapacity); + } + case MYSQL_TYPE_FLOAT: + case MYSQL_TYPE_DOUBLE: { + // Double block builder. + return new DoubleBlockBuilder(initialCapacity); + } + case MYSQL_TYPE_DECIMAL: + case MYSQL_TYPE_NEWDECIMAL: { + // Long or ByteArray block builder + if (TypeUtils.isDecimal64Precision(type.getPrecision())) { + return new LongBlockBuilder(initialCapacity); + } else { + return new ByteArrayBlockBuilder(initialCapacity, EXPECTED_BYTE_ARRAY_LEN); + } + } + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_STRING: + case MYSQL_TYPE_VARCHAR: + case MYSQL_TYPE_ENUM: + case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_JSON: { + // ByteArray block builder + return new ByteArrayBlockBuilder(initialCapacity, EXPECTED_BYTE_ARRAY_LEN); + } + + default: + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + "Found invalid type " + type.fieldType() + " when creating block builder for orc raw type"); + } + } + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlockComparator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlockComparator.java new file mode 100644 index 000000000..14b18f651 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlockComparator.java @@ -0,0 +1,95 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.chunk;
+
+import io.airlift.slice.Slice;
+
+public interface BlockComparator<T extends Block, R extends Block> {
+    int compareTo(T b1, int position1, R b2, int position2);
+
+    static int compareTo(Slice slice1, int beginOffset1, int endOffset1, Slice slice2, int beginOffset2,
+                         int endOffset2) {
+        return slice1.compareTo(beginOffset1, endOffset1 - beginOffset1, slice2, beginOffset2,
+            endOffset2 - beginOffset2);
+    }
+
+    BlockComparator<SliceBlock, SliceBlock> SLICE_BLOCK_NO_DICT_SLICE_BLOCK_NO_DICT =
+        (BlockComparator<SliceBlock, SliceBlock>) (b1, position1, b2, position2) ->
+            compareTo(
+                b1.getData(),
+                b1.beginOffsetInner(position1),
+                b1.endOffsetInner(position1),
+                b2.getData(),
+                b2.beginOffsetInner(position2),
+                b2.endOffsetInner(position2)
+            );
+
+    BlockComparator<SliceBlock, SliceBlock> SLICE_BLOCK_DICT_SLICE_BLOCK_NO_DICT =
+        (BlockComparator<SliceBlock, SliceBlock>) (b1, position1, b2, position2) -> {
+            Slice dictValue = b1.getDictValue(position1);
+            return compareTo(
+                dictValue,
+                0, dictValue.length(),
+                b2.getData(),
+                b2.beginOffsetInner(position2),
+                b2.endOffsetInner(position2)
+            );
+        };
+
+    BlockComparator<SliceBlock, SliceBlock> SLICE_BLOCK_NO_DICT_SLICE_BLOCK_DICT =
+        (BlockComparator<SliceBlock, SliceBlock>) (b1, position1, b2, position2) -> {
+            Slice dictValue = b2.getDictValue(position2);
+            return compareTo(
+                b1.getData(),
+                b1.beginOffsetInner(position1),
+                b1.endOffsetInner(position1),
+                dictValue,
+                0,
+                dictValue.length()
+            );
+        };
+
+    BlockComparator<SliceBlock, SliceBlock> SLICE_BLOCK_DICT_SLICE_BLOCK_DICT =
+        (BlockComparator<SliceBlock, SliceBlock>) (b1, position1, b2, position2) -> {
+            Slice dictValue1 = b1.getDictValue(position1);
+            Slice dictValue2 = b2.getDictValue(position2);
+            return dictValue1.compareTo(dictValue2);
+        };
+
+    BlockComparator<SliceBlock, SliceBlockBuilder> SLICE_BLOCK_NO_DICT_SLICE_BLOCK_BUILDER =
+        (BlockComparator<SliceBlock, SliceBlockBuilder>) (b1, position1, b2, position2) ->
+            compareTo(
+                b1.getData(),
+                b1.beginOffsetInner(position1),
+                b1.endOffsetInner(position1),
+                b2.getSliceOutput().getRawSlice(),
+                b2.beginOffset(position2),
+                b2.endOffset(position2)
+            );
+
+    BlockComparator<SliceBlock, SliceBlockBuilder> SLICE_BLOCK_DICT_SLICE_BLOCK_BUILDER =
+        (BlockComparator<SliceBlock, SliceBlockBuilder>) (b1, position1, b2, position2) -> {
+            Slice dictValue = b1.getDictValue(position1);
+            return compareTo(
+                dictValue,
+                0, dictValue.length(),
+                b2.getSliceOutput().getRawSlice(),
+                b2.beginOffset(position2),
+                b2.endOffset(position2)
+            );
+        };
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlockEncodingBuilders.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlockEncodingBuilders.java
index a41ab06f1..1b136df09 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlockEncodingBuilders.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlockEncodingBuilders.java
@@ -18,9 +18,13 @@
 
 import com.alibaba.polardbx.common.datatype.Decimal;
 import com.alibaba.polardbx.common.datatype.UInt64;
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.datatype.DataType;
 import 
com.alibaba.polardbx.optimizer.core.datatype.EnumType; import io.airlift.slice.Slice; +import org.apache.orc.impl.TypeUtils; import java.math.BigInteger; import java.sql.Blob; @@ -33,7 +37,14 @@ public abstract class BlockEncodingBuilders { - public static List create(List types) { + public static List create(List types, ExecutionContext context) { + if (null != context && context.isEnableOrcRawTypeBlock()) { + // Special encoding for raw orc block builder. + // Only Long/Double/ByteArray blocks are created. + // Normal query should not get there. + return createBlockEncodingForRawOrcType(types); + } + // Very special cases e.g. compound type if (types == null || types.isEmpty()) { throw new IllegalArgumentException("types is empty!"); @@ -84,4 +95,65 @@ public static List create(List types) { return blockEncodingList; } + private static List createBlockEncodingForRawOrcType(List types) { + if (types == null || types.isEmpty()) { + throw new IllegalArgumentException("types is empty!"); + } + List blockEncodingList = new ArrayList<>(); + for (DataType type : types) { + switch (type.fieldType()) { + case MYSQL_TYPE_LONGLONG: + case MYSQL_TYPE_SHORT: + case MYSQL_TYPE_INT24: + case MYSQL_TYPE_TINY: + case MYSQL_TYPE_LONG: + case MYSQL_TYPE_YEAR: + case MYSQL_TYPE_DATETIME: + case MYSQL_TYPE_DATETIME2: + case MYSQL_TYPE_TIMESTAMP: + case MYSQL_TYPE_TIMESTAMP2: + case MYSQL_TYPE_DATE: + case MYSQL_TYPE_NEWDATE: + case MYSQL_TYPE_TIME: + case MYSQL_TYPE_TIME2: + case MYSQL_TYPE_BIT: { + // Long block encoder. + blockEncodingList.add(new LongBlockEncoding()); + break; + } + case MYSQL_TYPE_FLOAT: + case MYSQL_TYPE_DOUBLE: { + // Double block encoder. + blockEncodingList.add(new DoubleBlockEncoding()); + break; + } + case MYSQL_TYPE_DECIMAL: + case MYSQL_TYPE_NEWDECIMAL: { + // Long or ByteArray block encoder + if (TypeUtils.isDecimal64Precision(type.getPrecision())) { + blockEncodingList.add(new LongBlockEncoding()); + } else { + blockEncodingList.add(new ByteArrayBlockEncoding()); + } + break; + } + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_STRING: + case MYSQL_TYPE_VARCHAR: + case MYSQL_TYPE_ENUM: + case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_JSON: { + // ByteArray block encoder + blockEncodingList.add(new ByteArrayBlockEncoding()); + break; + } + + default: + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + "Found invalid type " + type.fieldType() + " when creating block builder for orc raw type"); + } + } + return blockEncodingList; + } + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlockUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlockUtils.java index 37ae04e0a..a68eb2b49 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlockUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BlockUtils.java @@ -18,8 +18,12 @@ import com.alibaba.polardbx.common.datatype.Decimal; import com.alibaba.polardbx.common.datatype.UInt64; +import com.alibaba.polardbx.executor.operator.util.ObjectPools; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import java.util.Arrays; +import java.util.TimeZone; + public class BlockUtils { public static RandomAccessBlock createBlock(DataType dataType, int positionCount) { Class clazz = dataType.getDataClass(); @@ -46,6 +50,32 @@ public static RandomAccessBlock createBlock(DataType dataType, int positi } } + public static RandomAccessBlock createBlock(DataType dataType, int positionCount, ObjectPools objectPools, + int 
chunkLimit) {
+        Class clazz = dataType.getDataClass();
+        if (clazz == Byte.class) {
+            return new ByteBlock(dataType, positionCount);
+        } else if (clazz == Short.class) {
+            return new ShortBlock(dataType, positionCount);
+        } else if (clazz == Integer.class) {
+            return new IntegerBlock(dataType, positionCount, objectPools.getIntArrayPool(), chunkLimit);
+        } else if (clazz == Long.class) {
+            return new LongBlock(dataType, positionCount, objectPools.getLongArrayPool(), chunkLimit);
+        } else if (clazz == Float.class) {
+            return new FloatBlock(dataType, positionCount);
+        } else if (clazz == Double.class) {
+            return new DoubleBlock(dataType, positionCount);
+        } else if (clazz == Boolean.class) {
+            return new BooleanBlock(dataType, positionCount);
+        } else if (clazz == Decimal.class) {
+            return new DecimalBlock(dataType, positionCount, objectPools.getLongArrayPool(), chunkLimit);
+        } else if (clazz == UInt64.class) {
+            return new ULongBlock(dataType, positionCount);
+        } else {
+            return new ReferenceBlock(dataType, positionCount);
+        }
+    }
+
     public static void copySelectedInCommon(boolean selectedInUse, int[] sel, int size, RandomAccessBlock srcVector,
                                             RandomAccessBlock dstVector) {
         DataType dataType = dstVector.getType();
@@ -61,4 +91,211 @@ public static void copySelectedInCommon(boolean selectedInUse, int[] sel, int si
         }
     }
 
+    public static AbstractBlock fillSelection(Block result, int[] selection, int selSize,
+                                              boolean useSelection, boolean enableCompatible, TimeZone timeZone) {
+        if (result instanceof SliceBlock) {
+
+            // case 1: for varchar / char type with dictionary / direct encoding.
+            // need compatible flag from execution context
+            return SliceBlock.from(result.cast(SliceBlock.class), selSize, selection, enableCompatible, useSelection);
+        } else if (result instanceof DateBlock) {
+
+            // case 2: for date type.
+            return DateBlock.from(result.cast(DateBlock.class), selSize, selection, useSelection);
+        } else if (result instanceof IntegerBlock) {
+
+            // case 3: for integer / short unsigned / int24 unsigned type.
+            return IntegerBlock.from(result.cast(IntegerBlock.class), selSize, selection, useSelection);
+
+        } else if (result instanceof DecimalBlock) {
+
+            // case 4. for decimal type in decimal_64 mode and normal mode.
+            return DecimalBlock.from(result.cast(DecimalBlock.class), selSize, selection, useSelection);
+        } else if (result instanceof LongBlock) {
+
+            // case 5. for bigint / int unsigned type.
+            return LongBlock.from(result.cast(LongBlock.class), selSize, selection, useSelection);
+
+        } else if (result instanceof TimestampBlock) {
+
+            // case 6. timestamp type
+            return TimestampBlock.from(result.cast(TimestampBlock.class), selSize, selection, useSelection, timeZone);
+        } else {
+            if (useSelection) {
+                throw new UnsupportedOperationException("Unsupported block "
+                    + result.getClass() + " with useSelection");
+            }
+            if (result instanceof BlobBlock) {
+                return BlobBlock.from(result.cast(BlobBlock.class), selSize, selection);
+            }
+            if (result instanceof ByteBlock) {
+                return ByteBlock.from(result.cast(ByteBlock.class), selSize, selection);
+            }
+            if (result instanceof DoubleBlock) {
+                return DoubleBlock.from(result.cast(DoubleBlock.class), selSize, selection);
+            }
+            if (result instanceof EnumBlock) {
+                return EnumBlock.from(result.cast(EnumBlock.class), selSize, selection);
+            }
+            if (result instanceof FloatBlock) {
+                return FloatBlock.from(result.cast(FloatBlock.class), selSize, selection);
+            }
+            if (result instanceof ShortBlock) {
+                return ShortBlock.from(result.cast(ShortBlock.class), selSize, selection);
+            }
+            if (result instanceof StringBlock) {
+                return StringBlock.from(result.cast(StringBlock.class), selSize, selection);
+            }
+            if (result instanceof TimeBlock) {
+                return TimeBlock.from(result.cast(TimeBlock.class), selSize, selection);
+            }
+            if (result instanceof ULongBlock) {
+                return ULongBlock.from(result.cast(ULongBlock.class), selSize, selection);
+            }
+            if (result instanceof BigIntegerBlock) {
+                return BigIntegerBlock.from(result.cast(BigIntegerBlock.class), selSize, selection);
+            }
+            if (result instanceof ByteArrayBlock) {
+                return ByteArrayBlock.from(result.cast(ByteArrayBlock.class), selSize, selection);
+            }
+            throw new UnsupportedOperationException("Unsupported block "
+                + result.getClass() + " with selection copy");
+        }
+    }
+
+    public static AbstractBlock wrapNullSelection(AbstractBlock result, boolean useSelection,
+                                                  boolean enableCompatible, TimeZone timeZone) {
+        if (result instanceof SliceBlock) {
+            // case 1: for varchar / char type with dictionary / direct encoding.
+            // need compatible flag from execution context
+            return SliceBlock.from(result.cast(SliceBlock.class), result.getPositionCount(), null, enableCompatible,
+                useSelection);
+        } else if (result instanceof DateBlock) {
+            // case 2: for date type.
+            return DateBlock.from(result.cast(DateBlock.class), result.getPositionCount(), null, useSelection);
+        } else if (result instanceof IntegerBlock) {
+            // case 3: for integer / short unsigned / int24 unsigned type.
+            return IntegerBlock.from(result.cast(IntegerBlock.class), result.getPositionCount(), null, useSelection);
+        } else if (result instanceof DecimalBlock) {
+            // case 4. for decimal type in decimal_64 mode and normal mode.
+            return DecimalBlock.from(result.cast(DecimalBlock.class), result.getPositionCount(), null, useSelection);
+        } else if (result instanceof LongBlock) {
+            // case 5. for bigint / int unsigned type.
+            return LongBlock.from(result.cast(LongBlock.class), result.getPositionCount(), null, useSelection);
+        } else if (result instanceof TimestampBlock) {
+            // case 6. 
timestamp type + return TimestampBlock.from(result.cast(TimestampBlock.class), result.getPositionCount(), null, + useSelection, timeZone); + } else { + return result; + } + } + + public static int[] copyIntArray(int[] values, int[] selection, int positionCount) { + if (values == null) { + return null; + } + if (selection == null) { + return Arrays.copyOf(values, positionCount); + } else { + int[] target = new int[positionCount]; + for (int i = 0; i < positionCount; i++) { + target[i] = values[selection[i]]; + } + return target; + } + } + + public static boolean[] copyNullArray(boolean[] values, int[] selection, int positionCount) { + if (values == null) { + return null; + } + if (selection == null) { + return Arrays.copyOf(values, positionCount); + } else { + boolean[] target = new boolean[positionCount]; + boolean hasNull = false; + for (int i = 0; i < positionCount; i++) { + target[i] = values[selection[i]]; + hasNull |= target[i]; + } + // NOTE: destroy the boolean array if it does not have null. + return hasNull ? target : null; + } + } + + public static long[] copyLongArray(long[] values, int[] selection, int positionCount) { + if (values == null) { + return null; + } + if (selection == null) { + return Arrays.copyOf(values, positionCount); + } else { + long[] target = new long[positionCount]; + for (int i = 0; i < positionCount; i++) { + target[i] = values[selection[i]]; + } + return target; + } + } + + public static byte[] copyByteArray(byte[] values, int[] selection, int positionCount) { + if (values == null) { + return null; + } + if (selection == null) { + return Arrays.copyOf(values, positionCount); + } else { + byte[] target = new byte[positionCount]; + for (int i = 0; i < positionCount; i++) { + target[i] = values[selection[i]]; + } + return target; + } + } + + public static double[] copyDoubleArray(double[] values, int[] selection, int positionCount) { + if (values == null) { + return null; + } + if (selection == null) { + return Arrays.copyOf(values, positionCount); + } else { + double[] target = new double[positionCount]; + for (int i = 0; i < positionCount; i++) { + target[i] = values[selection[i]]; + } + return target; + } + } + + public static float[] copyFloatArray(float[] values, int[] selection, int positionCount) { + if (values == null) { + return null; + } + if (selection == null) { + return Arrays.copyOf(values, positionCount); + } else { + float[] target = new float[positionCount]; + for (int i = 0; i < positionCount; i++) { + target[i] = values[selection[i]]; + } + return target; + } + } + + public static short[] copyShortArray(short[] values, int[] selection, int positionCount) { + if (values == null) { + return null; + } + if (selection == null) { + return Arrays.copyOf(values, positionCount); + } else { + short[] target = new short[positionCount]; + for (int i = 0; i < positionCount; i++) { + target[i] = values[selection[i]]; + } + return target; + } + } } \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BooleanBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BooleanBlock.java index 3b7bc33d9..bcded22cb 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BooleanBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/BooleanBlock.java @@ -17,10 +17,12 @@ package com.alibaba.polardbx.executor.chunk; import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.XxhashUtils; import 
com.alibaba.polardbx.optimizer.core.datatype.BooleanType; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.google.common.base.Preconditions; +import io.airlift.slice.XxHash64; import org.openjdk.jol.info.ClassLayout; import static com.alibaba.polardbx.common.utils.memory.SizeOf.sizeOf; @@ -80,6 +82,15 @@ public int hashCode(int position) { return Boolean.hashCode(values[position + arrayOffset]); } + @Override + public long hashCodeUseXxhash(int pos) { + if (isNull(pos)) { + return NULL_HASH_CODE; + } else { + return XxhashUtils.finalShuffle(values[pos + arrayOffset] ? 1L : 0L); + } + } + @Override public int[] hashCodeVector() { if (mayHaveNull()) { @@ -93,6 +104,18 @@ public int[] hashCodeVector() { return hashes; } + @Override + public void hashCodeVector(int[] results, int positionCount) { + if (mayHaveNull()) { + super.hashCodeVector(results, positionCount); + return; + } + + for (int position = 0; position < positionCount; position++) { + results[position] = Boolean.hashCode(values[position + arrayOffset]); + } + } + @Override public boolean equals(int position, Block other, int otherPosition) { boolean n1 = isNull(position); @@ -112,7 +135,7 @@ public boolean equals(int position, Block other, int otherPosition) { @Override public void copySelected(boolean selectedInUse, int[] sel, int size, RandomAccessBlock output) { if (output instanceof BooleanBlock) { - BooleanBlock outputVector = (BooleanBlock) output; + BooleanBlock outputVector = output.cast(BooleanBlock.class); if (selectedInUse) { for (int i = 0; i < size; i++) { int j = sel[i]; @@ -133,7 +156,7 @@ public void shallowCopyTo(RandomAccessBlock another) { if (!(another instanceof BooleanBlock)) { GeneralUtil.nestedException("Cannot shallow copy to " + another); } - BooleanBlock vectorSlot = (BooleanBlock) another; + BooleanBlock vectorSlot = another.cast(BooleanBlock.class); super.shallowCopyTo(vectorSlot); vectorSlot.values = values; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ByteArrayBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ByteArrayBlock.java index ceb16c9c0..a00bbe557 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ByteArrayBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ByteArrayBlock.java @@ -17,6 +17,7 @@ package com.alibaba.polardbx.executor.chunk; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import io.airlift.slice.XxHash64; import org.openjdk.jol.info.ClassLayout; import java.util.Arrays; @@ -31,7 +32,11 @@ public class ByteArrayBlock extends AbstractCommonBlock { private static final long INSTANCE_SIZE = ClassLayout.parseClass(ByteArrayBlock.class).instanceSize(); private final int[] offsets; - private final byte[] data; + private byte[] data; + + public ByteArrayBlock(int positionCount) { + this(0, positionCount, new boolean[positionCount], new int[positionCount], null); + } ByteArrayBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, int[] offsets, byte[] data) { super(DataTypes.BytesType, positionCount, valueIsNull, valueIsNull != null); @@ -40,6 +45,34 @@ public class ByteArrayBlock extends AbstractCommonBlock { updateSizeInfo(); } + public static ByteArrayBlock from(ByteArrayBlock other, int selSize, int[] selection) { + int[] offsets = null; + byte[] data = null; + if (other.data != null) { + if (selection == null) { + offsets = 
Arrays.copyOf(other.offsets, selSize); + data = Arrays.copyOf(other.data, other.endOffset(selSize)); + } else { + offsets = new int[selSize]; + for (int i = 0; i < selSize; i++) { + int dataLength = other.endOffset(selection[i]) - other.beginOffset(selection[i]); + offsets[i] = dataLength + (i == 0 ? 0 : offsets[i - 1]); + } + data = new byte[offsets[selSize - 1]]; + for (int i = 0; i < selSize; i++) { + int beginOffset = other.beginOffset(selection[i]); + int dataLength = other.endOffset(selection[i]) - beginOffset; + System.arraycopy(other.data, beginOffset, data, i == 0 ? 0 : offsets[i - 1], dataLength); + } + } + } + return new ByteArrayBlock(0, + selSize, + BlockUtils.copyNullArray(other.isNull, selection, selSize), + offsets, + data); + } + @Override public byte[] getByteArray(int position) { checkReadablePosition(position); @@ -85,10 +118,19 @@ public int hashCode(int position) { return ChunkUtil.hashCode(data, beginOffset(position), endOffset(position)); } + @Override + public long hashCodeUseXxhash(int pos) { + if (isNull(pos)) { + return NULL_HASH_CODE; + } else { + return XxHash64.hash(data, beginOffset(pos), endOffset(pos) - beginOffset(pos)); + } + } + @Override public boolean equals(int position, Block other, int otherPosition) { if (other instanceof ByteArrayBlock) { - return equals(position, (ByteArrayBlock) other, otherPosition); + return equals(position, other.cast(ByteArrayBlock.class), otherPosition); } else if (other instanceof ByteArrayBlockBuilder) { return equals(position, (ByteArrayBlockBuilder) other, otherPosition); } else { @@ -142,6 +184,10 @@ public byte[] getData() { return data; } + public void setData(byte[] data) { + this.data = data; + } + @Override public void updateSizeInfo() { estimatedSize = INSTANCE_SIZE + sizeOf(isNull) + sizeOf(data) + sizeOf(offsets); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ByteArrayBlockEncoding.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ByteArrayBlockEncoding.java index cd2589e7c..4e9ee72c9 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ByteArrayBlockEncoding.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ByteArrayBlockEncoding.java @@ -47,7 +47,7 @@ public String getName() { @Override public void writeBlock(SliceOutput sliceOutput, Block block) { - ByteArrayBlock byteArrayBlock = (ByteArrayBlock) block; + ByteArrayBlock byteArrayBlock = block.cast(ByteArrayBlock.class); int positionCount = block.getPositionCount(); sliceOutput.appendInt(positionCount); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ByteBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ByteBlock.java index 712dd5bde..8e69c6d6b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ByteBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ByteBlock.java @@ -17,6 +17,8 @@ package com.alibaba.polardbx.executor.chunk; import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.XxhashUtils; +import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; @@ -46,6 +48,13 @@ public ByteBlock(DataType dataType, int slotLen) { updateSizeInfo(); } + public static ByteBlock from(ByteBlock other, 
int selSize, int[] selection) { + return new ByteBlock(0, + selSize, + BlockUtils.copyNullArray(other.isNull, selection, selSize), + BlockUtils.copyByteArray(other.values, selection, selSize)); + } + @Override public byte getByte(int position) { checkReadablePosition(position); @@ -101,6 +110,15 @@ public int hashCode(int position) { return Byte.hashCode(values[position + arrayOffset]); } + @Override + public long hashCodeUseXxhash(int pos) { + if (isNull(pos)) { + return NULL_HASH_CODE; + } else { + return XxhashUtils.finalShuffle(values[pos + arrayOffset]); + } + } + @Override public int[] hashCodeVector() { if (mayHaveNull()) { @@ -114,6 +132,17 @@ public int[] hashCodeVector() { return hashes; } + @Override + public void hashCodeVector(int[] results, int positionCount) { + if (mayHaveNull()) { + super.hashCodeVector(results, positionCount); + return; + } + for (int position = 0; position < positionCount; position++) { + results[position] = Byte.hashCode(values[position + arrayOffset]); + } + } + @Override public DataType getType() { return DataTypes.ByteType; @@ -122,7 +151,7 @@ public DataType getType() { @Override public void copySelected(boolean selectedInUse, int[] sel, int size, RandomAccessBlock output) { if (output instanceof ByteBlock) { - ByteBlock outputVectorSlot = (ByteBlock) output; + ByteBlock outputVectorSlot = output.cast(ByteBlock.class); if (selectedInUse) { for (int i = 0; i < size; i++) { int j = sel[i]; @@ -143,7 +172,7 @@ public void shallowCopyTo(RandomAccessBlock another) { if (!(another instanceof ByteBlock)) { GeneralUtil.nestedException("cannot shallow copy to " + another == null ? null : another.toString()); } - ByteBlock vectorSlot = (ByteBlock) another; + ByteBlock vectorSlot = another.cast(ByteBlock.class); super.shallowCopyTo(vectorSlot); vectorSlot.values = values; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/CastableBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/CastableBlock.java new file mode 100644 index 000000000..6c931198b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/CastableBlock.java @@ -0,0 +1,33 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.executor.chunk;
+
+import com.alibaba.polardbx.common.utils.GeneralUtil;
+
+public interface CastableBlock {
+    default <T> T cast(Class<T> clazz) {
+        if (clazz.isInstance(this)) {
+            return (T) this;
+        }
+        throw GeneralUtil.nestedException(new ClassCastException(
+            "failed to cast " + this.getClass().getName() + " to " + clazz.getName()));
+    }
+
+    default boolean isInstanceOf(Class<?> clazz) {
+        return clazz.isInstance(this);
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/Chunk.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/Chunk.java
index ea0901d19..818a157bc 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/Chunk.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/Chunk.java
@@ -18,14 +18,15 @@
 
 import com.alibaba.polardbx.common.utils.hash.HashResult128;
 import com.alibaba.polardbx.common.utils.hash.IStreamingHasher;
+import com.alibaba.polardbx.executor.chunk.columnar.CommonLazyBlock;
+import com.alibaba.polardbx.executor.operator.util.VectorUtils;
 import com.alibaba.polardbx.optimizer.core.row.AbstractRow;
 import com.alibaba.polardbx.optimizer.core.row.Row;
-import com.alibaba.polardbx.optimizer.utils.VectorUtils;
-
-import com.google.common.hash.HashCode;
+import com.google.common.annotations.VisibleForTesting;
 import org.openjdk.jol.info.ClassLayout;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Iterator;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicLongFieldUpdater;
@@ -63,6 +64,18 @@ public class Chunk implements Iterable {
      */
     protected boolean selectionInUse;
 
+    /**
+     * partition index in the storage layer, used by local partition-wise join;
+     * the default -1 means no partition info
+     */
+    private int partIndex = -1;
+
+    /**
+     * number of partitions scheduled to this compute node, used by local partition-wise join;
+     * the default -1 means no partition info
+     */
+    private int partCount = -1;
+
     public Chunk(int positionCount, Block... blocks) {
         this.positionCount = positionCount;
         this.blocks = blocks;
@@ -89,6 +102,10 @@ public Chunk(int[] selection, Block[] slots) {
         }
     }
 
+    public Block[] getBlocks() {
+        return blocks;
+    }
+
     public Block getBlock(int i) {
         return blocks[i];
     }
@@ -121,6 +138,28 @@ public int[] hashCodeVector() {
         return h;
     }
 
+    public void hashCodeVector(int[] hashCodeResults, int[] intermediates, int[] blockHashCodes, int positionCount) {
+        if (blocks.length == 1) {
+            // short circuit for single block
+            blocks[0].hashCodeVector(hashCodeResults, positionCount);
+            return;
+        }
+
+        Arrays.fill(hashCodeResults, 0);
+        Arrays.fill(intermediates, 0);
+        Arrays.fill(blockHashCodes, 0);
+        for (int c = 0; c < getBlockCount(); c++) {
+            // overwrite intermediates array.
+            VectorUtils.multiplyInt(intermediates, hashCodeResults, 31, positionCount);
+
+            // overwrite blockHashCodes array.
+            blocks[c].hashCodeVector(blockHashCodes, positionCount);
+
+            // overwrite hashCodeResults array. 
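+            // Per-position equivalent of the vectorized multiply/add pair used in this
+            // loop (illustration only, matching the scalar hashCode(int) combine below):
+            //   for (int pos = 0; pos < positionCount; pos++) {
+            //       hashCodeResults[pos] = hashCodeResults[pos] * 31 + blockHashCodes[pos];
+            //   }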
+ VectorUtils.addInt(hashCodeResults, intermediates, blockHashCodes, positionCount); + } + } + public int hashCode(int position) { int h = 0; for (int c = 0; c < getBlockCount(); c++) { @@ -129,11 +168,24 @@ public int hashCode(int position) { return h; } + @VisibleForTesting + public long hashCodeUseXxhash(int position) { + long h = 0; + for (int c = 0; c < getBlockCount(); c++) { + h = h * 31 + blocks[c].hashCodeUseXxhash(position); + } + return h; + } + public boolean equals(int position, Chunk otherChunk, int otherPosition) { assert getBlockCount() == otherChunk.getBlockCount(); for (int i = 0; i < getBlockCount(); ++i) { final Block block = blocks[i]; - final Block otherBlock = otherChunk.blocks[i]; + Block otherBlock = otherChunk.blocks[i]; + if (otherBlock instanceof CommonLazyBlock) { + ((CommonLazyBlock) otherBlock).load(); + otherBlock = ((CommonLazyBlock) otherBlock).getLoaded(); + } if (!block.equals(position, otherBlock, otherPosition)) { return false; } @@ -280,4 +332,28 @@ public void setSelection(int[] newSel) { this.selection = newSel; } + public int getPartIndex() { + return partIndex; + } + + public void setPartIndex(int partIndex) { + this.partIndex = partIndex; + } + + public int getPartCount() { + return partCount; + } + + public void setPartCount(int partCount) { + this.partCount = partCount; + } + + public void recycle() { + for (int blockIndex = 0; blockIndex < blocks.length; blockIndex++) { + Block block = blocks[blockIndex]; + if (block.isRecyclable()) { + blocks[blockIndex].recycle(); + } + } + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ChunkBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ChunkBuilder.java index 67fe02796..903377e09 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ChunkBuilder.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ChunkBuilder.java @@ -17,15 +17,14 @@ package com.alibaba.polardbx.executor.chunk; import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.executor.mpp.operator.DriverContext; +import com.alibaba.polardbx.executor.operator.util.BatchBlockWriter; +import com.alibaba.polardbx.executor.operator.util.ObjectPools; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.optimizer.core.datatype.DecimalType; -import com.alibaba.polardbx.optimizer.core.datatype.SliceType; +import it.unimi.dsi.fastutil.ints.IntArrayList; -import java.util.Arrays; import java.util.List; -import java.util.stream.IntStream; public class ChunkBuilder { @@ -34,20 +33,53 @@ public class ChunkBuilder { private int declaredPositions; private final int chunkLimit; private ExecutionContext context; - private final boolean enableDelay; + private final boolean enableBlockBuilderBatchWriting; private final boolean enableOssCompatible; + private final boolean useBlockWriter; + private ObjectPools objectPools; + + public ChunkBuilder(List types, int chunkLimit, ExecutionContext context, ObjectPools objectPools) { + this.types = types; + this.context = context; + this.objectPools = objectPools; + + this.enableBlockBuilderBatchWriting = + context == null ? false : + context.getParamManager().getBoolean(ConnectionParams.ENABLE_BLOCK_BUILDER_BATCH_WRITING); + this.useBlockWriter = + context == null ? 
false : context.getParamManager().getBoolean(ConnectionParams.ENABLE_DRIVER_OBJECT_POOL); + + this.enableOssCompatible = context == null ? true : context.isEnableOssCompatible(); + blockBuilders = new BlockBuilder[types.size()]; + if (useBlockWriter && objectPools != null) { + for (int i = 0; i < blockBuilders.length; i++) { + blockBuilders[i] = BatchBlockWriter.create(types.get(i), context, chunkLimit, objectPools); + } + } else { + for (int i = 0; i < blockBuilders.length; i++) { + blockBuilders[i] = BlockBuilders.create(types.get(i), context); + } + } + + this.chunkLimit = chunkLimit; + } public ChunkBuilder(List types, int chunkLimit, ExecutionContext context) { this.types = types; this.context = context; - this.enableDelay = context == null ? false : context.isEnableOssDelayMaterializationOnExchange(); + this.useBlockWriter = false; + this.enableBlockBuilderBatchWriting = + context == null ? false : + context.getParamManager().getBoolean(ConnectionParams.ENABLE_BLOCK_BUILDER_BATCH_WRITING); this.enableOssCompatible = context == null ? true : context.isEnableOssCompatible(); blockBuilders = new BlockBuilder[types.size()]; + for (int i = 0; i < blockBuilders.length; i++) { blockBuilders[i] = BlockBuilders.create(types.get(i), context); } + this.chunkLimit = chunkLimit; } @@ -84,127 +116,38 @@ public void reset() { declaredPositions = 0; - for (int i = 0; i < blockBuilders.length; i++) { - blockBuilders[i] = blockBuilders[i].newBlockBuilder(); + if (useBlockWriter && objectPools != null) { + for (int i = 0; i < blockBuilders.length; i++) { + blockBuilders[i] = blockBuilders[i].newBlockBuilder(objectPools, chunkLimit); + } + } else { + for (int i = 0; i < blockBuilders.length; i++) { + blockBuilders[i] = blockBuilders[i].newBlockBuilder(); + } } + } - public void appendTo(Block block, int channel, int position) { + public void appendTo(Block block, int channel, int[] selection, final int offsetInSelection, + final int positionCount) { BlockBuilder blockBuilder = blockBuilders[channel]; - if (block.isNull(position)) { - blockBuilder.appendNull(); + if (blockBuilder instanceof BatchBlockWriter) { + ((BatchBlockWriter) blockBuilder).copyBlock(block, selection, offsetInSelection, 0, positionCount); } else { - block.writePositionTo(position, blockBuilder); - } - } - - public Chunk fromPartition(List assignedPositions, Chunk sourceChunk) { - if (assignedPositions.isEmpty()) { - return null; - } - - final int sourceChunkLimit = assignedPositions.size(); - // pre-unbox - int[] positions = assignedPositions.stream().mapToInt(i -> i).toArray(); - // for delay materialization - int selSize = sourceChunkLimit; - int[] newSelection; - - Block[] targetBlocks = new Block[sourceChunk.getBlockCount()]; - for (int channel = 0; channel < sourceChunk.getBlockCount(); channel++) { - Block sourceBlock = sourceChunk.getBlock(channel); - if (enableDelay - && (sourceBlock instanceof SliceBlock) - && selSize <= sourceBlock.getPositionCount()) { - SliceBlock sliceBlock = (SliceBlock) sourceBlock; - - if (sliceBlock.getSelection() != null) { - int[] oldSelection = sliceBlock.getSelection(); - newSelection = new int[selSize]; - for (int position = 0; position < selSize; position++) { - newSelection[position] = oldSelection[positions[position]]; - } - } else { - newSelection = positions; - } - - // delay for slice block - targetBlocks[channel] - = new SliceBlock((SliceType) ((SliceBlock) sourceBlock).getType(), 0, selSize, - ((SliceBlock) sourceBlock).nulls(), ((SliceBlock) sourceBlock).offsets(), - ((SliceBlock) 
sourceBlock).data(), newSelection, enableOssCompatible); - - } else if (enableDelay - && (sourceBlock instanceof DecimalBlock) - && selSize <= sourceBlock.getPositionCount()) { - DecimalBlock decimalBlock = (DecimalBlock) sourceBlock; - - if (decimalBlock.getSelection() != null) { - int[] oldSelection = decimalBlock.getSelection(); - newSelection = new int[selSize]; - for (int position = 0; position < selSize; position++) { - newSelection[position] = oldSelection[positions[position]]; - } - } else { - newSelection = positions; - } - - // delay for decimal block - targetBlocks[channel] - = new DecimalBlock(DataTypes.DecimalType, decimalBlock.getMemorySegments(), - decimalBlock.nulls(), decimalBlock.hasNull(), selSize, - newSelection, decimalBlock.getState()); - } else if (enableDelay - && (sourceBlock instanceof DateBlock) - && selSize <= sourceBlock.getPositionCount()) { - DateBlock dateBlock = (DateBlock) sourceBlock; - - if (dateBlock.getSelection() != null) { - int[] oldSelection = dateBlock.getSelection(); - newSelection = new int[selSize]; - for (int position = 0; position < selSize; position++) { - newSelection[position] = oldSelection[positions[position]]; - } - } else { - newSelection = positions; - } - - // delay for date block - targetBlocks[channel] - = new DateBlock(0, selSize, - dateBlock.nulls(), dateBlock.getPacked(), dateBlock.getType(), dateBlock.getTimezone(), - newSelection); - - } else if (enableDelay - && (sourceBlock instanceof IntegerBlock) - && selSize <= sourceBlock.getPositionCount()) { - IntegerBlock integerBlock = (IntegerBlock) sourceBlock; - - if (integerBlock.getSelection() != null) { - int[] oldSelection = integerBlock.getSelection(); - newSelection = new int[selSize]; - for (int position = 0; position < selSize; position++) { - newSelection[position] = oldSelection[positions[position]]; - } - } else { - newSelection = positions; - } - - // delay for date block - targetBlocks[channel] - = new IntegerBlock(integerBlock.getType(), integerBlock.intArray(), integerBlock.nulls(), - integerBlock.hasNull(), selSize, newSelection); + if (enableBlockBuilderBatchWriting) { + block.writePositionTo(selection, offsetInSelection, positionCount, blockBuilder); } else { - // normal - for (int position : positions) { - sourceBlock.writePositionTo(position, blockBuilders[channel]); + for (int i = 0; i < positionCount; i++) { + block.writePositionTo(selection[i + offsetInSelection], blockBuilder); } - targetBlocks[channel] = blockBuilders[channel].build(); } } + } - declarePosition(sourceChunkLimit); - return new Chunk(sourceChunkLimit, targetBlocks); + public void appendTo(Block block, int channel, int position) { + BlockBuilder blockBuilder = blockBuilders[channel]; + // appendNull may have additional operations in certain block types + block.writePositionTo(position, blockBuilder); } public DataType getType(int channel) { @@ -215,6 +158,10 @@ public BlockBuilder getBlockBuilder(int channel) { return blockBuilders[channel]; } + public void updateDeclarePosition(int update) { + declaredPositions += update; + } + public void declarePosition() { declaredPositions++; } @@ -223,6 +170,10 @@ public void declarePosition(int positionCount) { declaredPositions = positionCount; } + public int getDeclarePosition() { + return declaredPositions; + } + public BlockBuilder[] getBlockBuilders() { return blockBuilders; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ChunkConverter.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ChunkConverter.java
index c81e2d705..7774b548e 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ChunkConverter.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ChunkConverter.java
@@ -43,4 +43,8 @@ public Chunk apply(Chunk chunk) {
         }
         return new Chunk(blocks);
     }
+
+    public int columnWidth() {
+        return columnIndexes.length;
+    }
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/Converters.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/Converters.java
index 227d8bbf5..7c44f9ad7 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/Converters.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/Converters.java
@@ -94,6 +94,10 @@ public static ChunkConverter createChunkConverter(List sourceTypes, Li
     public static BlockConverter createBlockConverter(DataType sourceType, DataType targetType,
                                                       ExecutionContext context) {
+        if (context.isEnableOrcRawTypeBlock()) {
+            // Raw types do not need to be converted.
+            return BlockConverter.IDENTITY;
+        }
         if (DataTypeUtil.equalsSemantically(sourceType, targetType)) {
             if (sourceType instanceof SliceType
                 && targetType instanceof SliceType
@@ -144,7 +148,7 @@ private static class CollationConverter implements BlockConverter {
         @Override
         public Block apply(Block block) {
             if (block instanceof SliceBlock) {
-                ((SliceBlock) block).resetCollation(collationName);
+                block.cast(SliceBlock.class).resetCollation(collationName);
             }
             return block;
         }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DateBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DateBlock.java
index e5aeaaa3e..d30024e14 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DateBlock.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DateBlock.java
@@ -16,13 +16,19 @@
 
 package com.alibaba.polardbx.executor.chunk;
 
+import com.alibaba.polardbx.common.utils.XxhashUtils;
+import com.alibaba.polardbx.common.utils.hash.IStreamingHasher;
 import com.alibaba.polardbx.common.utils.hash.IStreamingHasher;
 import com.alibaba.polardbx.common.utils.time.core.MysqlDateTime;
 import com.alibaba.polardbx.common.utils.time.core.OriginalDate;
 import com.alibaba.polardbx.common.utils.time.core.TimeStorage;
+import com.alibaba.polardbx.executor.operator.util.BatchBlockWriter;
 import com.alibaba.polardbx.optimizer.core.datatype.DataType;
+import com.alibaba.polardbx.optimizer.core.datatype.DataTypes;
 import com.google.common.base.Preconditions;
+import io.airlift.slice.SliceOutput;
+import io.airlift.slice.XxHash64;
 import org.openjdk.jol.info.ClassLayout;
 
 import java.sql.Date;
@@ -46,8 +52,17 @@ public class DateBlock extends AbstractCommonBlock {
 
     private int[] selection;
 
-    public DateBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, long[] data, DataType dataType,
-                     TimeZone timezone, int[] selection) {
+    public DateBlock(int slotLen, TimeZone timezone) {
+        super(DataTypes.DateType, slotLen);
+        this.packed = new long[slotLen];
+        this.selection = null;
+        this.timezone = timezone;
+        updateSizeInfo();
+    }
+
+    DateBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, long[] data,
+              DataType dataType,
+              TimeZone timezone, int[] selection) {
         super(dataType, positionCount, valueIsNull, valueIsNull != null);
         this.packed = Preconditions.checkNotNull(data);
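         // The packed array keeps each date in MySQL's packed long layout (decoded via
         // TimeStorage.readDate when a java.sql.Date is materialized), so equality and
         // hashing can work directly on primitive longs.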
this.timezone = timezone; @@ -55,14 +70,36 @@ public DateBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, long updateSizeInfo(); } - DateBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, long[] data, DataType dataType, - TimeZone timezone) { + public DateBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, long[] data, + DataType dataType, + TimeZone timezone) { super(dataType, positionCount, valueIsNull, valueIsNull != null); this.packed = Preconditions.checkNotNull(data); this.timezone = timezone; updateSizeInfo(); } + public static DateBlock from(DateBlock dateBlock, int selSize, int[] selection, boolean useSelection) { + if (useSelection) { + return new DateBlock(0, selSize, dateBlock.isNull, dateBlock.packed, dateBlock.dataType, + dateBlock.timezone, selection); + } + + return new DateBlock(0, selSize, + BlockUtils.copyNullArray(dateBlock.isNull, selection, selSize), + BlockUtils.copyLongArray(dateBlock.packed, selection, selSize), + dateBlock.dataType, + dateBlock.timezone, + null); + } + + @Override + public void recycle() { + if (recycler != null) { + recycler.recycle(packed); + } + } + private int realPositionOf(int position) { if (selection == null) { return position; @@ -73,18 +110,13 @@ private int realPositionOf(int position) { @Override public boolean isNull(int position) { position = realPositionOf(position); - return isNull != null && isNull[position + arrayOffset]; + return isNullInner(position); } @Override public Date getDate(int position) { - // unpack the long value to original date object. - final long packedLong = getPackedLong(position); - MysqlDateTime t = TimeStorage.readDate(packedLong); - t.setTimezone(timezone); - // we assume the time read from packed long value is valid. - Date date = new OriginalDate(t); - return date; + position = realPositionOf(position); + return getDateInner(position); } /** @@ -93,76 +125,95 @@ public Date getDate(int position) { @Override @Deprecated public long getLong(int position) { + position = realPositionOf(position); // this method means get long value of millis second ? - long millis = getDate(position).getTime(); - return millis; + return getDateInner(position).getTime(); } @Override - public long getPackedLong(int position) { + public Object getObject(int position) { position = realPositionOf(position); - - return packed[arrayOffset + position]; + return isNullInner(position) ? null : getDateInner(position); } @Override - public Object getObject(int position) { - return isNull(position) ? null : getDate(position); + public Object getObjectForCmp(int position) { + position = realPositionOf(position); + return isNullInner(position) ? null : getPackedLongInner(position); } @Override - public Object getObjectForCmp(int position) { - return isNull(position) ? null : getPackedLong(position); + public void writePositionTo(int[] selection, int offsetInSelection, int positionCount, BlockBuilder blockBuilder) { + if (this.selection != null || !(blockBuilder instanceof DateBlockBuilder)) { + // don't support it when selection in use. 
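+            // Per-position fallback used below (illustration; mirrors the generic copy
+            // loop in ChunkBuilder.appendTo):
+            //   for (int i = 0; i < positionCount; i++) {
+            //       writePositionTo(selection[i + offsetInSelection], blockBuilder);
+            //   }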
+ super.writePositionTo(selection, offsetInSelection, positionCount, blockBuilder); + return; + } + + if (!mayHaveNull()) { + ((DateBlockBuilder) blockBuilder).packed + .add(this.packed, selection, offsetInSelection, positionCount); + + ((DateBlockBuilder) blockBuilder).valueIsNull + .add(false, positionCount); + + return; + } + + DateBlockBuilder dateBlockBuilder = (DateBlockBuilder) blockBuilder; + for (int i = 0; i < positionCount; i++) { + int position = selection[i + offsetInSelection]; + + if (isNull != null && isNull[position + arrayOffset]) { + dateBlockBuilder.appendNull(); + } else { + dateBlockBuilder.writeDatetimeRawLong(packed[position + arrayOffset]); + } + } } @Override public void writePositionTo(int position, BlockBuilder blockBuilder) { - if (blockBuilder instanceof DateBlockBuilder) { - writePositionTo(position, (DateBlockBuilder) blockBuilder); - } else { - throw new AssertionError(); - } + position = realPositionOf(position); + writePositionToInner(position, blockBuilder); } - private void writePositionTo(int position, DateBlockBuilder b) { - if (isNull(position)) { - b.appendNull(); - } else { - b.valueIsNull.add(false); - b.packed.add(getPackedLong(position)); - } + @Override + public int hashCode(int position) { + position = realPositionOf(position); + return hashCodeInner(position); } @Override - public int hashCode(int position) { - if (isNull(position)) { - return 0; + public long hashCodeUseXxhash(int pos) { + int realPos = realPositionOf(pos); + if (isNull(realPos)) { + return NULL_HASH_CODE; + } else { + return XxhashUtils.finalShuffle(getPackedLongInner(realPos)); } - return Long.hashCode(getPackedLong(position)); } @Override public void addToHasher(IStreamingHasher sink, int position) { - if (isNull(position)) { - sink.putBytes(NULL_VALUE_FOR_HASHER); - } else { - sink.putString(getDate(position).toString()); - } + position = realPositionOf(position); + addToHasherInner(sink, position); } @Override public boolean equals(int position, Block other, int otherPosition) { + position = realPositionOf(position); if (other instanceof DateBlock) { - return equals(position, (DateBlock) other, otherPosition); + return equalsInner(position, other.cast(DateBlock.class), otherPosition); } else if (other instanceof DateBlockBuilder) { - return equals(position, (DateBlockBuilder) other, otherPosition); + return equalsInner(position, (DateBlockBuilder) other, otherPosition); } else { throw new AssertionError(); } } - boolean equals(int position, DateBlock other, int otherPosition) { - boolean n1 = isNull(position); + private boolean equalsInner(int realPosition, DateBlock other, int otherPosition) { + boolean n1 = isNullInner(realPosition); boolean n2 = other.isNull(otherPosition); if (n1 && n2) { return true; @@ -171,13 +222,13 @@ boolean equals(int position, DateBlock other, int otherPosition) { } // by packed long value - long l1 = getPackedLong(position); + long l1 = getPackedLongInner(realPosition); long l2 = other.getPackedLong(otherPosition); return l1 == l2; } - boolean equals(int position, DateBlockBuilder other, int otherPosition) { - boolean n1 = isNull(position); + private boolean equalsInner(int realPosition, DateBlockBuilder other, int otherPosition) { + boolean n1 = isNullInner(realPosition); boolean n2 = other.isNull(otherPosition); if (n1 && n2) { return true; @@ -186,7 +237,7 @@ boolean equals(int position, DateBlockBuilder other, int otherPosition) { } // by packed long value - long l1 = getPackedLong(position); + long l1 = 
getPackedLongInner(realPosition); long l2 = other.getPackedLong(otherPosition); return l1 == l2; } @@ -203,6 +254,70 @@ public int[] getSelection() { return selection; } + public void writeLong(SliceOutput sliceOutput, int position) { + position = realPositionOf(position); + sliceOutput.writeLong(getPackedLongInner(position)); + } + + public long getPackedLong(int position) { + position = realPositionOf(position); + return packed[arrayOffset + position]; + } + + private long getPackedLongInner(int position) { + return packed[arrayOffset + position]; + } + + private Date getDateInner(int position) { + // unpack the long value to original date object. + final long packedLong = getPackedLongInner(position); + MysqlDateTime t = TimeStorage.readDate(packedLong); + t.setTimezone(timezone); + // we assume the time read from packed long value is valid. + Date date = new OriginalDate(t); + return date; + } + + private int hashCodeInner(int position) { + if (isNullInner(position)) { + return 0; + } + return Long.hashCode(getPackedLongInner(position)); + } + + private void writePositionToInner(int position, BlockBuilder blockBuilder) { + if (blockBuilder instanceof DateBlockBuilder) { + DateBlockBuilder b = (DateBlockBuilder) blockBuilder; + if (isNullInner(position)) { + b.appendNull(); + } else { + b.valueIsNull.add(false); + b.packed.add(getPackedLongInner(position)); + } + } else if (blockBuilder instanceof BatchBlockWriter.BatchDateBlockBuilder) { + BatchBlockWriter.BatchDateBlockBuilder b = (BatchBlockWriter.BatchDateBlockBuilder) blockBuilder; + if (isNullInner(position)) { + b.appendNull(); + } else { + b.writePackedLong(getPackedLongInner(position)); + } + } else { + throw new AssertionError(); + } + } + + private void addToHasherInner(IStreamingHasher sink, int position) { + if (isNullInner(position)) { + sink.putBytes(NULL_VALUE_FOR_HASHER); + } else { + sink.putString(getDateInner(position).toString()); + } + } + + private boolean isNullInner(int position) { + return isNull != null && isNull[position + arrayOffset]; + } + @Override public void updateSizeInfo() { estimatedSize = INSTANCE_SIZE + sizeOf(isNull) + sizeOf(packed); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DateBlockBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DateBlockBuilder.java index 7068313ea..b791a8f47 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DateBlockBuilder.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DateBlockBuilder.java @@ -16,6 +16,7 @@ package com.alibaba.polardbx.executor.chunk; +import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.time.MySQLTimeTypeUtil; import com.alibaba.polardbx.common.utils.time.core.MysqlDateTime; @@ -23,6 +24,9 @@ import com.alibaba.polardbx.common.utils.time.core.TimeStorage; import com.alibaba.polardbx.common.utils.time.parser.StringTimeParser; import com.alibaba.polardbx.common.utils.timezone.InternalTimeZone; +import com.alibaba.polardbx.executor.mpp.operator.DriverContext; +import com.alibaba.polardbx.executor.operator.util.DriverObjectPool; +import com.alibaba.polardbx.executor.operator.util.ObjectPools; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.google.common.base.Preconditions; @@ -39,19 +43,34 @@ */ public class DateBlockBuilder extends 
AbstractBlockBuilder { - final LongArrayList packed; + final BatchedArrayList.BatchLongArrayList packed; final DataType dataType; final ExecutionContext context; // timezone is mutable TimeZone timezone; + private DriverObjectPool objectPool; + private int chunkLimit; + public DateBlockBuilder(int capacity, DataType dataType, ExecutionContext context) { super(capacity); - this.packed = new LongArrayList(capacity); + this.packed = new BatchedArrayList.BatchLongArrayList(capacity); this.dataType = dataType; this.context = context; // 当前执行器以外的时区处理逻辑,都认为时间戳以默认时区表示 this.timezone = InternalTimeZone.DEFAULT_TIME_ZONE; + this.chunkLimit = context.getParamManager().getInt(ConnectionParams.CHUNK_SIZE); + } + + public DateBlockBuilder(int capacity, DataType dataType, ExecutionContext context, + DriverObjectPool objectPool) { + super(capacity); + this.packed = new BatchedArrayList.BatchLongArrayList(capacity); + this.dataType = dataType; + this.context = context; + this.timezone = InternalTimeZone.DEFAULT_TIME_ZONE; + this.objectPool = objectPool; + this.chunkLimit = context.getParamManager().getInt(ConnectionParams.CHUNK_SIZE); } @Override @@ -144,8 +163,13 @@ public void ensureCapacity(int capacity) { @Override public Block build() { - return new DateBlock(0, getPositionCount(), mayHaveNull() ? valueIsNull.elements() : null, packed.elements(), - dataType, timezone); + Block block = + new DateBlock(0, getPositionCount(), mayHaveNull() ? valueIsNull.elements() : null, packed.elements(), + dataType, timezone); + if (objectPool != null) { + block.setRecycler(objectPool.getRecycler(chunkLimit)); + } + return block; } @Override @@ -156,7 +180,16 @@ public void appendNull() { @Override public BlockBuilder newBlockBuilder() { - return new DateBlockBuilder(getCapacity(), dataType, context); + if (objectPool != null) { + return new DateBlockBuilder(getCapacity(), dataType, context, objectPool); + } else { + return new DateBlockBuilder(getCapacity(), dataType, context); + } + } + + @Override + public BlockBuilder newBlockBuilder(ObjectPools objectPools, int chunkLimit) { + return new DateBlockBuilder(getCapacity(), dataType, context, objectPools.getLongArrayPool()); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DateBlockEncoding.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DateBlockEncoding.java index 388820953..81c7a7a5e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DateBlockEncoding.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DateBlockEncoding.java @@ -49,19 +49,19 @@ public String getName() { @Override public void writeBlock(SliceOutput sliceOutput, Block block) { // write zone id - TimeZone zone = ((DateBlock) block).getTimezone(); + TimeZone zone = block.cast(DateBlock.class).getTimezone(); byte[] zoneIdBytes = zone.getID().getBytes(); sliceOutput.appendInt(zoneIdBytes.length); sliceOutput.appendBytes(zoneIdBytes); - DateBlock dateBlock = (DateBlock) block; + DateBlock dateBlock = block.cast(DateBlock.class); int positionCount = block.getPositionCount(); sliceOutput.appendInt(positionCount); encodeNullsAsBits(sliceOutput, block); for (int position = 0; position < positionCount; position++) { - sliceOutput.writeLong(dateBlock.getPackedLong(position)); + dateBlock.writeLong(sliceOutput, position); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DecimalBlock.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DecimalBlock.java index b86d774fc..0af84af1f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DecimalBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DecimalBlock.java @@ -17,11 +17,21 @@ package com.alibaba.polardbx.executor.chunk; import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.datatype.DecimalConverter; import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.DecimalTypeBase; +import com.alibaba.polardbx.common.datatype.FastDecimalUtils; import com.alibaba.polardbx.common.datatype.RawBytesDecimalUtils; import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.MathUtils; import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; +import com.alibaba.polardbx.executor.operator.util.BatchBlockWriter; +import com.alibaba.polardbx.executor.operator.util.DriverObjectPool; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DecimalType; +import com.google.common.base.Preconditions; +import io.airlift.slice.BasicSliceOutput; +import io.airlift.slice.DynamicSliceOutput; import io.airlift.slice.Slice; import io.airlift.slice.SliceOutput; import io.airlift.slice.Slices; @@ -29,28 +39,49 @@ import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DECIMAL_MEMORY_SIZE; import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DERIVED_FRACTIONS_OFFSET; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.E_DEC_DEC128; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.E_DEC_DEC64; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.E_DEC_OVERFLOW; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.E_DEC_TRUNCATED; import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.FRACTIONS_OFFSET; import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.INTEGERS_OFFSET; import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.IS_NEG_OFFSET; -import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.roundUp; import static com.alibaba.polardbx.common.utils.memory.SizeOf.sizeOf; +import static com.alibaba.polardbx.executor.chunk.SegmentedDecimalBlock.DecimalBlockState.DECIMAL_128; +import static com.alibaba.polardbx.executor.chunk.SegmentedDecimalBlock.DecimalBlockState.DECIMAL_64; +import static com.alibaba.polardbx.executor.chunk.SegmentedDecimalBlock.DecimalBlockState.FULL; +import static com.alibaba.polardbx.executor.chunk.SegmentedDecimalBlock.DecimalBlockState.SIMPLE_MODE_2; +import static com.alibaba.polardbx.executor.chunk.SegmentedDecimalBlock.DecimalBlockState.SIMPLE_MODE_3; +import static com.alibaba.polardbx.executor.chunk.SegmentedDecimalBlock.DecimalBlockState.UNALLOC_STATE; /** * An implementation of a Decimal block that mixes fixed and variable length storage */ -public class DecimalBlock extends AbstractBlock { +public class DecimalBlock extends AbstractBlock implements SegmentedDecimalBlock { private static final int NULL_VALUE = 0; private static final long INSTANCE_SIZE = ClassLayout.parseClass(DecimalBlock.class).instanceSize(); - private static final int UNSET = -1; - /** * Allocate the memory of decimal vector */ protected Slice memorySegments; - protected int[] selection; + protected DecimalStructure hashCodeTmpBuffer; + protected DecimalStructure 
hashCodeResultBuffer; + protected DecimalStructure regionTmpBuffer; + + /** + * decimal64 values in Decimal64 + * or low bits in Decimal128 + */ + protected long[] decimal64Values; + /** + * high bits in Decimal128 + */ + protected long[] decimal128HighValues; + + private int[] selection; /** * A Decimal Block is simple only if all non-null decimal values are in format of * (a2 * 10^(9*1) + a1 * 10^(9*0) + b * 10^(9*-1)). @@ -58,16 +89,31 @@ public class DecimalBlock extends AbstractBlock { */ private DecimalBlockState state; + private DriverObjectPool objectPool = null; + /** * For Vectorized expression result vector. */ public DecimalBlock(DataType dataType, int slotLen) { super(dataType, slotLen); - this.memorySegments = Slices.allocate(slotLen * DECIMAL_MEMORY_SIZE); + // delay memory allocation this.selection = null; + this.decimal64Values = null; + this.state = UNALLOC_STATE; + } - this.state = DecimalBlockState.UNSET_STATE; - updateSizeInfo(); + private DecimalBlock(DataType dataType, int positionCount, boolean[] valueIsNull, boolean hasNull) { + super(dataType, positionCount, valueIsNull, hasNull); + } + + public DecimalBlock(DataType dataType, int slotLen, DriverObjectPool objectPool, int chunkLimit) { + super(dataType, slotLen); + // delay memory allocation + this.selection = null; + this.decimal64Values = null; + this.state = UNALLOC_STATE; + this.objectPool = objectPool; + this.recycler = objectPool.getRecycler(chunkLimit); } /** @@ -86,9 +132,9 @@ public DecimalBlock(DataType dataType, Slice memorySegments, boolean[] nulls, bo /** * Normal */ - public DecimalBlock(int positionCount, boolean[] valueIsNull, + public DecimalBlock(DataType dataType, int positionCount, boolean[] valueIsNull, Slice memorySegments, DecimalBlockState state) { - super(0, positionCount, valueIsNull); + super(dataType, 0, positionCount, valueIsNull); this.memorySegments = memorySegments; this.selection = null; @@ -96,122 +142,901 @@ public DecimalBlock(int positionCount, boolean[] valueIsNull, this.state = state; updateSizeInfo(); } + /** + * Decimal64 + */ + public DecimalBlock(DataType dataType, int positionCount, boolean hasNull, boolean[] valueIsNull, + long[] decimal64Values) { + super(dataType, positionCount, valueIsNull, hasNull); + this.decimal64Values = decimal64Values; + this.selection = null; + + this.state = DecimalBlockState.DECIMAL_64; + updateSizeInfo(); + } + + /** + * Decimal 128 with selection + */ + public DecimalBlock(DataType dataType, int positionCount, boolean hasNull, boolean[] valueIsNull, + long[] decimal128Low, long[] decimal128High, int[] selection) { + super(dataType, positionCount, valueIsNull, hasNull); + this.decimal64Values = decimal128Low; + this.decimal128HighValues = decimal128High; + this.selection = selection; + + this.state = DecimalBlockState.DECIMAL_128; + updateSizeInfo(); + } + + /** + * Decimal 64 with selection + */ + public DecimalBlock(DataType dataType, int positionCount, boolean hasNull, boolean[] valueIsNull, + long[] decimal64Values, int[] selection) { + super(dataType, positionCount, valueIsNull, hasNull); + this.decimal64Values = decimal64Values; + this.selection = selection; + + this.state = DecimalBlockState.DECIMAL_64; + updateSizeInfo(); + } + + public DecimalBlock(DataType dataType, int positionCount, boolean useDecimal64) { + super(dataType, positionCount); + allocateValues(useDecimal64); + this.selection = null; + updateSizeInfo(); + } + + public static DecimalBlock buildDecimal128Block(DataType dataType, int positionCount, boolean hasNull, + boolean[] 
valueIsNull, + long[] decimal128Low, long[] decimal128High) { + DecimalBlock decimalBlock = new DecimalBlock(dataType, positionCount, valueIsNull, hasNull); + decimalBlock.decimal64Values = decimal128Low; + decimalBlock.decimal128HighValues = decimal128High; + decimalBlock.selection = null; + + decimalBlock.state = DecimalBlockState.DECIMAL_128; + decimalBlock.updateSizeInfo(); + return decimalBlock; + } + + public static DecimalBlock from(DecimalBlock other, int selSize, int[] selection, boolean useSelection) { + if (useSelection) { + if (other.state == DECIMAL_64) { + // for decimal-64 mode. + return new DecimalBlock(other.dataType, selSize, other.hasNull, + other.isNull, other.decimal64Values, selection); + } else if (other.state == DECIMAL_128) { + // for decimal-128 mode. + return new DecimalBlock(other.dataType, selSize, other.hasNull, + other.isNull, other.decimal64Values, other.decimal128HighValues, selection); + } else { + // for normal mode. + return new DecimalBlock(other.dataType, other.getMemorySegments(), + other.isNull, other.hasNull, selSize, selection, other.state); + } + } + if (other.state == DECIMAL_64) { + // case1: for decimal-64 mode. + if (selection == null) { + // case 1.1: directly copy long array + boolean[] targetNulls = BlockUtils.copyNullArray(other.isNull, null, selSize); + return new DecimalBlock(other.dataType, selSize, + targetNulls != null, + targetNulls, + BlockUtils.copyLongArray(other.decimal64Values, null, selSize), + null); + } else { + // case 1.2: copy long array by selection. + boolean[] targetNulls = BlockUtils.copyNullArray(other.isNull, selection, selSize); + return new DecimalBlock(other.dataType, selSize, + targetNulls != null, targetNulls, + BlockUtils.copyLongArray(other.decimal64Values, selection, selSize), + null); + } + } else if (other.state == DECIMAL_128) { + // case2: for decimal-128 mode. + if (selection == null) { + // case 2.1: directly copy long array + boolean[] targetNulls = BlockUtils.copyNullArray(other.isNull, null, selSize); + return new DecimalBlock(other.dataType, selSize, + targetNulls != null, + targetNulls, + BlockUtils.copyLongArray(other.decimal64Values, null, selSize), + BlockUtils.copyLongArray(other.decimal128HighValues, null, selSize), + null); + } else { + // case 2.2: copy long array by selection. + boolean[] targetNulls = BlockUtils.copyNullArray(other.isNull, selection, selSize); + return new DecimalBlock(other.dataType, selSize, + targetNulls != null, targetNulls, + BlockUtils.copyLongArray(other.decimal64Values, selection, selSize), + BlockUtils.copyLongArray(other.decimal128HighValues, selection, selSize), + null); + } + } else { + // case3: for normal-decimal mode. + if (selection == null) { + // case 3.1: directly copy slice. + boolean[] targetNulls = BlockUtils.copyNullArray(other.isNull, null, selSize); + return new DecimalBlock(other.dataType, + Slices.copyOf(other.getMemorySegments()), + targetNulls, targetNulls != null, + selSize, null, + other.state); + } else { + // case 3.2: copy slice by selection in fixed size. 
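// [Illustrative aside, not part of the patch] A typical call site for from(): a
// vectorized filter leaves the surviving positions in `sel` (hypothetical name),
// and the caller chooses between a zero-copy view and a materialized copy:
//   DecimalBlock view  = DecimalBlock.from(input, sel.length, sel, true);  // wraps the source arrays behind the selection
//   DecimalBlock owned = DecimalBlock.from(input, sel.length, sel, false); // copies only the selected slots
// Case 3.2 below is the materializing path for a normal-mode block: it packs the
// selected 64-byte decimal images into a fresh slice, position by position.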
+ boolean[] targetNulls = BlockUtils.copyNullArray(other.isNull, selection, selSize); + Slice targetSlice = Slices.allocate(selSize * DECIMAL_MEMORY_SIZE); + Slice sourceSlice = other.memorySegments; + for (int position = 0; position < selSize; position++) { + targetSlice.setBytes(position * DECIMAL_MEMORY_SIZE, sourceSlice, + selection[position] * DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE); + } + + return new DecimalBlock(other.dataType, targetSlice, + targetNulls, targetNulls != null, selSize, null, other.state); + } + } + } + + @Override + public void recycle() { + if (recycler != null && decimal64Values != null) { + recycler.recycle(decimal64Values); + } + } + + private void allocateValues(boolean isDecimal64) { + if (isDecimal64) { + this.decimal64Values = new long[positionCount]; + this.state = DecimalBlockState.DECIMAL_64; + } else { + this.memorySegments = Slices.allocate(positionCount * DECIMAL_MEMORY_SIZE); + this.state = DecimalBlockState.UNSET_STATE; + } + } + public int realPositionOf(int position) { return selection == null ? position : selection[position]; } + @Override + public void sum(int[] groupSelected, int selSize, long[] results) { + Preconditions.checkArgument(selSize <= positionCount); + if (isDecimal64()) { + boolean overflow64 = false; + long sum = 0L; + if (selection != null) { + for (int i = 0; i < selSize; i++) { + int position = selection[groupSelected[i]]; + long value = decimal64Values[position]; + long oldValue = sum; + sum = value + oldValue; + overflow64 |= ((value ^ sum) & (oldValue ^ sum)) < 0; + } + } else { + for (int i = 0; i < selSize; i++) { + int position = groupSelected[i]; + long value = decimal64Values[position]; + long oldValue = sum; + sum = value + oldValue; + overflow64 |= ((value ^ sum) & (oldValue ^ sum)) < 0; + } + } + if (!overflow64) { + results[0] = sum; + results[1] = 0; + results[2] = E_DEC_DEC64; + return; + } + // try decimal128 + long sumLow = 0L, sumHigh = 0L; + // there is no need to perform overflow detection on decimal128, + // since the sum of a decimal64 block will never overflow decimal128 + boolean overflow128 = false; + if (selection != null) { + for (int i = 0; i < selSize; i++) { + int position = selection[groupSelected[i]]; + long value = decimal64Values[position]; + long valueHigh = value >= 0 ? 0 : -1; + sumHigh += valueHigh; + long addResult = value + sumLow; + long carryOut = ((value & sumLow) + | ((value | sumLow) & (~addResult))) >>> 63; + sumHigh += carryOut; + sumLow = addResult; + } + } else { + for (int i = 0; i < selSize; i++) { + int position = groupSelected[i]; + long value = decimal64Values[position]; + long valueHigh = value >= 0 ? 
0 : -1; + sumHigh += valueHigh; + long addResult = value + sumLow; + long carryOut = ((value & sumLow) + | ((value | sumLow) & (~addResult))) >>> 63; + sumHigh += carryOut; + sumLow = addResult; + } + } + if (!overflow128) { + results[0] = sumLow; + results[1] = sumHigh; + results[2] = E_DEC_DEC128; + return; + } + + // reset results and return error state + results[0] = -1L; + results[1] = -1L; + results[2] = E_DEC_TRUNCATED; + } else if (isDecimal128()) { + long sumLow = 0L, sumHigh = 0L; + // need to perform overflow detection in a decimal128 block + boolean overflow128 = false; + if (selection != null) { + for (int i = 0; i < selSize; i++) { + int position = selection[groupSelected[i]]; + long decimal128Low = decimal64Values[position]; + long decimal128High = decimal128HighValues[position]; + long newDecimal128High = sumHigh + decimal128High; + overflow128 |= ((decimal128High ^ newDecimal128High) + & (sumHigh ^ newDecimal128High)) < 0; + long newDecimal128Low = sumLow + decimal128Low; + long carryOut = ((sumLow & decimal128Low) + | ((sumLow | decimal128Low) & (~newDecimal128Low))) >>> 63; + newDecimal128High += carryOut; + sumHigh = newDecimal128High; + sumLow = newDecimal128Low; + } + } else { + for (int i = 0; i < selSize; i++) { + int position = groupSelected[i]; + long decimal128Low = decimal64Values[position]; + long decimal128High = decimal128HighValues[position]; + long newDecimal128High = sumHigh + decimal128High; + overflow128 |= ((decimal128High ^ newDecimal128High) + & (sumHigh ^ newDecimal128High)) < 0; + long newDecimal128Low = sumLow + decimal128Low; + long carryOut = ((sumLow & decimal128Low) + | ((sumLow | decimal128Low) & (~newDecimal128Low))) >>> 63; + newDecimal128High += carryOut; + sumHigh = newDecimal128High; + sumLow = newDecimal128Low; + } + } + if (!overflow128) { + results[0] = sumLow; + results[1] = sumHigh; + results[2] = E_DEC_DEC128; + return; + } + + // reset results and return error state + results[0] = -1L; + results[1] = -1L; + results[2] = E_DEC_TRUNCATED; + } else { + // for normal mode, just return error state. + results[0] = -1L; + results[1] = -1L; + results[2] = E_DEC_TRUNCATED; + } + } + + @Override + public void sum(int startIndexIncluded, int endIndexExcluded, long[] results) { + Preconditions.checkArgument(endIndexExcluded <= positionCount); + if (isDecimal64()) { + boolean overflow64 = false; + long sum = 0L; + if (selection != null) { + for (int i = startIndexIncluded; i < endIndexExcluded; i++) { + int position = selection[i]; + long value = decimal64Values[position]; + long oldValue = sum; + sum = value + oldValue; + overflow64 |= ((value ^ sum) & (oldValue ^ sum)) < 0; + } + } else { + for (int position = startIndexIncluded; position < endIndexExcluded; position++) { + long value = decimal64Values[position]; + long oldValue = sum; + sum = value + oldValue; + overflow64 |= ((value ^ sum) & (oldValue ^ sum)) < 0; + } + } + if (!overflow64) { + results[0] = sum; + results[1] = 0; + results[2] = E_DEC_DEC64; + return; + } + + // try decimal128 + long sumLow = 0L, sumHigh = 0L; + // there is no need to perform overflow detection on decimal128, + // since the sum of a decimal64 block will never overflow decimal128 + boolean overflow128 = false; + if (selection != null) { + for (int i = startIndexIncluded; i < endIndexExcluded; i++) { + int position = selection[i]; + long value = decimal64Values[position]; + long valueHigh = value >= 0 ? 
0 : -1; + sumHigh += valueHigh; + long addResult = value + sumLow; + long carryOut = ((value & sumLow) + | ((value | sumLow) & (~addResult))) >>> 63; + sumHigh += carryOut; + sumLow = addResult; + } + } else { + for (int position = startIndexIncluded; position < endIndexExcluded; position++) { + long value = decimal64Values[position]; + long valueHigh = value >= 0 ? 0 : -1; + sumHigh += valueHigh; + long addResult = value + sumLow; + long carryOut = ((value & sumLow) + | ((value | sumLow) & (~addResult))) >>> 63; + sumHigh += carryOut; + sumLow = addResult; + } + } + if (!overflow128) { + results[0] = sumLow; + results[1] = sumHigh; + results[2] = E_DEC_DEC128; + return; + } + + // reset results and return error state + results[0] = -1L; + results[1] = -1L; + results[2] = E_DEC_TRUNCATED; + } else if (this.state == DECIMAL_128) { + long sumLow = 0L, sumHigh = 0L; + // need to perform overflow detection in a decimal128 block + boolean overflow128 = false; + if (selection != null) { + for (int i = startIndexIncluded; i < endIndexExcluded; i++) { + int position = selection[i]; + long decimal128Low = decimal64Values[position]; + long decimal128High = decimal128HighValues[position]; + long newDecimal128High = sumHigh + decimal128High; + overflow128 |= ((decimal128High ^ newDecimal128High) + & (sumHigh ^ newDecimal128High)) < 0; + long newDecimal128Low = sumLow + decimal128Low; + long carryOut = ((sumLow & decimal128Low) + | ((sumLow | decimal128Low) & (~newDecimal128Low))) >>> 63; + newDecimal128High += carryOut; + sumHigh = newDecimal128High; + sumLow = newDecimal128Low; + } + } else { + for (int position = startIndexIncluded; position < endIndexExcluded; position++) { + long decimal128Low = decimal64Values[position]; + long decimal128High = decimal128HighValues[position]; + long newDecimal128High = sumHigh + decimal128High; + overflow128 |= ((decimal128High ^ newDecimal128High) + & (sumHigh ^ newDecimal128High)) < 0; + long newDecimal128Low = sumLow + decimal128Low; + long carryOut = ((sumLow & decimal128Low) + | ((sumLow | decimal128Low) & (~newDecimal128Low))) >>> 63; + newDecimal128High += carryOut; + sumHigh = newDecimal128High; + sumLow = newDecimal128Low; + } + } + if (!overflow128) { + results[0] = sumLow; + results[1] = sumHigh; + results[2] = E_DEC_DEC128; + return; + } + + // reset results and return error state + results[0] = -1L; + results[1] = -1L; + results[2] = E_DEC_TRUNCATED; + } else { + // for normal (neither decimal64 nor decimal128) mode, just return error state. 
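// [Illustrative aside, not part of the patch] The loops above add a signed
// 64-bit value into a (high, low) 128-bit pair without branching. One step,
// with hypothetical local names:
//   long vHi   = v >= 0 ? 0 : -1;                       // sign-extend v to 128 bits
//   long lo2   = lo + v;                                // wrapping low-word add
//   long carry = ((lo & v) | ((lo | v) & ~lo2)) >>> 63; // unsigned carry out of bit 63
//   long hi2   = hi + vHi + carry;
// Worked example: lo = -1L (2^64 - 1 unsigned), hi = 0, v = 1 yields lo2 = 0,
// carry = 1, hi2 = 1, i.e. exactly 2^64.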
+ results[0] = -1L; + results[1] = -1L; + results[2] = E_DEC_TRUNCATED; + } + } + + @Override + public void sum(int startIndexIncluded, int endIndexExcluded, long[] sumResultArray, int[] sumStatusArray, + int[] normalizedGroupIds) { + Preconditions.checkArgument(endIndexExcluded <= positionCount); + if (isDecimal64()) { + boolean overflow = false; + long sum = 0L; + if (selection != null) { + for (int i = startIndexIncluded; i < endIndexExcluded; i++) { + int position = selection[i]; + long value = decimal64Values[position]; + int normalizedGroupId = normalizedGroupIds[position]; + + // sum + long oldValue = sumResultArray[normalizedGroupId]; + sum = value + oldValue; + sumResultArray[normalizedGroupId] = sum; + + // check overflow + overflow |= ((value ^ sum) & (oldValue ^ sum)) < 0; + } + } else { + for (int position = startIndexIncluded; position < endIndexExcluded; position++) { + long value = decimal64Values[position]; + int normalizedGroupId = normalizedGroupIds[position]; + + // sum + long oldValue = sumResultArray[normalizedGroupId]; + sum = value + oldValue; + sumResultArray[normalizedGroupId] = sum; + + // check overflow + overflow |= ((value ^ sum) & (oldValue ^ sum)) < 0; + } + } + sumStatusArray[0] = overflow ? E_DEC_OVERFLOW : E_DEC_DEC64; + } else { + // for non-decimal-64 mode, just return error state. + // decimal128 does not support putting the result into a long array + sumStatusArray[0] = E_DEC_TRUNCATED; + } + } + @Override public boolean isNull(int position) { position = realPositionOf(position); - return isNull != null && isNull[position + arrayOffset]; + return isNullInner(position); } @Override public Decimal getDecimal(int position) { position = realPositionOf(position); - Slice memorySegment = memorySegments.slice(position * DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE); - return new Decimal(memorySegment); + return getDecimalInner(position); + } + + public void getDecimalStructure(DecimalStructure target, int position) { + position = realPositionOf(position); + if (isDecimal64()) { + target.setLongWithScale(decimal64Values[position], getScale()); + } else if (isDecimal128()) { + DecimalStructure buffer = getRegionTmpBuffer(); + FastDecimalUtils.setDecimal128WithScale(buffer, target, decimal64Values[position], + decimal128HighValues[position], getScale()); + } else { + memorySegments.slice(position * DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE, target.getDecimalMemorySegment()); + } + } + + @Override + public long getLong(int position) { + position = realPositionOf(position); + return getLongInner(position); + } + + @Override + public long getDecimal128Low(int position) { + position = realPositionOf(position); + return getDecimal128LowInner(position); + } + + @Override + public long getDecimal128High(int position) { + position = realPositionOf(position); + return getDecimal128HighInner(position); } @Override public Object getObject(int position) { - return isNull(position) ? null : getDecimal(position); + position = realPositionOf(position); + return isNullInner(position) ? 
null : getDecimalInner(position); + } + + public void writePositionTo(int position, BatchBlockWriter.BatchDecimalBlockBuilder blockWriter, Slice output) { + position = realPositionOf(position); + blockWriter.writeDecimal(getDecimalInner(position, output)); } @Override public void writePositionTo(int position, BlockBuilder blockBuilder) { + position = realPositionOf(position); + if (blockBuilder instanceof DecimalBlockBuilder) { - writePositionTo(position, (DecimalBlockBuilder) blockBuilder); + writePositionToInner(position, (DecimalBlockBuilder) blockBuilder); + } else if (blockBuilder instanceof BatchBlockWriter.BatchDecimalBlockBuilder) { + BatchBlockWriter.BatchDecimalBlockBuilder b = (BatchBlockWriter.BatchDecimalBlockBuilder) blockBuilder; + + if (isNullInner(position)) { + b.appendNull(); + } else if (isDecimal64() && (b.isDecimal64() || b.isDecimal128())) { + b.writeLong(getLongInner(position)); + } else if (isDecimal128() && (b.isDecimal64() || b.isDecimal128())) { + b.writeDecimal128(getDecimal128LowInner(position), getDecimal128HighInner(position)); + } else { + b.writeDecimal(getDecimalInner(position)); + } } else { - throw new AssertionError(); + throw new AssertionError(blockBuilder.getClass()); + } + } + + public long[] allocateDecimal64() { + if (isUnalloc()) { + if (objectPool != null) { + long[] pooled = objectPool.poll(); + if (pooled != null && pooled.length >= positionCount) { + this.decimal64Values = pooled; + } else { + this.decimal64Values = new long[positionCount]; + } + } else { + this.decimal64Values = new long[positionCount]; + } + + this.state = DecimalBlockState.DECIMAL_64; } + updateSizeInfo(); + return this.decimal64Values; } - private void writePositionTo(int position, DecimalBlockBuilder b) { - if (isNull(position)) { + public void allocateDecimal128() { + if (isUnalloc()) { + if (objectPool != null) { + long[] pooled = objectPool.poll(); + if (pooled != null && pooled.length >= positionCount) { + this.decimal64Values = pooled; + pooled = objectPool.poll(); + } else { + this.decimal64Values = new long[positionCount]; + pooled = null; + } + if (pooled != null && pooled.length >= positionCount) { + this.decimal128HighValues = pooled; + } else { + this.decimal128HighValues = new long[positionCount]; + } + } else { + this.decimal64Values = new long[positionCount]; + this.decimal128HighValues = new long[positionCount]; + } + this.state = DecimalBlockState.DECIMAL_128; + } else if (isDecimal64()) { + // will not clear existing decimal64Values to reduce operations, + // caller should be aware of this behavior + if (objectPool != null) { + long[] pooled = objectPool.poll(); + if (pooled != null && pooled.length >= positionCount) { + this.decimal128HighValues = pooled; + } else { + this.decimal128HighValues = new long[positionCount]; + } + } else { + this.decimal128HighValues = new long[positionCount]; + } + this.state = DecimalBlockState.DECIMAL_128; + } + + updateSizeInfo(); + } + + public void deallocateDecimal64() { + if (isDecimal64()) { + this.decimal64Values = null; + this.state = UNALLOC_STATE; + } + } + + public void deallocateDecimal128() { + if (isDecimal128()) { + this.decimal64Values = null; + this.decimal128HighValues = null; + this.state = UNALLOC_STATE; + } + } + + public long[] decimal64Values() { + return this.decimal64Values; + } + + private void writePositionToInner(int position, DecimalBlockBuilder b) { + if (isNullInner(position)) { b.appendNull(); - } else { - position = realPositionOf(position); - // write to decimal memory segments - 
b.sliceOutput.writeBytes(memorySegments, position * DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE); - b.valueIsNull.add(false); + return; + } + if (isDecimal64()) { + if (b.isDecimal64() || b.isDecimal128() || b.state.isUnset()) { + b.setScale(getScale()); + b.writeLong(getLongInner(position)); + } else { + b.writeDecimal(getDecimalInner(position)); + } + return; + } - // update decimal info - DecimalBlockState elementState = DecimalBlockState.stateOf(memorySegments, position); - b.state = b.state.merge(elementState); + if (isDecimal128()) { + if (b.isDecimal64() || b.isDecimal128() || b.state.isUnset()) { + b.writeDecimal128(getDecimal128LowInner(position), getDecimal128HighInner(position)); + } else { + b.writeDecimal(getDecimalInner(position)); + } + return; } + + b.convertToNormalDecimal(); + // normal decimal + // write to decimal memory segments + b.sliceOutput.writeBytes(memorySegments, position * DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE); + b.valueIsNull.add(false); + + // update decimal info + DecimalBlockState elementState = DecimalBlockState.stateOf(memorySegments, position); + b.state = b.state.merge(elementState); } @Override public int hashCode(int position) { - if (isNull(position)) { - return 0; - } position = realPositionOf(position); - Slice memorySegment = memorySegments.slice(position * DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE); - return RawBytesDecimalUtils.hashCode(memorySegment); + return hashCodeInner(position); } @Override - public boolean equals(int position, Block other, int otherPosition) { - boolean n1 = isNull(position); - boolean n2 = other.isNull(otherPosition); - if (n1 && n2) { - return true; - } else if (n1 != n2) { - return false; + public long hashCodeUseXxhash(int pos) { + int realPos = realPositionOf(pos); + if (isNullInner(realPos)) { + return NULL_HASH_CODE; + } else { + if (isDecimal64()) { + long val = getLongInner(realPos); + DecimalStructure bufferDec = getHashCodeTmpBuffer(); + DecimalStructure resultDec = getHashCodeResultBuffer(); + FastDecimalUtils.setLongWithScale(bufferDec, resultDec, val, getScale()); + + return RawBytesDecimalUtils.hashCode(resultDec.getDecimalMemorySegment()); + } else if (isDecimal128()) { + long decimal128Low = getDecimal128LowInner(realPos); + long decimal128High = getDecimal128HighInner(realPos); + DecimalStructure bufferDec = getHashCodeTmpBuffer(); + DecimalStructure resultDec = getHashCodeResultBuffer(); + FastDecimalUtils.setDecimal128WithScale(bufferDec, resultDec, decimal128Low, decimal128High, + getScale()); + + return RawBytesDecimalUtils.hashCode(resultDec.getDecimalMemorySegment()); + } else { + Slice memorySegment = memorySegments.slice(realPos * DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE); + return RawBytesDecimalUtils.hashCode(memorySegment); + } + } + } + + private DecimalStructure getHashCodeTmpBuffer() { + if (this.hashCodeTmpBuffer == null) { + this.hashCodeTmpBuffer = new DecimalStructure(); } + return this.hashCodeTmpBuffer; + } - if (other instanceof DecimalBlock) { - // for decimal block, compare by memory segment - Slice memorySegment1 = this.segmentUncheckedAt(position); - Slice memorySegment2 = ((DecimalBlock) other).segmentUncheckedAt(otherPosition); - return RawBytesDecimalUtils.equals(memorySegment1, memorySegment2); - } else if (other instanceof DecimalBlockBuilder) { - // for decimal block, compare by memory segment - Slice memorySegment1 = this.segmentUncheckedAt(position); - Slice memorySegment2 = ((DecimalBlockBuilder) other).segmentUncheckedAt(otherPosition); - return 
RawBytesDecimalUtils.equals(memorySegment1, memorySegment2); - } else { - throw new AssertionError(); + private DecimalStructure getHashCodeResultBuffer() { + if (this.hashCodeResultBuffer == null) { + this.hashCodeResultBuffer = new DecimalStructure(); } + return this.hashCodeResultBuffer; } - Slice segmentUncheckedAt(int position) { + private DecimalStructure getRegionTmpBuffer() { + if (this.regionTmpBuffer == null) { + this.regionTmpBuffer = new DecimalStructure(); + } + return this.regionTmpBuffer; + } + + @Override + public boolean equals(int position, Block other, int otherPosition) { position = realPositionOf(position); - return memorySegments.slice(position * DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE); + return equalsInner(position, other, otherPosition); + } + + @Override + public Slice segmentUncheckedAt(int position) { + position = realPositionOf(position); + return segmentUncheckedAtInner(position); } @Override public void addToHasher(IStreamingHasher sink, int position) { - if (isNull(position)) { - sink.putInt(NULL_VALUE); - } else { - sink.putInt(hashCode(position)); - } + position = realPositionOf(position); + addToHasherInner(sink, position); } @Override public void copySelected(boolean selectedInUse, int[] sel, int size, RandomAccessBlock output) { if (output instanceof DecimalBlock) { - DecimalBlock outputVectorSlot = (DecimalBlock) output; + DecimalBlock outputVectorSlot = output.cast(DecimalBlock.class); + if (outputVectorSlot.isUnalloc()) { + copySelectedToUnalloc(selectedInUse, sel, size, outputVectorSlot); + } else if (outputVectorSlot.isDecimal64()) { + copySelectedToDecimal64(selectedInUse, sel, size, outputVectorSlot); + } else if (outputVectorSlot.isDecimal128()) { + copySelectedToDecimal128(selectedInUse, sel, size, outputVectorSlot); + } else { + copySelectedToNormal(selectedInUse, sel, size, outputVectorSlot); + } + } else { + BlockUtils.copySelectedInCommon(selectedInUse, sel, size, this, output); + } + + super.copySelected(selectedInUse, sel, size, output); + } + + private void copySelectedToUnalloc(boolean selectedInUse, int[] sel, int size, DecimalBlock outputVectorSlot) { + // copy current dataType into output slot + outputVectorSlot.dataType = new DecimalType(dataType.getPrecision(), dataType.getScale()); + if (isDecimal64()) { + outputVectorSlot.allocateDecimal64(); + copySelectedToDecimal64(selectedInUse, sel, size, outputVectorSlot); + return; + } + if (isDecimal128()) { + outputVectorSlot.allocateDecimal128(); + copySelectedToDecimal128(selectedInUse, sel, size, outputVectorSlot); + return; + } + if (isUnalloc()) { + // should not reach here + return; + } + copySelectedToNormal(selectedInUse, sel, size, outputVectorSlot); + } + + private void copySelectedToDecimal64(boolean selectedInUse, int[] sel, int size, DecimalBlock outputVectorSlot) { + if (isDecimal64()) { + // decimal64 -> decimal64 + if (selectedInUse) { + for (int i = 0; i < size; i++) { + int j = sel[i]; + outputVectorSlot.decimal64Values[j] = decimal64Values[j]; + } + } else { + System.arraycopy(decimal64Values, 0, outputVectorSlot.decimal64Values, 0, size); + } + return; + } + if (isDecimal128()) { + // decimal128 -> decimal64 + // must convert target decimal64 to decimal128 + outputVectorSlot.allocToDecimal128(); + if (selectedInUse) { + for (int i = 0; i < size; i++) { + int j = sel[i]; + outputVectorSlot.decimal64Values[j] = decimal64Values[j]; + outputVectorSlot.decimal128HighValues[j] = decimal128HighValues[j]; + } + } else { + System.arraycopy(decimal64Values, 0, 
outputVectorSlot.decimal64Values, 0, size); + System.arraycopy(decimal128HighValues, 0, outputVectorSlot.decimal128HighValues, 0, size); + } + return; + } + // normal -> decimal64 + // must convert target decimal64 to normal + outputVectorSlot.allocToNormal(); + copySelectedToNormal(selectedInUse, sel, size, outputVectorSlot); + } + + private void copySelectedToDecimal128(boolean selectedInUse, int[] sel, int size, DecimalBlock outputVectorSlot) { + if (isDecimal64()) { + // decimal64 -> decimal128 + if (selectedInUse) { + for (int i = 0; i < size; i++) { + int j = sel[i]; + outputVectorSlot.decimal64Values[j] = decimal64Values[j]; + outputVectorSlot.decimal128HighValues[j] = decimal64Values[j] < 0 ? -1 : 0; + } + } else { + System.arraycopy(decimal64Values, 0, outputVectorSlot.decimal64Values, 0, size); + for (int i = 0; i < size; i++) { + outputVectorSlot.decimal128HighValues[i] = decimal64Values[i] < 0 ? -1 : 0; + } + } + return; + } + if (isDecimal128()) { + // decimal128 -> decimal128 + // target is already in decimal128 layout; copy low/high words directly if (selectedInUse) { for (int i = 0; i < size; i++) { int j = sel[i]; + outputVectorSlot.decimal64Values[j] = decimal64Values[j]; + outputVectorSlot.decimal128HighValues[j] = decimal128HighValues[j]; + } + } else { + System.arraycopy(decimal64Values, 0, outputVectorSlot.decimal64Values, 0, size); + System.arraycopy(decimal128HighValues, 0, outputVectorSlot.decimal128HighValues, 0, size); + } + return; + } + // normal -> decimal128 + // must convert target decimal128 to normal + outputVectorSlot.allocToNormal(); + copySelectedToNormal(selectedInUse, sel, size, outputVectorSlot); + } - // copy memory segment from specified position in selection array. - int fromIndex = j * DECIMAL_MEMORY_SIZE; - outputVectorSlot.memorySegments.setBytes(fromIndex, memorySegments, fromIndex, DECIMAL_MEMORY_SIZE); + private void copySelectedToNormal(boolean selectedInUse, int[] sel, int size, DecimalBlock outputVectorSlot) { + if (isDecimal64()) { + // decimal64 -> normal + DecimalStructure bufferDec = new DecimalStructure(); + DecimalStructure resultDec = new DecimalStructure(); + if (selectedInUse) { + for (int i = 0; i < size; i++) { + int j = sel[i]; + FastDecimalUtils.setLongWithScale(bufferDec, resultDec, decimal64Values[j], getScale()); + int index = j * DECIMAL_MEMORY_SIZE; + outputVectorSlot.getMemorySegments().setBytes(index, resultDec.getDecimalMemorySegment(), 0, + DECIMAL_MEMORY_SIZE); } } else { - // directly copy memory. 
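// [Illustrative aside, not part of the patch] DECIMAL_64 stores only the
// unscaled long per position; the scale lives in the block's DataType. With
// scale = 2, decimal64Values[i] = 12345 denotes 123.45, and
// FastDecimalUtils.setLongWithScale(tmp, out, 12345, 2) rebuilds the full
// 64-byte DecimalStructure image, which is what copySelectedToNormal() does
// whenever a normal-mode destination is required.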
- outputVectorSlot.memorySegments.setBytes(0, memorySegments); + for (int i = 0; i < size; i++) { + FastDecimalUtils.setLongWithScale(bufferDec, resultDec, decimal64Values[i], getScale()); + int index = i * DECIMAL_MEMORY_SIZE; + outputVectorSlot.getMemorySegments().setBytes(index, resultDec.getDecimalMemorySegment(), 0, + DECIMAL_MEMORY_SIZE); + } + } + outputVectorSlot.collectDecimalInfo(); + return; + } + if (isDecimal128()) { + // decimal128 -> normal + DecimalStructure bufferDec = new DecimalStructure(); + DecimalStructure resultDec = new DecimalStructure(); + if (selectedInUse) { + for (int i = 0; i < size; i++) { + int j = sel[i]; + long decimal128Low = decimal64Values[j]; + long decimal128High = decimal128HighValues[j]; + FastDecimalUtils.setDecimal128WithScale(bufferDec, resultDec, decimal128Low, decimal128High, + getScale()); + int index = j * DECIMAL_MEMORY_SIZE; + outputVectorSlot.getMemorySegments().setBytes(index, resultDec.getDecimalMemorySegment(), 0, + DECIMAL_MEMORY_SIZE); + } + } else { + for (int i = 0; i < size; i++) { + long decimal128Low = decimal64Values[i]; + long decimal128High = decimal128HighValues[i]; + FastDecimalUtils.setDecimal128WithScale(bufferDec, resultDec, decimal128Low, decimal128High, + getScale()); + int index = i * DECIMAL_MEMORY_SIZE; + outputVectorSlot.getMemorySegments().setBytes(index, resultDec.getDecimalMemorySegment(), 0, + DECIMAL_MEMORY_SIZE); + } + } + outputVectorSlot.collectDecimalInfo(); + return; + } + // normal -> normal + if (selectedInUse) { + for (int i = 0; i < size; i++) { + int j = sel[i]; + + // copy memory segment from specified position in selection array. + int fromIndex = j * DECIMAL_MEMORY_SIZE; + outputVectorSlot.getMemorySegments() + .setBytes(fromIndex, getMemorySegments(), fromIndex, DECIMAL_MEMORY_SIZE); } } else { - BlockUtils.copySelectedInCommon(selectedInUse, sel, size, this, output); + // directly copy memory. + outputVectorSlot.getMemorySegments().setBytes(0, getMemorySegments()); } - - super.copySelected(selectedInUse, sel, size, output); + outputVectorSlot.collectDecimalInfo(); } @Override @@ -219,31 +1044,24 @@ public void shallowCopyTo(RandomAccessBlock another) { if (!(another instanceof DecimalBlock)) { GeneralUtil.nestedException("cannot shallow copy to " + (another == null ? null : another.toString())); } - DecimalBlock vectorSlot = (DecimalBlock) another; + DecimalBlock vectorSlot = another.cast(DecimalBlock.class); super.shallowCopyTo(vectorSlot); vectorSlot.memorySegments = memorySegments; + vectorSlot.decimal64Values = decimal64Values; + vectorSlot.decimal128HighValues = decimal128HighValues; + vectorSlot.state = state; } @Override protected Object getElementAtUnchecked(int position) { position = realPositionOf(position); - // slice a memory segment in 64 bytes and build a decimal value. 
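// [Illustrative aside, not part of the patch] In normal mode each position owns
// one DECIMAL_MEMORY_SIZE (64-byte) DecimalStructure image inside
// memorySegments, so random access is plain offset arithmetic:
//   Slice slot = memorySegments.slice(position * DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE);
//   Decimal d  = new Decimal(slot); // wraps the raw bytes, no copy or parsing
// The replacement helpers (getElementAtUncheckedInner and friends) keep exactly
// this layout behind the new realPositionOf indirection.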
- int fromIndex = position * DECIMAL_MEMORY_SIZE; - Slice decimalMemorySegment = memorySegments.slice(fromIndex, DECIMAL_MEMORY_SIZE); - return new Decimal(decimalMemorySegment); + return getElementAtUncheckedInner(position); } @Override public void setElementAt(int position, Object element) { - final int realPos = realPositionOf(position); - super.updateElementAt(position, element, e -> { - Decimal decimal = (Decimal) e; - Slice decimalMemorySegment = decimal.getMemorySegment(); - - // copy memory from specified position in size of 64 bytes - int fromIndex = realPos * DECIMAL_MEMORY_SIZE; - memorySegments.setBytes(fromIndex, decimalMemorySegment); - }); + position = realPositionOf(position); + setElementAtInner(position, element); } public void encoding(SliceOutput sliceOutput) { @@ -251,11 +1069,11 @@ public void encoding(SliceOutput sliceOutput) { if (selection != null) { for (int i = 0; i < positionCount; i++) { int j = selection[i]; - sliceOutput.appendBytes(this.memorySegments.slice(j * DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE)); + sliceOutput.appendBytes(this.getMemorySegments().slice(j * DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE)); } } else { - sliceOutput.appendBytes(this.memorySegments); + sliceOutput.appendBytes(this.getMemorySegments()); } } @@ -264,12 +1082,117 @@ public void encoding(SliceOutput sliceOutput) { */ @Deprecated public Slice getMemorySegments() { + allocToNormal(); return this.memorySegments; } + /** + * dangerous! + * can only be invoked by intermediate DecimalBlock, like the OutputSlot of vectorized expression + */ + private void allocToNormal() { + if (isUnalloc()) { + allocateNormalDecimal(); + } else if (isDecimal64() || isDecimal128()) { + convertToNormal(); + } + } + + /** + * dangerous! + * can only be invoked by intermediate DecimalBlock, like the OutputSlot of vectorized expression + */ + private void allocToDecimal128() { + if (isUnalloc()) { + allocateDecimal128(); + return; + } + if (isDecimal64()) { + if (this.decimal64Values != null) { + this.decimal128HighValues = new long[positionCount]; + for (int i = 0; i < decimal64Values.length; i++) { + decimal128HighValues[i] = decimal64Values[i] < 0 ? 
-1 : 0; + } + } + this.state = DECIMAL_128; + updateSizeInfo(); + return; + } + if (isDecimal128()) { + return; + } + throw new IllegalStateException("Can not convert " + state + " to DECIMAL_128"); + } + + void allocateNormalDecimal() { + this.memorySegments = Slices.allocate(positionCount * DECIMAL_MEMORY_SIZE); + this.state = DecimalBlockState.UNSET_STATE; + updateSizeInfo(); + } + public Slice getRegion(int position) { position = realPositionOf(position); - return memorySegments.slice(position * DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE); + return getRegionInner(position); + } + + public Slice getRegion(int position, Slice output) { + position = realPositionOf(position); + return getRegionInner(position, output); + } + + /** + * convert decimal64/decimal128 values to the normal decimal memory segment + */ + private void convertToNormal() { + if ((!isDecimal64() && !isDecimal128()) || this.memorySegments != null) { + return; + } + SliceOutput sliceOutput = new DynamicSliceOutput(positionCount * DECIMAL_MEMORY_SIZE); + DecimalStructure buffer = new DecimalStructure(); + DecimalStructure result = new DecimalStructure(); + if (isDecimal64() && decimal64Values != null) { + // handle decimal64 values + for (int pos = 0; pos < decimal64Values.length; pos++) { + if (!isNullInner(pos)) { + long decimal64 = decimal64Values[pos]; + if (decimal64 == 0) { + sliceOutput.writeBytes(Decimal.ZERO.getMemorySegment()); + } else { + FastDecimalUtils.setLongWithScale(buffer, result, decimal64, dataType.getScale()); + sliceOutput.writeBytes(result.getDecimalMemorySegment()); + buffer.reset(); + result.reset(); + } + } else { + sliceOutput.skipBytes(DECIMAL_MEMORY_SIZE); + } + } + } else if (isDecimal128() && decimal64Values != null && decimal128HighValues != null) { + // handle decimal128 values + for (int pos = 0; pos < decimal64Values.length; pos++) { + if (!isNullInner(pos)) { + long decimal128Low = decimal64Values[pos]; + long decimal128High = decimal128HighValues[pos]; + if (decimal128Low == 0 && decimal128High == 0) { + sliceOutput.writeBytes(Decimal.ZERO.getMemorySegment()); + } else { + FastDecimalUtils.setDecimal128WithScale(buffer, result, + decimal128Low, decimal128High, getScale()); + sliceOutput.writeBytes(result.getDecimalMemorySegment()); + buffer.reset(); + result.reset(); + } + } else { + sliceOutput.skipBytes(DECIMAL_MEMORY_SIZE); + } + } + } + + this.memorySegments = sliceOutput.slice(); + this.decimal64Values = null; + this.decimal128HighValues = null; + this.state = DecimalBlockState.FULL; + updateSizeInfo(); } @Override @@ -279,6 +1202,7 @@ public void compact(int[] selection) { } int compactedSize = selection.length; int index = 0; + allocToNormal(); for (int i = 0; i < compactedSize; i++) { int j = selection[i]; @@ -295,10 +1219,306 @@ updateSizeInfo(); } + private boolean isNullInner(int position) { + return isNull != null && isNull[position + arrayOffset]; + } + + private Decimal getDecimalInner(int position) { + if (isDecimal64()) { + return new Decimal(decimal64Values[position], dataType.getScale()); + } else if (isDecimal128()) { + long decimal128Low = decimal64Values[position]; + long decimal128High = decimal128HighValues[position]; + DecimalStructure decimalStructure = new DecimalStructure(); + DecimalStructure tmpBuffer = getRegionTmpBuffer(); + FastDecimalUtils.setDecimal128WithScale(tmpBuffer, decimalStructure, + decimal128Low, decimal128High, getScale()); + return new Decimal(decimalStructure); + } else { + Slice memorySegment = 
getMemorySegments().slice(position * DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE); + return new Decimal(memorySegment); + } + } + + private Decimal getDecimalInner(int position, Slice output) { + if (isDecimal64()) { + DecimalStructure decimalStructure = new DecimalStructure(output); + decimalStructure.setLongWithScale(decimal64Values[position], dataType.getScale()); + return new Decimal(decimalStructure); + } else if (isDecimal128()) { + long decimal128Low = decimal64Values[position]; + long decimal128High = decimal128HighValues[position]; + DecimalStructure decimalStructure = new DecimalStructure(output); + DecimalStructure tmpBuffer = getRegionTmpBuffer(); + FastDecimalUtils.setDecimal128WithScale(tmpBuffer, decimalStructure, + decimal128Low, decimal128High, getScale()); + return new Decimal(decimalStructure); + } else { + Slice memorySegment = + getMemorySegments().slice(position * DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE, output); + return new Decimal(memorySegment); + } + } + + private long getLongInner(int position) { + if (isDecimal64()) { + return decimal64Values[position]; + } else { + throw new IllegalStateException("Cannot get long from DecimalBlock with state: " + state); + } + } + + private long getDecimal128LowInner(int position) { + if (isDecimal128()) { + return decimal64Values[position]; + } else { + throw new IllegalStateException("Cannot get decimal128Low from DecimalBlock with state: " + state); + } + } + + private long getDecimal128HighInner(int position) { + if (isDecimal128()) { + return decimal128HighValues[position]; + } else { + throw new IllegalStateException("Cannot get decimal128High from DecimalBlock with state: " + state); + } + } + + private long getDecimal128LowUncheckInner(int position) { + return decimal64Values[position]; + } + + private long getDecimal128HighUncheckInner(int position) { + return decimal128HighValues[position]; + } + + private boolean equalsInner(int realPosition, Block other, int otherPosition) { + boolean n1 = isNullInner(realPosition); + boolean n2 = other.isNull(otherPosition); + if (n1 && n2) { + return true; + } else if (n1 != n2) { + return false; + } + + if (!(other instanceof SegmentedDecimalBlock)) { + throw new AssertionError("Failed to compare with " + other.getClass().getName()); + } + SegmentedDecimalBlock otherBlock = (SegmentedDecimalBlock) other; + if (isDecimal64()) { + if (otherBlock.isDecimal64()) { + return getLongInner(realPosition) == other.getLong(otherPosition); + } else if (otherBlock.isDecimal128()) { + long thisDecimal64 = getLongInner(realPosition); + long thatLow = otherBlock.getDecimal128Low(otherPosition); + long thatHigh = otherBlock.getDecimal128High(otherPosition); + if (thisDecimal64 != thatLow) { + return false; + } + return thisDecimal64 >= 0 ? (thatHigh == 0) : (thatHigh == -1); + } else { + Slice memorySegment2 = otherBlock.segmentUncheckedAt(otherPosition); + return RawBytesDecimalUtils.equals(getDecimalInner(realPosition).getMemorySegment(), memorySegment2); + } + } + if (isDecimal128()) { + if (otherBlock.isDecimal64()) { + long thisLow = decimal64Values[realPosition]; + long thisHigh = decimal128HighValues[realPosition]; + long thatDecimal64 = other.getLong(otherPosition); + if (thisLow != thatDecimal64) { + return false; + } + return thatDecimal64 >= 0 ? 
(thisHigh == 0) : (thisHigh == -1); + } else if (otherBlock.isDecimal128()) { + return decimal64Values[realPosition] == otherBlock.getDecimal128Low(otherPosition) && + decimal128HighValues[realPosition] == otherBlock.getDecimal128High(otherPosition); + } else { + Slice memorySegment2 = otherBlock.segmentUncheckedAt(otherPosition); + return RawBytesDecimalUtils.equals(getDecimalInner(realPosition).getMemorySegment(), memorySegment2); + } + } + + // for decimal block, compare by memory segment + Slice memorySegment1 = this.segmentUncheckedAtInner(realPosition); + Slice memorySegment2 = otherBlock.segmentUncheckedAt(otherPosition); + return RawBytesDecimalUtils.equals(memorySegment1, memorySegment2); + } + + private int hashCodeInner(int position) { + if (isNullInner(position)) { + return 0; + } + if (isDecimal64()) { + long val = getLongInner(position); + int hashCode = RawBytesDecimalUtils.hashCode(val, getScale()); + if (hashCode != 0) { + return hashCode; + } + // fallback + DecimalStructure bufferDec = getHashCodeTmpBuffer(); + DecimalStructure resultDec = getHashCodeResultBuffer(); + FastDecimalUtils.setLongWithScale(bufferDec, resultDec, val, getScale()); + return RawBytesDecimalUtils.hashCode(resultDec.getDecimalMemorySegment()); + } else if (isDecimal128()) { + long decimal128Low = getDecimal128LowInner(position); + long decimal128High = getDecimal128HighInner(position); + DecimalStructure bufferDec = getHashCodeTmpBuffer(); + DecimalStructure resultDec = getHashCodeResultBuffer(); + FastDecimalUtils.setDecimal128WithScale(bufferDec, resultDec, decimal128Low, decimal128High, getScale()); + return RawBytesDecimalUtils.hashCode(resultDec.getDecimalMemorySegment()); + } else { + Slice memorySegment = memorySegments.slice(position * DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE); + return RawBytesDecimalUtils.hashCode(memorySegment); + } + } + + private Slice segmentUncheckedAtInner(int position) { + if (isDecimal64()) { + long decimal64 = decimal64Values[position]; + return new Decimal(decimal64, getScale()).getMemorySegment(); + } + if (isDecimal128()) { + long decimal128Low = decimal64Values[position]; + long decimal128High = decimal128HighValues[position]; + DecimalStructure bufferDec = getRegionTmpBuffer(); + DecimalStructure resultDec = new DecimalStructure(); + FastDecimalUtils.setDecimal128WithScale(bufferDec, resultDec, decimal128Low, decimal128High, getScale()); + return resultDec.getDecimalMemorySegment(); + } + return getMemorySegments().slice(position * DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE); + } + + private void addToHasherInner(IStreamingHasher sink, int position) { + if (isNullInner(position)) { + sink.putInt(NULL_VALUE); + } else { + sink.putInt(hashCodeInner(position)); + } + } + + private Object getElementAtUncheckedInner(int position) { + if (isDecimal64()) { + long decimal64 = decimal64Values[position]; + return new Decimal(decimal64, getScale()); + } + if (isDecimal128()) { + long decimal128Low = decimal64Values[position]; + long decimal128High = decimal128HighValues[position]; + DecimalStructure bufferDec = getRegionTmpBuffer(); + DecimalStructure resultDec = new DecimalStructure(); + FastDecimalUtils.setDecimal128WithScale(bufferDec, resultDec, decimal128Low, decimal128High, getScale()); + return new Decimal(resultDec); + } + // slice a memory segment in 64 bytes and build a decimal value. 
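// [Illustrative aside, not part of the patch] DECIMAL_128 is a signed 128-bit
// two's-complement pair scaled by the type's scale:
//   value = decimal128High * 2^64 + Long.toUnsignedLong(decimal128Low)
// so {low = -1, high = 0} encodes 2^64 - 1 while {low = -1, high = -1} encodes
// -1. That is why equalsInner() accepts a DECIMAL_64 value as equal to a
// DECIMAL_128 value only when high == (low >= 0 ? 0 : -1).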
+ int fromIndex = position * DECIMAL_MEMORY_SIZE; + Slice decimalMemorySegment = getMemorySegments().slice(fromIndex, DECIMAL_MEMORY_SIZE); + return new Decimal(decimalMemorySegment); + } + + /** + * considered a low-frequency path in VectorizedExpression + */ + private void setElementAtInner(final int position, Object element) { + if (element == null) { + isNull[position] = true; + hasNull = true; + return; + } + Decimal decimal = (Decimal) element; + boolean elementIsDec64 = DecimalConverter.isDecimal64(decimal); + if (isUnalloc()) { + allocateValues(elementIsDec64); + } + isNull[position] = false; + if (isDecimal64() && elementIsDec64) { + // set a decimal64 inside a Decimal64 Block + // make sure that the scales are the same + if (decimal.scale() == getScale()) { + Decimal tmpDecimal = new Decimal(); + FastDecimalUtils.shift(decimal.getDecimalStructure(), tmpDecimal.getDecimalStructure(), + decimal.scale()); + long decimal64 = tmpDecimal.longValue(); + decimal64Values[position] = decimal64; + return; + } + } + if (isDecimal128() && elementIsDec64) { + // set a decimal64 inside a Decimal128 Block + // make sure that the scales are the same + if (decimal.scale() == getScale()) { + Decimal tmpDecimal = new Decimal(); + FastDecimalUtils.shift(decimal.getDecimalStructure(), tmpDecimal.getDecimalStructure(), + decimal.scale()); + long decimal64 = tmpDecimal.longValue(); + decimal64Values[position] = decimal64; + decimal128HighValues[position] = decimal64 < 0 ? -1 : 0; + return; + } + } + // set a normal decimal inside a DecimalBlock + Slice decimalMemorySegment = decimal.getMemorySegment(); + + // copy memory from specified position in size of 64 bytes + int fromIndex = position * DECIMAL_MEMORY_SIZE; + getMemorySegments().setBytes(fromIndex, decimalMemorySegment); + } + + private Slice getRegionInner(int position) { + if (isDecimal64()) { + long val = decimal64Values[position]; + DecimalStructure decimalStructure = new DecimalStructure(); + DecimalStructure tmpBuffer = getRegionTmpBuffer(); + FastDecimalUtils.setLongWithScale(tmpBuffer, decimalStructure, val, getScale()); + return decimalStructure.getDecimalMemorySegment(); + } + if (isDecimal128()) { + long decimal128Low = decimal64Values[position]; + long decimal128High = decimal128HighValues[position]; + DecimalStructure decimalStructure = new DecimalStructure(); + DecimalStructure tmpBuffer = getRegionTmpBuffer(); + FastDecimalUtils.setDecimal128WithScale(tmpBuffer, decimalStructure, + decimal128Low, decimal128High, getScale()); + return decimalStructure.getDecimalMemorySegment(); + } + return getMemorySegments().slice(position * DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE); + } + + private Slice getRegionInner(int position, Slice output) { + if (isDecimal64()) { + long val = decimal64Values[position]; + DecimalStructure decimalStructure = new DecimalStructure(output); + DecimalStructure tmpBuffer = getRegionTmpBuffer(); + FastDecimalUtils.setLongWithScale(tmpBuffer, decimalStructure, val, getScale()); + return decimalStructure.getDecimalMemorySegment(); + } + if (isDecimal128()) { + long decimal128Low = decimal64Values[position]; + long decimal128High = decimal128HighValues[position]; + DecimalStructure decimalStructure = new DecimalStructure(output); + DecimalStructure tmpBuffer = getRegionTmpBuffer(); + FastDecimalUtils.setDecimal128WithScale(tmpBuffer, decimalStructure, + decimal128Low, decimal128High, getScale()); + return decimalStructure.getDecimalMemorySegment(); + } + return getMemorySegments().slice(position * 
DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE, output); + } + @Override public void updateSizeInfo() { - estimatedSize = INSTANCE_SIZE + sizeOf(isNull) + memorySegments.length(); - elementUsedBytes = Byte.BYTES * positionCount + DECIMAL_MEMORY_SIZE * positionCount; + if (isUnalloc()) { + return; + } + if (isDecimal64()) { + estimatedSize = INSTANCE_SIZE + sizeOf(isNull) + sizeOf(decimal64Values); + elementUsedBytes = Byte.BYTES * positionCount + Long.BYTES * positionCount; + } else if (isDecimal128()) { + estimatedSize = INSTANCE_SIZE + sizeOf(isNull) + sizeOf(decimal64Values) + sizeOf(decimal128HighValues); + elementUsedBytes = Byte.BYTES * positionCount + Long.BYTES * positionCount * 2; + } else { + estimatedSize = INSTANCE_SIZE + sizeOf(isNull) + memorySegments.length(); + elementUsedBytes = Byte.BYTES * positionCount + DECIMAL_MEMORY_SIZE * positionCount; + } } public void collectDecimalInfo() { @@ -309,7 +1529,7 @@ public void collectDecimalInfo() { DecimalBlockState resultState = DecimalBlockState.UNSET_STATE; for (int position = 0; position < positionCount; position++) { int realPos = realPositionOf(position); - if (!isNull(position)) { + if (!isNullInner(realPos)) { // get state of block element and merge with result state DecimalBlockState elementState = DecimalBlockState.stateOf(memorySegments, realPos); resultState = resultState.merge(elementState); @@ -328,18 +1548,21 @@ public int[] getSelection() { } public int fastInt1(int position) { + position = realPositionOf(position); return (!state.isSimple() || state.getInt1Pos() == UNSET) ? 0 : - memorySegments.getInt(realPositionOf(position) * DECIMAL_MEMORY_SIZE + state.getInt1Pos() * 4); + getMemorySegments().getInt(position * DECIMAL_MEMORY_SIZE + state.getInt1Pos() * 4); } public int fastInt2(int position) { + position = realPositionOf(position); return (!state.isSimple() || state.getInt2Pos() == UNSET) ? 0 : - memorySegments.getInt(realPositionOf(position) * DECIMAL_MEMORY_SIZE + state.getInt2Pos() * 4); + getMemorySegments().getInt(position * DECIMAL_MEMORY_SIZE + state.getInt2Pos() * 4); } public int fastFrac(int position) { + position = realPositionOf(position); return (!state.isSimple() || state.getFracPos() == UNSET) ? 0 : - memorySegments.getInt(realPositionOf(position) * DECIMAL_MEMORY_SIZE + state.getFracPos() * 4); + getMemorySegments().getInt(position * DECIMAL_MEMORY_SIZE + state.getFracPos() * 4); } public boolean isSimple() { @@ -358,20 +1581,61 @@ public int getFracPos() { return state.getFracPos(); } + /** + * For performance reasons, when this is a decimal64 block, + * the caller should obtain the long array through getDecimal64Values() + */ + @Override + public boolean isDecimal64() { + return state.isDecimal64(); + } + + /** + * For performance reasons, when this is a decimal128 block, + * the caller should obtain the lowBits array and the highBits array + * through getDecimal128LowValues() and getDecimal128HighValues() + */ + @Override + public boolean isDecimal128() { + return state.isDecimal128(); + } + + public Slice allocCachedSlice() { + Slice cachedSlice; + if (isDecimal64() || isDecimal128()) { + cachedSlice = DecimalStructure.allocateDecimalSlice(); + } else { + cachedSlice = new Slice(); + } + return cachedSlice; + } + + public boolean isUnalloc() { + return state == UNALLOC_STATE; + } + + public int getScale() { + return dataType.getScale(); + } + // note: dangerous! 
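// [Illustrative aside, not part of the patch] The raw setters below write
// MySQL-style base-10^9 digit words straight into a 64-byte slot. E.g.
// setMultiResult1 stores a SIMPLE_MODE_2 value, one integer word plus one
// fraction word, value = sum0 + sum9 * 10^-9; with hypothetical inputs
// sum0 = 3, sum9 = 141592653 the slot encodes 3.141592653, and the
// INTEGERS/FRACTIONS header bytes record the 9 + 9 digit counts.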
public void setMultiResult1(int position, int sum0, int sum9) { int index = realPositionOf(position) * DECIMAL_MEMORY_SIZE; - memorySegments.setInt(index + 0, sum0); + Slice memorySegments = getMemorySegments(); + memorySegments.setInt(index, sum0); memorySegments.setInt(index + 4, sum9); memorySegments.setByte(index + INTEGERS_OFFSET, 9); memorySegments.setByte(index + FRACTIONS_OFFSET, 9); memorySegments.setByte(index + DERIVED_FRACTIONS_OFFSET, 9); memorySegments.setByte(index + IS_NEG_OFFSET, 0); + + this.state = this.state.merge(SIMPLE_MODE_2); } // note: dangerous! public void setMultiResult2(int position, int carry0, int sum0, int sum9) { int index = realPositionOf(position) * DECIMAL_MEMORY_SIZE; + Slice memorySegments = getMemorySegments(); memorySegments.setInt(index, carry0); memorySegments.setInt(index + 4, sum0); memorySegments.setInt(index + 8, sum9); @@ -379,23 +1643,29 @@ public void setMultiResult2(int position, int carry0, int sum0, int sum9) { memorySegments.setByte(index + FRACTIONS_OFFSET, 9); memorySegments.setByte(index + DERIVED_FRACTIONS_OFFSET, 9); memorySegments.setByte(index + IS_NEG_OFFSET, 0); + + this.state = this.state.merge(SIMPLE_MODE_3); } // note: dangerous! public void setMultiResult3(int position, int sum0, int sum9, int sum18) { int index = realPositionOf(position) * DECIMAL_MEMORY_SIZE; - memorySegments.setInt(index + 0, sum0); + Slice memorySegments = getMemorySegments(); + memorySegments.setInt(index, sum0); memorySegments.setInt(index + 4, sum9); memorySegments.setInt(index + 8, sum18); memorySegments.setByte(index + INTEGERS_OFFSET, 9); memorySegments.setByte(index + FRACTIONS_OFFSET, 18); memorySegments.setByte(index + DERIVED_FRACTIONS_OFFSET, 18); memorySegments.setByte(index + IS_NEG_OFFSET, 0); + + this.state = this.state.merge(FULL); } // note: dangerous! public void setMultiResult4(int position, int carry0, int sum0, int sum9, int sum18) { int index = realPositionOf(position) * DECIMAL_MEMORY_SIZE; + Slice memorySegments = getMemorySegments(); memorySegments.setInt(index, carry0); memorySegments.setInt(index + 4, sum0); memorySegments.setInt(index + 8, sum9); @@ -404,22 +1674,28 @@ public void setMultiResult4(int position, int carry0, int sum0, int sum9, int su memorySegments.setByte(index + FRACTIONS_OFFSET, 18); memorySegments.setByte(index + DERIVED_FRACTIONS_OFFSET, 18); memorySegments.setByte(index + IS_NEG_OFFSET, 0); + + this.state = this.state.merge(FULL); } // note: dangerous! public void setSubResult1(int position, int sub0, int sub9, boolean isNeg) { int index = realPositionOf(position) * DECIMAL_MEMORY_SIZE; + Slice memorySegments = getMemorySegments(); memorySegments.setInt(index, sub0); memorySegments.setInt(index + 4, sub9); memorySegments.setByte(index + INTEGERS_OFFSET, 9); memorySegments.setByte(index + FRACTIONS_OFFSET, 9); memorySegments.setByte(index + DERIVED_FRACTIONS_OFFSET, 9); memorySegments.setByte(index + IS_NEG_OFFSET, isNeg ? 1 : 0); + + this.state = SIMPLE_MODE_2; } // note: dangerous! 
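+    // Same caveat as above. Under the SIMPLE_MODE_3 layout (int2 word, int1 word,
+    // frac word), a hypothetical call
+    //   block.setSubResult2(pos, 1, 2, 500000000, false);
+    // encodes 1 * 10^9 + 2 + 0.5 = 1000000002.5 at `pos`.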
public void setSubResult2(int position, int carry, int sub0, int sub9, boolean isNeg) { int index = realPositionOf(position) * DECIMAL_MEMORY_SIZE; + Slice memorySegments = getMemorySegments(); memorySegments.setInt(index, carry); memorySegments.setInt(index + 4, sub0); memorySegments.setInt(index + 8, sub9); @@ -427,22 +1703,28 @@ public void setSubResult2(int position, int carry, int sub0, int sub9, boolean i memorySegments.setByte(index + FRACTIONS_OFFSET, 9); memorySegments.setByte(index + DERIVED_FRACTIONS_OFFSET, 9); memorySegments.setByte(index + IS_NEG_OFFSET, isNeg ? 1 : 0); + + this.state = this.state.merge(SIMPLE_MODE_3); } // note: dangerous! public void setAddResult1(int position, int sum0, int sum9) { int index = realPositionOf(position) * DECIMAL_MEMORY_SIZE; + Slice memorySegments = getMemorySegments(); memorySegments.setInt(index, sum0); memorySegments.setInt(index + 4, sum9); memorySegments.setByte(index + INTEGERS_OFFSET, 9); memorySegments.setByte(index + FRACTIONS_OFFSET, 9); memorySegments.setByte(index + DERIVED_FRACTIONS_OFFSET, 9); memorySegments.setByte(index + IS_NEG_OFFSET, 0); + + this.state = this.state.merge(SIMPLE_MODE_2); } // note: dangerous! public void setAddResult2(int position, int carry, int sum0, int sum9) { int index = realPositionOf(position) * DECIMAL_MEMORY_SIZE; + Slice memorySegments = getMemorySegments(); memorySegments.setInt(index, carry); memorySegments.setInt(index + 4, sum0); memorySegments.setInt(index + 8, sum9); @@ -450,117 +1732,27 @@ public void setAddResult2(int position, int carry, int sum0, int sum9) { memorySegments.setByte(index + FRACTIONS_OFFSET, 9); memorySegments.setByte(index + DERIVED_FRACTIONS_OFFSET, 9); memorySegments.setByte(index + IS_NEG_OFFSET, 0); - } - - /** - * State of decimal block - */ - enum DecimalBlockState { - UNSET_STATE(false, UNSET, UNSET, UNSET), - - NOT_SIMPLE(false, UNSET, UNSET, UNSET), - - // frac * 10^-9 - SIMPLE_MODE_1(true, UNSET, UNSET, 0), - - // int1 + frac * 10^-9 - SIMPLE_MODE_2(true, UNSET, 0, 1), - - // int2 * 10^9 + int1 + frac * 10^-9 - SIMPLE_MODE_3(true, 0, 1, 2); - - private final boolean isSimple; - private final int int2Pos; - private final int int1Pos; - private final int fracPos; - - DecimalBlockState(boolean isSimple, int int2Pos, int int1Pos, int fracPos) { - this.isSimple = isSimple; - this.int2Pos = int2Pos; - this.int1Pos = int1Pos; - this.fracPos = fracPos; - } - - public DecimalBlockState merge(DecimalBlockState that) { - if (this == UNSET_STATE) { - return that; - } - - if (that == UNSET_STATE) { - return this; - } - - if (this == that && this != NOT_SIMPLE) { - return this; - } - return NOT_SIMPLE; - } - - public static DecimalBlockState stateOf(Slice memorySegments, int position) { - int isNeg = memorySegments.getByte(position * DECIMAL_MEMORY_SIZE + IS_NEG_OFFSET) & 0xFF; - if (isNeg == 1) { - return NOT_SIMPLE; - } - - int integers = memorySegments.getByte(position * DECIMAL_MEMORY_SIZE + INTEGERS_OFFSET) & 0xFF; - int fractions = memorySegments.getByte(position * DECIMAL_MEMORY_SIZE + FRACTIONS_OFFSET) & 0xFF; - - int intWord = roundUp(integers); - int fracWord = roundUp(fractions); - - if (intWord == 0 && fracWord == 1) { - // frac * 10^-9 - return SIMPLE_MODE_1; - } else if (intWord == 1 && fracWord == 1) { - // int1 + frac * 10^-9 - return SIMPLE_MODE_2; - } else if (intWord == 2 && fracWord == 1) { - // int2 * 10^9 + int1 + frac * 10^-9 - return SIMPLE_MODE_3; - } - - return NOT_SIMPLE; - } - - public static DecimalBlockState stateOf(DecimalStructure 
decimalStructure) { - if (decimalStructure == null || decimalStructure.isNeg()) { - return NOT_SIMPLE; - } - - int integers = decimalStructure.getIntegers(); - int fractions = decimalStructure.getFractions(); - - int intWord = roundUp(integers); - int fracWord = roundUp(fractions); - if (intWord == 0 && fracWord == 1) { - // frac * 10^-9 - return SIMPLE_MODE_1; - } else if (intWord == 1 && fracWord == 1) { - // int1 + frac * 10^-9 - return SIMPLE_MODE_2; - } else if (intWord == 2 && fracWord == 1) { - // int2 * 10^9 + int1 + frac * 10^-9 - return SIMPLE_MODE_3; - } - - return NOT_SIMPLE; - } + this.state = this.state.merge(SIMPLE_MODE_3); + } - public boolean isSimple() { - return isSimple; - } + // note: dangerous! + public void setFullState() { + this.state = FULL; + } - public int getInt2Pos() { - return int2Pos; - } + // note: dangerous! + public long[] getDecimal64Values() { + return decimal64Values; + } - public int getInt1Pos() { - return int1Pos; - } + // note: dangerous! + public long[] getDecimal128LowValues() { + return decimal64Values; + } - public int getFracPos() { - return fracPos; - } + // note: dangerous! + public long[] getDecimal128HighValues() { + return decimal128HighValues; } -} \ No newline at end of file +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DecimalBlockBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DecimalBlockBuilder.java index 8d33a37d7..7fd4205f9 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DecimalBlockBuilder.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DecimalBlockBuilder.java @@ -17,33 +17,54 @@ package com.alibaba.polardbx.executor.chunk; import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.datatype.DecimalConverter; +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.FastDecimalUtils; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.optimizer.core.datatype.DecimalType; import com.google.common.base.Preconditions; -import com.alibaba.polardbx.common.datatype.DecimalConverter; -import com.alibaba.polardbx.common.datatype.DecimalStructure; import io.airlift.slice.DynamicSliceOutput; import io.airlift.slice.Slice; import io.airlift.slice.SliceOutput; +import it.unimi.dsi.fastutil.longs.LongArrayList; import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DECIMAL_MEMORY_SIZE; -import static com.alibaba.polardbx.executor.chunk.DecimalBlock.DecimalBlockState.*; +import static com.alibaba.polardbx.executor.chunk.SegmentedDecimalBlock.DecimalBlockState.DECIMAL_128; +import static com.alibaba.polardbx.executor.chunk.SegmentedDecimalBlock.DecimalBlockState.DECIMAL_64; +import static com.alibaba.polardbx.executor.chunk.SegmentedDecimalBlock.DecimalBlockState.UNSET_STATE; /** * Decimal block builder */ -public class DecimalBlockBuilder extends AbstractBlockBuilder { +public class DecimalBlockBuilder extends AbstractBlockBuilder implements SegmentedDecimalBlock { + SliceOutput sliceOutput; - DataType decimalType; - // collect state of decimal values. 
- DecimalBlock.DecimalBlockState state; + LongArrayList decimal64List; + LongArrayList decimal128HighList; + DecimalType decimalType; + + /** + * UNSET / DECIMAL_64 / DECIMAL_128 / + */ + DecimalBlockState state; + + private int scale; - public DecimalBlockBuilder(int capacity, DataType decimalType) { + private DecimalStructure decimalBuffer; + private DecimalStructure decimalResult; + + public DecimalBlockBuilder(int capacity, DataType type) { super(capacity); - this.sliceOutput = new DynamicSliceOutput(capacity * DECIMAL_MEMORY_SIZE); - this.decimalType = decimalType; + this.decimalType = (DecimalType) type; + this.scale = decimalType.getScale(); + if (decimalType.isDecimal64()) { + initDecimal64List(); + } else { + initSliceOutput(); + } this.state = UNSET_STATE; } @@ -51,26 +72,90 @@ public DecimalBlockBuilder(int capacity) { this(capacity, DataTypes.DecimalType); } + public void initSliceOutput() { + if (this.sliceOutput == null) { + this.sliceOutput = new DynamicSliceOutput(initialCapacity * DECIMAL_MEMORY_SIZE); + } + } + + private void initDecimal64List() { + if (this.decimal64List == null) { + this.decimal64List = new LongArrayList(initialCapacity); + } + } + + private void initDecimal128List() { + if (this.decimal64List == null) { + this.decimal64List = new LongArrayList(initialCapacity); + } + if (this.decimal128HighList == null) { + this.decimal128HighList = new LongArrayList(initialCapacity); + } + } + @Override public void writeDecimal(Decimal value) { + convertToNormalDecimal(); + valueIsNull.add(false); sliceOutput.writeBytes(value.getMemorySegment()); updateDecimalInfo(value.getDecimalStructure()); } - public void writeDecimalBin(byte[] bytes, DataType dataType) { - // binary -> decimal - DecimalStructure d2 = new DecimalStructure(); - DecimalConverter.binToDecimal(bytes, d2, dataType.getPrecision(), dataType.getScale()); + @Override + public void writeLong(long value) { + if (state.isUnset()) { + initDecimal64List(); + state = DECIMAL_64; + } else if (!state.isDecimal64Or128()) { + writeDecimal(new Decimal(value, decimalType.getScale())); + return; + } valueIsNull.add(false); - sliceOutput.writeBytes(d2.getDecimalMemorySegment()); + decimal64List.add(value); + if (state.isDecimal128()) { + decimal128HighList.add(value < 0 ? 
-1 : 0); + } + } - updateDecimalInfo(d2); + public void writeDecimal128(long low, long high) { + if (state.isUnset()) { + initDecimal128List(); + for (int i = 0; i < valueIsNull.size(); i++) { + decimal128HighList.add(0); + } + state = DECIMAL_128; + } else if (state.isDecimal64()) { + // convert decimal64 to decimal128 + initDecimal128List(); + state = DECIMAL_128; + for (int i = 0; i < valueIsNull.size(); i++) { + if (decimal64List != null && decimal64List.getLong(i) < 0) { + decimal128HighList.add(-1); + } else { + decimal128HighList.add(0); + } + } + } else if (!state.isDecimal128()) { + // normal decimal + DecimalStructure buffer = getDecimalBuffer(); + DecimalStructure result = getDecimalResult(); + FastDecimalUtils.setDecimal128WithScale(buffer, result, low, high, scale); + valueIsNull.add(false); + sliceOutput.writeBytes(result.getDecimalMemorySegment()); + updateDecimalInfo(result); + return; + } + + valueIsNull.add(false); + decimal64List.add(low); + decimal128HighList.add(high); } public void writeDecimalBin(byte[] bytes) { + convertToNormalDecimal(); // binary -> decimal DecimalStructure d2 = new DecimalStructure(); DecimalConverter.binToDecimal(bytes, d2, decimalType.getPrecision(), decimalType.getScale()); @@ -96,17 +181,57 @@ public void writeByteArray(byte[] value, int offset, int length) { @Override public void appendNull() { appendNullInternal(); - // If null value, just skip 64-bytes - sliceOutput.skipBytes(DECIMAL_MEMORY_SIZE); + if (isDecimal64() || isUnset()) { + initDecimal64List(); + decimal64List.add(0L); + } else if (isDecimal128()) { + decimal64List.add(0L); + decimal128HighList.add(0L); + } else { + // normal decimal + // If null value, just skip 64-bytes + sliceOutput.skipBytes(DECIMAL_MEMORY_SIZE); + } } @Override public Decimal getDecimal(int position) { checkReadablePosition(position); + if (state.isDecimal64()) { + return new Decimal(getLong(position), scale); + } + if (state.isDecimal128()) { + DecimalStructure buffer = getDecimalBuffer(); + DecimalStructure result = getDecimalResult(); + FastDecimalUtils.setDecimal128WithScale(buffer, result, + decimal64List.getLong(position), decimal128HighList.getLong(position), scale); + return new Decimal(result); + } Slice segment = sliceOutput.slice().slice(position * DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE); return new Decimal(segment); } + @Override + public long getLong(int position) { + checkDecimal64StoreType(); + checkReadablePosition(position); + return decimal64List.getLong(position); + } + + @Override + public long getDecimal128Low(int position) { + checkDecimal128StoreType(); + checkReadablePosition(position); + return decimal64List.getLong(position); + } + + @Override + public long getDecimal128High(int position) { + checkDecimal128StoreType(); + checkReadablePosition(position); + return decimal128HighList.getLong(position); + } + @Override public Object getObject(int position) { return isNull(position) ? null : getDecimal(position); @@ -118,6 +243,7 @@ public void writeObject(Object value) { appendNull(); return; } + checkNormalDecimalType(); Preconditions.checkArgument(value instanceof Decimal); writeDecimal((Decimal) value); } @@ -125,13 +251,31 @@ public void writeObject(Object value) { @Override public void ensureCapacity(int capacity) { super.ensureCapacity(capacity); - // Ignore bytes stored. 
-        sliceOutput.ensureCapacity(capacity * DECIMAL_MEMORY_SIZE);
+        if (isDecimal64()) {
+            decimal64List.ensureCapacity(capacity);
+        } else if (isDecimal128()) {
+            decimal64List.ensureCapacity(capacity);
+            decimal128HighList.ensureCapacity(capacity);
+        } else {
+            // Ignore bytes stored.
+            sliceOutput.ensureCapacity(capacity * DECIMAL_MEMORY_SIZE);
+        }
     }
 
     @Override
     public Block build() {
-        return new DecimalBlock(getPositionCount(), mayHaveNull() ? valueIsNull.elements() : null,
+        if (isDecimal64()) {
+            return new DecimalBlock(decimalType, getPositionCount(), mayHaveNull(),
+                mayHaveNull() ? valueIsNull.elements() : null,
+                decimal64List.elements());
+        }
+        if (isDecimal128()) {
+            return DecimalBlock.buildDecimal128Block(decimalType, getPositionCount(), mayHaveNull(),
+                mayHaveNull() ? valueIsNull.elements() : null,
+                decimal64List.elements(), decimal128HighList.elements());
+        }
+        convertToNormalDecimal();
+        return new DecimalBlock(decimalType, getPositionCount(), mayHaveNull() ? valueIsNull.elements() : null,
             sliceOutput.slice(), state);
     }
@@ -148,7 +292,8 @@ public int hashCode(int position) {
         return getDecimal(position).hashCode();
     }
 
-    Slice segmentUncheckedAt(int position) {
+    @Override
+    public Slice segmentUncheckedAt(int position) {
         return sliceOutput.slice().slice(position * DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE);
     }
 
@@ -161,12 +306,159 @@ public DecimalBlock.DecimalBlockState getState() {
         return this.state;
     }
 
-    public void setDecimalType(DataType decimalType) {
-        this.decimalType = decimalType;
-    }
-
     public DataType getDecimalType() {
         return decimalType;
     }
+
+    @Override
+    public boolean isDecimal64() {
+        return state.isDecimal64() || (state.isUnset() && decimalType.isDecimal64());
+    }
+
+    @Override
+    public boolean isDecimal128() {
+        return state.isDecimal128();
+    }
+
+    public boolean isNormal() {
+        return state.isNormal();
+    }
+
+    /**
+     * Materializes decimal64/decimal128 longs into 64-byte decimal segments,
+     * after which the builder is backed by the slice output.
+     */
+    public void convertToNormalDecimal() {
+        initSliceOutput();
+
+        if (isNormal()) {
+            return;
+        }
+
+        // state is UNSET or DECIMAL_64 / DECIMAL_128
+        if (decimal64List != null && !decimal64List.isEmpty()) {
+            if (state.isUnset()) {
+                for (int pos = 0; pos < decimal64List.size(); pos++) {
+                    if (!isNull(pos)) {
+                        // in UNSET state all values are expected to be null
+                        throw new IllegalStateException("Incorrect DecimalBlockBuilder state");
+                    } else {
+                        sliceOutput.skipBytes(DECIMAL_MEMORY_SIZE);
+                    }
+                }
+                decimal64List.clear();
+            } else if (state.isDecimal64()) {
+                state = UNSET_STATE;
+                DecimalStructure tmpBuffer = getDecimalBuffer();
+                DecimalStructure resultBuffer = getDecimalResult();
+                // DECIMAL_64 values may already have been written
+                for (int pos = 0; pos < decimal64List.size(); pos++) {
+                    if (!isNull(pos)) {
+                        long decimal64 = decimal64List.getLong(pos);
+                        FastDecimalUtils.setLongWithScale(tmpBuffer, resultBuffer, decimal64, scale);
+                        sliceOutput.writeBytes(resultBuffer.getDecimalMemorySegment());
+                        updateDecimalInfo(resultBuffer);
+                    } else {
+                        sliceOutput.skipBytes(DECIMAL_MEMORY_SIZE);
+                    }
+                }
+
+                decimal64List.clear();
+            } else if (state.isDecimal128()) {
+                Preconditions.checkArgument(decimal64List.size() == decimal128HighList.size(),
+                    "Decimal128 lowBits count does not match highBits count");
+                state = UNSET_STATE;
+                DecimalStructure tmpBuffer = getDecimalBuffer();
+                DecimalStructure resultBuffer = getDecimalResult();
+                for (int pos = 0; pos < decimal64List.size(); pos++) {
+                    if (!isNull(pos)) {
+                        long lowBits = decimal64List.getLong(pos);
+                        long highBits = decimal128HighList.getLong(pos);
+                        FastDecimalUtils.setDecimal128WithScale(tmpBuffer, resultBuffer, lowBits, highBits, scale);
+
sliceOutput.writeBytes(resultBuffer.getDecimalMemorySegment()); + updateDecimalInfo(resultBuffer); + } else { + sliceOutput.skipBytes(DECIMAL_MEMORY_SIZE); + } + } + + decimal64List.clear(); + decimal128HighList.clear(); + } + } + } + + private void checkNormalDecimalType() { + if (state.isDecimal64Or128()) { + throw new AssertionError("DECIMAL_64 store type is inconsistent when writing a Decimal"); + } + } + + private void checkDecimal64StoreType() { + if (state.isUnset()) { + state = DECIMAL_64; + } else if (state != DECIMAL_64) { + throw new AssertionError("Unmatched DECIMAL_64 type: " + state); + } + } + + private void checkDecimal128StoreType() { + if (state != DECIMAL_128) { + throw new AssertionError("Unmatched DECIMAL_128 type: " + state); + } + } + + public boolean canWriteDecimal64() { + return state.isUnset() || state.isDecimal64(); + } + + public boolean isUnset() { + return state.isUnset(); + } + + public boolean isSimple() { + return state.isSimple(); + } + + public void setContainsNull(boolean containsNull) { + this.containsNull = containsNull; + } + + public void setScale(int scale) { + if (this.scale == scale) { + return; + } + if (state == DECIMAL_64 || state == DECIMAL_128) { + throw new IllegalStateException("Cannot change scale after decimal64/128 is written"); + } + this.scale = scale; + this.decimalType = new DecimalType(this.decimalType.getPrecision(), scale); + } + + public SliceOutput getSliceOutput() { + return sliceOutput; + } + + public LongArrayList getDecimal64List() { + return decimal64List; + } + + public LongArrayList getDecimal128LowList() { + return decimal64List; + } + + public LongArrayList getDecimal128HighList() { + return decimal128HighList; + } + + protected DecimalStructure getDecimalBuffer() { + if (decimalBuffer == null) { + this.decimalBuffer = new DecimalStructure(); + } + return decimalBuffer; + } + + protected DecimalStructure getDecimalResult() { + if (decimalResult == null) { + this.decimalResult = new DecimalStructure(); + } + return decimalResult; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DecimalBlockEncoding.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DecimalBlockEncoding.java index 0831b993c..3548f5741 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DecimalBlockEncoding.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DecimalBlockEncoding.java @@ -29,6 +29,8 @@ */ package com.alibaba.polardbx.executor.chunk; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.optimizer.core.datatype.DecimalType; import io.airlift.slice.Slice; import io.airlift.slice.SliceInput; import io.airlift.slice.SliceOutput; @@ -44,6 +46,13 @@ public class DecimalBlockEncoding implements BlockEncoding { private static final String NAME = "DECIMAL"; + /** + * compatible with boolean + */ + private static final byte NORMAL_DEC = 0; + private static final byte DEC_64 = 1; + private static final byte DEC_128 = 2; + @Override public String getName() { return NAME; @@ -51,10 +60,30 @@ public String getName() { @Override public void writeBlock(SliceOutput sliceOutput, Block block) { - DecimalBlock b = (DecimalBlock) block; + DecimalBlock b = block.cast(DecimalBlock.class); int positionCount = b.getPositionCount(); sliceOutput.appendInt(positionCount); + byte decType; + if (b.isDecimal64()) { + decType = DEC_64; + } else if (b.isDecimal128()) { + decType = DEC_128; + } else { + decType = 
NORMAL_DEC; + } + sliceOutput.writeByte(decType); + if (b.isDecimal64()) { + writeDecimal64(sliceOutput, b); + } else if (b.isDecimal128()) { + writeDecimal128(sliceOutput, b); + } else { + writeDecimal(sliceOutput, b); + } + } + + private void writeDecimal(SliceOutput sliceOutput, DecimalBlock b) { + int positionCount = b.getPositionCount(); // for fast decimal String stateName = b.getState() == null ? "" : b.getState().name(); sliceOutput.writeInt(stateName.length()); @@ -71,10 +100,49 @@ public void writeBlock(SliceOutput sliceOutput, Block block) { } } + private void writeDecimal64(SliceOutput sliceOutput, DecimalBlock b) { + sliceOutput.writeInt(b.getScale()); + int positionCount = b.getPositionCount(); + + encodeNullsAsBits(sliceOutput, b); + for (int position = 0; position < positionCount; position++) { + if (!b.isNull(position)) { + sliceOutput.writeLong(b.getLong(position)); + } + } + } + + private void writeDecimal128(SliceOutput sliceOutput, DecimalBlock b) { + sliceOutput.writeInt(b.getScale()); + int positionCount = b.getPositionCount(); + + encodeNullsAsBits(sliceOutput, b); + for (int position = 0; position < positionCount; position++) { + if (!b.isNull(position)) { + sliceOutput.writeLong(b.getDecimal128Low(position)); + sliceOutput.writeLong(b.getDecimal128High(position)); + } + } + } + @Override public Block readBlock(SliceInput sliceInput) { int positionCount = sliceInput.readInt(); + byte decType = sliceInput.readByte(); + switch (decType) { + case DEC_64: + return readDecimal64(sliceInput, positionCount); + case DEC_128: + return readDecimal128(sliceInput, positionCount); + case NORMAL_DEC: + return readDecimal(sliceInput, positionCount); + default: + throw new IllegalStateException("Unexpected decimal block encoding type: " + decType); + } + } + + private Block readDecimal(SliceInput sliceInput, int positionCount) { // for fast decimal int stateNameLen = sliceInput.readInt(); byte[] stateName = new byte[stateNameLen]; @@ -94,6 +162,42 @@ public Block readBlock(SliceInput sliceInput) { slice = slice.slice(0, length); } - return new DecimalBlock(positionCount, valueIsNull, slice, state); + return new DecimalBlock(DataTypes.DecimalType, positionCount, valueIsNull, slice, state); + } + + private Block readDecimal64(SliceInput sliceInput, int positionCount) { + int scale = sliceInput.readInt(); + boolean[] valueIsNull = decodeNullBits(sliceInput, positionCount); + boolean hasNull = false; + + long[] decimal64Values = new long[positionCount]; + for (int position = 0; position < positionCount; position++) { + if (!valueIsNull[position]) { + decimal64Values[position] = sliceInput.readLong(); + } else { + hasNull = true; + } + } + return new DecimalBlock(DecimalType.decimal64WithScale(scale), positionCount, hasNull, valueIsNull, + decimal64Values); + } + + private Block readDecimal128(SliceInput sliceInput, int positionCount) { + int scale = sliceInput.readInt(); + boolean[] valueIsNull = decodeNullBits(sliceInput, positionCount); + boolean hasNull = false; + + long[] decimal128LowValues = new long[positionCount]; + long[] decimal128HighValues = new long[positionCount]; + for (int position = 0; position < positionCount; position++) { + if (!valueIsNull[position]) { + decimal128LowValues[position] = sliceInput.readLong(); + decimal128HighValues[position] = sliceInput.readLong(); + } else { + hasNull = true; + } + } + return DecimalBlock.buildDecimal128Block(DecimalType.decimal128WithScale(scale), positionCount, hasNull, + valueIsNull, decimal128LowValues, decimal128HighValues); 
} } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DoubleBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DoubleBlock.java index 4df552ada..4eb31026b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DoubleBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/DoubleBlock.java @@ -22,6 +22,7 @@ import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.google.common.base.Preconditions; +import io.airlift.slice.XxHash64; import org.openjdk.jol.info.ClassLayout; import static com.alibaba.polardbx.common.utils.memory.SizeOf.sizeOf; @@ -56,6 +57,13 @@ public DoubleBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, do updateSizeInfo(); } + public static DoubleBlock from(DoubleBlock other, int selSize, int[] selection) { + return new DoubleBlock(0, + selSize, + BlockUtils.copyNullArray(other.isNull, selection, selSize), + BlockUtils.copyDoubleArray(other.values, selection, selSize)); + } + @Override public double getDouble(int position) { checkReadablePosition(position); @@ -109,6 +117,15 @@ public int hashCode(int position) { return Double.hashCode(values[position + arrayOffset]); } + @Override + public long hashCodeUseXxhash(int pos) { + if (isNull(pos)) { + return NULL_HASH_CODE; + } else { + return XxHash64.hash(Double.doubleToRawLongBits(values[pos + arrayOffset])); + } + } + /** * Designed for test purpose */ @@ -121,7 +138,7 @@ public static DoubleBlock of(Double... values) { builder.writeDouble(values[i]); } } - return (DoubleBlock) builder.build(); + return builder.build().cast(DoubleBlock.class); } @Override @@ -137,6 +154,17 @@ public int[] hashCodeVector() { return hashes; } + @Override + public void hashCodeVector(int[] results, int positionCount) { + if (mayHaveNull()) { + super.hashCodeVector(results, positionCount); + return; + } + for (int position = 0; position < positionCount; position++) { + results[position] = Double.hashCode(values[position + arrayOffset]); + } + } + @Override public DataType getType() { return DataTypes.DoubleType; @@ -145,7 +173,7 @@ public DataType getType() { @Override public void copySelected(boolean selectedInUse, int[] sel, int size, RandomAccessBlock output) { if (output instanceof DoubleBlock) { - DoubleBlock outputVector = (DoubleBlock) output; + DoubleBlock outputVector = output.cast(DoubleBlock.class); if (selectedInUse) { for (int i = 0; i < size; i++) { int j = sel[i]; @@ -166,7 +194,7 @@ public void shallowCopyTo(RandomAccessBlock another) { if (!(another instanceof DoubleBlock)) { GeneralUtil.nestedException("cannot shallow copy to " + another == null ? 
null : another.toString()); } - DoubleBlock vectorSlot = (DoubleBlock) another; + DoubleBlock vectorSlot = another.cast(DoubleBlock.class); super.shallowCopyTo(vectorSlot); vectorSlot.values = values; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/EnumBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/EnumBlock.java index 824222bb3..9f680990a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/EnumBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/EnumBlock.java @@ -16,9 +16,13 @@ package com.alibaba.polardbx.executor.chunk; +import com.alibaba.polardbx.common.utils.XxhashUtils; +import com.alibaba.polardbx.executor.utils.ExecUtils; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.google.common.base.Preconditions; import org.openjdk.jol.info.ClassLayout; +import java.util.Arrays; import java.util.Map; import static com.alibaba.polardbx.common.utils.memory.SizeOf.sizeOf; @@ -33,11 +37,15 @@ public class EnumBlock extends AbstractCommonBlock { private static final long INSTANCE_SIZE = ClassLayout.parseClass(EnumBlock.class).instanceSize(); private final int[] offsets; - private final char[] data; private final Map enumValues; + private char[] data; - EnumBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, int[] offsets, char[] data, - final Map enumValues) { + public EnumBlock(int positionCount, final Map enumValues) { + this(0, positionCount, new boolean[positionCount], new int[positionCount], null, enumValues); + } + + public EnumBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, int[] offsets, char[] data, + final Map enumValues) { super(DataTypes.StringType, positionCount, valueIsNull, valueIsNull != null); this.offsets = offsets; this.data = data; @@ -54,6 +62,33 @@ public class EnumBlock extends AbstractCommonBlock { updateSizeInfo(); } + public static EnumBlock from(EnumBlock other, int selSize, int[] selection) { + int[] newOffsets = new int[selSize]; + + if (other.data == null) { + return new EnumBlock(0, selSize, + BlockUtils.copyNullArray(other.isNull, selection, selSize), + newOffsets, null, other.enumValues); + } + if (selection == null) { + return new EnumBlock(0, selSize, + BlockUtils.copyNullArray(other.isNull, selection, selSize), + newOffsets, Arrays.copyOf(other.data, other.data.length), other.enumValues); + } else { + EnumBlockBuilder enumBlockBuilder = + new EnumBlockBuilder(selSize, other.data.length / (other.positionCount + 1) * selSize, + other.enumValues); + for (int i = 0; i < selSize; i++) { + if (other.isNull(selection[i])) { + enumBlockBuilder.appendNull(); + } else { + enumBlockBuilder.writeString(other.getString(selection[i])); + } + } + return (EnumBlock) enumBlockBuilder.build(); + } + } + @Override public String getString(int position) { try { @@ -101,6 +136,21 @@ public int hashCode(int position) { return ChunkUtil.hashCode(data, beginOffset(position), endOffset(position), true); } + @Override + public long hashCodeUseXxhash(int pos) { + if (isNull(pos)) { + return NULL_HASH_CODE; + } else { + String val = getString(pos); + Integer index = enumValues.get(val); + if (index == null) { + return XxhashUtils.finalShuffle(0); + } else { + return XxhashUtils.finalShuffle(index); + } + } + } + @Override public int checksum(int position) { if (isNull(position)) { @@ -115,7 +165,7 @@ public int checksum(int position) { @Override public boolean equals(int position, Block other, int 
otherPosition) { if (other instanceof EnumBlock) { - return equals(position, (EnumBlock) other, otherPosition); + return equals(position, other.cast(EnumBlock.class), otherPosition); } else if (other instanceof EnumBlockBuilder) { return equals(position, (EnumBlockBuilder) other, otherPosition); } else { @@ -169,6 +219,11 @@ public char[] getData() { return data; } + public void setData(char[] data) { + Preconditions.checkArgument(this.data == null); + this.data = data; + } + @Override public void updateSizeInfo() { estimatedSize = INSTANCE_SIZE + sizeOf(isNull) + sizeOf(data) + sizeOf(offsets); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/EnumBlockEncoding.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/EnumBlockEncoding.java index fd24c8a84..d01387318 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/EnumBlockEncoding.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/EnumBlockEncoding.java @@ -59,7 +59,7 @@ public void writeBlock(SliceOutput sliceOutput, Block block) { int nullsCnt = encodeNullsAsBits(sliceOutput, block); sliceOutput.writeBoolean(positionCount > nullsCnt); if (positionCount > nullsCnt) { - EnumBlock EnumBlock = (EnumBlock) block; + EnumBlock EnumBlock = block.cast(EnumBlock.class); int[] offset = EnumBlock.getOffsets(); for (int position = 0; position < positionCount; position++) { sliceOutput.writeInt(offset[position]); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/FloatBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/FloatBlock.java index 5b8bf51c9..c4d9c6b05 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/FloatBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/FloatBlock.java @@ -22,6 +22,7 @@ import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.google.common.base.Preconditions; +import io.airlift.slice.XxHash64; import org.openjdk.jol.info.ClassLayout; import static com.alibaba.polardbx.common.utils.memory.SizeOf.sizeOf; @@ -32,6 +33,8 @@ public class FloatBlock extends AbstractBlock { private float[] values; + private boolean forDeletedOrcScan = false; + public FloatBlock(DataType dataType, int slotLen) { super(dataType, slotLen); this.values = new float[slotLen]; @@ -44,6 +47,22 @@ public FloatBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, flo updateSizeInfo(); } + public static FloatBlock from(FloatBlock other, int selSize, int[] selection) { + FloatBlock block = new FloatBlock(0, + selSize, + BlockUtils.copyNullArray(other.isNull, selection, selSize), + BlockUtils.copyFloatArray(other.values, selection, selSize)); + block.forDeletedOrcScan = other.forDeletedOrcScan; + return block; + } + + public FloatBlock(DataType dataType, int slotLen, boolean forDeletedOrcScan) { + super(dataType, slotLen); + this.values = new float[slotLen]; + this.forDeletedOrcScan = forDeletedOrcScan; + updateSizeInfo(); + } + @Override public float getFloat(int position) { checkReadablePosition(position); @@ -88,6 +107,15 @@ public int hashCode(int position) { return Float.hashCode(values[position + arrayOffset]); } + @Override + public long hashCodeUseXxhash(int pos) { + if (isNull(pos)) { + return NULL_HASH_CODE; + } else { + return XxHash64.hash(Float.floatToRawIntBits(values[pos + arrayOffset])); + } + } + @Override public int[] hashCodeVector() { if (mayHaveNull()) { @@ -101,6 +129,17 @@ 
public int[] hashCodeVector() { return hashes; } + @Override + public void hashCodeVector(int[] results, int positionCount) { + if (mayHaveNull()) { + super.hashCodeVector(results, positionCount); + return; + } + for (int position = 0; position < positionCount; position++) { + results[position] = Float.hashCode(values[position + arrayOffset]); + } + } + @Override public DataType getType() { return DataTypes.FloatType; @@ -118,7 +157,7 @@ public void addToHasher(IStreamingHasher sink, int position) { @Override public void copySelected(boolean selectedInUse, int[] sel, int size, RandomAccessBlock output) { if (output instanceof FloatBlock) { - FloatBlock outputVectorSlot = (FloatBlock) output; + FloatBlock outputVectorSlot = output.cast(FloatBlock.class); if (selectedInUse) { for (int i = 0; i < size; i++) { int j = sel[i]; @@ -139,7 +178,7 @@ public void shallowCopyTo(RandomAccessBlock another) { if (!(another instanceof FloatBlock)) { GeneralUtil.nestedException("cannot shallow copy to " + another == null ? null : another.toString()); } - FloatBlock vectorSlot = (FloatBlock) another; + FloatBlock vectorSlot = another.cast(FloatBlock.class); super.shallowCopyTo(vectorSlot); vectorSlot.values = values; } @@ -180,5 +219,6 @@ public void updateSizeInfo() { estimatedSize = INSTANCE_SIZE + sizeOf(isNull) + sizeOf(values); elementUsedBytes = Byte.BYTES * positionCount + Float.BYTES * positionCount; } + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/IntegerBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/IntegerBlock.java index 98f30e782..1b54a1323 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/IntegerBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/IntegerBlock.java @@ -17,15 +17,25 @@ package com.alibaba.polardbx.executor.chunk; import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.XxhashUtils; +import com.alibaba.polardbx.common.utils.bloomfilter.RFBloomFilter; +import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; +import com.alibaba.polardbx.executor.accumulator.state.NullableLongGroupState; +import com.alibaba.polardbx.executor.operator.scan.impl.DictionaryMapping; +import com.alibaba.polardbx.executor.operator.util.DriverObjectPool; +import com.alibaba.polardbx.executor.operator.util.TypedList; import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; +import com.alibaba.polardbx.executor.accumulator.state.NullableLongGroupState; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.openjdk.jol.info.ClassLayout; +import java.util.BitSet; + import static com.alibaba.polardbx.common.utils.memory.SizeOf.sizeOf; /** @@ -44,6 +54,20 @@ public IntegerBlock(DataType dataType, int slotLen) { updateSizeInfo(); } + // for object pool + public IntegerBlock(DataType dataType, int slotLen, DriverObjectPool objectPool, int chunkLimit) { + super(dataType, slotLen); + int[] pooled = objectPool.poll(); + if (pooled != null && pooled.length >= slotLen) { + this.values = pooled; + } else { + this.values = new int[slotLen]; + } + + updateSizeInfo(); + setRecycler(objectPool.getRecycler(chunkLimit)); + } + public IntegerBlock(DataType dataType, int[] values, boolean[] nulls, 
boolean hasNull, int length, int[] selection) { super(dataType, length, nulls, hasNull); @@ -52,16 +76,356 @@ public IntegerBlock(DataType dataType, int[] values, boolean[] nulls, boolean ha updateSizeInfo(); } - IntegerBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, int[] values) { + public IntegerBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, int[] values) { super(arrayOffset, positionCount, valueIsNull); this.values = Preconditions.checkNotNull(values); updateSizeInfo(); } - IntegerBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, int[] values, boolean hasNull) { - super(DataTypes.IntegerType, positionCount, valueIsNull, hasNull); - this.values = Preconditions.checkNotNull(values); - updateSizeInfo(); + public static IntegerBlock from(IntegerBlock other, int selSize, int[] selection, boolean useSelection) { + if (useSelection) { + return new IntegerBlock(other.getType(), other.intArray(), other.nulls(), + other.hasNull(), selSize, selection); + } + boolean[] targetNulls = BlockUtils.copyNullArray(other.nulls(), selection, selSize); + return new IntegerBlock(other.getType(), + BlockUtils.copyIntArray(other.intArray(), selection, selSize), + targetNulls, + targetNulls != null, + selSize, null); + } + + @Override + public void addLongToBloomFilter(int totalPartitionCount, RFBloomFilter[] RFBloomFilters) { + for (int pos = 0; pos < positionCount; pos++) { + + // calc physical partition id. + long hashVal = hashCodeUseXxhash(pos); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + + // put hash code. + RFBloomFilters[partition].putLong(values[pos]); + } + } + + @Override + public void addLongToBloomFilter(RFBloomFilter RFBloomFilter) { + for (int pos = 0; pos < positionCount; pos++) { + // put hash code. + RFBloomFilter.putLong(values[pos]); + } + } + + @Override + public int mightContainsLong(RFBloomFilter RFBloomFilter, boolean[] bitmap, boolean isConjunctive) { + int hitCount = 0; + final int positionCount = getPositionCount(); + if (isConjunctive) { + + for (int pos = 0; pos < positionCount; pos++) { + + // Base on the original status in bitmap. + if (bitmap[pos]) { + bitmap[pos] &= RFBloomFilter.mightContainLong(values[pos]); + if (bitmap[pos]) { + hitCount++; + } + } + } + + } else { + + for (int pos = 0; pos < positionCount; pos++) { + bitmap[pos] = RFBloomFilter.mightContainLong(values[pos]); + if (bitmap[pos]) { + hitCount++; + } + } + + } + + return hitCount; + } + + @Override + public int mightContainsLong(int totalPartitionCount, RFBloomFilter[] RFBloomFilters, boolean[] bitmap, + boolean isPartitionConsistent) { + int hitCount = 0; + final int positionCount = getPositionCount(); + + if (isPartitionConsistent) { + // Find the consistent partition number. 
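+            // A partition-consistent block routes every row to the same bloom
+            // filter, so the partition id is computed once from row 0; e.g. with
+            // 4 partitions and (hashVal & Long.MAX_VALUE) == 10, all rows probe
+            // filter 2.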
+ long hashVal = hashCodeUseXxhash(0); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + for (int pos = 0; pos < positionCount; pos++) { + bitmap[pos] = rfBloomFilter.mightContainLong(values[pos]); + if (bitmap[pos]) { + hitCount++; + } + } + } else { + + for (int pos = 0; pos < positionCount; pos++) { + long hashVal = hashCodeUseXxhash(pos); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + bitmap[pos] = rfBloomFilter.mightContainLong(values[pos]); + if (bitmap[pos]) { + hitCount++; + } + } + } + + return hitCount; + } + + @Override + public void addIntToBloomFilter(int totalPartitionCount, RFBloomFilter[] RFBloomFilters) { + for (int pos = 0; pos < positionCount; pos++) { + + // calc physical partition id. + long hashVal = XxhashUtils.finalShuffle(values[pos]); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + + // put hash code. + RFBloomFilters[partition].putInt(values[pos]); + } + } + + @Override + public void addIntToBloomFilter(RFBloomFilter RFBloomFilter) { + final int positionCount = getPositionCount(); + for (int pos = 0; pos < positionCount; pos++) { + // put hash code. + RFBloomFilter.putInt(values[pos]); + } + } + + @Override + public int mightContainsInt(RFBloomFilter RFBloomFilter, boolean[] bitmap, boolean isConjunctive) { + int hitCount = 0; + + if (isConjunctive) { + for (int pos = 0; pos < positionCount; pos++) { + // Base on the original status in bitmap. + if (bitmap[pos]) { + int hashCode = values[pos]; + bitmap[pos] &= RFBloomFilter.mightContainInt(hashCode); + if (bitmap[pos]) { + hitCount++; + } + } + } + + } else { + for (int pos = 0; pos < positionCount; pos++) { + int hashCode = values[pos]; + bitmap[pos] = RFBloomFilter.mightContainInt(hashCode); + if (bitmap[pos]) { + hitCount++; + } + } + } + + return hitCount; + } + + @Override + public int mightContainsInt(int totalPartitionCount, RFBloomFilter[] RFBloomFilters, boolean[] bitmap, + boolean isPartitionConsistent) { + int hitCount = 0; + final int positionCount = getPositionCount(); + + if (isPartitionConsistent) { + // Find the consistent partition number. + long hashVal = XxhashUtils.finalShuffle(values[0]); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + for (int pos = 0; pos < positionCount; pos++) { + int hashCode = values[pos]; + bitmap[pos] = rfBloomFilter.mightContainInt(hashCode); + if (bitmap[pos]) { + hitCount++; + } + } + } else { + + for (int pos = 0; pos < positionCount; pos++) { + long hashVal = XxhashUtils.finalShuffle(values[pos]); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + int hashCode = values[pos]; + bitmap[pos] = rfBloomFilter.mightContainInt(hashCode); + if (bitmap[pos]) { + hitCount++; + } + } + } + + return hitCount; + } + + @Override + public int mightContainsInt(int totalPartitionCount, RFBloomFilter[] RFBloomFilters, boolean[] bitmap, + boolean isPartitionConsistent, boolean isConjunctive) { + int hitCount = 0; + final int positionCount = getPositionCount(); + + if (isConjunctive) { + + if (isPartitionConsistent) { + // Find the consistent partition number. 
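+                // Conjunctive mode ANDs the filter verdict into the existing
+                // bitmap: rows already rejected by an earlier runtime filter
+                // stay rejected, and only surviving rows are probed below.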
+ long hashVal = hashCodeUseXxhash(0); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + for (int pos = 0; pos < positionCount; pos++) { + + if (bitmap[pos]) { + bitmap[pos] &= rfBloomFilter.mightContainInt(values[pos]); + if (bitmap[pos]) { + hitCount++; + } + } + + } + } else { + + for (int pos = 0; pos < positionCount; pos++) { + + if (bitmap[pos]) { + long hashVal = hashCodeUseXxhash(pos); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + bitmap[pos] &= rfBloomFilter.mightContainInt(values[pos]); + if (bitmap[pos]) { + hitCount++; + } + + } + + } + } + + } else { + + if (isPartitionConsistent) { + // Find the consistent partition number. + long hashVal = hashCodeUseXxhash(0); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + for (int pos = 0; pos < positionCount; pos++) { + bitmap[pos] = rfBloomFilter.mightContainInt(values[pos]); + if (bitmap[pos]) { + hitCount++; + } + } + } else { + + for (int pos = 0; pos < positionCount; pos++) { + long hashVal = hashCodeUseXxhash(pos); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + bitmap[pos] = rfBloomFilter.mightContainInt(values[pos]); + if (bitmap[pos]) { + hitCount++; + } + } + } + + } + + return hitCount; + } + + @Override + public int mightContainsLong(int totalPartitionCount, RFBloomFilter[] RFBloomFilters, boolean[] bitmap, + boolean isPartitionConsistent, boolean isConjunctive) { + int hitCount = 0; + + if (isConjunctive) { + + if (isPartitionConsistent) { + // Find the consistent partition number. + long hashVal = XxhashUtils.finalShuffle(values[0]); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + for (int pos = 0; pos < positionCount; pos++) { + + if (bitmap[pos]) { + bitmap[pos] &= rfBloomFilter.mightContainLong(values[pos]); + if (bitmap[pos]) { + hitCount++; + } + } + + } + } else { + + for (int pos = 0; pos < positionCount; pos++) { + + if (bitmap[pos]) { + long hashVal = XxhashUtils.finalShuffle(values[pos]); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + bitmap[pos] &= rfBloomFilter.mightContainLong(values[pos]); + if (bitmap[pos]) { + hitCount++; + } + + } + + } + } + + } else { + + if (isPartitionConsistent) { + // Find the consistent partition number. 
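+                // The probe side must mirror the build side: the same
+                // XxhashUtils.finalShuffle routing that addLongToBloomFilter
+                // applies when inserting selects the filter here, otherwise rows
+                // would probe the wrong partition's bloom filter.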
+ long hashVal = XxhashUtils.finalShuffle(values[0]); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + for (int pos = 0; pos < positionCount; pos++) { + bitmap[pos] = rfBloomFilter.mightContainLong(values[pos]); + if (bitmap[pos]) { + hitCount++; + } + } + } else { + + for (int pos = 0; pos < positionCount; pos++) { + long hashVal = XxhashUtils.finalShuffle(values[pos]); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + bitmap[pos] = rfBloomFilter.mightContainLong(values[pos]); + if (bitmap[pos]) { + hitCount++; + } + } + } + + } + + return hitCount; + } + + @Override + public void recycle() { + if (recycler != null) { + recycler.recycle(values); + } } private int realPositionOf(int position) { @@ -74,62 +438,87 @@ private int realPositionOf(int position) { @Override public boolean isNull(int position) { position = realPositionOf(position); - return isNull != null && isNull[position + arrayOffset]; + return isNullInner(position); } @Override public int getInt(int position) { position = realPositionOf(position); - return values[position + arrayOffset]; + return getIntInner(position); } @Override public Object getObject(int position) { - return isNull(position) ? null : getInt(position); + position = realPositionOf(position); + return isNullInner(position) ? null : getIntInner(position); } @Override public void writePositionTo(int position, BlockBuilder blockBuilder) { - if (isNull(position)) { - blockBuilder.appendNull(); - } else { - blockBuilder.writeInt(getInt(position)); + position = realPositionOf(position); + writePositionToInner(position, blockBuilder); + } + + @Override + public void writePositionTo(int[] selection, final int offsetInSelection, final int positionCount, + BlockBuilder blockBuilder) { + if (this.selection != null || !(blockBuilder instanceof IntegerBlockBuilder)) { + // don't support it when selection in use. 
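+            // A block that already carries its own selection would need double
+            // indirection here, so defer to the generic per-position copy.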
+ super.writePositionTo(selection, offsetInSelection, positionCount, blockBuilder); + return; + } + + // best case + if (!mayHaveNull()) { + ((IntegerBlockBuilder) blockBuilder).values + .add(this.values, selection, offsetInSelection, positionCount); + + ((IntegerBlockBuilder) blockBuilder).valueIsNull + .add(false, positionCount); + + return; + } + + // normal case + for (int i = 0; i < positionCount; i++) { + int position = selection[i + offsetInSelection]; + + if (isNull != null && isNull[position + arrayOffset]) { + blockBuilder.appendNull(); + } else { + blockBuilder.writeInt(values[position + arrayOffset]); + } } } @Override public void addToHasher(IStreamingHasher sink, int position) { - if (isNull(position)) { - sink.putInt(NULL_VALUE); - } else { - sink.putInt(getInt(position)); - } + position = realPositionOf(position); + addToHasherInner(sink, position); } @Override public int hashCode(int position) { - if (isNull(position)) { - return 0; - } - return getInt(position); + position = realPositionOf(position); + return hashCodeInner(position); } @Override - public boolean equals(int position, Block other, int otherPosition) { - boolean n1 = isNull(position); - boolean n2 = other.isNull(otherPosition); - if (n1 && n2) { - return true; - } else if (n1 != n2) { - return false; - } - if (other instanceof IntegerBlock || other instanceof IntegerBlockBuilder) { - return getInt(position) == other.getInt(otherPosition); + public long hashCodeUseXxhash(int pos) { + int realPos = realPositionOf(pos); + if (isNullInner(realPos)) { + return NULL_HASH_CODE; } else { - throw new AssertionError(); + return XxhashUtils.finalShuffle(getIntInner(realPos)); } } + @Override + public boolean equals(int position, Block other, int otherPosition) { + position = realPositionOf(position); + return equalsInner(position, other, otherPosition); + } + /** * Designed for test purpose */ @@ -170,6 +559,34 @@ public int[] hashCodeVector() { return hashes; } + @Override + public void hashCodeVector(int[] results, int positionCount) { + + if (selection != null) { + for (int position = 0; position < positionCount; position++) { + results[position] = values[selection[position]]; + } + + if (mayHaveNull()) { + for (int position = 0; position < positionCount; position++) { + if (isNull[selection[position]]) { + results[position] = 0; + } + } + } + } else { + System.arraycopy(values, arrayOffset, results, 0, positionCount); + + if (mayHaveNull()) { + for (int position = 0; position < positionCount; position++) { + if (isNull[position]) { + results[position] = 0; + } + } + } + } + } + @Override public DataType getType() { return DataTypes.IntegerType; @@ -179,7 +596,7 @@ public DataType getType() { public void copySelected(boolean selectedInUse, int[] sel, int size, RandomAccessBlock output) { checkNoDelayMaterialization(); if (output instanceof IntegerBlock) { - IntegerBlock outputVectorSlot = (IntegerBlock) output; + IntegerBlock outputVectorSlot = output.cast(IntegerBlock.class); if (selectedInUse) { for (int i = 0; i < size; i++) { int j = sel[i]; @@ -201,20 +618,29 @@ public void shallowCopyTo(RandomAccessBlock another) { if (!(another instanceof IntegerBlock)) { GeneralUtil.nestedException("cannot shallow copy to " + another == null ? 
null : another.toString()); } - IntegerBlock vectorSlot = (IntegerBlock) another; + IntegerBlock vectorSlot = another.cast(IntegerBlock.class); super.shallowCopyTo(vectorSlot); vectorSlot.values = values; } @Override protected Object getElementAtUnchecked(int position) { + position = realPositionOf(position); return values[position]; } @Override public void setElementAt(int position, Object element) { checkNoDelayMaterialization(); - super.updateElementAt(position, element, e -> values[position] = (int) e); + + if (element != null) { + isNull[position] = false; + values[position] = (int) element; + } else { + isNull[position] = true; + hasNull = true; + } + } public int[] intArray() { @@ -245,10 +671,144 @@ public void updateSizeInfo() { elementUsedBytes = Byte.BYTES * positionCount + Integer.BYTES * positionCount; } + @Override + public void collectNulls(int positionOffset, int positionCount, BitSet nullBitmap, int targetOffset) { + Preconditions.checkArgument(positionOffset + positionCount <= this.positionCount); + if (isNull == null) { + return; + } + if (selection != null) { + for (int i = positionOffset; i < positionOffset + positionCount; i++) { + int j = selection[i]; + if (isNull[j + arrayOffset]) { + nullBitmap.set(targetOffset); + } + targetOffset++; + } + } else { + for (int i = positionOffset; i < positionOffset + positionCount; i++) { + if (isNull[i + arrayOffset]) { + nullBitmap.set(targetOffset); + } + targetOffset++; + } + } + } + + @Override + public void copyToIntArray(int positionOffset, int positionCount, int[] targetArray, int targetOffset, + DictionaryMapping dictionaryMapping) { + Preconditions.checkArgument(positionOffset + positionCount <= this.positionCount); + if (selection != null) { + for (int i = positionOffset; i < positionOffset + positionCount; i++) { + int j = selection[i]; + targetArray[targetOffset++] = values[j + arrayOffset]; + } + } else { + for (int i = positionOffset; i < positionOffset + positionCount; i++) { + targetArray[targetOffset++] = values[i + arrayOffset]; + } + } + } + + @Override + public void appendTypedHashTable(TypedList typedList, int sourceIndex, int startIndexIncluded, + int endIndexExcluded) { + Preconditions.checkArgument(endIndexExcluded <= this.positionCount); + if (selection != null) { + for (int i = startIndexIncluded; i < endIndexExcluded; i++) { + int j = selection[i]; + typedList.setInt(sourceIndex++, values[j + arrayOffset]); + } + } else { + typedList.setIntArray(sourceIndex, values, startIndexIncluded, endIndexExcluded); + } + } + + @Override + public void count(int[] groupIds, int[] probePositions, int selSize, NullableLongGroupState state) { + if (!mayHaveNull()) { + for (int i = 0; i < selSize; i++) { + int position = probePositions[i]; + int groupId = groupIds[position]; + state.set(groupId, state.get(groupId) + 1); + } + return; + } + + if (selection == null) { + for (int i = 0; i < selSize; i++) { + int position = probePositions[i]; + int groupId = groupIds[position]; + + if (!isNull[position]) { + state.set(groupId, state.get(groupId) + 1); + } + } + } else { + for (int i = 0; i < selSize; i++) { + int position = probePositions[i]; + int groupId = groupIds[position]; + + int realPosition = selection[position]; + + if (!isNull[realPosition]) { + state.set(groupId, state.get(groupId) + 1); + } + } + } + } + public int[] getSelection() { return selection; } + private int getIntInner(int position) { + return values[position + arrayOffset]; + } + + private boolean isNullInner(int position) { + return isNull != null && 
isNull[position + arrayOffset]; + } + + private void writePositionToInner(int position, BlockBuilder blockBuilder) { + if (isNullInner(position)) { + blockBuilder.appendNull(); + } else { + blockBuilder.writeInt(getIntInner(position)); + } + } + + private void addToHasherInner(IStreamingHasher sink, int position) { + if (isNullInner(position)) { + sink.putInt(NULL_VALUE); + } else { + sink.putInt(getIntInner(position)); + } + } + + private int hashCodeInner(int position) { + if (isNullInner(position)) { + return 0; + } + return getIntInner(position); + } + + private boolean equalsInner(int realPosition, Block other, int otherPosition) { + boolean n1 = isNullInner(realPosition); + boolean n2 = other.isNull(otherPosition); + if (n1 && n2) { + return true; + } else if (n1 != n2) { + return false; + } + if (other instanceof IntegerBlock || other instanceof IntegerBlockBuilder) { + return getIntInner(realPosition) == other.getInt(otherPosition); + } else { + throw new AssertionError(); + } + } + private void checkNoDelayMaterialization() { if (selection != null) { throw new AssertionError("un-support delay materialization in this method"); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/IntegerBlockBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/IntegerBlockBuilder.java index 7c40b2bc4..15f1483f2 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/IntegerBlockBuilder.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/IntegerBlockBuilder.java @@ -16,16 +16,30 @@ package com.alibaba.polardbx.executor.chunk; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.executor.mpp.operator.DriverContext; +import com.alibaba.polardbx.executor.operator.util.DriverObjectPool; +import com.alibaba.polardbx.executor.operator.util.ObjectPools; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.google.common.base.Preconditions; import it.unimi.dsi.fastutil.ints.IntArrayList; public class IntegerBlockBuilder extends AbstractBlockBuilder { - private final IntArrayList values; + protected final BatchedArrayList.BatchIntArrayList values; + private DriverObjectPool objectPool; + private int chunkLimit; public IntegerBlockBuilder(int capacity) { super(capacity); - this.values = new IntArrayList(capacity); + this.values = new BatchedArrayList.BatchIntArrayList(capacity); + } + + public IntegerBlockBuilder(int capacity, int chunkLimit, DriverObjectPool objectPool) { + super(capacity); + this.values = new BatchedArrayList.BatchIntArrayList(capacity); + this.objectPool = objectPool; + this.chunkLimit = chunkLimit; } @Override @@ -63,8 +77,12 @@ public void ensureCapacity(int capacity) { @Override public Block build() { - return new IntegerBlock(0, getPositionCount(), mayHaveNull() ? valueIsNull.elements() : null, + Block block = new IntegerBlock(0, getPositionCount(), mayHaveNull() ? 
valueIsNull.elements() : null, values.elements()); + if (objectPool != null) { + block.setRecycler(objectPool.getRecycler(chunkLimit)); + } + return block; } @Override @@ -75,7 +93,16 @@ public void appendNull() { @Override public BlockBuilder newBlockBuilder() { - return new IntegerBlockBuilder(getCapacity()); + if (objectPool != null) { + return new IntegerBlockBuilder(getCapacity(), chunkLimit, objectPool); + } else { + return new IntegerBlockBuilder(getCapacity()); + } + } + + @Override + public BlockBuilder newBlockBuilder(ObjectPools objectPools, int chunkLimit) { + return new IntegerBlockBuilder(getCapacity(), chunkLimit, objectPools.getIntArrayPool()); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/LongBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/LongBlock.java index 50035eef8..fed1eaec6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/LongBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/LongBlock.java @@ -17,6 +17,11 @@ package com.alibaba.polardbx.executor.chunk; import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.XxhashUtils; +import com.alibaba.polardbx.common.utils.bloomfilter.RFBloomFilter; +import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; +import com.alibaba.polardbx.executor.operator.util.DriverObjectPool; +import com.alibaba.polardbx.executor.operator.util.TypedList; import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; @@ -25,6 +30,8 @@ import com.google.common.base.Preconditions; import org.openjdk.jol.info.ClassLayout; +import java.util.BitSet; + import static com.alibaba.polardbx.common.utils.memory.SizeOf.sizeOf; /** @@ -39,22 +46,446 @@ public class LongBlock extends AbstractBlock { private long[] values; + // for zero copy + private int[] selection; + public LongBlock(DataType dataType, int slotLen) { super(dataType, slotLen); this.values = new long[slotLen]; + this.selection = null; + updateSizeInfo(); + } + + // for object pool + public LongBlock(DataType dataType, int slotLen, DriverObjectPool objectPool, int chunkLimit) { + super(dataType, slotLen); + long[] pooled = objectPool.poll(); + if (pooled != null && pooled.length >= slotLen) { + this.values = pooled; + } else { + this.values = new long[slotLen]; + } + + this.selection = null; updateSizeInfo(); + setRecycler(objectPool.getRecycler(chunkLimit)); } public LongBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, long[] values) { super(arrayOffset, positionCount, valueIsNull); this.values = Preconditions.checkNotNull(values); + this.selection = null; updateSizeInfo(); } + public LongBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, long[] values, int[] selection) { + super(arrayOffset, positionCount, valueIsNull); + this.values = Preconditions.checkNotNull(values); + this.selection = selection; + updateSizeInfo(); + } + + public static LongBlock from(LongBlock other, int selSize, int[] selection, boolean useSelection) { + if (useSelection) { + return new LongBlock(0, selSize, other.isNull, other.values, selection); + } + return new LongBlock(0, + selSize, + BlockUtils.copyNullArray(other.isNull, selection, selSize), + BlockUtils.copyLongArray(other.values, selection, selSize), + null); + } + + @Override + public void addLongToBloomFilter(int 
totalPartitionCount, RFBloomFilter[] RFBloomFilters) { + for (int pos = 0; pos < positionCount; pos++) { + + // calc physical partition id. + long hashVal = hashCodeUseXxhash(pos); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + + // put hash code. + RFBloomFilters[partition].putLong(values[pos]); + } + } + + @Override + public void addLongToBloomFilter(RFBloomFilter RFBloomFilter) { + for (int pos = 0; pos < positionCount; pos++) { + // put hash code. + RFBloomFilter.putLong(values[pos]); + } + } + + @Override + public int mightContainsLong(RFBloomFilter RFBloomFilter, boolean[] bitmap, boolean isConjunctive) { + int hitCount = 0; + final int positionCount = getPositionCount(); + if (isConjunctive) { + + for (int pos = 0; pos < positionCount; pos++) { + + // Base on the original status in bitmap. + if (bitmap[pos]) { + bitmap[pos] &= RFBloomFilter.mightContainLong(values[pos]); + if (bitmap[pos]) { + hitCount++; + } + } + } + + } else { + + for (int pos = 0; pos < positionCount; pos++) { + bitmap[pos] = RFBloomFilter.mightContainLong(values[pos]); + if (bitmap[pos]) { + hitCount++; + } + } + + } + + return hitCount; + } + + @Override + public int mightContainsLong(int totalPartitionCount, RFBloomFilter[] RFBloomFilters, boolean[] bitmap, + boolean isPartitionConsistent) { + int hitCount = 0; + final int positionCount = getPositionCount(); + + if (isPartitionConsistent) { + // Find the consistent partition number. + long hashVal = hashCodeUseXxhash(0); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + for (int pos = 0; pos < positionCount; pos++) { + bitmap[pos] = rfBloomFilter.mightContainLong(values[pos]); + if (bitmap[pos]) { + hitCount++; + } + } + } else { + + for (int pos = 0; pos < positionCount; pos++) { + long hashVal = hashCodeUseXxhash(pos); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + bitmap[pos] = rfBloomFilter.mightContainLong(values[pos]); + if (bitmap[pos]) { + hitCount++; + } + } + } + + return hitCount; + } + + @Override + public void addIntToBloomFilter(int totalPartitionCount, RFBloomFilter[] RFBloomFilters) { + for (int pos = 0; pos < positionCount; pos++) { + + // calc physical partition id. + long hashVal = XxhashUtils.finalShuffle(values[pos + arrayOffset]); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + + // put hash code. + int hashCode = Long.hashCode(values[pos + arrayOffset]); + RFBloomFilters[partition].putInt(hashCode); + } + } + + @Override + public void addIntToBloomFilter(RFBloomFilter RFBloomFilter) { + final int positionCount = getPositionCount(); + for (int pos = 0; pos < positionCount; pos++) { + // put hash code. + RFBloomFilter.putInt(Long.hashCode(values[pos + arrayOffset])); + } + } + + @Override + public int mightContainsInt(RFBloomFilter RFBloomFilter, boolean[] bitmap, boolean isConjunctive) { + int hitCount = 0; + if (isConjunctive) { + for (int pos = 0; pos < positionCount; pos++) { + // Base on the original status in bitmap. 
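+                // Conjunctive mode narrows an existing bitmap: a position can stay set
+                // only while every runtime filter in the conjunction matches it.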
+                if (bitmap[pos]) {
+                    int hashCode = Long.hashCode(values[pos + arrayOffset]);
+                    bitmap[pos] &= RFBloomFilter.mightContainInt(hashCode);
+                    if (bitmap[pos]) {
+                        hitCount++;
+                    }
+                }
+            }
+
+        } else {
+            for (int pos = 0; pos < positionCount; pos++) {
+                int hashCode = Long.hashCode(values[pos + arrayOffset]);
+                bitmap[pos] = RFBloomFilter.mightContainInt(hashCode);
+                if (bitmap[pos]) {
+                    hitCount++;
+                }
+            }
+        }
+
+        return hitCount;
+    }
+
+    @Override
+    public int mightContainsInt(int totalPartitionCount, RFBloomFilter[] RFBloomFilters, boolean[] bitmap,
+                                boolean isPartitionConsistent) {
+        int hitCount = 0;
+        final int positionCount = getPositionCount();
+
+        if (isPartitionConsistent) {
+            // Find the consistent partition number.
+            long hashVal = XxhashUtils.finalShuffle(values[0]);
+            int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount);
+            RFBloomFilter rfBloomFilter = RFBloomFilters[partition];
+
+            for (int pos = 0; pos < positionCount; pos++) {
+                int hashCode = Long.hashCode(values[pos]);
+                bitmap[pos] = rfBloomFilter.mightContainInt(hashCode);
+                if (bitmap[pos]) {
+                    hitCount++;
+                }
+            }
+        } else {
+
+            for (int pos = 0; pos < positionCount; pos++) {
+                long hashVal = XxhashUtils.finalShuffle(values[pos]);
+                int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount);
+                RFBloomFilter rfBloomFilter = RFBloomFilters[partition];
+
+                int hashCode = Long.hashCode(values[pos]);
+                bitmap[pos] = rfBloomFilter.mightContainInt(hashCode);
+                if (bitmap[pos]) {
+                    hitCount++;
+                }
+            }
+        }
+
+        return hitCount;
+    }
+
+    @Override
+    public int mightContainsInt(int totalPartitionCount, RFBloomFilter[] RFBloomFilters, boolean[] bitmap,
+                                boolean isPartitionConsistent, boolean isConjunctive) {
+        int hitCount = 0;
+        final int positionCount = getPositionCount();
+
+        if (isConjunctive) {
+
+            if (isPartitionConsistent) {
+                // Find the consistent partition number.
+                long hashVal = hashCodeUseXxhash(0);
+                int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount);
+                RFBloomFilter rfBloomFilter = RFBloomFilters[partition];
+
+                for (int pos = 0; pos < positionCount; pos++) {
+
+                    if (bitmap[pos]) {
+                        bitmap[pos] &= rfBloomFilter.mightContainInt(Long.hashCode(values[pos]));
+                        if (bitmap[pos]) {
+                            hitCount++;
+                        }
+                    }
+
+                }
+            } else {
+
+                for (int pos = 0; pos < positionCount; pos++) {
+
+                    if (bitmap[pos]) {
+                        long hashVal = hashCodeUseXxhash(pos);
+                        int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount);
+                        RFBloomFilter rfBloomFilter = RFBloomFilters[partition];
+
+                        bitmap[pos] &= rfBloomFilter.mightContainInt(Long.hashCode(values[pos]));
+                        if (bitmap[pos]) {
+                            hitCount++;
+                        }
+
+                    }
+
+                }
+            }
+
+        } else {
+
+            if (isPartitionConsistent) {
+                // Find the consistent partition number.
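+                // A partition-consistent block carries rows of exactly one partition,
+                // so the target bloom filter is resolved once from position 0 and reused.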
+ long hashVal = hashCodeUseXxhash(0); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + for (int pos = 0; pos < positionCount; pos++) { + bitmap[pos] = rfBloomFilter.mightContainInt(Long.hashCode(values[pos])); + if (bitmap[pos]) { + hitCount++; + } + } + } else { + + for (int pos = 0; pos < positionCount; pos++) { + long hashVal = hashCodeUseXxhash(pos); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + bitmap[pos] = rfBloomFilter.mightContainInt(Long.hashCode(values[pos])); + if (bitmap[pos]) { + hitCount++; + } + } + } + + } + + return hitCount; + } + + @Override + public int mightContainsLong(int totalPartitionCount, RFBloomFilter[] RFBloomFilters, boolean[] bitmap, + boolean isPartitionConsistent, boolean isConjunctive) { + int hitCount = 0; + if (isConjunctive) { + + if (isPartitionConsistent) { + // Find the consistent partition number. + long hashVal = XxhashUtils.finalShuffle(values[0]); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + for (int pos = 0; pos < positionCount; pos++) { + + if (bitmap[pos]) { + bitmap[pos] &= rfBloomFilter.mightContainLong(values[pos]); + if (bitmap[pos]) { + hitCount++; + } + } + + } + } else { + + for (int pos = 0; pos < positionCount; pos++) { + + if (bitmap[pos]) { + long hashVal = XxhashUtils.finalShuffle(values[pos]); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + bitmap[pos] &= rfBloomFilter.mightContainLong(values[pos]); + if (bitmap[pos]) { + hitCount++; + } + + } + + } + } + + } else { + + if (isPartitionConsistent) { + // Find the consistent partition number. 
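+                // Same single-lookup shortcut as the int variant, except that long values
+                // are probed against the filter directly instead of via Long.hashCode.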
+ long hashVal = XxhashUtils.finalShuffle(values[0]); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + for (int pos = 0; pos < positionCount; pos++) { + bitmap[pos] = rfBloomFilter.mightContainLong(values[pos]); + if (bitmap[pos]) { + hitCount++; + } + } + } else { + + for (int pos = 0; pos < positionCount; pos++) { + long hashVal = XxhashUtils.finalShuffle(values[pos]); + int partition = (int) ((hashVal & Long.MAX_VALUE) % totalPartitionCount); + RFBloomFilter rfBloomFilter = RFBloomFilters[partition]; + + bitmap[pos] = rfBloomFilter.mightContainLong(values[pos]); + if (bitmap[pos]) { + hitCount++; + } + } + } + + } + + return hitCount; + } + + @Override + public void collectNulls(int positionOffset, int positionCount, BitSet nullBitmap, int targetOffset) { + Preconditions.checkArgument(positionOffset + positionCount <= this.positionCount); + if (isNull == null) { + return; + } + if (selection != null) { + for (int i = positionOffset; i < positionOffset + positionCount; i++) { + int j = selection[i]; + if (isNull[j + arrayOffset]) { + nullBitmap.set(targetOffset); + } + targetOffset++; + } + } else { + for (int i = positionOffset; i < positionOffset + positionCount; i++) { + if (isNull[i + arrayOffset]) { + nullBitmap.set(targetOffset); + } + targetOffset++; + } + } + } + + @Override + public void recycle() { + if (recycler != null) { + recycler.recycle(values); + } + } + + @Override + public void copyToLongArray(int positionOffset, int positionCount, long[] targetArray, int targetOffset) { + Preconditions.checkArgument(positionOffset + positionCount <= this.positionCount); + if (selection != null) { + for (int i = positionOffset; i < positionOffset + positionCount; i++) { + int j = selection[i]; + targetArray[targetOffset++] = values[j + arrayOffset]; + } + } else { + for (int i = positionOffset; i < positionOffset + positionCount; i++) { + targetArray[targetOffset++] = values[i + arrayOffset]; + } + } + } + + @Override + public void appendTypedHashTable(TypedList typedList, int sourceIndex, int startIndexIncluded, + int endIndexExcluded) { + Preconditions.checkArgument(endIndexExcluded <= this.positionCount); + if (selection != null) { + for (int i = startIndexIncluded; i < endIndexExcluded; i++) { + int j = selection[i]; + typedList.setLong(sourceIndex++, values[j + arrayOffset]); + } + } else { + typedList.setLongArray(sourceIndex, values, startIndexIncluded, endIndexExcluded); + } + } + @Override public long getLong(int position) { - checkReadablePosition(position); - return values[position + arrayOffset]; + position = realPositionOf(position); + return getLongInner(position); } @Override @@ -62,21 +493,44 @@ public Object getObject(int position) { return isNull(position) ? 
null : getLong(position); } + @Override + public boolean isNull(int position) { + position = realPositionOf(position); + return isNullInner(position); + } + @Override public boolean equals(int position, Block other, int otherPosition) { - boolean n1 = isNull(position); - boolean n2 = other.isNull(otherPosition); - if (n1 && n2) { - return true; - } else if (n1 != n2) { - return false; + position = realPositionOf(position); + return equalsInner(position, other, otherPosition); + } + + @Override + public void writePositionTo(int[] selection, int offsetInSelection, int positionCount, BlockBuilder blockBuilder) { + if (this.selection != null || !(blockBuilder instanceof LongBlockBuilder)) { + // don't support it when selection in use. + super.writePositionTo(selection, offsetInSelection, positionCount, blockBuilder); + return; } - if (other instanceof LongBlock || other instanceof LongBlockBuilder) { - return getLong(position) == other.getLong(otherPosition); - } else if (other instanceof IntegerBlock || other instanceof IntegerBlockBuilder) { - return getLong(position) == other.getInt(otherPosition); - } else { - throw new AssertionError(); + + if (!mayHaveNull()) { + ((LongBlockBuilder) blockBuilder).values + .add(this.values, selection, offsetInSelection, positionCount); + + ((LongBlockBuilder) blockBuilder).valueIsNull + .add(false, positionCount); + + return; + } + + for (int i = 0; i < positionCount; i++) { + int position = selection[i + offsetInSelection]; + + if (isNull != null && isNull[position + arrayOffset]) { + blockBuilder.appendNull(); + } else { + blockBuilder.writeLong(values[position + arrayOffset]); + } } } @@ -87,28 +541,30 @@ public static LongBlock wrap(long[] values) { @Override public void writePositionTo(int position, BlockBuilder blockBuilder) { - if (isNull(position)) { - blockBuilder.appendNull(); - } else { - blockBuilder.writeLong(getLong(position)); - } + position = realPositionOf(position); + writePositionToInner(position, blockBuilder); } @Override public void addToHasher(IStreamingHasher sink, int position) { - if (isNull(position)) { - sink.putLong(NULL_VALUE); - } else { - sink.putLong(getLong(position)); - } + position = realPositionOf(position); + addToHasherInner(sink, position); } @Override public int hashCode(int position) { - if (isNull(position)) { - return 0; + position = realPositionOf(position); + return hashCodeInner(position); + } + + @Override + public long hashCodeUseXxhash(int pos) { + pos = realPositionOf(pos); + if (isNullInner(pos)) { + return NULL_HASH_CODE; + } else { + return XxhashUtils.finalShuffle(values[pos + arrayOffset]); } - return Long.hashCode(values[position + arrayOffset]); } @Override @@ -116,6 +572,17 @@ public int[] hashCodeVector() { if (mayHaveNull()) { return super.hashCodeVector(); } + + if (selection != null) { + final int n = getPositionCount(); + int[] hashes = new int[n]; + for (int position = 0; position < n; position++) { + int realPosition = realPositionOf(position); + hashes[position] = Long.hashCode(values[realPosition + arrayOffset]); + } + return hashes; + } + final int n = getPositionCount(); int[] hashes = new int[n]; for (int position = 0; position < n; position++) { @@ -124,6 +591,54 @@ public int[] hashCodeVector() { return hashes; } + @Override + public void hashCodeVector(int[] results, int positionCount) { + if (selection != null) { + for (int position = 0; position < positionCount; position++) { + results[position] = Long.hashCode(values[selection[position]]); + } + + if (mayHaveNull()) { + for (int 
position = 0; position < positionCount; position++) { + if (isNull[selection[position]]) { + results[position] = 0; + } + } + } + + } else { + for (int position = 0; position < positionCount; position++) { + results[position] = Long.hashCode(values[position]); + } + + if (mayHaveNull()) { + for (int position = 0; position < positionCount; position++) { + if (isNull[position]) { + results[position] = 0; + } + } + } + } + + } + + /** + * Designed for test purpose + */ + public static LongBlock ofInt(Integer... values) { + final int len = values.length; + boolean[] valueIsNull = new boolean[len]; + long[] longValues = new long[len]; + for (int i = 0; i < len; i++) { + if (values[i] != null) { + longValues[i] = values[i].longValue(); + } else { + valueIsNull[i] = true; + } + } + return new LongBlock(0, len, valueIsNull, longValues); + } + /** * Designed for test purpose */ @@ -149,7 +664,7 @@ public DataType getType() { @Override public void copySelected(boolean selectedInUse, int[] sel, int size, RandomAccessBlock output) { if (output instanceof LongBlock) { - LongBlock outputVectorSlot = (LongBlock) output; + LongBlock outputVectorSlot = output.cast(LongBlock.class); if (selectedInUse) { for (int i = 0; i < size; i++) { int j = sel[i]; @@ -170,7 +685,7 @@ public void shallowCopyTo(RandomAccessBlock another) { if (!(another instanceof LongBlock)) { GeneralUtil.nestedException("cannot shallow copy to " + another == null ? null : another.toString()); } - LongBlock vectorSlot = (LongBlock) another; + LongBlock vectorSlot = another.cast(LongBlock.class); super.shallowCopyTo(vectorSlot); vectorSlot.values = values; } @@ -182,7 +697,13 @@ protected Object getElementAtUnchecked(int position) { @Override public void setElementAt(int position, Object element) { - super.updateElementAt(position, element, e -> values[position] = (long) e); + if (element != null) { + isNull[position] = false; + values[position] = (long) element; + } else { + isNull[position] = true; + hasNull = true; + } } public long[] longArray() { @@ -211,4 +732,63 @@ public void updateSizeInfo() { estimatedSize = INSTANCE_SIZE + sizeOf(isNull) + sizeOf(values); elementUsedBytes = Byte.BYTES * positionCount + Long.BYTES * positionCount; } + + private int realPositionOf(int position) { + if (selection == null) { + return position; + } + return selection[position]; + } + + private long getLongInner(int position) { + return values[position + arrayOffset]; + } + + private void writePositionToInner(int position, BlockBuilder blockBuilder) { + if (isNullInner(position)) { + blockBuilder.appendNull(); + } else { + blockBuilder.writeLong(getLongInner(position)); + } + } + + private boolean equalsInner(int position, Block other, int otherPosition) { + boolean n1 = isNullInner(position); + boolean n2 = other.isNull(otherPosition); + if (n1 && n2) { + return true; + } else if (n1 != n2) { + return false; + } + if (other instanceof LongBlock || other instanceof LongBlockBuilder) { + return getLongInner(position) == other.getLong(otherPosition); + } else if (other instanceof IntegerBlock || other instanceof IntegerBlockBuilder) { + return getLongInner(position) == other.getInt(otherPosition); + } else { + throw new AssertionError(); + } + } + + private boolean isNullInner(int position) { + return isNull != null && isNull[position + arrayOffset]; + } + + private void addToHasherInner(IStreamingHasher sink, int position) { + if (isNullInner(position)) { + sink.putLong(NULL_VALUE); + } else { + sink.putLong(getLongInner(position)); + } + } + + 
private int hashCodeInner(int position) { + if (isNullInner(position)) { + return 0; + } + return Long.hashCode(values[position + arrayOffset]); + } + + public int[] getSelection() { + return selection; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/LongBlockBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/LongBlockBuilder.java index 936eef991..818230f0f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/LongBlockBuilder.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/LongBlockBuilder.java @@ -16,16 +16,30 @@ package com.alibaba.polardbx.executor.chunk; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.executor.mpp.operator.DriverContext; +import com.alibaba.polardbx.executor.operator.util.DriverObjectPool; +import com.alibaba.polardbx.executor.operator.util.ObjectPools; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.google.common.base.Preconditions; import it.unimi.dsi.fastutil.longs.LongArrayList; public class LongBlockBuilder extends AbstractBlockBuilder { - protected final LongArrayList values; + protected final BatchedArrayList.BatchLongArrayList values; + private DriverObjectPool objectPool; + private int chunkLimit; public LongBlockBuilder(int capacity) { super(capacity); - this.values = new LongArrayList(capacity); + this.values = new BatchedArrayList.BatchLongArrayList(capacity); + } + + public LongBlockBuilder(int capacity, int chunkLimit, DriverObjectPool objectPool) { + super(capacity); + this.values = new BatchedArrayList.BatchLongArrayList(capacity); + this.objectPool = objectPool; + this.chunkLimit = chunkLimit; } @Override @@ -63,7 +77,12 @@ public void ensureCapacity(int capacity) { @Override public Block build() { - return new LongBlock(0, getPositionCount(), mayHaveNull() ? valueIsNull.elements() : null, values.elements()); + Block block = + new LongBlock(0, getPositionCount(), mayHaveNull() ? 
valueIsNull.elements() : null, values.elements()); + if (objectPool != null) { + block.setRecycler(objectPool.getRecycler(chunkLimit)); + } + return block; } @Override @@ -74,7 +93,16 @@ public void appendNull() { @Override public BlockBuilder newBlockBuilder() { - return new LongBlockBuilder(getCapacity()); + if (objectPool != null) { + return new LongBlockBuilder(getCapacity(), chunkLimit, objectPool); + } else { + return new LongBlockBuilder(getCapacity()); + } + } + + @Override + public BlockBuilder newBlockBuilder(ObjectPools objectPools, int chunkLimit) { + return new LongBlockBuilder(getCapacity(), chunkLimit, objectPools.getLongArrayPool()); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/MutableChunk.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/MutableChunk.java index 606559176..76fd2b9c2 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/MutableChunk.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/MutableChunk.java @@ -17,54 +17,48 @@ package com.alibaba.polardbx.executor.chunk; import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.operator.util.ObjectPools; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.google.common.base.Preconditions; import java.util.ArrayList; +import java.util.BitSet; import java.util.Collection; import java.util.List; public class MutableChunk extends Chunk { - public MutableChunk(int positionCount, Block... blocks) { - super(positionCount, blocks); - } + private int chunkLimit; + private int[] outputIndexes; + private boolean isFirstAllocation; + + // for literal + BitSet literalBitmap; + // for test public MutableChunk(Block... blocks) { super(blocks); } - public MutableChunk(int[] selection, Block[] slots) { + public MutableChunk(int[] selection, Block[] slots, int chunkLimit, int[] outputIndexes, BitSet literalBitmap) { super(selection, slots); + this.chunkLimit = chunkLimit; + this.outputIndexes = outputIndexes; + this.isFirstAllocation = true; + this.literalBitmap = literalBitmap; } public RandomAccessBlock slotIn(int index, DataType dataType) { - RandomAccessBlock block = slotIn(index); - DataType slotDataType = block.getType(); - if (slotDataType != null && !slotDataType.getDataClass().equals(dataType.getDataClass())) { - GeneralUtil.nestedException("block type " + slotDataType + " is not consistent with " + dataType); - } - - return block; + return slotIn(index); } /** * unsafe method to get vector */ public RandomAccessBlock slotIn(int index) { - RandomAccessBlock block = null; - if (index < blocks.length) { - block = (RandomAccessBlock) blocks[index]; - if (block == null) { - GeneralUtil.nestedException("block in " + index + " does not exist"); - } - } else { - GeneralUtil.nestedException("block in " + index + " does not exist"); - } - return block; + return (RandomAccessBlock) blocks[index]; } public void setSlotAt(RandomAccessBlock block, int index) { - Preconditions.checkNotNull(block, "block can't be null"); this.blocks[index] = (Block) block; } @@ -91,10 +85,7 @@ public void reallocate(int newBatchSize, int blockCount) { reallocate(newBatchSize, blockCount, false); } - /** - * If the runtime size of Chunk exceeds the batch size, reallocate the vector. 
- */ - public void reallocate(final int newBatchSize, int blockCount, boolean reuse) { + public void allocateWithObjectPool(final int newBatchSize, final int inputBlockCount, ObjectPools objectPools) { // Get the destination physical element count. // It's greater than or equal to batch size (position count). int distSize = newBatchSize; @@ -103,14 +94,16 @@ public void reallocate(final int newBatchSize, int blockCount, boolean reuse) { distSize = Math.max(newBatchSize, selectionBound); } - for (int i = blockCount; i < blocks.length; i++) { + int j = 0; + for (int i = inputBlockCount; i < blocks.length; i++) { Block vector = blocks[i]; RandomAccessBlock newVector; - if (reuse - && vector.getPositionCount() == distSize - && vector instanceof RandomAccessBlock - ) { - newVector = (RandomAccessBlock) vector; + + if (j < outputIndexes.length && outputIndexes[j] == i) { + j++; + // allocate memory for output vector in chunk limit size. + newVector = BlockUtils.createBlock(((RandomAccessBlock) vector).getType(), + distSize, objectPools, chunkLimit); } else { newVector = BlockUtils.createBlock(((RandomAccessBlock) vector).getType(), distSize); } @@ -118,6 +111,127 @@ public void reallocate(final int newBatchSize, int blockCount, boolean reuse) { newVector.resize(newBatchSize); blocks[i] = (Block) newVector; } + + this.positionCount = newBatchSize; + } + + public void allocateWithReuse(final int newBatchSize, final int inputBlockCount) { + if (isFirstAllocation) { + // In first allocation, allocate memory for all output blocks. + for (int i = inputBlockCount; i < blocks.length; i++) { + Block vector = blocks[i]; + RandomAccessBlock newVector = (RandomAccessBlock) vector; + + if (literalBitmap == null || !literalBitmap.get(i)) { + newVector = BlockUtils.createBlock(((RandomAccessBlock) vector).getType(), chunkLimit); + // set position count = max{selection count, batch size} + newVector.resize(newBatchSize); + } else { + // lazy allocation + newVector.resize(0); + } + + blocks[i] = (Block) newVector; + } + isFirstAllocation = false; + } else { + for (int i = inputBlockCount; i < blocks.length; i++) { + RandomAccessBlock vector = (RandomAccessBlock) blocks[i]; + if (literalBitmap == null || !literalBitmap.get(i)) { + // set position count = max{selection count, batch size} + vector.resize(newBatchSize); + } else { + vector.resize(0); + } + } + } + this.positionCount = newBatchSize; + } + + /** + * If the runtime size of Chunk exceeds the batch size, reallocate the vector. + */ + public void reallocate(final int newBatchSize, int blockCount, boolean reuse) { + if (reuse && chunkLimit > 0 && outputIndexes != null) { + + if (isFirstAllocation) { + // In first allocation, allocate memory for all output blocks. + for (int i = blockCount; i < blocks.length; i++) { + Block vector = blocks[i]; + + // exclude the literal expression + RandomAccessBlock newVector = (RandomAccessBlock) vector; + if (literalBitmap == null || !literalBitmap.get(i)) { + newVector = BlockUtils.createBlock(((RandomAccessBlock) vector).getType(), chunkLimit); + // set position count = max{selection count, batch size} + newVector.resize(newBatchSize); + } else { + newVector.resize(0); + } + + blocks[i] = (Block) newVector; + + } + isFirstAllocation = false; + } else { + // Just allocate memory for output vector in chunk limit size. 
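+                // After the first allocation, input vectors are reused in place; only the
+                // designated output vectors are re-created, each sized to chunkLimit.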
+                for (int i = 0; i < outputIndexes.length; i++) {
+                    Block vector = blocks[outputIndexes[i]];
+                    RandomAccessBlock newVector =
+                        BlockUtils.createBlock(((RandomAccessBlock) vector).getType(), chunkLimit);
+                    blocks[outputIndexes[i]] = (Block) newVector;
+                }
+
+                for (int i = blockCount; i < blocks.length; i++) {
+                    // set position count = max{selection count, batch size}
+                    RandomAccessBlock vector = (RandomAccessBlock) blocks[i];
+                    if (literalBitmap == null || !literalBitmap.get(i)) {
+                        vector.resize(newBatchSize);
+                    } else {
+                        vector.resize(0);
+                    }
+
+                }
+
+            }
+        } else {
+            // allocate memory for request.
+
+            // Get the destination physical element count.
+            // It's greater than or equal to batch size (position count).
+            int distSize = newBatchSize;
+            if (isSelectionInUse() && selection != null && selection.length > 0) {
+                int selectionBound = selection[selection.length - 1] + 1;
+                distSize = Math.max(newBatchSize, selectionBound);
+            }
+
+            for (int i = blockCount; i < blocks.length; i++) {
+                Block vector = blocks[i];
+                RandomAccessBlock newVector;
+
+                // don't merge branch
+                if (literalBitmap != null && literalBitmap.get(i)) {
+                    // exclude the literal expression
+                    newVector = (RandomAccessBlock) vector;
+                    newVector.resize(0);
+                } else if (reuse
+                    && vector.getPositionCount() == distSize
+                    && vector instanceof RandomAccessBlock
+                ) {
+                    // Just reuse vector if dist size is equal.
+                    newVector = (RandomAccessBlock) vector;
+                    // set position count = max{selection count, batch size}
+                    newVector.resize(newBatchSize);
+                } else {
+                    newVector = BlockUtils.createBlock(((RandomAccessBlock) vector).getType(), distSize);
+                    // set position count = max{selection count, batch size}
+                    newVector.resize(newBatchSize);
+                }
+
+                blocks[i] = (Block) newVector;
+            }
+        }
+        this.positionCount = newBatchSize;
     }
 
@@ -129,11 +243,18 @@ public static class Builder {
         final int positionCount;
         int[] selection;
         List<Block> blocks = new ArrayList<>(64);
+        int[] outputIndexes;
+        int chunkLimit;
+
+        // for literal
+        BitSet literalBitmap;
 
         public Builder(int positionCount) {
             Preconditions.checkArgument(
                 positionCount > 0, "Slot length expected to be greater than 0 but is " + positionCount);
             this.positionCount = positionCount;
+            this.chunkLimit = 0;
+            this.outputIndexes = null;
         }
 
         public Builder withSelection(int[] selection) {
@@ -157,11 +278,28 @@ public Builder addSlotsByTypes(Collection<DataType<?>> dataTypes) {
             return this;
         }
 
+        public Builder addOutputIndexes(int[] outputIndexes) {
+            this.outputIndexes = outputIndexes;
+            return this;
+        }
+
+        public Builder addChunkLimit(int chunkLimit) {
+            this.chunkLimit = chunkLimit;
+            return this;
+        }
+
+        public Builder addLiteralBitmap(BitSet literalBitmap) {
+            this.literalBitmap = literalBitmap;
+            // exclude the root output index.
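+            // The bit for outputIndexes[0] (the root output vector) is cleared below so
+            // the final result is always materialized, even when it is a literal.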
+ this.literalBitmap.clear(outputIndexes[0]); + return this; + } + public MutableChunk build() { if (blocks.isEmpty()) { throw new IllegalArgumentException("Can't create vector batch without any slots!"); } - return new MutableChunk(selection, blocks.toArray(new Block[0])); + return new MutableChunk(selection, blocks.toArray(new Block[0]), chunkLimit, outputIndexes, literalBitmap); } } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/NullBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/NullBlock.java index 43f6aee8c..e1880ef18 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/NullBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/NullBlock.java @@ -70,6 +70,11 @@ void checkReadablePosition(int position) { } } + @Override + public long hashCodeUseXxhash(int pos) { + return NULL_HASH_CODE; + } + @Override public boolean mayHaveNull() { return true; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/RandomAccessBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/RandomAccessBlock.java index c08859464..45e363026 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/RandomAccessBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/RandomAccessBlock.java @@ -21,7 +21,7 @@ /** * Random accessible column vector, extended by Block class. */ -public interface RandomAccessBlock { +public interface RandomAccessBlock extends CastableBlock { /** * Set the nullness array to the vector diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ReferenceBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ReferenceBlock.java index ff44f0a41..f55a066ae 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ReferenceBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ReferenceBlock.java @@ -33,7 +33,7 @@ public class ReferenceBlock extends AbstractBlock { private static final long INSTANCE_SIZE = ClassLayout.parseClass(ReferenceBlock.class).instanceSize(); - private Object[] values; + protected Object[] values; public ReferenceBlock(DataType dataType, int slotLen) { super(dataType, slotLen); @@ -44,7 +44,7 @@ public ReferenceBlock(DataType dataType, int slotLen) { public ReferenceBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, Object[] values, DataType dataType) { super(dataType, positionCount, valueIsNull, valueIsNull != null); - this.values = Preconditions.checkNotNull(values); + this.values = values; updateSizeInfo(); } @@ -52,7 +52,7 @@ public ReferenceBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, DataType dataType, boolean hasNull) { super(dataType, positionCount, valueIsNull, hasNull); - this.values = Preconditions.checkNotNull(values); + this.values = values; updateSizeInfo(); } @@ -72,7 +72,7 @@ public boolean equals(int position, Block otherBlock, int otherPosition) { return false; } if (otherBlock instanceof ReferenceBlock) { - return getReference(position).equals(((ReferenceBlock) otherBlock).getReference(otherPosition)); + return getReference(position).equals((otherBlock.cast(ReferenceBlock.class)).getReference(otherPosition)); } else if (otherBlock instanceof ReferenceBlockBuilder) { return getReference(position).equals(((ReferenceBlockBuilder) otherBlock).getReference(otherPosition)); } else { @@ -82,6 +82,9 
@@ public boolean equals(int position, Block otherBlock, int otherPosition) { @Override public T getObject(int position) { + if (isNull(position)) { + return null; + } return getReference(position); } @@ -98,10 +101,15 @@ public int hashCode(int position) { return Objects.hashCode(values[position + arrayOffset]); } + @Override + public long hashCodeUseXxhash(int position) { + return hashCode(position); + } + @Override public void copySelected(boolean selectedInUse, int[] sel, int size, RandomAccessBlock output) { if (output instanceof ReferenceBlock) { - ReferenceBlock outputVectorSlot = (ReferenceBlock) output; + ReferenceBlock outputVectorSlot = output.cast(ReferenceBlock.class); if (selectedInUse) { for (int i = 0; i < size; i++) { int j = sel[i]; @@ -122,7 +130,7 @@ public void shallowCopyTo(RandomAccessBlock another) { if (!(another instanceof ReferenceBlock)) { GeneralUtil.nestedException("cannot shallow copy to " + another == null ? null : another.toString()); } - ReferenceBlock vectorSlot = (ReferenceBlock) another; + ReferenceBlock vectorSlot = another.cast(ReferenceBlock.class); super.shallowCopyTo(vectorSlot); vectorSlot.values = values; } @@ -134,9 +142,13 @@ protected Object getElementAtUnchecked(int position) { @Override public void setElementAt(int position, Object element) { - super.updateElementAt(position, element, e -> values[position] = e); - - //TODO updateSizeInfo() should be called when we are able to estimated the size of Object[] + if (element != null) { + isNull[position] = false; + values[position] = element; + } else { + isNull[position] = true; + hasNull = true; + } } public Object[] objectArray() { @@ -149,8 +161,10 @@ public Object[] objectArray() { * @param context Evaluation Context * @return Type-specific block. */ - public Block toTypeSpecificBlock(EvaluationContext context) { - BlockBuilder blockBuilder = BlockBuilders.create(dataType, context.getExecutionContext(), positionCount); + public Block toTypeSpecificBlock(EvaluationContext context, DataType resultType) { + DataType targetType = resultType == null ? dataType : resultType; + boolean shouldConvert = resultType != null && !resultType.equalDeeply(dataType); + BlockBuilder blockBuilder = BlockBuilders.create(targetType, context.getExecutionContext(), positionCount); int batchSize = context.getPreAllocatedChunk().batchSize(); boolean isSelectionInUse = context.getPreAllocatedChunk().isSelectionInUse(); @@ -159,11 +173,17 @@ public Block toTypeSpecificBlock(EvaluationContext context) { for (int i = 0; i < batchSize; i++) { int j = selection[i]; Object value = isNull[j] ? null : values[j]; + if (shouldConvert) { + value = targetType.convertFrom(value); + } blockBuilder.writeObject(value); } } else { for (int i = 0; i < batchSize; i++) { Object value = isNull[i] ? null : values[i]; + if (shouldConvert) { + value = targetType.convertFrom(value); + } blockBuilder.writeObject(value); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/SegmentedDecimalBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/SegmentedDecimalBlock.java new file mode 100644 index 000000000..94d0ed5f1 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/SegmentedDecimalBlock.java @@ -0,0 +1,184 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.chunk; + +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import io.airlift.slice.Slice; + +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DECIMAL_MEMORY_SIZE; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.FRACTIONS_OFFSET; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.INTEGERS_OFFSET; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.IS_NEG_OFFSET; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.roundUp; + +public interface SegmentedDecimalBlock { + int UNSET = -1; + + Slice segmentUncheckedAt(int position); + + boolean isDecimal64(); + + boolean isDecimal128(); + + long getDecimal128Low(int position); + + long getDecimal128High(int position); + + /** + * State of decimal block + */ + enum DecimalBlockState { + UNALLOC_STATE(false, UNSET, UNSET, UNSET), + + UNSET_STATE(false, UNSET, UNSET, UNSET), + + // 8 bytes + DECIMAL_64(false, UNSET, UNSET, UNSET), + // 16 bytes + DECIMAL_128(false, UNSET, UNSET, UNSET), + + // 40 bytes + FULL(false, UNSET, UNSET, UNSET), + + // (12 bytes) frac * 10^-9 + SIMPLE_MODE_1(true, UNSET, UNSET, 0), + + // (12 bytes) int1 + frac * 10^-9 + SIMPLE_MODE_2(true, UNSET, 0, 1), + + // (12 bytes) int2 * 10^9 + int1 + frac * 10^-9 + SIMPLE_MODE_3(true, 0, 1, 2); + + private final boolean isSimple; + private final int int2Pos; + private final int int1Pos; + private final int fracPos; + + DecimalBlockState(boolean isSimple, int int2Pos, int int1Pos, int fracPos) { + this.isSimple = isSimple; + this.int2Pos = int2Pos; + this.int1Pos = int1Pos; + this.fracPos = fracPos; + } + + public static DecimalBlockState stateOf(Slice memorySegments, int position) { + int isNeg = memorySegments.getByte(position * DECIMAL_MEMORY_SIZE + IS_NEG_OFFSET) & 0xFF; + if (isNeg == 1) { + return FULL; + } + + int integers = memorySegments.getByte(position * DECIMAL_MEMORY_SIZE + INTEGERS_OFFSET) & 0xFF; + int fractions = memorySegments.getByte(position * DECIMAL_MEMORY_SIZE + FRACTIONS_OFFSET) & 0xFF; + + int intWord = roundUp(integers); + int fracWord = roundUp(fractions); + + if (intWord == 0 && fracWord == 1) { + // frac * 10^-9 + return SIMPLE_MODE_1; + } else if (intWord == 1 && fracWord == 1) { + // int1 + frac * 10^-9 + return SIMPLE_MODE_2; + } else if (intWord == 2 && fracWord == 1) { + // int2 * 10^9 + int1 + frac * 10^-9 + return SIMPLE_MODE_3; + } + + return FULL; + } + + public static DecimalBlockState stateOf(DecimalStructure decimalStructure) { + if (decimalStructure == null || decimalStructure.isNeg()) { + return FULL; + } + + int integers = decimalStructure.getIntegers(); + int fractions = decimalStructure.getFractions(); + + int intWord = roundUp(integers); + int fracWord = roundUp(fractions); + + if (intWord == 0 && fracWord == 1) { + // frac * 10^-9 + return SIMPLE_MODE_1; + } else if (intWord == 1 && fracWord == 1) { + // int1 + frac * 10^-9 + return SIMPLE_MODE_2; + } else if (intWord == 2 && fracWord == 1) { + // int2 * 10^9 + int1 + frac * 10^-9 + return SIMPLE_MODE_3; + } + + return FULL; + 
} + + public DecimalBlockState merge(DecimalBlockState that) { + if (this == UNALLOC_STATE || this == UNSET_STATE) { + return that; + } + + if (that == UNSET_STATE || that == UNALLOC_STATE) { + return this; + } + + if (this == that) { + return this; + } + return FULL; + } + + public boolean isSimple() { + return isSimple; + } + + public boolean isUnset() { + return this == UNSET_STATE; + } + + public boolean isFull() { + return this == FULL; + } + + public boolean isDecimal64() { + return this == DECIMAL_64; + } + + public boolean isDecimal128() { + return this == DECIMAL_128; + } + + public boolean isDecimal64Or128() { + return this == DECIMAL_64 || this == DECIMAL_128; + } + + public boolean isNormal() { + return this == FULL || this.isSimple; + } + + public int getInt2Pos() { + return int2Pos; + } + + public int getInt1Pos() { + return int1Pos; + } + + public int getFracPos() { + return fracPos; + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ShortBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ShortBlock.java index e2d439c70..bac4f2039 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ShortBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ShortBlock.java @@ -17,6 +17,8 @@ package com.alibaba.polardbx.executor.chunk; import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.XxhashUtils; +import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; @@ -60,6 +62,13 @@ public ShortBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, sho updateSizeInfo(); } + public static ShortBlock from(ShortBlock other, int selSize, int[] selection) { + return new ShortBlock(0, + selSize, + BlockUtils.copyNullArray(other.isNull, selection, selSize), + BlockUtils.copyShortArray(other.values, selection, selSize)); + } + @Override public short getShort(int position) { checkReadablePosition(position); @@ -97,6 +106,15 @@ public int hashCode(int position) { return Short.hashCode(values[position + arrayOffset]); } + @Override + public long hashCodeUseXxhash(int pos) { + if (isNull(pos)) { + return NULL_HASH_CODE; + } else { + return XxhashUtils.finalShuffle(values[pos + arrayOffset]); + } + } + @Override public int[] hashCodeVector() { if (mayHaveNull()) { @@ -110,6 +128,18 @@ public int[] hashCodeVector() { return hashes; } + @Override + public void hashCodeVector(int[] results, int positionCount) { + if (mayHaveNull()) { + super.hashCodeVector(results, positionCount); + return; + } + + for (int position = 0; position < positionCount; position++) { + results[position] = Short.hashCode(values[position + arrayOffset]); + } + } + @Override public boolean equals(int position, Block other, int otherPosition) { boolean n1 = isNull(position); @@ -134,7 +164,7 @@ public DataType getType() { @Override public void copySelected(boolean selectedInUse, int[] sel, int size, RandomAccessBlock output) { if (output instanceof ShortBlock) { - ShortBlock outputVectorSlot = (ShortBlock) output; + ShortBlock outputVectorSlot = output.cast(ShortBlock.class); if (selectedInUse) { for (int i = 0; i < size; i++) { int j = sel[i]; @@ -155,7 +185,7 @@ public void shallowCopyTo(RandomAccessBlock another) { if (!(another instanceof ShortBlock)) { 
GeneralUtil.nestedException("cannot shallow copy to " + another == null ? null : another.toString()); } - ShortBlock vectorSlot = (ShortBlock) another; + ShortBlock vectorSlot = another.cast(ShortBlock.class); super.shallowCopyTo(vectorSlot); vectorSlot.values = values; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/SliceBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/SliceBlock.java index 734eec92b..caaed6656 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/SliceBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/SliceBlock.java @@ -17,6 +17,9 @@ package com.alibaba.polardbx.executor.chunk; import com.alibaba.polardbx.common.charset.CollationName; +import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; +import com.alibaba.polardbx.executor.operator.scan.BlockDictionary; +import com.alibaba.polardbx.executor.operator.scan.impl.DictionaryMapping; import com.alibaba.polardbx.common.charset.SortKey; import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; @@ -29,9 +32,11 @@ import io.airlift.slice.Slice; import io.airlift.slice.SliceOutput; import io.airlift.slice.Slices; +import io.airlift.slice.XxHash64; import org.openjdk.jol.info.ClassLayout; -import java.lang.ref.WeakReference; +import java.util.Arrays; +import java.util.BitSet; import static com.alibaba.polardbx.common.CrcAccumulator.NULL_TAG; import static com.alibaba.polardbx.common.utils.memory.SizeOf.sizeOf; @@ -40,23 +45,79 @@ public class SliceBlock extends AbstractCommonBlock { private static final long INSTANCE_SIZE = ClassLayout.parseClass(SliceBlock.class).instanceSize(); private static final byte[] EMPTY_BYTES = new byte[] {}; + /** + * If compatible is true, use collation to handle sorting, comparing and hashing. + */ + private final boolean compatible; private SliceType dataType; + /** + * In direct mode, storing the bytes' data. + */ private Slice data; + /** + * In direct mode, storing the offsets of each slot. + */ private int[] offsets; - private WeakReference[] sortKeys; - + /** + * In dictionary mode, storing the dict data. + */ + private BlockDictionary dictionary; + /** + * In dictionary mode, storing the dict id. + * the id = -1 means null value. + */ + private int[] dictIds; + /** + * Hold the effective position in this block. + */ private int[] selection; - private final boolean compatible; + // construct the slice block using dictionary. + public SliceBlock(SliceType dataType, int arrayOffset, int positionCount, boolean[] valueIsNull, + BlockDictionary dictionary, int[] dictIds, boolean compatible) { + super(dataType, positionCount, valueIsNull, valueIsNull != null); + this.dataType = dataType; + + this.offsets = null; + this.data = null; + + this.dictionary = dictionary; + this.dictIds = dictIds; + + this.selection = null; + this.compatible = compatible; + updateSizeInfo(); + } + + // construct the slice block using dictionary and selection array. 
+ public SliceBlock(SliceType dataType, int arrayOffset, int positionCount, boolean[] valueIsNull, + BlockDictionary dictionary, int[] dictIds, int[] selection, boolean compatible) { + super(dataType, positionCount, valueIsNull, valueIsNull != null); + this.dataType = dataType; + + this.offsets = null; + this.data = null; + + this.dictionary = dictionary; + this.dictIds = dictIds; + + this.selection = selection; + this.compatible = compatible; + updateSizeInfo(); + } public SliceBlock(SliceType dataType, int arrayOffset, int positionCount, boolean[] valueIsNull, int[] offsets, Slice data, boolean compatible) { super(dataType, positionCount, valueIsNull, valueIsNull != null); Preconditions.checkNotNull(dataType); this.dataType = dataType; + this.offsets = offsets; this.data = data; - this.sortKeys = new WeakReference[positionCount]; + + this.dictionary = null; + this.dictIds = null; + this.selection = null; this.compatible = compatible; updateSizeInfo(); @@ -67,202 +128,494 @@ public SliceBlock(SliceType dataType, int arrayOffset, int positionCount, boolea super(dataType, positionCount, valueIsNull, valueIsNull != null); Preconditions.checkNotNull(dataType); this.dataType = dataType; + this.offsets = offsets; this.data = data; - this.sortKeys = new WeakReference[positionCount]; + + this.dictionary = null; + this.dictIds = null; + this.selection = selection; - this.compatible = false; + this.compatible = compatible; updateSizeInfo(); } - public int realPositionOf(int position) { - if (selection == null) { - return position; + // random access + public SliceBlock(SliceType inputType, int positionCount, boolean compatible, boolean useDictionary) { + super(inputType, positionCount); + this.dataType = inputType; + if (useDictionary) { + this.offsets = null; + this.data = null; + this.dictionary = null; + this.dictIds = new int[positionCount]; + } else { + this.offsets = new int[positionCount]; + this.data = null; + this.dictionary = null; + this.dictIds = null; } - return selection[position]; + this.selection = null; + this.compatible = compatible; + // need to manually call the updateSizeInfo(). 
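+        // (the data / dictionary contents are filled in later by the caller, so the
+        // size accounting cannot be computed at construction time)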
} - public boolean[] nulls() { - return isNull; + public static SliceBlock from(SliceBlock other, int selSize, int[] selection, boolean compatible, + boolean useSelection) { + if (useSelection) { + if (other.dictionary == null) { + return new SliceBlock(other.dataType, other.arrayOffset, selSize, + other.isNull, other.offsets, other.data, + selection, compatible + ); + } else { + return new SliceBlock(other.dataType, other.arrayOffset, selSize, + other.isNull, other.dictionary, other.dictIds, + selection, compatible + ); + } + } + if (other.dictionary == null) { + + if (selection == null) { + // case 1: direct copy slice + if (other.data == null) { + // all values are null + return new SliceBlock(other.dataType, other.arrayOffset, selSize, + BlockUtils.copyNullArray(other.isNull, null, selSize), + BlockUtils.copyIntArray(other.offsets, null, selSize), + null, null, compatible); + } + return new SliceBlock(other.dataType, other.arrayOffset, selSize, + BlockUtils.copyNullArray(other.isNull, null, selSize), + BlockUtils.copyIntArray(other.offsets, null, selSize), + Slices.copyOf(other.data), + null, compatible); + } else { + // case 2: refactor offset & slice + boolean[] targetNulls = BlockUtils.copyNullArray(other.isNull, selection, selSize); + int[] targetOffsets = new int[selSize]; + if (other.data == null) { + // all values are null + Arrays.fill(targetOffsets, 0); + return new SliceBlock(other.dataType, other.arrayOffset, selSize, + targetNulls, + targetOffsets, + null, + null, compatible + ); + } + SliceOutput sliceOutput = new DynamicSliceOutput(selSize); + for (int position = 0; position < selSize; position++) { + int beginOffset = other.beginOffsetInner(selection[position]); + int endOffset = other.endOffsetInner(selection[position]); + sliceOutput.writeBytes(other.data, beginOffset, endOffset - beginOffset); + targetOffsets[position] = sliceOutput.size(); + } + + return new SliceBlock(other.dataType, other.arrayOffset, selSize, + targetNulls, + targetOffsets, + sliceOutput.slice(), + null, compatible + ); + } + } else { + + if (selection == null) { + // case 3: direct copy dictionary + return new SliceBlock(other.dataType, other.arrayOffset, selSize, + BlockUtils.copyNullArray(other.isNull, null, selSize), + other.dictionary, + BlockUtils.copyIntArray(other.dictIds, null, selSize), + null, compatible + ); + } else { + // case 4: copy arrays with selection. + return new SliceBlock(other.dataType, other.arrayOffset, selSize, + BlockUtils.copyNullArray(other.isNull, selection, selSize), + other.dictionary, + BlockUtils.copyIntArray(other.dictIds, selection, selSize), + null, compatible + ); + } + + } } - public int[] offsets() { - return offsets; + @Override + public void copyToIntArray(int positionOffset, int positionCount, int[] targetArray, int targetOffset, + DictionaryMapping dictionaryMapping) { + if (dictionary == null) { + throw new UnsupportedOperationException(); + } + int[] reMapping = dictionaryMapping.merge(dictionary); + if (selection != null) { + for (int i = positionOffset; i < positionOffset + positionCount; i++) { + int j = selection[i]; + int dictId = dictIds[j + arrayOffset]; + targetArray[targetOffset++] = dictId == -1 ? 0 : reMapping[dictId]; + } + } else { + for (int i = positionOffset; i < positionOffset + positionCount; i++) { + int dictId = dictIds[i + arrayOffset]; + targetArray[targetOffset++] = dictId == -1 ? 
0 : reMapping[dictId]; + } + } } - public Slice data() { + @Override + public void collectNulls(int positionOffset, int positionCount, BitSet nullBitmap, int targetOffset) { + Preconditions.checkArgument(positionOffset + positionCount <= this.positionCount); + if (isNull == null) { + return; + } + if (selection != null) { + for (int i = positionOffset; i < positionOffset + positionCount; i++) { + int j = selection[i]; + if (isNull[j + arrayOffset]) { + nullBitmap.set(targetOffset); + } + targetOffset++; + } + } else { + for (int i = positionOffset; i < positionOffset + positionCount; i++) { + if (isNull[i + arrayOffset]) { + nullBitmap.set(targetOffset); + } + targetOffset++; + } + } + } + + public Slice getData() { return data; } + public void setData(Slice data) { + Preconditions.checkArgument(dictionary == null || data == null); + this.data = data; + } + + public int[] getOffsets() { + return offsets; + } + + public BlockDictionary getDictionary() { + return dictionary; + } + + public void setDictionary(BlockDictionary dictionary) { + Preconditions.checkArgument(data == null || dictionary == null); + this.dictionary = dictionary; + } + + public int[] getDictIds() { + return dictIds; + } + + public SliceBlock setDictIds(int[] dictIds) { + this.dictIds = dictIds; + return this; + } + + public int getDictId(int pos) { + return dictIds[realPositionOf(pos)]; + } + + private int realPositionOf(int position) { + if (selection == null) { + return position; + } + return selection[position]; + } + @Override public boolean isNull(int position) { position = realPositionOf(position); - return isNull != null && isNull[position + arrayOffset]; + return isNullInner(position); } @Override public Object getObjectForCmp(int position) { - return getSortKey(position); + position = realPositionOf(position); + return getSortKeyInner(position); } public Comparable getSortKey(int position) { - if (isNull(position)) { - return null; - } else if (compatible) { - WeakReference ref = sortKeys[realPositionOf(position)]; - SortKey sortKey; - if (ref == null || ref.get() == null) { - sortKey = dataType.getSortKey(getRegion(position)); - sortKeys[position] = new WeakReference<>(sortKey); - } else { - sortKey = ref.get(); - } - return sortKey; - } else { - return getRegion(position); - } + position = realPositionOf(position); + return getSortKeyInner(position); } @Override public Object getObject(int position) { - return isNull(position) ? null : copySlice(position); + position = realPositionOf(position); + return isNullInner(position) ? null : copySliceInner(position); } public Slice getRegion(int position) { position = realPositionOf(position); - - int beginOffset = beginOffset(position); - int endOffset = endOffset(position); - return data.slice(beginOffset, endOffset - beginOffset); + return getRegionInner(position); } - public Slice copySlice(int position) { + public Slice getRegion(int position, Slice output) { position = realPositionOf(position); - - int beginOffset = beginOffset(position); - int endOffset = endOffset(position); - return Slices.copyOf(data, beginOffset, endOffset - beginOffset); + return getRegionInner(position, output); } - public byte[] copyBytes(int position) { - position = realPositionOf(position); + @Override + public void writePositionTo(int[] selection, int offsetInSelection, int positionCount, + BlockBuilder blockBuilder) { + if (this.selection != null || !(blockBuilder instanceof SliceBlockBuilder)) { + // don't support it when selection in use. 
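+            // fall back to the generic per-position copy implemented by the parent class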
+ super.writePositionTo(selection, offsetInSelection, positionCount, blockBuilder); + return; + } - int beginOffset = beginOffset(position); - int endOffset = endOffset(position); - return data.getBytes(beginOffset, endOffset - beginOffset); - } + SliceBlockBuilder sliceBlockBuilder = (SliceBlockBuilder) blockBuilder; - @Override - public void writePositionTo(int position, BlockBuilder blockBuilder) { - if (blockBuilder instanceof SliceBlockBuilder) { - writePositionTo(position, (SliceBlockBuilder) blockBuilder); - } else { - throw new AssertionError(); + if (!mayHaveNull()) { + writeNonNullTo(selection, offsetInSelection, positionCount, sliceBlockBuilder); + return; + } + + for (int i = 0; i < positionCount; i++) { + writePositionToInner(selection[i + offsetInSelection], sliceBlockBuilder); } } - private void writePositionTo(int position, SliceBlockBuilder b) { - if (isNull(position)) { - b.appendNull(); - } else { - position = realPositionOf(position); - int beginOffset = beginOffset(position); - int endOffset = endOffset(position); + private void writeNonNullTo(int[] selection, int offsetInSelection, int positionCount, + SliceBlockBuilder blockBuilder) { + if (dictionary == null && blockBuilder.blockDictionary == null) { + // case 1: Both the target block builder and this block don't use dictionary. + for (int i = 0; i < positionCount; i++) { + int position = selection[i + offsetInSelection]; + int beginOffset = beginOffsetInner(position); + int endOffset = endOffsetInner(position); - b.valueIsNull.add(false); - b.sliceOutput.writeBytes(data, beginOffset, endOffset - beginOffset); - b.offsets.add(b.sliceOutput.size()); + blockBuilder.sliceOutput.writeBytes(data, beginOffset, endOffset - beginOffset); + blockBuilder.offsets.add(blockBuilder.sliceOutput.size()); + } + + // write nulls + blockBuilder.valueIsNull.add(false, positionCount); + return; + } + + if (dictionary != null && blockBuilder.blockDictionary == null) { + // case 2: The target block builder doesn't use dictionary, but this block uses it. + if (blockBuilder.isEmpty()) { + // case 2.1: the block builder is empty, just overwrite the dictionary. + blockBuilder.setDictionary(dictionary); + for (int i = 0; i < positionCount; i++) { + Slice dictValue; + int position = selection[i + offsetInSelection]; + int dictId = dictIds[position]; + if (dictId == -1) { + dictValue = Slices.EMPTY_SLICE; + } else { + dictValue = dictionary.getValue(dictId); + } + blockBuilder.valueIsNull.add(false); + blockBuilder.values.add(dictId); + blockBuilder.sliceOutput.writeBytes(dictValue); + blockBuilder.offsets.add(blockBuilder.sliceOutput.size()); + } + + } else { + // case 2.2: the block builder is not empty, fall back to normal slice. + for (int i = 0; i < positionCount; i++) { + Slice dictValue; + int position = selection[i + offsetInSelection]; + int dictId = dictIds[position]; + if (dictId == -1) { + dictValue = Slices.EMPTY_SLICE; + } else { + dictValue = dictionary.getValue(dictId); + } + blockBuilder.valueIsNull.add(false); + blockBuilder.sliceOutput.writeBytes(dictValue); + blockBuilder.offsets.add(blockBuilder.sliceOutput.size()); + } + } + return; + } + + if (dictionary != null && blockBuilder.blockDictionary != null) { + // case 3: Both the target block builder and this block use dictionary. 
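+            // two dictionaries are treated as identical when both their hashCode and sizeInBytes match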
+ if (this.dictionary.hashCode() == blockBuilder.blockDictionary.hashCode() + && this.dictionary.sizeInBytes() == blockBuilder.blockDictionary.sizeInBytes()) { + // same dictionary + Slice dictValue; + for (int i = 0; i < positionCount; i++) { + int position = selection[i + offsetInSelection]; + if (position >= dictIds.length) { + throw new ArrayIndexOutOfBoundsException("DictId len: " + dictIds.length + + ", position: " + position); + } + int dictId = dictIds[position]; + if (dictId == -1) { + dictValue = Slices.EMPTY_SLICE; + } else { + dictValue = dictionary.getValue(dictId); + } + blockBuilder.valueIsNull.add(false); + blockBuilder.values.add(dictId); + blockBuilder.sliceOutput.writeBytes(dictValue); + blockBuilder.offsets.add(blockBuilder.sliceOutput.size()); + } + return; + } + // different dictionary + int[] remapping = blockBuilder.mergeDictionary(dictionary); + for (int i = 0; i < positionCount; i++) { + int position = selection[i + offsetInSelection]; + int originalDictId = dictIds[position]; + if (originalDictId == -1) { + throw new IllegalStateException("Expect non-null value in dictionary"); + } + int newDictId = remapping[originalDictId]; + Slice dictValue = dictionary.getValue(originalDictId); + blockBuilder.valueIsNull.add(false); + blockBuilder.values.add(newDictId); + blockBuilder.sliceOutput.writeBytes(dictValue); + blockBuilder.offsets.add(blockBuilder.sliceOutput.size()); + } + return; } + + // case 4: The target block builder uses dictionary, but this block doesn't use. + // worst performance, considered as a rare case + Slice[] newValues = new Slice[positionCount]; + for (int i = 0; i < positionCount; i++) { + int position = selection[i + offsetInSelection]; + newValues[i] = getRegionInner(position); + } + int[] remapping = blockBuilder.mergeValues(newValues); + for (int i = 0; i < positionCount; i++) { + blockBuilder.valueIsNull.add(false); + blockBuilder.values.add(remapping[i]); + blockBuilder.sliceOutput.writeBytes(newValues[i]); + blockBuilder.offsets.add(blockBuilder.sliceOutput.size()); + } + } + + @Override + public void writePositionTo(int position, BlockBuilder blockBuilder) { + position = realPositionOf(position); + writePositionToInner(position, blockBuilder); } @Override public void addToHasher(IStreamingHasher sink, int position) { - if (isNull(position)) { - sink.putBytes(EMPTY_BYTES); - } else { - Slice encodedSlice = dataType.getCharsetHandler().encodeFromUtf8(getRegion(position)); - sink.putBytes(encodedSlice.getBytes()); - } + position = realPositionOf(position); + addToHasherInner(sink, position); } @Override public int hashCode(int position) { - if (isNull(position)) { - return 0; - } else if (compatible) { - Slice subRegion = getRegion(position); - return dataType.hashcode(subRegion); - } else { - position = realPositionOf(position); - int beginOffset = beginOffset(position); - int endOffset = endOffset(position); - return data.hashCode(beginOffset, endOffset - beginOffset); - } + position = realPositionOf(position); + return hashCodeInner(position); } @Override - public int checksum(int position) { - if (isNull(position)) { - return NULL_TAG; + public long hashCodeUseXxhash(int pos) { + pos = realPositionOf(pos); + return hashCodeUseXxHashInner(pos); + } + + @Override + public long hashCodeUnderPairWise(int pos, boolean enableCompatible) { + if (!enableCompatible) { + return hashCodeUseXxhash(pos); + } else { + if (isNull(pos)) { + return NULL_HASH_CODE; + } + // should use collation handler to calculate hashcode under partition wise + int 
position = realPositionOf(pos); + Slice subRegion = getRegionInner(position); + return dataType.hashcode(subRegion); } - position = realPositionOf(position); - int beginOffset = beginOffset(position); - int endOffset = endOffset(position); - return ChunkUtil.hashCode(data, beginOffset, endOffset); } - public int equals(int position, Slice that) { + @Override + public int checksum(int position) { position = realPositionOf(position); - int beginOffset = beginOffset(position); - int endOffset = endOffset(position); - return this.data.compareTo(beginOffset, endOffset - beginOffset, that, 0, that.length()) == 0 ? 1 : 0; + return checksumInner(position); } public int anyMatch(int position, Slice that1, Slice that2) { position = realPositionOf(position); - int beginOffset = beginOffset(position); - int endOffset = endOffset(position); - return this.data.compareTo(beginOffset, endOffset - beginOffset, that1, 0, that1.length()) == 0 - || this.data.compareTo(beginOffset, endOffset - beginOffset, that2, 0, that2.length()) == 0 - ? 1 : 0; + + if (dictionary == null) { + int beginOffset = beginOffsetInner(position); + int endOffset = endOffsetInner(position); + + return this.data.compareTo(beginOffset, endOffset - beginOffset, that1, 0, that1.length()) == 0 + || this.data.compareTo(beginOffset, endOffset - beginOffset, that2, 0, that2.length()) == 0 + ? 1 : 0; + } else { + Slice value = getDictValue(position); + + return value.compareTo(that1) == 0 + || value.compareTo(that2) == 0 + ? 1 : 0; + } } public int anyMatch(int position, Slice that1, Slice that2, Slice that3) { position = realPositionOf(position); - int beginOffset = beginOffset(position); - int endOffset = endOffset(position); - return this.data.compareTo(beginOffset, endOffset - beginOffset, that1, 0, that1.length()) == 0 - || this.data.compareTo(beginOffset, endOffset - beginOffset, that2, 0, that2.length()) == 0 - || this.data.compareTo(beginOffset, endOffset - beginOffset, that3, 0, that3.length()) == 0 - ? 1 : 0; + + if (dictionary == null) { + int beginOffset = beginOffsetInner(position); + int endOffset = endOffsetInner(position); + + return this.data.compareTo(beginOffset, endOffset - beginOffset, that1, 0, that1.length()) == 0 + || this.data.compareTo(beginOffset, endOffset - beginOffset, that2, 0, that2.length()) == 0 + || this.data.compareTo(beginOffset, endOffset - beginOffset, that3, 0, that3.length()) == 0 + ? 1 : 0; + } else { + Slice value = getDictValue(position); + + return value.compareTo(that1) == 0 + || value.compareTo(that2) == 0 + || value.compareTo(that3) == 0 + ? 
1 : 0; + } } public int anyMatch(int position, Comparable[] those) { position = realPositionOf(position); - int beginOffset = beginOffset(position); - int endOffset = endOffset(position); - for (int i = 0; i < those.length; i++) { - if (this.data.compareTo( - beginOffset, endOffset - beginOffset, (Slice) those[i], 0, ((Slice) those[i]).length()) == 0) { - return 1; + + if (dictionary == null) { + int beginOffset = beginOffsetInner(position); + int endOffset = endOffsetInner(position); + + for (int i = 0; i < those.length; i++) { + if (this.data.compareTo( + beginOffset, endOffset - beginOffset, (Slice) those[i], 0, ((Slice) those[i]).length()) == 0) { + return 1; + } + } + return 0; + } else { + Slice value = getDictValue(position); + + for (int i = 0; i < those.length; i++) { + if (value.compareTo((Slice) those[i]) == 0) { + return 1; + } } + return 0; } - return 0; } @Override public boolean equals(int position, Block other, int otherPosition) { - if (other instanceof SliceBlock) { - return equals(position, (SliceBlock) other, otherPosition); - } else if (other instanceof SliceBlockBuilder) { - return equals(position, (SliceBlockBuilder) other, otherPosition); - } else { - throw new AssertionError(); - } - } + position = realPositionOf(position); - boolean equals(int position, SliceBlock other, int otherPosition) { - boolean n1 = isNull(position); + boolean n1 = isNullInner(position); boolean n2 = other.isNull(otherPosition); if (n1 && n2) { return true; @@ -270,34 +623,66 @@ boolean equals(int position, SliceBlock other, int otherPosition) { return false; } - // by collation - Slice region1 = getRegion(position); - Slice region2 = other.getRegion(otherPosition); - if (compatible) { - return dataType.compare(region1, region2) == 0; + if (other instanceof SliceBlock) { + return equalsInner(position, other.cast(SliceBlock.class), otherPosition); + } else if (other instanceof SliceBlockBuilder) { + return equalsInner(position, (SliceBlockBuilder) other, otherPosition); + } else { + throw new AssertionError(); + } + } + + if (this.dictionary == null) { + if (other instanceof SliceBlockBuilder) { + return BlockComparator.SLICE_BLOCK_NO_DICT_SLICE_BLOCK_BUILDER.compareTo( + this, position, other, otherPosition + ) == 0; + } else if (other instanceof SliceBlock) { + if (((SliceBlock) other).dictionary == null) { + return BlockComparator.SLICE_BLOCK_NO_DICT_SLICE_BLOCK_NO_DICT.compareTo( + this, position, other, otherPosition + ) == 0; + } else { + return BlockComparator.SLICE_BLOCK_NO_DICT_SLICE_BLOCK_DICT.compareTo( + this, position, other, otherPosition + ) == 0; + } + } } else { - return region1.equals(region2); + if (other instanceof SliceBlockBuilder) { + return BlockComparator.SLICE_BLOCK_DICT_SLICE_BLOCK_BUILDER.compareTo( + this, position, other, otherPosition + ) == 0; + } else if (other instanceof SliceBlock) { + if (((SliceBlock) other).dictionary == null) { + return BlockComparator.SLICE_BLOCK_DICT_SLICE_BLOCK_NO_DICT.compareTo( + this, position, other, otherPosition + ) == 0; + } else { + return BlockComparator.SLICE_BLOCK_DICT_SLICE_BLOCK_DICT.compareTo( + this, position, other, otherPosition + ) == 0; + } + } } + + throw new AssertionError(); + } - boolean equals(int position, SliceBlockBuilder other, int otherPosition) { - boolean n1 = isNull(position); - boolean n2 = other.isNull(otherPosition); - if (n1 && n2) { - return true; - } else if (n1 != n2) { - return false; - } + public int equals(int position, Slice that) { + position = realPositionOf(position); - // by collation - 
Slice region1 = getRegion(position); - Slice region2 = other.getRegion(otherPosition); + if (dictionary == null) { + int beginOffset = beginOffsetInner(position); + int endOffset = endOffsetInner(position); - if (compatible) { - return dataType.compare(region1, region2) == 0; + return this.data.compareTo(beginOffset, endOffset - beginOffset, that, 0, that.length()) == 0 ? 1 : 0; } else { - return region1.equals(region2); + Slice value = getDictValue(position); + + return value.compareTo(that) == 0 ? 1 : 0; } } @@ -318,14 +703,23 @@ public void resetCollation(CollationName collationName) { } public void encoding(SliceOutput sliceOutput) { + sliceOutput.writeBoolean(dictionary != null); + if (dictionary == null) { + encodingInner(sliceOutput); + } else { + encodingDictionaryInner(sliceOutput); + } + } + + private void encodingInner(SliceOutput sliceOutput) { if (selection != null) { int currentSize = 0; int[] realOffsets = new int[positionCount]; for (int i = 0; i < positionCount; i++) { int j = selection[i]; if (isNull == null || !this.isNull[j]) { - int beginOffset = beginOffset(j); - int endOffset = endOffset(j); + int beginOffset = beginOffsetInner(j); + int endOffset = endOffsetInner(j); int len = endOffset - beginOffset; currentSize += len; } @@ -341,8 +735,8 @@ public void encoding(SliceOutput sliceOutput) { for (int position = 0; position < positionCount; position++) { int j = selection[position]; if (isNull == null || !this.isNull[j]) { - int beginOffset = beginOffset(j); - int endOffset = endOffset(j); + int beginOffset = beginOffsetInner(j); + int endOffset = endOffsetInner(j); int len = endOffset - beginOffset; Slice slice = data.slice(beginOffset, len); sliceOutput.writeBytes(slice); @@ -364,19 +758,326 @@ public void encoding(SliceOutput sliceOutput) { } } - private int beginOffset(int position) { + private void encodingDictionaryInner(SliceOutput sliceOutput) { + dictionary.encoding(sliceOutput); + if (selection == null) { + sliceOutput.writeInt(dictIds.length); + for (int dictId : dictIds) { + sliceOutput.writeInt(dictId); + } + } else { + sliceOutput.writeInt(positionCount); + for (int i = 0; i < positionCount; i++) { + int j = selection[i]; + sliceOutput.writeInt(dictIds[j]); + } + } + } + + // get dict value from dictionary by dictId. + public Slice getDictValue(int position) { + Slice value; + int dictId = dictIds[position]; + if (dictId == -1) { + value = Slices.EMPTY_SLICE; + } else { + value = dictionary.getValue(dictId); + } + return value; + } + + private long hashCodeUseXxHashInner(int pos) { + if (isNullInner(pos)) { + return NULL_HASH_CODE; + } + + if (compatible) { + Slice subRegion = getRegionInner(pos); + return dataType.hashcode(subRegion); + } + + if (dictionary == null) { + int beginOffset = beginOffsetInner(pos); + int endOffset = endOffsetInner(pos); + return XxHash64.hash(data, beginOffset, endOffset - beginOffset); + } else { + int dictId = dictIds[pos]; + if (dictId == -1) { + return NULL_HASH_CODE; + } else { + Slice dictValue = dictionary.getValue(dictId); + return XxHash64.hash(dictValue, 0, dictValue.length()); + } + } + } + + public int beginOffsetInner(int position) { return position + arrayOffset > 0 ? 
offsets[position + arrayOffset - 1] : 0; } - private int endOffset(int position) { + public int endOffsetInner(int position) { return offsets[position + arrayOffset]; } + private boolean isNullInner(int position) { + return isNull != null && isNull[position + arrayOffset]; + } + + private Comparable getSortKeyInner(int position) { + if (isNullInner(position)) { + return null; + } + if (compatible) { + return dataType.getSortKey(getRegionInner(position)); + } + return getRegionInner(position); + } + + private boolean equalsInner(int realPosition, SliceBlock other, int otherPosition) { + + // by collation + Slice region1 = getRegionInner(realPosition); + Slice region2 = other.getRegion(otherPosition); + + if (compatible) { + return dataType.compare(region1, region2) == 0; + } else { + return region1.equals(region2); + } + } + + boolean equalsInner(int realPosition, SliceBlockBuilder other, int otherPosition) { + + // by collation + Slice region1 = getRegionInner(realPosition); + Slice region2 = other.getRegion(otherPosition); + + if (compatible) { + return dataType.compare(region1, region2) == 0; + } else { + return region1.equals(region2); + } + } + + private int hashCodeInner(int position) { + if (isNullInner(position)) { + return 0; + } + + if (compatible) { + Slice subRegion = getRegionInner(position); + return dataType.hashcode(subRegion); + } + + if (dictionary == null) { + int beginOffset = beginOffsetInner(position); + int endOffset = endOffsetInner(position); + + return data.hashCode(beginOffset, endOffset - beginOffset); + } else { + int dictId = dictIds[position]; + if (dictId == -1) { + return 0; + } + return dictionary.getValue(dictId).hashCode(); + } + } + + private int checksumInner(int position) { + if (isNullInner(position)) { + return NULL_TAG; + } + + if (dictionary == null) { + int beginOffset = beginOffsetInner(position); + int endOffset = endOffsetInner(position); + return ChunkUtil.hashCode(data, beginOffset, endOffset); + } else { + int dictId = dictIds[position]; + if (dictId == -1) { + return NULL_TAG; + } + Slice dictValue = dictionary.getValue(dictId); + return ChunkUtil.hashCode(dictValue, 0, dictValue.length()); + } + } + + private void writePositionToInner(int position, BlockBuilder blockBuilder) { + if (!(blockBuilder instanceof SliceBlockBuilder)) { + throw new AssertionError("Expect writing to a SliceBlockBuilder"); + } + + SliceBlockBuilder b = (SliceBlockBuilder) blockBuilder; + if (isNullInner(position)) { + if (dictionary != null && b.blockDictionary == null) { + if (b.isEmpty()) { + b.setDictionary(dictionary); + } + } + b.appendNull(); + return; + } + + if (dictionary == null && b.blockDictionary == null) { + // case 1: Both the target block builder and this block don't use dictionary. + int beginOffset = beginOffsetInner(position); + int endOffset = endOffsetInner(position); + + b.valueIsNull.add(false); + b.sliceOutput.writeBytes(data, beginOffset, endOffset - beginOffset); + b.offsets.add(b.sliceOutput.size()); + + return; + } + + if (dictionary != null && b.blockDictionary == null) { + // case 2: The target block builder doesn't use dictionary, but this block uses it. + + if (b.isEmpty()) { + // case 2.1: the block builder is empty, just overwrite the dictionary. 
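+                // nothing has been appended to the builder yet, so adopting this block's dictionary loses no data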
+ b.setDictionary(dictionary); + + Slice dictValue; + int dictId = dictIds[position]; + if (dictId == -1) { + dictValue = Slices.EMPTY_SLICE; + } else { + dictValue = dictionary.getValue(dictId); + } + b.valueIsNull.add(false); + b.values.add(dictId); + b.sliceOutput.writeBytes(dictValue); + b.offsets.add(b.sliceOutput.size()); + } else { + // case 2.2: the block builder is not empty, fall back to normal slice. + Slice dictValue; + int dictId = dictIds[position]; + if (dictId == -1) { + dictValue = Slices.EMPTY_SLICE; + } else { + dictValue = dictionary.getValue(dictId); + } + b.valueIsNull.add(false); + b.sliceOutput.writeBytes(dictValue); + b.offsets.add(b.sliceOutput.size()); + } + + return; + } + + if (dictionary != null && b.blockDictionary != null) { + // case 3: Both the target block builder and this block use dictionary. + if (this.dictionary.hashCode() == b.blockDictionary.hashCode() + && this.dictionary.sizeInBytes() == b.blockDictionary.sizeInBytes()) { + // same dictionary + Slice dictValue; + if (position >= dictIds.length) { + throw new ArrayIndexOutOfBoundsException("DictId len: " + dictIds.length + + ", position: " + position); + } + int dictId = dictIds[position]; + if (dictId == -1) { + dictValue = Slices.EMPTY_SLICE; + } else { + dictValue = dictionary.getValue(dictId); + } + b.valueIsNull.add(false); + b.values.add(dictId); + b.sliceOutput.writeBytes(dictValue); + b.offsets.add(b.sliceOutput.size()); + return; + } + + // different dictionary + int[] remapping = b.mergeDictionary(dictionary); + int originalDictId = dictIds[position]; + if (originalDictId == -1) { + throw new IllegalStateException("Expect non-null value in dictionary"); + } + int newDictId = remapping[originalDictId]; + Slice dictValue = dictionary.getValue(originalDictId); + b.valueIsNull.add(false); + b.values.add(newDictId); + b.sliceOutput.writeBytes(dictValue); + b.offsets.add(b.sliceOutput.size()); + return; + } + + // case 4: The target block builder uses dictionary, but this block doesn't use. + // bad performance, considered as a rare case + Slice value = getRegionInner(position); + int[] remapping = b.mergeValue(value); + + b.valueIsNull.add(false); + b.values.add(remapping[0]); + b.sliceOutput.writeBytes(value); + b.offsets.add(b.sliceOutput.size()); + } + + private Slice getRegionInner(int position) { + if (dictionary == null) { + int beginOffset = beginOffsetInner(position); + int endOffset = endOffsetInner(position); + return data.slice(beginOffset, endOffset - beginOffset); + } else { + int dictId = dictIds[position]; + if (dictId == -1) { + return Slices.EMPTY_SLICE; + } + return dictionary.getValue(dictId); + } + } + + private Slice getRegionInner(int position, Slice output) { + if (dictionary == null) { + int beginOffset = beginOffsetInner(position); + int endOffset = endOffsetInner(position); + return data.slice(beginOffset, endOffset - beginOffset, output); + } else { + int dictId = dictIds[position]; + if (dictId == -1) { + return Slices.EMPTY_SLICE; + } + Slice dictValue = dictionary.getValue(dictId); + return dictValue.slice(0, dictValue.length(), output); + } + } + + private Slice copySliceInner(int position) { + if (dictionary == null) { + int beginOffset = beginOffsetInner(position); + int endOffset = endOffsetInner(position); + return Slices.copyOf(data, beginOffset, endOffset - beginOffset); + } else { + int dictId = dictIds[position]; + if (dictId == -1) { + return Slices.EMPTY_SLICE; + } + Slice dictValue = dictionary.getValue(dictId); + return dictValue == null ? 
Slices.EMPTY_SLICE : Slices.copyOf(dictValue); + } + } + + private void addToHasherInner(IStreamingHasher sink, int position) { + if (isNullInner(position)) { + sink.putBytes(EMPTY_BYTES); + } else { + Slice encodedSlice = dataType.getCharsetHandler().encodeFromUtf8(getRegionInner(position)); + sink.putBytes(encodedSlice.getBytes()); + } + } + @Override public void updateSizeInfo() { - // Slice.length is the memory size in bytes. - estimatedSize = INSTANCE_SIZE + sizeOf(isNull) + data.length() + sizeOf(offsets); - elementUsedBytes = Byte.BYTES * positionCount + data.length() + Integer.BYTES * positionCount; + if (dictionary == null) { + // Slice.length is the memory size in bytes. + estimatedSize = INSTANCE_SIZE + sizeOf(isNull) + (data == null ? 0 : data.length()) + sizeOf(offsets); + elementUsedBytes = + Byte.BYTES * positionCount + (data == null ? 0 : data.length()) + Integer.BYTES * positionCount; + } else { + estimatedSize = INSTANCE_SIZE + sizeOf(isNull) + dictionary.sizeInBytes() + sizeOf(dictIds); + elementUsedBytes = Byte.BYTES * positionCount + dictionary.sizeInBytes() + Integer.BYTES * positionCount; + } } public int[] getSelection() { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/SliceBlockBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/SliceBlockBuilder.java index 583c1aaf2..82ee3cb66 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/SliceBlockBuilder.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/SliceBlockBuilder.java @@ -17,7 +17,10 @@ package com.alibaba.polardbx.executor.chunk; import com.alibaba.polardbx.common.charset.CharsetName; -import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.executor.operator.scan.BlockDictionary; +import com.alibaba.polardbx.executor.operator.scan.impl.DictionaryMapping; +import com.alibaba.polardbx.executor.operator.scan.impl.DictionaryMappingImpl; +import com.alibaba.polardbx.executor.operator.scan.impl.LocalBlockDictionary; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.SliceType; @@ -28,15 +31,21 @@ import io.airlift.slice.Slices; import it.unimi.dsi.fastutil.ints.IntArrayList; +import java.util.List; + import static com.alibaba.polardbx.common.charset.MySQLUnicodeUtils.LATIN1_TO_UTF8_BYTES; public class SliceBlockBuilder extends AbstractBlockBuilder { private static final int EXPECTED_STRING_SIZE_IN_BYTES = 64; - SliceOutput sliceOutput; - ExecutionContext context; final IntArrayList offsets; // records where the bytes end at final SliceType dataType; final boolean compatible; + final IntArrayList values; + SliceOutput sliceOutput; + ExecutionContext context; + // for dictionary + BlockDictionary blockDictionary; + DictionaryMapping mapping = null; public SliceBlockBuilder(DataType dataType, int initialCapacity, ExecutionContext context, boolean compatible) { super(initialCapacity); @@ -46,6 +55,7 @@ public SliceBlockBuilder(DataType dataType, int initialCapacity, ExecutionContex this.context = context; this.sliceOutput = new DynamicSliceOutput(EXPECTED_STRING_SIZE_IN_BYTES * initialCapacity); this.compatible = compatible; + this.values = new IntArrayList(4); } @Override @@ -55,18 +65,68 @@ public void ensureCapacity(int capacity) { sliceOutput.ensureCapacity(capacity * EXPECTED_STRING_SIZE_IN_BYTES); } + public void 
setDictionary(BlockDictionary dictionary) { + if (dictionary != null && this.blockDictionary != null) { + boolean notSame = dictionary.size() != this.blockDictionary.size(); + notSame &= dictionary.hashCode() != this.blockDictionary.hashCode(); + if (notSame) { + throw new IllegalArgumentException("Setting a new different dictionary in SliceBlockBuilder"); + } + } + if (this.blockDictionary == null && !valueIsNull.isEmpty()) { + // lazy append nulls in dictIds + for (int i = 0; i < valueIsNull.size(); i++) { + if (valueIsNull.getBoolean(i)) { + values.add(-1); + } else { + throw new UnsupportedOperationException( + "Do not support setting a new dictionary to builder with values"); + } + } + } + this.blockDictionary = dictionary; + } + + public boolean isEmpty() { + return valueIsNull.isEmpty(); + } + @Override public Block build() { - // prevent from memory leak - Slice slice = sliceOutput.slice(); - Slice data = Slices.copyOf(slice); + if (blockDictionary == null) { + Slice data = sliceOutput.slice(); - return new SliceBlock(dataType, - 0, - getPositionCount(), - mayHaveNull() ? valueIsNull.elements() : null, - offsets.elements(), - data, compatible); + return new SliceBlock(dataType, + 0, + getPositionCount(), + mayHaveNull() ? valueIsNull.elements() : null, + offsets.elements(), + data, compatible); + } else { + if (mapping == null) { + return new SliceBlock(dataType, 0, getPositionCount(), + mayHaveNull() ? valueIsNull.elements() : null, + blockDictionary, values.elements(), compatible + ); + } else { + List mergedDict = ((DictionaryMappingImpl) mapping).getMergedDict(); + BlockDictionary blockDictionary1 = new LocalBlockDictionary(mergedDict.toArray(new Slice[0])); + + return new SliceBlock(dataType, 0, getPositionCount(), + mayHaveNull() ? valueIsNull.elements() : null, + blockDictionary1, values.elements(), compatible + ); + } + + } + } + + public SliceOutput getSliceOutput() { + return sliceOutput; + } + + public IntArrayList getOffsets() { + return offsets; } @Override @@ -132,6 +192,9 @@ public void writeObject(Object value) { public void appendNull() { appendNullInternal(); offsets.add(sliceOutput.size()); + if (blockDictionary != null) { + values.add(-1); + } } @Override @@ -144,6 +207,35 @@ public Object getObject(int position) { return isNull(position) ? null : copySlice(position); } + public int[] mergeDictionary(BlockDictionary newDict) { + if (this.mapping == null) { + this.mapping = new DictionaryMappingImpl(); + this.mapping.merge(blockDictionary); + } + return this.mapping.merge(newDict); + } + + public int[] mergeValue(Slice newValue) { + if (this.mapping == null) { + this.mapping = new DictionaryMappingImpl(); + this.mapping.merge(blockDictionary); + } + BlockDictionary tmpDict = new LocalBlockDictionary(new Slice[] {newValue}); + return this.mapping.merge(tmpDict); + } + + /** + * bad performance + */ + public int[] mergeValues(Slice[] newValues) { + if (this.mapping == null) { + this.mapping = new DictionaryMappingImpl(); + this.mapping.merge(blockDictionary); + } + BlockDictionary tmpDict = new LocalBlockDictionary(newValues); + return this.mapping.merge(tmpDict); + } + private Slice copySlice(int position) { checkReadablePosition(position); @@ -153,11 +245,11 @@ private Slice copySlice(int position) { return Slices.copyOf(slice, beginOffset, endOffset - beginOffset); } - int beginOffset(int position) { + public int beginOffset(int position) { return position > 0 ? 
offsets.getInt(position - 1) : 0; } - int endOffset(int position) { + public int endOffset(int position) { return offsets.getInt(position); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/SliceBlockEncoding.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/SliceBlockEncoding.java index 1653ab5a9..3be985f75 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/SliceBlockEncoding.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/SliceBlockEncoding.java @@ -18,6 +18,7 @@ import com.alibaba.polardbx.common.charset.CharsetName; import com.alibaba.polardbx.common.charset.CollationName; +import com.alibaba.polardbx.executor.operator.scan.BlockDictionary; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.SliceType; import io.airlift.slice.Slice; @@ -48,7 +49,8 @@ public String getName() { @Override public void writeBlock(SliceOutput sliceOutput, Block block) { // write type information. - SliceBlock sliceBlock = (SliceBlock) block; + SliceBlock sliceBlock = block.cast(SliceBlock.class); + SliceType dataType = (SliceType) sliceBlock.getType(); byte[] charsetBytes = dataType.getCharsetName().name().getBytes(UTF8); byte[] collationBytes = dataType.getCollationName().name().getBytes(UTF8); @@ -98,22 +100,41 @@ public Block readBlock(SliceInput sliceInput) { boolean[] valueIsNull = decodeNullBits(sliceInput, positionCount); boolean existNonNull = sliceInput.readBoolean(); - int[] offset = new int[0]; - Slice data = Slices.EMPTY_SLICE; - if (existNonNull) { - offset = new int[positionCount]; - for (int position = 0; position < positionCount; position++) { - offset[position] = sliceInput.readInt(); - } - int maxOffset = offset[positionCount - 1]; - - if (maxOffset > 0) { - int length = sliceInput.readInt(); - Slice subRegion = sliceInput.readSlice(length); - data = Slices.copyOf(subRegion); + boolean useDictionary = sliceInput.readBoolean(); + + if (useDictionary) { + // read dictionary and dictIds. + BlockDictionary dictionary = BlockDictionary.decoding(sliceInput); + int len = sliceInput.readInt(); + int[] dictIds = new int[len]; + for (int i = 0; i < len; i++) { + dictIds[i] = sliceInput.readInt(); + } + return new SliceBlock(dataType, 0, positionCount, valueIsNull, dictionary, + dictIds, isCompatible); + + } else { + // read offset and slice data. 
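+            // offsets are cumulative end positions, so the last offset equals the total byte length of the data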
+ int[] offset = new int[positionCount]; + Slice data = Slices.EMPTY_SLICE; + for (int position = 0; position < positionCount; position++) { + offset[position] = sliceInput.readInt(); + } + int maxOffset = offset[positionCount - 1]; + + if (maxOffset > 0) { + int length = sliceInput.readInt(); + Slice subRegion = sliceInput.readSlice(length); + data = Slices.copyOf(subRegion); + } + + return new SliceBlock(dataType, 0, positionCount, valueIsNull, offset, + data, isCompatible); } } - return new SliceBlock(dataType, 0, positionCount, valueIsNull, offset, data, isCompatible); + + return new SliceBlock(dataType, 0, positionCount, valueIsNull, new int[0], + Slices.EMPTY_SLICE, isCompatible); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/StringBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/StringBlock.java index 9f8e27bec..c33789b29 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/StringBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/StringBlock.java @@ -16,11 +16,17 @@ package com.alibaba.polardbx.executor.chunk; +import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; +import com.alibaba.polardbx.executor.utils.ExecUtils; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.google.common.base.Preconditions; +import io.airlift.slice.XxHash64; import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import org.openjdk.jol.info.ClassLayout; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Objects; @@ -35,7 +41,11 @@ public class StringBlock extends AbstractCommonBlock { private static final long INSTANCE_SIZE = ClassLayout.parseClass(StringBlock.class).instanceSize(); private final int[] offsets; - private final char[] data; + private char[] data; + + public StringBlock(DataType dataType, int positionCount) { + this(dataType, 0, positionCount, new boolean[positionCount], new int[positionCount], null); + } StringBlock(DataType dataType, int arrayOffset, int positionCount, boolean[] valueIsNull, int[] offsets, char[] data) { @@ -45,6 +55,48 @@ public class StringBlock extends AbstractCommonBlock { updateSizeInfo(); } + /** + * Designed for test purpose + */ + public static StringBlock of(String... 
values) { + int totalLength = Arrays.stream(values).filter(Objects::nonNull).mapToInt(String::length).sum(); + StringBlockBuilder builder = new StringBlockBuilder(values.length, totalLength); + for (String value : values) { + if (value != null) { + builder.writeString(value); + } else { + builder.appendNull(); + } + } + return builder.build().cast(StringBlock.class); + } + + public static StringBlock from(StringBlock other, int selSize, int[] selection) { + int[] newOffsets = new int[selSize]; + + if (other.data == null) { + return new StringBlock(other.dataType, 0, selSize, + BlockUtils.copyNullArray(other.isNull, selection, selSize), + newOffsets, null); + } + if (selection == null) { + return new StringBlock(other.dataType, 0, selSize, + BlockUtils.copyNullArray(other.isNull, selection, selSize), + newOffsets, Arrays.copyOf(other.data, other.data.length)); + } else { + StringBlockBuilder stringBlockBuilder = new StringBlockBuilder(other.dataType, selSize, + other.data.length / (other.positionCount + 1) * selSize); + for (int i = 0; i < selSize; i++) { + if (other.isNull(selection[i])) { + stringBlockBuilder.appendNull(); + } else { + stringBlockBuilder.writeString(other.getString(selection[i])); + } + } + return (StringBlock) stringBlockBuilder.build(); + } + } + @Override public String getString(int position) { checkReadablePosition(position); @@ -77,6 +129,16 @@ public void addToHasher(IStreamingHasher sink, int position) { } } + @Override + public long hashCodeUseXxhash(int pos) { + if (isNull(pos)) { + return NULL_HASH_CODE; + } else { + byte[] rawBytes = getString(pos).getBytes(StandardCharsets.UTF_8); + return XxHash64.hash(rawBytes, 0, rawBytes.length); + } + } + private void writePositionTo(int position, StringBlockBuilder b) { if (isNull(position)) { b.appendNull(); @@ -112,7 +174,7 @@ public int checksum(int position) { @Override public boolean equals(int position, Block other, int otherPosition) { if (other instanceof StringBlock) { - return equals(position, (StringBlock) other, otherPosition); + return equals(position, other.cast(StringBlock.class), otherPosition); } else if (other instanceof StringBlockBuilder) { return equals(position, (StringBlockBuilder) other, otherPosition); } else { @@ -150,22 +212,6 @@ boolean equals(int position, StringBlockBuilder other, int otherPosition) { return ExecUtils.arrayEquals(data, pos1, len1, other.data.elements(), pos2, len2, true); } - /** - * Designed for test purpose - */ - public static StringBlock of(String... values) { - int totalLength = Arrays.stream(values).filter(Objects::nonNull).mapToInt(String::length).sum(); - StringBlockBuilder builder = new StringBlockBuilder(values.length, totalLength); - for (String value : values) { - if (value != null) { - builder.writeString(value); - } else { - builder.appendNull(); - } - } - return (StringBlock) builder.build(); - } - private int beginOffset(int position) { return position + arrayOffset > 0 ? 
offsets[position + arrayOffset - 1] : 0; } @@ -182,6 +228,11 @@ public char[] getData() { return data; } + public void setData(char[] data) { + Preconditions.checkArgument(this.data == null); + this.data = data; + } + @Override public void updateSizeInfo() { estimatedSize = INSTANCE_SIZE + sizeOf(isNull) + sizeOf(data) + sizeOf(offsets); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/StringBlockEncoding.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/StringBlockEncoding.java index 4b46dd023..0ab5448f7 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/StringBlockEncoding.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/StringBlockEncoding.java @@ -54,7 +54,7 @@ public String getName() { @Override public void writeBlock(SliceOutput sliceOutput, Block block) { - StringBlock stringBlock = (StringBlock) block; + StringBlock stringBlock = block.cast(StringBlock.class); DataType dataType = stringBlock.getType(); boolean hasStringType = dataType != null; sliceOutput.writeBoolean(hasStringType); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/TimeBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/TimeBlock.java index 58abc57f7..470c540c6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/TimeBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/TimeBlock.java @@ -21,9 +21,11 @@ import com.alibaba.polardbx.common.utils.time.core.OriginalTime; import com.alibaba.polardbx.common.utils.time.core.TimeStorage; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import io.airlift.slice.XxHash64; import org.openjdk.jol.info.ClassLayout; +import java.nio.charset.StandardCharsets; import java.sql.Time; import java.util.TimeZone; @@ -43,14 +45,30 @@ public class TimeBlock extends AbstractCommonBlock { private final long[] packed; private final TimeZone timezone; - TimeBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, long[] packed, - DataType dataType, TimeZone timezone) { + // random access + public TimeBlock(DataType dataType, int positionCount, TimeZone timezone) { + super(dataType, positionCount); + this.packed = new long[positionCount]; + this.timezone = timezone; + updateSizeInfo(); + } + + public TimeBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, long[] packed, + DataType dataType, TimeZone timezone) { super(dataType, positionCount, valueIsNull, valueIsNull != null); this.packed = packed; this.timezone = timezone; updateSizeInfo(); } + public static TimeBlock from(TimeBlock other, int selSize, int[] selection) { + return new TimeBlock(0, selSize, + BlockUtils.copyNullArray(other.isNull, selection, selSize), + BlockUtils.copyLongArray(other.packed, selection, selSize), + other.dataType, + other.timezone); + } + @Override public Time getTime(int position) { checkReadablePosition(position); @@ -128,6 +146,16 @@ public void addToHasher(IStreamingHasher sink, int position) { } } + @Override + public long hashCodeUseXxhash(int pos) { + if (isNull(pos)) { + return NULL_HASH_CODE; + } else { + byte[] rawBytes = getTime(pos).toString().getBytes(StandardCharsets.UTF_8); + return XxHash64.hash(rawBytes, 0, rawBytes.length); + } + } + public DataType getDataType() { return dataType; } @@ -135,7 +163,7 @@ public DataType getDataType() { @Override public boolean equals(int position, Block other, int otherPosition) { if (other 
instanceof TimeBlock) { - return equals(position, (TimeBlock) other, otherPosition); + return equals(position, other.cast(TimeBlock.class), otherPosition); } else if (other instanceof TimeBlockBuilder) { return equals(position, (TimeBlockBuilder) other, otherPosition); } else { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/TimeBlockEncoding.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/TimeBlockEncoding.java index 5bbc03d17..99617bc03 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/TimeBlockEncoding.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/TimeBlockEncoding.java @@ -49,7 +49,7 @@ public String getName() { @Override public void writeBlock(SliceOutput sliceOutput, Block block) { - TimeBlock timeBlock = (TimeBlock) block; + TimeBlock timeBlock = block.cast(TimeBlock.class); // write scale value DataType dataType = timeBlock.getDataType(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/TimestampBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/TimestampBlock.java index 7cf57095b..2333460e8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/TimestampBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/TimestampBlock.java @@ -16,6 +16,10 @@ package com.alibaba.polardbx.executor.chunk; +import com.alibaba.polardbx.common.type.MySQLStandardFieldType; +import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; +import com.alibaba.polardbx.common.utils.time.MySQLTimeConverter; +import com.alibaba.polardbx.common.utils.time.core.MySQLTimeVal; import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; import com.alibaba.polardbx.common.utils.time.core.MysqlDateTime; import com.alibaba.polardbx.common.utils.time.core.OriginalTimestamp; @@ -23,9 +27,15 @@ import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.google.common.base.Preconditions; +import io.airlift.slice.SliceOutput; +import io.airlift.slice.XxHash64; import org.openjdk.jol.info.ClassLayout; +import java.nio.charset.StandardCharsets; import java.sql.Timestamp; +import java.time.ZoneId; +import java.util.Arrays; +import java.util.Objects; import java.util.TimeZone; import static com.alibaba.polardbx.common.utils.memory.SizeOf.sizeOf; @@ -37,10 +47,6 @@ public class TimestampBlock extends AbstractCommonBlock { private static final long NULL_VALUE = 0L; private static final byte[] NULL_VALUE_FOR_HASHER = new byte[0]; private static final long INSTANCE_SIZE = ClassLayout.parseClass(TimestampBlock.class).instanceSize(); - - public static final long ZERO_TIMESTAMP_MILLIS = -1; - public static final long ZERO_TIMESTAMP_NANOS = -1; - /** * Store the timestamp as long value. 
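+ * The packed long is written by TimeStorage.writeTimestamp and decoded by TimeStorage.readTimestamp.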
*/ @@ -48,8 +54,29 @@ public class TimestampBlock extends AbstractCommonBlock { private TimeZone timezone; + private int[] selection; + + // random access + public TimestampBlock(DataType dataType, int positionCount, TimeZone timezone) { + super(dataType, positionCount); + this.dataType = dataType; + this.timezone = timezone; + this.packed = new long[positionCount]; + updateSizeInfo(); + } + TimestampBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, long[] packed, - DataType dataType, TimeZone timezone) { + DataType dataType, TimeZone timezone, int[] selection) { + super(dataType, positionCount, valueIsNull, valueIsNull != null); + this.dataType = dataType; + this.timezone = timezone; + this.packed = Preconditions.checkNotNull(packed); + this.selection = selection; + updateSizeInfo(); + } + + public TimestampBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, long[] packed, + DataType dataType, TimeZone timezone) { super(dataType, positionCount, valueIsNull, valueIsNull != null); this.dataType = dataType; this.timezone = timezone; @@ -66,10 +93,112 @@ public class TimestampBlock extends AbstractCommonBlock { updateSizeInfo(); } + public static TimestampBlock from(TimestampBlock timestampBlock, int selSize, int[] selection, + boolean useSelection, TimeZone timeZone) { + // Only convert timezone for timestamp type + MySQLStandardFieldType fieldType = timestampBlock.getDataType().fieldType(); + boolean notTimestampType = (fieldType != MySQLStandardFieldType.MYSQL_TYPE_TIMESTAMP && + fieldType != MySQLStandardFieldType.MYSQL_TYPE_TIMESTAMP2); + boolean isSameTimeZone = Objects.equals(timestampBlock.timezone.toZoneId(), timeZone.toZoneId()); + + if (notTimestampType || isSameTimeZone) { + // no need to do timezone conversion + if (useSelection) { + return new TimestampBlock(0, selSize, timestampBlock.isNull, timestampBlock.packed, + timestampBlock.dataType, + timestampBlock.timezone, selection); + } else { + return new TimestampBlock(0, selSize, + BlockUtils.copyNullArray(timestampBlock.isNull, selection, selSize), + BlockUtils.copyLongArray(timestampBlock.packed, selection, selSize), + timestampBlock.dataType, + timestampBlock.timezone, null); + } + } + + // do timezone conversion + // no need to keep selection since we have to rewrite the data + return convertTimeZone(timestampBlock, timeZone, selSize); + } + + /** + * Time zone conversion for csv chunk, assuming that the selection is null and the offset is zero + */ + public static TimestampBlock from(TimestampBlock timestampBlock, TimeZone timeZone) { + // Only convert timezone for timestamp type + MySQLStandardFieldType fieldType = timestampBlock.getDataType().fieldType(); + if (fieldType != MySQLStandardFieldType.MYSQL_TYPE_TIMESTAMP && + fieldType != MySQLStandardFieldType.MYSQL_TYPE_TIMESTAMP2) { + return timestampBlock; + } + + ZoneId sourceZoneId = timestampBlock.getTimezone().toZoneId(); + ZoneId targetZoneId = timeZone.toZoneId(); + + if (sourceZoneId.equals(targetZoneId)) { + return timestampBlock; + } + + assert timestampBlock.selection == null && timestampBlock.arrayOffset == 0; + return convertTimeZone(timestampBlock, timeZone, timestampBlock.positionCount); + } + + public static TimestampBlock convertTimeZone(TimestampBlock timestampBlock, TimeZone targetTimeZone, + int positionCount) { + ZoneId sourceZoneId = timestampBlock.getTimezone().toZoneId(); + ZoneId targetZoneId = targetTimeZone.toZoneId(); + + long[] convertPacked = Arrays.copyOf(timestampBlock.packed, positionCount); + + for (int i = 0; i 
< positionCount; i++) { + if (timestampBlock.isNull(i)) { + continue; + } + MySQLTimeVal timeVal = + MySQLTimeConverter.convertValidDatetimeToTimestamp( + timestampBlock.getTimestamp(i).getMysqlDateTime(), + null, + sourceZoneId + ); + + if (timeVal.getSeconds() == 0) { + convertPacked[i] = TimeStorage.writeTimestamp(MysqlDateTime.zeroDateTime()); + } else { + MysqlDateTime mysqlDateTime = MySQLTimeConverter.convertTimestampToDatetime(timeVal, targetZoneId); + convertPacked[i] = TimeStorage.writeTimestamp(mysqlDateTime); + } + } + + return new TimestampBlock( + 0, + positionCount, + timestampBlock.isNull, + convertPacked, + timestampBlock.dataType, + targetTimeZone, + null + ); + } + + private int realPositionOf(int position) { + if (selection == null) { + return position; + } + return selection[position]; + } + @Override + public boolean isNull(int position) { + position = realPositionOf(position); + return isNullInner(position); + } + public OriginalTimestamp getTimestamp(int position) { - checkReadablePosition(position); + position = realPositionOf(position); + return getTimestampInner(position); + } + private OriginalTimestamp getTimestampInner(int position) { // unpacked from long, and make original timestamp object. long l = packed[arrayOffset + position]; MysqlDateTime t = TimeStorage.readTimestamp(l); @@ -81,30 +210,45 @@ public OriginalTimestamp getTimestamp(int position) { @Override public Object getObject(int position) { - return isNull(position) ? null : getTimestamp(position); + position = realPositionOf(position); + return getObjectInner(position); + } + + private Object getObjectInner(int position) { + return isNullInner(position) ? null : getTimestampInner(position); } @Override public Object getObjectForCmp(int position) { - return isNull(position) ? null : packed[arrayOffset + position]; + position = realPositionOf(position); + return getObjectForCmpInner(position); + } + + private Object getObjectForCmpInner(int position) { + return isNullInner(position) ? null : packed[arrayOffset + position]; } @Override public void writePositionTo(int position, BlockBuilder blockBuilder) { - if (isNull(position)) { - blockBuilder.appendNull(); - } else { - // write packed long. 
- blockBuilder.writeLong(packed[arrayOffset + position]); - } + position = realPositionOf(position); + writePositionToInner(position, blockBuilder); } @Override public int hashCode(int position) { - if (isNull(position)) { - return 0; + position = realPositionOf(position); + return hashCodeInner(position); + } + + @Override + public long hashCodeUseXxhash(int pos) { + int realPos = realPositionOf(pos); + if (isNull(realPos)) { + return NULL_HASH_CODE; + } else { + byte[] rawBytes = getTimestampInner(realPos).toString().getBytes(StandardCharsets.UTF_8); + return XxHash64.hash(rawBytes, 0, rawBytes.length); } - return Long.hashCode(packed[arrayOffset + position]); } public long[] getPacked() { @@ -113,43 +257,32 @@ public long[] getPacked() { @Override public void addToHasher(IStreamingHasher sink, int position) { - if (isNull(position)) { - sink.putBytes(NULL_VALUE_FOR_HASHER); - } else { - sink.putString(getTimestamp(position).toString()); - } + position = realPositionOf(position); + addToHasherInner(sink, position); } public DataType getDataType() { return dataType; } - @Override - public long getPackedLong(int position) { - // assume not null - checkReadablePosition(position); - - long l = packed[arrayOffset + position]; - return l; - } - public TimeZone getTimezone() { return timezone; } @Override public boolean equals(int position, Block other, int otherPosition) { + position = realPositionOf(position); if (other instanceof TimestampBlock) { - return equals(position, (TimestampBlock) other, otherPosition); + return equalsInner(position, other.cast(TimestampBlock.class), otherPosition); } else if (other instanceof TimestampBlockBuilder) { - return equals(position, (TimestampBlockBuilder) other, otherPosition); + return equalsInner(position, (TimestampBlockBuilder) other, otherPosition); } else { throw new AssertionError(); } } - boolean equals(int position, TimestampBlock other, int otherPosition) { - boolean n1 = isNull(position); + private boolean equalsInner(int position, TimestampBlock other, int otherPosition) { + boolean n1 = isNullInner(position); boolean n2 = other.isNull(otherPosition); if (n1 && n2) { return true; @@ -158,13 +291,13 @@ boolean equals(int position, TimestampBlock other, int otherPosition) { } // by packed long value - long l1 = getPackedLong(position); + long l1 = getPackedLongInner(position); long l2 = other.getPackedLong(otherPosition); return l1 == l2; } - boolean equals(int position, TimestampBlockBuilder other, int otherPosition) { - boolean n1 = isNull(position); + private boolean equalsInner(int position, TimestampBlockBuilder other, int otherPosition) { + boolean n1 = isNullInner(position); boolean n2 = other.isNull(otherPosition); if (n1 && n2) { return true; @@ -173,11 +306,57 @@ boolean equals(int position, TimestampBlockBuilder other, int otherPosition) { } // by packed long value - long l1 = getPackedLong(position); + long l1 = getPackedLongInner(position); long l2 = other.getPackedLong(otherPosition); return l1 == l2; } + public int[] getSelection() { + return selection; + } + + public void writeLong(SliceOutput sliceOutput, int position) { + position = realPositionOf(position); + sliceOutput.writeLong(getPackedLongInner(position)); + } + + public long getPackedLong(int position) { + position = realPositionOf(position); + return packed[arrayOffset + position]; + } + + private long getPackedLongInner(int position) { + return packed[arrayOffset + position]; + } + + private int hashCodeInner(int position) { + if (isNullInner(position)) { + return 0; 
+ } + return Long.hashCode(getPackedLongInner(position)); + } + + private void writePositionToInner(int position, BlockBuilder blockBuilder) { + if (isNullInner(position)) { + blockBuilder.appendNull(); + } else { + // write packed long. + blockBuilder.writeLong(getPackedLongInner(position)); + } + } + + private void addToHasherInner(IStreamingHasher sink, int position) { + if (isNullInner(position)) { + sink.putBytes(NULL_VALUE_FOR_HASHER); + } else { + sink.putString(getTimestampInner(position).toString()); + } + } + + private boolean isNullInner(int position) { + return isNull != null && isNull[position + arrayOffset]; + } + @Override public void updateSizeInfo() { estimatedSize = INSTANCE_SIZE + sizeOf(isNull) + sizeOf(packed); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/TimestampBlockEncoding.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/TimestampBlockEncoding.java index b432708f6..bda382d45 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/TimestampBlockEncoding.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/TimestampBlockEncoding.java @@ -49,7 +49,7 @@ public String getName() { @Override public void writeBlock(SliceOutput sliceOutput, Block block) { - TimestampBlock timestampBlock = (TimestampBlock) block; + TimestampBlock timestampBlock = block.cast(TimestampBlock.class); // write scale value DataType dataType = timestampBlock.getDataType(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ULongBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ULongBlock.java index ac2cda6c2..ff881bb97 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ULongBlock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ULongBlock.java @@ -18,6 +18,7 @@ import com.alibaba.polardbx.common.datatype.UInt64; import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.XxhashUtils; import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; @@ -59,6 +60,13 @@ public ULongBlock(int arrayOffset, int positionCount, boolean[] valueIsNull, lon updateSizeInfo(); } + public static ULongBlock from(ULongBlock other, int selSize, int[] selection) { + return new ULongBlock(0, + selSize, + BlockUtils.copyNullArray(other.isNull, selection, selSize), + BlockUtils.copyLongArray(other.values, selection, selSize)); + } + @Override public BigInteger getBigInteger(int position) { return getUInt64(position).toBigInteger(); @@ -123,6 +131,15 @@ public int hashCode(int position) { return Long.hashCode(values[position + arrayOffset]); } + @Override + public long hashCodeUseXxhash(int pos) { + if (isNull(pos)) { + return NULL_HASH_CODE; + } else { + return XxhashUtils.finalShuffle(values[pos + arrayOffset]); + } + } + @Override public int[] hashCodeVector() { if (mayHaveNull()) { @@ -136,6 +153,17 @@ public int[] hashCodeVector() { return hashes; } + @Override + public void hashCodeVector(int[] results, int positionCount) { + if (mayHaveNull()) { + super.hashCodeVector(results, positionCount); + return; + } + for (int position = 0; position < positionCount; position++) { + results[position] = Long.hashCode(values[position + arrayOffset]); + } + } + @Override public DataType getType() { return DataTypes.ULongType; @@ -144,7 
+172,7 @@ public DataType getType() {
 
     @Override
     public void copySelected(boolean selectedInUse, int[] sel, int size, RandomAccessBlock output) {
         if (output instanceof ULongBlock) {
-            ULongBlock outputVectorSlot = (ULongBlock) output;
+            ULongBlock outputVectorSlot = output.cast(ULongBlock.class);
             if (selectedInUse) {
                 for (int i = 0; i < size; i++) {
                     int j = sel[i];
@@ -165,7 +193,7 @@ public void shallowCopyTo(RandomAccessBlock another) {
         if (!(another instanceof ULongBlock)) {
             GeneralUtil.nestedException("cannot shallow copy to " + another == null ? null : another.toString());
         }
-        ULongBlock vectorSlot = (ULongBlock) another;
+        ULongBlock vectorSlot = another.cast(ULongBlock.class);
         super.shallowCopyTo(vectorSlot);
         vectorSlot.values = values;
     }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ULongBlockBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ULongBlockBuilder.java
index 068f44413..f9c5a3c1c 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ULongBlockBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/ULongBlockBuilder.java
@@ -73,6 +73,10 @@ public void writeObject(Object value) {
             appendNull();
             return;
         }
+        if (value instanceof Long) {
+            writeLong((Long) value);
+            return;
+        }
         Preconditions.checkArgument(value instanceof UInt64);
         writeUInt64((UInt64) value);
     }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/columnar/BlockLoader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/columnar/BlockLoader.java
new file mode 100644
index 000000000..c88fab1e8
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/columnar/BlockLoader.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.chunk.columnar;
+
+import com.alibaba.polardbx.executor.chunk.Block;
+import com.alibaba.polardbx.executor.operator.scan.CacheReader;
+import com.alibaba.polardbx.executor.operator.scan.ColumnReader;
+import com.alibaba.polardbx.optimizer.core.datatype.DataType;
+
+import java.io.IOException;
+
+/**
+ * A block-level loader of one column in one row group.
+ * Several block-level loaders will share the column reader spanning multiple row groups.
+ */
+public interface BlockLoader {
+    /**
+     * Trigger the loading process.
+     */
+    Block load(DataType dataType, int[] selection, int selSize) throws IOException;
+
+    /**
+     * Get the column reader inside this block loader.
+     * Several block-level loaders will share the column reader spanning multiple row groups.
+     *
+     * @return Column reader.
+ */ + ColumnReader getColumnReader(); + + CacheReader getCacheReader(); + + int startPosition(); + + int positionCount(); +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/columnar/CommonLazyBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/columnar/CommonLazyBlock.java new file mode 100644 index 000000000..d25d83998 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/columnar/CommonLazyBlock.java @@ -0,0 +1,649 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.chunk.columnar; + +import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.bloomfilter.RFBloomFilter; +import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; +import com.alibaba.polardbx.executor.archive.reader.OSSColumnTransformer; +import com.alibaba.polardbx.executor.archive.reader.TypeComparison; +import com.alibaba.polardbx.executor.chunk.AbstractBlock; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.BlockConverter; +import com.alibaba.polardbx.executor.chunk.BlockUtils; +import com.alibaba.polardbx.executor.chunk.CastableBlock; +import com.alibaba.polardbx.executor.chunk.Converters; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.operator.scan.ColumnReader; +import com.alibaba.polardbx.executor.operator.scan.impl.DictionaryMapping; +import com.alibaba.polardbx.executor.operator.util.DriverObjectPool; +import com.alibaba.polardbx.executor.operator.util.TypedList; +import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.executor.accumulator.state.NullableLongGroupState; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.math.BigInteger; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Date; +import java.sql.Time; +import java.sql.Timestamp; +import java.util.BitSet; +import java.util.TimeZone; +import java.util.concurrent.atomic.AtomicInteger; + +public class CommonLazyBlock implements LazyBlock { + private static final Logger LOGGER = LoggerFactory.getLogger("oss"); + public static final int REF_QUOTA_NUM = 1; + private final DataType targetType; + + private final OSSColumnTransformer columnTransformer; + private final int colId; + + /** + * Loader for memory allocation and parsing. 
+ */ + private final BlockLoader blockLoader; + + private final ColumnReader columnReader; + + private final boolean useSelection; + private final boolean enableCompatible; + + private final AtomicInteger refQuota; + private TimeZone timeZone; + + /** + * Internal block implementation. + */ + private AbstractBlock block; + + /** + * Has this lazy block been loaded. + */ + private boolean isLoaded; + + /** + * Store the exception during loading. + */ + private Exception loadingException; + + private int[] selection; + private int selSize; + + private ExecutionContext context; + + public CommonLazyBlock(DataType targetType, BlockLoader blockLoader, ColumnReader columnReader, + boolean useSelection, boolean enableCompatible, TimeZone timeZone, + ExecutionContext context, int colId, OSSColumnTransformer ossColumnTransformer) { + this.blockLoader = blockLoader; + this.targetType = targetType; + this.columnReader = columnReader; + this.useSelection = useSelection; + this.enableCompatible = enableCompatible; + this.timeZone = timeZone; + + this.refQuota = new AtomicInteger(REF_QUOTA_NUM); + + if (this.columnReader != null) { + this.columnReader.retainRef(REF_QUOTA_NUM); + } + + this.isLoaded = false; + this.block = null; + this.loadingException = null; + this.context = context; + this.colId = colId; + this.columnTransformer = ossColumnTransformer; + } + + public void setSelection(int[] selection, int selSize) { + this.selection = selection; + this.selSize = selSize; + + // for filter column that has been loaded. + if (isLoaded) { + this.block = fillSelection(block); + } + } + + @Override + public Block getLoaded() { + return block; + } + + @Override + public void load() { + try { + // Not concurrency-safe. + if (loadingException != null) { + throw GeneralUtil.nestedException(loadingException); + } + if (!isLoaded) { + try { + // It may invoke parsing or just fetch cached block. 
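+                    // Callers normally never invoke load() directly: every accessor of
+                    // this block (isNull, getLong, ...) calls load() first, so a lazy
+                    // column is only materialized on first access. A sketch (getBlock()
+                    // naming is assumed here, it is not part of this file):
+                    //   Block block = chunk.getBlock(columnIndex);  // may be a CommonLazyBlock
+                    //   long value = block.getLong(position);       // load() runs here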
+ // If the types are not identical, the zero copy will be nonsense + AbstractBlock result = loadAndTransformDataType(); + + if (result != null && selection != null && selSize >= 0) { + // For zero copy + this.block = fillSelection(result); + } else { + this.block = BlockUtils.wrapNullSelection(result, true, enableCompatible, timeZone); + } + + } catch (IOException e) { + loadingException = e; + throw GeneralUtil.nestedException(e); + } + isLoaded = true; + } + } finally { + releaseRef(); + } + } + + private AbstractBlock loadAndTransformDataType() throws IOException { + ColumnMeta sourceColumnMeta = columnTransformer.getSourceColumnMeta(colId); + ColumnMeta targetColumnMeta = columnTransformer.getTargetColumnMeta(colId); + TypeComparison comparison = columnTransformer.getCompareResult(colId); + + switch (comparison) { + case MISSING_EQUAL: + return (AbstractBlock) OSSColumnTransformer.fillDefaultValue( + targetType, + columnTransformer.getInitColumnMeta(colId), + columnTransformer.getTimeStamp(colId), + // TODO(siyun): while using selection, this could be optimized + blockLoader.positionCount(), + context + ); + case MISSING_NO_EQUAL: + return (AbstractBlock) OSSColumnTransformer.fillDefaultValueAndTransform( + targetColumnMeta, + columnTransformer.getInitColumnMeta(colId), + blockLoader.positionCount(), + context + ); + default: + DataType sourceType = sourceColumnMeta.getDataType(); + AbstractBlock sourceBlock = (AbstractBlock) blockLoader.load(sourceType, selection, selSize); + BlockConverter converter = Converters.createBlockConverter(sourceType, targetType, context); + return (AbstractBlock) converter.apply(sourceBlock); + } + } + + @Override + public void releaseRef() { + if (columnReader == null) { + return; + } + // get ref quota and release ref from column reader. + int ref = refQuota.getAndDecrement(); + if (ref > 0) { + columnReader.releaseRef(ref); + } + // Check status and ref count of column reader, and try to close it. + if (columnReader.hasNoMoreBlocks() && columnReader.refCount() <= 0) { + columnReader.close(); + } + } + + private AbstractBlock fillSelection(AbstractBlock result) { + return BlockUtils.fillSelection(result, selection, selSize, useSelection, enableCompatible, timeZone); + } + + @Override + public int getPositionCount() { + // don't load + return selection != null ? 
selSize : blockLoader.positionCount(); + } + + @Override + public long getElementUsedBytes() { + // todo + return getPositionCount() * 8; + } + + @Override + public long estimateSize() { + // todo + return getPositionCount() * 8; + } + + @Override + public DataType getType() { + // don't load + return targetType; + } + + @Override + public boolean hasVector() { + return block != null; + } + + @Override + public BlockLoader getLoader() { + return blockLoader; + } + + @Override + public T cast(Class clazz) { + load(); + if (!clazz.isInstance(getLoaded())) { + throw GeneralUtil.nestedException(new ClassCastException()); + } + return (T) getLoaded(); + } + + @Override + public boolean isInstanceOf(Class clazz) { + load(); + return clazz.isInstance(getLoaded()); + } + + @Override + public boolean isNull(int position) { + load(); + return block.isNull(position); + } + + @Override + public byte getByte(int position) { + load(); + return block.getByte(position); + } + + @Override + public short getShort(int position) { + load(); + return block.getShort(position); + } + + @Override + public int getInt(int position) { + load(); + return block.getInt(position); + } + + @Override + public long getLong(int position) { + load(); + return block.getLong(position); + } + + @Override + public double getDouble(int position) { + load(); + return block.getDouble(position); + } + + @Override + public float getFloat(int position) { + load(); + return block.getFloat(position); + } + + @Override + public Timestamp getTimestamp(int position) { + load(); + return block.getTimestamp(position); + } + + @Override + public Date getDate(int position) { + load(); + return block.getDate(position); + } + + @Override + public Time getTime(int position) { + load(); + return block.getTime(position); + } + + @Override + public String getString(int position) { + load(); + return block.getString(position); + } + + @Override + public Decimal getDecimal(int position) { + load(); + return block.getDecimal(position); + } + + @Override + public BigInteger getBigInteger(int position) { + load(); + return block.getBigInteger(position); + } + + @Override + public boolean getBoolean(int position) { + load(); + return block.getBoolean(position); + } + + @Override + public byte[] getByteArray(int position) { + load(); + return block.getByteArray(position); + } + + @Override + public Blob getBlob(int position) { + load(); + return block.getBlob(position); + } + + @Override + public Clob getClob(int position) { + load(); + return block.getClob(position); + } + + @Override + public int hashCode(int position) { + load(); + return block.hashCode(position); + } + + @Override + public long hashCodeUseXxhash(int pos) { + load(); + return block.hashCodeUseXxhash(pos); + } + + @Override + public int checksum(int position) { + load(); + return block.checksum(position); + } + + @Override + public int[] hashCodeVector() { + load(); + return block.hashCodeVector(); + } + + @Override + public void hashCodeVector(int[] results, int positionCount) { + load(); + block.hashCodeVector(results, positionCount); + } + + @Override + public boolean equals(int position, Block other, int otherPosition) { + load(); + if (other instanceof CommonLazyBlock) { + ((CommonLazyBlock) other).load(); + return block.equals(position, ((CommonLazyBlock) other).block, otherPosition); + } + return block.equals(position, other, otherPosition); + } + + @Override + public boolean mayHaveNull() { + load(); + return block.mayHaveNull(); + } + + @Override + public Object getObject(int 
position) { + load(); + return block.getObject(position); + } + + @Override + public void writePositionTo(int position, BlockBuilder blockBuilder) { + load(); + block.writePositionTo(position, blockBuilder); + } + + @Override + public void addToHasher(IStreamingHasher sink, int position) { + load(); + block.addToHasher(sink, position); + } + + @Override + public Object getObjectForCmp(int position) { + load(); + return block.getObjectForCmp(position); + } + + @Override + public void setIsNull(boolean[] isNull) { + load(); + block.setIsNull(isNull); + } + + @Override + public boolean hasNull() { + load(); + return block.hasNull(); + } + + @Override + public void setHasNull(boolean hasNull) { + load(); + block.setHasNull(hasNull); + } + + @Override + public boolean[] nulls() { + load(); + return block.nulls(); + } + + @Override + public String getDigest() { + load(); + return block.getDigest(); + } + + @Override + public void copySelected(boolean selectedInUse, int[] sel, int size, RandomAccessBlock output) { + load(); + block.copySelected(selectedInUse, sel, size, output); + } + + @Override + public void shallowCopyTo(RandomAccessBlock another) { + load(); + block.shallowCopyTo(another); + } + + @Override + public Object elementAt(int position) { + load(); + return block.elementAt(position); + } + + @Override + public void setElementAt(int position, Object element) { + load(); + block.setElementAt(position, element); + } + + @Override + public void resize(int positionCount) { + load(); + block.resize(positionCount); + } + + @Override + public void compact(int[] selection) { + load(); + block.compact(selection); + } + + @Override + public void collectNulls(int positionOffset, int positionCount, BitSet nullBitmap, int targetOffset) { + load(); + block.collectNulls(positionOffset, positionCount, nullBitmap, targetOffset); + } + + @Override + public void copyToIntArray(int positionOffset, int positionCount, int[] targetArray, int targetOffset, + DictionaryMapping dictionaryMapping) { + load(); + block.copyToIntArray(positionOffset, positionCount, targetArray, targetOffset, dictionaryMapping); + } + + @Override + public void copyToLongArray(int positionOffset, int positionCount, long[] targetArray, int targetOffset) { + load(); + block.copyToLongArray(positionOffset, positionCount, targetArray, targetOffset); + } + + @Override + public void sum(int[] groupSelected, int selSize, long[] results) { + load(); + block.sum(groupSelected, selSize, results); + } + + @Override + public void sum(int startIndexIncluded, int endIndexExcluded, long[] results) { + load(); + block.sum(startIndexIncluded, endIndexExcluded, results); + } + + @Override + public void sum(int startIndexIncluded, int endIndexExcluded, long[] sumResultArray, int[] sumStatusArray, + int[] normalizedGroupIds) { + load(); + block.sum(startIndexIncluded, endIndexExcluded, sumResultArray, sumStatusArray, normalizedGroupIds); + } + + @Override + public void appendTypedHashTable(TypedList typedList, int sourceIndex, int startIndexIncluded, + int endIndexExcluded) { + load(); + block.appendTypedHashTable(typedList, sourceIndex, startIndexIncluded, endIndexExcluded); + } + + @Override + public void count(int[] groupIds, int[] probePositions, int selSize, NullableLongGroupState state) { + load(); + block.count(groupIds, probePositions, selSize, state); + } + + @Override + public void recycle() { + load(); + block.recycle(); + } + + @Override + public boolean isRecyclable() { + load(); + return block.isRecyclable(); + } + + @Override + public 
void setRecycler(DriverObjectPool.Recycler recycler) { + load(); + block.setRecycler(recycler); + } + + @Override + public void addIntToBloomFilter(int totalPartitionCount, RFBloomFilter[] RFBloomFilters) { + load(); + block.addIntToBloomFilter(totalPartitionCount, RFBloomFilters); + } + + @Override + public void addIntToBloomFilter(RFBloomFilter RFBloomFilter) { + load(); + block.addIntToBloomFilter(RFBloomFilter); + } + + @Override + public int mightContainsInt(RFBloomFilter RFBloomFilter, boolean[] bitmap) { + load(); + return block.mightContainsInt(RFBloomFilter, bitmap); + } + + @Override + public int mightContainsInt(RFBloomFilter RFBloomFilter, boolean[] bitmap, boolean isConjunctive) { + load(); + return block.mightContainsInt(RFBloomFilter, bitmap, isConjunctive); + } + + @Override + public int mightContainsInt(int totalPartitionCount, RFBloomFilter[] RFBloomFilters, boolean[] bitmap, + boolean isPartitionConsistent) { + load(); + return block.mightContainsInt(totalPartitionCount, RFBloomFilters, bitmap, isPartitionConsistent); + } + + @Override + public int mightContainsInt(int totalPartitionCount, RFBloomFilter[] RFBloomFilters, boolean[] bitmap, + boolean isPartitionConsistent, boolean isConjunctive) { + load(); + return block.mightContainsInt(totalPartitionCount, RFBloomFilters, bitmap, isPartitionConsistent, + isConjunctive); + } + + @Override + public void addLongToBloomFilter(int totalPartitionCount, RFBloomFilter[] RFBloomFilters) { + load(); + block.addLongToBloomFilter(totalPartitionCount, RFBloomFilters); + } + + @Override + public void addLongToBloomFilter(RFBloomFilter RFBloomFilter) { + load(); + block.addLongToBloomFilter(RFBloomFilter); + } + + @Override + public int mightContainsLong(RFBloomFilter RFBloomFilter, boolean[] bitmap) { + load(); + return block.mightContainsLong(RFBloomFilter, bitmap); + } + + @Override + public int mightContainsLong(RFBloomFilter RFBloomFilter, boolean[] bitmap, boolean isConjunctive) { + load(); + return block.mightContainsLong(RFBloomFilter, bitmap, isConjunctive); + } + + @Override + public int mightContainsLong(int totalPartitionCount, RFBloomFilter[] RFBloomFilters, boolean[] bitmap, + boolean isPartitionConsistent) { + load(); + return block.mightContainsLong(totalPartitionCount, RFBloomFilters, bitmap, isPartitionConsistent); + } + + @Override + public int mightContainsLong(int totalPartitionCount, RFBloomFilter[] RFBloomFilters, boolean[] bitmap, + boolean isPartitionConsistent, boolean isConjunctive) { + load(); + return block.mightContainsLong(totalPartitionCount, RFBloomFilters, bitmap, isPartitionConsistent, + isConjunctive); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/columnar/LazyBlock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/columnar/LazyBlock.java new file mode 100644 index 000000000..eebdb5fbf --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/columnar/LazyBlock.java @@ -0,0 +1,47 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.chunk.columnar;
+
+import com.alibaba.polardbx.executor.chunk.Block;
+import com.alibaba.polardbx.executor.chunk.RandomAccessBlock;
+
+/**
+ * A data block with delayed materialization.
+ */
+public interface LazyBlock extends RandomAccessBlock, Block {
+    /**
+     * Get the materialized internal block.
+     */
+    Block getLoaded();
+
+    /**
+     * Load the block data lazily.
+     */
+    void load();
+
+    /**
+     * Whether the internal block is already materialized.
+     */
+    boolean hasVector();
+
+    BlockLoader getLoader();
+
+    /**
+     * Release the reference of the column reader used by this lazy block.
+     */
+    void releaseRef();
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/engine/AsyncDDLJobScheduler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/columnar/LazyBlockUtils.java
similarity index 100%
rename from polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/engine/AsyncDDLJobScheduler.java
rename to polardbx-executor/src/main/java/com/alibaba/polardbx/executor/chunk/columnar/LazyBlockUtils.java
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/ByteCSVReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/ByteCSVReader.java
new file mode 100644
index 000000000..4db00b483
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/ByteCSVReader.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.columnar;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+
+public class ByteCSVReader {
+    private final ByteBuffer lengthBuffer = ByteBuffer.allocate(Integer.BYTES);
+    private final InputStream inputStream;
+
+    private final String csvFileName;
+
+    private long currentPosition;
+    private final long fileEndOffset;
+
+    public ByteCSVReader(String csvFileName, InputStream inputStream) throws IOException {
+        this.csvFileName = csvFileName;
+        this.inputStream = inputStream;
+        this.currentPosition = 0;
+        this.fileEndOffset = inputStream.available();
+    }
+
+    public boolean isReadable() {
+        return currentPosition < fileEndOffset;
+    }
+
+    public CSVRow nextRow() throws IOException {
+        lengthBuffer.clear();
+        canReadFileLength(Integer.BYTES);
+        readFully(lengthBuffer.array(), 0, Integer.BYTES);
+
+        int length = lengthBuffer.getInt();
+        byte[] data = new byte[length];
+        canReadFileLength(length);
+        readFully(data, 0, length);
+
+        currentPosition += (Integer.BYTES + length);
+
+        return CSVRow.deserialize(data);
+    }
+
+    /**
+     * InputStream.read() may return fewer bytes than requested, so loop until
+     * exactly {@code len} bytes have been read.
+     */
+    private void readFully(byte[] buffer, int offset, int len) throws IOException {
+        int n = 0;
+        while (n < len) {
+            int count = inputStream.read(buffer, offset + n, len - n);
+            if (count < 0) {
+                throw new IOException(
+                    String.format("%s is truncated: unexpected end of stream", csvFileName));
+            }
+            n += count;
+        }
+    }
+
+    public void close() throws IOException {
+        if (inputStream != null) {
+            inputStream.close();
+        }
+    }
+
+    private void canReadFileLength(int needRead) throws IOException {
+        if (currentPosition + needRead > fileEndOffset) {
+            throw new IOException(
+                String.format("%s: failed to read %d bytes! current position: %d, end position: %d", csvFileName,
+                    needRead, currentPosition, fileEndOffset));
+        }
+    }
+
+    public long position() {
+        return currentPosition;
+    }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/CSVFileReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/CSVFileReader.java
new file mode 100644
index 000000000..f38db1b10
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/CSVFileReader.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.columnar;
+
+import com.alibaba.polardbx.common.Engine;
+import com.alibaba.polardbx.executor.chunk.Chunk;
+import com.alibaba.polardbx.optimizer.config.table.ColumnMeta;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Read .csv files in the given storage engine into memory as a list of chunks.
+ */
+public interface CSVFileReader extends Closeable {
+    /**
+     * Read fully when length == EOF (end of file).
+     */
+    int EOF = -1;
+
+    /**
+     * Open the .csv file resource.
+     *
+     * @param context execution context.
+     * @param columnMetas the column metas of the .csv file, including the implicit columns.
+     * @param chunkLimit maximum chunk size to fetch at a time.
+     * @param engine storage engine of the .csv file
+     * @param csvFileName csv file name (without uri prefix like oss://dir/)
+     * @param offset file offset to read from
+     * @param length file length to read
+     * @throws IOException on IO failure
+     */
+    void open(ExecutionContext context,
+              List<ColumnMeta> columnMetas,
+              int chunkLimit,
+              Engine engine,
+              String csvFileName,
+              int offset,
+              int length) throws IOException;
+
+    /**
+     * Fetch the next chunk under the size limit.
+     *
+     * @return executor chunk
+     */
+    Chunk next();
+
+    /**
+     * Fetch the next chunk, stopping before the given file position.
+     */
+    Chunk nextUntilPosition(long pos);
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/CSVRow.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/CSVRow.java
new file mode 100644
index 000000000..753d35800
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/CSVRow.java
@@ -0,0 +1,267 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.columnar;
+
+import com.alibaba.polardbx.optimizer.core.CursorMeta;
+import com.alibaba.polardbx.optimizer.core.datatype.DataType;
+import com.alibaba.polardbx.optimizer.core.row.Row;
+import com.google.common.base.Preconditions;
+import io.airlift.slice.Slice;
+import io.airlift.slice.Slices;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.sql.Blob;
+import java.sql.Clob;
+import java.sql.Date;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.util.List;
+
+public class CSVRow implements Row {
+    /**
+     * The continuous memory structure for the whole row.
+     */
+    private final Slice data;
+
+    /**
+     * The offsets of each column in the row.
+     */
+    private final int[] offsets;
+
+    /**
+     * The null flags of each column in the row.
+     */
+    private final byte[] nulls;
+
+    /**
+     * The count of fields.
+     */
+    private final int fieldNum;
+
+    public CSVRow(Slice data, int[] offsets, byte[] nulls, int fieldNum) {
+        this.data = data;
+        this.offsets = offsets;
+        this.nulls = nulls;
+        this.fieldNum = fieldNum;
+    }
+
+    public int getFieldNum() {
+        return fieldNum;
+    }
+
+    private int beginOffset(int position) {
+        return position > 0 ? offsets[position - 1] : 0;
+    }
+
+    private int endOffset(int position) {
+        return offsets[position];
+    }
+
+    @Override
+    public byte[] getBytes(int pos) {
+        int beginOffset = beginOffset(pos);
+        int endOffset = endOffset(pos);
+
+        return data.getBytes(beginOffset, endOffset - beginOffset);
+    }
+
+    @Override
+    public Long getLong(int pos) {
+        int beginOffset = beginOffset(pos);
+        int endOffset = endOffset(pos);
+
+        // Check data length equal to Long.BYTES
+        Preconditions.checkArgument(beginOffset + Long.BYTES == endOffset);
+
+        return data.getLong(beginOffset);
+    }
+
+    @Override
+    public Integer getInteger(int pos) {
+        int beginOffset = beginOffset(pos);
+        int endOffset = endOffset(pos);
+
+        // Check data length equal to Integer.BYTES
+        Preconditions.checkArgument(beginOffset + Integer.BYTES == endOffset);
+
+        return data.getInt(beginOffset);
+    }
+
+    public int sizeInBytes() {
+        return data.length();
+    }
+
+    public boolean isNullAt(int pos) {
+        return nulls[pos] == 1;
+    }
+
+    @Override
+    public Object getObject(int index) {
+        return null;
+    }
+
+    @Override
+    public void setObject(int index, Object value) {
+
+    }
+
+    @Override
+    public List<Object> getValues() {
+        return null;
+    }
+
+    @Override
+    public List<byte[]> getBytes() {
+        return null;
+    }
+
+    @Override
+    public String getString(int index) {
+        return null;
+    }
+
+    @Override
+    public Boolean getBoolean(int index) {
+        return null;
+    }
+
+    @Override
+    public Short getShort(int index) {
+        return null;
+    }
+
+    @Override
+    public Float getFloat(int index) {
+        return null;
+    }
+
+    @Override
+    public Double getDouble(int index) {
+        return null;
+    }
+
+    @Override
+    public Byte getByte(int index) {
+        return null;
+    }
+
+    @Override
+    public BigDecimal getBigDecimal(int index) {
+        return null;
+    }
+
+    @Override
+    public Time getTime(int index) {
+        return null;
+    }
+
+    @Override
+    public Date getDate(int index) {
+        return null;
+    }
+
+    @Override
+    public Timestamp getTimestamp(int index) {
+        return null;
+    }
+
+    @Override
+    public Blob getBlob(int index) {
+        return null;
+    }
+
+    @Override
+    public Clob getClob(int index) {
+        return null;
+    }
+
+    @Override
+    public byte[] getBytes(int index, String encoding) {
+        return new byte[0];
+    }
+
+    @Override
+    public byte[] getBytes(DataType dataType, int index, String encoding) {
+        return new byte[0];
+    }
+
+    @Override
+    public CursorMeta getParentCursorMeta() {
+        return null;
+    }
+
+    @Override
+    public void setCursorMeta(CursorMeta cursorMeta) {
+
+    }
+
+    @Override
+    public int getColNum() {
+        return 0;
+    }
+
+    @Override
+    public long estimateSize() {
+        return 0;
+    }
+
+    /**
+     * Serialized layout: [fieldNum:int][nulls:fieldNum bytes][offsets:fieldNum ints][data].
+     */
+    public byte[] serialize() throws IOException {
+        int size = Integer.BYTES + Byte.BYTES * fieldNum + Integer.BYTES * fieldNum + data.length();
+        Slice sliceOutput = Slices.allocate(size);
+
+        int pos = 0;
+        // write fieldNum
+        sliceOutput.setInt(0, fieldNum);
+        pos += Integer.BYTES;
+
+        // write nulls
+        sliceOutput.setBytes(pos, nulls);
+        pos += Byte.BYTES * fieldNum;
+
+        // write offsets
+        for (int offset : offsets) {
+            sliceOutput.setInt(pos, offset);
+            pos += Integer.BYTES;
+        }
+
+        // write data
+        sliceOutput.setBytes(pos, data.getBytes());
+        return sliceOutput.getBytes();
+    }
+
+    public static CSVRow deserialize(byte[] values) throws IOException {
+        Slice data = Slices.wrappedBuffer(values);
+        int pos = 0;
+
+        int fieldNum = data.getInt(pos);
+        pos += Integer.BYTES;
+
+        byte[] nulls = data.getBytes(pos, fieldNum);
+        pos += (Byte.BYTES * fieldNum);
+
+        int[] offsets = new int[fieldNum];
+        for (int i = 0; i < fieldNum; i++) {
+            offsets[i] = data.getInt(pos);
+            pos += Integer.BYTES;
+        }
+
+        Slice realData = Slices.wrappedBuffer(data.getBytes(pos, data.length() - pos));
+
+        return new CSVRow(realData, offsets, nulls, fieldNum);
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/CSVRowReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/CSVRowReader.java
new file mode 100644
index 000000000..4f83e70c4
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/CSVRowReader.java
@@ -0,0 +1,196 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.columnar;
+
+import com.alibaba.polardbx.common.utils.GeneralUtil;
+import io.airlift.slice.DynamicSliceOutput;
+import io.airlift.slice.Slice;
+import io.airlift.slice.SliceOutput;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.text.MessageFormat;
+import java.util.Arrays;
+
+public class CSVRowReader {
+    private static final int BUFFER_INITIAL_SIZE = 1 << 10;
+
+    private final String csvFileName;
+    private InputStream inputStream;
+    private int fieldNum;
+
+    private int currentPosition;
+    private final int fileEndOffset;
+
+    private static final int END_OF_LINE_LENGTH = 1;
+    private SliceOutput fileBufferOutput;
+    private Slice fileBuffer;
+
+    public CSVRowReader(String csvFileName, InputStream inputStream, int fieldNum) throws IOException {
+        this.csvFileName = csvFileName;
+        this.inputStream = inputStream;
+        this.fieldNum = fieldNum;
+
+        this.currentPosition = 0;
+        this.fileEndOffset = inputStream.available();
+
+        this.fileBufferOutput = new DynamicSliceOutput(fieldNum * Byte.BYTES);
+    }
+
+    public boolean isReadable() {
+        return currentPosition < fileEndOffset;
+    }
+
+    public CSVRow nextRow() throws IOException {
+        if (!isReadable()) {
+            return null;
+        }
+
+        int endOffset = nextLine();
+
+        SliceOutput buffer = new DynamicSliceOutput(BUFFER_INITIAL_SIZE);
+        int currentOffset = currentPosition;
+
+        // For column offsets.
+        int[] offsets = new int[fieldNum];
+
+        // For null values.
+        byte[] nulls = new byte[fieldNum];
+        Arrays.fill(nulls, (byte) 0);
+
+        for (int columnIndex = 0; columnIndex < fieldNum; columnIndex++) {
+            if (currentOffset >= endOffset) {
+                throw GeneralUtil.nestedException("bad format for this csv file");
+            }
+
+            // Record the beginning position of the current column.
+            final int columnBeginPosition = currentOffset;
+
+            byte currentByte = nextByte(currentOffset);
+
+            // Handle the case where the first character is a quote.
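+            // Quoting and escaping rules handled below (illustrative; each example
+            // shows the raw field bytes and the decoded value):
+            //   "a,b"  -> a,b  (the comma is kept, the quotes are stripped)
+            //   "a\nb" -> a, LF, b  (two-character escapes are decoded)
+            //   ""     -> empty byte array (while an absent field means NULL)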
+ if (currentByte == '"') { + // Increment past the first quote + currentOffset++; + + // Loop through the row to extract the values for the current field + for (; currentOffset < endOffset; currentOffset++) { + currentByte = nextByte(currentOffset); + + // Check for end of the current field + if (currentByte == '"' + && (currentOffset == endOffset - 1 + || nextByte(currentOffset + 1) == ',')) { + + // Move past the ',' and the '"' + // It means the empty byte array. + currentOffset += 2; + break; + } + + if (currentByte == '\\' && currentOffset != (endOffset - 1)) { + currentOffset++; + currentByte = nextByte(currentOffset); + + if (currentByte == 'r') { + buffer.appendByte('\r'); + } else if (currentByte == 'n') { + buffer.appendByte('\n'); + } else if (currentByte == '\\' || currentByte == '"') { + buffer.appendByte(currentByte); + } else { + buffer.appendByte('\\'); + buffer.appendByte(currentByte); + } + } else { + if (currentOffset == endOffset - 1) { + // If we are at final symbol and no last quote was found, + // we are working with a damaged file. + throw GeneralUtil.nestedException("bad format error for this csv file"); + } + // For ordinary symbol + buffer.appendByte(currentByte); + } + } + } else { + for (; currentOffset < endOffset; currentOffset++) { + currentByte = nextByte(currentOffset); + // Move past the ',' + if (currentByte == ',') { + if (columnBeginPosition == currentOffset) { + // It means null value because there is comma without any character + // (including quote character) + nulls[columnIndex] = 1; + } + + currentOffset++; + break; + } + if (currentByte == '\\' && currentOffset != (endOffset - 1)) { + currentOffset++; + currentByte = nextByte(currentOffset); + if (currentByte == 'r') { + buffer.appendByte('\r'); + } else if (currentByte == 'n') { + buffer.appendByte('\n'); + } else if (currentByte == '\\' || currentByte == '"') { + buffer.appendByte(currentByte); + } else /* This could only happed with an externally created file */ { + buffer.appendByte('\\'); + buffer.appendByte(currentByte); + } + } else { + if (currentOffset == endOffset - 1 && currentByte == '"') { + // We are at the final symbol and a quote was found for the + // unquoted field => We are working with a damaged field. 
+                            throw GeneralUtil.nestedException("bad format for this csv file");
+                        }
+
+                        buffer.appendByte(currentByte);
+                    }
+                }
+            }
+
+            offsets[columnIndex] = buffer.size();
+        }
+
+        currentPosition = endOffset + END_OF_LINE_LENGTH;
+
+        Slice result = buffer.slice();
+        return new CSVRow(result, offsets, nulls, fieldNum);
+    }
+
+    private int nextLine() throws IOException {
+        fileBufferOutput.reset();
+        for (int position = currentPosition; position < fileEndOffset; position++) {
+            byte b = (byte) inputStream.read();
+            fileBufferOutput.appendByte(b);
+
+            // Line delimiter
+            if (b == '\n') {
+                fileBuffer = fileBufferOutput.slice();
+                return position;
+            }
+        }
+        throw GeneralUtil.nestedException(MessageFormat.format("File {0} is damaged", csvFileName));
+    }
+
+    private byte nextByte(int position) throws IOException {
+        return fileBuffer.getByte(position - currentPosition);
+    }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/DeletionFileReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/DeletionFileReader.java
new file mode 100644
index 000000000..5c4aa025c
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/DeletionFileReader.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.columnar;
+
+import com.alibaba.polardbx.common.Engine;
+import lombok.Data;
+import org.roaringbitmap.RoaringBitmap;
+
+import java.io.Closeable;
+import java.io.IOException;
+
+/**
+ * Read and parse .del files in the given storage engine into memory as bitmaps.
+ */
+public interface DeletionFileReader extends Closeable {
+    /**
+     * Read fully when length == EOF (end of file).
+     */
+    int EOF = -1;
+
+    /**
+     * The format of a deletion entry unit:
+     * 1. generated timestamp
+     * 2. file identifier of .csv or .orc
+     * 3. memory structure of deletion masks in format of bitmap.
+     */
+    @Data
+    class DeletionEntry {
+        private final long tso;
+        private final int fileId;
+        private final RoaringBitmap bitmap;
+
+        public DeletionEntry(long tso, int fileId, RoaringBitmap bitmap) {
+            this.tso = tso;
+            this.fileId = fileId;
+            this.bitmap = bitmap;
+        }
+    }
+
+    /**
+     * Open the .del file resource.
+     *
+     * @param engine storage engine of the .del file.
+     * @param delFileName file name of the .del file without uri prefix like 'oss://dir/'
+     * @param offset file offset to read from.
+     * @param length file length to read.
+     * @throws IOException on IO failure
+     */
+    void open(Engine engine, String delFileName, int offset, int length) throws IOException;
+
+    /**
+     * Fetch and parse the next deletion entry unit.
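+     * <p>A minimal read loop (a sketch; it assumes {@code next()} returns {@code null}
+     * once the entries are exhausted, and {@code bitmaps} is a caller-side map):
+     * <pre>
+     *     reader.open(engine, delFileName, 0, DeletionFileReader.EOF);
+     *     DeletionEntry entry;
+     *     while ((entry = reader.next()) != null) {
+     *         bitmaps.put(entry.getFileId(), entry.getBitmap());
+     *     }
+     * </pre>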
+ */ + DeletionEntry next(); + + int position(); +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/RawOrcTypeCsvReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/RawOrcTypeCsvReader.java new file mode 100644 index 000000000..78fd7de7c --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/RawOrcTypeCsvReader.java @@ -0,0 +1,385 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.columnar; + +import com.alibaba.polardbx.common.Engine; +import com.alibaba.polardbx.common.charset.CharsetName; +import com.alibaba.polardbx.common.datatype.UInt64Utils; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.time.core.MySQLTimeVal; +import com.alibaba.polardbx.executor.archive.columns.ColumnProvider; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.BlockBuilders; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.gms.engine.FileSystemUtils; +import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.BinaryType; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.EnumType; +import com.alibaba.polardbx.optimizer.core.datatype.SetType; +import com.alibaba.polardbx.rpc.result.XResultUtil; +import org.apache.orc.impl.TypeUtils; +import org.jetbrains.annotations.NotNull; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.Charset; +import java.util.List; +import java.util.stream.Collectors; + +import static com.alibaba.polardbx.common.datatype.DecimalConverter.getUnscaledDecimal; +import static com.alibaba.polardbx.executor.archive.columns.ColumnProvider.longFromByte; + +/** + * @author yaozhili + */ +public class RawOrcTypeCsvReader implements CSVFileReader { + + private int fieldNum; + private InputStream inputStream; + private List columnMetas; + private ByteCSVReader rowReader; + private ExecutionContext context; + private int chunkLimit; + private int offset; + public static Charset DEFAULT_CHARSET = CharsetName.defaultCharset().toJavaCharset(); + + @Override + public void open(ExecutionContext context, List columnMetas, int chunkLimit, Engine engine, + String csvFileName, int offset, int length) throws IOException { + this.chunkLimit = chunkLimit; + this.context = context; + this.fieldNum = columnMetas.size(); + // synchronous reading + byte[] buffer; + if (offset == 0 && length == EOF) { + buffer = FileSystemUtils.readFullyFile(csvFileName, engine, true); + } else { + buffer = new byte[length]; + FileSystemUtils.readFile(csvFileName, offset, length, 
buffer, engine, true);
+        }
+
+        this.inputStream = new ByteArrayInputStream(buffer);
+        this.columnMetas = columnMetas;
+        this.rowReader = new ByteCSVReader(csvFileName, inputStream);
+        this.offset = offset;
+    }
+
+    @Override
+    public Chunk next() {
+        return nextUntilPosition(Long.MAX_VALUE);
+    }
+
+    @Override
+    public Chunk nextUntilPosition(long pos) {
+        List<BlockBuilder> blockBuilders = this.columnMetas
+            .stream()
+            .map(ColumnMeta::getDataType)
+            .map(t -> BlockBuilders.create(t, context))
+            .collect(Collectors.toList());
+
+        int totalRow = 0;
+        while (offset + rowReader.position() < pos && rowReader.isReadable()) {
+            try {
+                CSVRow row = rowReader.nextRow();
+
+                // for each row, parse each column and append onto block-builder
+                for (int columnId = 0; columnId < fieldNum; columnId++) {
+                    BlockBuilder blockBuilder = blockBuilders.get(columnId);
+                    DataType dataType = columnMetas.get(columnId).getDataType();
+
+                    convertFromCsvToOrc(blockBuilder, row, columnId, dataType);
+                }
+
+                // reach chunk limit
+                if (++totalRow >= chunkLimit) {
+                    return buildChunk(blockBuilders, totalRow);
+                }
+
+            } catch (IOException e) {
+                throw GeneralUtil.nestedException(e);
+            }
+        }
+
+        // flush the remaining rows
+        return totalRow == 0 ? null : buildChunk(blockBuilders, totalRow);
+    }
+
+    private void convertFromCsvToOrc(BlockBuilder blockBuilder, CSVRow row, int columnId, DataType dataType) {
+        if (row.isNullAt(columnId)) {
+            blockBuilder.appendNull();
+            return;
+        }
+
+        byte[] bytes = row.getBytes(columnId);
+        switch (dataType.fieldType()) {
+        // we can hold data using a long value from bit(1) to bit(64)
+        case MYSQL_TYPE_BIT: {
+            // big-endian mode
+            blockBuilder.writeLong(ColumnProvider.bigBitLongFromByte(bytes, bytes.length));
+            return;
+        }
+
+        // for tiny int, small int, medium int, int
+        case MYSQL_TYPE_TINY: {
+            long longVal;
+            boolean isUnsigned = dataType.isUnsigned();
+            if (isUnsigned) {
+                longVal = getUint8(bytes, 0);
+            } else {
+                longVal = getInt8(bytes, 0);
+            }
+            blockBuilder.writeLong(longVal);
+            return;
+        }
+        case MYSQL_TYPE_SHORT: {
+            long longVal;
+            boolean isUnsigned = dataType.isUnsigned();
+            if (isUnsigned) {
+                longVal = getUint16(bytes, 0);
+            } else {
+                longVal = getInt16(bytes, 0);
+            }
+            blockBuilder.writeLong(longVal);
+            return;
+        }
+        case MYSQL_TYPE_INT24: {
+            long longVal;
+            boolean isUnsigned = dataType.isUnsigned();
+            if (isUnsigned) {
+                longVal = getUint24(bytes, 0);
+            } else {
+                longVal = getInt24(bytes, 0);
+            }
+            blockBuilder.writeLong(longVal);
+            return;
+        }
+        case MYSQL_TYPE_LONG: {
+            long longVal;
+            boolean isUnsigned = dataType.isUnsigned();
+            if (isUnsigned) {
+                longVal = getUint32(bytes, 0);
+            } else {
+                longVal = getInt32(bytes, 0);
+            }
+            blockBuilder.writeLong(longVal);
+            return;
+        }
+
+        // for bigint, bigint unsigned.
+        case MYSQL_TYPE_LONGLONG: {
+            long longVal;
+            int length = Math.min(8, bytes.length);
+            boolean isUnsigned = dataType.isUnsigned();
+            if (isUnsigned) {
+                longVal = longFromByte(bytes, length) ^ UInt64Utils.FLIP_MASK;
+            } else {
+                longVal = longFromByte(bytes, length);
+            }
+            blockBuilder.writeLong(longVal);
+            return;
+        }
+
+        // for real type.
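+        // Floating-point values are stored as their raw IEEE-754 bit patterns, so the
+        // bits-to-value round-trip below is exact; e.g. Float.intBitsToFloat(0x3F800000)
+        // yields 1.0f.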
+ case MYSQL_TYPE_FLOAT: { + int result = ColumnProvider.intFromByte(bytes, bytes.length); + blockBuilder.writeDouble(Float.intBitsToFloat(result)); + return; + } + case MYSQL_TYPE_DOUBLE: { + long result = ColumnProvider.longFromByte(bytes, bytes.length); + blockBuilder.writeDouble(Double.longBitsToDouble(result)); + return; + } + + // for date type + case MYSQL_TYPE_DATE: + case MYSQL_TYPE_NEWDATE: { + blockBuilder.writeLong(ColumnProvider.convertDateToLong(bytes)); + return; + } + + // for datetime type + case MYSQL_TYPE_DATETIME: + case MYSQL_TYPE_DATETIME2: { + final int scale = dataType.getScale(); + blockBuilder.writeLong(ColumnProvider.convertDateTimeToLong(bytes, scale)); + return; + } + + // for time type. + case MYSQL_TYPE_TIME: + case MYSQL_TYPE_TIME2: { + final int scale = dataType.getScale(); + blockBuilder.writeLong(ColumnProvider.convertTimeToLong(bytes, scale)); + return; + } + + // for timestamp type. + case MYSQL_TYPE_TIMESTAMP: + case MYSQL_TYPE_TIMESTAMP2: { + final int scale = dataType.getScale(); + long second = 0; + for (int i = 0; i < 4; i++) { + byte b = bytes[i]; + second = (second << 8) | (b >= 0 ? (int) b : (b + 256)); + } + + // parse fsp + int micro = 0; + int length = (scale + 1) / 2; + if (length > 0) { + int fraction = 0; + for (int i = 4; i < (4 + length); i++) { + byte b = bytes[i]; + fraction = (fraction << 8) | (b >= 0 ? (int) b : (b + 256)); + } + micro = fraction * (int) Math.pow(100, 3 - length); + } + + // pack time value to long + MySQLTimeVal timeVal = new MySQLTimeVal(second, micro * 1000L); + blockBuilder.writeLong(XResultUtil.timeValToLong(timeVal)); + return; + } + + // for year type. + case MYSQL_TYPE_YEAR: { + long longVal = ColumnProvider.longFromByte(bytes, bytes.length); + blockBuilder.writeLong(longVal == 0 ? 0 : longVal + 1900); + return; + } + + // for decimal type. 
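+            // A decimal whose precision fits into 64 bits is written as an unscaled
+            // long: e.g., assuming a DECIMAL(10, 2) column, the value 12.34 is stored
+            // as the long 1234. Wider decimals keep their raw byte representation.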
+ case MYSQL_TYPE_DECIMAL: + case MYSQL_TYPE_NEWDECIMAL: { + if (TypeUtils.isDecimal64Precision(dataType.getPrecision())) { + int precision = dataType.getPrecision(); + int scale = dataType.getScale(); + blockBuilder.writeLong(getUnscaledDecimal(bytes, precision, scale)); + } else { + // fall back to byte[] representation + blockBuilder.writeByteArray(bytes); + } + return; + } + + case MYSQL_TYPE_ENUM: { + int val = ColumnProvider.intFromByte(bytes, bytes.length); + EnumType enumType = (EnumType) dataType; + blockBuilder.writeByteArray(enumType.convertTo(val).getBytes()); + return; + } + + case MYSQL_TYPE_JSON: { + String charsetName = dataType.getCharsetName().getJavaCharset(); + String string = ColumnProvider.convertToString(bytes, charsetName); + blockBuilder.writeByteArray(string.getBytes()); + return; + } + + case MYSQL_TYPE_VAR_STRING: { + if (dataType instanceof BinaryType) { + BinaryType binaryType = (BinaryType) dataType; + if (binaryType.isFixedLength()) { + byte[] paddingBytes = ColumnProvider.convertToPaddingBytes(bytes, binaryType); + blockBuilder.writeByteArray(paddingBytes); + } else { + blockBuilder.writeByteArray(bytes); + } + } else { + blockBuilder.writeByteArray(convertFromBinary(dataType.getCharsetName(), bytes)); + } + return; + } + case MYSQL_TYPE_SET: { + int val = ColumnProvider.intFromByte(bytes, bytes.length); + SetType setType = (SetType) dataType; + blockBuilder.writeByteArray(String.join(",", setType.convertFromBinary(val)).getBytes()); + return; + } + case MYSQL_TYPE_STRING: + if (dataType instanceof SetType) { + int val = ColumnProvider.intFromByte(bytes, bytes.length); + SetType setType = (SetType) dataType; + blockBuilder.writeByteArray(String.join(",", setType.convertFromBinary(val)).getBytes()); + } else { + blockBuilder.writeByteArray(convertFromBinary(dataType.getCharsetName(), bytes)); + } + return; + default: + blockBuilder.writeByteArray(bytes); + } + } + + @NotNull + private Chunk buildChunk(List blockBuilders, int totalRow) { + return new Chunk(totalRow, blockBuilders.stream() + .map(BlockBuilder::build).toArray(Block[]::new)); + } + + @Override + public void close() throws IOException { + if (inputStream != null) { + inputStream.close(); + } + } + + public static int getInt8(byte[] buf, int pos) { + return buf[pos]; + } + + public static int getUint8(byte[] buf, int pos) { + return 0xff & buf[pos]; + } + + public static int getInt16(byte[] buf, int pos) { + return (0xff & buf[pos]) | ((buf[pos + 1]) << 8); + } + + public static int getUint16(byte[] buf, int pos) { + return (0xff & buf[pos]) | ((0xff & buf[pos + 1]) << 8); + } + + public static int getInt24(byte[] buf, int pos) { + return (0xff & buf[pos]) | ((0xff & buf[pos + 1]) << 8) | ((buf[pos + 2]) << 16); + } + + public static int getUint24(byte[] buf, int pos) { + return (0xff & buf[pos]) | ((0xff & buf[pos + 1]) << 8) | ((0xff & buf[pos + 2]) << 16); + } + + public static int getInt32(byte[] buf, int pos) { + return (0xff & buf[pos]) | ((0xff & buf[pos + 1]) << 8) | ((0xff & buf[pos + 2]) << 16) + | ((buf[pos + 3]) << 24); + } + + public static long getUint32(byte[] buf, int pos) { + return ((long) (0xff & buf[pos])) | ((long) (0xff & buf[pos + 1]) << 8) | ((long) (0xff & buf[pos + 2]) << 16) + | ((long) (0xff & buf[pos + 3]) << 24); + } + + public static byte[] convertFromBinary(CharsetName charsetName, byte[] bytes) { + if (charsetName == CharsetName.UTF8MB4 || charsetName == CharsetName.UTF8) { + return bytes; + } + return new String(bytes, 
charsetName.toJavaCharset()).getBytes(DEFAULT_CHARSET); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/SimpleCSVFileReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/SimpleCSVFileReader.java new file mode 100644 index 000000000..932b33c75 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/SimpleCSVFileReader.java @@ -0,0 +1,140 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.columnar; + +import com.alibaba.polardbx.common.Engine; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.archive.columns.ColumnProvider; +import com.alibaba.polardbx.executor.archive.columns.ColumnProviders; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.BlockBuilders; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.gms.engine.FileSystemUtils; +import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import org.jetbrains.annotations.NotNull; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.List; +import java.util.stream.Collectors; + +/** + * Simple implementation of csv file reader. + * It will load all .csv file bytes into memory, and parse bytes into blocks line by line. 
+ */ +public class SimpleCSVFileReader implements CSVFileReader { + private int fieldNum; + private InputStream inputStream; + private List columnProviders; + private List columnMetas; + private ByteCSVReader rowReader; + private ExecutionContext context; + private int chunkLimit; + private int offset; + + @Override + public void open(ExecutionContext context, + List columnMetas, + int chunkLimit, + Engine engine, + String csvFileName, + int offset, + int length) throws IOException { + this.chunkLimit = chunkLimit; + this.context = context; + this.fieldNum = columnMetas.size(); + // synchronous reading + byte[] buffer; + if (offset == 0 && length == EOF) { + buffer = FileSystemUtils.readFullyFile(csvFileName, engine, true); + } else { + buffer = new byte[length]; + FileSystemUtils.readFile(csvFileName, offset, length, buffer, engine, true); + } + + this.inputStream = new ByteArrayInputStream(buffer); + this.columnProviders = columnMetas.stream() + .map(ColumnProviders::getProvider).collect(Collectors.toList()); + this.columnMetas = columnMetas; + this.rowReader = new ByteCSVReader(csvFileName, inputStream); + this.offset = offset; + } + + @Override + public Chunk next() { + return nextUntilPosition(Long.MAX_VALUE); + } + + @Override + public Chunk nextUntilPosition(long pos) { + List blockBuilders = this.columnMetas + .stream() + .map(ColumnMeta::getDataType) + .map(t -> BlockBuilders.create(t, context)) + .collect(Collectors.toList()); + + int totalRow = 0; + while (offset + rowReader.position() < pos && rowReader.isReadable()) { + try { + CSVRow row = rowReader.nextRow(); + + // for each row, parse each column and append onto block-builder + for (int columnId = 0; columnId < fieldNum; columnId++) { + ColumnProvider columnProvider = columnProviders.get(columnId); + BlockBuilder blockBuilder = blockBuilders.get(columnId); + DataType dataType = columnMetas.get(columnId).getDataType(); + + columnProvider.parseRow( + blockBuilder, row, columnId, dataType + ); + } + + // reach chunk limit + if (++totalRow >= chunkLimit) { + return buildChunk(blockBuilders, totalRow); + } + + } catch (IOException e) { + throw GeneralUtil.nestedException(e); + } + } + + // flush the remaining rows + return totalRow == 0 ? null : buildChunk(blockBuilders, totalRow); + } + + @NotNull + private Chunk buildChunk(List blockBuilders, int totalRow) { + return new Chunk(totalRow, blockBuilders.stream() + .map(BlockBuilder::build).toArray(Block[]::new)); + } + + @Override + public void close() throws IOException { + if (inputStream != null) { + inputStream.close(); + } + } + + public long position() { + return offset + rowReader.position(); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/SimpleDeletionFileReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/SimpleDeletionFileReader.java new file mode 100644 index 000000000..c5a74068d --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/SimpleDeletionFileReader.java @@ -0,0 +1,96 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.columnar;
+
+import com.alibaba.polardbx.common.Engine;
+import com.alibaba.polardbx.common.utils.GeneralUtil;
+import com.alibaba.polardbx.common.utils.logger.Logger;
+import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
+import com.alibaba.polardbx.gms.engine.FileSystemUtils;
+import org.roaringbitmap.RoaringBitmap;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.text.MessageFormat;
+
+/**
+ * Simple implementation of .del file reader.
+ * It will load all .del file bytes into memory, and parse them into DeletionEntry units byte by byte.
+ */
+public class SimpleDeletionFileReader implements DeletionFileReader {
+    private static final Logger LOGGER = LoggerFactory.getLogger("oss");
+    private ByteBuffer byteBuffer;
+    private int offset;
+
+    @Override
+    public void open(Engine engine, String delFileName, int offset, int length) throws IOException {
+        // synchronous reading (it may cause OOM)
+        byte[] buffer;
+
+        if (!FileSystemUtils.fileExists(delFileName, engine, true)) {
+            buffer = new byte[0];
+            LOGGER.warn(
+                MessageFormat.format("{0} in Engine:{1} does not exist (offset:{2}, length:{3})", delFileName,
+                    engine, offset, length));
+        } else if (offset == 0 && length == EOF) {
+            // read fully
+            buffer = FileSystemUtils.readFullyFile(delFileName, engine, true);
+        } else {
+            // read from offset
+            buffer = new byte[length];
+            FileSystemUtils.readFile(delFileName, offset, length, buffer, engine, true);
+        }
+        this.byteBuffer = ByteBuffer.wrap(buffer);
+        this.offset = offset;
+    }
+
+    @Override
+    public DeletionEntry next() {
+        // We assume the data in the byte buffer is a complete list of serialized bitmaps.
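+        // Each serialized entry is laid out as:
+        //   int sizeInBytes | int fileId | long tso | RoaringBitmap payload,
+        // with sizeInBytes covering fileId + tso + payload; the explicit position(...)
+        // adjustment below skips the payload, since deserialize does not advance the position.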
+ if (byteBuffer.hasRemaining()) { + final int sizeInBytes = byteBuffer.getInt(); + final int fileId = byteBuffer.getInt(); + final long tso = byteBuffer.getLong(); + RoaringBitmap bitmap = new RoaringBitmap(); + try { + bitmap.deserialize(byteBuffer); + byteBuffer.position(byteBuffer.position() + + sizeInBytes - (Integer.BYTES + Long.BYTES)); + } catch (IOException e) { + LOGGER.error(MessageFormat.format( + "current bitmap information: sizeInBytes = {0}, fileId = {1}, tso = {2}, dataLen = {3}", + sizeInBytes, fileId, tso, byteBuffer.remaining()), e); + throw GeneralUtil.nestedException(e); + } + + return new DeletionEntry(tso, fileId, bitmap); + } + return null; + } + + @Override + public int position() { + return offset + byteBuffer.position(); + } + + @Override + public void close() { + if (byteBuffer != null) { + this.byteBuffer.clear(); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/checker/CciChecker.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/checker/CciChecker.java new file mode 100644 index 000000000..7ff68fd6f --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/checker/CciChecker.java @@ -0,0 +1,270 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.columnar.checker; + +import com.alibaba.polardbx.common.IInnerConnection; +import com.alibaba.polardbx.common.IInnerConnectionManager; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.common.ExecutorContext; +import com.alibaba.polardbx.executor.gms.util.ColumnarTransactionUtils; +import com.alibaba.polardbx.executor.utils.ExecUtils; +import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.utils.ITransaction; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Collection; +import java.util.stream.Collectors; + +/** + * @author yaozhili + */ +public class CciChecker implements ICciChecker { + private static final Logger logger = LoggerFactory.getLogger(CciChecker.class); + private final String schemaName; + private final String tableName; + private final String indexName; + private long primaryHashCode = -1; + private long columnarHashCode = -1; + private long primaryCount = -1; + private long columnarCount = -1; + private long primaryPkHashCode = -1; + private long columnarPkHashCode = -1; + + private String columnarCheckSql; + private String primaryCheckSql; + private static final String CALCULATE_PRIMARY_HASH = + "select count(0) as count, check_sum_v2(%s) as pk_checksum, check_sum_v2(*) as checksum " + + "from %s force index(primary)"; + private static final String CALCULATE_COLUMNAR_HASH = + "select count(0) as count, check_sum_v2(%s) as pk_checksum, check_sum_v2(*) as checksum " + + "from %s force index(%s)"; + private static final String PRIMARY_HINT = + "/*+TDDL:WORKLOAD_TYPE=AP ENABLE_MPP=true ENABLE_MASTER_MPP=true " + + "SOCKET_TIMEOUT=259200000 MPP_TASK_MAX_RUN_TIME=259200000 %s */"; + private static final String COLUMNAR_HINT = + "/*+TDDL:WORKLOAD_TYPE=AP ENABLE_MPP=true ENABLE_MASTER_MPP=true ENABLE_COLUMNAR_OPTIMIZER=true " + + "OPTIMIZER_TYPE='columnar' ENABLE_HTAP=true SOCKET_TIMEOUT=259200000 " + + "MPP_TASK_MAX_RUN_TIME=259200000 %s */"; + + public CciChecker(String schemaName, String tableName, String indexName) { + this.schemaName = schemaName; + this.tableName = tableName; + this.indexName = indexName; + } + + @Override + public void check(ExecutionContext baseEc, Runnable recoverChangedConfigs) throws Throwable { + Pair tso = + ColumnarTransactionUtils.getLatestOrcCheckpointTsoFromGms(); + IInnerConnectionManager manager = ExecutorContext.getContext(schemaName).getInnerConnectionManager(); + logger.warn("Check cci using innodb tso " + tso.getKey() + ", columnar tso " + tso.getValue()); + + // Hold this trx object to prevent purge. + ITransaction trx = ExecUtils.createColumnarTransaction(schemaName, baseEc, tso.getValue()); + try { + // Calculate primary table checksum. 
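+            // CALCULATE_PRIMARY_HASH yields three values per side: the row count, a pk-only
+            // check_sum_v2 and a whole-row check_sum_v2; getCheckReports() later compares each
+            // pair of values between the primary table and the CCI.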
+            try (Connection conn = manager.getConnection(schemaName);
+                Statement stmt = conn.createStatement()) {
+                long start = System.nanoTime();
+                calPrimaryHashCode(stmt, baseEc, tso.getKey());
+                SQLRecorderLogger.ddlLogger.info("[Naive checker] Primary checksum calculated in "
+                    + ((System.nanoTime() - start) / 1_000_000) + " ms");
+            } catch (Throwable t) {
+                SQLRecorderLogger.ddlLogger.error(
+                    String.format("Error occurred while checking primary index %s.%s", tableName, indexName), t);
+                throw t;
+            } finally {
+                if (null != recoverChangedConfigs) {
+                    recoverChangedConfigs.run();
+                }
+            }
+
+            // Calculate columnar table checksum.
+            try (Connection conn = manager.getConnection(schemaName);
+                Statement stmt = conn.createStatement()) {
+                if (conn instanceof IInnerConnection) {
+                    ((IInnerConnection) conn).addExecutionContextInjectHook(
+                        // To see non-PUBLIC CCI.
+                        (ec) -> ((ExecutionContext) ec).setCheckingCci(true));
+                }
+                long start = System.nanoTime();
+                calColumnarHashCode(stmt, baseEc, tso.getValue());
+                SQLRecorderLogger.ddlLogger.info("[Naive checker] Columnar checksum calculated in "
+                    + ((System.nanoTime() - start) / 1_000_000) + " ms");
+                if (conn instanceof IInnerConnection) {
+                    ((IInnerConnection) conn).clearExecutionContextInjectHooks();
+                }
+
+            } catch (Throwable t) {
+                SQLRecorderLogger.ddlLogger.error(
+                    String.format("Error occurred while checking columnar index %s.%s", tableName, indexName), t);
+                throw t;
+            }
+
+        } finally {
+            trx.close();
+        }
+
+    }
+
+    @Override
+    public boolean getCheckReports(Collection reports) {
+        boolean success = true;
+        if (-1 == primaryCount || primaryCount != columnarCount) {
+            // Check fail.
+            reports.add("Inconsistency detected: primary count: " + primaryCount
+                + ", columnar count: " + columnarCount);
+            success = false;
+        }
+
+        if (-1 == primaryPkHashCode || primaryPkHashCode != columnarPkHashCode) {
+            // Check fail.
+            reports.add("Inconsistency detected: primary pk hash: " + primaryPkHashCode
+                + ", columnar pk hash: " + columnarPkHashCode);
+            success = false;
+        }
+
+        if (-1 == primaryHashCode || primaryHashCode != columnarHashCode) {
+            // Check fail.
+            reports.add("Inconsistency detected: primary hash: " + primaryHashCode
+                + ", columnar hash: " + columnarHashCode);
+            success = false;
+        }
+
+        if (!success) {
+            reports.add("Primary table check sql: " + primaryCheckSql
+                + "\nColumnar table check sql: " + columnarCheckSql);
+        }
+
+        return success;
+    }
+
+    private void calPrimaryHashCode(Statement stmt, ExecutionContext ec, long tso) throws SQLException {
+        // Build hint.
+        StringBuilder sb = new StringBuilder();
+        long parallelism;
+        if ((parallelism = ec.getParamManager().getInt(ConnectionParams.MPP_PARALLELISM)) > 0) {
+            sb.append(" MPP_PARALLELISM=")
+                .append(parallelism)
+                .append(" ");
+        }
+        if ((parallelism = ec.getParamManager().getInt(ConnectionParams.PARALLELISM)) > 0) {
+            sb.append(" PARALLELISM=")
+                .append(parallelism)
+                .append(" ");
+        }
+        sb.append(" SNAPSHOT_TS=")
+            .append(tso)
+            .append(" ");
+        sb.append(" TRANSACTION_POLICY=TSO");
+        String hint = String.format(PRIMARY_HINT, sb);
+
+        // Build pk list.
+        String pkList = ec
+            .getSchemaManager(schemaName)
+            .getTable(tableName)
+            .getPrimaryKey()
+            .stream()
+            .map(ColumnMeta::getName)
+            .collect(Collectors.joining(","));
+
+        String sql = String.format(CALCULATE_PRIMARY_HASH, pkList, tableName);
+
+        // Assert that the plan scans the primary table via a LogicalView.
+        sql = hint + sql;
+        logger.warn("Check CCI primary sql: " + sql);
+        primaryCheckSql = sql;
+        ResultSet explainRs = stmt.executeQuery("explain " + sql);
+        StringBuilder explainResult = new StringBuilder();
+        while (explainRs.next()) {
+            explainResult.append(explainRs.getString(1)).append("\n");
+        }
+        logger.warn("Check CCI primary sql plan: \n" + explainResult);
+        if (!explainResult.toString().contains("LogicalView")) {
+            throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, "Check cci plan does not contain LogicalView");
+        }
+
+        // sql already carries the hint, so it must not be prepended again here.
+        ResultSet rs = stmt.executeQuery(sql);
+        if (rs.next()) {
+            primaryCount = rs.getLong("count");
+            primaryPkHashCode = rs.getLong("pk_checksum");
+            primaryHashCode = rs.getLong("checksum");
+        }
+    }
+
+    private void calColumnarHashCode(Statement stmt, ExecutionContext ec, long tso) throws SQLException {
+        // Build hint.
+        StringBuilder sb = new StringBuilder();
+        long parallelism;
+        if ((parallelism = ec.getParamManager().getInt(ConnectionParams.MPP_PARALLELISM)) > 0) {
+            sb.append(" MPP_PARALLELISM=")
+                .append(parallelism)
+                .append(" ");
+        }
+        if ((parallelism = ec.getParamManager().getInt(ConnectionParams.PARALLELISM)) > 0) {
+            sb.append(" PARALLELISM=")
+                .append(parallelism)
+                .append(" ");
+        }
+        sb.append(" SNAPSHOT_TS=")
+            .append(tso)
+            .append(" ");
+        String hint = String.format(COLUMNAR_HINT, sb);
+
+        // Build pk list.
+        String pkList = ec
+            .getSchemaManager(schemaName)
+            .getTable(tableName)
+            .getPrimaryKey()
+            .stream()
+            .map(ColumnMeta::getName)
+            .collect(Collectors.joining(","));
+
+        String sql = String.format(CALCULATE_COLUMNAR_HASH, pkList, tableName, indexName);
+
+        // Assert that the plan scans the CCI via an OSSTableScan.
+        sql = hint + sql;
+        logger.warn("Check CCI columnar sql: " + sql);
+        columnarCheckSql = sql;
+        ResultSet explainRs = stmt.executeQuery("explain " + sql);
+        StringBuilder explainResult = new StringBuilder();
+        while (explainRs.next()) {
+            explainResult.append(explainRs.getString(1)).append("\n");
+        }
+        logger.warn("Check CCI columnar sql plan: \n" + explainResult);
+        if (!explainResult.toString().contains("OSSTableScan")) {
+            throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, "Check cci plan does not contain OSSTableScan");
+        }
+
+        ResultSet rs = stmt.executeQuery(sql);
+        if (rs.next()) {
+            columnarCount = rs.getLong("count");
+            columnarPkHashCode = rs.getLong("pk_checksum");
+            columnarHashCode = rs.getLong("checksum");
+        }
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/checker/CciFastChecker.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/checker/CciFastChecker.java
new file mode 100644
index 000000000..7ab218f9e
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/checker/CciFastChecker.java
@@ -0,0 +1,337 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.alibaba.polardbx.executor.columnar.checker; + +import com.alibaba.polardbx.common.IInnerConnection; +import com.alibaba.polardbx.common.IInnerConnectionManager; +import com.alibaba.polardbx.common.RevisableOrderInvariantHash; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.common.utils.thread.ServerThreadPool; +import com.alibaba.polardbx.executor.common.ExecutorContext; +import com.alibaba.polardbx.executor.gms.ColumnarManager; +import com.alibaba.polardbx.executor.gms.util.ColumnarTransactionUtils; +import com.alibaba.polardbx.executor.utils.ExecUtils; +import com.alibaba.polardbx.gms.metadb.table.FilesRecord; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.partition.PartSpecBase; +import com.alibaba.polardbx.optimizer.utils.ITransaction; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.Future; + +/** + * @author yaozhili + */ +public class CciFastChecker implements ICciChecker { + private static final Logger logger = LoggerFactory.getLogger(CciFastChecker.class); + + private final String schemaName; + private final String tableName; + private final String indexName; + private long columnarHashCode = -1; + private long primaryHashCode = -1; + /** + * Record connection id in use. + * If the checking thread is interrupted, kill these connections. 
+     */
+    private List connIds = new ArrayList<>();
+
+    private static final String CALCULATE_PRIMARY_HASH =
+        "select check_sum_v2(*) as checksum from %s force index(primary)";
+    private static final String CALCULATE_COLUMNAR_HASH =
+        "select check_sum_v2(*) as checksum from %s force index(%s)";
+
+    private static final String PRIMARY_HINT =
+        "/*+TDDL:WORKLOAD_TYPE=AP ENABLE_MPP=true ENABLE_MASTER_MPP=true ENABLE_ORC_RAW_TYPE_BLOCK=true "
+            + "SOCKET_TIMEOUT=259200000 MPP_TASK_MAX_RUN_TIME=259200000 %s */";
+    private static final String COLUMNAR_HINT =
+        "/*+TDDL:WORKLOAD_TYPE=AP ENABLE_MPP=true ENABLE_MASTER_MPP=true ENABLE_COLUMNAR_OPTIMIZER=true "
+            + "OPTIMIZER_TYPE='columnar' ENABLE_HTAP=true ENABLE_ORC_RAW_TYPE_BLOCK=true "
+            + "SOCKET_TIMEOUT=259200000 MPP_TASK_MAX_RUN_TIME=259200000 %s */";
+
+    public CciFastChecker(String schemaName, String tableName, String indexName) {
+        this.schemaName = schemaName;
+        this.tableName = tableName;
+        this.indexName = indexName;
+    }
+
+    @Override
+    public void check(ExecutionContext baseEc, Runnable recoverChangedConfigs) throws Throwable {
+        Pair tso =
+            ColumnarTransactionUtils.getLatestOrcCheckpointTsoFromGms();
+        logger.warn("Check cci using innodb tso " + tso.getKey() + ", columnar tso " + tso.getValue());
+
+        ITransaction trx = ExecUtils.createColumnarTransaction(schemaName, baseEc, tso.getValue());
+
+        try {
+            ExecutorContext executorContext = ExecutorContext.getContext(schemaName);
+            ServerThreadPool threadPool = executorContext.getTopologyExecutor().getExecutorService();
+            IInnerConnectionManager connectionManager = executorContext.getInnerConnectionManager();
+
+            // Calculate primary table checksum.
+            long start = System.nanoTime();
+            calculatePrimaryChecksum(baseEc, tso.getKey(), connectionManager, recoverChangedConfigs);
+            SQLRecorderLogger.ddlLogger.info("[Fast checker] Primary checksum calculated in "
+                + ((System.nanoTime() - start) / 1_000_000) + " ms");
+
+            // Calculate columnar table checksum.
+            start = System.nanoTime();
+            calculateColumnarChecksum(baseEc, tso.getValue(), threadPool, connectionManager);
+            SQLRecorderLogger.ddlLogger.info("[Fast checker] Columnar checksum calculated in "
+                + ((System.nanoTime() - start) / 1_000_000) + " ms");
+
+            SQLRecorderLogger.ddlLogger.info("primary checksum: " + primaryHashCode);
+            logger.info("primary checksum: " + primaryHashCode);
+            SQLRecorderLogger.ddlLogger.info("columnar checksum: " + columnarHashCode);
+            logger.info("columnar checksum: " + columnarHashCode);
+        } finally {
+            trx.close();
+        }
+    }
+
+    private void calculatePrimaryChecksum(ExecutionContext baseEc, long tso,
+                                          IInnerConnectionManager connectionManager,
+                                          Runnable recoverChangedConfigs) {
+        // Calculate primary checksum in this thread.
+        try (Connection conn = connectionManager.getConnection(schemaName);
+            Statement stmt = conn.createStatement()) {
+
+            if (conn instanceof IInnerConnection) {
+                ((IInnerConnection) conn).setTimeZone("+8:00");
+            }
+
+            // Build hint.
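+            // SNAPSHOT_TS pins this query to the InnoDB tso fetched above, so the primary-side
+            // checksum is computed on the same snapshot the columnar side will be compared with.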
+            StringBuilder sb = new StringBuilder();
+            long parallelism;
+            if ((parallelism = baseEc.getParamManager().getInt(ConnectionParams.MPP_PARALLELISM)) > 0) {
+                sb.append(" MPP_PARALLELISM=")
+                    .append(parallelism);
+            }
+            if ((parallelism = baseEc.getParamManager().getInt(ConnectionParams.PARALLELISM)) > 0) {
+                sb.append(" PARALLELISM=")
+                    .append(parallelism);
+            }
+            sb.append(" SNAPSHOT_TS=")
+                .append(tso);
+            sb.append(" TRANSACTION_POLICY=TSO");
+            String hint = String.format(PRIMARY_HINT, sb);
+
+            String sql = String.format(CALCULATE_PRIMARY_HASH, tableName);
+
+            // Prepend the hint; the query is expected to scan the primary table via a LogicalView.
+            sql = hint + sql;
+            SQLRecorderLogger.ddlLogger.info("primary checksum sql: " + sql);
+            logger.info("primary checksum sql: " + sql);
+
+            ResultSet rs = stmt.executeQuery(sql);
+            if (rs.next()) {
+                primaryHashCode = rs.getLong("checksum");
+            }
+        } catch (Throwable t) {
+            SQLRecorderLogger.ddlLogger.error(
+                String.format("Error occurred while checking columnar index %s.%s", tableName, indexName), t);
+            throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_INDEX_CHECKER,
+                "Fast CCI checker error: " + t.getMessage());
+        } finally {
+            if (null != recoverChangedConfigs) {
+                recoverChangedConfigs.run();
+            }
+        }
+    }
+
+    private void calculateColumnarChecksum(ExecutionContext baseEc,
+                                           long tso,
+                                           ServerThreadPool threadPool,
+                                           IInnerConnectionManager connectionManager) {
+        // 1. Get all orc files of this CCI.
+        ColumnarManager columnarManager = ColumnarManager.getInstance();
+        TableMeta tableMeta = baseEc.getSchemaManager(schemaName).getTable(indexName);
+        List orcFiles = new ArrayList<>();
+        List csvFiles = new ArrayList<>();
+        tableMeta.getPartitionInfo()
+            .getPartitionBy()
+            .getPartitions()
+            .stream()
+            // Get each partition name.
+            .map(PartSpecBase::getName)
+            // Get files from each partition.
+            .forEach(partitionName -> {
+                Pair<List<String>, List<String>> orcAndCsv =
+                    columnarManager.findFileNames(tso, schemaName, indexName, partitionName);
+                orcFiles.addAll(orcAndCsv.getKey());
+                csvFiles.addAll(orcAndCsv.getValue());
+            });
+        // RTT 1: get file records.
+        List filesRecords = ExecUtils.getFilesMetaByNames(orcFiles);
+        if (filesRecords.size() != orcFiles.size()) {
+            throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_INDEX_CHECKER,
+                "The number of orc files does not match");
+        }
+
+        // 2. Filter out the files that need to be processed, and compute the cached checksum along the way.
+        final RevisableOrderInvariantHash hasher = new RevisableOrderInvariantHash();
+        final List toBeProcessedFiles = new ArrayList<>();
+        for (FilesRecord filesRecord : filesRecords) {
+            hasher.add(filesRecord.checksum).remove(0);
+            if (0 != filesRecord.deletedChecksum) {
+                toBeProcessedFiles.add(filesRecord.fileName);
+            }
+        }
+
+        SQLRecorderLogger.ddlLogger.info("all orc files checksum: " + hasher.getResult());
+        logger.info("all orc files checksum: " + hasher.getResult());
+
+        Future future = null;
+        // 3. RTT 2: Calculate deleted checksum.
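+        // Checksum algebra so far: hasher holds the sum of all cached per-file checksums;
+        // the task below subtracts the checksum of deleted rows, and the csv step afterwards
+        // adds the checksum of rows that only exist in csv files.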
+        if (!toBeProcessedFiles.isEmpty()) {
+            future = threadPool.submit(null, null, () -> {
+                try (Connection conn = connectionManager.getConnection(schemaName);
+                    Statement stmt = conn.createStatement()) {
+                    if (conn instanceof IInnerConnection) {
+                        ((IInnerConnection) conn).addExecutionContextInjectHook(
+                            (ec) -> {
+                                ((ExecutionContext) ec).setCheckingCci(true);
+                                ((ExecutionContext) ec).setReadOrcFiles(toBeProcessedFiles);
+                            });
+                    }
+                    StringBuilder sb =
+                        new StringBuilder(
+                            " READ_ORC_ONLY=true ENABLE_ORC_DELETED_SCAN=true ");
+                    long parallelism;
+                    if ((parallelism = baseEc.getParamManager().getInt(ConnectionParams.MPP_PARALLELISM)) > 0) {
+                        sb.append(" MPP_PARALLELISM=")
+                            .append(parallelism);
+                    }
+                    if ((parallelism = baseEc.getParamManager().getInt(ConnectionParams.PARALLELISM)) > 0) {
+                        sb.append(" PARALLELISM=")
+                            .append(parallelism);
+                    }
+
+                    String hint = String.format(COLUMNAR_HINT, sb);
+                    String sql = hint + String.format(CALCULATE_COLUMNAR_HASH, tableName, indexName);
+
+                    SQLRecorderLogger.ddlLogger.info("columnar deleted checksum sql: " + sql);
+                    logger.info("columnar deleted checksum sql: " + sql);
+
+                    ResultSet rs = stmt.executeQuery(sql);
+                    if (rs.next()) {
+                        long deletedChecksum = rs.getLong("checksum");
+                        SQLRecorderLogger.ddlLogger.info("columnar deleted checksum: " + deletedChecksum);
+                        logger.info("columnar deleted checksum: " + deletedChecksum);
+                        hasher.remove(deletedChecksum).add(0);
+                    } else {
+                        return "Deleted checksum not found in result set.";
+                    }
+
+                    if (conn instanceof IInnerConnection) {
+                        ((IInnerConnection) conn).clearExecutionContextInjectHooks();
+                    }
+                } catch (Throwable t) {
+                    SQLRecorderLogger.ddlLogger.error(t);
+                    return "Error occurred, caused by " + t.getMessage();
+                }
+                return null;
+            });
+        }
+
+        // 4. RTT 2: Calculate csv part in this thread.
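+        // The csv checksum is folded into the same hasher as the deleted-checksum task above;
+        // future.get() below ensures that task has finished before the final result is read.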
+ if (!csvFiles.isEmpty()) { + try (Connection conn = connectionManager.getConnection(schemaName); + Statement stmt = conn.createStatement()) { + if (conn instanceof IInnerConnection) { + ((IInnerConnection) conn).addExecutionContextInjectHook( + (ec) -> ((ExecutionContext) ec).setCheckingCci(true)); + } + StringBuilder sb = new StringBuilder(" READ_CSV_ONLY=true"); + long parallelism; + if ((parallelism = baseEc.getParamManager().getInt(ConnectionParams.MPP_PARALLELISM)) > 0) { + sb.append(" MPP_PARALLELISM=") + .append(parallelism); + } + if ((parallelism = baseEc.getParamManager().getInt(ConnectionParams.PARALLELISM)) > 0) { + sb.append(" PARALLELISM=") + .append(parallelism); + } + sb.append(" SNAPSHOT_TS=") + .append(tso) + .append(" "); + + String hint = String.format(COLUMNAR_HINT, sb); + String sql = hint + String.format(CALCULATE_COLUMNAR_HASH, tableName, indexName); + + SQLRecorderLogger.ddlLogger.info("columnar csv checksum sql: " + sql); + logger.info("columnar csv checksum sql: " + sql); + + ResultSet rs = stmt.executeQuery(sql); + if (rs.next()) { + long csvChecksum = rs.getLong("checksum"); + SQLRecorderLogger.ddlLogger.info("columnar csv checksum: " + csvChecksum); + logger.info("columnar csv checksum: " + csvChecksum); + hasher.add(csvChecksum).remove(0); + } else { + throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_INDEX_CHECKER, "not found any csv checksum."); + } + + if (conn instanceof IInnerConnection) { + ((IInnerConnection) conn).clearExecutionContextInjectHooks(); + } + } catch (Throwable t) { + if (null != future) { + future.cancel(true); + } + SQLRecorderLogger.ddlLogger.error(t); + throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_INDEX_CHECKER, t.getMessage()); + } + } + + try { + String error = null == future ? null : future.get(); + if (null != error) { + throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_INDEX_CHECKER, error); + } + } catch (Throwable t) { + SQLRecorderLogger.ddlLogger.error(t); + future.cancel(true); + throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_INDEX_CHECKER, t.getMessage()); + } + + columnarHashCode = hasher.getResult(); + } + + @Override + public boolean getCheckReports(Collection reports) { + boolean success = true; + if (-1 == primaryHashCode || primaryHashCode != columnarHashCode) { + // Check fail. + reports.add("Inconsistency detected: primary hash: " + primaryHashCode + + ", columnar hash: " + columnarHashCode); + success = false; + } + return success; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/checker/ICciChecker.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/checker/ICciChecker.java new file mode 100644 index 000000000..30a780efe --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/checker/ICciChecker.java @@ -0,0 +1,35 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.columnar.checker; + +import com.alibaba.polardbx.optimizer.context.ExecutionContext; + +import java.util.Collection; + +/** + * @author yaozhili + */ +public interface ICciChecker { + void check(ExecutionContext baseEc, Runnable recoverChangedConfigs) throws Throwable; + + /** + * @param reports [OUT] check reports returned + * @return true if anything is ok (reports may be empty), + * or false if inconsistency detected (inconsistency details are in reports) + */ + boolean getCheckReports(Collection reports); +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/ColumnarPruneManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/ColumnarPruneManager.java new file mode 100644 index 000000000..a331f6a67 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/ColumnarPruneManager.java @@ -0,0 +1,173 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.columnar.pruning; + +import com.alibaba.polardbx.executor.columnar.pruning.index.IndexPruner; +import com.alibaba.polardbx.executor.operator.scan.impl.PreheatFileMeta; +import com.alibaba.polardbx.gms.module.LogLevel; +import com.alibaba.polardbx.gms.module.LogPattern; +import com.alibaba.polardbx.gms.module.Module; +import com.alibaba.polardbx.gms.module.ModuleLogInfo; +import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.google.common.base.Preconditions; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheStats; +import org.apache.hadoop.fs.Path; +import org.apache.orc.OrcProto; +import org.apache.orc.impl.OrcIndex; + +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +import static com.alibaba.polardbx.gms.engine.FileStoreStatistics.CACHE_STATS_FIELD_COUNT; + +/** + * @author fangwu + */ +public class ColumnarPruneManager { + + private static final String PRUNER_CACHE_NAME = "PRUNER_CACHE"; + + private static final int PRUNER_CACHE_MAX_ENTRY = 1024 * 256; + + private static final int PRUNER_CACHE_TTL_HOURS = 12; + + private static final Cache PRUNER_CACHE = + CacheBuilder.newBuilder() + .recordStats() + .maximumSize(PRUNER_CACHE_MAX_ENTRY) + .expireAfterAccess(PRUNER_CACHE_TTL_HOURS, TimeUnit.HOURS) + .softValues() + .build(); + + public static IndexPruner getIndexPruner(Path targetFile, + PreheatFileMeta preheat, + List columns, int clusteringKeyPosition, + List orcIndexes, + boolean enableOssCompatible) + throws ExecutionException { + return PRUNER_CACHE.get(targetFile, () -> { + IndexPruner.IndexPrunerBuilder builder = + 
new IndexPruner.IndexPrunerBuilder(targetFile.toString(), enableOssCompatible); + int rgNum = 0; + for (int i = 0; i < preheat.getPreheatStripes().size(); i++) { + OrcIndex stripeIndex = preheat.getOrcIndex(i); + // init sort key index if exist + if (clusteringKeyPosition != -1) { + // alter sort key column is not supported + Preconditions.checkArgument(orcIndexes.get(clusteringKeyPosition) != null); + OrcProto.RowIndex rgIndex = stripeIndex.getRowGroupIndex()[orcIndexes.get(clusteringKeyPosition)]; + builder.setSortKeyColId(clusteringKeyPosition); + builder.setSortKeyDataType(columns.get(clusteringKeyPosition).getDataType()); + // init sort key index + for (OrcProto.RowIndexEntry rowIndexEntry : rgIndex.getEntryList()) { + rgNum++; + builder.appendSortKeyIndex(rowIndexEntry.getStatistics().getIntStatistics()); + } + } else { + // error log + ModuleLogInfo.getInstance().logRecord( + Module.COLUMNAR_PRUNE, + LogPattern.UNEXPECTED, + new String[] {"sort key load", "neg clustering key position " + clusteringKeyPosition}, + LogLevel.CRITICAL); + } + + for (int m = 0; m < columns.size(); m++) { + // skip sort key index + if (m == clusteringKeyPosition) { + continue; + } + ColumnMeta cm = columns.get(m); + // get orc column index by table meta column index + Integer orcIndex = orcIndexes.get(m); + if (orcIndex == null) { + continue; + } + + DataType dataType = cm.getDataType();// zone map + if (DataTypeUtil.equalsSemantically(DataTypes.IntegerType, dataType) || + DataTypeUtil.equalsSemantically(DataTypes.LongType, dataType) || + DataTypeUtil.equalsSemantically(DataTypes.DateType, dataType) || + DataTypeUtil.equalsSemantically(DataTypes.DatetimeType, dataType)) { + builder.appendZoneMap(m, dataType); + OrcProto.RowIndex rgIndex = stripeIndex.getRowGroupIndex()[orcIndex]; + for (OrcProto.RowIndexEntry rowIndexEntry : rgIndex.getEntryList()) { + OrcProto.ColumnStatistics columnStatistics = rowIndexEntry.getStatistics(); + // zone map index build + if (columnStatistics.hasIntStatistics()) { + builder.appendZoneMap(m, rowIndexEntry.getStatistics().getIntStatistics()); + } else if (columnStatistics.hasDateStatistics()) { + builder.appendZoneMap(m, rowIndexEntry.getStatistics().getDateStatistics()); + } + if (columnStatistics.hasHasNull()) { + builder.appendZoneMap(m, rowIndexEntry.getStatistics().getHasNull()); + } + } + } else if (DataTypes.DecimalType.equals(dataType)) { + // TODO build zone map index for DecimalType + } else { + continue; + } + } + + // bitmap index was built by stripe index + + // TODO bloom filter build + builder.stripeEnd(); + } + + builder.setRgNum(rgNum); + return builder.build(); + }); + } + + private static int getMetaIndex(List orcIndexes, int orcIndex) { + for (int i = 0; i < orcIndexes.size(); i++) { + if (orcIndexes.get(i) == orcIndex) { + return i; + } + } + throw new IllegalArgumentException("orc column index not match:" + orcIndex + "," + orcIndexes); + } + + public static byte[][] getCacheStat() { + CacheStats cacheStats = PRUNER_CACHE.stats(); + + byte[][] results = new byte[CACHE_STATS_FIELD_COUNT][]; + int pos = 0; + results[pos++] = PRUNER_CACHE_NAME.getBytes(); + results[pos++] = String.valueOf(-1).getBytes(); + results[pos++] = String.valueOf(PRUNER_CACHE.size()).getBytes(); + results[pos++] = String.valueOf(-1).getBytes(); + results[pos++] = String.valueOf(cacheStats.hitCount()).getBytes(); + results[pos++] = String.valueOf(-1).getBytes(); + results[pos++] = String.valueOf(cacheStats.missCount()).getBytes(); + results[pos++] = String.valueOf(-1).getBytes(); + 
results[pos++] = String.valueOf(-1).getBytes(); + results[pos++] = "IN MEMORY".getBytes(); + results[pos++] = new StringBuilder().append(PRUNER_CACHE_TTL_HOURS).append(" h").toString().getBytes(); + results[pos++] = String.valueOf(PRUNER_CACHE_MAX_ENTRY).getBytes(); + results[pos++] = new StringBuilder().append(-1).append(" BYTES").toString().getBytes(); + return results; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/data/PruneUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/data/PruneUtils.java new file mode 100644 index 000000000..38f8b262b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/data/PruneUtils.java @@ -0,0 +1,118 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.columnar.pruning.data; + +import com.alibaba.polardbx.common.charset.CharsetName; +import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.jdbc.ParameterContext; +import com.alibaba.polardbx.executor.columnar.pruning.index.IndexPruneContext; +import com.alibaba.polardbx.executor.columnar.pruning.predicate.AndColumnPredicate; +import com.alibaba.polardbx.executor.columnar.pruning.predicate.ColumnPredicatePruningInf; +import com.alibaba.polardbx.executor.columnar.pruning.predicate.ColumnarPredicatePruningVisitor; +import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.row.Row; +import com.alibaba.polardbx.optimizer.index.Index; +import com.alibaba.polardbx.optimizer.utils.RexLiteralTypeUtils; +import com.alibaba.polardbx.optimizer.utils.RexUtils; +import io.airlift.slice.Slice; +import org.apache.calcite.rex.RexDynamicParam; +import org.apache.calcite.rex.RexLiteral; +import org.apache.calcite.rex.RexNode; +import org.apache.calcite.rex.RexUtil; +import org.roaringbitmap.RoaringBitmap; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * @author fangwu + */ +public class PruneUtils { + public static RoaringBitmap and(long rgNum, Iterator rbs) { + return RoaringBitmap.and(rbs, 0, rgNum); + } + + public static RoaringBitmap or(long rgNum, Iterator rbs) { + return RoaringBitmap.or(rbs, 0, rgNum); + } + + public static ColumnPredicatePruningInf transformRexToIndexMergeTree(RexNode rex, IndexPruneContext ipc) { + return rex.accept(new ColumnarPredicatePruningVisitor(ipc)); + } + + public static ColumnPredicatePruningInf transformRexToIndexMergeTree(List rexList, IndexPruneContext ipc) { + if (rexList == null || rexList.size() == 0) { + return null; + } + ColumnarPredicatePruningVisitor columnarPredicatePruningVisitor = new ColumnarPredicatePruningVisitor(ipc); + AndColumnPredicate and = new AndColumnPredicate(); + and.addAll(rexList.stream().map(r -> 
r.accept(columnarPredicatePruningVisitor))
+            .collect(Collectors.toList()));
+        return and.flat();
+    }
+
+    public static String display(ColumnPredicatePruningInf columnPredicate, List columns,
+                                 IndexPruneContext indexPruneContext) {
+        if (columnPredicate == null) {
+            return "null";
+        }
+        return columnPredicate.display(
+            columns.stream()
+                .map(cm -> cm.getName())
+                .collect(Collectors.toList())
+                .toArray(new String[0]),
+            indexPruneContext).toString();
+    }
+
+    public static Object getValueFromRexNode(RexNode rexNode, IndexPruneContext ipc) {
+        final Map rowParameters = ipc.getParameters().getCurrentParameter();
+        ExecutionContext ec = new ExecutionContext();
+        ec.setParams(ipc.getParameters());
+        Object result = null;
+        if (rexNode instanceof RexDynamicParam) {
+            final int valueIndex = ((RexDynamicParam) rexNode).getIndex();
+            result = rowParameters.get(valueIndex).getValue();
+        } else if (rexNode instanceof RexLiteral) {
+            result = RexLiteralTypeUtils.getJavaObjectFromRexLiteral((RexLiteral) rexNode, true);
+        } else {
+            final Object value = RexUtils.buildRexNode(rexNode, ec).eval(null);
+            if (value instanceof Decimal) {
+                result = ((Decimal) value).toBigDecimal();
+            } else if (value instanceof Slice) {
+                if (RexUtils.isBinaryReturnType(rexNode)) {
+                    result = ((Slice) value).getBytes();
+                } else {
+                    result = ((Slice) value).toString(CharsetName.DEFAULT_STORAGE_CHARSET_IN_CHUNK);
+                }
+            } else {
+                result = value;
+            }
+        }
+        return result;
+    }
+
+    public interface TriFunction<T, U, V, R> {
+        R apply(T var1, U var2, V var3);
+    }
+
+    public interface FourFunction<T, U, V, W> {
+        void apply(T var1, U var2, V var3, W var4);
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/BaseColumnIndex.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/BaseColumnIndex.java
new file mode 100644
index 000000000..e925d3b4c
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/BaseColumnIndex.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.columnar.pruning.index;
+
+import com.alibaba.polardbx.common.utils.time.core.MysqlDateTime;
+import com.alibaba.polardbx.optimizer.core.datatype.DataType;
+import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil;
+
+/**
+ * @author fangwu
+ */
+public abstract class BaseColumnIndex implements ColumnIndex {
+    private long rgNum;
+
+    protected BaseColumnIndex(long rgNum) {
+        this.rgNum = rgNum;
+    }
+
+    @Override
+    public long rgNum() {
+        return rgNum;
+    }
+
+    /**
+     * Use this method to force the index to handle a single type of data internally.
+     * Index data should be written with one data type, and that data type must handle comparison correctly.
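+     * For example, integer values are widened to a plain Long, while date/time values are
+     * converted to the MySQL packed-long form (MysqlDateTime#toPackedLong), so both sides of
+     * a comparison live in the same long domain.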
+     *
+     * @param value the original value, in the column's own datatype
+     * @return the value converted to the index's internal long representation
+     * @throws IllegalArgumentException if the value does not meet these requirements
+     */
+    protected Long paramTransform(Object value, DataType dt) {
+        if (value == null || dt == null) {
+            return null;
+        }
+        if (DataTypeUtil.isIntType(dt)) {
+            return ((Number) value).longValue();
+        }
+        if (DataTypeUtil.isDateType(dt)) {
+            MysqlDateTime date = DataTypeUtil.toMySQLDatetime(value, dt.getSqlType());
+            if (date == null) {
+                return null;
+            }
+            return date.toPackedLong();
+        }
+        throw new IllegalArgumentException("unsupported index value:" + value);
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/BitMapRowGroupIndex.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/BitMapRowGroupIndex.java
new file mode 100644
index 000000000..289813afb
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/BitMapRowGroupIndex.java
@@ -0,0 +1,153 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.columnar.pruning.index;
+
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.gms.config.impl.InstConfUtil;
+import com.alibaba.polardbx.optimizer.core.datatype.DataType;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.commons.lang.StringUtils;
+import org.roaringbitmap.RoaringBitmap;
+
+import java.util.Map;
+
+import static com.alibaba.polardbx.common.properties.ConnectionParams.COLUMNAR_BITMAP_INDEX_MAX_SCAN_SIZE_FOR_PRUNING;
+
+/**
+ * bitmap row group index for columnar pruning
+ *
+ * @author fangwu
+ */
+public class BitMapRowGroupIndex extends BaseColumnIndex {
+
+    /**
+     * col id -> col value -> bitset
+     */
+    private final Map<Integer, Map<String, RoaringBitmap>> val;
+    private final Map dtMap;
+
+    public BitMapRowGroupIndex(int rgNum, Map<Integer, Map<String, RoaringBitmap>> val, Map dtMap) {
+        super(rgNum);
+        this.val = val;
+        this.dtMap = dtMap;
+    }
+
+    /**
+     * prune by range value
+     */
+    public void between(int colId, Object start, boolean includeStart, Object end, boolean includeEnd,
+                        RoaringBitmap cur) {
+        Map<String, RoaringBitmap> bitmap = val.get(colId);
+        if (bitmap == null) {
+            return;
+        }
+        DataType dt = dtMap.get(colId);
+        if (dt == null) {
+            return;
+        }
+        if (bitmap.size() > InstConfUtil.getLong(COLUMNAR_BITMAP_INDEX_MAX_SCAN_SIZE_FOR_PRUNING)) {
+            return;
+        }
+
+        // scan to merge row groups
+        RoaringBitmap result = new RoaringBitmap();
+        for (Map.Entry<String, RoaringBitmap> entry : bitmap.entrySet()) {
+            String val = entry.getKey();
+            RoaringBitmap rb = entry.getValue();
+            if (start != null) {
+                if (includeStart) {
+                    if (check(dt, start, SqlKind.GREATER_THAN_OR_EQUAL, val)) {
+                        result.or(rb);
+                    }
+                } else {
+                    if (check(dt, start, SqlKind.GREATER_THAN, val)) {
+                        result.or(rb);
+                    }
+                }
+            }
+
+            if (end != null) {
+                if (includeEnd) {
+                    if (check(dt, end, SqlKind.LESS_THAN_OR_EQUAL, val)) {
+                        result.or(rb);
+                    }
+                } else {
+                    if (check(dt, end, SqlKind.LESS_THAN, val)) {
+                        result.or(rb);
+                    }
+                }
+            }
+        }
+        cur.and(result);
+    }
+
+    /**
+     * Check whether the source value from the index and the target value from the request
+     * satisfy the given SqlKind (comparison operator).
+     *
+     * @param dt datatype
+     * @param target target value from the request
+     * @param sqlKind the operator: GREATER_THAN, GREATER_THAN_OR_EQUAL,
+     * LESS_THAN, LESS_THAN_OR_EQUAL
+     * @param val source value from the index
+     */
+    private boolean check(DataType dt, Object target, SqlKind sqlKind, Object val) {
+        switch (sqlKind) {
+            case GREATER_THAN:
+                return dt.compare(val, target) > 0;
+            case GREATER_THAN_OR_EQUAL:
+                return dt.compare(val, target) >= 0;
+            case LESS_THAN:
+                return dt.compare(val, target) < 0;
+            case LESS_THAN_OR_EQUAL:
+                return dt.compare(val, target) <= 0;
+            default:
+                throw new TddlRuntimeException(ErrorCode.ERR_BITMAP_ROW_GROUP_INDEX,
+                    "unsupported operator for BitMapRowGroupIndex");
+        }
+    }
+
+    public void pruneEquals(int colId, Object operand, RoaringBitmap cur) {
+        Map<String, RoaringBitmap> bitmap = val.get(colId);
+        if (bitmap == null) {
+            return;
+        }
+        DataType dt = dtMap.get(colId);
+        for (Map.Entry<String, RoaringBitmap> entry : bitmap.entrySet()) {
+            if (dt.compare(operand, entry.getKey()) == 0) {
+                cur.and(entry.getValue());
+                return;
+            }
+        }
+        cur.and(new RoaringBitmap());
+    }
+
+    @Override
+    public boolean checkSupport(int columnId, SqlTypeName columnType) {
+        return val.containsKey(columnId);
+    }
+
+    @Override
+    public DataType getColumnDataType(int columnId) {
+        return dtMap.get(columnId);
+    }
+
+    public String colIds() {
+        return StringUtils.join(dtMap.keySet(), ",");
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/BloomFilterIndex.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/BloomFilterIndex.java
new file mode 100644
index 000000000..a8495eacc
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/BloomFilterIndex.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ */ + +package com.alibaba.polardbx.executor.columnar.pruning.index; + +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import org.apache.calcite.sql.type.SqlTypeName; + +/** + * @author fangwu + */ +public class BloomFilterIndex extends BaseColumnIndex { + protected BloomFilterIndex(long rgNum) { + super(rgNum); + } + + @Override + public boolean checkSupport(int columnId, SqlTypeName columnType) { + return false; + } + + @Override + public DataType getColumnDataType(int columnId) { + return null; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/ColumnIndex.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/ColumnIndex.java new file mode 100644 index 000000000..c6e0e4459 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/ColumnIndex.java @@ -0,0 +1,31 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.columnar.pruning.index; + +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import org.apache.calcite.sql.type.SqlTypeName; + +/** + * @author fangwu + */ +public interface ColumnIndex { + long rgNum(); + + boolean checkSupport(int columnId, SqlTypeName columnType); + + DataType getColumnDataType(int columnId); +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/ColumnarIndexManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/ColumnarIndexManager.java new file mode 100644 index 000000000..14126d988 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/ColumnarIndexManager.java @@ -0,0 +1,48 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.columnar.pruning.index; + +import com.google.common.collect.Maps; + +import java.util.Map; + +/** + * @author fangwu + */ +public class ColumnarIndexManager { + private static final ColumnarIndexManager INSTANCE = new ColumnarIndexManager(); + + public static ColumnarIndexManager getInstance() { + return INSTANCE; + } + + private ColumnarIndexManager() { + } + + Map indexPrunerMap = Maps.newConcurrentMap(); + + public IndexPruner loadColumnarIndex(String schema, String table, long fileId) { + String fileKey = fileKey(schema, table, fileId); + // TODO load, cache and return IndexPruner + return indexPrunerMap.getOrDefault(fileKey, null); + } + + private String fileKey(String schema, String table, long fileId) { + return schema + "_" + table + "_" + fileId; + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/IndexPruneContext.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/IndexPruneContext.java new file mode 100644 index 000000000..c3984d437 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/IndexPruneContext.java @@ -0,0 +1,108 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.columnar.pruning.index; + +import com.alibaba.polardbx.common.jdbc.Parameters; +import com.alibaba.polardbx.common.jdbc.RawString; +import com.alibaba.polardbx.common.utils.time.core.TimeStorage; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.optimizer.statis.ColumnarTracer; +import org.apache.calcite.sql.type.SqlTypeName; +import org.jetbrains.annotations.Nullable; + +import java.sql.Date; +import java.sql.Time; +import java.sql.Timestamp; +import java.util.Optional; + +/** + * @author fangwu + */ +public class IndexPruneContext { + private Parameters parameters; + private ColumnarTracer pruneTracer; + + public Object acquireFromParameter(int paramIndex, DataType dataType, SqlTypeName type) { + return parameters.getCurrentParameter().get(paramIndex).getValue(); + } + + public Object[] acquireArrayFromParameter(int paramIndex, DataType dataType, SqlTypeName typeName) { + Object value = parameters.getCurrentParameter().get(paramIndex).getValue(); + if (value instanceof RawString) { + RawString rs = (RawString) value; + return rs.getObjList().toArray(); + } else { + return null; + } + } + + @Nullable + public Object transformToObject(DataType dataType, SqlTypeName typeName, Object value) { + switch (typeName) { + case TIMESTAMP: + case DATE: + case DATETIME: + case TIME: + return packDateTypeToLong(dataType, value); + default: + return value; + } + } + + private static long packDateTypeToLong(DataType dataType, Object obj) { + if (DataTypeUtil.equalsSemantically(DataTypes.TimestampType, dataType) || + DataTypeUtil.equalsSemantically(DataTypes.DatetimeType, dataType)) { + Timestamp timestamp = (Timestamp) dataType.convertFrom(obj); + return Optional.ofNullable(timestamp) + .map(TimeStorage::packDatetime) + .orElse(-1L); + } else if (DataTypeUtil.equalsSemantically(DataTypes.DateType, dataType)) { + Date date = (Date) dataType.convertFrom(obj); + return Optional.ofNullable(date) + .map(TimeStorage::packDate) + .orElse(-1L); + } else if (DataTypeUtil.equalsSemantically(DataTypes.TimeType, dataType)) { + Time time = (Time) dataType.convertFrom(obj); + return Optional.ofNullable(time) + .map(TimeStorage::packTime) + .orElse(-1L); + } else if (DataTypeUtil.equalsSemantically(DataTypes.YearType, dataType)) { + Long year = (Long) dataType.convertFrom(obj); + return Optional.ofNullable(year) + .orElse(-1L); + } + throw new IllegalStateException("Unexpected value: " + dataType); + } + + public Parameters getParameters() { + return parameters; + } + + public void setParameters(Parameters parameters) { + this.parameters = parameters; + } + + public ColumnarTracer getPruneTracer() { + return pruneTracer; + } + + public void setPruneTracer(ColumnarTracer pruneTracer) { + this.pruneTracer = pruneTracer; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/IndexPruner.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/IndexPruner.java new file mode 100644 index 000000000..baf4219e1 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/IndexPruner.java @@ -0,0 +1,305 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in 
compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.columnar.pruning.index; + +import com.alibaba.polardbx.executor.columnar.pruning.data.PruneUtils; +import com.alibaba.polardbx.executor.columnar.pruning.index.builder.BitMapRowGroupIndexBuilder; +import com.alibaba.polardbx.executor.columnar.pruning.index.builder.SortKeyIndexBuilder; +import com.alibaba.polardbx.executor.columnar.pruning.index.builder.ZoneMapIndexBuilder; +import com.alibaba.polardbx.executor.columnar.pruning.predicate.ColumnPredicatePruningInf; +import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.statis.ColumnarTracer; +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import io.airlift.slice.Slice; +import org.apache.orc.OrcProto; +import org.jetbrains.annotations.NotNull; +import org.roaringbitmap.RoaringBitmap; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.SortedMap; +import java.util.TreeMap; + +/** + * defined the order of prune indexes + * + * @author fangwu + */ +public class IndexPruner { + + // target file meta + private String orcFile; + + // index for prune + /** + * Total number of row groups in this orc file. + */ + private final long rgNum; + private SortKeyIndex sortKeyIndex; + private BitMapRowGroupIndex bitMapRowGroupIndex; + private ZoneMapIndex zoneMapIndex; + private BloomFilterIndex bloomFilterIndex; + /** + * The i-th strip contains stripeRgNum[i] row groups. 
+ */ + private List stripeRgNum; + + private IndexPruner(long rgNum) { + this.rgNum = rgNum; + } + + public SortedMap pruneToSortMap(String table, + List columns, + @Nonnull ColumnPredicatePruningInf cpp, + IndexPruneContext ipc) { + RoaringBitmap rr = prune(table, columns, cpp, ipc); + SortedMap sortedMap = new TreeMap<>(); + int curRgNum = 0; + for (int i = 0; i < stripeRgNum.size(); i++) { + int curStripeRgNum = stripeRgNum.get(i); + boolean[] bs = new boolean[curStripeRgNum]; + + for (int j = 0; j < curStripeRgNum; j++) { + if (rr.contains(j + curRgNum)) { + bs[j] = true; + } + } + curRgNum = curRgNum + curStripeRgNum; + sortedMap.put(i, bs); + } + return sortedMap; + } + + public SortedMap pruneToSortMap(@Nonnull RoaringBitmap rr) { + SortedMap sortedMap = new TreeMap<>(); + int curRgNum = 0; + for (int i = 0; i < stripeRgNum.size(); i++) { + int curStripeRgNum = stripeRgNum.get(i); + boolean[] bs = new boolean[curStripeRgNum]; + + for (int j = 0; j < curStripeRgNum; j++) { + if (rr.contains(j + curRgNum)) { + bs[j] = true; + } + } + curRgNum = curRgNum + curStripeRgNum; + sortedMap.put(i, bs); + } + return sortedMap; + } + + public RoaringBitmap prune(String table, + List columns, + @Nonnull ColumnPredicatePruningInf cpp, + IndexPruneContext ipc) { + RoaringBitmap pruneResult = RoaringBitmap.bitmapOfRange(0, rgNum); + pruneIndex(table, cpp, ipc, PruneAction.SORT_KEY_INDEX_PRUNE, sortKeyIndex, pruneResult, columns); +// pruneIndex(table, cpp, ipc, PruneAction.BITMAP_INDEX_PRUNE, bitMapRowGroupIndex, pruneResult, columns); + pruneIndex(table, cpp, ipc, PruneAction.ZONE_MAP_INDEX_PRUNE, zoneMapIndex, pruneResult, columns); + // TODO bloom filter pruning + return pruneResult; + } + + public RoaringBitmap pruneOnlyDeletedRowGroups(RoaringBitmap deleteBitmap) { + + return new RoaringBitmap(); + } + + private void pruneIndex(String table, + @NotNull ColumnPredicatePruningInf cpp, + IndexPruneContext ipc, + PruneAction pruneAction, + ColumnIndex columnIndex, + RoaringBitmap cur, + List columns) { + if (columnIndex == null) { + return; + } + if (cur.isEmpty()) { + return; + } + ColumnarTracer columnarTracer = ipc.getPruneTracer(); + int before = cur.getCardinality(); + pruneAction.getPruneAction().apply(cpp, columnIndex, ipc, cur); + int after = cur.getCardinality(); + if (columnarTracer != null) { + switch (pruneAction) { + case SORT_KEY_INDEX_PRUNE: + columnarTracer.tracePruneIndex(table, PruneUtils.display(cpp, columns, ipc), before - after, 0, 0); + break; + case ZONE_MAP_INDEX_PRUNE: + columnarTracer.tracePruneIndex(table, PruneUtils.display(cpp, columns, ipc), 0, before - after, 0); + break; + case BITMAP_INDEX_PRUNE: + columnarTracer.tracePruneIndex(table, PruneUtils.display(cpp, columns, ipc), 0, 0, before - after); + break; + } + } + } + + public String getOrcFile() { + return orcFile; + } + + public void setOrcFile(String orcFile) { + this.orcFile = orcFile; + } + + public long getRgNum() { + return rgNum; + } + + public List getStripeRgNum() { + return stripeRgNum; + } + + public static class IndexPrunerBuilder { + private long rgNum; + private final String filePath; + private final SortKeyIndexBuilder sortKeyIndexBuilder = new SortKeyIndexBuilder(); + private final ZoneMapIndexBuilder zoneMapIndexBuilder = new ZoneMapIndexBuilder(); + private final BitMapRowGroupIndexBuilder bitMapRowGroupIndexBuilder = new BitMapRowGroupIndexBuilder(); + private final List stripeRgNum = Lists.newArrayList(); + private int curRgNum; + + // TODO replaced by builder + private BloomFilterIndex 
bloomFilterIndex; + + private final boolean enableOssCompatible; + + public IndexPrunerBuilder(String filePath, + boolean enableOssCompatible) { + this.filePath = filePath; + this.enableOssCompatible = enableOssCompatible; + } + + public IndexPruner build() { + IndexPruner indexPruner = new IndexPruner(rgNum); + indexPruner.setOrcFile(filePath); + indexPruner.sortKeyIndex = sortKeyIndexBuilder.build(); + indexPruner.bitMapRowGroupIndex = bitMapRowGroupIndexBuilder.build(); + indexPruner.zoneMapIndex = zoneMapIndexBuilder.build(); + indexPruner.bloomFilterIndex = bloomFilterIndex; + indexPruner.stripeRgNum = stripeRgNum; + return indexPruner; + } + + public IndexPrunerBuilder appendSortKeyIndex(OrcProto.IntegerStatistics integerStatistics) { + sortKeyIndexBuilder.appendDataEntry(integerStatistics); + curRgNum++; + return this; + } + + public IndexPrunerBuilder appendSortKeyIndex(long min, long max) { + sortKeyIndexBuilder.appendDataEntry(min, max); + curRgNum++; + return this; + } + + public void setSortKeyColId(int colId) { + sortKeyIndexBuilder.setColId(colId); + } + + public void setSortKeyDataType(DataType dt) { + sortKeyIndexBuilder.setDt(dt); + } + + public void stripeEnd() { + stripeRgNum.add(curRgNum); + curRgNum = 0; + } + + public void setBloomFilterIndex(BloomFilterIndex bloomFilterIndex) { + this.bloomFilterIndex = bloomFilterIndex; + } + + public void appendZoneMap(int columnId, OrcProto.IntegerStatistics intStatistics) { + Long min = intStatistics.getMinimum(); + Long max = intStatistics.getMaximum(); + Preconditions.checkArgument( + columnId >= 0 && + min != null && max != null && + min >= Long.MIN_VALUE && min <= Long.MAX_VALUE && + max >= Long.MIN_VALUE && max <= Long.MAX_VALUE, + "bad data for zone map index:" + columnId + ", int type," + min + "," + max); + zoneMapIndexBuilder.appendLongData(columnId, min).appendLongData(columnId, max); + } + + public void appendZoneMap(int columnId, OrcProto.DateStatistics dateStatistics) { + Integer min = dateStatistics.getMinimum(); + Integer max = dateStatistics.getMaximum(); + Preconditions.checkArgument( + columnId >= 0 && + min != null && max != null, + "bad data for zone map index:" + columnId + ", int type," + min + "," + max); + zoneMapIndexBuilder.appendIntegerData(columnId, min).appendIntegerData(columnId, max); + } + + public void appendZoneMap(int columnId, Long min, Long max) { + Preconditions.checkArgument( + columnId > 0 && + min != null && max != null, + "bad data for zone map index:" + columnId + ", int type," + min + "," + max); + zoneMapIndexBuilder.appendLongData(columnId, min).appendLongData(columnId, max); + } + + public void appendZoneMap(int columnId, boolean hasNull) { + Preconditions.checkArgument( + columnId >= 0, + "bad data for zone map index:" + columnId); + zoneMapIndexBuilder.appendNull(columnId, hasNull); + } + + public void appendZoneMap(int columnId, DataType dataType) { + Preconditions.checkArgument(columnId >= 0 && dataType != null, + "bad data for zone map index:" + columnId + "," + dataType); + zoneMapIndexBuilder.appendColumn(columnId, dataType); + } + + public void setRgNum(int rgNum) { + this.rgNum = rgNum; + bitMapRowGroupIndexBuilder.setRgNum(rgNum); + } + + public void appendBitmap(int columnId, List bitMapIndexList) throws IOException { + // bit map not support this column, just return + if (!bitMapRowGroupIndexBuilder.supportColumn(columnId)) { + return; + } + for (OrcProto.BitmapColumn bitmapColumn : bitMapIndexList) { + RoaringBitmap rb = new RoaringBitmap(); + 
rb.deserialize(ByteBuffer.wrap(bitmapColumn.getBitmap().toByteArray())); + bitMapRowGroupIndexBuilder.appendValue(columnId, bitmapColumn.getVal().toStringUtf8(), rb); + } + } + + public void appendBitmap(int columnId, DataType dt) { + Preconditions.checkArgument(columnId >= 0 && dt != null, + "bad data for zone map index:" + columnId + "," + dt); + // disable bitmap when oss compatible and column type is char/varchar + if (!(enableOssCompatible && + (dt.getDataClass() == String.class || dt.getDataClass() == Slice.class))) { + bitMapRowGroupIndexBuilder.appendColumn(columnId, dt); + } + } + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/PruneAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/PruneAction.java new file mode 100644 index 000000000..a3722d0f6 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/PruneAction.java @@ -0,0 +1,51 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.columnar.pruning.index; + +import com.alibaba.polardbx.executor.columnar.pruning.data.PruneUtils; +import com.alibaba.polardbx.executor.columnar.pruning.predicate.ColumnPredicatePruningInf; +import org.roaringbitmap.RoaringBitmap; + +/** + * @author fangwu + */ +public enum PruneAction { + LOAD_PRUNE_INDEX((pred, columnIndex, ipc, cur) -> { + }), + SORT_KEY_INDEX_PRUNE((pred, columnIndex, ipc, cur) -> pred.sortKey((SortKeyIndex) columnIndex, ipc, cur)), + ZONE_MAP_INDEX_PRUNE((pred, columnIndex, ipc, cur) -> pred.zoneMap((ZoneMapIndex) columnIndex, ipc, cur)), + BITMAP_INDEX_PRUNE((pred, columnIndex, ipc, cur) -> pred.bitmap((BitMapRowGroupIndex) columnIndex, ipc, cur)), + BLOOM_FILTER_INDEX_PRUNE( + (pred, columnIndex, ipc, cur) -> pred.bloomFilter((BloomFilterIndex) columnIndex, ipc, cur)); + + private final PruneUtils.FourFunction pruneAction; + + PruneAction( + PruneUtils.FourFunction pruneAction) { + this.pruneAction = pruneAction; + } + + public PruneUtils.FourFunction getPruneAction() { + return pruneAction; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/SortKeyIndex.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/SortKeyIndex.java new file mode 100644 index 000000000..fe5e6cb02 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/SortKeyIndex.java @@ -0,0 +1,255 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
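Reviewer note: IndexPruner.prune above starts from a bitmap containing every row group and lets each enabled index (sort key first, then zone map; bitmap pruning is commented out and bloom filters are a TODO) only ever remove candidates. pruneToSortMap then folds the surviving global row-group ids into per-stripe boolean arrays using stripeRgNum. A self-contained sketch of that bookkeeping, with made-up stripe sizes:

```java
import org.roaringbitmap.RoaringBitmap;

import java.util.Arrays;
import java.util.List;
import java.util.SortedMap;
import java.util.TreeMap;

// Standalone restatement of pruneToSortMap's stripe bookkeeping.
public class PruneToSortMapSketch {
    public static void main(String[] args) {
        List<Integer> stripeRgNum = Arrays.asList(3, 2);           // stripe 0: 3 RGs, stripe 1: 2 RGs
        RoaringBitmap survivors = RoaringBitmap.bitmapOf(0, 2, 4); // global RG ids left after pruning
        SortedMap<Integer, boolean[]> result = new TreeMap<>();
        int base = 0;
        for (int stripe = 0; stripe < stripeRgNum.size(); stripe++) {
            boolean[] bs = new boolean[stripeRgNum.get(stripe)];
            for (int rg = 0; rg < bs.length; rg++) {
                bs[rg] = survivors.contains(base + rg);
            }
            base += stripeRgNum.get(stripe);
            result.put(stripe, bs);
        }
        System.out.println(Arrays.toString(result.get(0)));        // [true, false, true]
        System.out.println(Arrays.toString(result.get(1)));        // [false, true]
    }
}
```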
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.columnar.pruning.index;
+
+import com.alibaba.polardbx.common.utils.Pair;
+import com.alibaba.polardbx.optimizer.core.datatype.DataType;
+import com.google.common.base.Preconditions;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.roaringbitmap.RoaringBitmap;
+
+import java.util.Arrays;
+import java.util.Objects;
+
+/**
+ * Sort key index for columnar pruning; each sort key index covers one orc file.
+ * This index only supports number/date data types, whose values can easily be
+ * transformed into and compared as long values.
+ *
+ * The long array {@code data} stores a min/max pair for every row group, so the row group count equals data.length/2. For example:
+ * RG data: rg1(1000, 2000), rg2(2000, 3000), rg3(4000, 5000), rg4(5000, 5001)
+ * index data: [1000, 2000, 2000, 3000, 4000, 5000, 5000, 5001]
+ *
+ * The sort key index provides two pruning interfaces: pruneEqual and pruneRange.
+ *
+ * pruneEqual returns between 0 and data.length/2 row groups:
+ * - if the target value lies strictly between the min and max of a row group (min/max excluded), that row group is returned
+ * - if the target value falls into the gap between two row groups, no row group is returned; e.g. a target value of 3500 returns no row group
+ * - if the target value is covered by multiple row groups, all of them are returned; e.g. a target value of 5000 returns rg3 and rg4
+ *
+ * pruneEqual is a special case of pruneRange.
+ *
+ * pruneRange will return rgs that ranges intersected + * + * @author fangwu + */ +public class SortKeyIndex extends BaseColumnIndex { + /** + * index data + */ + private long[] data; + + /** + * col index in the orc file, start with 0 + */ + private final int colId; + + /** + * column type + */ + private final DataType dt; + + private SortKeyIndex(long rgNum, int colId, DataType dt) { + super(rgNum); + this.colId = colId; + this.dt = dt; + } + + public static SortKeyIndex build(int colId, long[] data, DataType dt) { + Preconditions.checkArgument(data != null && data.length > 0 && data.length % 2 == 0, "bad short key index"); + SortKeyIndex sortKeyIndex = new SortKeyIndex(data.length / 2, colId, dt); + + sortKeyIndex.data = data; + return sortKeyIndex; + } + + /** + * return full rg for null arg. + * + * @param param target value + * @return pruneRange result for the same value + */ + public void pruneEqual(Object param, RoaringBitmap cur) { + if (param == null) { + return; + } + pruneRange(param, param, cur); + } + + /** + * prune range contains two steps. + * - value transformed to long + * - prune by long range values + */ + public void pruneRange(Object startObj, Object endObj, RoaringBitmap cur) { + Preconditions.checkArgument(!(startObj == null && endObj == null), "null val"); + Long start; + Long end; + + try { + start = paramTransform(startObj, dt); + if (start == null) { + start = data[0]; + } + end = paramTransform(endObj, dt); + if (end == null) { + end = data[data.length - 1]; + } + } catch (IllegalArgumentException e) { + return; + } + + if (end < data[0] || + start > data[data.length - 1]) { + cur.and(new RoaringBitmap()); + return; + } + + Preconditions.checkArgument(start <= end, "error range value"); + + // get lower bound rg index + Pair sIndex = binarySearchLowerBound(start); + // get upper bound rg index + Pair eIndex = binarySearchUpperBound(end); + int startRgIndex; + int endRgIndex; + + // if lower rg index was not included, plus it was different from upper index, then add 1 to lower rg index + if (!sIndex.getValue() && !Objects.equals(sIndex.getKey(), eIndex.getKey())) { + startRgIndex = sIndex.getKey() + 1; + } else { + startRgIndex = sIndex.getKey(); + } + if (eIndex.getValue()) { + endRgIndex = eIndex.getKey() + 1; + } else { + endRgIndex = eIndex.getKey(); + } + + cur.and(RoaringBitmap.bitmapOfRange(startRgIndex, endRgIndex)); + } + + /** + * binary search target value from data array + * - if data array wasn't contains target value, then check odd or even. + * odd meaning target is inside of one row group. even meaning target isn't + * belong any row group. 
+ * data [1, 10, 50, 100] and target 5 will return (0,true) + * data [1, 10, 50, 100] and target 20 will return (0,false) + * - if data array contains target value, try to find the upper bound value + * for the same target value + * data [1, 10, 50, 100, 100, 100] and target 100 will return (3,true) + * + * @param target target value to be searched + */ + private Pair binarySearchUpperBound(Long target) { + if (target < data[0]) { + return Pair.of(0, false); + } + + int index = Arrays.binarySearch(data, target); + + if (index < 0) { + index = -(index + 1); + } else { + for (int i = index + 1; i < data.length; i++) { + if (data[i] == target) { + index = i; + } else { + break; + } + } + return Pair.of(index / 2, true); + } + if (index % 2 == 0) { + return Pair.of(index / 2, false); + } else { + return Pair.of(index / 2, true); + } + } + + /** + * binary search target value from data array + * - if data array wasn't contains target value, then check odd or even. + * odd meaning target is inside of one row group. even meaning target isn't + * belong any row group. + * data [1, 10, 50, 100] and target 5 will return (0,false) + * data [1, 10, 50, 100] and target 20 will return (0,true) + * - if data array contains target value, try to find the lower bound value + * for the same target value + * data [1, 10, 50, 100, 100, 100] and target 100 will return (3,true) + * + * @param target target value to be searched + */ + private Pair binarySearchLowerBound(Long target) { + if (target > data[data.length - 1]) { + return Pair.of((data.length - 1) / 2, false); + } else if (target < data[0]) { + return Pair.of(0, true); + } + int index = Arrays.binarySearch(data, target); + + if (index < 0) { + index = -index - 1; + } else { + for (int i = index - 1; i > 0; i--) { + if (data[i] == target) { + index = i; + } else { + break; + } + } + return Pair.of(index / 2, true); + } + if (index % 2 == 0) { + return Pair.of((index / 2) - 1, false); + } else { + return Pair.of(index / 2, true); + } + } + + @Override + public boolean checkSupport(int columnId, SqlTypeName type) { + if (type != SqlTypeName.BIGINT && + type != SqlTypeName.INTEGER && + type != SqlTypeName.YEAR && + type != SqlTypeName.DATE && + type != SqlTypeName.DATETIME) { + return false; + } + + // TODO col id might need transform + return columnId == colId; + } + + @Override + public DataType getColumnDataType(int colId) { + return dt; + } + + public long[] getData() { + return data; + } + + public int getColId() { + return colId; + } + + public DataType getDt() { + return dt; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/ZoneMapIndex.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/ZoneMapIndex.java new file mode 100644 index 000000000..ddf6fc783 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/ZoneMapIndex.java @@ -0,0 +1,279 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
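Reviewer note: the Javadoc example of SortKeyIndex above can be replayed directly. A hedged sketch; it assumes BaseColumnIndex.paramTransform (not part of this hunk) passes Long values through unchanged for DataTypes.LongType:

```java
import com.alibaba.polardbx.executor.columnar.pruning.index.SortKeyIndex;
import com.alibaba.polardbx.optimizer.core.datatype.DataTypes;
import org.roaringbitmap.RoaringBitmap;

// Replaying the class Javadoc's example data; rg1..rg4 map to bits 0..3.
public class SortKeyIndexSketch {
    public static void main(String[] args) {
        long[] data = {1000, 2000, 2000, 3000, 4000, 5000, 5000, 5001};
        SortKeyIndex idx = SortKeyIndex.build(0, data, DataTypes.LongType);

        RoaringBitmap cur = RoaringBitmap.bitmapOfRange(0, 4);
        idx.pruneEqual(3500L, cur);
        System.out.println(cur.getCardinality()); // 0: 3500 falls in the gap between rg2 and rg3

        cur = RoaringBitmap.bitmapOfRange(0, 4);
        idx.pruneEqual(5000L, cur);
        System.out.println(cur);                  // {2,3}: rg3 and rg4 both cover 5000
    }
}
```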
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.columnar.pruning.index; + +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.gms.config.impl.InstConfUtil; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.google.common.base.Preconditions; +import com.google.common.collect.Maps; +import org.apache.calcite.sql.SqlKind; +import org.apache.calcite.sql.type.SqlTypeName; +import org.apache.commons.lang.StringUtils; +import org.roaringbitmap.RoaringBitmap; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Map; +import java.util.function.Function; + +import static com.alibaba.polardbx.common.properties.ConnectionParams.ZONEMAP_MAX_GROUP_SIZE; + +/** + * Zone map index for columnar scan pruning + * this index contains index data values for multi columns + * in one orc file. + * it serv two interface: + * - pruneNull(col id):xx column is null + * - prune(col id, min value, max value) : get rg range + * calculate by min/max value + * + * @author fangwu + */ +public class ZoneMapIndex extends BaseColumnIndex { + private final Map> dataMap; + private final Map dtMap; + private final Map nullValMap; + + // internal fields + private Map> groupDataMap = Maps.newConcurrentMap(); + + private ZoneMapIndex(long rgNum, Map dtMap, Map> dataMap, + Map nullValMap) { + super(rgNum); + this.dtMap = dtMap; + this.dataMap = dataMap; + this.nullValMap = nullValMap; + + int maxGroupSize = InstConfUtil.getInt(ZONEMAP_MAX_GROUP_SIZE); + for (int index : dataMap.keySet()) { + ArrayList dataTemp = dataMap.get(index); + Map groupData = Maps.newConcurrentMap(); + boolean valid = true; + Function, String> keyFunc = null; + DataType dt = dtMap.get(index); + if (DataTypes.LongType.equals(dt) || + DataTypes.IntegerType.equals(dt) || + DataTypes.TimestampType.equals(dt) || + DataTypes.DatetimeType.equals(dt) || + DataTypes.DateType.equals(dt) || + DataTypes.TimeType.equals(dt)) { + if (dataTemp.get(0) instanceof Long) { + keyFunc = (pair) -> { + Long start = (Long) pair.getKey(); + Long end = (Long) pair.getValue(); + return start + "_" + end; + }; + } else { + keyFunc = (pair) -> { + Integer start = (Integer) pair.getKey(); + Integer end = (Integer) pair.getValue(); + return start + "_" + end; + }; + } + } + + if (keyFunc == null) { + continue; + } + for (int i = 0; i < dataTemp.size() / 2; i++) { + Object start = dataTemp.get(i * 2); + Object end = dataTemp.get(i * 2 + 1); + Pair pair = Pair.of(start, end); + String key = keyFunc.apply(pair); + final int groupIndex = i; + groupData.compute(key, (s, roaringBitmap) -> { + if (roaringBitmap == null) { + RoaringBitmap r = new RoaringBitmap(); + r.add(groupIndex); + return r; + } else { + roaringBitmap.add(groupIndex); + return roaringBitmap; + } + }); + if (groupData.size() > (dataTemp.size() / 10) || groupData.size() > maxGroupSize) { + valid = false; + break; + } + } + if (valid) { + groupDataMap.put(index, groupData.values()); + } + } + } + + public static ZoneMapIndex build(long rgNum, Map dtMap, Map> dataMap, + Map nullValMap) { + Preconditions.checkArgument(!(dtMap == null && dataMap == null && nullValMap == null), + "bad data for zone map index"); + Preconditions.checkArgument(rgNum > 0, "bad rg num:" + rgNum); + return new ZoneMapIndex(rgNum, dtMap, dataMap, nullValMap); + } + + /** + * if any row group has null value, then its 
value is true + * return true/false array for target column + */ + public void pruneNull(int colId, RoaringBitmap cur) { + if (nullValMap.containsKey(colId)) { + cur.and(nullValMap.get(colId)); + } + } + + /** + * prune range + * + * @param colId target column id + * @param start lower value + * @param includeStart is lower value included + * @param end upper value + * @param includeEnd is upper value included + */ + public void prune(int colId, Object start, boolean includeStart, Object end, boolean includeEnd, + RoaringBitmap cur) { + try { + start = paramTransform(start, dtMap.get(colId)); + end = paramTransform(end, dtMap.get(colId)); + } catch (IllegalArgumentException e) { + return; + } + if (includeStart) { + search(colId, start, SqlKind.GREATER_THAN_OR_EQUAL, cur); + } else { + search(colId, start, SqlKind.GREATER_THAN, cur); + } + + if (includeEnd) { + search(colId, end, SqlKind.LESS_THAN_OR_EQUAL, cur); + } else { + search(colId, end, SqlKind.LESS_THAN, cur); + } + } + + /** + * search each zone for the target value and sql kind + * + * @param colId target column id + * @param target target value + * @param sqlKind represent operator: EQUALS,GREATER_THAN, + * GREATER_THAN_OR_EQUAL,LESS_THAN,LESS_THAN_OR_EQUAL + */ + private void search(int colId, Object target, SqlKind sqlKind, RoaringBitmap cur) { + ArrayList data = dataMap.get(colId); + DataType dt = dtMap.get(colId); + long rgNum = rgNum(); + if (data == null || dt == null || target == null) { + return; + } + if (groupDataMap.get(colId) != null) { + for (RoaringBitmap r : groupDataMap.get(colId)) { + int targetPair = r.first(); + switch (sqlKind) { + case EQUALS: + if (dt.compare(target, data.get(targetPair * 2)) < 0 || + dt.compare(data.get(targetPair * 2 + 1), target) > 0) { + cur.andNot(r); + } + break; + case GREATER_THAN: + if (dt.compare(data.get(targetPair * 2 + 1), target) <= 0) { + cur.andNot(r); + } + break; + case GREATER_THAN_OR_EQUAL: + if (dt.compare(data.get(targetPair * 2 + 1), target) < 0) { + cur.andNot(r); + } + break; + case LESS_THAN: + if (dt.compare(data.get(targetPair * 2), target) >= 0) { + cur.andNot(r); + } + break; + case LESS_THAN_OR_EQUAL: + if (dt.compare(data.get(targetPair * 2), target) > 0) { + cur.andNot(r); + } + } + } + } else { + for (int i = 0; i < rgNum; i++) { + if (!cur.contains(i)) { + continue; + } + switch (sqlKind) { + case EQUALS: + if (dt.compare(target, data.get(i * 2)) < 0 || + dt.compare(data.get(i * 2 + 1), target) > 0) { + cur.flip(i); + } + break; + case GREATER_THAN: + if (dt.compare(data.get(i * 2 + 1), target) <= 0) { + cur.flip(i); + } + break; + case GREATER_THAN_OR_EQUAL: + if (dt.compare(data.get(i * 2 + 1), target) < 0) { + cur.flip(i); + } + break; + case LESS_THAN: + if (dt.compare(data.get(i * 2), target) >= 0) { + cur.flip(i); + } + break; + case LESS_THAN_OR_EQUAL: + if (dt.compare(data.get(i * 2), target) > 0) { + cur.flip(i); + } + } + } + } + + } + + @Override + public boolean checkSupport(int columnId, SqlTypeName type) { + if (!dtMap.containsKey(columnId)) { + return false; + } + return type == SqlTypeName.BIGINT || + type == SqlTypeName.INTEGER || + type == SqlTypeName.YEAR || + type == SqlTypeName.DATE || + type == SqlTypeName.DATETIME; + } + + @Override + public DataType getColumnDataType(int columnId) { + return dtMap.get(columnId); + } + + public String colIds() { + return StringUtils.join(dtMap.keySet(), ","); + } + + public int groupSize(int colId) { + if (groupDataMap == null || groupDataMap.get(colId) == null) { + return 0; + } + return 
groupDataMap.get(colId).size(); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/builder/BitMapRowGroupIndexBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/builder/BitMapRowGroupIndexBuilder.java new file mode 100644 index 000000000..4d002894d --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/builder/BitMapRowGroupIndexBuilder.java @@ -0,0 +1,65 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.columnar.pruning.index.builder; + +import com.alibaba.polardbx.executor.columnar.pruning.index.BitMapRowGroupIndex; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.google.common.base.Preconditions; +import com.google.common.collect.Maps; +import org.roaringbitmap.RoaringBitmap; + +import java.util.Map; + +/** + * builder for column bitmap index + * + * @author fangwu + */ +public class BitMapRowGroupIndexBuilder { + private int rgNum = 0; + private final Map> valMap = Maps.newHashMap(); + private final Map dtMap = Maps.newHashMap(); + + public BitMapRowGroupIndexBuilder appendColumn(int columnId, DataType dataType) { + Preconditions.checkArgument(columnId > 0 && dataType != null, + "bad data for zone map index:" + columnId + "," + dataType); + dtMap.put(columnId, dataType); + return this; + } + + public BitMapRowGroupIndexBuilder appendValue(int columnId, String val, RoaringBitmap rb) { + Preconditions.checkArgument(columnId > 0 && val != null, + "bad data for bitmap index:" + columnId + "," + val); + valMap.computeIfAbsent(columnId, i -> Maps.newHashMap()).put(val, rb); + return this; + } + + public BitMapRowGroupIndex build() { + if (valMap.size() == 0 || dtMap.size() == 0 || rgNum == 0) { + return null; + } + return new BitMapRowGroupIndex(rgNum, valMap, dtMap); + } + + public void setRgNum(int rgNum) { + this.rgNum = rgNum; + } + + public boolean supportColumn(int columnId) { + return dtMap.containsKey(columnId); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/builder/SortKeyIndexBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/builder/SortKeyIndexBuilder.java new file mode 100644 index 000000000..c88310b3b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/builder/SortKeyIndexBuilder.java @@ -0,0 +1,63 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
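Reviewer note: the switch in ZoneMapIndex.search above encodes simple interval rules; restated as standalone predicates (illustrative, long-typed for brevity), a row group [min, max] is removable for a comparison exactly when the whole interval fails it. Pruning stays conservative: a kept row group may still contain no matching rows.

```java
// The zone-map skip rules from search(), one helper per operator.
static boolean skipForGreaterThan(long min, long max, long t)        { return max <= t; }
static boolean skipForGreaterThanOrEqual(long min, long max, long t) { return max < t;  }
static boolean skipForLessThan(long min, long max, long t)           { return min >= t; }
static boolean skipForLessThanOrEqual(long min, long max, long t)    { return min > t;  }
```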
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.columnar.pruning.index.builder;
+
+import com.alibaba.polardbx.executor.columnar.pruning.index.SortKeyIndex;
+import com.alibaba.polardbx.optimizer.core.datatype.DataType;
+import com.google.common.collect.Lists;
+import org.apache.orc.OrcProto;
+
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * @author fangwu
+ */
+public class SortKeyIndexBuilder {
+    private int colId;
+    private DataType dt;
+    private List<Long> dataEntry = Lists.newArrayList();
+
+    public void appendDataEntry(OrcProto.IntegerStatistics integerStatistics) {
+        dataEntry.add(integerStatistics.getMinimum());
+        dataEntry.add(integerStatistics.getMaximum());
+    }
+
+    public void appendDataEntry(long min, long max) {
+        dataEntry.add(min);
+        dataEntry.add(max);
+    }
+
+    public SortKeyIndex build() {
+        long[] data = new long[dataEntry.size()];
+        Iterator<Long> it = dataEntry.iterator();
+        int cur = 0;
+        while (it.hasNext()) {
+            Long l = it.next();
+            data[cur++] = l;
+        }
+        return SortKeyIndex.build(colId, data, dt);
+    }
+
+    public void setDt(DataType dt) {
+        this.dt = dt;
+    }
+
+    public void setColId(int colId) {
+        this.colId = colId;
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/builder/ZoneMapIndexBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/builder/ZoneMapIndexBuilder.java
new file mode 100644
index 000000000..bcf0ab000
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/index/builder/ZoneMapIndexBuilder.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
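Reviewer note: a hedged sketch of driving SortKeyIndexBuilder above by hand; the real caller feeds ORC IntegerStatistics per row group, and the literals here are illustrative:

```java
// Builds a SortKeyIndex over column 0 with two row groups.
SortKeyIndexBuilder builder = new SortKeyIndexBuilder();
builder.setColId(0);
builder.setDt(DataTypes.LongType);
builder.appendDataEntry(1000L, 2000L);       // rg0 min/max
builder.appendDataEntry(2000L, 3000L);       // rg1 min/max
SortKeyIndex sortKeyIndex = builder.build(); // data = [1000, 2000, 2000, 3000]
```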
+ */
+
+package com.alibaba.polardbx.executor.columnar.pruning.index.builder;
+
+import com.alibaba.polardbx.executor.columnar.pruning.index.ZoneMapIndex;
+import com.alibaba.polardbx.optimizer.core.datatype.DataType;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.roaringbitmap.RoaringBitmap;
+
+import java.util.ArrayList;
+import java.util.Map;
+
+/**
+ * builder for column zone map index
+ *
+ * @author fangwu
+ */
+public class ZoneMapIndexBuilder {
+    private final Map<Integer, ArrayList<Object>> dataMap = Maps.newHashMap();
+    private final Map<Integer, DataType> dtMap = Maps.newHashMap();
+    private final Map<Integer, ArrayList<Boolean>> nullValMap = Maps.newHashMap();
+
+    public ZoneMapIndexBuilder appendColumn(int columnId, DataType dataType) {
+        Preconditions.checkArgument(columnId >= 0 && dataType != null,
+            "bad data for zone map index:" + columnId + "," + dataType);
+        dtMap.put(columnId, dataType);
+        return this;
+    }
+
+    public ZoneMapIndexBuilder appendNull(int columnId, Boolean hasNull) {
+        Preconditions.checkArgument(columnId >= 0 && hasNull != null,
+            "bad data for zone map index:" + columnId + "," + hasNull);
+        nullValMap.computeIfAbsent(columnId, i -> Lists.newArrayList()).add(hasNull);
+        return this;
+    }
+
+    public ZoneMapIndexBuilder appendIntegerData(int columnId, Integer data) {
+        Preconditions.checkArgument(columnId >= 0 && data != null,
+            "bad data for zone map index:" + columnId + "," + data);
+        dataMap.computeIfAbsent(columnId, i -> Lists.newArrayList()).add(data);
+        return this;
+    }
+
+    public ZoneMapIndexBuilder appendLongData(int columnId, Long data) {
+        Preconditions.checkArgument(columnId >= 0 && data != null,
+            "bad data for zone map index:" + columnId + "," + data);
+        dataMap.computeIfAbsent(columnId, i -> Lists.newArrayList()).add(data);
+        return this;
+    }
+
+    public ZoneMapIndex build() {
+        if (dataMap.size() == 0) {
+            return null;
+        }
+        int rgNum = dataMap.values().iterator().next().size() / 2;
+
+        // build null bitset
+        Map<Integer, RoaringBitmap> rrMap = Maps.newHashMap();
+        for (Map.Entry<Integer, ArrayList<Boolean>> entry : nullValMap.entrySet()) {
+            ArrayList<Boolean> booleans = entry.getValue();
+            RoaringBitmap rr = RoaringBitmap.bitmapOfRange(0, rgNum);
+            for (int i = 0; i < booleans.size(); i++) {
+                if (!booleans.get(i)) {
+                    rr.flip(i);
+                }
+            }
+            rrMap.put(entry.getKey(), rr); // record the column's null bitmap
+        }
+        return ZoneMapIndex.build(rgNum, dtMap, dataMap, rrMap);
+    }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/AndColumnPredicate.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/AndColumnPredicate.java
new file mode 100644
index 000000000..e0ac6b525
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/AndColumnPredicate.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
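Reviewer note: a matching sketch for ZoneMapIndexBuilder above, again with illustrative literals; build() derives the row-group count from the length of the per-column data list:

```java
// Two row groups for column 1; only the second one contains nulls.
ZoneMapIndexBuilder b = new ZoneMapIndexBuilder();
b.appendColumn(1, DataTypes.LongType);
b.appendLongData(1, 10L).appendLongData(1, 20L); // rg0: [10, 20]
b.appendLongData(1, 30L).appendLongData(1, 40L); // rg1: [30, 40]
b.appendNull(1, false).appendNull(1, true);      // null flags per row group
ZoneMapIndex zoneMap = b.build();                // rgNum = 4 / 2 = 2
```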
+ */ + +package com.alibaba.polardbx.executor.columnar.pruning.predicate; + +import com.alibaba.polardbx.executor.columnar.pruning.data.PruneUtils; +import com.alibaba.polardbx.executor.columnar.pruning.index.ColumnIndex; +import com.alibaba.polardbx.executor.columnar.pruning.index.IndexPruneContext; +import org.roaringbitmap.RoaringBitmap; + +/** + * @author fangwu + */ +public class AndColumnPredicate extends MultiColumnPredicate { + + @Override + protected void handleMulti(ColumnIndex index, IndexPruneContext ipc, + PruneUtils.FourFunction f, + RoaringBitmap cur) { + children().stream().forEach(columnPredicatePruningInf -> f.apply(columnPredicatePruningInf, index, ipc, cur)); + } + + @Override + public StringBuilder display(String[] columns, IndexPruneContext ipc) { + return super.display("AND", columns, ipc); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/BetweenColumnPredicate.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/BetweenColumnPredicate.java new file mode 100644 index 000000000..6a688d729 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/BetweenColumnPredicate.java @@ -0,0 +1,121 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
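Reviewer note: AndColumnPredicate above relies on the shared MultiColumnPredicate base; its add/flat methods are not in this hunk but are exercised by the visitor below. Since every child only ever narrows the same RoaringBitmap, conjunction is just sequential application. An illustrative composition, assuming it sits in the same package as the visitor:

```java
// (col0 = ?0 AND col1 < ?1), built the way the visitor builds it.
AndColumnPredicate and = new AndColumnPredicate();
and.add(new BinaryColumnPredicate(SqlTypeName.BIGINT, 0, SqlKind.EQUALS, 0));
and.add(new BinaryColumnPredicate(SqlTypeName.BIGINT, 1, SqlKind.LESS_THAN, 1));
ColumnPredicatePruningInf pred = and.flat(); // flat() presumably simplifies trivial trees
```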
+ */ + +package com.alibaba.polardbx.executor.columnar.pruning.predicate; + +import com.alibaba.polardbx.executor.columnar.pruning.index.BitMapRowGroupIndex; +import com.alibaba.polardbx.executor.columnar.pruning.index.BloomFilterIndex; +import com.alibaba.polardbx.executor.columnar.pruning.index.IndexPruneContext; +import com.alibaba.polardbx.executor.columnar.pruning.index.SortKeyIndex; +import com.alibaba.polardbx.executor.columnar.pruning.index.ZoneMapIndex; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.google.common.base.Preconditions; +import org.apache.calcite.sql.type.SqlTypeName; +import org.jetbrains.annotations.NotNull; +import org.roaringbitmap.RoaringBitmap; + +import javax.annotation.Nonnull; + +/** + * @author fangwu + */ +public class BetweenColumnPredicate extends ColumnPredicate { + + private final int paramIndex1; + private final Object paramObj1; + private final int paramIndex2; + private final Object paramObj2; + + public BetweenColumnPredicate(SqlTypeName type, int colId, int paramIndex1, int paramIndex2, Object paramObj1, + Object paramObj2) { + super(type, colId); + this.paramIndex1 = paramIndex1; + this.paramIndex2 = paramIndex2; + this.paramObj1 = paramObj1; + this.paramObj2 = paramObj2; + } + + @Override + public void sortKey(@NotNull SortKeyIndex sortKeyIndex, @NotNull IndexPruneContext ipc, + @NotNull RoaringBitmap cur) { + if (!sortKeyIndex.checkSupport(colId, type)) { + return; + } + // get args + Object arg1 = getArg(sortKeyIndex.getColumnDataType(colId), type, paramIndex1, paramObj1, ipc); + Object arg2 = getArg(sortKeyIndex.getColumnDataType(colId), type, paramIndex2, paramObj2, ipc); + + if (arg1 == null && arg2 == null) { + return; + } + sortKeyIndex.pruneRange(arg1, arg2, cur); + } + + @Override + public void bitmap(@Nonnull @NotNull BitMapRowGroupIndex bitMapIndex, IndexPruneContext ipc, + @NotNull RoaringBitmap cur) { + if (!bitMapIndex.checkSupport(colId, type)) { + return; + } + // get args + Object arg1 = getArg(bitMapIndex.getColumnDataType(colId), type, paramIndex1, paramObj1, ipc); + Object arg2 = getArg(bitMapIndex.getColumnDataType(colId), type, paramIndex2, paramObj2, ipc); + + if (arg1 == null && arg2 == null) { + return; + } + bitMapIndex.between(colId, arg1, true, arg2, true, cur); + } + + @Override + public void zoneMap(@NotNull ZoneMapIndex zoneMapIndex, IndexPruneContext ipc, @NotNull RoaringBitmap cur) { + if (!zoneMapIndex.checkSupport(colId, type)) { + return; + } + // get args + Object arg1 = getArg(zoneMapIndex.getColumnDataType(colId), type, paramIndex1, paramObj1, ipc); + Object arg2 = getArg(zoneMapIndex.getColumnDataType(colId), type, paramIndex2, paramObj2, ipc); + + if (arg1 == null && arg2 == null) { + return; + } + zoneMapIndex.prune(colId, arg1, true, arg2, true, cur); + } + + @Override + public void bloomFilter(@NotNull BloomFilterIndex bloomFilterIndex, IndexPruneContext ipc, + @NotNull RoaringBitmap cur) { + } + + @Override + public StringBuilder display(String[] columns, IndexPruneContext ipc) { + Preconditions.checkArgument(columns != null && columns.length > colId, "error column meta"); + // get args + Object arg1 = getArg(DataTypes.StringType, SqlTypeName.VARCHAR, paramIndex1, null, ipc); + Object arg2 = getArg(DataTypes.StringType, SqlTypeName.VARCHAR, paramIndex2, null, ipc); + StringBuilder sb = new StringBuilder(); + sb.append(columns[colId]) + .append("_") + .append(colId) + .append(" BETWEEN ") + .append(arg1) + .append(" ") + .append(arg2); + + return sb; + } + +} diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/BinaryColumnPredicate.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/BinaryColumnPredicate.java new file mode 100644 index 000000000..d93bfa3dd --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/BinaryColumnPredicate.java @@ -0,0 +1,187 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.columnar.pruning.predicate; + +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.executor.columnar.pruning.index.BitMapRowGroupIndex; +import com.alibaba.polardbx.executor.columnar.pruning.index.BloomFilterIndex; +import com.alibaba.polardbx.executor.columnar.pruning.index.IndexPruneContext; +import com.alibaba.polardbx.executor.columnar.pruning.index.SortKeyIndex; +import com.alibaba.polardbx.executor.columnar.pruning.index.ZoneMapIndex; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.google.common.base.Preconditions; +import org.apache.calcite.sql.SqlKind; +import org.apache.calcite.sql.type.SqlTypeName; +import org.jetbrains.annotations.NotNull; +import org.roaringbitmap.RoaringBitmap; + +import javax.annotation.Nonnull; + +/** + * @author fangwu + */ +public class BinaryColumnPredicate extends ColumnPredicate { + + private final SqlKind operator; + private final int paramIndex; + private final Object param; + + public BinaryColumnPredicate(SqlTypeName type, int colId, SqlKind operator, int paramIndex) { + super(type, colId); + checkOperator(operator); + this.operator = operator; + this.paramIndex = paramIndex; + this.param = null; + } + + public BinaryColumnPredicate(SqlTypeName type, int colId, SqlKind operator, Object param) { + super(type, colId); + checkOperator(operator); + this.operator = operator; + this.param = param; + this.paramIndex = -1; + } + + private void checkOperator(SqlKind operator) { + switch (operator) { + case EQUALS: + case LESS_THAN: + case LESS_THAN_OR_EQUAL: + case GREATER_THAN: + case GREATER_THAN_OR_EQUAL: + return; + default: + throw new TddlRuntimeException(ErrorCode.ERR_BINARY_PREDICATE, + "not support operator for BinaryColumnPredicate"); + } + } + + @Override + public StringBuilder display(String[] columns, IndexPruneContext ipc) { + Preconditions.checkArgument(columns != null && columns.length > colId, "error column meta"); + // get arg + Object arg = getArg(DataTypes.StringType, SqlTypeName.VARCHAR, paramIndex, param, ipc); + + StringBuilder sb = new StringBuilder(); + sb.append(columns[colId]) + .append("_") + .append(colId) + .append(" " + operator.name() + " ") + .append(arg); + return sb; + } + + @Override + public void sortKey(@NotNull SortKeyIndex sortKeyIndex, + IndexPruneContext ipc, @NotNull RoaringBitmap cur) { + if 
(!sortKeyIndex.checkSupport(colId, type)) { + return; + } + // get args + Object arg = getArg(sortKeyIndex.getColumnDataType(colId), type, paramIndex, param, ipc); + + if (arg == null) { + return; + } + switch (operator) { + case EQUALS: + sortKeyIndex.pruneRange(arg, arg, cur); + return; + case LESS_THAN_OR_EQUAL: + case LESS_THAN: + sortKeyIndex.pruneRange(null, arg, cur); + return; + case GREATER_THAN: + case GREATER_THAN_OR_EQUAL: + sortKeyIndex.pruneRange(arg, null, cur); + return; + default: + throw new TddlRuntimeException(ErrorCode.ERR_BINARY_PREDICATE, + "not support operator for BinaryColumnPredicate"); + } + + } + + @Override + public void bitmap(@Nonnull @NotNull BitMapRowGroupIndex bitMapIndex, IndexPruneContext ipc, + @NotNull RoaringBitmap cur) { + if (!bitMapIndex.checkSupport(colId, type)) { + return; + } + // get args + Object arg = getArg(bitMapIndex.getColumnDataType(colId), type, paramIndex, param, ipc); + + if (arg == null) { + return; + } + switch (operator) { + case EQUALS: + bitMapIndex.pruneEquals(colId, arg, cur); + return; + case LESS_THAN_OR_EQUAL: + case LESS_THAN: + bitMapIndex.between(colId, null, true, arg, true, cur); + return; + case GREATER_THAN: + case GREATER_THAN_OR_EQUAL: + bitMapIndex.between(colId, arg, true, null, true, cur); + return; + default: + throw new TddlRuntimeException(ErrorCode.ERR_BINARY_PREDICATE, + "not support operator for BinaryColumnPredicate"); + } + } + + @Override + public void zoneMap(@NotNull ZoneMapIndex zoneMapIndex, IndexPruneContext ipc, @NotNull RoaringBitmap cur) { + if (!zoneMapIndex.checkSupport(colId, type)) { + return; + } + // get args + Object arg = getArg(zoneMapIndex.getColumnDataType(colId), type, paramIndex, param, ipc); + + if (arg == null) { + return; + } + switch (operator) { + case EQUALS: + zoneMapIndex.prune(colId, arg, true, arg, true, cur); + return; + case LESS_THAN_OR_EQUAL: + case LESS_THAN: + zoneMapIndex.prune(colId, null, true, arg, true, cur); + return; + case GREATER_THAN: + case GREATER_THAN_OR_EQUAL: + zoneMapIndex.prune(colId, arg, true, null, true, cur); + return; + default: + throw new TddlRuntimeException(ErrorCode.ERR_BINARY_PREDICATE, + "not support operator for BinaryColumnPredicate"); + } + } + + @Override + public void bloomFilter(@NotNull BloomFilterIndex bloomFilterIndex, + IndexPruneContext ipc, @NotNull RoaringBitmap cur) { + } + + public int getParamIndex() { + return paramIndex; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/ColumnPredicate.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/ColumnPredicate.java new file mode 100644 index 000000000..11bb36214 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/ColumnPredicate.java @@ -0,0 +1,52 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
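Reviewer note: two details of BinaryColumnPredicate above are easy to miss. First, the comparison value can be bound either as a plan-time constant (Object overload) or as a parameter index resolved through IndexPruneContext at execution time. Second, strict and non-strict comparisons are handled identically (LESS_THAN falls through to the LESS_THAN_OR_EQUAL range), which is safe for pruning: it can only keep extra row groups, never drop matching ones.

```java
// Both bindings are illustrative; overload resolution picks (Object) for the
// boxed literal and (int paramIndex) for the bare index.
BinaryColumnPredicate byConstant =
    new BinaryColumnPredicate(SqlTypeName.BIGINT, 0, SqlKind.GREATER_THAN, (Object) 100L);
BinaryColumnPredicate byParamIndex =
    new BinaryColumnPredicate(SqlTypeName.BIGINT, 0, SqlKind.GREATER_THAN, 0);
```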
+ */ + +package com.alibaba.polardbx.executor.columnar.pruning.predicate; + +import com.alibaba.polardbx.executor.columnar.pruning.index.IndexPruneContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import org.apache.calcite.sql.type.SqlTypeName; +import org.jetbrains.annotations.NotNull; + +/** + * @author fangwu + */ +public abstract class ColumnPredicate implements ColumnPredicatePruningInf { + + // column type, might be null + protected SqlTypeName type; + protected int colId; + + public ColumnPredicate(SqlTypeName type, int colId) { + this.type = type; + this.colId = colId; + } + + protected Object getArg(@NotNull DataType dataType, SqlTypeName typeName, int paramIndex, Object constant, + @NotNull IndexPruneContext ipc) { + if (constant == null) { + return ipc.acquireFromParameter(paramIndex, dataType, typeName); + } else { + return constant; + } + } + + protected Object[] getArgs(@NotNull DataType dataType, SqlTypeName typeName, int paramIndex, + @NotNull IndexPruneContext ipc) { + return ipc.acquireArrayFromParameter(paramIndex, dataType, typeName); + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/ColumnPredicatePruningInf.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/ColumnPredicatePruningInf.java new file mode 100644 index 000000000..1dfabfad1 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/ColumnPredicatePruningInf.java @@ -0,0 +1,43 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.columnar.pruning.predicate; + +import com.alibaba.polardbx.executor.columnar.pruning.index.BitMapRowGroupIndex; +import com.alibaba.polardbx.executor.columnar.pruning.index.BloomFilterIndex; +import com.alibaba.polardbx.executor.columnar.pruning.index.IndexPruneContext; +import com.alibaba.polardbx.executor.columnar.pruning.index.SortKeyIndex; +import com.alibaba.polardbx.executor.columnar.pruning.index.ZoneMapIndex; +import org.roaringbitmap.RoaringBitmap; + +import javax.annotation.Nonnull; + +/** + * @author fangwu + */ +public interface ColumnPredicatePruningInf { + + StringBuilder display(String[] columns, IndexPruneContext ipc); + + void sortKey(@Nonnull SortKeyIndex sortKeyIndex, IndexPruneContext ipc, @Nonnull RoaringBitmap cur); + + void bitmap(@Nonnull BitMapRowGroupIndex bitMapIndex, IndexPruneContext ipc, @Nonnull RoaringBitmap cur); + + void zoneMap(@Nonnull ZoneMapIndex zoneMapIndex, IndexPruneContext ipc, @Nonnull RoaringBitmap cur); + + void bloomFilter(@Nonnull BloomFilterIndex bloomFilterIndex, IndexPruneContext ipc, + @Nonnull RoaringBitmap cur); +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/ColumnarPredicatePruningVisitor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/ColumnarPredicatePruningVisitor.java new file mode 100644 index 000000000..bdc30ec60 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/ColumnarPredicatePruningVisitor.java @@ -0,0 +1,196 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
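Reviewer note: putting the pieces together, a hedged end-to-end sketch; rexFilter, parameters, columns and indexPruner are illustrative placeholders. The filter is translated with the visitor defined below, then the pruner intersects row groups across its indexes:

```java
// Illustrative wiring; a null predicate means the filter is not prunable.
IndexPruneContext ipc = new IndexPruneContext();
ipc.setParameters(parameters); // bound statement parameters
ColumnPredicatePruningInf pred = rexFilter.accept(new ColumnarPredicatePruningVisitor(ipc));
if (pred != null) {
    RoaringBitmap survivors = indexPruner.prune("orders", columns, pred, ipc);
}
```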
+ */
+
+package com.alibaba.polardbx.executor.columnar.pruning.predicate;
+
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.executor.columnar.pruning.data.PruneUtils;
+import com.alibaba.polardbx.executor.columnar.pruning.index.IndexPruneContext;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexDynamicParam;
+import org.apache.calcite.rex.RexInputRef;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexUtil;
+import org.apache.calcite.rex.RexVisitorImpl;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.type.SqlTypeName;
+
+/**
+ * @author fangwu
+ */
+public class ColumnarPredicatePruningVisitor extends RexVisitorImpl<ColumnPredicatePruningInf> {
+    private IndexPruneContext ipc;
+
+    public ColumnarPredicatePruningVisitor(IndexPruneContext ipc) {
+        super(false);
+        this.ipc = ipc;
+    }
+
+    @Override
+    public ColumnPredicatePruningInf visitCall(RexCall call) {
+        SqlKind kind = call.op.kind;
+        switch (kind) {
+        case AND:
+            return visitAnd(call);
+        case OR:
+            return visitOr(call);
+        case NOT:
+            return visitNot(call);
+        case EQUALS:
+        case LESS_THAN:
+        case LESS_THAN_OR_EQUAL:
+        case GREATER_THAN:
+        case GREATER_THAN_OR_EQUAL:
+            return visitBinary(call);
+        case BETWEEN:
+            return visitBetween(call);
+        case IN:
+            return visitIn(call);
+        case IS_NULL:
+            return visitIsNull(call);
+        case RUNTIME_FILTER:
+            return visitRuntimeFilter(call);
+        default:
+            return null;
+        }
+    }
+
+    private ColumnPredicatePruningInf visitRuntimeFilter(RexCall call) {
+        // todo support min max rf & bloom filter rf
+        return null;
+    }
+
+    private ColumnPredicatePruningInf visitIsNull(RexCall call) {
+        if (!(call.getOperands().get(0) instanceof RexInputRef)) {
+            return null;
+        }
+        RexInputRef field = (RexInputRef) call.getOperands().get(0);
+        return new IsNullColumnPredicate(field.getIndex());
+    }
+
+    private ColumnPredicatePruningInf visitIn(RexCall call) {
+        if (!(call.getOperands().get(0) instanceof RexInputRef)) {
+            return null;
+        }
+
+        if (!(call.getOperands().get(1) instanceof RexCall && call.getOperands().get(1).isA(SqlKind.ROW))) {
+            return null;
+        }
+
+        RexInputRef field = (RexInputRef) call.getOperands().get(0);
+        RexCall inRexCall = ((RexCall) call.getOperands().get(1));
+
+        if (inRexCall.getOperands().size() > 1) {
+            // only support raw string now
+            return null;
+        }
+        if (!(inRexCall.getOperands().get(0) instanceof RexDynamicParam)) {
+            return null;
+        }
+        RexDynamicParam para = (RexDynamicParam) inRexCall.getOperands().get(0);
+        SqlTypeName typeName = field.getType().getSqlTypeName();
+        return new InColumnPredicate(typeName, field.getIndex(), para.getIndex());
+    }
+
+    private ColumnPredicatePruningInf visitBetween(RexCall call) {
+        if (!(call.getOperands().get(0) instanceof RexInputRef)) {
+            return null;
+        }
+        if (RexUtil.isConstant(call.getOperands().get(1)) && RexUtil.isConstant(call.getOperands().get(2))) {
+            RexInputRef field = (RexInputRef) call.getOperands().get(0);
+            Object param1 = PruneUtils.getValueFromRexNode(call.getOperands().get(1), ipc);
+            Object param2 = PruneUtils.getValueFromRexNode(call.getOperands().get(2), ipc);
+            SqlTypeName typeName = field.getType().getSqlTypeName();
+            return new BetweenColumnPredicate(typeName, field.getIndex(), -1, -1, param1, param2);
+        }
+        return null;
+    }
+
+    private ColumnPredicatePruningInf visitBinary(RexCall call) {
+        RexInputRef field;
+        SqlKind sqlKind = call.getKind();
+        if (call.getOperands().get(0) instanceof RexInputRef &&
+            RexUtil.isConstant(call.getOperands().get(1))) {
+            field = (RexInputRef) call.getOperands().get(0);
+            Object param = PruneUtils.getValueFromRexNode(call.getOperands().get(1), ipc);
+            SqlTypeName typeName = field.getType().getSqlTypeName();
+            return new BinaryColumnPredicate(typeName, field.getIndex(), sqlKind, param);
+        } else if (call.getOperands().get(1) instanceof RexInputRef &&
+            RexUtil.isConstant(call.getOperands().get(0))) {
+            field = (RexInputRef) call.getOperands().get(1);
+            Object param = PruneUtils.getValueFromRexNode(call.getOperands().get(0), ipc);
+            SqlTypeName typeName = field.getType().getSqlTypeName();
+            return new BinaryColumnPredicate(typeName, field.getIndex(), flipSqlKind(sqlKind), param);
+        } else {
+            return null;
+        }
+
+    }
+
+    private SqlKind flipSqlKind(SqlKind sqlKind) {
+        switch (sqlKind) {
+        case EQUALS:
+            return sqlKind;
+        case LESS_THAN:
+            return SqlKind.GREATER_THAN;
+        case LESS_THAN_OR_EQUAL:
+            return SqlKind.GREATER_THAN_OR_EQUAL;
+        case GREATER_THAN:
+            return SqlKind.LESS_THAN;
+        case GREATER_THAN_OR_EQUAL:
+            return SqlKind.LESS_THAN_OR_EQUAL;
+        default:
+            throw new TddlRuntimeException(ErrorCode.ERR_BINARY_PREDICATE,
+                "unsupported operator for BinaryColumnPredicate");
+        }
+    }
+
+    private ColumnPredicatePruningInf visitNot(RexCall call) {
+        if (call.getOperands().size() != 1) {
+            return null;
+        }
+        ColumnPredicatePruningInf sub = call.getOperands().get(0).accept(this);
+        if (sub != null) {
+            return new NotColumnPredicate(sub);
+        }
+        return null;
+    }
+
+    private ColumnPredicatePruningInf visitOr(RexCall call) {
+        OrColumnPredicate or = new OrColumnPredicate();
+        for (RexNode operand : call.getOperands()) {
+            ColumnPredicatePruningInf sub = operand.accept(this);
+            if (sub != null) {
+                or.add(sub);
+            } else {
+                return null;
+            }
+        }
+        return or.flat();
+    }
+
+    private ColumnPredicatePruningInf visitAnd(RexCall call) {
+        AndColumnPredicate and = new AndColumnPredicate();
+        for (RexNode operand : call.getOperands()) {
+            ColumnPredicatePruningInf sub = operand.accept(this);
+            if (sub != null) {
+                and.add(sub);
+            }
+        }
+        return and.flat();
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/InColumnPredicate.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/InColumnPredicate.java
new file mode 100644
index 000000000..16dd5ce0f
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/InColumnPredicate.java
@@ -0,0 +1,128 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.executor.columnar.pruning.predicate;
+
+import com.alibaba.polardbx.executor.columnar.pruning.index.BitMapRowGroupIndex;
+import com.alibaba.polardbx.executor.columnar.pruning.index.BloomFilterIndex;
+import com.alibaba.polardbx.executor.columnar.pruning.index.IndexPruneContext;
+import com.alibaba.polardbx.executor.columnar.pruning.index.SortKeyIndex;
+import com.alibaba.polardbx.executor.columnar.pruning.index.ZoneMapIndex;
+import com.alibaba.polardbx.optimizer.core.datatype.DataTypes;
+import com.google.common.base.Preconditions;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.jetbrains.annotations.NotNull;
+import org.roaringbitmap.RoaringBitmap;
+
+import javax.annotation.Nonnull;
+
+/**
+ * @author fangwu
+ */
+public class InColumnPredicate extends ColumnPredicate {
+
+    private final int paramIndex;
+
+    public InColumnPredicate(SqlTypeName type, int colId, int paramIndex) {
+        super(type, colId);
+        this.paramIndex = paramIndex;
+    }
+
+    @Override
+    public StringBuilder display(String[] columns, IndexPruneContext ipc) {
+        Preconditions.checkArgument(columns != null && columns.length > colId, "error column meta");
+        // get arg
+        Object arg = getArg(DataTypes.StringType, SqlTypeName.VARCHAR, paramIndex, null, ipc);
+
+        StringBuilder sb = new StringBuilder();
+        sb.append(columns[colId])
+            .append("_")
+            .append(colId)
+            .append(" IN ")
+            .append("(" + arg + ")");
+        return sb;
+    }
+
+    @Override
+    public void sortKey(@NotNull SortKeyIndex sortKeyIndex, IndexPruneContext ipc, @NotNull RoaringBitmap cur) {
+        if (!sortKeyIndex.checkSupport(colId, type)) {
+            return;
+        }
+        // get args
+        Object[] args = getArgs(sortKeyIndex.getColumnDataType(colId), type, paramIndex, ipc);
+
+        if (args == null) {
+            return;
+        }
+        RoaringBitmap rb = new RoaringBitmap();
+        for (Object arg : args) {
+            RoaringBitmap tmp = cur.clone();
+            sortKeyIndex.pruneEqual(arg, tmp);
+            rb.or(tmp);
+        }
+        cur.and(rb);
+    }
+
+    @Override
+    public void bitmap(@Nonnull @NotNull BitMapRowGroupIndex bitMapIndex, IndexPruneContext ipc,
+                       @NotNull RoaringBitmap cur) {
+        if (!bitMapIndex.checkSupport(colId, type)) {
+            return;
+        }
+        // get args
+        Object[] args = getArgs(bitMapIndex.getColumnDataType(colId), type, paramIndex, ipc);
+
+        if (args == null) {
+            return;
+        }
+
+        RoaringBitmap rb = new RoaringBitmap();
+        for (Object arg : args) {
+            RoaringBitmap tmp = cur.clone();
+            bitMapIndex.pruneEquals(colId, arg, tmp);
+            rb.or(tmp);
+        }
+        cur.and(rb);
+    }
+
+    @Override
+    public void zoneMap(@NotNull ZoneMapIndex zoneMapIndex, IndexPruneContext ipc, @NotNull RoaringBitmap cur) {
+        if (!zoneMapIndex.checkSupport(colId, type)) {
+            return;
+        }
+        // get args
+        Object[] args = getArgs(zoneMapIndex.getColumnDataType(colId), type, paramIndex, ipc);
+
+        if (args == null) {
+            return;
+        }
+
+        RoaringBitmap rb = new RoaringBitmap();
+        for (Object arg : args) {
+            RoaringBitmap tmp = cur.clone();
+            zoneMapIndex.prune(colId, arg, true, arg, true, tmp);
+            rb.or(tmp);
+        }
+        cur.and(rb);
+    }
+
+    @Override
+    public void bloomFilter(@NotNull BloomFilterIndex bloomFilterIndex, IndexPruneContext ipc,
+                            @NotNull RoaringBitmap cur) {
+        // TODO support bloom filter
+    }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/IsNullColumnPredicate.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/IsNullColumnPredicate.java
new file mode 100644
index 000000000..30c360a75
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/IsNullColumnPredicate.java
@@ -0,0 +1,75 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.columnar.pruning.predicate;
+
+import com.alibaba.polardbx.executor.columnar.pruning.index.BitMapRowGroupIndex;
+import com.alibaba.polardbx.executor.columnar.pruning.index.BloomFilterIndex;
+import com.alibaba.polardbx.executor.columnar.pruning.index.IndexPruneContext;
+import com.alibaba.polardbx.executor.columnar.pruning.index.SortKeyIndex;
+import com.alibaba.polardbx.executor.columnar.pruning.index.ZoneMapIndex;
+import com.google.common.base.Preconditions;
+import org.jetbrains.annotations.NotNull;
+import org.roaringbitmap.RoaringBitmap;
+
+import javax.annotation.Nonnull;
+
+/**
+ * @author fangwu
+ */
+public class IsNullColumnPredicate extends ColumnPredicate {
+
+    public IsNullColumnPredicate(int colId) {
+        super(null, colId);
+    }
+
+    @Override
+    public StringBuilder display(String[] columns, IndexPruneContext ipc) {
+        Preconditions.checkArgument(columns != null && columns.length > colId, "error column meta");
+
+        StringBuilder sb = new StringBuilder();
+        sb.append(columns[colId])
+            .append("_")
+            .append(colId)
+            .append(" IS NULL ");
+        return sb;
+    }
+
+    @Override
+    public void sortKey(@NotNull SortKeyIndex sortKeyIndex, IndexPruneContext ipc, @NotNull RoaringBitmap cur) {
+    }
+
+    @Override
+    public void bitmap(@Nonnull @NotNull BitMapRowGroupIndex bitMapIndex, IndexPruneContext ipc,
+                       @NotNull RoaringBitmap cur) {
+    }
+
+    @Override
+    public void zoneMap(@NotNull ZoneMapIndex zoneMapIndex, IndexPruneContext ipc, @NotNull RoaringBitmap cur) {
+        if (!zoneMapIndex.checkSupport(colId, type)) {
+            return;
+        }
+
+        zoneMapIndex.pruneNull(colId, cur);
+    }
+
+    @Override
+    public void bloomFilter(@NotNull BloomFilterIndex bloomFilterIndex, IndexPruneContext ipc,
+                            @NotNull RoaringBitmap cur) {
+        // TODO support bloom filter
+    }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/MultiColumnPredicate.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/MultiColumnPredicate.java
new file mode 100644
index 000000000..7f05fe307
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/MultiColumnPredicate.java
@@ -0,0 +1,115 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.columnar.pruning.predicate;
+
+import com.alibaba.polardbx.executor.columnar.pruning.data.PruneUtils;
+import com.alibaba.polardbx.executor.columnar.pruning.index.BitMapRowGroupIndex;
+import com.alibaba.polardbx.executor.columnar.pruning.index.BloomFilterIndex;
+import com.alibaba.polardbx.executor.columnar.pruning.index.ColumnIndex;
+import com.alibaba.polardbx.executor.columnar.pruning.index.IndexPruneContext;
+import com.alibaba.polardbx.executor.columnar.pruning.index.SortKeyIndex;
+import com.alibaba.polardbx.executor.columnar.pruning.index.ZoneMapIndex;
+import com.google.common.collect.Lists;
+import org.jetbrains.annotations.NotNull;
+import org.roaringbitmap.RoaringBitmap;
+
+import javax.annotation.Nonnull;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * @author fangwu
+ */
+public abstract class MultiColumnPredicate implements ColumnPredicatePruningInf, Iterable<ColumnPredicatePruningInf> {
+
+    private final List<ColumnPredicatePruningInf> columnPredicates = Lists.newArrayList();
+
+    public void add(ColumnPredicatePruningInf cpp) {
+        columnPredicates.add(cpp);
+    }
+
+    public void addAll(Collection<ColumnPredicatePruningInf> cpps) {
+        columnPredicates.addAll(cpps);
+    }
+
+    protected List<ColumnPredicatePruningInf> children() {
+        return columnPredicates;
+    }
+
+    public ColumnPredicatePruningInf flat() {
+        switch (columnPredicates.size()) {
+        case 0:
+            return null;
+        case 1:
+            return columnPredicates.get(0);
+        default:
+            sort();
+            return this;
+        }
+    }
+
+    private void sort() {
+        // TODO: move good prune performance expr to the front
+    }
+
+    @Override
+    public void sortKey(@NotNull SortKeyIndex sortKeyIndex, IndexPruneContext ipc,
+                        @NotNull RoaringBitmap cur) {
+        handleMulti(sortKeyIndex, ipc, (p, c, i, r) -> p.sortKey(sortKeyIndex, ipc, r), cur);
+    }
+
+    @Override
+    public void bitmap(@Nonnull @NotNull BitMapRowGroupIndex bitMapIndex, IndexPruneContext ipc,
+                       @NotNull RoaringBitmap cur) {
+        handleMulti(bitMapIndex, ipc, (p, c, i, r) -> p.bitmap(bitMapIndex, ipc, r), cur);
+    }
+
+    @Override
+    public void zoneMap(@NotNull ZoneMapIndex zoneMapIndex, IndexPruneContext ipc, @NotNull RoaringBitmap cur) {
+        handleMulti(zoneMapIndex, ipc, (p, c, i, r) -> p.zoneMap(zoneMapIndex, ipc, r), cur);
+    }
+
+    @Override
+    public void bloomFilter(@NotNull BloomFilterIndex bloomFilterIndex, IndexPruneContext ipc,
+                            @NotNull RoaringBitmap cur) {
+        handleMulti(bloomFilterIndex, ipc, (p, c, i, r) -> p.bloomFilter(bloomFilterIndex, ipc, r), cur);
+    }
+
+    protected abstract void handleMulti(ColumnIndex index, IndexPruneContext ipc,
+                                        PruneUtils.FourFunction<ColumnPredicatePruningInf, ColumnIndex, IndexPruneContext, RoaringBitmap> f,
+                                        RoaringBitmap cur);
+
+    protected StringBuilder display(String delimiter, String[] columns, IndexPruneContext ipc) {
+        StringBuilder sb = new StringBuilder();
+        children().stream()
+            .forEach(columnPredicatePruningInf ->
+                sb.append("(" + columnPredicatePruningInf.display(columns, ipc) + ")")
+                    .append(" " + delimiter + " "));
+        if (sb.length() > delimiter.length() + 2) { // trim the trailing " <delimiter> ", whatever its length
+            sb.setLength(sb.length() - delimiter.length() - 2);
+        }
+        return sb;
+    }
+
+    @NotNull
+    @Override
+    public Iterator<ColumnPredicatePruningInf> iterator() {
+        return columnPredicates.iterator();
+    }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/NotColumnPredicate.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/NotColumnPredicate.java
new file mode 100644
index 000000000..cc2433e55
--- /dev/null
+++ 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/NotColumnPredicate.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.columnar.pruning.predicate;
+
+import com.alibaba.polardbx.executor.columnar.pruning.index.BitMapRowGroupIndex;
+import com.alibaba.polardbx.executor.columnar.pruning.index.BloomFilterIndex;
+import com.alibaba.polardbx.executor.columnar.pruning.index.IndexPruneContext;
+import com.alibaba.polardbx.executor.columnar.pruning.index.SortKeyIndex;
+import com.alibaba.polardbx.executor.columnar.pruning.index.ZoneMapIndex;
+import org.jetbrains.annotations.NotNull;
+import org.roaringbitmap.RoaringBitmap;
+
+import javax.annotation.Nonnull;
+
+/**
+ * @author fangwu
+ */
+public class NotColumnPredicate implements ColumnPredicatePruningInf {
+    private final ColumnPredicatePruningInf child;
+
+    public NotColumnPredicate(ColumnPredicatePruningInf cpp) {
+        this.child = cpp;
+    }
+
+    @Override
+    public StringBuilder display(String[] columns, IndexPruneContext ipc) {
+        StringBuilder sb = new StringBuilder();
+        sb.append("NOT(")
+            .append(child.display(columns, ipc))
+            .append(")");
+        return sb;
+    }
+
+    @Override
+    public void sortKey(@NotNull SortKeyIndex sortKeyIndex, IndexPruneContext ipc, @NotNull RoaringBitmap cur) {
+        // a NOT predicate cannot be pruned via the sort key safely; keep cur unchanged
+    }
+
+    @Override
+    public void bitmap(@Nonnull @NotNull BitMapRowGroupIndex bitMapIndex, IndexPruneContext ipc,
+                       @NotNull RoaringBitmap cur) {
+        // a NOT predicate cannot be pruned via the row-group bitmap safely; keep cur unchanged
+    }
+
+    @Override
+    public void zoneMap(@NotNull ZoneMapIndex zoneMapIndex, IndexPruneContext ipc, @NotNull RoaringBitmap cur) {
+        // zone map min/max ranges cannot disprove a negation; keep cur unchanged
+    }
+
+    @Override
+    public void bloomFilter(@NotNull BloomFilterIndex bloomFilterIndex, IndexPruneContext ipc,
+                            @NotNull RoaringBitmap cur) {
+        // bloom filters answer membership, not absence; keep cur unchanged
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/OrColumnPredicate.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/OrColumnPredicate.java
new file mode 100644
index 000000000..649adb061
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columnar/pruning/predicate/OrColumnPredicate.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.executor.columnar.pruning.predicate;
+
+import com.alibaba.polardbx.executor.columnar.pruning.data.PruneUtils;
+import com.alibaba.polardbx.executor.columnar.pruning.index.ColumnIndex;
+import com.alibaba.polardbx.executor.columnar.pruning.index.IndexPruneContext;
+import org.roaringbitmap.RoaringBitmap;
+
+/**
+ * @author fangwu
+ */
+public class OrColumnPredicate extends MultiColumnPredicate {
+
+    @Override
+    protected void handleMulti(ColumnIndex index, IndexPruneContext ipc,
+                               PruneUtils.FourFunction<ColumnPredicatePruningInf, ColumnIndex, IndexPruneContext, RoaringBitmap> f,
+                               RoaringBitmap cur) {
+        RoaringBitmap rb = new RoaringBitmap();
+        for (ColumnPredicatePruningInf columnPredicatePruningInf : children()) {
+            RoaringBitmap tmp = cur.clone();
+            f.apply(columnPredicatePruningInf, index, ipc, tmp);
+            rb.or(tmp);
+        }
+        cur.and(rb);
+    }
+
+    @Override
+    public StringBuilder display(String[] columns, IndexPruneContext ipc) {
+        return super.display("OR", columns, ipc);
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columns/ColumnBackfillExecutor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columns/ColumnBackfillExecutor.java index 3069fe218..b2e9f4511 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columns/ColumnBackfillExecutor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columns/ColumnBackfillExecutor.java @@ -115,14 +115,13 @@ public ColumnBackfillExecutor(String schemaName, String tableName, long batchSiz PhyTableOperation planUpdateReturningWithMax, PhyTableOperation planUpdateReturningWithMinAndMax, PhyTableOperation planSelectSample, - PhyTableOperation planSelectMinAndMaxSample, List primaryKeysId, List primaryKeys, List selectKeys, List tableColumns, boolean allExprPushable, PhyTableOperation planUpdateSingleRow, List sourceNodes, List targetColumns, ExecutionContext ec) { - super(schemaName, tableName, tableName, batchSize, speedMin, speedLimit, parallelism, planSelectWithMax, - planSelectWithMin, planSelectWithMinAndMax, planSelectMaxPk, - planSelectSample, planSelectMinAndMaxSample, primaryKeysId); + super(schemaName, tableName, tableName, batchSize, speedMin, speedLimit, parallelism, false, null, + planSelectWithMax, planSelectWithMin, planSelectWithMinAndMax, planSelectMaxPk, + planSelectSample, primaryKeysId); this.planUpdateWithMinAndMax = planUpdateWithMinAndMax; this.planUpdateReturningWithMin = planUpdateReturningWithMin; @@ -248,10 +247,7 @@ public static ColumnBackfillExecutor create(String schemaName, String tableName, true), builder.buildUpdateReturningForColumnBackfill(tableMeta, sourceNodes, targetColumns, primaryKeys, true, true), - builder.buildSqlSelectForSample(info.getSourceTableMeta(), info.getPrimaryKeys(), info.getPrimaryKeys(), - false, false), - builder.buildSqlSelectForSample(info.getSourceTableMeta(), info.getPrimaryKeys(), info.getPrimaryKeys(), - true, true), + builder.buildSqlSelectForSample(info.getSourceTableMeta(), info.getPrimaryKeys()), info.getPrimaryKeysId(), info.getPrimaryKeys(), selectKeys, @@ -439,7 +435,7 @@ public Map<String, Set<String>> getSourcePhyTables() { /** * Check whether all dn using XProtocol */ - private static boolean isAllDnUseXDataSource(TopologyHandler topologyHandler) { + public static boolean isAllDnUseXDataSource(TopologyHandler topologyHandler) { return topologyHandler.getGroupNames().stream() .allMatch(groupName -> Optional.ofNullable(topologyHandler.get(groupName)) .map((Function) IGroupExecutor::getDataSource) diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columns/ColumnChecker.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columns/ColumnChecker.java index 8a8654d1a..b723ca6db 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columns/ColumnChecker.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/columns/ColumnChecker.java @@ -287,10 +287,17 @@ private void forEachPhyTable(String dbIndex, String tbIndex, ExecutionContext ba return result; }; + ParamManager.setVal( + baseEc.getParamManager().getProps(), + ConnectionParams.SOCKET_TIMEOUT, + Integer.toString(1000 * 60 * 60 * 24 * 7), + true + ); + List>> result = GsiUtils.retryOnException(() -> GsiUtils.wrapWithSingleDbTrx(tm, baseEc, select), e -> Boolean.TRUE, - (e, retryCount) -> errConsumer(selectPlan, baseEc, e, retryCount)); + (e, retryCount) -> errConsumer(plan, baseEc, e, retryCount)); if (!result.isEmpty()) { throw GeneralUtil.nestedException( diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/common/ExecutorContext.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/common/ExecutorContext.java index 8413ca3fc..2c60cce51 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/common/ExecutorContext.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/common/ExecutorContext.java @@ -16,11 +16,14 @@ package com.alibaba.polardbx.executor.common; +import com.alibaba.polardbx.common.IInnerConnectionManager; import com.alibaba.polardbx.config.ConfigDataMode; +import com.alibaba.polardbx.executor.gms.ColumnarManager; import com.alibaba.polardbx.executor.gsi.GsiManager; import com.alibaba.polardbx.executor.repo.RepositoryHolder; import com.alibaba.polardbx.executor.spi.ITopologyExecutor; import com.alibaba.polardbx.executor.spi.ITransactionManager; +import com.alibaba.polardbx.gms.node.NodeStatusManager; import com.alibaba.polardbx.optimizer.config.schema.InformationSchema; import com.alibaba.polardbx.optimizer.config.schema.MetaDbSchema; import com.alibaba.polardbx.optimizer.config.server.IServerConfigManager; @@ -28,6 +31,7 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Consumer; /** * @author mengshi.sunmengshi 2013-12-4 下午6:16:32 @@ -42,6 +46,9 @@ public class ExecutorContext { private AbstractSequenceManager sequenceManager = null; private StorageInfoManager storageInfoManager = null; private GsiManager gsiManager = null; + private NodeStatusManager nodeStatusManager; + private Consumer reloadColumnarManager; + private IInnerConnectionManager innerConnectionManager; private static Map executorContextMap = new ConcurrentHashMap(); @@ -141,4 +148,22 @@ public void setGsiManager(GsiManager gsiManager) { public final static Map getExecutorContextMap() { return executorContextMap; } + + public void reloadColumnarManager() { + if (null != reloadColumnarManager) { + reloadColumnarManager.accept(ColumnarManager.getInstance()); + } + } + + public void setReloadColumnarManager(Consumer reloadColumnarManager) { + this.reloadColumnarManager = reloadColumnarManager; + } + + public IInnerConnectionManager getInnerConnectionManager() { + return innerConnectionManager; + } + + public void setInnerConnectionManager(IInnerConnectionManager innerConnectionManager) { + this.innerConnectionManager = innerConnectionManager; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/common/GsiStatisticsManager.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/common/GsiStatisticsManager.java index 2c0f4c958..be091ed6d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/common/GsiStatisticsManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/common/GsiStatisticsManager.java @@ -192,7 +192,7 @@ public void increaseGsiVisitFrequency(String schema, String gsi) { gsiRecorder.increase(); } - public boolean enableGsiStatisticsCollection() { + public static boolean enableGsiStatisticsCollection() { String enable = MetaDbInstConfigManager.getInstance() .getInstProperty(ConnectionProperties.GSI_STATISTICS_COLLECTION, diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/common/SequenceLoadFromDBManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/common/SequenceLoadFromDBManager.java index 3a1eb7556..a382757b5 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/common/SequenceLoadFromDBManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/common/SequenceLoadFromDBManager.java @@ -32,6 +32,7 @@ import com.alibaba.polardbx.executor.sync.SequenceSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.utils.ExecUtils; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.util.MetaDbUtil; import com.alibaba.polardbx.gms.util.SeqTypeUtil; import com.alibaba.polardbx.optimizer.OptimizerContext; @@ -586,7 +587,8 @@ private void updateGroupSeqValue(GroupSequence groupSeq) { long minValueInAllRanges = getMinValueFromAllRanges(seqName); boolean needSync = groupSeq.updateValueRegularly(minValueInAllRanges); if (needSync) { - SyncManagerHelper.sync(new SequenceSyncAction(schemaName, seqName), schemaName); + SyncManagerHelper.sync(new SequenceSyncAction(schemaName, seqName), schemaName, + SyncScope.ALL); } } } catch (Throwable t) { @@ -598,7 +600,8 @@ private long getMinValueFromAllRanges(String seqName) { long minValue = DEFAULT_INNER_STEP; try { List>> resultSets = - SyncManagerHelper.sync(new InspectGroupSeqMinValueSyncAction(schemaName, seqName), schemaName); + SyncManagerHelper.sync(new InspectGroupSeqMinValueSyncAction(schemaName, seqName), schemaName, + SyncScope.ALL); if (resultSets != null && resultSets.size() > 0) { for (List> resultSet : resultSets) { if (resultSet != null && resultSet.size() > 0) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/common/StorageInfoManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/common/StorageInfoManager.java index 65a4e008d..67c21f133 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/common/StorageInfoManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/common/StorageInfoManager.java @@ -30,6 +30,7 @@ import com.alibaba.polardbx.executor.spi.IGroupExecutor; import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; import com.alibaba.polardbx.gms.topology.DbGroupInfoManager; +import com.alibaba.polardbx.gms.topology.SystemDbHelper; import com.alibaba.polardbx.rpc.XConfig; import com.alibaba.polardbx.rpc.compatible.XDataSource; import com.google.common.base.Preconditions; @@ -75,6 +76,7 @@ public class StorageInfoManager extends AbstractLifecycle { private volatile boolean supportOpenSSL; private volatile boolean supportSharedReadView; private volatile boolean supportsReturning; + private volatile boolean supportsBackfillReturning; 
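+ // supportsBackfillReturning: true only when every storage node exposes the native dbms_trans.backfill procedure (probed via dbms_admin.show_native_procedure in checkSupportBackfillReturning below)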
private volatile boolean supportsAlterType; private boolean readOnly; private boolean lowerCaseTableNames; @@ -112,6 +114,10 @@ public class StorageInfoManager extends AbstractLifecycle { private volatile boolean supportXRpc = false; + private volatile boolean supportMarkDistributed = false; + + private volatile boolean supportXOptForPhysicalBackfill = false; + private boolean support2pcOpt = false; public StorageInfoManager(TopologyHandler topologyHandler) { @@ -119,6 +125,7 @@ public StorageInfoManager(TopologyHandler topologyHandler) { supportXA = false; supportsBloomFilter = false; supportsReturning = false; + supportsBackfillReturning = false; Preconditions.checkNotNull(topologyHandler); this.topologyHandler = topologyHandler; @@ -311,6 +318,45 @@ public static boolean checkSupportReturning(DataSource dataSource) { } } + + public static boolean checkSupportBackfillReturning(DataSource dataSource) { + if (!ConfigDataMode.isPolarDbX() || XConfig.GALAXY_X_PROTOCOL) { + return false; + } + try (Connection conn = dataSource.getConnection(); + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("call dbms_admin.show_native_procedure()")) { + boolean supportReturning = false; + while (rs.next()) { + final String schemaName = rs.getString(1); + final String procName = rs.getString(2); + supportReturning |= "dbms_trans".equalsIgnoreCase(schemaName) && "backfill".equalsIgnoreCase(procName); + if (supportReturning) { + break; + } + } + return supportReturning; + } catch (SQLException ex) { + final boolean ER_SP_DOES_NOT_EXIST = + "42000".equalsIgnoreCase(ex.getSQLState()) && 1305 == ex.getErrorCode() && ex.getMessage() + .contains("does not exist"); + if (ER_SP_DOES_NOT_EXIST) { + logger.warn("PROCEDURE dbms_admin.show_native_procedure does not exist"); + return false; + } + + final boolean ER_PLUGGABLE_PROTOCOL_COMMAND_NOT_SUPPORTED = + "HY000".equalsIgnoreCase(ex.getSQLState()) && 3130 == ex.getErrorCode() && ex.getMessage() .contains("Command not supported by pluggable protocols"); + if (ER_PLUGGABLE_PROTOCOL_COMMAND_NOT_SUPPORTED) { + logger.warn("Do not support call dbms_admin procedures within XProtocol"); + return false; + } + + throw new TddlRuntimeException(ErrorCode.ERR_OTHER, ex, + "Failed to check backfill returning support: " + ex.getMessage()); + } + } + public static boolean checkSupportAlterType(DataSource dataSource) { try (Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); @@ -440,6 +486,17 @@ private static boolean checkSupportXRpc(IDataSource dataSource) { } } + + private static boolean checkSupportMarkDistributed(IDataSource dataSource) { + try (Connection conn = dataSource.getConnection(); + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SHOW VARIABLES LIKE 'innodb_mark_distributed'")) { + return rs.next(); + } catch (SQLException ex) { + throw new TddlRuntimeException(ErrorCode.ERR_OTHER, ex, + "Failed to check innodb_mark_distributed support: " + ex.getMessage()); + } + } + private static boolean checkSupport2pcOpt(IDataSource dataSource) { try (Connection conn = dataSource.getConnection(); Statement stmt = conn.createStatement(); @@ -465,6 +522,17 @@ public static int getLowerCaseTableNames(IDataSource dataSource) { } } + + private static boolean checkSupportXOptForPhysicalBackfill(IDataSource dataSource) { + try (Connection conn = dataSource.getConnection(); + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery("SHOW VARIABLES LIKE 'physical_backfill_opt'")) { + 
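// SHOW VARIABLES yields (Variable_name, Value) rows: the optimization is usable only when the variable exists on this DN and its Value reads ON + 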
return rs.next() && StringUtils.equalsIgnoreCase(rs.getString(2), "ON"); + } catch (SQLException ex) { + throw new TddlRuntimeException(ErrorCode.ERR_OTHER, ex, + "Failed to check x-protocol for physical backfill support: " + ex.getMessage()); + } + } + @Override protected void doInit() { boolean tmpSupportXA = true; @@ -475,6 +543,7 @@ protected void doInit() { boolean tmpSupportMdlDeadlockDetection = true; boolean tmpSupportsBloomFilter = true; boolean tmpSupportsReturning = true; + boolean tmpSupportsBackfillReturning = true; boolean tmpSupportsAlterType = true; boolean tmpLowerCaseTableNames = true; boolean tmpSupportOpenSSL = true; @@ -490,6 +559,10 @@ protected void doInit() { boolean tmpSupportChangeSet = true; boolean tmpSupportXOptForAutoSp = true; boolean tmpSupportXRpc = true; + boolean tmpSupportXOptForPhysicalBackfill = true; + boolean tmpSupportMarkDistributed = true; + + boolean storageInfoEmpty = true; boolean tmpSupport2pcOpt = true; for (Group group : topologyHandler.getMatrix().getGroups()) { if (group.getType() != GroupType.MYSQL_JDBC || !DbGroupInfoManager.isNormalGroup(group)) { @@ -500,6 +573,7 @@ protected void doInit() { final StorageInfo storageInfo = initStorageInfo(group, groupExecutor.getDataSource()); if (storageInfo != null) { + storageInfoEmpty = false; tmpSupportXA &= supportXA(storageInfo); lessMysql56 = lessMysql56 || lessMysql56Version(storageInfo); tmpSupportTso &= storageInfo.supportTso; @@ -514,6 +588,7 @@ protected void doInit() { tmpSupportOpenSSL &= storageInfo.supportOpenSSL; tmpSupportHyperLogLog &= storageInfo.supportHyperLogLog; tmpSupportsReturning &= storageInfo.supportsReturning; + tmpSupportsBackfillReturning &= storageInfo.supportsBackfillReturning; tmpSupportsAlterType &= storageInfo.supportsAlterType; tmpLowerCaseTableNames &= enableLowerCaseTableNames(storageInfo); tmpSupportSharedReadView &= storageInfo.supportSharedReadView; @@ -523,16 +598,19 @@ protected void doInit() { tmpSupportChangeSet &= storageInfo.supportChangeSet; tmpSupportXOptForAutoSp &= storageInfo.supportXOptForAutoSp; tmpSupportXRpc &= storageInfo.supportXRpc; + tmpSupportXOptForPhysicalBackfill &= storageInfo.supportXOptForPhysicalBackfill; + tmpSupportMarkDistributed &= storageInfo.supportMarkDistributed; tmpSupport2pcOpt &= storageInfo.support2pcOpt; } } - this.readOnly = !ConfigDataMode.needInitMasterModeResource(); + this.readOnly = !ConfigDataMode.needInitMasterModeResource() && !ConfigDataMode.isFastMock(); // Do not enable XA transaction in read-only instance this.supportXA = tmpSupportXA && !readOnly; this.supportsBloomFilter = tmpSupportsBloomFilter; this.supportsReturning = tmpSupportsReturning; + this.supportsBackfillReturning = tmpSupportsBackfillReturning; this.supportsAlterType = tmpSupportsAlterType; this.supportTso = tmpSupportTso && (metaDbUsesXProtocol() || tmpRDS80); this.supportTsoHeartbeat = tmpSupportTsoHeartbeat && metaDbUsesXProtocol(); @@ -553,9 +631,13 @@ protected void doInit() { this.supportChangeSet = tmpSupportChangeSet; this.supportXOptForAutoSp = tmpSupportXOptForAutoSp && tmpSupportXRpc; this.supportXRpc = tmpSupportXRpc; + this.supportXOptForPhysicalBackfill = tmpSupportXOptForPhysicalBackfill && tmpSupportXRpc; + this.supportMarkDistributed = tmpSupportMarkDistributed; this.support2pcOpt = tmpSupport2pcOpt; - InstanceVersion.setMYSQL80(this.isMysql80); + if (!storageInfoEmpty) { + InstanceVersion.setMYSQL80(this.isMysql80); + } } private boolean metaDbUsesXProtocol() { @@ -602,9 +684,15 @@ protected void doDestroy() { supportXA = 
false; supportsBloomFilter = false; supportsReturning = false; + supportsBackfillReturning = false; } private StorageInfo initStorageInfo(Group group, IDataSource dataSource) { + + if (!ConfigDataMode.needDNResource() && !SystemDbHelper.isDBBuildInExceptCdc(group.getSchemaName())) { + return null; + } + if (group.getType() != GroupType.MYSQL_JDBC) { return null; } @@ -762,6 +850,14 @@ public boolean supportsReturning() { return supportsReturning; } + public boolean supportsBackfillReturning() { + if (!isInited()) { + init(); + } + + return supportsBackfillReturning; + } + public boolean supportsAlterType() { if (!isInited()) { init(); @@ -806,6 +902,20 @@ public boolean supportXRpc() { return supportXRpc; } + public boolean isSupportMarkDistributed() { + if (!isInited()) { + init(); + } + return supportMarkDistributed; + } + + public boolean supportXOptForPhysicalBackfill() { + if (!isInited()) { + init(); + } + return supportXOptForPhysicalBackfill; + } + public boolean support2pcOpt() { if (!isInited()) { init(); @@ -824,6 +934,7 @@ public static class StorageInfo { public final boolean supportLizard1PCTransaction; public final boolean supportsBloomFilter; public final boolean supportsReturning; + public final boolean supportsBackfillReturning; public final boolean supportsAlterType; public final int lowerCaseTableNames; public final boolean supportPerformanceSchema; @@ -838,6 +949,8 @@ public static class StorageInfo { boolean supportChangeSet; boolean supportXOptForAutoSp; boolean supportXRpc; + boolean supportXOptForPhysicalBackfill; + boolean supportMarkDistributed; boolean support2pcOpt; public StorageInfo( @@ -850,6 +963,7 @@ public StorageInfo( boolean supportLizard1PCTransaction, boolean supportsBloomFilter, boolean supportsReturning, + boolean supportsBackfillReturning, boolean supportsAlterType, int lowerCaseTableNames, boolean supportPerformanceSchema, @@ -864,6 +978,8 @@ public StorageInfo( boolean supportChangeSet, boolean supportXOptForAutoSp, boolean supportXRpc, + boolean supportXOptForPhysicalBackfill, + boolean supportMarkDistributed, boolean support2pcOpt ) { this.version = version; @@ -875,6 +991,7 @@ public StorageInfo( this.supportLizard1PCTransaction = supportLizard1PCTransaction; this.supportsBloomFilter = supportsBloomFilter; this.supportsReturning = supportsReturning; + this.supportsBackfillReturning = supportsBackfillReturning; this.supportsAlterType = supportsAlterType; this.lowerCaseTableNames = lowerCaseTableNames; this.supportPerformanceSchema = supportPerformanceSchema; @@ -889,6 +1006,8 @@ public StorageInfo( this.supportChangeSet = supportChangeSet; this.supportXOptForAutoSp = supportXOptForAutoSp; this.supportXRpc = supportXRpc; + this.supportXOptForPhysicalBackfill = supportXOptForPhysicalBackfill && supportXRpc; + this.supportMarkDistributed = supportMarkDistributed; this.support2pcOpt = support2pcOpt; } @@ -906,6 +1025,7 @@ public static StorageInfo create(IDataSource dataSource) { false, false, false, + false, 1, false, false, @@ -919,6 +1039,8 @@ public static StorageInfo create(IDataSource dataSource) { false, false, false, + false, + false, false ); } @@ -933,6 +1055,7 @@ public static StorageInfo create(IDataSource dataSource) { Optional polarxUDFInfo = PolarxUDFInfo.build(dataSource); boolean supportsBloomFilter = polarxUDFInfo.map(PolarxUDFInfo::supportsBloomFilter).orElse(false); boolean supportsReturning = checkSupportReturning(dataSource); + boolean supportsBackfillReturning = checkSupportBackfillReturning(dataSource); boolean 
supportsAlterType = checkSupportAlterType(dataSource); boolean supportCtsTransaction = checkSupportCtsTransaction(dataSource); boolean supportAsyncCommit = checkSupportAsyncCommit(dataSource); @@ -949,6 +1072,8 @@ public static StorageInfo create(IDataSource dataSource) { boolean supportChangeSet = polarxUDFInfo.map(PolarxUDFInfo::supportChangeSet).orElse(false); boolean supportXOptForAutoSp = checkSupportXOptForAutoSp(dataSource); boolean supportXRpc = checkSupportXRpc(dataSource); + boolean supportXoptForPhysicalBackfill = checkSupportXOptForPhysicalBackfill(dataSource); + boolean supportMarkDistributed = checkSupportMarkDistributed(dataSource); boolean support2pcOpt = checkSupport2pcOpt(dataSource); return new StorageInfo( @@ -961,6 +1086,7 @@ public static StorageInfo create(IDataSource dataSource) { supportLizard1PCTransaction, supportsBloomFilter, supportsReturning, + supportsBackfillReturning, supportsAlterType, lowerCaseTableNames, supportPerformanceSchema, @@ -975,6 +1101,8 @@ public static StorageInfo create(IDataSource dataSource) { supportChangeSet, supportXOptForAutoSp, supportXRpc, + supportXoptForPhysicalBackfill, + supportMarkDistributed, support2pcOpt); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/common/TopologyHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/common/TopologyHandler.java index 942f56a66..667cdec86 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/common/TopologyHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/common/TopologyHandler.java @@ -37,6 +37,7 @@ import com.alibaba.polardbx.gms.topology.DbGroupInfoAccessor; import com.alibaba.polardbx.gms.topology.DbGroupInfoManager; import com.alibaba.polardbx.gms.topology.DbGroupInfoRecord; +import com.alibaba.polardbx.gms.topology.DbTopologyManager; import com.alibaba.polardbx.gms.util.GroupInfoUtil; import com.alibaba.polardbx.group.config.Weight; import com.alibaba.polardbx.group.jdbc.TGroupDataSource; @@ -64,7 +65,6 @@ public class TopologyHandler extends AbstractLifecycle { public final static Logger logger = LoggerFactory.getLogger(TopologyHandler.class); - private final Map executorMap = new ConcurrentHashMap(); @@ -117,7 +117,8 @@ protected void doInit() { // refresh the topology for matrix refresh(); } catch (Throwable ex) { - logger.error("matrix topology init error,file is: appname is: " + this.getAppName(), ex); + logger.error("matrix topology init error, appname is: " + + this.getAppName(), ex); throw GeneralUtil.nestedException(ex); } MetaDbConfigManager.getInstance().register(MetaDbDataIdBuilder.getDbTopologyDataId(schemaName), null); @@ -230,11 +231,13 @@ private void mergeMatrix(Matrix matrix) { } List grpListToClose = new ArrayList<>(); + List grpNameListToClose = new ArrayList<>(); for (Group oldGroup : oldGroups) { boolean found = newGrpNameSet.contains(oldGroup.getName().toUpperCase()); if (!found) { // 关闭老的group grpListToClose.add(oldGroup); + grpNameListToClose.add(oldGroup.getName()); } } for (Group oldGroup : oldScaleOutGroups) { @@ -242,6 +245,7 @@ private void mergeMatrix(Matrix matrix) { if (!found) { // 关闭老的scale out group grpListToClose.add(oldGroup); + grpNameListToClose.add(oldGroup.getName()); } } loadAllTransGroupList(); @@ -259,6 +263,8 @@ private void mergeMatrix(Matrix matrix) { logger.error(e); } } + DbTopologyManager.refreshGroupKeysIntoTopologyMapping(this.allTransGroupList, grpNameListToClose, + matrix.getSchemaName()); } } @@ -569,7 +575,6 @@ protected 
void loadAllTransGroupList() { LoggerInit.TDDL_DYNAMIC_CONFIG.info(logMsg); this.allTransGroupList = groupNames; - if (topologyChanger != null) { topologyChanger.onTopology(this); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/corrector/Checker.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/corrector/Checker.java index 255d9ae1b..c0a588dac 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/corrector/Checker.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/corrector/Checker.java @@ -200,10 +200,11 @@ public class Checker { // Flags. private boolean inBackfill = false; private long jobId = 0; // Set to jobId if in async ddl task or generate one. + protected boolean useBinary; public Checker(String schemaName, String tableName, String indexName, TableMeta primaryTableMeta, TableMeta gsiTableMeta, long batchSize, long speedMin, long speedLimit, long parallelism, - SqlSelect.LockMode primaryLock, SqlSelect.LockMode gsiLock, + boolean useBinary, SqlSelect.LockMode primaryLock, SqlSelect.LockMode gsiLock, PhyTableOperation planSelectWithMaxPrimary, PhyTableOperation planSelectWithMaxGsi, PhyTableOperation planSelectWithMinAndMaxPrimary, PhyTableOperation planSelectWithMinAndMaxGsi, SqlSelect planSelectWithInTemplate, PhyTableOperation planSelectWithIn, @@ -220,6 +221,7 @@ public Checker(String schemaName, String tableName, String indexName, TableMeta this.t = new Throttle(speedMin, speedLimit, schemaName); this.nowSpeedLimit = speedLimit; this.parallelism = parallelism; + this.useBinary = useBinary; this.primaryLock = primaryLock; this.gsiLock = gsiLock; this.planSelectWithMaxPrimary = planSelectWithMaxPrimary; @@ -346,8 +348,8 @@ public static void validate(String schemaName, String tableName, String indexNam } public static Checker create(String schemaName, String tableName, String indexName, long batchSize, long speedMin, - long speedLimit, - long parallelism, SqlSelect.LockMode primaryLock, SqlSelect.LockMode gsiLock, + long speedLimit, long parallelism, boolean useBinary, + SqlSelect.LockMode primaryLock, SqlSelect.LockMode gsiLock, ExecutionContext checkerEc) { // Build select plan // Caution: This should get latest schema to check column which newly added. 
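
The `useBinary` flag threaded through `Checker.create(...)`, `PhysicalPlanBuilder`, and `row2objects(...)` in this file exists because comparing checker rows through a charset decode is lossy. A minimal, self-contained sketch of the failure mode (plain JDK only; `equalAsStrings`/`equalAsBinary` are illustrative names, not the checker's API):

```java
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class BinaryCompareDemo {
    static boolean equalAsStrings(byte[] a, byte[] b) {
        // Malformed sequences collapse to U+FFFD while decoding, so two
        // distinct byte arrays can decode to the same string.
        return new String(a, StandardCharsets.UTF_8).equals(new String(b, StandardCharsets.UTF_8));
    }

    static boolean equalAsBinary(byte[] a, byte[] b) {
        return Arrays.equals(a, b); // exact, charset-independent comparison
    }

    public static void main(String[] args) {
        byte[] x = {(byte) 0xC3, (byte) 0x28}; // invalid UTF-8: lead byte without continuation
        byte[] y = {(byte) 0xE2, (byte) 0x28}; // a different invalid sequence
        System.out.println(equalAsStrings(x, y)); // true  -- false positive
        System.out.println(equalAsBinary(x, y));  // false -- correct
    }
}
```

The raw `byte[]` side of each checker pair already compares exactly; passing `useBinary` down into `Transformer.buildColumnParam` extends the same binary-exact treatment to the values used for sorting and correction.
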
@@ -368,7 +370,7 @@ public static Checker create(String schemaName, String tableName, String indexNa } Extractor.ExtractorInfo info = Extractor.buildExtractorInfo(checkerEc, schemaName, tableName, indexName, false); - final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, checkerEc); + final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, useBinary, checkerEc); final Pair selectWithIn = builder .buildSelectWithInForChecker(baseTableMeta, info.getTargetTableColumns(), info.getPrimaryKeys(), @@ -398,6 +400,7 @@ public static Checker create(String schemaName, String tableName, String indexNa speedMin, speedLimit, parallelism, + useBinary, primaryLock, gsiLock, builder.buildSelectForBackfill(info.getSourceTableMeta(), info.getTargetTableColumns(), @@ -740,7 +743,7 @@ public boolean recheckRow(String dbIndex, String phyTable, ExecutionContext base try { Row row; while ((row = cursor.next()) != null) { - recheckBaseRows.add(row2objects(row)); + recheckBaseRows.add(row2objects(row, useBinary)); } } finally { cursor.close(new ArrayList<>()); @@ -771,7 +774,7 @@ public boolean recheckRow(String dbIndex, String phyTable, ExecutionContext base try { Row checkRow; while ((checkRow = checkCursor.next()) != null) { - recheckCheckRows.add(row2objects(checkRow)); + recheckCheckRows.add(row2objects(checkRow, useBinary)); } } finally { checkCursor.close(new ArrayList<>()); @@ -834,7 +837,7 @@ private List getUpperBound(ExecutionContext baseEc, String dbI Row row; while ((row = cursor.next()) != null) { // Fetch first line. - rowData = null == rowData ? row2objects(row) : rowData; + rowData = null == rowData ? row2objects(row, useBinary) : rowData; } } finally { cursor.close(new ArrayList<>()); @@ -911,7 +914,7 @@ private void foreachPhyTableCheck(String logTblOrIndexTbl, String dbIndex, Strin try { Row row; while ((row = cursor.next()) != null) { - baseRows.add(row2objects(row)); + baseRows.add(row2objects(row, useBinary)); } } finally { cursor.close(new ArrayList<>()); @@ -939,7 +942,7 @@ private void foreachPhyTableCheck(String logTblOrIndexTbl, String dbIndex, Strin try { Row checkRow; while ((checkRow = checkCursor.next()) != null) { - checkRows.add(row2objects(checkRow)); + checkRows.add(row2objects(checkRow, useBinary)); } } finally { checkCursor.close(new ArrayList<>()); @@ -1220,10 +1223,14 @@ public void check(ExecutionContext baseEc, CheckerCallback cb) { // Convert data to ParameterContext(for sort and correction(insert)) and raw // bytes(for compare). 
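+ // useBinary is forwarded to Transformer.buildColumnParam below so the ParameterContext side of each pair can be materialized in binary form; the byte[] side is always the raw value. 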
public static List> row2objects(Row rowSet) { + return row2objects(rowSet, false); + } + + public static List> row2objects(Row rowSet, boolean useBinary) { final List columns = rowSet.getParentCursorMeta().getColumns(); final List> result = new ArrayList<>(columns.size()); for (int i = 0; i < columns.size(); i++) { - result.add(new Pair<>(Transformer.buildColumnParam(rowSet, i), rowSet.getBytes(i))); + result.add(new Pair<>(Transformer.buildColumnParam(rowSet, i, useBinary), rowSet.getBytes(i))); } return result; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/corrector/Reporter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/corrector/Reporter.java index 3d0d167a3..840fb38e6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/corrector/Reporter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/corrector/Reporter.java @@ -17,7 +17,6 @@ package com.alibaba.polardbx.executor.corrector; import com.alibaba.fastjson.JSON; -import com.google.common.collect.ImmutableList; import com.alibaba.polardbx.common.jdbc.ParameterContext; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.utils.GeneralUtil; @@ -26,6 +25,7 @@ import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.gsi.CheckerManager; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.google.common.collect.ImmutableList; import org.apache.calcite.sql.SqlSelect; import org.apache.calcite.util.Pair; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/Cursor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/Cursor.java index 5c58523d7..c444e89ba 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/Cursor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/Cursor.java @@ -16,9 +16,9 @@ package com.alibaba.polardbx.executor.cursor; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; import com.alibaba.polardbx.optimizer.core.row.Row; +import com.google.common.util.concurrent.ListenableFuture; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/impl/AsyncCacheCursor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/impl/AsyncCacheCursor.java index c1b8a35c7..64db38b13 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/impl/AsyncCacheCursor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/impl/AsyncCacheCursor.java @@ -113,7 +113,7 @@ public AsyncCacheCursor( this.context = context; } - void doInit(){ + void doInit() { if (!inited) { synchronized (this) { if (!inited) { @@ -125,16 +125,16 @@ void doInit(){ } - void startAsyncWrite(){ + void startAsyncWrite() { final Map mdcContext = MDC.getCopyOfContextMap(); writeFuture = context.getExecutorService().submitListenableFuture( context.getSchemaName(), context.getTraceId(), -1, () -> { MDC.setContextMap(mdcContext); - try{ + try { cacheAllRows(); - } catch (Throwable e){ + } catch (Throwable e) { if (throwable == null) { throwable = e; } @@ -146,7 +146,7 @@ void startAsyncWrite(){ }, context.getRuntimeStatistics()); } - void cacheAllRows(){ + void cacheAllRows() { Row currentRow; while ((currentRow = cursor.next()) != null && throwable == null) { try { @@ -189,14 +189,14 @@ 
void cacheAllRows(){ cursor = null; } - void writeProducer(long rows){ + void writeProducer(long rows) { synchronized (lock) { flushRowsNum += rows; lock.notifyAll(); } } - void writeFinish(){ + void writeFinish() { synchronized (lock) { writeFinished = true; lock.notifyAll(); @@ -224,7 +224,6 @@ long readConsumer() { return canRead; } - private void createBlockBuilders() { if (blockBuilders == null) { // Create all block builders by default @@ -296,7 +295,6 @@ public Row next() { currentPos = 0; } - } else { if (!bufferRows.isEmpty()) { return bufferRows.remove(0); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/impl/CdcResultCursor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/impl/CdcResultCursor.java index b03464470..5ea601c49 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/impl/CdcResultCursor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/impl/CdcResultCursor.java @@ -16,18 +16,18 @@ package com.alibaba.polardbx.executor.cursor.impl; -import java.util.Iterator; -import java.util.List; -import java.util.function.Function; - -import com.alibaba.polardbx.rpc.cdc.BinaryLog; -import com.alibaba.polardbx.rpc.cdc.BinlogEvent; - import com.alibaba.polardbx.optimizer.core.row.ArrayRow; import com.alibaba.polardbx.optimizer.core.row.Row; +import com.alibaba.polardbx.rpc.cdc.BinaryLog; +import com.alibaba.polardbx.rpc.cdc.BinlogEvent; +import com.alibaba.polardbx.rpc.cdc.FullBinaryLog; import io.grpc.Channel; import io.grpc.ManagedChannel; +import java.util.Iterator; +import java.util.List; +import java.util.function.Function; + public class CdcResultCursor extends ArrayResultCursor { private Function function = obj -> { @@ -41,6 +41,19 @@ public class CdcResultCursor extends ArrayResultCursor { ((BinlogEvent) obj).getEventType(), ((BinlogEvent) obj).getServerId(), ((BinlogEvent) obj).getEndLogPos(), ((BinlogEvent) obj).getInfo()}); + } else if (obj instanceof FullBinaryLog) { + return new ArrayRow(getMeta(), new Object[] { + ((FullBinaryLog) obj).getLogName(), + ((FullBinaryLog) obj).getFileSize(), + ((FullBinaryLog) obj).getCreatedTime(), + ((FullBinaryLog) obj).getLastModifyTime(), + ((FullBinaryLog) obj).getFirstEventTime(), + ((FullBinaryLog) obj).getLastEventTime(), + ((FullBinaryLog) obj).getLastTso(), + ((FullBinaryLog) obj).getUploadStatus(), + ((FullBinaryLog) obj).getFileLocation(), + ((FullBinaryLog) obj).getExt() + }); } return null; }; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/impl/GatherCursor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/impl/GatherCursor.java index 346ea65ca..822a2a67e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/impl/GatherCursor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/impl/GatherCursor.java @@ -19,6 +19,7 @@ import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.executor.cursor.AbstractCursor; import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.utils.ExecUtils; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.row.Row; import com.alibaba.polardbx.optimizer.utils.FunctionUtils; @@ -39,7 +40,7 @@ public class GatherCursor extends AbstractCursor { private int currentIndex = 0; public GatherCursor(List cursors, ExecutionContext executionContext) { - super(false); + 
super(ExecUtils.isOperatorMetricEnabled(executionContext)); this.executionContext = executionContext; this.cursors = cursors; this.returnColumns = cursors.get(0).getReturnColumns(); @@ -83,4 +84,8 @@ private void switchCursor() { currentIndex++; currentCursor = null; } + + public List getCursors() { + return cursors; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/impl/MultiCursorAdapter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/impl/MultiCursorAdapter.java deleted file mode 100644 index 5f012f0a8..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/impl/MultiCursorAdapter.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.cursor.impl; - -import com.alibaba.polardbx.common.utils.GeneralUtil; -import com.alibaba.polardbx.executor.cursor.AbstractCursor; -import com.alibaba.polardbx.executor.cursor.Cursor; -import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; -import com.alibaba.polardbx.optimizer.core.row.Row; - -import java.util.List; - -/** - * Created by chuanqin on 17/8/3. - */ -public class MultiCursorAdapter extends AbstractCursor { - - public List getSubCursors() { - return subCursors; - } - - private List subCursors; - - public MultiCursorAdapter(List subCursors) { - super(false); - this.subCursors = subCursors; - } - - public static Cursor wrap(List cursors) { - return new MultiCursorAdapter(cursors); - } - - @Override - public Row doNext() { - if (subCursors.size() != 1) { - throw GeneralUtil.nestedException("cannot be invoked directly"); - } - return subCursors.get(0).next(); - } - - @Override - public List doClose(List exs) { - for (Cursor cursor : subCursors) { - exs = cursor.close(exs); - } - return exs; - } - - @Override - public List getReturnColumns() { - return subCursors.get(0).getReturnColumns(); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/impl/OutFileStatisticsCursor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/impl/OutFileStatisticsCursor.java index e8c06c109..66f1090c9 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/impl/OutFileStatisticsCursor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/cursor/impl/OutFileStatisticsCursor.java @@ -45,8 +45,10 @@ import com.alibaba.polardbx.optimizer.PlannerContext; import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; import com.alibaba.polardbx.optimizer.config.table.Field; +import com.alibaba.polardbx.optimizer.config.table.GlobalIndexMeta; import com.alibaba.polardbx.optimizer.config.table.SchemaManager; import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticManager; import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticUtils; import 
com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.CursorMeta; @@ -62,8 +64,9 @@ import com.alibaba.polardbx.optimizer.memory.MemoryType; import com.alibaba.polardbx.optimizer.parse.FastsqlUtils; import com.alibaba.polardbx.optimizer.spill.SpillMonitor; +import com.alibaba.polardbx.optimizer.sql.sql2rel.TddlSqlToRelConverter; import com.alibaba.polardbx.optimizer.utils.CalciteUtils; -import com.alibaba.polardbx.optimizer.view.DrdsSystemTableView; +import com.alibaba.polardbx.optimizer.view.SystemTableView; import com.alibaba.polardbx.optimizer.view.ViewManager; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; @@ -74,6 +77,7 @@ import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.sql.OutFileParams; import org.apache.calcite.sql.type.SqlTypeName; +import org.apache.commons.collections.CollectionUtils; import org.eclipse.jetty.util.StringUtil; import java.sql.Connection; @@ -105,6 +109,8 @@ public class OutFileStatisticsCursor extends OutFileCursor { private static Set ignoreSessionVariables = ImmutableSet.builder() .add(ConnectionProperties.SERVER_ID.toLowerCase()) .add(ConnectionProperties.CONN_POOL_PROPERTIES.toLowerCase()) + .add(ConnectionProperties.VERSION_PREFIX.toLowerCase()) + .add(ConnectionProperties.TRX_LOG_METHOD.toLowerCase()) .build(); /** * whether to print catalog only @@ -136,6 +142,12 @@ public class OutFileStatisticsCursor extends OutFileCursor { private static final String SET_SESSION = "set session %s = '%s'"; + private static final String SKIP_COLUMNAR = "/*+TDDL:CMD_EXTRA(SKIP_DDL_TASKS='WaitColumnarTableCreationTask')*/ "; + + private static final String VISIBLE_COLUMNAR = + "/*+TDDL:CMD_EXTRA(ALTER_CCI_STATUS=true, ALTER_CCI_STATUS_BEFORE=CREATING, ALTER_CCI_STATUS_AFTER=PUBLIC)*/" + + " ALTER TABLE `%s` ALTER INDEX `%s` VISIBLE;"; + private static final String CREATE_TABLEGROUP = "CREATE TABLEGROUP IF NOT EXISTS %s"; private static final String DROP_VIEW_IF_EXISTS = "DROP VIEW IF EXISTS `%s`"; @@ -420,7 +432,7 @@ private Map getSources() { } TableMeta meta = CBOUtil.getTableMeta(tableScan.getTable()); final String schemaName = StringUtil.isEmpty(meta.getSchemaName()) ? 
baseSchema : meta.getSchemaName(); - String tableName = meta.getTableName(); + String tableName = StatisticManager.getSourceTableName(schemaName, meta.getTableName()); tablesUsed.computeIfAbsent(schemaName, x -> new HashSet<>()); tablesUsed.get(schemaName).add(tableName); } @@ -444,7 +456,10 @@ private Map getSources() { */ public void prepareGlobalVariables() { outputCatalog(String.format(SET_GLOBAL, ConnectionProperties.AUTO_PARTITION_PARTITIONS, - DynamicConfig.getInstance().getAutoPartitionPartitions())); + DynamicConfig.getInstance().getAutoPartitionPartitions(false))); + + outputCatalog(String.format(SET_GLOBAL, ConnectionProperties.COLUMNAR_DEFAULT_PARTITIONS, + DynamicConfig.getInstance().getAutoPartitionPartitions(true))); } public void prepareCreateDatabase(Map sources) { @@ -509,6 +524,11 @@ public void prepareCreateTable(Map sources) { List tableDef = Lists.newArrayList(); for (String tableName : tables) { + List columnarIndexes = + GlobalIndexMeta.getColumnarIndexNames(tableName, schemaName, context); + // with columnar + boolean withColumnar = !CollectionUtils.isEmpty(columnarIndexes); + // show create table result of full table name ExecutionContext newExecutionContext = context.copy(); newExecutionContext.setTestMode(false); @@ -549,10 +569,17 @@ public void prepareCreateTable(Map sources) { outputCatalog(String.format(CREATE_TABLEGROUP, create.getTableGroup().getSimpleName())); } - } - tableDef.add(statement.toString().replace("\n", " ") + + tableDef.add((withColumnar ? SKIP_COLUMNAR : "") + + statement.toString().replace("\n", " ") .replace("\t", " ")); + if (withColumnar) { + for (String columnarIndex : columnarIndexes) { + tableDef.add(String.format(VISIBLE_COLUMNAR, tableName, + TddlSqlToRelConverter.unwrapGsiName(columnarIndex))); + } + } } } } finally { @@ -576,7 +603,7 @@ public void prepareCreateView(Map sources) { if (viewManager == null) { continue; } - DrdsSystemTableView.Row row = viewManager.select(view); + SystemTableView.Row row = viewManager.select(view); outputCatalog(String.format(DROP_VIEW_IF_EXISTS, row.getViewName())); outputCatalog(String.format(CREATE_VIEW, row.getViewName(), row.getViewDefinition().replace("\n", " ").replace("\t", " "))); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/ImplicitTableGroupUtil.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/ImplicitTableGroupUtil.java new file mode 100644 index 000000000..24aecd95f --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/ImplicitTableGroupUtil.java @@ -0,0 +1,712 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl; + +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.Assert; +import com.alibaba.polardbx.druid.DbType; +import com.alibaba.polardbx.druid.sql.SQLUtils; +import com.alibaba.polardbx.druid.sql.ast.SQLName; +import com.alibaba.polardbx.druid.sql.ast.SQLStatement; +import com.alibaba.polardbx.druid.sql.ast.expr.SQLIdentifierExpr; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableAddConstraint; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableAddIndex; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableGroupItem; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableItem; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableStatement; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableTruncatePartition; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLConstraint; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLCreateIndexStatement; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLTableElement; +import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.MySqlKey; +import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.MySqlPrimaryKey; +import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.MySqlUnique; +import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.DrdsAlterTableBroadcast; +import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.DrdsAlterTableSingle; +import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlAlterTableModifyColumn; +import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlCreateTableStatement; +import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlTableIndex; +import com.alibaba.polardbx.druid.sql.parser.SQLParserUtils; +import com.alibaba.polardbx.druid.sql.parser.SQLStatementParser; +import com.alibaba.polardbx.druid.sql.visitor.VisitorFeature; +import com.alibaba.polardbx.druid.util.Pair; +import com.alibaba.polardbx.gms.partition.TablePartitionConfig; +import com.alibaba.polardbx.gms.partition.TablePartitionConfigUtil; +import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; +import com.alibaba.polardbx.gms.tablegroup.TableGroupRecord; +import com.alibaba.polardbx.gms.tablegroup.TableGroupUtils; +import com.alibaba.polardbx.gms.topology.DbInfoManager; +import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.config.table.GsiMetaManager; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.tablegroup.TableGroupInfoManager; +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.Getter; +import lombok.Setter; +import org.apache.commons.lang.StringUtils; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcSqlUtils.SQL_PARSE_FEATURES; +import static com.alibaba.polardbx.optimizer.sql.sql2rel.TddlSqlToRelConverter.unwrapGsiName; + +/** + * description: + * author: ziyang.lb + * create: 2023-10-20 10:27 + **/ +public class ImplicitTableGroupUtil { + @Getter + @Setter + private static TableGroupConfigProvider 
tableGroupConfigProvider = new DefaultTableGroupConfigProvider(); + public static final ThreadLocal> exchangeNamesMapping = new ThreadLocal<>(); + + public static String tryAttachImplicitTableGroup(String schemaName, String tableName, String sql) { + try { + return tryAttachImplicitTableGroupInternal(schemaName, tableName, sql); + } finally { + exchangeNamesMapping.set(null); + } + } + + public static String tryAttachImplicitTableGroupInternal(String schemaName, String tableName, String sql) { + if (!tableGroupConfigProvider.isNewPartitionDb(schemaName)) { + return sql; + } + + SQLStatementParser parser = SQLParserUtils.createSQLStatementParser(sql, DbType.mysql, SQL_PARSE_FEATURES); + List parseResult = parser.parseStatementList(); + if (!parseResult.isEmpty()) { + boolean changed = false; + SQLStatement statement = parseResult.get(0); + + if (statement instanceof MySqlCreateTableStatement) { + changed = process4CreateTable(schemaName, tableName, (MySqlCreateTableStatement) statement, + false, null); + } else if (statement instanceof SQLAlterTableStatement) { + changed = process4AlterTable(schemaName, tableName, (SQLAlterTableStatement) statement, + false, null); + } else if (statement instanceof SQLCreateIndexStatement) { + changed = process4CreateIndex(schemaName, tableName, (SQLCreateIndexStatement) statement, + false, null); + } + + return changed ? statement.toString(VisitorFeature.OutputHashPartitionsByRange) : sql; + } + + return sql; + } + + public static boolean checkSql(String schemaName, String tableName, String sql, Set tgGroups) { + SQLStatementParser parser = SQLParserUtils.createSQLStatementParser(sql, DbType.mysql, SQL_PARSE_FEATURES); + List parseResult = parser.parseStatementList(); + SQLStatement statement = parseResult.get(0); + + if (statement instanceof MySqlCreateTableStatement) { + process4CreateTable(schemaName, tableName, (MySqlCreateTableStatement) statement, true, tgGroups); + } else if (statement instanceof SQLAlterTableStatement) { + process4AlterTable(schemaName, tableName, (SQLAlterTableStatement) statement, true, tgGroups); + } else if (statement instanceof SQLCreateIndexStatement) { + process4CreateIndex(schemaName, tableName, (SQLCreateIndexStatement) statement, true, tgGroups); + } + + return true; + } + + public static void removeImplicitTgSyntax(SQLStatement statement) { + if (statement instanceof MySqlCreateTableStatement) { + removeImplicitTgForCreateTable((MySqlCreateTableStatement) statement); + } else if (statement instanceof SQLAlterTableStatement) { + removeImplicitTgForAlterTable((SQLAlterTableStatement) statement); + } else if (statement instanceof SQLCreateIndexStatement) { + SQLCreateIndexStatement createIndexStatement = (SQLCreateIndexStatement) statement; + createIndexStatement.setTableGroup(null); + createIndexStatement.setWithImplicitTablegroup(false); + } + } + + public static String rewriteTableName(String sql, String newTableName) { + SQLStatementParser parser = SQLParserUtils.createSQLStatementParser(sql, DbType.mysql, SQL_PARSE_FEATURES); + List parseResult = parser.parseStatementList(); + SQLStatement statement = parseResult.get(0); + + if (statement instanceof MySqlCreateTableStatement) { + ((MySqlCreateTableStatement) statement).setTableName(newTableName); + } else if (statement instanceof SQLAlterTableStatement) { + ((SQLAlterTableStatement) statement).setName(new SQLIdentifierExpr(newTableName)); + } else if (statement instanceof SQLCreateIndexStatement) { + ((SQLCreateIndexStatement) statement).setTable(new 
SQLIdentifierExpr(newTableName));
+        }
+        return statement.toString(VisitorFeature.OutputHashPartitionsByRange);
+    }
+
+    public static void checkAutoCreateTableGroup(TableGroupConfig tableGroupConfig, boolean isOSS,
+                                                 boolean withTableGroupImplicit, boolean autoCreateTg) {
+        if (tableGroupConfig == null) {
+            return;
+        }
+        TableGroupRecord record = tableGroupConfig.getTableGroupRecord();
+        if (!isOSS && record != null && !autoCreateTg && !withTableGroupImplicit) {
+            throw new TddlRuntimeException(ErrorCode.ERR_AUTO_CREATE_TABLEGROUP,
+                "create tablegroup automatically is not allowed");
+        }
+    }
+
+    public static void checkAutoCreateTableGroup(ExecutionContext ec) {
+        boolean autoCreateTg = ec.getParamManager().getBoolean(ConnectionParams.ALLOW_AUTO_CREATE_TABLEGROUP);
+        if (!autoCreateTg) {
+            throw new TddlRuntimeException(ErrorCode.ERR_AUTO_CREATE_TABLEGROUP,
+                "create tablegroup automatically is not allowed");
+        }
+    }
+
+    private static boolean process4CreateTable(String schemaName, String tableName,
+                                               MySqlCreateTableStatement sqlCreateTableStatement, boolean forCheck,
+                                               Set tgGroups) {
+        boolean changed = false;
+        TableGroupConfig tableGroupConfig =
+            tableGroupConfigProvider.getTableGroupConfig(schemaName, buildQueryTableName(tableName), false);
+        if (tableGroupConfig != null && !tableGroupConfig.isManuallyCreated()) {
+            if (forCheck) {
+                Assert.assertNotNull(sqlCreateTableStatement.getTableGroup());
+                forceCheckTgName(sqlCreateTableStatement.getTableGroup().getSimpleName(),
+                    tableGroupConfig.getTableGroupRecord().getTg_name());
+                Assert.assertTrue(sqlCreateTableStatement.isWithImplicitTablegroup());
+                tgGroups.add(sqlCreateTableStatement.getTableGroup().getSimpleName());
+
+                // see aone 56399827
+                if (tableGroupConfig.getTableGroupRecord().isSingleTableGroup()) {
+                    Assert.assertTrue(sqlCreateTableStatement.isSingle());
+                }
+            } else {
+                tryCheckTgName(sqlCreateTableStatement.getTableGroup(), tableGroupConfig);
+                sqlCreateTableStatement.setTableGroup(
+                    new SQLIdentifierExpr(tableGroupConfig.getTableGroupRecord().getTg_name()));
+                sqlCreateTableStatement.setWithImplicitTablegroup(true);
+
+                // see aone 56399827
+                if (tableGroupConfig.getTableGroupRecord().isSingleTableGroup()) {
+                    sqlCreateTableStatement.setSingle(true);
+                }
+
+                changed = true;
+            }
+        }
+
+        for (SQLTableElement element : sqlCreateTableStatement.getTableElementList()) {
+            if (element instanceof MySqlTableIndex) {
+                changed |= process4MysqlTableIndex(schemaName, tableName, (MySqlTableIndex) element,
+                    forCheck, tgGroups);
+            } else if (element instanceof MySqlKey) {
+                if (!(element instanceof MySqlPrimaryKey)) {
+                    if (element instanceof MySqlUnique) {
+                        changed |= process4MysqlUnique(schemaName, tableName, (MySqlUnique) element,
+                            forCheck, tgGroups);
+                    } else {
+                        changed |= process4MysqlKey(schemaName, tableName, (MySqlKey) element,
+                            forCheck, tgGroups);
+                    }
+                }
+            }
+        }
+
+        return changed;
+    }
+
+    private static boolean process4AlterTable(String schemaName, String tableName,
+                                              SQLAlterTableStatement alterTableStatement, boolean forCheck,
+                                              Set tgGroups) {
+        if (alterTableStatement.getAlterIndexName() != null) {
+            return process4AlterIndex(schemaName, tableName, alterTableStatement, forCheck, tgGroups);
+        } else {
+            return process4AlterItems(schemaName, tableName, alterTableStatement, forCheck, tgGroups);
+        }
+    }
+
+    private static boolean process4AlterItems(String schemaName, String tableName,
+                                              SQLAlterTableStatement alterTableStatement, boolean forCheck,
+                                              Set tgGroups) {
+        boolean changed = false;
+
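+        // Walk the ALTER items one by one: add-index items, unique constraints added via
+        // ADD CONSTRAINT, modify-column items (which may cascade to GSI table groups) and
+        // repartition items each get their implicit table group attached (forCheck=false)
+        // or verified against metadata (forCheck=true). For example (illustrative only,
+        // the actual emitted syntax may differ):
+        //   ALTER TABLE t1 ADD GLOBAL INDEX g1 (c1) PARTITION BY KEY (c1)
+        // would be rewritten so that g1 explicitly references the implicit table group
+        // recorded in metadata rather than being assigned a fresh one.
+        List items =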
alterTableStatement.getItems(); + if (items != null && !items.isEmpty()) { + for (SQLAlterTableItem item : items) { + if (item instanceof SQLAlterTableAddIndex) { + //"add key" or "add index" or "add global index" + SQLAlterTableAddIndex alterTableAddIndex = (SQLAlterTableAddIndex) item; + changed |= process4AlterTableAddIndex(schemaName, tableName, alterTableAddIndex, + forCheck, tgGroups); + } else if (item instanceof SQLAlterTableAddConstraint) { + // "add unique key" or "add unique index" or "add unique global index" + SQLAlterTableAddConstraint alterTableAddConstraint = (SQLAlterTableAddConstraint) item; + SQLConstraint sqlConstraint = alterTableAddConstraint.getConstraint(); + if (sqlConstraint instanceof MySqlUnique) { + changed |= process4MysqlUnique(schemaName, tableName, (MySqlUnique) sqlConstraint, + forCheck, tgGroups); + } else if (sqlConstraint instanceof MySqlTableIndex) { + changed |= process4MysqlTableIndex( + schemaName, tableName, (MySqlTableIndex) sqlConstraint, forCheck, tgGroups); + } + } else if (item instanceof MySqlAlterTableModifyColumn) { + MySqlAlterTableModifyColumn modifyColumn = (MySqlAlterTableModifyColumn) item; + changed |= process4ModifyColumn(schemaName, tableName, alterTableStatement, modifyColumn, + forCheck, tgGroups); + } else if (isAlterTableWithPartition(item)) { + changed |= process4Repartition(schemaName, tableName, alterTableStatement, forCheck, tgGroups); + } + } + } + + if (alterTableStatement.getPartition() != null) { + changed |= process4Repartition(schemaName, tableName, alterTableStatement, forCheck, tgGroups); + } + + return changed; + } + + private static boolean isAlterTableWithPartition(SQLAlterTableItem item) { + return (item instanceof SQLAlterTableGroupItem && !(item instanceof SQLAlterTableTruncatePartition)) + || item instanceof DrdsAlterTableSingle + || item instanceof DrdsAlterTableBroadcast; + } + + private static boolean process4Repartition(String schemaName, String tableName, + SQLAlterTableStatement alterTableStatement, boolean forCheck, + Set tgGroups) { + TableGroupConfig tableGroupConfig = + tableGroupConfigProvider.getTableGroupConfig(schemaName, buildQueryTableName(tableName), true); + if (forCheck || tableGroupConfig == null) { + tableGroupConfig = + tableGroupConfigProvider.getTableGroupConfig(schemaName, buildQueryTableName(tableName), false); + } + + if (tableGroupConfig != null && !tableGroupConfig.isManuallyCreated()) { + if (forCheck) { + Assert.assertNotNull(alterTableStatement.getTargetImplicitTableGroup()); + forceCheckTgName(alterTableStatement.getTargetImplicitTableGroup().getSimpleName(), + tableGroupConfig.getTableGroupRecord().getTg_name()); + tgGroups.add(alterTableStatement.getTargetImplicitTableGroup().getSimpleName()); + } else { + tryCheckTgName(alterTableStatement.getTargetImplicitTableGroup(), tableGroupConfig); + alterTableStatement.setTargetImplicitTableGroup( + new SQLIdentifierExpr(tableGroupConfig.getTableGroupRecord().getTg_name())); + return true; + } + } + return false; + } + + private static boolean process4CreateIndex(String schemaName, String tableName, + SQLCreateIndexStatement createIndexStatement, boolean forCheck, + Set tgGroups) { + String indexName = SQLUtils.normalize(createIndexStatement.getName().getSimpleName()); + TableGroupConfig tableGroupConfig = + tableGroupConfigProvider.getTableGroupConfig(schemaName, buildQueryTableName(tableName), indexName, false); + + if (tableGroupConfig != null && !tableGroupConfig.isManuallyCreated()) { + if (forCheck) { + 
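+                // In check mode the CREATE INDEX statement must already carry the implicit
+                // table group attached when the DDL was first processed; verify that the
+                // recorded name still matches the table group metadata.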
Assert.assertNotNull(createIndexStatement.getTableGroup()); + forceCheckTgName(createIndexStatement.getTableGroup().getSimpleName(), + tableGroupConfig.getTableGroupRecord().getTg_name()); + Assert.assertTrue(createIndexStatement.isWithImplicitTablegroup()); + tgGroups.add(createIndexStatement.getTableGroup().getSimpleName()); + } else { + tryCheckTgName(createIndexStatement.getTableGroup(), tableGroupConfig); + createIndexStatement.setTableGroup( + new SQLIdentifierExpr(tableGroupConfig.getTableGroupRecord().getTg_name())); + createIndexStatement.setWithImplicitTablegroup(true); + return true; + } + } + + return false; + } + + private static boolean process4AlterIndex(String schemaName, String tableName, + SQLAlterTableStatement sqlAlterTableStatement, boolean forCheck, + Set tgGroups) { + String indexName = SQLUtils.normalize(sqlAlterTableStatement.getAlterIndexName().getSimpleName()); + TableGroupConfig tableGroupConfig = tableGroupConfigProvider.getTableGroupConfig( + schemaName, buildQueryTableName(tableName), indexName, true); + if (forCheck || tableGroupConfig == null) { + tableGroupConfig = tableGroupConfigProvider.getTableGroupConfig( + schemaName, buildQueryTableName(tableName), indexName, false); + } + + if (tableGroupConfig != null && !tableGroupConfig.isManuallyCreated()) { + if (forCheck) { + Assert.assertNotNull(sqlAlterTableStatement.getTargetImplicitTableGroup()); + forceCheckTgName(sqlAlterTableStatement.getTargetImplicitTableGroup().getSimpleName(), + tableGroupConfig.getTableGroupRecord().getTg_name()); + tgGroups.add(sqlAlterTableStatement.getTargetImplicitTableGroup().getSimpleName()); + } else { + tryCheckTgName(sqlAlterTableStatement.getTargetImplicitTableGroup(), tableGroupConfig); + sqlAlterTableStatement.setTargetImplicitTableGroup( + new SQLIdentifierExpr(tableGroupConfig.getTableGroupRecord().getTg_name())); + return true; + } + } + + return false; + } + + private static boolean process4MysqlUnique(String schemaName, String tableName, MySqlUnique mySqlUnique, + boolean forCheck, Set tgGroups) { + boolean changed = false; + TableGroupConfig config = + tableGroupConfigProvider.getTableGroupConfig(schemaName, buildQueryTableName(tableName), + SQLUtils.normalize(mySqlUnique.getName().getSimpleName()), false); + + if (config != null && !config.isManuallyCreated()) { + if (forCheck) { + Assert.assertNotNull(mySqlUnique.getTableGroup()); + forceCheckTgName(mySqlUnique.getTableGroup().getSimpleName(), + config.getTableGroupRecord().getTg_name()); + Assert.assertTrue(mySqlUnique.isWithImplicitTablegroup()); + tgGroups.add(mySqlUnique.getTableGroup().getSimpleName()); + } else { + tryCheckTgName(mySqlUnique.getTableGroup(), config); + mySqlUnique.setTableGroup(new SQLIdentifierExpr(config.getTableGroupRecord().getTg_name())); + mySqlUnique.setWithImplicitTablegroup(true); + changed = true; + } + } + return changed; + } + + private static boolean process4MysqlTableIndex(String schemaName, String tableName, MySqlTableIndex tableIndex, + boolean forCheck, Set tgGroups) { + boolean changed = false; + TableGroupConfig config = + tableGroupConfigProvider.getTableGroupConfig(schemaName, buildQueryTableName(tableName), + SQLUtils.normalize(tableIndex.getName().getSimpleName()), false); + + if (config != null && !config.isManuallyCreated()) { + if (forCheck) { + Assert.assertNotNull(tableIndex.getTableGroup()); + forceCheckTgName(tableIndex.getTableGroup().getSimpleName(), + config.getTableGroupRecord().getTg_name()); + Assert.assertTrue(tableIndex.isWithImplicitTablegroup()); + 
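+                // Collect the verified table group name so the caller can cross-check
+                // the complete set of implicit table groups referenced by this DDL.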
tgGroups.add(tableIndex.getTableGroup().getSimpleName()); + } else { + tryCheckTgName(tableIndex.getTableGroup(), config); + tableIndex.setTableGroup(new SQLIdentifierExpr(config.getTableGroupRecord().getTg_name())); + tableIndex.setWithImplicitTablegroup(true); + changed = true; + } + } + return changed; + } + + private static boolean process4MysqlKey(String schemaName, String tableName, MySqlKey mySqlKey, boolean forCheck, + Set tgGroups) { + boolean changed = false; + TableGroupConfig config = + tableGroupConfigProvider.getTableGroupConfig(schemaName, buildQueryTableName(tableName), + SQLUtils.normalize(mySqlKey.getName().getSimpleName()), false); + + if (config != null && !config.isManuallyCreated()) { + if (forCheck) { + Assert.assertNotNull(mySqlKey.getIndexDefinition().getTableGroup()); + forceCheckTgName(mySqlKey.getIndexDefinition().getTableGroup().getSimpleName(), + config.getTableGroupRecord().getTg_name()); + Assert.assertTrue(mySqlKey.getIndexDefinition().isWithImplicitTablegroup()); + tgGroups.add(mySqlKey.getIndexDefinition().getTableGroup().getSimpleName()); + } else { + tryCheckTgName(mySqlKey.getIndexDefinition().getTableGroup(), config); + mySqlKey.getIndexDefinition().setTableGroup( + new SQLIdentifierExpr(config.getTableGroupRecord().getTg_name())); + mySqlKey.getIndexDefinition().setWithImplicitTablegroup(true); + changed = true; + } + } + return changed; + } + + private static boolean process4AlterTableAddIndex(String schemaName, String tableName, + SQLAlterTableAddIndex alterTableAddIndex, boolean forCheck, + Set tgGroups) { + TableGroupConfig config = + tableGroupConfigProvider.getTableGroupConfig(schemaName, buildQueryTableName(tableName), + SQLUtils.normalize(alterTableAddIndex.getName().getSimpleName()), false); + + if (config != null && !config.isManuallyCreated()) { + if (forCheck) { + Assert.assertNotNull(alterTableAddIndex.getTableGroup()); + forceCheckTgName(alterTableAddIndex.getTableGroup().getSimpleName(), + config.getTableGroupRecord().getTg_name()); + Assert.assertTrue(alterTableAddIndex.isWithImplicitTablegroup()); + tgGroups.add(alterTableAddIndex.getTableGroup().getSimpleName()); + } else { + tryCheckTgName(alterTableAddIndex.getTableGroup(), config); + alterTableAddIndex.setTableGroup( + new SQLIdentifierExpr(config.getTableGroupRecord().getTg_name())); + alterTableAddIndex.setWithImplicitTablegroup(true); + return true; + } + } + return false; + } + + private static boolean process4ModifyColumn(String schemaName, String tableName, + SQLAlterTableStatement alterTableStatement, + MySqlAlterTableModifyColumn modifyColumn, + boolean forCheck, Set tgGroups) { + boolean changed = false; + + String modifyColumnName = SQLUtils.normalize( + modifyColumn.getNewColumnDefinition().getColumnName()).toLowerCase(); + PartitionColumnInfo partitionColumnInfo = + tableGroupConfigProvider.getPartitionColumnInfo(schemaName, tableName); + + for (Map.Entry> i : partitionColumnInfo.gsiPartitionColumns.entrySet()) { + if (i.getValue().contains(modifyColumnName.toLowerCase())) { + TableGroupConfig config = tableGroupConfigProvider.getTableGroupConfig( + schemaName, tableName, buildQueryGsiName(i.getKey()), false); + if (config != null && !config.isManuallyCreated()) { + if (forCheck) { + Pair pair = alterTableStatement.getIndexTableGroupPair().stream() + .filter(p -> StringUtils.equalsIgnoreCase(p.getKey().getSimpleName(), i.getKey())) + .findFirst().orElse(null); + Assert.assertNotNull(pair); + forceCheckTgName(pair.getValue().getSimpleName(), 
config.getTableGroupRecord().getTg_name());
+                        tgGroups.add(pair.getValue().getSimpleName());
+                    } else {
+                        if (alterTableStatement.getIndexTableGroupPair().stream()
+                            .noneMatch(p -> StringUtils.equalsIgnoreCase(p.getKey().getSimpleName(), i.getKey()))) {
+                            alterTableStatement.addIndexTableGroupPair(new SQLIdentifierExpr(i.getKey()),
+                                new SQLIdentifierExpr(config.getTableGroupRecord().tg_name));
+                        }
+                        changed = true;
+                    }
+                }
+            }
+        }
+
+        // A table group change on an index may also change the table group of the primary
+        // table, even when the modified column is not a partition key of the primary table.
+        // Example: the third SQL statement below causes the primary table and the index
+        // to each create a new table group.
+        // create table if not exists `t_order` (
+        //   `order_id` varchar(20) DEFAULT NULL,
+        //   `seller_id` varchar(20) DEFAULT NULL,
+        //   GLOBAL INDEX seller_id(`seller_id`) PARTITION BY KEY (seller_id)
+        // ) DEFAULT CHARACTER SET = utf8mb4 DEFAULT COLLATE = utf8mb4_general_ci
+        // PARTITION BY KEY (order_id);
+        //
+        // ALTER TABLE `t_order` MODIFY COLUMN seller_id varchar(30);
+        // ALTER TABLE `t_order` MODIFY COLUMN seller_id varchar(30) CHARACTER SET utf8;
+        if (partitionColumnInfo.tablePartitionColumns.contains(modifyColumnName) || changed) {
+            TableGroupConfig tableGroupConfig = tableGroupConfigProvider.getTableGroupConfig(
+                schemaName, buildQueryTableName(tableName), false);
+            if (tableGroupConfig != null && !tableGroupConfig.isManuallyCreated()) {
+                if (forCheck) {
+                    Assert.assertNotNull(alterTableStatement.getTargetImplicitTableGroup());
+                    forceCheckTgName(alterTableStatement.getTargetImplicitTableGroup().getSimpleName(),
+                        tableGroupConfig.getTableGroupRecord().getTg_name());
+                    tgGroups.add(alterTableStatement.getTargetImplicitTableGroup().getSimpleName());
+                } else {
+                    tryCheckTgName(alterTableStatement.getTargetImplicitTableGroup(), tableGroupConfig);
+                    alterTableStatement.setTargetImplicitTableGroup(
+                        new SQLIdentifierExpr(tableGroupConfig.getTableGroupRecord().getTg_name()));
+                    changed = true;
+                }
+            }
+        }
+
+        return changed;
+    }
+
+    private static void removeImplicitTgForCreateTable(MySqlCreateTableStatement createTableStatement) {
+        createTableStatement.setTableGroup(null);
+        createTableStatement.setWithImplicitTablegroup(false);
+
+        for (SQLTableElement element : createTableStatement.getTableElementList()) {
+            if (element instanceof MySqlTableIndex) {
+                MySqlTableIndex tableIndex = (MySqlTableIndex) element;
+                tableIndex.setTableGroup(null);
+                tableIndex.setWithImplicitTablegroup(false);
+            } else if (element instanceof MySqlKey) {
+                if (!(element instanceof MySqlPrimaryKey)) {
+                    if (element instanceof MySqlUnique) {
+                        MySqlUnique mySqlUnique = (MySqlUnique) element;
+                        mySqlUnique.setTableGroup(null);
+                        mySqlUnique.setWithImplicitTablegroup(false);
+                    } else {
+                        MySqlKey mySqlKey = (MySqlKey) element;
+                        mySqlKey.getIndexDefinition().setTableGroup(null);
+                        mySqlKey.getIndexDefinition().setWithImplicitTablegroup(false);
+                    }
+                }
+            }
+        }
+    }
+
+    private static void removeImplicitTgForAlterTable(SQLAlterTableStatement alterTableStatement) {
+        alterTableStatement.setTargetImplicitTableGroup(null);
+
+        List items = alterTableStatement.getItems();
+        if (items != null && !items.isEmpty()) {
+            for (SQLAlterTableItem item : items) {
+                if (item instanceof SQLAlterTableAddIndex) {
+                    SQLAlterTableAddIndex alterTableAddIndex = (SQLAlterTableAddIndex) item;
+                    alterTableAddIndex.setTableGroup(null);
+                    alterTableAddIndex.setWithImplicitTablegroup(false);
+                } else if (item instanceof SQLAlterTableAddConstraint) {
+                    SQLAlterTableAddConstraint alterTableAddConstraint = (SQLAlterTableAddConstraint) item;
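+                    // ADD CONSTRAINT wraps unique keys and plain table indexes; clear their
+                    // implicit table group flags as well.
+                    SQLConstraint sqlConstraint =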
alterTableAddConstraint.getConstraint(); + if (sqlConstraint instanceof MySqlUnique) { + MySqlUnique mySqlUnique = (MySqlUnique) sqlConstraint; + mySqlUnique.setTableGroup(null); + mySqlUnique.setWithImplicitTablegroup(false); + } else if (sqlConstraint instanceof MySqlTableIndex) { + MySqlTableIndex tableIndex = (MySqlTableIndex) sqlConstraint; + tableIndex.setTableGroup(null); + tableIndex.setWithImplicitTablegroup(false); + } + } + } + } + } + + private static void forceCheckTgName(String actualValue, String expectValue) { + if (tableGroupConfigProvider.isCheckTgNameValue()) { + Assert.assertTrue(StringUtils.equalsIgnoreCase(actualValue, expectValue), + String.format("force check tg_name failed, expect values is %s, actual values is %s.", expectValue, + actualValue)); + } + } + + private static void tryCheckTgName(SQLName expectValue, TableGroupConfig actualValue) { + if (expectValue != null && StringUtils.isNotBlank(expectValue.getSimpleName())) { + String expectName = SQLUtils.normalize(expectValue.getSimpleName()); + String actualName = actualValue.getTableGroupRecord().getTg_name(); + Assert.assertTrue(StringUtils.equalsIgnoreCase(actualName, expectName), + String.format("try check tg_name failed, expect tg name is %s, actual tg name is %s.", + expectName, actualName)); + } + } + + private static String buildQueryTableName(String tableName) { + if (exchangeNamesMapping.get() != null) { + return exchangeNamesMapping.get().getOrDefault(tableName, tableName); + } + return tableName; + } + + private static String buildQueryGsiName(String gsiName) { + if (exchangeNamesMapping.get() != null) { + if (exchangeNamesMapping.get().containsKey(gsiName)) { + return exchangeNamesMapping.get().get(gsiName); + } else { + for (Map.Entry entry : exchangeNamesMapping.get().entrySet()) { + if (entry.getKey().contains("_$")) { + String newKey = unwrapGsiName(entry.getKey()); + if (StringUtils.equalsIgnoreCase(newKey, gsiName)) { + return entry.getValue(); + } + } + } + } + } + return gsiName; + } + + public interface TableGroupConfigProvider { + boolean isNewPartitionDb(String schemaName); + + boolean isCheckTgNameValue(); + + TableGroupConfig getTableGroupConfig(String schemaName, String tableName, boolean fromDelta); + + TableGroupConfig getTableGroupConfig(String schemaName, String tableName, String gsiName, boolean fromDelta); + + PartitionColumnInfo getPartitionColumnInfo(String schemaName, String tableName); + } + + public static class DefaultTableGroupConfigProvider implements TableGroupConfigProvider { + + @Override + public boolean isCheckTgNameValue() { + return true; + } + + @Override + public boolean isNewPartitionDb(String schemaName) { + return DbInfoManager.getInstance().isNewPartitionDb(schemaName); + } + + @Override + public TableGroupConfig getTableGroupConfig(String schemaName, String tableName, boolean fromDelta) { + TablePartitionConfig tbPartConf = + TablePartitionConfigUtil.getTablePartitionConfig(schemaName, tableName, fromDelta); + + return tbPartConf == null ? null : + TableGroupUtils.getTableGroupInfoByGroupId(tbPartConf.getTableConfig().groupId); + } + + @Override + public TableGroupConfig getTableGroupConfig(String schemaName, String tableName, String gsiName, + boolean fromDelta) { + Long tableGroupId = TableGroupInfoManager.getTableGroupId( + schemaName, tableName, gsiName, true, fromDelta); + return tableGroupId == null ? 
null : + TableGroupUtils.getTableGroupInfoByGroupId(tableGroupId); + } + + @Override + public PartitionColumnInfo getPartitionColumnInfo(String schemaName, String tableName) { + Map> gsiPartitionColumns = new HashMap<>(); + Set tablePartitionColumns = new HashSet<>(); + + TableMeta tableMeta = OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(tableName); + if (tableMeta.getGsiPublished() != null) { + for (GsiMetaManager.GsiIndexMetaBean i : tableMeta.getGsiPublished().values()) { + final String gsiName = StringUtils.substringBeforeLast(i.indexName, "_$"); + Set gsiColumns = i.indexColumns.stream() + .map(c -> c.columnName.toLowerCase()) + .collect(Collectors.toSet()); + gsiPartitionColumns.put(gsiName, gsiColumns); + } + } + if (tableMeta.getPartitionInfo() != null) { + List list = tableMeta.getPartitionInfo().getPartitionColumns(); + tablePartitionColumns = list.stream().map(StringUtils::lowerCase).collect(Collectors.toSet()); + } + return new PartitionColumnInfo(schemaName, tableName, tablePartitionColumns, gsiPartitionColumns); + } + } + + @AllArgsConstructor + @Data + public static class PartitionColumnInfo { + String schemaName; + String tableName; + Set tablePartitionColumns = new HashSet<>(); + Map> gsiPartitionColumns = new HashMap<>(); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/engine/AsyncDDLCache.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/engine/AsyncDDLCache.java deleted file mode 100644 index 419dc2808..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/engine/AsyncDDLCache.java +++ /dev/null @@ -1,424 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.ddl.engine; - -import com.alibaba.polardbx.common.ddl.Attribute; -import com.alibaba.polardbx.common.ddl.Job; -import com.alibaba.polardbx.common.ddl.Job.JobType; -import com.alibaba.polardbx.common.properties.ConnectionParams; -import com.alibaba.polardbx.common.utils.Pair; -import com.alibaba.polardbx.common.utils.TStringUtil; -import com.alibaba.polardbx.config.ConfigDataMode; -import com.alibaba.polardbx.druid.sql.SQLUtils; -import com.alibaba.polardbx.druid.sql.ast.SQLStatement; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableAddConstraint; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableAddIndex; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableStatement; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLCreateIndexStatement; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLDropIndexStatement; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLUnique; -import com.alibaba.polardbx.druid.util.JdbcConstants; -import com.alibaba.polardbx.executor.ddl.sync.JobResponse.Response; -import com.alibaba.polardbx.group.jdbc.TGroupDataSource; -import com.alibaba.polardbx.optimizer.context.AsyncDDLContext; -import org.apache.calcite.sql.SqlAlterTableDropIndex; - -import javax.sql.DataSource; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; - -import static com.alibaba.polardbx.executor.ddl.engine.AsyncDDLJobBase.SEPARATOR_COMMON; - -public class AsyncDDLCache { - - // { schemaName, TGroupDataSource } - private static final Map dataSources = new ConcurrentHashMap<>(); - - // { schemaName, available } - private static final Set activeSchemas = ConcurrentHashMap.newKeySet(); - - // { schemaName, { jobId, } } - private static final Map> responses = new ConcurrentHashMap<>(); - - // { schemaName, { parentJobId, { subJobId, sub-job } } } - private static final Map>> leftSubJobs = new ConcurrentHashMap<>(); - - // { schemaName, { jobId, [ groupKey:physicalTableName ] } } - private static final Map>> objectsDone = new ConcurrentHashMap<>(); - - // { schemaName, { jobId, Job } } - private static final Map> ongoingJobs = new ConcurrentHashMap<>(); - - // { schemaName, { , Job } } - private static final Map, Job>> fencedObjects = new ConcurrentHashMap<>(); - - static void initAll(String schemaName, DataSource dataSource) { - schemaName = schemaName.toLowerCase(); - addDataSource(schemaName, dataSource); - activeSchemas.add(schemaName); - responses.put(schemaName, new ConcurrentHashMap<>()); - leftSubJobs.put(schemaName, new ConcurrentHashMap<>()); - objectsDone.put(schemaName, new ConcurrentHashMap<>()); - ongoingJobs.put(schemaName, new ConcurrentHashMap<>()); - fencedObjects.put(schemaName, new ConcurrentHashMap<>()); - } - - static void destroyAll(String schemaName) { - schemaName = schemaName.toLowerCase(); - removeDataSource(schemaName); - activeSchemas.remove(schemaName); - responses.remove(schemaName); - leftSubJobs.remove(schemaName); - objectsDone.remove(schemaName); - ongoingJobs.remove(schemaName); - fencedObjects.remove(schemaName); - } - - public static Map getDataSources() { - return dataSources; - } - - public static DataSource getDataSource(String schemaName) { - return dataSources.get(schemaName.toLowerCase()); - } - - static void addDataSource(String schemaName, DataSource dataSource) { - 
dataSources.put(schemaName.toLowerCase(), dataSource); - } - - static String removeDataSource(String schemaName) { - String unitName = null; - DataSource dataSource = dataSources.remove(schemaName.toLowerCase()); - return unitName; - } - - public static boolean isSchemaAvailable(String schemaName) { - return activeSchemas.contains(schemaName.toLowerCase()); - } - - public static void addResponse(String schemaName, Long jobId, Response response) { - responses.get(schemaName.toLowerCase()).put(jobId, response); - } - - static Response getResponse(String schemaName, Long jobId) { - return responses.get(schemaName.toLowerCase()).get(jobId); - } - - public static void removeResponse(String schemaName, Long jobId) { - responses.get(schemaName.toLowerCase()).remove(jobId); - } - - public static void removeResponses(String schemaName, List jobIds) { - if (jobIds != null && !jobIds.isEmpty()) { - for (Long jobId : jobIds) { - removeResponse(schemaName, jobId); - } - } - } - - public static void removeResponses(String schemaName) { - responses.get(schemaName.toLowerCase()).clear(); - } - - public static Collection getLeftSubJobs(String schemaName) { - Set allLeftSubJobs = new HashSet<>(); - Collection> leftSubJobSets = leftSubJobs.get(schemaName.toLowerCase()).values(); - for (Map leftSubJobSet : leftSubJobSets) { - allLeftSubJobs.addAll(leftSubJobSet.values()); - } - return allLeftSubJobs; - } - - static Map getLeftSubJobs(String schemaName, Long parentJobId) { - return leftSubJobs.get(schemaName.toLowerCase()).get(parentJobId); - } - - static void addLeftSubJob(String schemaName, Job subJob) { - Map subJobs = leftSubJobs.get(schemaName.toLowerCase()).get(subJob.getParentId()); - if (subJobs == null) { - subJobs = new ConcurrentHashMap<>(); - leftSubJobs.get(schemaName.toLowerCase()).put(subJob.getParentId(), subJobs); - } - subJobs.put(subJob.getId(), subJob); - // Cache objects done as well. 
- AsyncDDLCache.addObjectsDone(schemaName, subJob); - } - - public static void removeLeftSubJobs(String schemaName, List subJobIds) { - if (subJobIds != null && !subJobIds.isEmpty()) { - for (Long subJobId : subJobIds) { - removeLeftSubJob(schemaName, subJobId); - } - } - } - - public static void removeLeftSubJob(String schemaName, Long subJobId) { - removeLeftSubJobs(schemaName, subJobId); - removeObjectsDone(schemaName, subJobId); - } - - public static void removeLeftSubJobs(String schemaName) { - leftSubJobs.get(schemaName.toLowerCase()).clear(); - objectsDone.get(schemaName.toLowerCase()).clear(); - } - - private static void removeLeftSubJobs(String schemaName, Long subJobId) { - Collection> leftSubJobSets = leftSubJobs.get(schemaName.toLowerCase()).values(); - for (Map leftSubJobSet : leftSubJobSets) { - Long key = null; - for (Entry leftSubJob : leftSubJobSet.entrySet()) { - if (subJobId == leftSubJob.getValue().getId()) { - key = leftSubJob.getKey(); - break; - } - } - if (key != null) { - leftSubJobSet.remove(key); - } - } - } - - static void reloadObjectsDone(String schemaName, Job job) { - removeObjectsDone(schemaName.toLowerCase(), job.getId()); - addObjectsDone(schemaName.toLowerCase(), job); - } - - static Set getObjectsDone(String schemaName, Long jobId) { - return objectsDone.get(schemaName.toLowerCase()).get(jobId); - } - - static void addObjectsDone(String schemaName, Job job) { - addObjectsDone(schemaName, job.getId(), job.getPhysicalObjectDone()); - } - - public static void removeObjectsDone(String schemaName, List jobIds) { - if (jobIds != null && !jobIds.isEmpty()) { - for (Long jobId : jobIds) { - removeObjectsDone(schemaName, jobId); - } - } - } - - static void addObjectsDone(String schemaName, long jobId, String objectDone) { - if (TStringUtil.isNotEmpty(objectDone)) { - String[] objects = objectDone.split(SEPARATOR_COMMON); - Set objectSet = new HashSet<>(objects.length); - for (String object : objects) { - objectSet.add(object); - } - objectsDone.get(schemaName.toLowerCase()).put(jobId, objectSet); - } - } - - private static void removeObjectsDone(String schemaName, Long jobId) { - objectsDone.get(schemaName.toLowerCase()).remove(jobId); - } - - public static boolean isJobCancelled(String schemaName, Long jobId) { - return !isJobOngoing(schemaName, jobId); - } - - static boolean isJobOngoing(String schemaName, Long jobId) { - return ongoingJobs.get(schemaName.toLowerCase()).containsKey(jobId); - } - - public static boolean isObjectOngoing(String schemaName, String objectSchema, String objectName) { - Job job = getOngoingObject(schemaName, objectSchema, objectName); - return job != null; - } - - public static Job getOngoingObject(String schemaName, String objectSchema, String objectName) { - if (ongoingJobs.get(schemaName.toLowerCase()) == null) { - return null; - } - for (Job job : ongoingJobs.get(schemaName.toLowerCase()).values()) { - if (TStringUtil.equalsIgnoreCase(objectSchema, job.getObjectSchema()) - && TStringUtil.equalsIgnoreCase(objectName, job.getObjectName())) { - return job; - } - } - return null; - } - - public static Collection getOngoingJobs(String schemaName) { - return ongoingJobs.get(schemaName.toLowerCase()).values(); - } - - public static void addOngoingJob(String schemaName, Job job) { - ongoingJobs.get(schemaName.toLowerCase()).put(job.getId(), job); - } - - public static void removeOngoingJob(String schemaName, Long jobId) { - ongoingJobs.get(schemaName.toLowerCase()).remove(jobId); - } - - public static void removeOngoingJobs(String 
schemaName, List jobIds) { - if (jobIds != null && !jobIds.isEmpty()) { - for (Long jobId : jobIds) { - removeOngoingJob(schemaName, jobId); - } - } - } - - public static void removeOngoingJobs(String schemaName) { - ongoingJobs.get(schemaName.toLowerCase()).clear(); - } - - public static Collection getFencedObjects(String schemaName) { - Map, Job> fencedSchemaObjects = fencedObjects.get(schemaName.toLowerCase()); - return fencedSchemaObjects != null ? fencedSchemaObjects.values() : null; - } - - public static void addFencedObject(String schemaName, Job job) { - Pair objectInfo = new Pair<>(job.getObjectSchema(), job.getObjectName()); - Map, Job> fencedSchemaObjects = fencedObjects.get(schemaName.toLowerCase()); - if (fencedSchemaObjects != null) { - fencedSchemaObjects.put(objectInfo, job); - } - } - - static JobType getFencedJobType(String schemaName, String objectSchema, String objectName) { - Job fencedJob = getFencedJob(schemaName, objectSchema, objectName); - if (fencedJob != null) { - switch (fencedJob.getType()) { - case CREATE_GLOBAL_INDEX: - case DROP_GLOBAL_INDEX: - if (AsyncDDLContext.isParentJob(fencedJob)) { - // Special dealing with compound job. - // Compound job is primary table, which should not fenced. - fencedJob = null; - } - if (fencedJob != null) { - // Or add drop local index on primary or clustered. - final List stmts = - SQLUtils.parseStatements(fencedJob.getDdlStmt(), JdbcConstants.MYSQL); - if (1 == stmts.size()) { - final String indexName; - if (stmts.get(0) instanceof SQLCreateIndexStatement) { - indexName = SQLUtils.normalizeNoTrim( - ((SQLCreateIndexStatement) stmts.get(0)).getIndexDefinition().getName() - .getSimpleName()); - } else if (stmts.get(0) instanceof SQLDropIndexStatement) { - indexName = SQLUtils - .normalizeNoTrim(((SQLDropIndexStatement) stmts.get(0)).getIndexName().getSimpleName()); - } else if (stmts.get(0) instanceof SQLAlterTableStatement) { - final SQLAlterTableStatement statement = (SQLAlterTableStatement) stmts.get(0); - if (1 == statement.getItems().size()) { - if (statement.getItems().get(0) instanceof SQLAlterTableAddIndex) { - indexName = SQLUtils.normalizeNoTrim( - ((SQLAlterTableAddIndex) statement.getItems().get(0)).getIndexDefinition() - .getName().getSimpleName()); - } else if (statement.getItems().get(0) instanceof SQLAlterTableAddConstraint && - ((SQLAlterTableAddConstraint) statement.getItems().get(0)) - .getConstraint() instanceof SQLUnique) { - indexName = SQLUtils.normalizeNoTrim( - ((SQLUnique) ((SQLAlterTableAddConstraint) statement.getItems().get(0)) - .getConstraint()).getIndexDefinition().getName().getSimpleName()); - } else if (statement.getItems().get(0) instanceof SqlAlterTableDropIndex) { - indexName = SQLUtils.normalizeNoTrim( - ((SqlAlterTableDropIndex) statement.getItems().get(0)).getIndexName() - .getLastName()); - } else { - indexName = null; - } - } else { - indexName = null; - } - } else { - indexName = null; - } - if (indexName != null && !indexName.equalsIgnoreCase(fencedJob.getObjectName())) { - // Get index name and this is not for creating or dropping GSI table. - fencedJob = null; - } - } - } - break; - default: - break; - } - } - return fencedJob != null ? fencedJob.getType() : JobType.UNSUPPORTED; - } - - public static Job getFencedJob(String schemaName, String objectSchema, String objectName) { - Pair objectInfo = new Pair<>(objectSchema, objectName); - Map, Job> fencedSchemaObjects = fencedObjects.get(schemaName.toLowerCase()); - return fencedSchemaObjects != null ? 
-            fencedObjects.get(schemaName.toLowerCase()).get(objectInfo) : null;
-    }
-
-    public static boolean isObjectFenced(String schemaName, String objectSchema, String objectName) {
-        return getFencedJobType(schemaName, objectSchema, objectName) != JobType.UNSUPPORTED;
-    }
-
-    public static void removeFencedObject(String schemaName, Job job) {
-        Pair<String, String> objectInfo = new Pair<>(job.getObjectSchema(), job.getObjectName());
-        Map<Pair<String, String>, Job> fencedSchemaObjects = fencedObjects.get(schemaName.toLowerCase());
-        if (fencedSchemaObjects != null) {
-            fencedSchemaObjects.remove(objectInfo);
-        }
-    }
-
-    public static void removeFencedObjects(String schemaName, List<Long> jobIds) {
-        if (jobIds != null && !jobIds.isEmpty()) {
-            List<Pair<String, String>> objectInfos = new ArrayList<>();
-            Map<Pair<String, String>, Job> fencedJobs = fencedObjects.get(schemaName.toLowerCase());
-            for (Long jobId : jobIds) {
-                for (Entry<Pair<String, String>, Job> fencedJob : fencedJobs.entrySet()) {
-                    if (fencedJob.getValue().getId() == jobId.longValue()) {
-                        objectInfos.add(fencedJob.getKey());
-                        break;
-                    }
-                }
-            }
-            for (Pair<String, String> objectInfo : objectInfos) {
-                fencedJobs.remove(objectInfo);
-            }
-        }
-    }
-
-    public static void removeFencedObjects(String schemaName) {
-        // Remove fenced objects for the jobs that are not ongoing, e.g. left
-        // caused by pending.
-        List<Pair<String, String>> objectInfos = new ArrayList<>();
-        Map<Pair<String, String>, Job> fencedJobs = fencedObjects.get(schemaName.toLowerCase());
-        for (Entry<Pair<String, String>, Job> fencedJob : fencedJobs.entrySet()) {
-            if (!ongoingJobs.get(schemaName.toLowerCase()).keySet().contains(fencedJob.getValue().getId())) {
-                objectInfos.add(fencedJob.getKey());
-            }
-        }
-        for (Pair<String, String> objectInfo : objectInfos) {
-            fencedJobs.remove(objectInfo);
-        }
-    }
-
-    public static int getLogicalParallelism(AsyncDDLContext asyncDDLContext) {
-        int parallelism = asyncDDLContext.getParamManager().getInt(ConnectionParams.LOGICAL_DDL_PARALLELISM);
-        if (parallelism < Attribute.MIN_LOGICAL_DDL_PARALLELISM
-            || parallelism > Attribute.MAX_LOGICAL_DDL_PARALLELISM) {
-            parallelism = Attribute.DEFAULT_LOGICAL_DDL_PARALLELISM;
-        }
-        return parallelism;
-    }
-
-}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/engine/AsyncDDLCommon.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/engine/AsyncDDLCommon.java
deleted file mode 100644
index 972d82847..000000000
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/engine/AsyncDDLCommon.java
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- * Copyright [2013-2021], Alibaba Group Holding Limited
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.alibaba.polardbx.executor.ddl.engine;
-
-import com.alibaba.polardbx.common.TddlNode;
-import com.alibaba.polardbx.common.ddl.Attribute;
-import com.alibaba.polardbx.common.exception.TddlRuntimeException;
-import com.alibaba.polardbx.common.exception.code.ErrorCode;
-import com.alibaba.polardbx.common.utils.TStringUtil;
-import com.alibaba.polardbx.common.utils.logger.Logger;
-import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
-import com.alibaba.polardbx.common.utils.thread.NamedThreadFactory;
-import com.alibaba.polardbx.gms.metadb.MetaDbDataSource;
-import org.apache.calcite.sql.SqlNode;
-
-import javax.sql.DataSource;
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.RejectedExecutionHandler;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.ThreadPoolExecutor.AbortPolicy;
-import java.util.concurrent.TimeUnit;
-
-public abstract class AsyncDDLCommon {
-
-    private static final Logger logger = LoggerFactory.getLogger(AsyncDDLCommon.class);
-
-    public static final String SEPARATOR_COMMON = ";";
-    public static final String SEPARATOR_INNER = ":";
-
-    public static final String QUOTATION_COMMON = "`";
-    private static final String NODE_ID_WRAPPER = "-";
-
-    private static final String THREAD_NAME_CONNECTOR = NODE_ID_WRAPPER;
-
-    public static final String DDL_HASH_NONE = "--";
-
-    public static final String EMPTY_CONTENT = "";
-
-    public static final String FETCH_PHY_TABLE_DDL = "SHOW CREATE TABLE %s";
-
-    public static final String SQLSTATE_TABLE_EXISTS = "42S01";
-    public static final int ERROR_TABLE_EXISTS = 1050;
-
-    public static final String SQLSTATE_UNKNOWN_TABLE = "42S02";
-    public static final int ERROR_UNKNOWN_TABLE = 1051;
-
-    public static final String SQLSTATE_VIOLATION = "42000";
-    public static final int ERROR_DUPLICATE_KEY = 1061;
-    public static final int ERROR_CANT_DROP_KEY = 1091;
-
-    private final Map<String, DataSource> dataSources = AsyncDDLCache.getDataSources();
-
-    protected static ExecutorService createThreadPool(String threadName) {
-        int numJobSchedulers = getNumOfJobSchedulers();
-        return createThreadPool(numJobSchedulers, numJobSchedulers * 2, threadName);
-    }
-
-    protected static ExecutorService createThreadPool(int coreSize, int maxSize, String threadName) {
-        // Throw an exception to abort the new request in case all the threads
-        // have been occupied.
-        return createThreadPool(coreSize, maxSize, threadName, new AbortPolicy());
-    }
-
-    protected static ExecutorService createThreadPool(int coreSize, int maxSize, String threadName,
-                                                      RejectedExecutionHandler handler) {
-        return new ThreadPoolExecutor(coreSize,
-            maxSize,
-            0L,
-            TimeUnit.MILLISECONDS,
-            new LinkedBlockingQueue<>(1024),
-            new NamedThreadFactory(threadName, true),
-            handler);
-    }
-
-    protected void renameCurrentThread(String threadNamePrefix, String schemaName, String appName) {
-        try {
-            String currentName = Thread.currentThread().getName();
-            if (TStringUtil.containsIgnoreCase(currentName, threadNamePrefix)) {
-                String newName = currentName + THREAD_NAME_CONNECTOR + schemaName + THREAD_NAME_CONNECTOR + appName;
-                Thread.currentThread().setName(newName);
-            }
-        } catch (Throwable ignored) {
-        }
-    }
-
-    protected void removeThreadNameSuffix(String threadNamePrefix) {
-        try {
-            String currentName = Thread.currentThread().getName();
-            if (TStringUtil.containsIgnoreCase(currentName, threadNamePrefix)) {
-                String[] nameParts = TStringUtil.split(currentName, THREAD_NAME_CONNECTOR);
-                if (nameParts.length > 6) {
-                    // We have appended schema name and AppName before, so let's
-                    // remove them since the task has been de-registered.
-                    StringBuilder newName = new StringBuilder();
-                    for (int i = 0; i < 6; i++) {
-                        newName.append(THREAD_NAME_CONNECTOR).append(nameParts[i]);
-                    }
-                    Thread.currentThread().setName(newName.deleteCharAt(0).toString());
-                }
-            }
-        } catch (Throwable ignored) {
-        }
-    }
-
-    protected boolean executeAndCheck(PreparedStatement ps, String action) throws SQLException {
-        int rowCountAffected = ps.executeUpdate();
-        if (rowCountAffected > 0) {
-            return true;
-        } else {
-            logger.error("Failed to " + action + ", no row affected");
-            return false;
-        }
-    }
-
-    protected void executeAndThrow(PreparedStatement ps, String action) throws SQLException {
-        int rowCountAffected = ps.executeUpdate();
-        if (rowCountAffected <= 0) {
-            throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_FAILED, action + ", no row affected");
-        }
-    }
-
-
-    protected static int extractNodeId(String nodeInfo) {
-        if (TStringUtil.isEmpty(nodeInfo) || !TStringUtil.contains(nodeInfo, SEPARATOR_INNER)) {
-            logger.error("invalid node info for leader's response");
-            return TddlNode.getNodeId();
-        }
-        try {
-            String[] nodeParts = nodeInfo.split(SEPARATOR_INNER);
-            return Integer.valueOf(nodeParts[0]);
-        } catch (Throwable t) {
-            logger.error("Bad node info: " + nodeInfo, t);
-            return TddlNode.getNodeId();
-        }
-    }
-
-    protected boolean areAllNodesSynced(String syncedIdList) {
-        boolean allNodesSynced = true;
-
-        Set<String> syncedNodeIds = getSyncedNodeIds(syncedIdList);
-        Set<String> currentNodeIds = getCurrentNodeIds();
-
-        if (syncedNodeIds != null && !syncedNodeIds.isEmpty()) {
-            for (String currentNodeId : currentNodeIds) {
-                if (!syncedNodeIds.contains(currentNodeId)) {
-                    allNodesSynced = false;
-                    break;
-                }
-            }
-        } else {
-            allNodesSynced = false;
-        }
-
-        if (!allNodesSynced) {
-            logger.warn("Not all nodes have been synchronized: "
-                + (syncedNodeIds != null ? syncedNodeIds.toString() : "") + " done, but "
-                + (currentNodeIds != null ? currentNodeIds.toString() : "") + " expected");
-        }
-
-        return allNodesSynced;
-    }
-
-    protected int waitToContinue(int waitingTime) {
-        try {
-            Thread.sleep(waitingTime);
-        } catch (InterruptedException ignored) {
-        }
-        return waitingTime;
-    }
-
-    protected static Set<String> getSyncedNodeIds(String syncedIdList) {
-        if (TStringUtil.isNotEmpty(syncedIdList)
-            && !syncedIdList.equalsIgnoreCase(String.valueOf(TddlNode.DEFAULT_SERVER_NODE_ID))) {
-            Set<String> syncedNodeIds = new HashSet<>();
-            for (String syncedNodeId : syncedIdList.split(SEPARATOR_COMMON)) {
-                syncedNodeIds.add(syncedNodeId);
-            }
-            return syncedNodeIds;
-        } else {
-            return null;
-        }
-    }
-
-    protected static Set<String> getCurrentNodeIds() {
-        Set<String> currentNodeIds = new HashSet<>();
-        for (String nodeId : TddlNode.getNodeIdList().split(SEPARATOR_COMMON)) {
-            // Need wrapped node id here
-            currentNodeIds.add(NODE_ID_WRAPPER + nodeId + NODE_ID_WRAPPER);
-        }
-        return currentNodeIds;
-    }
-
-    protected static String getWrappedNodeId() {
-        return NODE_ID_WRAPPER + TddlNode.getNodeId() + NODE_ID_WRAPPER;
-    }
-
-    protected DataSource checkDataSource(String schemaName) {
-        return checkDataSource(schemaName, false);
-    }
-
-    protected DataSource checkDataSource(String schemaName, boolean nullable) {
-        DataSource dataSource;
-        dataSource = MetaDbDataSource.getInstance().getDataSource();
-        if (dataSource == null && !nullable) {
-            throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_UNEXPECTED, "Data source for " + schemaName
-                + "(" + schemaName.toLowerCase() + ") is null");
-        }
-        return dataSource;
-    }
-
-    private static int getNumOfJobSchedulers() {
-        int numJobSchedulers = Attribute.DEFAULT_NUM_OF_JOB_SCHEDULERS;
-
-        try {
-        } catch (Throwable t) {
-            logger.error("Failed to parse instance properties. Use default settings instead.", t);
-        }
-
-        return numJobSchedulers;
-    }
-
-    public void checkPartitionCompliance(SqlNode partition) {
-        return;
-    }
-}
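
The file removed above (and AsyncDDLJobBase below) is the legacy async DDL engine. Its job schedulers were bounded thread pools that abort new submissions once the 1024-slot queue fills, rather than queueing without limit. A minimal, runnable sketch of that pattern in plain JDK terms; the class name, pool sizes, and the inline thread factory are illustrative stand-ins for the removed NamedThreadFactory-based helper:

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedPoolSketch {
    static ExecutorService createThreadPool(int coreSize, int maxSize, String threadName) {
        // Bounded queue of 1024 tasks; AbortPolicy rejects new DDL requests when saturated.
        return new ThreadPoolExecutor(coreSize, maxSize, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<>(1024),
            r -> {
                Thread t = new Thread(r, threadName); // NamedThreadFactory also numbered threads
                t.setDaemon(true);                    // and marked them as daemons
                return t;
            },
            new ThreadPoolExecutor.AbortPolicy());
    }

    public static void main(String[] args) {
        ExecutorService pool = createThreadPool(2, 4, "DDL-Job-Scheduler");
        pool.submit(() -> System.out.println("job task running"));
        pool.shutdown();
    }
}
```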
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/engine/AsyncDDLJobBase.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/engine/AsyncDDLJobBase.java
deleted file mode 100644
index eda64b63d..000000000
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/engine/AsyncDDLJobBase.java
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Copyright [2013-2021], Alibaba Group Holding Limited
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.alibaba.polardbx.executor.ddl.engine;
-
-import com.alibaba.polardbx.common.constants.SystemTables;
-import com.alibaba.polardbx.common.ddl.Job;
-import com.alibaba.polardbx.common.ddl.Job.JobPhase;
-import com.alibaba.polardbx.common.ddl.Job.JobState;
-import com.alibaba.polardbx.common.ddl.Job.JobType;
-import com.alibaba.polardbx.config.ConfigDataMode;
-import com.alibaba.polardbx.gms.metadb.GmsSystemTables;
-import com.alibaba.polardbx.optimizer.context.AsyncDDLContext;
-
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-
-public abstract class AsyncDDLJobBase extends AsyncDDLCommon {
-
-    public static final String JOB_SCHEDULER_PREFIX = "DDL-Job-Scheduler";
-
-    public static final String DDL_JOBS_TABLE = GmsSystemTables.DDL_JOBS;
-
-    protected static final boolean IS_POLARDB_X = true;
-
-    static final String CREATE_DDL_JOBS_TABLE = "CREATE TABLE IF NOT EXISTS `" + DDL_JOBS_TABLE + "` ("
-        + " `ID` BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,"
-        + " `JOB_ID` BIGINT UNSIGNED NOT NULL,"
-        + " `PARENT_JOB_ID` BIGINT UNSIGNED NOT NULL DEFAULT 0,"
-        + " `JOB_NO` SMALLINT UNSIGNED NOT NULL DEFAULT 0,"
-        + " `SERVER` VARCHAR(64) NOT NULL,"
-        + " `OBJECT_SCHEMA` VARCHAR(256) NOT NULL,"
-        + " `OBJECT_NAME` VARCHAR(256) NOT NULL,"
-        + " `NEW_OBJECT_NAME` VARCHAR(256) NOT NULL,"
-        + " `JOB_TYPE` VARCHAR(64) NOT NULL,"
-        + " `PHASE` VARCHAR(64) NOT NULL,"
-        + " `STATE` VARCHAR(64) NOT NULL,"
-        + " `PHYSICAL_OBJECT_DONE` LONGTEXT DEFAULT NULL,"
-        + " `PROGRESS` SMALLINT DEFAULT 0,"
-        + " `DDL_STMT` LONGTEXT DEFAULT NULL,"
-        + " `OLD_RULE_TEXT` LONGTEXT DEFAULT NULL,"
-        + " `NEW_RULE_TEXT` LONGTEXT DEFAULT NULL,"
-        + " `GMT_CREATED` BIGINT UNSIGNED NOT NULL,"
-        + " `GMT_MODIFIED` BIGINT UNSIGNED NOT NULL,"
-        + " `REMARK` LONGTEXT DEFAULT NULL,"
-        + " `RESERVED_GSI_INT` INT DEFAULT NULL,"
-        + " `RESERVED_GSI_TXT` LONGTEXT DEFAULT NULL,"
-        + " `RESERVED_DDL_INT` INT DEFAULT NULL,"
-        + " `RESERVED_DDL_TXT` LONGTEXT DEFAULT NULL,"
-        + " `RESERVED_CMN_INT` INT DEFAULT NULL,"
-        + " `RESERVED_CMN_TXT` LONGTEXT DEFAULT NULL,"
-        + " PRIMARY KEY (`ID`),"
-        + " UNIQUE KEY `UNI_JOB_ID`(`JOB_ID`)"
-        + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4";
-
-    private static final int PERCENTAGE_DONE = 100;
-
-    static final String JOB_INSERT = "INSERT INTO "
-        + DDL_JOBS_TABLE
-        + "(JOB_ID, PARENT_JOB_ID, JOB_NO, SERVER, OBJECT_SCHEMA, OBJECT_NAME, "
-        + "NEW_OBJECT_NAME, JOB_TYPE, PHASE, STATE, REMARK, DDL_STMT, "
-        + "GMT_CREATED, GMT_MODIFIED, RESERVED_DDL_INT" + (IS_POLARDB_X ? ", SCHEMA_NAME" : "") + ")"
-        + " VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?" + (IS_POLARDB_X ? ", ?" : "") + ")";
-
-    private static final String JOB_SELECT =
-        "SELECT JOB_ID, PARENT_JOB_ID, JOB_NO, SERVER, OBJECT_SCHEMA, OBJECT_NAME, "
-            + "NEW_OBJECT_NAME, JOB_TYPE, PHASE, STATE, PHYSICAL_OBJECT_DONE, PROGRESS, "
-            + "DDL_STMT, GMT_CREATED, GMT_MODIFIED, REMARK, "
-            + "RESERVED_GSI_INT, RESERVED_DDL_INT FROM "
-            + DDL_JOBS_TABLE;
-
-    private static final String GSI_JOB_SELECT = "SELECT RESERVED_GSI_TXT, RESERVED_GSI_INT FROM " + DDL_JOBS_TABLE;
-
-    private static final String DDL_JOB_SELECT = "SELECT RESERVED_DDL_TXT, RESERVED_DDL_INT FROM "
-        + DDL_JOBS_TABLE;
-
-    private static final String JOB_UPDATE = "UPDATE " + DDL_JOBS_TABLE + " SET GMT_MODIFIED = ?, ";
-
-    private static final String JOB_DELETE = "DELETE FROM " + DDL_JOBS_TABLE;
-
-    private static final String WHERE_JOB_SPECIFIC = " WHERE JOB_ID = ?";
-
-    private static final String SCHEMA_NAME_FILTER = (IS_POLARDB_X ? "SCHEMA_NAME = ? AND " : "");
-
-    private static final String ORDER_BY_ID = " ORDER BY ID ";
-
-    private static final String ORDER_BY_ID_DESC = ORDER_BY_ID + "DESC";
-
-    static final String JOB_COUNT_ALL = "SELECT COUNT(1) FROM " + DDL_JOBS_TABLE;
-
-    static final String JOB_FETCH_ALL_IN_SCHEMA = JOB_SELECT
-        + (IS_POLARDB_X ? " WHERE SCHEMA_NAME = ?" : "")
-        + ORDER_BY_ID_DESC;
-
-    static final String JOB_FETCH_ALL_DB_JOBS = JOB_SELECT + ORDER_BY_ID_DESC;
-
-    static final String JOB_FETCH_SHOW_IN_SCHEMA = JOB_SELECT + " WHERE "
-        + SCHEMA_NAME_FILTER +
-        "(JOB_ID IN (%s) OR PARENT_JOB_ID IN (%s))"
-        + ORDER_BY_ID_DESC;
-
-    static final String JOB_FETCH_SHOW = JOB_SELECT + " WHERE "
-        + "(JOB_ID IN (%s) OR PARENT_JOB_ID IN (%s))"
-        + ORDER_BY_ID_DESC;
-
-    static final String JOB_FETCH_SPECIFIC = JOB_SELECT
-        + WHERE_JOB_SPECIFIC;
-
-    static final String JOB_FETCH_ACTIVE = JOB_SELECT + " WHERE "
-        + SCHEMA_NAME_FILTER
-        + "STATE NOT IN (?, ?, ?) AND PARENT_JOB_ID IN (?, ?)"
-        + ORDER_BY_ID + " LIMIT ?";
-
-    static final String JOB_FETCH_BATCH = JOB_SELECT
-        + " WHERE JOB_ID IN (%s)"
-        + ORDER_BY_ID;
-
-    static final String JOB_FETCH_STATED = JOB_SELECT + " WHERE "
-        + SCHEMA_NAME_FILTER
-        + "STATE = ? AND PARENT_JOB_ID IN (?, ?)"
-        + ORDER_BY_ID;
-
-    static final String JOB_FETCH_INCOMPLETE = JOB_SELECT + " WHERE "
-        + SCHEMA_NAME_FILTER
-        + "STATE NOT IN (?, ?) AND PARENT_JOB_ID IN (?, ?)"
-        + ORDER_BY_ID;
-
-    static final String JOB_FETCH_SUB_JOBS = JOB_SELECT + " WHERE "
-        + SCHEMA_NAME_FILTER
-        + "PARENT_JOB_ID = ?"
-        + ORDER_BY_ID;
-
-    private static final String JOB_STATE_UPDATE = JOB_UPDATE + "STATE = ?, REMARK = ?";
-
-    private static final String JOB_STATE_PHASE_UPDATE = JOB_STATE_UPDATE + ", PHASE = ?";
-
-    static final String JOB_STATE_CHANGE = JOB_STATE_PHASE_UPDATE
-        + WHERE_JOB_SPECIFIC;
-
-    static final String JOB_STATE_DONE = JOB_STATE_PHASE_UPDATE
-        + ", PROGRESS = " + PERCENTAGE_DONE
-        + WHERE_JOB_SPECIFIC;
-
-    static final String JOB_STATE_ONLY = JOB_STATE_UPDATE
-        + WHERE_JOB_SPECIFIC;
-
-    private static final String JOB_RECORD_DONE = JOB_UPDATE
-        + "PHYSICAL_OBJECT_DONE = CONCAT_WS('" + SEPARATOR_COMMON + "', PHYSICAL_OBJECT_DONE, ?)";
-
-    static final String JOB_RECORD_DONE_ONLY = JOB_RECORD_DONE
-        + WHERE_JOB_SPECIFIC;
-
-    static final String JOB_RECORD_DONE_FULL = JOB_RECORD_DONE
-        + ", PROGRESS = ?"
-        + WHERE_JOB_SPECIFIC;
-
-    static final String JOB_RESET_DONE = JOB_UPDATE
-        + "PHYSICAL_OBJECT_DONE = ?, PROGRESS = ?"
-        + WHERE_JOB_SPECIFIC;
-
-    static final String JOB_ROLLBACK = JOB_STATE_CHANGE;
-
-    static final String JOB_FLAG_UPDATE = JOB_UPDATE
-        + "RESERVED_DDL_INT = RESERVED_DDL_INT | ?"
-        + WHERE_JOB_SPECIFIC;
-
-    static final String JOB_SERVER_UPDATE = JOB_UPDATE
-        + "SERVER = ?"
-        + WHERE_JOB_SPECIFIC;
-
-    static final String JOB_FLAG_SERVER_UPDATE = JOB_UPDATE
-        + "SERVER = ?, RESERVED_DDL_INT = RESERVED_DDL_INT | ?"
-        + WHERE_JOB_SPECIFIC;
-
-    static final String JOB_REMOVE_DONE = JOB_DELETE + " WHERE "
-        + SCHEMA_NAME_FILTER
-        + "(JOB_ID = ? OR PARENT_JOB_ID = ?) AND PHASE = ? AND STATE in (?, ?)";
-
-    static final String SCALEOUT_JOB_REMOVE = JOB_DELETE + " WHERE "
-        + SCHEMA_NAME_FILTER
-        + "(JOB_ID = ? OR PARENT_JOB_ID = ?)";
-
-    static final String JOB_REMOVE_FORCE = JOB_DELETE + " WHERE (JOB_ID = ? OR PARENT_JOB_ID = ?)";
-
-    static final String JOB_REMOVE_BATCH = JOB_DELETE
-        + " WHERE JOB_ID IN (%s) AND STATE IN (?, ?)";
-
-    static final String JOB_REMARK_UPDATE = JOB_UPDATE
-        + "REMARK = ?"
-        + WHERE_JOB_SPECIFIC;
-
-    static final String GSI_JOB_STATE_UPDATE = JOB_UPDATE
-        + "RESERVED_GSI_TXT = ?"
-        + WHERE_JOB_SPECIFIC;
-
-    static final String GSI_JOB_PROGRESS_UPDATE = JOB_UPDATE
-        + "RESERVED_GSI_INT = ?"
-        + WHERE_JOB_SPECIFIC;
-
-    static final String GSI_JOB_FETCH_SPECIFIC = GSI_JOB_SELECT
-        + WHERE_JOB_SPECIFIC;
-
-    static final String DDL_JOB_STATE_UPDATE = JOB_UPDATE + "RESERVED_DDL_TXT = ?" + WHERE_JOB_SPECIFIC;
-
-    static final String DDL_JOB_PROGRESS_UPDATE = JOB_UPDATE + "RESERVED_DDL_INT = ?"
-        + WHERE_JOB_SPECIFIC;
-
-    static final String SCALEOUT_JOB_PROGRESS_UPDATE = JOB_UPDATE + "PROGRESS = ?"
-        + WHERE_JOB_SPECIFIC;
-
-    static final String DDL_JOB_FETCH_SPECIFIC = DDL_JOB_SELECT + WHERE_JOB_SPECIFIC;
-
-    static final SimpleDateFormat DATE_FORMATTER = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
-
-    String getCommonInfo(AsyncDDLContext asyncDDLContext) {
-        Job job = asyncDDLContext.getJob();
-        return " for job '" + job.getId() + "' with type '" + job.getType() + "' and object '" + job.getObjectSchema()
-            + "." + job.getObjectName() + "' in " + asyncDDLContext.getSchemaName();
-    }
-
-    Job fillInJob(ResultSet rs) throws SQLException {
-        return fillInJob(rs, false);
-    }
-
-    Job fillInJob(ResultSet rs, boolean withDate) throws SQLException {
-        Job job = new Job();
-
-        job.setId(rs.getLong("JOB_ID"));
-        job.setParentId(rs.getLong("PARENT_JOB_ID"));
-        job.setSeq(rs.getInt("JOB_NO"));
-
-        job.setServer(rs.getString("SERVER"));
-
-        job.setObjectSchema(rs.getString("OBJECT_SCHEMA"));
-        job.setObjectName(rs.getString("OBJECT_NAME"));
-        job.setNewObjectName(rs.getString("NEW_OBJECT_NAME"));
-        job.setType(JobType.valueOf(rs.getString("JOB_TYPE")));
-
-        job.setPhase(JobPhase.valueOf(rs.getString("PHASE")));
-        job.setState(JobState.valueOf(rs.getString("STATE")));
-
-        job.setPhysicalObjectDone(rs.getString("PHYSICAL_OBJECT_DONE"));
-
-        job.setProgress(rs.getInt("PROGRESS"));
-
-        job.setDdlStmt(rs.getString("DDL_STMT"));
-
-        if (withDate) {
-            long gmtCreated = rs.getLong("GMT_CREATED");
-            long gmtModified = rs.getLong("GMT_MODIFIED");
-            long gmtCurrent = System.currentTimeMillis();
-
-            job.setGmtCreated(DATE_FORMATTER.format(new Date(gmtCreated)));
-            job.setGmtModified(DATE_FORMATTER.format(new Date(gmtModified)));
-
-            switch (job.getState()) {
-            case PENDING:
-            case STAGED:
-            case COMPLETED:
-                job.setElapsedTime(gmtModified - gmtCreated);
-                break;
-            default:
-                job.setElapsedTime(gmtCurrent - gmtCreated);
-                break;
-            }
-        }
-
-        job.setRemark(rs.getString("REMARK"));
-        job.setBackfillProgress(rs.getString("RESERVED_GSI_INT"));
-        job.setFlag(rs.getInt("RESERVED_DDL_INT"));
-
-        return job;
-    }
-
-}
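
AsyncDDLJobBase assembled every statement against the ddl_jobs metadb table from shared fragments (JOB_UPDATE, WHERE_JOB_SPECIFIC, and so on). A worked expansion of one template, assuming GmsSystemTables.DDL_JOBS resolves to the name ddl_jobs:

```java
public class JobSqlComposition {
    public static void main(String[] args) {
        String table = "ddl_jobs"; // assumed value of GmsSystemTables.DDL_JOBS
        String jobUpdate = "UPDATE " + table + " SET GMT_MODIFIED = ?, ";
        String jobStateUpdate = jobUpdate + "STATE = ?, REMARK = ?";
        String jobStatePhaseUpdate = jobStateUpdate + ", PHASE = ?";
        String whereJobSpecific = " WHERE JOB_ID = ?";
        // JOB_STATE_DONE additionally pins PROGRESS to 100 (PERCENTAGE_DONE):
        String jobStateDone = jobStatePhaseUpdate + ", PROGRESS = 100" + whereJobSpecific;
        System.out.println(jobStateDone);
        // UPDATE ddl_jobs SET GMT_MODIFIED = ?, STATE = ?, REMARK = ?, PHASE = ?, PROGRESS = 100 WHERE JOB_ID = ?
    }
}
```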
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/MockDdlJob.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/MockDdlJob.java
index 2cb425cfb..014104728 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/MockDdlJob.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/MockDdlJob.java
@@ -58,11 +58,12 @@ protected void validate() {
     @Override
     protected ExecutableDdlJob doCreate() {
         ExecutableDdlJob executableDdlJob = generateRandomDag(expectNodeCount, maxOutEdgeCount, edgeRate, mockSubJob);
-        FailPoint.inject(FailPointKey.FP_HIJACK_DDL_JOB_FORMAT, (k, v)->{
-            if(StringUtils.equalsIgnoreCase(v, "SEQUELTIAL")){
+        FailPoint.inject(FailPointKey.FP_HIJACK_DDL_JOB_FORMAT, (k, v) -> {
+            if (StringUtils.equalsIgnoreCase(v, "SEQUELTIAL")) {
                 executableDdlJob.overrideTasks(generateSequentialDag(expectNodeCount, mockSubJob));
-            }else {
-                executableDdlJob.overrideTasks(generateRandomDag(expectNodeCount, maxOutEdgeCount, edgeRate, mockSubJob));
+            } else {
+                executableDdlJob.overrideTasks(
+                    generateRandomDag(expectNodeCount, maxOutEdgeCount, edgeRate, mockSubJob));
             }
         });
         return executableDdlJob;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/AlterTableBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/AlterTableBuilder.java
index 41a8d7369..bfc79a096 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/AlterTableBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/AlterTableBuilder.java
@@ -40,6 +40,7 @@
 import com.alibaba.polardbx.optimizer.core.rel.PhyDdlTableOperation;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterTable;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTablePreparedData;
+import com.alibaba.polardbx.optimizer.parse.FastsqlUtils;
 import com.alibaba.polardbx.optimizer.sharding.DataNodeChooser;
 import com.alibaba.polardbx.optimizer.utils.ForeignKeyUtils;
 import com.alibaba.polardbx.optimizer.utils.RelUtils;
@@ -56,6 +57,7 @@
 import org.apache.calcite.sql.dialect.MysqlSqlDialect;
 import org.apache.calcite.sql.parser.SqlParserPos;
 import org.apache.calcite.sql.pretty.SqlPrettyWriter;
+import org.apache.commons.lang3.StringUtils;
 
 import javax.sql.DataSource;
 import java.util.ArrayList;
@@ -162,6 +164,7 @@ public void buildPhysicalPlans() {
     protected void buildSqlTemplate() {
         super.buildSqlTemplate();
         sqlTemplateFkRewrite();
+        sqlTemplateWithTableGroupRewrite();
         this.sequenceBean = ((SqlAlterTable) this.sqlTemplate).getAutoIncrement();
     }
 
@@ -370,4 +373,20 @@ public void sqlTemplateFkRewrite() {
 
         this.sqlTemplate = sqlTemplate;
     }
+
+    public void sqlTemplateWithTableGroupRewrite() {
+        final SqlAlterTable sqlTemplate = (SqlAlterTable) this.sqlTemplate;
+        if (sqlTemplate.getIndexTableGroupMap().isEmpty() && StringUtils.isEmpty(
+            sqlTemplate.getTargetImplicitTableGroupName())) {
+            return;
+        }
+        SQLAlterTableStatement alterTableStmt =
+            (SQLAlterTableStatement) FastsqlUtils.parseSql(sqlTemplate.getSourceSql()).get(0);
+        alterTableStmt.setTargetImplicitTableGroup(null);
+        alterTableStmt.getIndexTableGroupPair().clear();
+        //alterTableStmt.getTableSource().setExpr("?");
+        sqlTemplate.setSourceSql(alterTableStmt.toString());
+
+        this.sqlTemplate = sqlTemplate;
+    }
 }
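
The new sqlTemplateWithTableGroupRewrite() re-parses the source SQL and strips table-group bindings so they never reach the physical MySQL nodes. A sketch of that flow reusing the patch's own Fastsql calls; the standalone helper class is illustrative:

```java
import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableStatement;
import com.alibaba.polardbx.optimizer.parse.FastsqlUtils;

class TableGroupRewriteSketch {
    static String stripTableGroups(String sourceSql) {
        SQLAlterTableStatement stmt =
            (SQLAlterTableStatement) FastsqlUtils.parseSql(sourceSql).get(0);
        stmt.setTargetImplicitTableGroup(null); // drop the implicit target table group
        stmt.getIndexTableGroupPair().clear();  // drop per-index table-group bindings
        return stmt.toString();                 // table-group clauses no longer appear
    }
}
```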
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/AlterTableDiscardTableSpaceBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/AlterTableDiscardTableSpaceBuilder.java
new file mode 100644
index 000000000..0c91baced
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/AlterTableDiscardTableSpaceBuilder.java
@@ -0,0 +1,87 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.builder;
+
+import com.alibaba.polardbx.gms.topology.DbInfoManager;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.planner.SqlConverter;
+import com.alibaba.polardbx.optimizer.core.rel.ReplaceTableNameWithQuestionMarkVisitor;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.data.DdlPreparedData;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.rel.core.DDL;
+import org.apache.calcite.rel.ddl.AlterTableDiscardTableSpace;
+import org.apache.calcite.sql.SqlAlterTableDiscardTableSpace;
+import org.apache.calcite.sql.SqlDdlNodes;
+import org.apache.calcite.sql.SqlIdentifier;
+import org.apache.calcite.sql.parser.SqlParserPos;
+
+import java.util.List;
+import java.util.Map;
+
+public class AlterTableDiscardTableSpaceBuilder extends DdlPhyPlanBuilder {
+
+    final static String SQL_TEMPLATE = "ALTER TABLE ? DISCARD TABLESPACE";
+
+    public AlterTableDiscardTableSpaceBuilder(DDL ddl, DdlPreparedData preparedData,
+                                              Map<String, List<List<String>>> tableTopology,
+                                              ExecutionContext executionContext) {
+        super(ddl, preparedData, executionContext);
+        this.tableTopology = tableTopology;
+    }
+
+    public static AlterTableDiscardTableSpaceBuilder createBuilder(String schemaName,
+                                                                   String logicalTableName,
+                                                                   Map<String, List<List<String>>> tableTopology,
+                                                                   ExecutionContext executionContext) {
+        ReplaceTableNameWithQuestionMarkVisitor visitor =
+            new ReplaceTableNameWithQuestionMarkVisitor(schemaName, executionContext);
+
+        SqlIdentifier logicalTableNameNode = new SqlIdentifier(logicalTableName, SqlParserPos.ZERO);
+
+        SqlAlterTableDiscardTableSpace
+            sqlAlterTableDiscardTableSpace =
+            SqlDdlNodes.alterTableDiscardTableSpace(logicalTableNameNode, SQL_TEMPLATE);
+        sqlAlterTableDiscardTableSpace =
+            (SqlAlterTableDiscardTableSpace) sqlAlterTableDiscardTableSpace.accept(visitor);
+
+        final RelOptCluster cluster = SqlConverter.getInstance(executionContext).createRelOptCluster(null);
+        AlterTableDiscardTableSpace alterTableDiscardTableSpace =
+            AlterTableDiscardTableSpace.create(sqlAlterTableDiscardTableSpace, logicalTableNameNode, cluster);
+
+        DdlPreparedData preparedData = new DdlPreparedData();
+        preparedData.setSchemaName(schemaName);
+        preparedData.setTableName(logicalTableName);
+
+        return new AlterTableDiscardTableSpaceBuilder(alterTableDiscardTableSpace, preparedData, tableTopology,
+            executionContext);
+    }
+
+    @Override
+    protected void buildTableRuleAndTopology() {
+        boolean isNewPartDb = DbInfoManager.getInstance().isNewPartitionDb(ddlPreparedData.getSchemaName());
+        if (!isNewPartDb) {
+            buildExistingTableRule(ddlPreparedData.getTableName());
+        }
+    }
+
+    @Override
+    public void buildPhysicalPlans() {
+        buildSqlTemplate();
+        buildPhysicalPlans(ddlPreparedData.getTableName());
+    }
+
+}
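
Usage of the new builder mirrors the MoveDatabaseBuilder change later in this patch; the element type returned by getPhysicalPlans() is inferred from those callers, and the sketch assumes it runs in the same package:

```java
import java.util.List;
import java.util.Map;

import com.alibaba.polardbx.optimizer.context.ExecutionContext;
import com.alibaba.polardbx.optimizer.core.rel.PhyDdlTableOperation;

class DiscardTableSpaceUsageSketch {
    static List<PhyDdlTableOperation> buildDiscardPlans(String schemaName, String tableName,
                                                        Map<String, List<List<String>>> topology,
                                                        ExecutionContext ec) {
        // One "ALTER TABLE ? DISCARD TABLESPACE" operation per physical table in the topology.
        return AlterTableDiscardTableSpaceBuilder
            .createBuilder(schemaName, tableName, topology, ec)
            .build()
            .getPhysicalPlans();
    }
}
```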
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/AlterTableImportTableSpaceBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/AlterTableImportTableSpaceBuilder.java
new file mode 100644
index 000000000..8e99a8360
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/AlterTableImportTableSpaceBuilder.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.builder;
+
+import com.alibaba.polardbx.gms.topology.DbInfoManager;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.planner.SqlConverter;
+import com.alibaba.polardbx.optimizer.core.rel.ReplaceTableNameWithQuestionMarkVisitor;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.data.DdlPreparedData;
+import org.apache.calcite.plan.RelOptCluster;
+import org.apache.calcite.rel.core.DDL;
+import org.apache.calcite.rel.ddl.AlterTableImportTableSpace;
+import org.apache.calcite.sql.SqlAlterTableImportTableSpace;
+import org.apache.calcite.sql.SqlDdlNodes;
+import org.apache.calcite.sql.SqlIdentifier;
+import org.apache.calcite.sql.parser.SqlParserPos;
+
+import java.util.List;
+import java.util.Map;
+
+public class AlterTableImportTableSpaceBuilder extends DdlPhyPlanBuilder {
+
+    final static String SQL_TEMPLATE = "ALTER TABLE ? IMPORT TABLESPACE";
+    final static String SQL_TEMPLATE_IF_NOT_EXIST = "ALTER TABLE ? IMPORT TABLESPACE IF NOT EXISTS";
+
+    public AlterTableImportTableSpaceBuilder(DDL ddl, DdlPreparedData preparedData,
+                                             Map<String, List<List<String>>> tableTopology,
+                                             ExecutionContext executionContext) {
+        super(ddl, preparedData, executionContext);
+        this.tableTopology = tableTopology;
+    }
+
+    public static AlterTableImportTableSpaceBuilder createBuilder(String schemaName,
+                                                                  String logicalTableName,
+                                                                  boolean ifNotExists,
+                                                                  Map<String, List<List<String>>> tableTopology,
+                                                                  ExecutionContext executionContext) {
+        ReplaceTableNameWithQuestionMarkVisitor visitor =
+            new ReplaceTableNameWithQuestionMarkVisitor(schemaName, executionContext);
+
+        SqlIdentifier logicalTableNameNode = new SqlIdentifier(logicalTableName, SqlParserPos.ZERO);
+
+        SqlAlterTableImportTableSpace
+            sqlAlterTableImportTableSpace =
+            ifNotExists ? SqlDdlNodes.alterTableImportTableSpace(logicalTableNameNode, SQL_TEMPLATE_IF_NOT_EXIST) :
+                SqlDdlNodes.alterTableImportTableSpace(logicalTableNameNode, SQL_TEMPLATE);
+        sqlAlterTableImportTableSpace = (SqlAlterTableImportTableSpace) sqlAlterTableImportTableSpace.accept(visitor);
+
+        final RelOptCluster cluster = SqlConverter.getInstance(executionContext).createRelOptCluster(null);
+        AlterTableImportTableSpace alterTableImportTableSpace =
+            AlterTableImportTableSpace.create(sqlAlterTableImportTableSpace, logicalTableNameNode, cluster);
+
+        DdlPreparedData preparedData = new DdlPreparedData();
+        preparedData.setSchemaName(schemaName);
+        preparedData.setTableName(logicalTableName);
+
+        return new AlterTableImportTableSpaceBuilder(alterTableImportTableSpace, preparedData, tableTopology,
+            executionContext);
+    }
+
+    @Override
+    protected void buildTableRuleAndTopology() {
+        boolean isNewPartDb = DbInfoManager.getInstance().isNewPartitionDb(ddlPreparedData.getSchemaName());
+        if (!isNewPartDb) {
+            buildExistingTableRule(ddlPreparedData.getTableName());
+        }
+    }
+
+    @Override
+    public void buildPhysicalPlans() {
+        buildSqlTemplate();
+        buildPhysicalPlans(ddlPreparedData.getTableName());
+    }
+
+}
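
Both tablespace builders keep a constant template and later bind `?` to each physical table taken from the topology. A self-contained illustration of that expansion over a made-up two-group topology:

```java
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class TableSpaceTemplateDemo {
    public static void main(String[] args) {
        // Hypothetical topology: physical DB group -> physical tables of logical table t1.
        Map<String, List<String>> topology = new LinkedHashMap<>();
        topology.put("MYDB_000001_GROUP", Arrays.asList("t1_00", "t1_01"));
        topology.put("MYDB_000002_GROUP", Arrays.asList("t1_02", "t1_03"));

        String template = "ALTER TABLE ? IMPORT TABLESPACE";
        topology.forEach((group, phyTables) -> phyTables.forEach(phyTable ->
            System.out.println(group + ": " + template.replace("?", "`" + phyTable + "`"))));
    }
}
```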
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/CreatePartitionTableBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/CreatePartitionTableBuilder.java
index 91efca732..65a8b500f 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/CreatePartitionTableBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/CreatePartitionTableBuilder.java
@@ -17,6 +17,7 @@
 package com.alibaba.polardbx.executor.ddl.job.builder;
 
 import com.alibaba.polardbx.common.ddl.foreignkey.ForeignKeyData;
+import com.alibaba.polardbx.common.properties.ConnectionParams;
 import com.alibaba.polardbx.gms.locality.LocalityDesc;
 import com.alibaba.polardbx.gms.partition.TablePartitionRecord;
 import com.alibaba.polardbx.gms.topology.DbInfoManager;
@@ -29,6 +30,7 @@
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfoBuilder;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
+import com.alibaba.polardbx.optimizer.partition.PartitionSpec;
 import com.alibaba.polardbx.optimizer.partition.common.PartitionTableType;
 import org.apache.calcite.rel.core.DDL;
 import org.apache.calcite.sql.SqlCreateTable;
@@ -66,6 +68,14 @@ public PartitionInfo getPartitionInfo() {
         return partitionInfo;
     }
 
+    protected void rectifyPartitionInfoForImportTable(PartitionInfo partitionInfo) {
+        partitionInfo.setRandomTableNamePatternEnabled(false);
+        partitionInfo.setTableNamePattern(null);
+        String tableName = partitionInfo.getTableName();
+        PartitionSpec spec = partitionInfo.getPartitionBy().getNthPartition(1);
+        spec.getLocation().setPhyTableName(tableName);
+    }
+
     protected PartitionInfo buildPartitionInfo() {
         String tbName = null;
         TableMeta tableMeta = null;
@@ -96,7 +106,8 @@ protected PartitionInfo buildPartitionInfo() {
         }
         partitionInfo =
             PartitionInfoBuilder.buildPartitionInfoByPartDefAst(preparedData.getSchemaName(), tbName, tableGroupName,
-                joinGroupName, (SqlPartitionBy) preparedData.getPartitioning(), preparedData.getPartBoundExprInfo(),
+                preparedData.isWithImplicitTableGroup(), joinGroupName, (SqlPartitionBy) preparedData.getPartitioning(),
+                preparedData.getPartBoundExprInfo(),
                 pkColMetas, allColMetas, partitionTableType, executionContext, localityDesc);
 
         partitionInfo.setTableType(partitionTableType);
@@ -112,6 +123,15 @@ protected PartitionInfo buildPartitionInfo() {
             partitionInfo.setAutoFlag(autoFlag);
         }
 
+        /**
+         * For an imported table we must rectify the partition info:
+         * 1. the physical table name must be identical to the logical table name;
+         * 2. the location recorded in the partition spec must match.
+         */
+        if (preparedData.isImportTable() || preparedData.isReimportTable()) {
+            rectifyPartitionInfoForImportTable(partitionInfo);
+        }
+
         return partitionInfo;
     }
 
@@ -127,6 +147,7 @@ public void buildCreatePartitionReferenceTableTopology() {
             data.setPushDown(false);
         }
         (sqlCreateTable).setPushDownForeignKeys(false);
+        (sqlCreateTable).setIsAddLogicalForeignKeyOnly(isAddLogicalForeignKeyOnly());
         if (refTables.stream().allMatch(refTable -> pushableForeignConstraint(refTable.right.refSchema,
             preparedData.getTableName(),
             refTable,
@@ -189,6 +210,14 @@ public boolean pushableForeignConstraint(String schema,
             return false;
         }
 
+        // In the import-table scenario the table is always a single table, so the foreign key can be pushed down.
+        boolean isImportTable = executionContext.getParamManager().getBoolean(ConnectionParams.IMPORT_TABLE)
+            || executionContext.getParamManager().getBoolean(ConnectionParams.REIMPORT_TABLE);
+
+        if (isImportTable) {
+            return true;
+        }
+
         final PartitionInfo leftPartitionInfo = partitionInfo;
 
         final PartitionInfo rightPartitionInfo =
             OptimizerContext.getContext(schema).getPartitionInfoManager()
@@ -209,10 +238,7 @@ public boolean pushableForeignConstraint(String schema,
             return false;
         }
 
-        if (leftPartitionInfo.isSingleTable() && rightPartitionInfo.isSingleTable()) {
-            return true;
-        }
-
-        return false;
+        return leftPartitionInfo.isSingleTable() && rightPartitionInfo.isSingleTable();
     }
+
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/CreatePartitionTableLocalIndexBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/CreatePartitionTableLocalIndexBuilder.java
index b0925edcf..dc88e15a9 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/CreatePartitionTableLocalIndexBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/CreatePartitionTableLocalIndexBuilder.java
@@ -16,19 +16,11 @@
 
 package com.alibaba.polardbx.executor.ddl.job.builder;
 
-import com.alibaba.polardbx.executor.ddl.job.builder.gsi.IndexBuilderHelper;
-import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
-import com.alibaba.polardbx.optimizer.core.rel.ReplaceTableNameWithQuestionMarkVisitor;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.CreateLocalIndexPreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
 import org.apache.calcite.rel.core.DDL;
-import org.apache.calcite.sql.SqlAlterTable;
-import org.apache.calcite.sql.SqlCreateIndex;
-import org.apache.calcite.sql.SqlIdentifier;
-import org.apache.calcite.sql.SqlNode;
-import org.apache.calcite.sql.parser.SqlParserPos;
 
 public class CreatePartitionTableLocalIndexBuilder extends CreateLocalIndexBuilder {
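
The translated comment above states the import-table invariant: a single partition whose location carries the logical table name. A minimal sketch of the adjustment, assuming a PartitionInfo already built for a single-partition table; it restates rectifyPartitionInfoForImportTable() with the reasoning spelled out in comments:

```java
import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
import com.alibaba.polardbx.optimizer.partition.PartitionSpec;

class ImportTableRectifySketch {
    static void rectify(PartitionInfo partitionInfo) {
        // Without this, the physical table would get a random-suffixed name pattern;
        // an imported table must keep physical name == logical name.
        partitionInfo.setRandomTableNamePatternEnabled(false);
        partitionInfo.setTableNamePattern(null);
        // Single partition by construction: point its location at the logical name.
        String tableName = partitionInfo.getTableName();
        PartitionSpec first = partitionInfo.getPartitionBy().getNthPartition(1);
        first.getLocation().setPhyTableName(tableName);
    }
}
```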
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/CreateTableBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/CreateTableBuilder.java
index 0bfb8d21f..dc5b9af8e 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/CreateTableBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/CreateTableBuilder.java
@@ -17,8 +17,11 @@
 package com.alibaba.polardbx.executor.ddl.job.builder;
 
 import com.alibaba.polardbx.common.Engine;
+import com.alibaba.polardbx.common.ddl.foreignkey.ForeignKeyData;
 import com.alibaba.polardbx.common.exception.TddlRuntimeException;
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.common.properties.DynamicConfig;
+import com.alibaba.polardbx.common.utils.GeneralUtil;
 import com.alibaba.polardbx.config.ConfigDataMode;
 import com.alibaba.polardbx.druid.sql.ast.expr.SQLCharExpr;
 import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlCreateTableStatement;
@@ -26,6 +29,8 @@
 import com.alibaba.polardbx.gms.topology.DbInfoManager;
 import com.alibaba.polardbx.gms.topology.DbInfoRecord;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
+import com.alibaba.polardbx.optimizer.config.table.IndexMeta;
+import com.alibaba.polardbx.optimizer.config.table.TableMeta;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.CreateTablePreparedData;
 import com.alibaba.polardbx.optimizer.index.TableRuleBuilder;
@@ -48,6 +53,8 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.Set;
+import java.util.TreeSet;
 import java.util.stream.Collectors;
 
 public class CreateTableBuilder extends DdlPhyPlanBuilder {
@@ -153,7 +160,7 @@ private boolean checkIfSupportSingleDbMultiTbs() {
             || (preparedData.getDbPartitions() != null && dbCount == 1);
         boolean multiTbs = preparedData.getTbPartitionBy() != null && tbCount > 1;
 
-        return ConfigDataMode.isSupportSingleDbMultiTbs() || !(singleDb && multiTbs);
+        return DynamicConfig.getInstance().isSupportSingleDbMultiTbs() || !(singleDb && multiTbs);
     }
 
     @Override
@@ -163,6 +170,8 @@ protected void buildSqlTemplate() {
         final SqlCreateTable sqlTemplate = (SqlCreateTable) this.sqlTemplate;
         Engine engine = sqlTemplate.getEngine();
 
+        sqlTemplate.setIsAddLogicalForeignKeyOnly(isAddLogicalForeignKeyOnly());
+
         MySqlCreateTableStatement stmt = (MySqlCreateTableStatement) sqlTemplate.rewrite();
 
         if (sqlTemplate.getEncryption() == null && checkDatabaseEncryption(preparedData.getSchemaName())) {
@@ -216,6 +225,12 @@ public PhysicalPlanData genPhysicalPlanData(boolean autoPartition) {
         if (data.getLocalityDesc() == null || data.getLocalityDesc().isEmpty()) {
             data.setLocalityDesc(preparedData.getLocality());
         }
+        if (data.getTableESA() == null) {
+            data.setTableESA(preparedData.getTableEAS());
+        }
+        if (data.getColEsaList() == null || data.getColEsaList().isEmpty()) {
+            data.setColEsaList(preparedData.getColEASList());
+        }
         return data;
     }
 
@@ -305,4 +320,23 @@ public void buildCreateReferenceTableTopology() {
             }
         }
     }
+
+    public List<Boolean> isAddLogicalForeignKeyOnly() {
+        List<Boolean> isAddLogicalForeignKeyOnly = new ArrayList<>();
+        TableMeta tableMeta = preparedData.getTableMeta();
+
+        Set<String> indexes = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
+        indexes.addAll(
+            tableMeta.getAllIndexes().stream().map(IndexMeta::getPhysicalIndexName).collect(Collectors.toList()));
+        if (GeneralUtil.isNotEmpty(preparedData.getAddedForeignKeys())) {
+            for (ForeignKeyData fk : preparedData.getAddedForeignKeys()) {
+                if (indexes.contains(fk.constraint)) {
+                    isAddLogicalForeignKeyOnly.add(true);
+                } else {
+                    isAddLogicalForeignKeyOnly.add(false);
+                }
+            }
+        }
+        return isAddLogicalForeignKeyOnly;
+    }
 }
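
isAddLogicalForeignKeyOnly() hinges on case-insensitive identifier matching: when an index named like the FK constraint already exists, only the logical definition is added. The underlying JDK idiom, runnable as-is (names are made up):

```java
import java.util.Set;
import java.util.TreeSet;

public class CaseInsensitiveIndexLookup {
    public static void main(String[] args) {
        // MySQL identifiers compare case-insensitively, so the existing-index set must too.
        Set<String> indexes = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
        indexes.add("FK_ORDER_USER");
        System.out.println(indexes.contains("fk_order_user")); // true: logical FK only
        System.out.println(indexes.contains("fk_missing"));    // false: physical index needed
    }
}
```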
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/DdlPhyPlanBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/DdlPhyPlanBuilder.java
index 775bdc7e9..7cfac148b 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/DdlPhyPlanBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/DdlPhyPlanBuilder.java
@@ -23,6 +23,7 @@
 import com.google.common.collect.ImmutableList;
 import com.alibaba.polardbx.common.exception.TddlRuntimeException;
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.common.jdbc.BytesSql;
 import com.alibaba.polardbx.common.jdbc.ParameterContext;
 import com.alibaba.polardbx.common.model.Group;
 import com.alibaba.polardbx.common.utils.Pair;
@@ -46,6 +47,8 @@
 import com.alibaba.polardbx.optimizer.utils.RelUtils;
 import com.alibaba.polardbx.rule.TableRule;
 import com.alibaba.polardbx.rule.model.TargetDB;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
 import org.apache.calcite.rel.core.DDL;
 import org.apache.calcite.sql.SequenceBean;
 import org.apache.calcite.sql.SqlCreateTable;
@@ -141,9 +144,10 @@ public PhysicalPlanData genPhysicalPlanData(boolean autoPartition) {
             Engine tableEngine = ((SqlCreateTable) relDdl.sqlNode).getEngine();
             boolean pushDownFk = ((SqlCreateTable) relDdl.sqlNode).getPushDownForeignKeys();
             return DdlJobDataConverter.convertToPhysicalPlanData(tableTopology, physicalPlans, false, autoPartition,
-                Engine.isFileStore(tableEngine), pushDownFk);
+                Engine.isFileStore(tableEngine), pushDownFk, executionContext);
         } else {
-            return DdlJobDataConverter.convertToPhysicalPlanData(tableTopology, physicalPlans, false, autoPartition);
+            return DdlJobDataConverter.convertToPhysicalPlanData(tableTopology, physicalPlans, false, autoPartition,
+                executionContext);
         }
     }
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/DdlPhyPlanBuilderFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/DdlPhyPlanBuilderFactory.java
index a45b581b5..6f0b658de 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/DdlPhyPlanBuilderFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/DdlPhyPlanBuilderFactory.java
@@ -18,6 +18,4 @@
 
 public class DdlPhyPlanBuilderFactory {
 
-
-
 }
\ No newline at end of file
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/DirectPhysicalSqlPlanBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/DirectPhysicalSqlPlanBuilder.java
index 09e3b9dfb..473a81c4a 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/DirectPhysicalSqlPlanBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/DirectPhysicalSqlPlanBuilder.java
@@ -28,7 +28,6 @@
  */
 public class DirectPhysicalSqlPlanBuilder extends DdlPhyPlanBuilder {
 
-
     public DirectPhysicalSqlPlanBuilder(DDL ddl,
                                         ReorganizeLocalPartitionPreparedData preparedData,
                                         ExecutionContext executionContext) {
@@ -39,11 +38,11 @@ public DirectPhysicalSqlPlanBuilder(DDL ddl,
     public void buildTableRuleAndTopology() {
         final String schemaName = ddlPreparedData.getSchemaName();
         final String tableName = ddlPreparedData.getTableName();
-        if(DbInfoManager.getInstance().isNewPartitionDb(ddlPreparedData.getSchemaName())){
+        if (DbInfoManager.getInstance().isNewPartitionDb(ddlPreparedData.getSchemaName())) {
             partitionInfo = OptimizerContext.getContext(ddlPreparedData.getSchemaName())
-                .getPartitionInfoManager().getPartitionInfo(ddlPreparedData.getTableName());
+                .getPartitionInfoManager().getPartitionInfo(ddlPreparedData.getTableName());
             this.tableTopology = PartitionInfoUtil.buildTargetTablesFromPartitionInfo(partitionInfo);
-        }else {
+        } else {
             buildExistingTableRule(tableName);
             buildChangedTableTopology(schemaName, tableName);
         }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/DropPartitionLocalIndexBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/DropPartitionLocalIndexBuilder.java
index c11779d89..8056054bc 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/DropPartitionLocalIndexBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/DropPartitionLocalIndexBuilder.java
@@ -16,26 +16,11 @@
 
 package com.alibaba.polardbx.executor.ddl.job.builder;
 
-import com.alibaba.polardbx.common.TddlConstants;
-import com.alibaba.polardbx.common.exception.TddlRuntimeException;
-import com.alibaba.polardbx.common.exception.code.ErrorCode;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
-import com.alibaba.polardbx.optimizer.config.table.TableMeta;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
-import com.alibaba.polardbx.optimizer.core.rel.ReplaceTableNameWithQuestionMarkVisitor;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.DropLocalIndexPreparedData;
-import com.alibaba.polardbx.optimizer.core.rel.ddl.data.PreparedDataUtil;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
 import org.apache.calcite.rel.core.DDL;
-import org.apache.calcite.sql.SqlAlterTable;
-import org.apache.calcite.sql.SqlAlterTableDropIndex;
-import org.apache.calcite.sql.SqlDdl;
-import org.apache.calcite.sql.SqlDdlNodes;
-import org.apache.calcite.sql.SqlDropIndex;
-import org.apache.calcite.sql.SqlIdentifier;
-import org.apache.calcite.sql.SqlKind;
-import org.apache.calcite.sql.SqlNode;
-import org.apache.calcite.sql.parser.SqlParserPos;
 
 public class DropPartitionLocalIndexBuilder extends DropLocalIndexBuilder {
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/MoveDatabaseBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/MoveDatabaseBuilder.java
index 7dac30743..2fc7d2a4d 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/MoveDatabaseBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/MoveDatabaseBuilder.java
@@ -48,6 +48,8 @@ public class MoveDatabaseBuilder {
     protected Map<String, Map<String, List<List<String>>>> targetTablesTopology =
         new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
     protected Map<String, MoveDatabaseItemPreparedData> tablesPreparedData = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
+    protected Map<String, List<PhyDdlTableOperation>> discardTableSpacePhysicalPlansMap =
+        new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
 
     public MoveDatabaseBuilder(DDL ddl, MoveDatabasePreparedData preparedData,
                                ExecutionContext executionContext) {
@@ -81,6 +83,11 @@ public void buildTablesPhysicalPlans() {
             targetTablesTopology.put(tableName, itemBuilder.getTargetPhyTables());
             logicalTablesPhysicalPlansMap.put(tableName, phyDdlTableOperations);
             tablesPreparedData.put(tableName, moveDatabaseItemPreparedData);
+
+            AlterTableDiscardTableSpaceBuilder discardTableSpaceBuilder =
+                AlterTableDiscardTableSpaceBuilder.createBuilder(
+                    preparedData.getSchemaName(), tableName, itemBuilder.getTableTopology(), executionContext);
+            discardTableSpacePhysicalPlansMap.put(tableName, discardTableSpaceBuilder.build().getPhysicalPlans());
         }
     }
 
@@ -101,6 +108,10 @@ public Map<String, Map<String, List<List<String>>>> getTargetTablesTopology() {
         return targetTablesTopology;
     }
 
+    public Map<String, List<PhyDdlTableOperation>> getDiscardTableSpacePhysicalPlansMap() {
+        return discardTableSpacePhysicalPlansMap;
+    }
+
     public Map<String, MoveDatabaseItemPreparedData> getTablesPreparedData() {
         return tablesPreparedData;
     }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/RenamePartitionTableBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/RenamePartitionTableBuilder.java
index c22e3a1ad..e0026ae19 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/RenamePartitionTableBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/RenamePartitionTableBuilder.java
@@ -17,17 +17,11 @@
 package com.alibaba.polardbx.executor.ddl.job.builder;
 
 import com.alibaba.polardbx.optimizer.OptimizerContext;
-import com.alibaba.polardbx.optimizer.config.table.TableMeta;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.RenameTablePreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
-import com.alibaba.polardbx.optimizer.utils.TableTopologyUtil;
 import org.apache.calcite.rel.core.DDL;
 
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
 public class RenamePartitionTableBuilder extends RenameTableBuilder {
 
     public RenamePartitionTableBuilder(DDL ddl, RenameTablePreparedData preparedData,
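
A move-database job can now pick up the pre-built discard plans next to the regular physical DDL. A hedged usage sketch (the builder's construction is assumed to have happened elsewhere; the map value type is inferred from this patch):

```java
import java.util.List;

import com.alibaba.polardbx.optimizer.core.rel.PhyDdlTableOperation;

class MoveDatabaseDiscardPlansSketch {
    // builder must have run buildTablesPhysicalPlans(); the map is keyed case-insensitively.
    static List<PhyDdlTableOperation> discardPlansFor(MoveDatabaseBuilder builder, String logicalTable) {
        return builder.getDiscardTableSpacePhysicalPlansMap().get(logicalTable);
    }
}
```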
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/CreateGlobalIndexBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/CreateGlobalIndexBuilder.java
index cd00d4cc6..d871e6261 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/CreateGlobalIndexBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/CreateGlobalIndexBuilder.java
@@ -46,6 +46,7 @@
 import com.alibaba.polardbx.druid.util.JdbcConstants;
 import com.alibaba.polardbx.executor.ddl.job.builder.CreateTableBuilder;
 import com.alibaba.polardbx.executor.ddl.job.builder.DdlPhyPlanBuilder;
+import com.alibaba.polardbx.executor.gsi.GsiUtils;
 import com.alibaba.polardbx.gms.topology.DbInfoManager;
 import com.alibaba.polardbx.optimizer.PlannerContext;
 import com.alibaba.polardbx.optimizer.config.table.ColumnMeta;
@@ -83,6 +84,7 @@
 import org.apache.calcite.sql.SqlNode;
 import org.apache.calcite.sql.SqlNodeList;
 import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.validate.SqlValidatorImpl;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang3.StringUtils;
 
@@ -111,10 +113,11 @@ public CreateGlobalIndexBuilder(@Deprecated DDL ddl, CreateGlobalIndexPreparedDa
     public static CreateGlobalIndexBuilder create(DDL ddl, CreateGlobalIndexPreparedData preparedData,
+                                                  Map<String, CreateGlobalIndexPreparedData> indexTablePreparedDataMap,
                                                   ExecutionContext ec) {
         boolean isNewPartDb = DbInfoManager.getInstance().isNewPartitionDb(preparedData.getSchemaName());
         return isNewPartDb ?
-            new CreatePartitionGlobalIndexBuilder(ddl, preparedData, ec) :
+            new CreatePartitionGlobalIndexBuilder(ddl, preparedData, indexTablePreparedDataMap, false, ec) :
             new CreateGlobalIndexBuilder(ddl, preparedData, ec);
     }
 
@@ -195,19 +198,16 @@ protected void refreshShardingInfo(SqlIndexDefinition indexDef, CreateTablePrepa
 
         // Generate auto partition for clustered index.
         final SqlNode dbpartition;
-        if (tableToSchema.isAutoPartition() &&
-            null == indexDef.getDbPartitionBy() && null == indexDef.getPartitioning()
-            && !indexDef.isSingle() && !indexDef.isBroadcast()) {
+        if (tableToSchema.isAutoPartition()
+            && indexDef.withoutPartitionDef()
+            && indexDef.isPartitionIndex()) {
             final String indexColName = indexDef.getColumns().get(0).getColumnNameStr();
             dbpartition = generateDbPartition(tableToSchema, indexColName);
             // Replace the index define.
-            indexDef = indexDef.rebuildToGsi(null, dbpartition, indexDef.isClustered());
+            indexDef = indexDef.rebuildToGsi(null, dbpartition);
         } else {
             dbpartition = indexDef.getDbPartitionBy();
-            if (indexDef.getPartitioning() == null &&
-                indexDef.getDbPartitionBy() == null &&
-                !indexDef.isSingle() &&
-                !indexDef.isBroadcast()) {
+            if (indexDef.withoutPartitionDef() && indexDef.isPartitionIndex()) {
                 throw new TddlRuntimeException(ErrorCode.ERR_OPTIMIZER,
                     "Global (clustered) secondary index must have dbpartition/partition by.");
             }
@@ -238,7 +238,7 @@ protected void refreshShardingInfo(SqlCreateIndex sqlCreateIndex, CreateTablePre
             final String indexColName = sqlCreateIndex.getColumns().get(0).getColumnNameStr();
             dbpartition = generateDbPartition(tableToSchema, indexColName);
             // Replace the index define.
-            sqlCreateIndex = sqlCreateIndex.rebuildToGsi(null, dbpartition, sqlCreateIndex.createClusteredIndex());
+            sqlCreateIndex = sqlCreateIndex.rebuildToGsi(null, dbpartition);
         } else {
             dbpartition = sqlCreateIndex.getDbPartitionBy();
             if (null == dbpartition && sqlCreateIndex.getPartitioning() == null) {
@@ -341,14 +341,16 @@ protected SqlNode buildIndexTableDefinition(final SqlAlterTable sqlAlterTable, f
         final Set<String> indexColumnSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
         indexColumnSet.addAll(indexColumnMap.keySet());
 
-        if (!containsAllShardingColumns(indexColumnSet, indexRule)) {
+        // Columnar indexes do not force using index columns as partition columns
+        final boolean isColumnar = indexDef.isColumnar();
+        if (!isColumnar && !containsAllShardingColumns(indexColumnSet, indexRule)) {
             throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_INDEX_AND_SHARDING_COLUMNS_NOT_MATCH);
         }
 
         /**
          * check single/broadcast table
          */
-        if (null != primaryRule) {
+        if (null != primaryRule && !isColumnar) {
             final boolean singleTable = GeneralUtil.isEmpty(primaryRule.getDbShardRules())
                 && GeneralUtil.isEmpty(primaryRule.getTbShardRules());
             if (forceAllowGsi == false && (primaryRule.isBroadcast() || singleTable)) {
@@ -402,14 +404,16 @@ protected SqlNode buildIndexTableDefinition(final SqlCreateIndex sqlCreateIndex)
         final Set<String> indexColumnSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
         indexColumnSet.addAll(indexColumnMap.keySet());
 
-        if (!containsAllShardingColumns(indexColumnSet, indexRule)) {
+        // Columnar indexes do not force using index columns as partition columns
+        final boolean isColumnar = sqlCreateIndex.createCci();
+        if (!isColumnar && !containsAllShardingColumns(indexColumnSet, indexRule)) {
             throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_INDEX_AND_SHARDING_COLUMNS_NOT_MATCH);
         }
 
         /**
          * check single/broadcast table
         */
-        if (null != primaryRule) {
+        if (null != primaryRule && !isColumnar) {
             final boolean singleTable = GeneralUtil.isEmpty(primaryRule.getDbShardRules())
                 && GeneralUtil.isEmpty(primaryRule.getTbShardRules());
             if (primaryRule.isBroadcast() || singleTable) {
@@ -667,6 +671,8 @@ protected SqlNode createIndexTable(SqlIdentifier indexTableName,
         List<String> pkList = new ArrayList<>();
         TableMeta primaryTableMeta = ec.getSchemaManager(schemaName).getTableWithNull(primaryTableName);
 
+        boolean isColumnar = GsiUtils.isAddCci(relDdl.getSqlNode(), sqlAlterTable);
+
         // Generated column can not be sharding key
         if (primaryTableMeta != null) {
             for (String col : indexShardingKey) {
@@ -723,7 +729,7 @@ protected SqlNode createIndexTable(SqlIdentifier indexTableName,
                         final SQLColumnConstraint constraint = constraintIt.next();
                         if (constraint instanceof SQLColumnPrimaryKey) {
                             withoutPk = false;
-                            if (!pkList.isEmpty()) {
+                            if (!pkList.isEmpty() && !isColumnar) {
                                 throw new TddlRuntimeException(
                                     ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_UNSUPPORTED_PRIMARY_TABLE_DEFINITION,
                                     "multiple primary key definition");
@@ -752,11 +758,11 @@ protected SqlNode createIndexTable(SqlIdentifier indexTableName,
                     // to convert default value
                     if (primaryTableMeta != null) {
                         ColumnMeta columnMeta = primaryTableMeta.getColumnIgnoreCase(columnName);
-                        if (columnMeta.isBinaryDefault()) {
+                        if (columnMeta != null && columnMeta.isBinaryDefault()) {
                             SQLHexExpr newDefaultVal = new SQLHexExpr(columnMeta.getField().getDefault());
                             columnDefinition.setDefaultExpr(newDefaultVal);
                         }
-                        if (columnMeta.isLogicalGeneratedColumn()) {
+                        if (columnMeta != null && columnMeta.isLogicalGeneratedColumn()) {
                             columnDefinition.setDefaultExpr(null);
                         }
                     }
@@ -765,7 +771,7 @@ protected SqlNode createIndexTable(SqlIdentifier indexTableName,
                 withoutPk = false;
 
                 final MySqlPrimaryKey primaryKey = (MySqlPrimaryKey) tableElement;
-                if (!pkList.isEmpty()) {
+                if (!pkList.isEmpty() && !isColumnar) {
                     throw new TddlRuntimeException(
                         ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_UNSUPPORTED_PRIMARY_TABLE_DEFINITION,
                         "multiple primary key definition");
@@ -956,7 +962,7 @@ protected SqlNode createIndexTable(SqlIdentifier indexTableName,
 
         // Generate unique index of pk on unique GSI.
         // the unique index will be dropped when backfill finish
-        Map<String, TableMeta> tableMetaMap = ec.getSchemaManager().getCache();
+        Map<String, TableMeta> tableMetaMap = ec.getSchemaManager(schemaName).getCache();
         if (unique && !pkList.isEmpty() && tableMetaMap != null && tableMetaMap.containsKey(
             primaryTableName.toLowerCase())) {
             genUniqueIndexForUGSI(indexTableStmt, pkList);
@@ -1038,6 +1044,8 @@ protected SqlNode createClusteredIndexTable(SqlIdentifier indexTableName,
         List<String> pkList = new ArrayList<>();
         TableMeta primaryTableMeta = ec.getSchemaManager(schemaName).getTableWithNull(primaryTableName);
 
+        boolean isColumnar = GsiUtils.isAddCci(relDdl.getSqlNode(), sqlAlterTable);
+
         // Generated column can not be sharding key
         if (primaryTableMeta != null) {
             for (String col : indexShardingKey) {
@@ -1049,6 +1057,16 @@ protected SqlNode createClusteredIndexTable(SqlIdentifier indexTableName,
             }
         }
 
+        // validate unsupported columnar columns
+        if (isColumnar) {
+            SqlValidatorImpl.validateUnsupportedColumnTypeWithCci(
+                indexTableStmt, indexTableStmt.getPrimaryKeyNames(),
+                gsiPreparedData.getColumns().stream()
+                    .map(SqlIndexColumnName::getColumnNameStr)
+                    .collect(Collectors.toList()),
+                gsiPreparedData.getShardColumns());
+        }
+
         /**
          *
          *     1. remove AUTO_INCREMENT property
@@ -1078,7 +1096,7 @@ protected SqlNode createClusteredIndexTable(SqlIdentifier indexTableName,
                         final SQLColumnConstraint constraint = constraintIt.next();
                         if (constraint instanceof SQLColumnPrimaryKey) {
                             withoutPk = false;
-                            if (!pkList.isEmpty()) {
+                            if (!pkList.isEmpty() && !isColumnar) {
                                 throw new TddlRuntimeException(
                                     ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_UNSUPPORTED_PRIMARY_TABLE_DEFINITION,
                                     "multiple primary key definition");
@@ -1115,7 +1133,7 @@ protected SqlNode createClusteredIndexTable(SqlIdentifier indexTableName,
             } else if (tableElement instanceof MySqlPrimaryKey) {
                 withoutPk = false;
                 final MySqlPrimaryKey primaryKey = (MySqlPrimaryKey) tableElement;
-                if (!pkList.isEmpty()) {
+                if (!pkList.isEmpty() && !isColumnar) {
                     throw new TddlRuntimeException(
                         ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_UNSUPPORTED_PRIMARY_TABLE_DEFINITION,
                         "multiple primary key definition");
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/CreatePartitionGlobalIndexBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/CreatePartitionGlobalIndexBuilder.java
index 83c02ae41..b7d25cd31 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/CreatePartitionGlobalIndexBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/CreatePartitionGlobalIndexBuilder.java
@@ -20,6 +20,7 @@
 import com.alibaba.polardbx.common.exception.NotSupportException;
 import com.alibaba.polardbx.common.exception.TddlRuntimeException;
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.common.utils.GeneralUtil;
 import com.alibaba.polardbx.druid.sql.SQLUtils;
 import com.alibaba.polardbx.druid.sql.ast.SQLIndex;
 import com.alibaba.polardbx.druid.sql.ast.expr.SQLIdentifierExpr;
@@ -38,8 +39,10 @@
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.CreateTablePreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.CreateGlobalIndexPreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
+import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
 import com.alibaba.polardbx.optimizer.partition.PartitionSpec;
 import com.alibaba.polardbx.optimizer.partition.common.PartitionLocation;
+import com.alibaba.polardbx.optimizer.partition.common.PartitionStrategy;
 import com.alibaba.polardbx.optimizer.partition.common.PartitionTableType;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Maps;
@@ -68,15 +71,27 @@
 
 public class CreatePartitionGlobalIndexBuilder extends CreateGlobalIndexBuilder {
 
+    private final Map indexTablePreparedDataMap;
+    private final boolean alignWithPrimaryTable;
+
     public CreatePartitionGlobalIndexBuilder(@Deprecated DDL ddl, CreateGlobalIndexPreparedData gsiPreparedData,
+                                             Map indexTablePreparedDataMap,
+                                             boolean alignWithPrimaryTable,
                                              ExecutionContext executionContext) {
         super(ddl, gsiPreparedData, executionContext);
+        this.indexTablePreparedDataMap = indexTablePreparedDataMap;
+        this.alignWithPrimaryTable = alignWithPrimaryTable;
     }
 
     @Override
     public CreateGlobalIndexBuilder build() {
         buildTablePartitionInfoAndTopology();
-        buildPhysicalPlans();
+        if (gsiPreparedData.isColumnarIndex()) {
+            // Build sql template to generate covering columns
+            buildSqlTemplate();
+        } else {
+            buildPhysicalPlans();
+        }
         built = true;
         return this;
     }
@@ -94,17 +109,16 @@ private void buildTablePartitionInfoAndTopology() {
                 "DDL Kind '" + sqlDdl.getKind() + "' for GSI creation");
         }
 
-        indexTableBuilder = new CreatePartitionTableBuilder(relDdl, indexTablePreparedData, executionContext,
-            PartitionTableType.GSI_TABLE);
+        // Set the table type to COLUMNAR_TABLE for columnar indexes, so that
+        // COLUMNAR_DEFAULT_PARTITIONS is used as the default partition count
+        // in {@link com.alibaba.polardbx.optimizer.partition.PartitionInfoBuilder.autoDecideHashPartCountIfNeed}
+        indexTableBuilder = new CreatePartitionTableBuilder(
+            relDdl,
+            indexTablePreparedData,
+            executionContext,
+            gsiPreparedData.isColumnarIndex() ? PartitionTableType.COLUMNAR_TABLE : PartitionTableType.GSI_TABLE);
 
-        PartitionInfo indexPartInfo = indexTableBuilder.getPartitionInfo();
-        PartitionInfo primaryPartitionInfo = gsiPreparedData.getPrimaryPartitionInfo();
-        if (indexPartInfo.equals(primaryPartitionInfo)
-            && primaryPartitionInfo.getTableGroupId() == TableGroupRecord.INVALID_TABLE_GROUP_ID
-            && indexPartInfo.getTableGroupId() == TableGroupRecord.INVALID_TABLE_GROUP_ID) {
-            physicalLocationAlignWithPrimaryTable(primaryPartitionInfo, indexPartInfo);
-            gsiPreparedData.setIndexAlignWithPrimaryTableGroup(true);
-        }
+        alignWithTargetTable();
 
         indexTableBuilder.buildTableRuleAndTopology();
         this.gsiPreparedData.setIndexPartitionInfo(indexTableBuilder.getPartitionInfo());
@@ -112,6 +126,70 @@ private void buildTablePartitionInfoAndTopology() {
         this.tableTopology = indexTableBuilder.getTableTopology();
     }
 
+    private void alignWithTargetTable() {
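+        // Try to co-locate the index table with a target table (the primary table or a
+        // previously built index): with implicit table groups the match is made by table
+        // group name; otherwise both partition infos must be equal and not yet bound to
+        // any table group.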
+        CreateTablePreparedData indexTablePreparedData = gsiPreparedData.getIndexTablePreparedData();
+        PartitionInfo indexPartInfo = indexTableBuilder.getPartitionInfo();
+        PartitionInfo targetPartInfo = gsiPreparedData.getPrimaryPartitionInfo();
+        if (indexTablePreparedData.isWithImplicitTableGroup()) {
+            if (alignWithPrimaryTable) {
+                boolean partInfoEqual = partitionInfoEqual(indexPartInfo, targetPartInfo);
+                if (partInfoEqual && targetPartInfo.getTableGroupId() == indexPartInfo.getTableGroupId()) {
+                    physicalLocationAlignWithPrimaryTable(targetPartInfo, indexPartInfo);
+                    gsiPreparedData.setTableGroupAlignWithTargetTable(targetPartInfo.getTableName());
+                    return;
+                }
+            }
+            if (GeneralUtil.isEmpty(indexTablePreparedDataMap)) {
+                return;
+            }
+            String tableGroupName = ((SqlIdentifier) indexTablePreparedData.getTableGroupName()).getLastName();
+            for (Map.Entry indexTablePreparedDataEntry : indexTablePreparedDataMap.entrySet()) {
+                if (indexTablePreparedDataEntry.getValue().isWithImplicitTableGroup()) {
+                    String candidateTableGroupName =
+                        ((SqlIdentifier) indexTablePreparedDataEntry.getValue().getTableGroupName()).getLastName();
+                    if (tableGroupName.equalsIgnoreCase(candidateTableGroupName)) {
+                        targetPartInfo = indexTablePreparedDataEntry.getValue().getIndexPartitionInfo();
+                        boolean partInfoEqual = partitionInfoEqual(indexPartInfo, targetPartInfo);
+                        if (partInfoEqual) {
+                            physicalLocationAlignWithPrimaryTable(targetPartInfo, indexPartInfo);
+                            gsiPreparedData.setTableGroupAlignWithTargetTable(targetPartInfo.getTableName());
+                            return;
+                        } else {
+                            throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT,
+                                "the partition definition of table " + indexPartInfo.getTableName()
+                                    + " does not match tablegroup " + tableGroupName);
+                        }
+                    }
+                }
+            }
+        } else {
+            boolean partInfoEqual = partitionInfoEqual(indexPartInfo, targetPartInfo);
+            if (partInfoEqual && targetPartInfo.getTableGroupId() == TableGroupRecord.INVALID_TABLE_GROUP_ID
+                && indexPartInfo.getTableGroupId() == TableGroupRecord.INVALID_TABLE_GROUP_ID) {
+                physicalLocationAlignWithPrimaryTable(targetPartInfo, indexPartInfo);
+                gsiPreparedData.setTableGroupAlignWithTargetTable(targetPartInfo.getTableName());
+            } else {
+                if (GeneralUtil.isEmpty(indexTablePreparedDataMap)) {
+                    return;
+                }
+                for (Map.Entry indexTablePreparedDataEntry : indexTablePreparedDataMap.entrySet()) {
+                    if (!indexTablePreparedDataEntry.getValue().isWithImplicitTableGroup()) {
+                        targetPartInfo = indexTablePreparedDataEntry.getValue().getIndexPartitionInfo();
+                        partInfoEqual = partitionInfoEqual(indexPartInfo, targetPartInfo);
+                        if (partInfoEqual && targetPartInfo.getTableGroupId() == TableGroupRecord.INVALID_TABLE_GROUP_ID
+                            && indexPartInfo.getTableGroupId() == TableGroupRecord.INVALID_TABLE_GROUP_ID) {
+                            physicalLocationAlignWithPrimaryTable(targetPartInfo, indexPartInfo);
+                            gsiPreparedData.setTableGroupAlignWithTargetTable(targetPartInfo.getTableName());
+                            return;
+                        }
+                    }
+                }
+            }
+        }
+    }
+
     private void physicalLocationAlignWithPrimaryTable(PartitionInfo primaryPartitionInfo,
                                                        PartitionInfo indexPartitionInfo) {
         assert primaryPartitionInfo.equals(indexPartitionInfo);
@@ -160,6 +238,7 @@ private static List getPrimaryKeyNames(MySqlCreateTableStatement astCrea
     protected SqlNode buildIndexTableDefinition(final SqlAlterTable sqlAlterTable, final boolean forceAllowGsi) {
         final boolean uniqueIndex = sqlAlterTable.getAlters().get(0) instanceof SqlAddUniqueIndex;
         final SqlIndexDefinition indexDef = ((SqlAddIndex) sqlAlterTable.getAlters().get(0)).getIndexDef();
+        final boolean isColumnar = indexDef.isColumnar();
 
         /**
          * build global secondary index table
@@ -194,7 +273,8 @@ protected SqlNode buildIndexTableDefinition(final SqlAlterTable sqlAlterTable, f
             // Add PK in check set because simple index may concat PK as partition key.
             indexAndPkColumnSet.addAll(getPrimaryKeyNames(astCreateIndexTable));
         }
-        if (!containsAllShardingColumns(indexAndPkColumnSet, indexPartitionInfo)) {
+        // Columnar indexes do not force using index columns as partition columns
+        if (!isColumnar && !containsAllShardingColumns(indexAndPkColumnSet, indexPartitionInfo)) {
             throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_INDEX_AND_SHARDING_COLUMNS_NOT_MATCH);
         }
 
@@ -202,8 +282,9 @@ protected SqlNode buildIndexTableDefinition(final SqlAlterTable sqlAlterTable, f
          * check single/broadcast table
          */
         if (null != primaryPartitionInfo) {
-            if (forceAllowGsi == false && (primaryPartitionInfo.isBroadcastTable() || primaryPartitionInfo
-                .isSingleTable())) {
+            if (!forceAllowGsi
+                && !isColumnar
+                && (primaryPartitionInfo.isBroadcastTable() || primaryPartitionInfo.isSingleTable())) {
                 throw new TddlRuntimeException(
                     ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_UNSUPPORTED_PRIMARY_TABLE_DEFINITION,
                     "Does not support create Global Secondary Index on single or broadcast table");
@@ -265,14 +346,17 @@ protected SqlNode buildIndexTableDefinition(final SqlCreateIndex sqlCreateIndex)
             // Add PK in check set because simple index may concat PK as partition key.
             indexAndPkColumnSet.addAll(getPrimaryKeyNames(indexTableStmt));
         }
-        if (!containsAllShardingColumns(indexAndPkColumnSet, indexPartitionInfo)) {
+        final boolean isColumnar = sqlCreateIndex.createCci();
+        // Columnar indexes do not force using index columns as partition columns
+        if (!isColumnar && !containsAllShardingColumns(indexAndPkColumnSet, indexPartitionInfo)) {
             throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_INDEX_AND_SHARDING_COLUMNS_NOT_MATCH);
         }
 
-        /**
+        /*
          * check single/broadcast table
+         * creating a CCI on a single/broadcast table is supported
          */
-        if (null != primaryPartitionInfo) {
+        if (null != primaryPartitionInfo && !isColumnar) {
             if (primaryPartitionInfo.isBroadcastTable() || primaryPartitionInfo.isSingleTable()) {
                 throw new TddlRuntimeException(
                     ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_UNSUPPORTED_PRIMARY_TABLE_DEFINITION,
@@ -357,49 +441,57 @@ protected void addLocalIndex(Map indexColumnMap,
                                  boolean unique, boolean isGsi,
                                  List options) {
 
-//        final List indexShardKey = gsiPreparedData.getShardColumnsNotReorder();
-//        if (indexShardKey != null && indexShardKey.size() > 0) {
-//            if (isRepartition()) {
-//                // like create table, look SqlCreateTable
-//                SqlCreateTable.addCompositeIndex(indexColumnMap, indexTableStmt, false, options, false, indexShardKey);
-//            } else {
-//                SqlCreateTable.addCompositeIndex(indexColumnMap, indexTableStmt, unique, options, isGsi, indexShardKey);
-//            }
-//        }
-
         List> allLevelPartKeys = gsiPreparedData.getAllLevelPartColumns();
+        PartitionInfo gsiPartInfo = gsiPreparedData.getIndexPartitionInfo();
+
+        String partStrategy = gsiPartInfo.getPartitionBy().getStrategy().toString();
+        String subPartStrategy = gsiPartInfo.getPartitionBy().getSubPartitionBy() == null ? "" :
+            gsiPartInfo.getPartitionBy().getSubPartitionBy().getStrategy().toString();
+
+        boolean usePartBy = !partStrategy.isEmpty();
         boolean useSubPartBy = false;
         boolean subPartKeyContainAllPartKeyAsPrefixCols = false;
         List partKeyList = allLevelPartKeys.get(0);
         List subPartKeyList = null;
+        boolean addPartColIndexLater = false;
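+        // Whether the local index on the first-level partition columns must be created
+        // after the index on the subpartition columns; decided from the two strategies.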
         if (allLevelPartKeys.size() > 1 && allLevelPartKeys.get(1).size() > 0) {
             useSubPartBy = true;
             subPartKeyList = allLevelPartKeys.get(1);
             subPartKeyContainAllPartKeyAsPrefixCols =
                 SqlCreateTable.checkIfContainPrefixPartCols(subPartKeyList, partKeyList);
+            addPartColIndexLater = SqlCreateTable.needAddPartColLocalIndexLater(partStrategy, subPartStrategy);
         }
-        if (!(useSubPartBy & subPartKeyContainAllPartKeyAsPrefixCols)) {
 
-//            if (partKeyList.size() == 1) {
-//                SqlCreateTable.addIndex(indexColumnMap, indexTableStmt, unique, options, true, partKeyList);
-//            } else {
-//                SqlCreateTable.addIndex(indexColumnMap, indexTableStmt, unique, options, true, new ArrayList<>());
-//                SqlCreateTable.addCompositeIndex(indexColumnMap, indexTableStmt, partKeyList);
-//            }
+        if (!(useSubPartBy && subPartKeyContainAllPartKeyAsPrefixCols)) {
+
+            if (addPartColIndexLater) {
+                if (useSubPartBy) {
+                    SqlCreateTable.addCompositeIndexForAutoTbl(indexColumnMap, indexTableStmt, false,
+                        ImmutableList.of(), false, subPartStrategy, subPartKeyList, false, "");
+                }
+            }
 
             if (isRepartition()) {
                 // like create table, look SqlCreateTable
-                SqlCreateTable.addCompositeIndex(indexColumnMap, indexTableStmt, false, options, false, partKeyList,
-                    false, "");
+                SqlCreateTable.addCompositeIndexForAutoTbl(indexColumnMap, indexTableStmt, false, options, false,
+                    partStrategy, partKeyList, false, "");
             } else {
-                SqlCreateTable.addCompositeIndex(indexColumnMap, indexTableStmt, unique, options, isGsi, partKeyList,
-                    false, "");
+                SqlCreateTable.addCompositeIndexForAutoTbl(indexColumnMap, indexTableStmt, unique, options, isGsi,
+                    partStrategy, partKeyList, false, "");
             }
         }
 
-        if (useSubPartBy) {
-            SqlCreateTable.addCompositeIndex(indexColumnMap, indexTableStmt, false, ImmutableList.of(),
-                false, subPartKeyList, false, "");
+        if (useSubPartBy && !addPartColIndexLater) {
+            SqlCreateTable.addCompositeIndexForAutoTbl(indexColumnMap, indexTableStmt, false,
+                ImmutableList.of(), false, subPartStrategy, subPartKeyList, false, "");
         }
 
 //        SqlCreateTable.addLocalIndexForAutoTbl(indexColumnMap, indexTableStmt, unique, isGsi, options, gsiPreparedData.getAllLevelPartColumns(), isRepartition());
@@ -446,4 +538,12 @@ protected void genUniqueIndexForUGSI(MySqlCreateTableStatement indexTableStmt,
         uniqueIndex.setParent(indexTableStmt);
         indexTableStmt.getTableElementList().add(uniqueIndex);
     }
+
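+    /**
+     * For KEY / RANGE COLUMNS strategies only the actual (max prefix) partition
+     * columns are compared; all other strategies require full PartitionInfo equality.
+     */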
+    private boolean partitionInfoEqual(PartitionInfo partitionInfo1, PartitionInfo partitionInfo2) {
+        PartitionStrategy strategy = partitionInfo1.getPartitionBy().getStrategy();
+        boolean isVectorStrategy = (strategy == PartitionStrategy.KEY || strategy == PartitionStrategy.RANGE_COLUMNS);
+        if (isVectorStrategy) {
+            return PartitionInfoUtil.actualPartColsEquals(partitionInfo1, partitionInfo2,
+                PartitionInfoUtil.fetchAllLevelMaxActualPartColsFromPartInfos(partitionInfo1, partitionInfo2));
+        }
+        return partitionInfo1.equals(partitionInfo2);
+    }
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/CreatePartitionTableWithGsiBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/CreatePartitionTableWithGsiBuilder.java
index 6ca8e1b6c..2bedcd153 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/CreatePartitionTableWithGsiBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/CreatePartitionTableWithGsiBuilder.java
@@ -23,12 +23,16 @@
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.CreateTablePreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.CreateGlobalIndexPreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.CreateTableWithGsiPreparedData;
+import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
 import com.alibaba.polardbx.optimizer.partition.common.PartitionTableType;
 import org.apache.calcite.rel.core.DDL;
+import org.apache.calcite.sql.SqlIdentifier;
+import org.jetbrains.annotations.NotNull;
 
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.TreeMap;
 
 public class CreatePartitionTableWithGsiBuilder {
 
@@ -75,26 +79,59 @@ public Map> getIndexTablePhysicalPlansMap() {
 
     private void buildPrimaryTablePhysicalPlans() {
         CreateTablePreparedData primaryTablePreparedData = preparedData.getPrimaryTablePreparedData();
+        final PartitionTableType partitionTableType = getPartitionTableType(primaryTablePreparedData);
+
         primaryTableBuilder = new CreatePartitionTableBuilder(relDdl, primaryTablePreparedData, executionContext,
-            PartitionTableType.PARTITION_TABLE);
+            partitionTableType);
         primaryTableBuilder.build();
         this.primaryTableTopology = primaryTableBuilder.getTableTopology();
         this.primaryTablePhysicalPlans = primaryTableBuilder.getPhysicalPlans();
     }
 
+    @NotNull
+    private static PartitionTableType getPartitionTableType(CreateTablePreparedData primaryTablePreparedData) {
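+        // Broadcast and single tables keep their own table types here instead of being
+        // forced to PARTITION_TABLE when the primary table is built together with indexes.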
+        final boolean broadcastTable = primaryTablePreparedData.isBroadcast();
+        final boolean singleTable = !primaryTablePreparedData.isSharding()
+            && primaryTablePreparedData.getPartitioning() == null;
+
+        PartitionTableType partitionTableType = PartitionTableType.PARTITION_TABLE;
+        if (broadcastTable) {
+            partitionTableType = PartitionTableType.BROADCAST_TABLE;
+        } else if (singleTable) {
+            partitionTableType = PartitionTableType.SINGLE_TABLE;
+        }
+        return partitionTableType;
+    }
+
     private void buildIndexTablePhysicalPlans() {
+        Map indexTablePreparedDataMap = new LinkedHashMap<>();
         for (Map.Entry entry : preparedData.getIndexTablePreparedDataMap()
             .entrySet()) {
-            buildIndexTablePhysicalPlans(entry.getKey(), entry.getValue());
+            buildIndexTablePhysicalPlans(entry.getKey(), entry.getValue(), indexTablePreparedDataMap);
+            indexTablePreparedDataMap.put(entry.getKey(), entry.getValue());
         }
     }
 
     private void buildIndexTablePhysicalPlans(String indexTableName,
-                                              CreateGlobalIndexPreparedData indexTablePreparedData) {
+                                              CreateGlobalIndexPreparedData indexTablePreparedData,
+                                              Map indexTablePreparedDataMap) {
         indexTablePreparedData.setPrimaryPartitionInfo(primaryTableBuilder.getPartitionInfo());
+        CreateTablePreparedData primaryTablePreparedData = preparedData.getPrimaryTablePreparedData();
+        boolean alignWithPrimaryTable = false;
+        if (primaryTablePreparedData.isWithImplicitTableGroup() && indexTablePreparedData.isWithImplicitTableGroup() &&
+            primaryTablePreparedData.getTableGroupName() != null) {
+            String indexTableGroupName = indexTablePreparedData.getTableGroupName() != null ?
+                ((SqlIdentifier) indexTablePreparedData.getTableGroupName()).getLastName() : null;
+            String primaryTableGroupName = ((SqlIdentifier) primaryTablePreparedData.getTableGroupName()).getLastName();
+            if (primaryTableGroupName.equalsIgnoreCase(indexTableGroupName)) {
+                alignWithPrimaryTable = true;
+            }
+        }
 
         CreateGlobalIndexBuilder indexTableBuilder =
-            new CreatePartitionGlobalIndexBuilder(relDdl, indexTablePreparedData, executionContext);
+            new CreatePartitionGlobalIndexBuilder(relDdl, indexTablePreparedData, indexTablePreparedDataMap,
+                alignWithPrimaryTable,
+                executionContext);
         indexTableBuilder.build();
 
         this.indexTablePhysicalPlansMap.put(indexTableName, indexTableBuilder.getPhysicalPlans());
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/DropPartitionGlobalIndexBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/DropPartitionGlobalIndexBuilder.java
index 577011739..b75d3a5eb 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/DropPartitionGlobalIndexBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/DropPartitionGlobalIndexBuilder.java
@@ -21,7 +21,6 @@
 import com.alibaba.polardbx.optimizer.core.planner.SqlConverter;
 import com.alibaba.polardbx.optimizer.core.rel.ReplaceTableNameWithQuestionMarkVisitor;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.DropGlobalIndexPreparedData;
-import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.rel.core.DDL;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/DropPartitionTableWithGsiBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/DropPartitionTableWithGsiBuilder.java
index a08b20a52..57c36d2b7 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/DropPartitionTableWithGsiBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/DropPartitionTableWithGsiBuilder.java
@@ -17,18 +17,12 @@
 package com.alibaba.polardbx.executor.ddl.job.builder.gsi;
 
 import com.alibaba.polardbx.executor.ddl.job.builder.DropPartitionTableBuilder;
-import com.alibaba.polardbx.executor.ddl.job.builder.DropTableBuilder;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
-import com.alibaba.polardbx.optimizer.core.rel.PhyDdlTableOperation;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.DropTablePreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.DropGlobalIndexPreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.DropTableWithGsiPreparedData;
 import org.apache.calcite.rel.core.DDL;
 
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-
 public class DropPartitionTableWithGsiBuilder extends DropTableWithGsiBuilder {
 
     public DropPartitionTableWithGsiBuilder(DDL ddl, DropTableWithGsiPreparedData preparedData,
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/IndexBuilderHelper.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/IndexBuilderHelper.java
index b54c9935a..c60d09992 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/IndexBuilderHelper.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/gsi/IndexBuilderHelper.java
@@ -134,10 +134,8 @@ public static SqlAlterTable buildImplicitLocalIndexSql(SqlAlterTable alterTable)
             String dropIndexSql =
                 "DROP INDEX " + SqlIdentifier.surroundWithBacktick(localIndexNameString)
                     + " ON " + SqlIdentifier.surroundWithBacktick(dropIndex.getOriginTableName().getLastName());
-            SqlAlterTableDropIndex newDropIndex =
-                new SqlAlterTableDropIndex(dropIndex.getOriginTableName(), localIndexName, dropIndexSql,
-                    SqlParserPos.ZERO);
-            newAlter = newDropIndex;
+            newAlter = SqlDdlNodes.alterTableDropIndex(dropIndex.getOriginTableName(), localIndexName, dropIndexSql,
+                SqlParserPos.ZERO);
         } else {
             throw new UnsupportedOperationException("not supported");
         }
@@ -197,6 +195,8 @@ public static void resetIndexPartition(SQLIndexDefinition def) {
         def.setTbPartitions(null);
         def.setPartitioning(null);
         def.setCovering(Collections.emptyList());
+        def.setWithImplicitTablegroup(false);
+        def.setTableGroup(null);
     }
 
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupBaseBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupBaseBuilder.java
index e1fc76e83..c63f771c9 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupBaseBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupBaseBuilder.java
@@ -21,7 +21,6 @@
 import com.alibaba.polardbx.common.utils.GeneralUtil;
 import com.alibaba.polardbx.common.utils.Pair;
 import com.alibaba.polardbx.gms.metadb.MetaDbDataSource;
-import com.alibaba.polardbx.gms.partition.TablePartRecordInfoContext;
 import com.alibaba.polardbx.gms.tablegroup.TableGroupAccessor;
 import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig;
 import com.alibaba.polardbx.gms.tablegroup.TableGroupRecord;
@@ -40,7 +39,6 @@
 import org.apache.calcite.rel.core.DDL;
 
 import java.sql.Connection;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -113,7 +111,7 @@ public List getAllTableNames() {
         TableGroupConfig tableGroupConfig =
             OptimizerContext.getContext(preparedData.getSchemaName()).getTableGroupInfoManager()
                 .getTableGroupConfigByName(preparedData.getTableGroupName());
-        return tableGroupConfig.getAllTables().stream().map(o -> o.getTableName()).collect(Collectors.toList());
+        return tableGroupConfig.getAllTables();
     }
 
     public Map> getNewPartitionsPhysicalPlansMap() {
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupMergePartitionBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupMergePartitionBuilder.java
index cae3ecca5..941b8f022 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupMergePartitionBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupMergePartitionBuilder.java
@@ -27,8 +27,6 @@
 import com.alibaba.polardbx.optimizer.OptimizerContext;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupMergePartitionPreparedData;
-import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupSplitPartitionPreparedData;
-import com.alibaba.polardbx.optimizer.partition.PartitionByDefinition;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
 import com.alibaba.polardbx.optimizer.tablegroup.TableGroupInfoManager;
@@ -51,7 +49,7 @@ protected void generateNewPhysicalTableNames(List allLogicalTableNames)
                 OptimizerContext.getContext(mergeData.getSchemaName()).getTableGroupInfoManager();
             TableGroupConfig tableGroupConfig =
                 tableGroupInfoManager.getTableGroupConfigByName(mergeData.getTableGroupName());
-            String firstTableName = tableGroupConfig.getTables().get(0).getTableName();
+            String firstTableName = tableGroupConfig.getTables().get(0);
             PartitionInfo firstPartitionInfo =
                 OptimizerContext.getContext(preparedData.getSchemaName()).getPartitionInfoManager()
                     .getPartitionInfo(firstTableName);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupModifyPartitionBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupModifyPartitionBuilder.java
index 1439ac3ea..559e7526f 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupModifyPartitionBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupModifyPartitionBuilder.java
@@ -16,25 +16,16 @@
 
 package com.alibaba.polardbx.executor.ddl.job.builder.tablegroup;
 
-import com.alibaba.polardbx.common.exception.TddlRuntimeException;
-import com.alibaba.polardbx.common.exception.code.ErrorCode;
-import com.alibaba.polardbx.gms.metadb.MetaDbDataSource;
-import com.alibaba.polardbx.gms.tablegroup.TableGroupAccessor;
-import com.alibaba.polardbx.gms.tablegroup.TableGroupRecord;
 import com.alibaba.polardbx.gms.topology.GroupDetailInfoExRecord;
-import com.alibaba.polardbx.gms.util.MetaDbUtil;
-import com.alibaba.polardbx.gms.util.PartitionNameUtil;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
 import com.alibaba.polardbx.optimizer.config.table.TableMeta;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupItemPreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupModifyPartitionPreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
-import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
 import com.alibaba.polardbx.optimizer.partition.PartitionSpec;
 import org.apache.calcite.rel.core.DDL;
 
-import java.sql.Connection;
 import java.util.List;
 
 public class AlterTableGroupModifyPartitionBuilder extends AlterTableGroupBaseBuilder {
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupMovePartitionBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupMovePartitionBuilder.java
index 95e5ffb85..53cdd7355 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupMovePartitionBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupMovePartitionBuilder.java
@@ -16,6 +16,8 @@
 
 package com.alibaba.polardbx.executor.ddl.job.builder.tablegroup;
 
+import com.alibaba.polardbx.common.utils.Pair;
+import com.alibaba.polardbx.executor.ddl.job.builder.AlterTableDiscardTableSpaceBuilder;
 import com.alibaba.polardbx.gms.tablegroup.PartitionGroupRecord;
 import com.alibaba.polardbx.gms.topology.GroupDetailInfoExRecord;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
@@ -28,10 +30,19 @@
 import org.apache.calcite.rel.core.DDL;
 
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
 
 public class AlterTableGroupMovePartitionBuilder extends AlterTableGroupBaseBuilder {
 
+    protected Map> discardTableSpacePhysicalPlansMap =
+        new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
+
+    Map>> tbPtbGroupMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
+
     public AlterTableGroupMovePartitionBuilder(DDL ddl, AlterTableGroupMovePartitionPreparedData preparedData,
                                                ExecutionContext executionContext) {
         super(ddl, preparedData, executionContext);
@@ -42,6 +53,7 @@ public void buildTablesPhysicalPlans() {
         List groupDetailInfoExRecords = preparedData.getTargetGroupDetailInfoExRecords();
         List allTables = getAllTableNames();
         generateNewPhysicalTableNames(allTables);
+
         for (String tableName : allTables) {
             AlterTableGroupItemPreparedData alterTableGroupItemPreparedData =
                 createAlterTableGroupItemPreparedData(tableName, groupDetailInfoExRecords);
@@ -54,6 +66,35 @@ public void buildTablesPhysicalPlans() {
             newPartitionsPhysicalPlansMap.put(tableName, phyDdlTableOperations);
             tablesPreparedData.put(tableName, alterTableGroupItemPreparedData);
             orderedTargetTablesLocations.put(tableName, itemBuilder.getOrderedTargetTableLocations());
+
+            Map>> tablesTopology = itemBuilder.getTableTopology();
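+            // Build ALTER TABLE ... DISCARD TABLESPACE physical plans over the target
+            // topology of each table being moved.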
+            AlterTableDiscardTableSpaceBuilder discardTableSpaceBuilder =
+                AlterTableDiscardTableSpaceBuilder.createBuilder(
+                    preparedData.getSchemaName(), tableName, tablesTopology, executionContext);
+            discardTableSpacePhysicalPlansMap.put(tableName, discardTableSpaceBuilder.build().getPhysicalPlans());
+
+            if (preparedData.isUsePhysicalBackfill()) {
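+                // Record a (source group, target group) pair for every physical table,
+                // telling physical backfill where each table moves between groups.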
+                tbPtbGroupMap.put(tableName, new HashMap<>());
+                for (Map.Entry> srcPhyTbInfo : itemBuilder.getSourcePhyTables().entrySet()) {
+                    String srcGroupKey = srcPhyTbInfo.getKey();
+
+                    assert srcGroupKey != null;
+
+                    String tarGroupKey = null;
+                    for (String phyTbName : srcPhyTbInfo.getValue()) {
+                        if (tarGroupKey == null) {
+                            for (Map.Entry> tarPhyTbInfo : itemBuilder.getTargetPhyTables()
+                                .entrySet()) {
+                                if (tarPhyTbInfo.getValue().contains(phyTbName)) {
+                                    tarGroupKey = tarPhyTbInfo.getKey();
+                                    break;
+                                }
+                            }
+                        }
+                        tbPtbGroupMap.get(tableName).put(phyTbName, Pair.of(srcGroupKey, tarGroupKey));
+                    }
+                }
+            }
         }
     }
 
@@ -83,4 +124,12 @@ public List getNewPhyTables(String tableName) {
         }
         return newPhyTables;
     }
+
+    public Map> getDiscardTableSpacePhysicalPlansMap() {
+        return discardTableSpacePhysicalPlansMap;
+    }
+
+    public Map>> getTbPtbGroupMap() {
+        return tbPtbGroupMap;
+    }
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupSplitPartitionBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupSplitPartitionBuilder.java
index 8d64ae704..1ee8685f3 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupSplitPartitionBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupSplitPartitionBuilder.java
@@ -51,7 +51,7 @@ protected void generateNewPhysicalTableNames(List allLogicalTableNames)
                 OptimizerContext.getContext(splitData.getSchemaName()).getTableGroupInfoManager();
             TableGroupConfig tableGroupConfig =
                 tableGroupInfoManager.getTableGroupConfigByName(splitData.getTableGroupName());
-            String firstTableName = tableGroupConfig.getTables().get(0).getTableName();
+            String firstTableName = tableGroupConfig.getTables().get(0);
             PartitionInfo firstPartitionInfo =
                 OptimizerContext.getContext(preparedData.getSchemaName()).getPartitionInfoManager()
                     .getPartitionInfo(firstTableName);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupSplitPartitionByHotValueBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupSplitPartitionByHotValueBuilder.java
index 83f3574b4..bf516929b 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupSplitPartitionByHotValueBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableGroupSplitPartitionByHotValueBuilder.java
@@ -27,7 +27,6 @@
 import com.alibaba.polardbx.optimizer.OptimizerContext;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupSplitPartitionByHotValuePreparedData;
-import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableSplitPartitionByHotValuePreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
 import com.alibaba.polardbx.optimizer.tablegroup.TableGroupInfoManager;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableSetTableGroupBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableSetTableGroupBuilder.java
index d9f3b8ae6..a2d373fea 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableSetTableGroupBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/AlterTableSetTableGroupBuilder.java
@@ -28,6 +28,7 @@
 import com.alibaba.polardbx.gms.util.GroupInfoUtil;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
 import com.alibaba.polardbx.optimizer.config.table.ColumnMeta;
+import com.alibaba.polardbx.optimizer.config.table.TableMeta;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableSetTableGroupPreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionByDefinition;
@@ -91,6 +92,10 @@ private void checkAndSetChangeSchemaMeta() {
             TableGroupConfig tableGroupInfo =
                 optimizerContext.getTableGroupInfoManager().getTableGroupConfigByName(tableGroupName);
 
+            if (tableGroupInfo == null && preparedData.isImplicit()) {
+                onlyChangeSchemaMeta = true;
+                return;
+            }
             if (tableGroupInfo == null) {
                 throw new TddlRuntimeException(ErrorCode.ERR_TABLE_GROUP_NOT_EXISTS,
                     "tablegroup:" + tableGroupName + " does not exist");
@@ -100,8 +105,7 @@ private void checkAndSetChangeSchemaMeta() {
                 onlyChangeSchemaMeta = true;
             } else {
                 TableGroupRecord tgRecord = tableGroupInfo.getTableGroupRecord();
-                TablePartRecordInfoContext tablePartRecordInfoContext = tableGroupInfo.getAllTables().get(0);
-                String tableInTbGrp = tablePartRecordInfoContext.getLogTbRec().tableName;
+                String tableInTbGrp = tableGroupInfo.getAllTables().get(0);
 
                 PartitionInfo targetPartInfo =
                     executionContext.getSchemaManager(schemaName).getTable(tableInTbGrp).getPartitionInfo();
@@ -196,6 +200,12 @@ private void checkAndSetChangeSchemaMeta() {
                                     + " is not match to table: "
                                     + logicTableName);
                         }
+                        TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(logicTableName);
+                        if (tableMeta.isGsi()) {
+                            throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT,
+                                "changing the tablegroup of a GSI to " + tableGroupName
+                                    + " is not supported because it would require repartitioning");
+                        }
                         repartition = true;
                     }
                 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/RefreshTopologyBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/RefreshTopologyBuilder.java
index cbb8415db..a004dc440 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/RefreshTopologyBuilder.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/builder/tablegroup/RefreshTopologyBuilder.java
@@ -71,7 +71,8 @@ public AlterTableGroupItemPreparedData createAlterTableGroupItemPreparedData(Str
         }
         alterTableGroupItemPreparedData
             .setTableVersion(
-                executionContext.getSchemaManager(preparedData.getSchemaName()).getTable(primaryTableName).getVersion());
+                executionContext.getSchemaManager(preparedData.getSchemaName()).getTable(primaryTableName)
+                    .getVersion());
 
         return alterTableGroupItemPreparedData;
     }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/converter/DdlJobDataConverter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/converter/DdlJobDataConverter.java
index 2e2aadac8..d16b2831e 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/converter/DdlJobDataConverter.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/converter/DdlJobDataConverter.java
@@ -16,15 +16,19 @@
 
 package com.alibaba.polardbx.executor.ddl.job.converter;
 
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import com.alibaba.polardbx.common.exception.code.ErrorCode;
 import com.alibaba.polardbx.common.jdbc.BytesSql;
 import com.alibaba.polardbx.common.jdbc.ParameterContext;
+import com.alibaba.polardbx.druid.sql.ast.statement.SQLColumnDefinition;
+import com.alibaba.polardbx.common.properties.ConnectionParams;
 import com.alibaba.polardbx.executor.gms.util.TableMetaUtil;
-import com.alibaba.polardbx.gms.locality.LocalityDesc;
 import com.alibaba.polardbx.gms.metadb.table.TablesExtRecord;
 import com.alibaba.polardbx.gms.partition.TablePartRecordInfoContext;
 import com.alibaba.polardbx.gms.partition.TablePartitionRecord;
 import com.alibaba.polardbx.gms.tablegroup.PartitionGroupRecord;
 import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig;
+import com.alibaba.polardbx.gms.tablegroup.TableGroupDetailConfig;
 import com.alibaba.polardbx.gms.tablegroup.TableGroupRecord;
 import com.alibaba.polardbx.gms.topology.DbInfoManager;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
@@ -40,11 +44,18 @@
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.sql.SqlCreateTable;
 import org.apache.calcite.sql.SqlDropTable;
+import org.apache.calcite.sql.SqlIdentifier;
 import org.apache.calcite.util.Util;
 
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static com.alibaba.polardbx.executor.ddl.newengine.utils.DdlHelper.genHashCodeForPhyTableDDL;
+import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.buildPhyDbTableNameFromGroupNameAndPhyTableName;
 
 public class DdlJobDataConverter {
 
@@ -52,14 +63,16 @@ public class DdlJobDataConverter {
      * NOTE: In most cases you should use DdlPhyPlanBuilder.genPhysicalPlan instead
      */
     public static PhysicalPlanData convertToPhysicalPlanData(Map>> tableTopology,
-                                                             List physicalPlans) {
-        return convertToPhysicalPlanData(tableTopology, physicalPlans, false, false);
+                                                             List physicalPlans,
+                                                             ExecutionContext ec) {
+        return convertToPhysicalPlanData(tableTopology, physicalPlans, false, false, ec);
     }
 
     public static PhysicalPlanData convertToPhysicalPlanData(Map>> tableTopology,
                                                              List physicalPlans,
-                                                             boolean isGsi, boolean isAutoPartition) {
-        return convertToPhysicalPlanData(tableTopology, physicalPlans, isGsi, isAutoPartition, false, false);
+                                                             boolean isGsi, boolean isAutoPartition,
+                                                             ExecutionContext ec) {
+        return convertToPhysicalPlanData(tableTopology, physicalPlans, isGsi, isAutoPartition, false, false, ec);
     }
 
     /**
@@ -69,7 +82,7 @@ public static PhysicalPlanData convertToPhysicalPlanData(Map>> tableTopology,
                                                              List physicalPlans,
                                                              boolean isGsi, boolean isAutoPartition, boolean isOSS,
-                                                             boolean pushDownFk) {
+                                                             boolean pushDownFk, ExecutionContext ec) {
         PhysicalPlanData data = new PhysicalPlanData();
 
         PhyDdlTableOperation physicalPlan = physicalPlans.get(0);
@@ -100,7 +113,7 @@ public static PhysicalPlanData convertToPhysicalPlanData(Map> physicalPartitionTopology =
                     physicalPlan.getPartitionInfo().getPhysicalPartitionTopology(null, false);
@@ -144,6 +157,30 @@ public static PhysicalPlanData convertToPhysicalPlanData(Map convertToPhysicalPlans(PhysicalPlanData data, ExecutionContext executionContext) {
+        return convertToPhysicalPlans(data, executionContext, new HashSet<>());
+    }
+
+    public static Set getPhysicalDoneTables(PhysicalPlanData data, ExecutionContext executionContext,
+                                                    Map hashCodeForDdlBefore) {
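+        // A physical table counts as done when the hash code of its current DDL no longer
+        // matches the one captured before execution, i.e. the physical DDL already took effect.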
+        Set physicalDoneTables = new HashSet<>();
+        for (Map.Entry>> topology : data.getTableTopology().entrySet()) {
+            String groupName = topology.getKey();
+            for (List phyTablesNames : topology.getValue()) {
+                for (String phyTableName : phyTablesNames) {
+                    String hashCodeForDdl = genHashCodeForPhyTableDDL(data.getSchemaName(), groupName,
+                        SqlIdentifier.surroundWithBacktick(phyTableName), 0);
+                    String fullPhyTableName = buildPhyDbTableNameFromGroupNameAndPhyTableName(groupName, phyTableName);
+                    if (!hashCodeForDdlBefore.get(fullPhyTableName).equals(hashCodeForDdl)) {
+                        physicalDoneTables.add(fullPhyTableName);
+                    }
+                }
+            }
+        }
+        return physicalDoneTables;
+    }
+
+    public static List convertToPhysicalPlans(PhysicalPlanData data, ExecutionContext executionContext,
+                                                       Set donePhysicalTables) {
         List physicalPlans = new ArrayList<>();
 
         boolean isNewPartDb = DbInfoManager.getInstance().isNewPartitionDb(data.getSchemaName());
@@ -164,6 +201,13 @@ public static List convertToPhysicalPlans(PhysicalPlanData data, Execut
                     phyDdlTableOperation.setRenameLogicalTableName(data.getNewLogicalTableName());
                 }
 
+                List fullPhyTableNames = phyTableNames.stream()
+                    .map(o -> buildPhyDbTableNameFromGroupNameAndPhyTableName(groupName, o))
+                    .collect(Collectors.toList());
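+                // Skip this physical DDL when every target physical table has already
+                // been processed, so a partially executed job can be retried safely.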
+                if (donePhysicalTables.containsAll(fullPhyTableNames)) {
+                    index++;
+                    continue;
+                }
                 phyDdlTableOperation.setTableNames(ImmutableList.of(phyTableNames));
 
                 phyDdlTableOperation.setKind(data.getKind());
@@ -194,7 +238,7 @@ public static TableRule buildTableRule(TablesExtRecord tablesExtRecord) {
         return tddlRuleGmsConfig.initTableRule(tablesExtRecord);
     }
 
-    public static TableGroupConfig buildTableGroupConfig(PartitionInfo partitionInfo, boolean isOSS) {
+    public static TableGroupDetailConfig buildTableGroupConfig(PartitionInfo partitionInfo, boolean isOSS) {
         TablePartitionRecord logTableRec = PartitionInfoUtil.prepareRecordForLogicalTable(partitionInfo);
         List partRecList = PartitionInfoUtil.prepareRecordForAllPartitions(partitionInfo);
         Map> subPartRecInfos = PartitionInfoUtil
@@ -222,11 +266,9 @@ public static TableGroupConfig buildTableGroupConfig(PartitionInfo partitionInfo
         List tablePartRecordInfoContexts = new ArrayList<>();
         tablePartRecordInfoContexts.add(tablePartRecordInfoContext);
 
-        TableGroupConfig tableGroupConfig = new TableGroupConfig();
-        tableGroupConfig.setTableGroupRecord(tableGroupRecord);
-        tableGroupConfig.setPartitionGroupRecords(partitionGroupRecords);
-        tableGroupConfig.setTables(tablePartRecordInfoContexts);
-        tableGroupConfig.setLocality(partitionInfo.getLocality());
+        TableGroupDetailConfig tableGroupConfig =
+            new TableGroupDetailConfig(tableGroupRecord, partitionGroupRecords, tablePartRecordInfoContexts,
+                partitionInfo.getLocality());
         return tableGroupConfig;
     }
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/converter/PhysicalPlanData.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/converter/PhysicalPlanData.java
index 17c561f37..a44c8292e 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/converter/PhysicalPlanData.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/converter/PhysicalPlanData.java
@@ -19,7 +19,8 @@
 import com.alibaba.polardbx.common.jdbc.ParameterContext;
 import com.alibaba.polardbx.gms.locality.LocalityDesc;
 import com.alibaba.polardbx.gms.metadb.table.TablesExtRecord;
-import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityEntity;
+import com.alibaba.polardbx.gms.tablegroup.TableGroupDetailConfig;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTablePreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
 import com.alibaba.polardbx.optimizer.partition.pruning.PhysicalPartitionInfo;
@@ -70,7 +71,7 @@ public class PhysicalPlanData {
     private String createTablePhysicalSql;
 
     private PartitionInfo partitionInfo;
-    private TableGroupConfig tableGroupConfig;
+    private TableGroupDetailConfig tableGroupConfig;
 
     private boolean truncatePartition;
 
@@ -80,6 +81,11 @@ public class PhysicalPlanData {
 
     private boolean flashbackRename = false;
 
+    private boolean renamePhyTable = false;
+
+    private LBACSecurityEntity tableESA;
+    private List colEsaList;
+
     @Override
     public String toString() {
         return String.format("PhysicalPlan{table: %s, sql: %s, topology: %s",
@@ -114,6 +120,9 @@ public PhysicalPlanData clone() {
         clone.truncatePartition = this.truncatePartition;
         clone.localityDesc = this.localityDesc;
         clone.alterTablePreparedData = this.alterTablePreparedData;
+        clone.renamePhyTable = this.renamePhyTable;
+        clone.tableESA = this.tableESA;
+        clone.colEsaList = colEsaList == null ? null : new ArrayList<>(colEsaList);
         return clone;
     }
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AbstractProcedureJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AbstractProcedureJobFactory.java
index ec3bdd495..3834caac3 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AbstractProcedureJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AbstractProcedureJobFactory.java
@@ -17,8 +17,6 @@
 package com.alibaba.polardbx.executor.ddl.job.factory;
 
 import com.alibaba.polardbx.druid.sql.SQLUtils;
-import com.alibaba.polardbx.druid.sql.ast.SQLName;
-import com.alibaba.polardbx.druid.sql.ast.expr.SQLPropertyExpr;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterFileStorageAsOfTimestampJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterFileStorageAsOfTimestampJobFactory.java
index 555158dd5..b7898e878 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterFileStorageAsOfTimestampJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterFileStorageAsOfTimestampJobFactory.java
@@ -56,7 +56,6 @@
 
 /**
  * @author chenzilin
- * @date 2022/2/14 17:47
  */
 public class AlterFileStorageAsOfTimestampJobFactory extends DdlJobFactory {
 
@@ -66,8 +65,8 @@ public class AlterFileStorageAsOfTimestampJobFactory extends DdlJobFactory {
     private AlterFileStoragePreparedData alterFileStoragePreparedData;
 
     public AlterFileStorageAsOfTimestampJobFactory(
-            AlterFileStoragePreparedData alterFileStoragePreparedData,
-            ExecutionContext executionContext) {
+        AlterFileStoragePreparedData alterFileStoragePreparedData,
+        ExecutionContext executionContext) {
         this.executionContext = executionContext;
         this.alterFileStoragePreparedData = alterFileStoragePreparedData;
     }
@@ -87,7 +86,8 @@ protected ExecutableDdlJob doCreate() {
             fromTimeZone = TimeZone.getDefault();
         }
 
-        long ts = OSSTaskUtils.getTsFromTimestampWithTimeZone(alterFileStoragePreparedData.getTimestamp(), fromTimeZone);
+        long ts =
+            OSSTaskUtils.getTsFromTimestampWithTimeZone(alterFileStoragePreparedData.getTimestamp(), fromTimeZone);
 
         List toDeleteFileRecordList = new ArrayList<>();
         List toUpdateFileRecordList = new ArrayList<>();
@@ -99,6 +99,7 @@ protected ExecutableDdlJob doCreate() {
             FileStorageMetaStore fileStorageMetaStore = new FileStorageMetaStore(engine);
             fileStorageMetaStore.setConnection(conn);
 
+            // TODO(siyun): prevent columnar file being purged
             List fileMetaList = fileStorageMetaStore.queryFromFileStorage();
 
             for (FileStorageMetaStore.OssFileMeta ossFileMeta : fileMetaList) {
@@ -123,7 +124,8 @@ protected ExecutableDdlJob doCreate() {
             logger.info("alter filestorage as of timestamp " + alterFileStoragePreparedData.getTimestamp());
             logger.info(String.format("to delete %s files with commit_ts > %s : ", toDeleteFileMeta.size(), ts));
             logger.info(toDeleteFileMeta.stream().map(x -> x.getDataPath()).collect(Collectors.joining("\n")));
-            logger.info(String.format("to update %s files with commit_ts <= %s <= remove_ts : ", toUpdateFileMeta.size(), ts));
+            logger.info(
+                String.format("to update %s files with commit_ts <= %s <= remove_ts : ", toUpdateFileMeta.size(), ts));
             logger.info(toUpdateFileMeta.stream().map(x -> x.getDataPath()).collect(Collectors.joining("\n")));
 
             TableInfoManager tableInfoManager = new TableInfoManager();
@@ -171,11 +173,13 @@ protected ExecutableDdlJob doCreate() {
 
             for (int i = 0; i < toUpdateFileRecordList.size(); i++) {
                 FilesRecord filesRecord = toUpdateFileRecordList.get(i);
-                if (schemaName.equals(filesRecord.getLogicalSchemaName()) && logicalTableName.equals(filesRecord.getLogicalTableName())) {
+                if (schemaName.equals(filesRecord.getLogicalSchemaName()) && logicalTableName.equals(
+                    filesRecord.getLogicalTableName())) {
                     currentFilesRecordList.add(filesRecord);
                 } else {
                     UpdateFileRemoveTsTask updateFileRemoveTsTask = new UpdateFileRemoveTsTask(engine.name(),
-                            schemaName, logicalTableName, currentFilesRecordList.stream().map(x -> x.getFileName()).collect(Collectors.toList()), null);
+                        schemaName, logicalTableName,
+                        currentFilesRecordList.stream().map(x -> x.getFileName()).collect(Collectors.toList()), null);
                     taskList.add(updateFileRemoveTsTask);
                     schemaName = filesRecord.getLogicalSchemaName();
                     logicalTableName = filesRecord.getLogicalTableName();
@@ -186,7 +190,8 @@ protected ExecutableDdlJob doCreate() {
 
                 if (!currentFilesRecordList.isEmpty()) {
                     UpdateFileRemoveTsTask updateFileRemoveTsTask = new UpdateFileRemoveTsTask(engine.name(),
-                            schemaName, logicalTableName, currentFilesRecordList.stream().map(x -> x.getFileName()).collect(Collectors.toList()), null);
+                        schemaName, logicalTableName,
+                        currentFilesRecordList.stream().map(x -> x.getFileName()).collect(Collectors.toList()), null);
                     taskList.add(updateFileRemoveTsTask);
                 }
             }
@@ -200,10 +205,13 @@ protected ExecutableDdlJob doCreate() {
 
             for (int i = 0; i < toDeleteFileRecordList.size(); i++) {
                 FilesRecord filesRecord = toDeleteFileRecordList.get(i);
-                if (schemaName.equals(filesRecord.getLogicalSchemaName()) && logicalTableName.equals(filesRecord.getLogicalTableName())) {
+                if (schemaName.equals(filesRecord.getLogicalSchemaName()) && logicalTableName.equals(
+                    filesRecord.getLogicalTableName())) {
                     currentFilesRecordList.add(filesRecord);
                 } else {
-                    DropOssFilesTask dropOssFilesTask = new DropOssFilesTask(engine.name(), schemaName, logicalTableName, currentFilesRecordList.stream().map(x -> x.getFileName()).collect(Collectors.toSet()));
+                    DropOssFilesTask dropOssFilesTask =
+                        new DropOssFilesTask(engine.name(), schemaName, logicalTableName,
+                            currentFilesRecordList.stream().map(x -> x.getFileName()).collect(Collectors.toSet()));
                     taskList.add(dropOssFilesTask);
                     schemaName = filesRecord.getLogicalSchemaName();
                     logicalTableName = filesRecord.getLogicalTableName();
@@ -214,13 +222,15 @@ protected ExecutableDdlJob doCreate() {
             }
 
             if (!currentFilesRecordList.isEmpty()) {
-                DropOssFilesTask dropOssFilesTask = new DropOssFilesTask(engine.name(), schemaName, logicalTableName, currentFilesRecordList.stream().map(x -> x.getFileName()).collect(Collectors.toSet()));
+                DropOssFilesTask dropOssFilesTask = new DropOssFilesTask(engine.name(), schemaName, logicalTableName,
+                    currentFilesRecordList.stream().map(x -> x.getFileName()).collect(Collectors.toSet()));
                 taskList.add(dropOssFilesTask);
             }
         }
 
         if (!toDeleteFileMeta.isEmpty()) {
-            taskList.add(new DeleteOssFilesTask(engine.name(), DefaultDbSchema.NAME, toDeleteFileMeta.stream().map(x -> x.getDataPath()).collect(Collectors.toSet())));
+            taskList.add(new DeleteOssFilesTask(engine.name(), DefaultDbSchema.NAME,
+                toDeleteFileMeta.stream().map(x -> x.getDataPath()).collect(Collectors.toSet())));
         }
 
         for (Pair<String, String> pair : schemaTablePair) {
@@ -238,7 +248,6 @@ protected ExecutableDdlJob doCreate() {
         return executableDdlJob;
     }
 
-
     @Override
     protected void excludeResources(Set<String> resources) {
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterFileStoragePurgeBeforeTimestampJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterFileStoragePurgeBeforeTimestampJobFactory.java
index ac21735d5..80f37b994 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterFileStoragePurgeBeforeTimestampJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterFileStoragePurgeBeforeTimestampJobFactory.java
@@ -18,8 +18,6 @@
 
 import com.alibaba.polardbx.common.Engine;
 import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException;
-import com.alibaba.polardbx.common.exception.TddlRuntimeException;
-import com.alibaba.polardbx.common.exception.code.ErrorCode;
 import com.alibaba.polardbx.common.properties.ConnectionParams;
 import com.alibaba.polardbx.common.utils.Pair;
 import com.alibaba.polardbx.common.utils.logger.Logger;
@@ -74,8 +72,8 @@ public class AlterFileStoragePurgeBeforeTimestampJobFactory extends DdlJobFactor
     private AlterFileStoragePreparedData alterFileStoragePreparedData;
 
     public AlterFileStoragePurgeBeforeTimestampJobFactory(
-            AlterFileStoragePreparedData alterFileStoragePreparedData,
-            ExecutionContext executionContext) {
+        AlterFileStoragePreparedData alterFileStoragePreparedData,
+        ExecutionContext executionContext) {
         this.executionContext = executionContext;
         this.alterFileStoragePreparedData = alterFileStoragePreparedData;
     }
@@ -97,11 +95,13 @@ protected ExecutableDdlJob doCreate() {
             fromTimeZone = TimeZone.getDefault();
         }
 
-        long ts = OSSTaskUtils.getTsFromTimestampWithTimeZone(alterFileStoragePreparedData.getTimestamp(), fromTimeZone);
+        long ts =
+            OSSTaskUtils.getTsFromTimestampWithTimeZone(alterFileStoragePreparedData.getTimestamp(), fromTimeZone);
 
         // ensure purge do not affect backup
         int backupOssPeriodInDay = executionContext.getParamManager().getInt(ConnectionParams.BACKUP_OSS_PERIOD);
-        final ITimestampOracle timestampOracle = executionContext.getTransaction().getTransactionManagerUtil().getTimestampOracle();
+        final ITimestampOracle timestampOracle =
+            executionContext.getTransaction().getTransactionManagerUtil().getTimestampOracle();
         if (null == timestampOracle) {
             throw new UnsupportedOperationException("Do not support timestamp oracle");
         }
@@ -162,10 +162,13 @@ protected ExecutableDdlJob doCreate() {
 
             for (int i = 0; i < toDeleteFileRecordList.size(); i++) {
                 FilesRecord filesRecord = toDeleteFileRecordList.get(i);
-                if (schemaName.equals(filesRecord.getLogicalSchemaName()) && logicalTableName.equals(filesRecord.getLogicalTableName())) {
+                if (schemaName.equals(filesRecord.getLogicalSchemaName()) && logicalTableName.equals(
+                    filesRecord.getLogicalTableName())) {
                     currentFilesRecordList.add(filesRecord);
                 } else {
-                    DropOssFilesTask dropOssFilesTask = new DropOssFilesTask(engine.name(), schemaName, logicalTableName, currentFilesRecordList.stream().map(x -> x.getFileName()).collect(Collectors.toSet()));
+                    DropOssFilesTask dropOssFilesTask =
+                        new DropOssFilesTask(engine.name(), schemaName, logicalTableName,
+                            currentFilesRecordList.stream().map(x -> x.getFileName()).collect(Collectors.toSet()));
                     taskList.add(dropOssFilesTask);
                     schemaName = filesRecord.getLogicalSchemaName();
                     logicalTableName = filesRecord.getLogicalTableName();
@@ -176,7 +179,8 @@ protected ExecutableDdlJob doCreate() {
             }
 
             if (!currentFilesRecordList.isEmpty()) {
-                DropOssFilesTask dropOssFilesTask = new DropOssFilesTask(engine.name(), schemaName, logicalTableName, currentFilesRecordList.stream().map(x -> x.getFileName()).collect(Collectors.toSet()));
+                DropOssFilesTask dropOssFilesTask = new DropOssFilesTask(engine.name(), schemaName, logicalTableName,
+                    currentFilesRecordList.stream().map(x -> x.getFileName()).collect(Collectors.toSet()));
                 taskList.add(dropOssFilesTask);
             }
         }
@@ -185,7 +189,8 @@ protected ExecutableDdlJob doCreate() {
         taskList.addAll(buildRecycleBinPurgeBeforeTimestamp(engine, timestamp, executionContext));
 
         if (!toDeleteFileMeta.isEmpty()) {
-            taskList.add(new DeleteOssFilesTask(engine.name(), DefaultDbSchema.NAME, toDeleteFileMeta.stream().map(x -> x.fileName).collect(Collectors.toSet())));
+            taskList.add(new DeleteOssFilesTask(engine.name(), DefaultDbSchema.NAME,
+                toDeleteFileMeta.stream().map(x -> x.fileName).collect(Collectors.toSet())));
         }
 
         for (Pair<String, String> pair : schemaTablePair) {
@@ -201,14 +206,17 @@ protected ExecutableDdlJob doCreate() {
         return executableDdlJob;
     }
 
-    public static List<DdlTask> buildRecycleBinPurgeBeforeTimestamp(Engine engine, Timestamp ts, ExecutionContext executionContext) {
+    public static List<DdlTask> buildRecycleBinPurgeBeforeTimestamp(Engine engine, Timestamp ts,
+                                                                    ExecutionContext executionContext) {
         try (Connection conn = MetaDbDataSource.getInstance().getConnection()) {
             List<DdlTask> ddlTasks = new ArrayList<>();
             TableInfoManager tableInfoManager = new TableInfoManager();
             tableInfoManager.setConnection(conn);
 
             Statement statement = conn.createStatement();
-            ResultSet resultSet = statement.executeQuery(String.format("select `schema_name`, `name`, `original_name`, `gmt_create` from %s where name like '%s%%'", GmsSystemTables.RECYCLE_BIN,
+            ResultSet resultSet = statement.executeQuery(String.format(
+                "select `schema_name`, `name`, `original_name`, `gmt_create` from %s where name like '%s%%'",
+                GmsSystemTables.RECYCLE_BIN,
                 FILE_STORAGE_PREFIX));
 
             while (resultSet.next()) {
@@ -222,7 +230,8 @@ public static List<DdlTask> buildRecycleBinPurgeBeforeTimestamp(Engine engine, T
                         if (tablesRecord != null
                             && engine.name().equalsIgnoreCase(tablesRecord.engine)) {
                             // purge
-                            ddlTasks.addAll(buildPurgeOssRecycleBin(engine, schemaName, binName, executionContext.copy()));
+                            ddlTasks.addAll(
+                                buildPurgeOssRecycleBin(engine, schemaName, binName, executionContext.copy()));
                         }
                     }
                 }
@@ -233,7 +242,8 @@ public static List<DdlTask> buildRecycleBinPurgeBeforeTimestamp(Engine engine, T
         }
     }
 
-    private static List<DdlTask> buildPurgeOssRecycleBin(Engine engine, String schemaName, String binName, ExecutionContext executionContext) {
+    private static List<DdlTask> buildPurgeOssRecycleBin(Engine engine, String schemaName, String binName,
+                                                         ExecutionContext executionContext) {
         // TODO: improve makeTableVisible
         LogicalRenameTableHandler.makeTableVisible(schemaName, binName, executionContext);
         List<DdlTask> taskList = new ArrayList<>();
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterJoinGroupJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterJoinGroupJobFactory.java
index 66972beba..486a9da1e 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterJoinGroupJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterJoinGroupJobFactory.java
@@ -22,6 +22,7 @@
 import com.alibaba.polardbx.common.utils.GeneralUtil;
 import com.alibaba.polardbx.common.utils.Pair;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.TablesSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcAlterJoinGroupMarkTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterJoinGroupAddMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterJoinGroupValidateTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
@@ -31,6 +32,7 @@
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterJoinGroupPreparedData;
 import com.google.common.collect.Lists;
 
+import java.util.ArrayList;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
@@ -78,11 +80,15 @@ protected ExecutableDdlJob doCreate() {
             preparedData.isAdd(),
             preparedData.getTableGroupInfos());
 
+        CdcAlterJoinGroupMarkTask cdcAlterJoinGroupMarkTask =
+            new CdcAlterJoinGroupMarkTask(preparedData.getSchemaName(), preparedData.getJoinGroupName());
+
         TablesSyncTask syncTask =
-            new TablesSyncTask(preparedData.getSchemaName(), preparedData.getTablesVersion().keySet().stream().collect(
-                Collectors.toList()), true, initWait, interval, TimeUnit.MILLISECONDS);
+            new TablesSyncTask(preparedData.getSchemaName(), new ArrayList<>(preparedData.getTablesVersion().keySet()),
+                true, initWait, interval, TimeUnit.MILLISECONDS);
 
-        executableDdlJob.addSequentialTasks(Lists.newArrayList(validateTask, alterJoinGroupAddMetaTask, syncTask));
+        executableDdlJob.addSequentialTasks(
+            Lists.newArrayList(validateTask, alterJoinGroupAddMetaTask, cdcAlterJoinGroupMarkTask, syncTask));
 
         return executableDdlJob;
     }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableAddLogicalForeignKeyJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableAddLogicalForeignKeyJobFactory.java
new file mode 100644
index 000000000..0b37a6622
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableAddLogicalForeignKeyJobFactory.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.factory;
+
+import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
+import com.alibaba.polardbx.executor.ddl.job.task.BaseValidateTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.AlterForeignKeyTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.AlterTableAddLogicalForeignKeyValidateTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcDdlMarkTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4AlterTable;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterTable;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTablePreparedData;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static com.alibaba.polardbx.common.cdc.ICdcManager.DEFAULT_DDL_VERSION_ID;
+
+public class AlterTableAddLogicalForeignKeyJobFactory extends AlterTableJobFactory {
+    public AlterTableAddLogicalForeignKeyJobFactory(
+        PhysicalPlanData physicalPlanData,
+        AlterTablePreparedData preparedData,
+        LogicalAlterTable logicalAlterTable,
+        ExecutionContext executionContext) {
+        super(physicalPlanData, preparedData, logicalAlterTable, executionContext);
+    }
+
+    @Override
+    protected ExecutableDdlJob doCreate() {
+        ExecutableDdlJob4AlterTable executableDdlJob = new ExecutableDdlJob4AlterTable();
+
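+        // Skip the CDC mark when this ALTER runs inside an internal foreign-key repartition; only user-issued DDL should be marked.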
+        boolean isForeignKeysDdl =
+            !prepareData.getAddedForeignKeys().isEmpty() || !prepareData.getDroppedForeignKeys().isEmpty();
+        boolean isForeignKeyCdcMark = isForeignKeysDdl && !executionContext.getDdlContext().isFkRepartition();
+
+        List<DdlTask> taskList = new ArrayList<>();
+
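+        // This factory serves a single ADD (logical) FOREIGN KEY statement, so only the first added key is validated.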
+        AlterTableAddLogicalForeignKeyValidateTask validateTask =
+            new AlterTableAddLogicalForeignKeyValidateTask(schemaName, logicalTableName,
+                prepareData.getAddedForeignKeys().get(0), prepareData.getTableVersion());
+        taskList.add(validateTask);
+
+        DdlTask cdcDdlMarkTask =
+            new CdcDdlMarkTask(schemaName, physicalPlanData, false, isForeignKeyCdcMark, DEFAULT_DDL_VERSION_ID);
+        taskList.add(cdcDdlMarkTask);
+
+        if (isForeignKeysDdl) {
+            DdlTask updateForeignKeysTask =
+                new AlterForeignKeyTask(schemaName, logicalTableName, physicalPlanData.getDefaultDbIndex(),
+                    physicalPlanData.getDefaultPhyTableName(), prepareData.getAddedForeignKeys(),
+                    prepareData.getDroppedForeignKeys(), true);
+            taskList.add(updateForeignKeysTask);
+        }
+
+        // sync foreign key table meta
+        syncFkTables(taskList);
+        DdlTask tableSyncTaskAfterShowing = new TableSyncTask(schemaName, logicalTableName);
+        taskList.add(tableSyncTaskAfterShowing);
+
+        executableDdlJob.addSequentialTasks(taskList);
+        executableDdlJob.labelAsHead(validateTask);
+        executableDdlJob.labelAsTail(tableSyncTaskAfterShowing);
+
+        executableDdlJob.setTableValidateTask((BaseValidateTask) validateTask);
+        executableDdlJob.setTableSyncTask((TableSyncTask) tableSyncTaskAfterShowing);
+
+        return executableDdlJob;
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableAddPartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableAddPartitionJobFactory.java
index d6a769033..1313ed31a 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableAddPartitionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableAddPartitionJobFactory.java
@@ -80,6 +80,8 @@ protected ExecutableDdlJob doCreate() {
             return doAddAndMoveToExistTableGroup();
         } else if (preparedData.isCreateNewTableGroup()) {
             return doAddInNewTableGroup();
+        } else if (StringUtils.isNotEmpty(preparedData.getTargetImplicitTableGroupName())) {
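+            // An implicit table group was requested: create or reuse it, then replay this DDL as a sub-job (see withImplicitTableGroup).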
+            return withImplicitTableGroup(executionContext);
         } else {
             throw new RuntimeException("unexpected");
         }
@@ -97,7 +99,7 @@ protected ExecutableDdlJob doAddInOriginTableGroup() {
         Map<String, Long> tablesVersion = getTablesVersion();
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName, alterTableGroupAddPartitionPreparedData.getTableGroupName(),
-                tablesVersion, true, alterTableGroupAddPartitionPreparedData.getTargetPhysicalGroups());
+                tablesVersion, true, alterTableGroupAddPartitionPreparedData.getTargetPhysicalGroups(), false);
         TableGroupConfig tableGroupConfig = OptimizerContext.getContext(schemaName).getTableGroupInfoManager()
             .getTableGroupConfigByName(alterTableGroupAddPartitionPreparedData.getTableGroupName());
 
@@ -168,11 +170,11 @@ protected ExecutableDdlJob doAddAndMoveToExistTableGroup() {
         DdlTask validateSourceTableGroup =
             new AlterTableGroupValidateTask(schemaName,
                 sourceTableGroup, tablesVersion, false,
-                /*todo*/null);
+                /*todo*/null, false);
         DdlTask validateTargetTableGroup =
             new AlterTableGroupValidateTask(schemaName,
                 targetTableGroup, preparedData.getFirstTableVersionInTargetTableGroup(), false,
-                preparedData.getTargetPhysicalGroups());
+                preparedData.getTargetPhysicalGroups(), false);
 
         executableDdlJob.addTask(emptyTask);
         executableDdlJob.addTask(validateSourceTableGroup);
@@ -248,7 +250,7 @@ protected ExecutableDdlJob doAddInNewTableGroup() {
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName,
                 preparedData.getTableGroupName(), tablesVersion, false,
-                preparedData.getTargetPhysicalGroups());
+                preparedData.getTargetPhysicalGroups(), false);
 
         SubJobTask subJobMoveTableToNewGroup =
             new SubJobTask(schemaName, String.format(SET_NEW_TABLE_GROUP, preparedData.getTableName()), null);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableAddPartitionSubTaskJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableAddPartitionSubTaskJobFactory.java
index 9c8e276b8..90aee3a46 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableAddPartitionSubTaskJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableAddPartitionSubTaskJobFactory.java
@@ -17,7 +17,6 @@
 package com.alibaba.polardbx.executor.ddl.job.factory;
 
 import com.alibaba.polardbx.common.utils.Pair;
-import com.alibaba.polardbx.gms.tablegroup.PartitionGroupRecord;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
 import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
@@ -26,11 +25,9 @@
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupBasePreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupItemPreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
-import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
 import com.alibaba.polardbx.optimizer.tablegroup.AlterTableGroupSnapShotUtils;
 import org.apache.calcite.rel.core.DDL;
 import org.apache.calcite.sql.SqlAlterTable;
-import org.apache.calcite.sql.SqlAlterTableAddPartition;
 import org.apache.calcite.sql.SqlNode;
 
 import java.util.List;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableDropPartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableDropPartitionJobFactory.java
index 544a9826f..470867a93 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableDropPartitionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableDropPartitionJobFactory.java
@@ -77,6 +77,8 @@ protected ExecutableDdlJob doCreate() {
             return doDropAndMoveToExistTableGroup();
         } else if (preparedData.isCreateNewTableGroup()) {
             return doDropInNewTableGroup();
+        } else if (StringUtils.isNotEmpty(preparedData.getTargetImplicitTableGroupName())) {
+            return withImplicitTableGroup(executionContext);
         } else {
             throw new RuntimeException("unexpected");
         }
@@ -98,7 +100,7 @@ protected ExecutableDdlJob doDropInOriginTableGroup() {
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName, alterTableGroupDropPartitionPreparedData.getTableGroupName(),
                 tablesVersion, true,
-                isBrdTg ? null : alterTableGroupDropPartitionPreparedData.getTargetPhysicalGroups());
+                isBrdTg ? null : alterTableGroupDropPartitionPreparedData.getTargetPhysicalGroups(), false);
 
         Set<Long> outdatedPartitionGroupId = new HashSet<>();
         List<String> outdatedPartitionNames = new ArrayList<>();
@@ -175,11 +177,11 @@ protected ExecutableDdlJob doDropAndMoveToExistTableGroup() {
         DdlTask validateSourceTableGroup =
             new AlterTableGroupValidateTask(schemaName,
                 sourceTableGroup, tablesVersion, false,
-                /*todo*/null);
+                /*todo*/null, false);
         DdlTask validateTargetTableGroup =
             new AlterTableGroupValidateTask(schemaName,
                 targetTableGroup, preparedData.getFirstTableVersionInTargetTableGroup(), false,
-                preparedData.getTargetPhysicalGroups());
+                preparedData.getTargetPhysicalGroups(), false);
 
         executableDdlJob.addTask(emptyTask);
         executableDdlJob.addTask(validateSourceTableGroup);
@@ -249,7 +251,7 @@ protected ExecutableDdlJob doDropInNewTableGroup() {
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName,
                 preparedData.getTableGroupName(), tablesVersion, false,
-                preparedData.getTargetPhysicalGroups());
+                preparedData.getTargetPhysicalGroups(), false);
 
         SubJobTask subJobMoveTableToNewGroup =
             new SubJobTask(schemaName, String.format(SET_NEW_TABLE_GROUP, preparedData.getTableName()), null);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableDropPartitionSubTaskJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableDropPartitionSubTaskJobFactory.java
index 896979e85..c1b0dac95 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableDropPartitionSubTaskJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableDropPartitionSubTaskJobFactory.java
@@ -17,7 +17,6 @@
 package com.alibaba.polardbx.executor.ddl.job.factory;
 
 import com.alibaba.polardbx.common.utils.Pair;
-import com.alibaba.polardbx.gms.tablegroup.PartitionGroupRecord;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
 import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
@@ -26,11 +25,9 @@
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupBasePreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupItemPreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
-import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
 import com.alibaba.polardbx.optimizer.tablegroup.AlterTableGroupSnapShotUtils;
 import org.apache.calcite.rel.core.DDL;
 import org.apache.calcite.sql.SqlAlterTable;
-import org.apache.calcite.sql.SqlAlterTableDropPartition;
 import org.apache.calcite.sql.SqlNode;
 
 import java.util.List;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableExtractPartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableExtractPartitionJobFactory.java
index d5c41b1ed..a30a25f5c 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableExtractPartitionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableExtractPartitionJobFactory.java
@@ -77,6 +77,8 @@ protected ExecutableDdlJob doCreate() {
             return splitAndMoveToExistTableGroup();
         } else if (preparedData.isCreateNewTableGroup()) {
             return splitInNewTableGroup();
+        } else if (StringUtils.isNotEmpty(preparedData.getTargetImplicitTableGroupName())) {
+            return withImplicitTableGroup(executionContext);
         } else {
             throw new RuntimeException("unexpected");
         }
@@ -95,7 +97,7 @@ private ExecutableDdlJob splitInOriginTableGroup() {
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName,
                 alterTableExtractPartitionPreparedData.getTableGroupName(), tablesVersion, true,
-                alterTableExtractPartitionPreparedData.getTargetPhysicalGroups());
+                alterTableExtractPartitionPreparedData.getTargetPhysicalGroups(), false);
         TableGroupConfig tableGroupConfig = OptimizerContext.getContext(schemaName).getTableGroupInfoManager()
             .getTableGroupConfigByName(alterTableExtractPartitionPreparedData.getTableGroupName());
 
@@ -173,11 +175,11 @@ private ExecutableDdlJob splitAndMoveToExistTableGroup() {
         DdlTask validateSourceTableGroup =
             new AlterTableGroupValidateTask(schemaName,
                 sourceTableGroup, tablesVersion, false,
-                /*todo*/null);
+                /*todo*/null, false);
         DdlTask validateTargetTableGroup =
             new AlterTableGroupValidateTask(schemaName,
                 targetTableGroup, preparedData.getFirstTableVersionInTargetTableGroup(), false,
-                preparedData.getTargetPhysicalGroups());
+                preparedData.getTargetPhysicalGroups(), false);
 
         executableDdlJob.addTask(emptyTask);
         executableDdlJob.addTask(validateSourceTableGroup);
@@ -253,7 +255,7 @@ private ExecutableDdlJob splitInNewTableGroup() {
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName,
                 preparedData.getTableGroupName(), tablesVersion, false,
-                preparedData.getTargetPhysicalGroups());
+                preparedData.getTargetPhysicalGroups(), false);
 
         SubJobTask subJobMoveTableToNewGroup =
             new SubJobTask(schemaName, String.format(SET_NEW_TABLE_GROUP, preparedData.getTableName()), null);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableExtractPartitionSubTaskJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableExtractPartitionSubTaskJobFactory.java
index d1597a1ac..bc9ca3cf4 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableExtractPartitionSubTaskJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableExtractPartitionSubTaskJobFactory.java
@@ -25,7 +25,6 @@
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupBasePreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupItemPreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
-import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
 import com.alibaba.polardbx.optimizer.tablegroup.AlterTableGroupSnapShotUtils;
 import org.apache.calcite.rel.core.DDL;
 import org.apache.calcite.sql.SqlAlterTable;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGeneratedColumnJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGeneratedColumnJobFactory.java
index edae87683..59f1bf137 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGeneratedColumnJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGeneratedColumnJobFactory.java
@@ -58,6 +58,8 @@
 import java.util.Set;
 import java.util.TreeMap;
 
+import static com.alibaba.polardbx.common.cdc.ICdcManager.DEFAULT_DDL_VERSION_ID;
+
 public class AlterTableGeneratedColumnJobFactory extends DdlJobFactory {
     private final PhysicalPlanData physicalPlanData;
     private final String schemaName;
@@ -112,6 +114,8 @@ public AlterTableGeneratedColumnJobFactory(PhysicalPlanData physicalPlanData, Al
             }
             if (!notNullableColumns.isEmpty()) {
                 alterTableStmt.setName(new SQLIdentifierExpr("?"));
+                alterTableStmt.setTargetImplicitTableGroup(null);
+                alterTableStmt.getIndexTableGroupPair().clear();
                 physicalPlanData.setSqlTemplate(alterTableStmt.toString());
             }
 
@@ -153,7 +157,8 @@ protected ExecutableDdlJob doCreateDropGeneratedColumn() {
 
         List<DdlTask> nullableTasks = genNullableTask();
 
-        DdlTask cdcTask = new CdcAlterTableColumnDdlMarkTask(schemaName, physicalPlanData, false);
+        DdlTask cdcTask =
+            new CdcAlterTableColumnDdlMarkTask(schemaName, physicalPlanData, false, prepareData.getDdlVersionId());
 
         DdlTask hideColumnsTask =
             new ChangeColumnStatusTask(schemaName, logicalTableName, targetColumns, ColumnStatus.WRITE_ONLY,
@@ -173,7 +178,7 @@ protected ExecutableDdlJob doCreateDropGeneratedColumn() {
                 prepareData.isPrimaryKeyDropped(), prepareData.getAddedPrimaryKeyColumns(),
                 prepareData.getColumnAfterAnother(), prepareData.isLogicalColumnOrder(), prepareData.getTableComment(),
                 prepareData.getTableRowFormat(), physicalPlanData.getSequence(),
-                prepareData.isOnlineModifyColumnIndexTask());
+                prepareData.isOnlineModifyColumnIndexTask(), DEFAULT_DDL_VERSION_ID);
         DdlTask updateMetaSyncTask = new TableSyncTask(schemaName, logicalTableName);
 
         List<DdlTask> allTasks =
@@ -213,7 +218,8 @@ protected ExecutableDdlJob doCreateAddGeneratedColumn() {
         }
         List<DdlTask> notNullableTasks = genNotNullableTask();
 
-        DdlTask cdcTask = new CdcAlterTableColumnDdlMarkTask(schemaName, physicalPlanData, false);
+        DdlTask cdcTask =
+            new CdcAlterTableColumnDdlMarkTask(schemaName, physicalPlanData, false, prepareData.getDdlVersionId());
 
         DdlTask showColumnsTask =
             new ChangeColumnStatusTask(schemaName, logicalTableName, targetColumns, ColumnStatus.WRITE_ONLY,
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupAddPartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupAddPartitionJobFactory.java
index cec9e9ef5..f120efae6 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupAddPartitionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupAddPartitionJobFactory.java
@@ -81,7 +81,7 @@ protected ExecutableDdlJob doCreate() {
         Map<String, Long> tablesVersion = getTablesVersion();
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName, alterTableGroupAddPartitionPreparedData.getTableGroupName(),
-                tablesVersion, true, alterTableGroupAddPartitionPreparedData.getTargetPhysicalGroups());
+                tablesVersion, true, alterTableGroupAddPartitionPreparedData.getTargetPhysicalGroups(), false);
         TableGroupConfig tableGroupConfig = OptimizerContext.getContext(schemaName).getTableGroupInfoManager()
             .getTableGroupConfigByName(alterTableGroupAddPartitionPreparedData.getTableGroupName());
 
@@ -125,6 +125,7 @@ protected ExecutableDdlJob doCreate() {
 
         // TODO(luoyanxin)
         // executableDdlJob.setMaxParallelism(ScaleOutUtils.getTableGroupTaskParallelism(executionContext));
+        attacheCdcFinalMarkTask(executableDdlJob);
         return executableDdlJob;
     }
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupAddPartitionSubTaskJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupAddPartitionSubTaskJobFactory.java
index 61982c767..949b25263 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupAddPartitionSubTaskJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupAddPartitionSubTaskJobFactory.java
@@ -17,7 +17,6 @@
 package com.alibaba.polardbx.executor.ddl.job.factory;
 
 import com.alibaba.polardbx.common.utils.Pair;
-import com.alibaba.polardbx.gms.tablegroup.PartitionGroupRecord;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
 import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
@@ -26,10 +25,8 @@
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupBasePreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupItemPreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
-import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
 import com.alibaba.polardbx.optimizer.tablegroup.AlterTableGroupSnapShotUtils;
 import org.apache.calcite.rel.core.DDL;
-import org.apache.calcite.sql.SqlAlterTableAddPartition;
 import org.apache.calcite.sql.SqlAlterTableGroup;
 import org.apache.calcite.sql.SqlNode;
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupAddTableJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupAddTableJobFactory.java
index c979d1f69..24e00748b 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupAddTableJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupAddTableJobFactory.java
@@ -21,8 +21,8 @@
 import com.alibaba.polardbx.common.properties.ConnectionParams;
 import com.alibaba.polardbx.common.utils.GeneralUtil;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.SubJobTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcAlterTableGroupAddTablesMarkTask;
 import com.alibaba.polardbx.executor.ddl.job.task.gsi.ValidateTableVersionTask;
-import com.alibaba.polardbx.executor.ddl.job.task.shared.EmptyTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterTableGroupValidateTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
@@ -82,11 +82,15 @@ public ExecutableDdlJob toDdlJob() {
         if (preparedData.getReferenceTable() == null) {
             //validate the tablegroup is not empty
             curDdl = new AlterTableGroupValidateTask(preparedData.getSchemaName(),
-                preparedData.getTableGroupName(), preparedData.getTableVersions(), false, null);
+                preparedData.getTableGroupName(), preparedData.getTableVersions(), false, null, false);
         } else {
             curDdl = new ValidateTableVersionTask(preparedData.getSchemaName(), preparedData.getTableVersions());
         }
         executableDdlJob.addTask(curDdl);
+
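+        // CDC mark for ALTER TABLEGROUP ... ADD TABLES; wired in below as the terminal task of the job.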
+        CdcAlterTableGroupAddTablesMarkTask cdcAlterTableGroupAddTablesMarkTask =
+            new CdcAlterTableGroupAddTablesMarkTask(preparedData.getSchemaName(), preparedData.getTableGroupName());
+
         if (preparedData.getReferenceTable() != null) {
             sourceTables.remove(preparedData.getReferenceTable());
             DdlTask ddlTask = generateSetTableGroupJob(preparedData.getReferenceTable());
@@ -94,12 +98,16 @@ public ExecutableDdlJob toDdlJob() {
             executableDdlJob.addTaskRelationship(curDdl, ddlTask);
             curDdl = ddlTask;
         }
-        EmptyTask lastTask = new EmptyTask(preparedData.getSchemaName());
-        for (String tableName : sourceTables) {
-            DdlTask ddlTask = generateSetTableGroupJob(tableName);
-            executableDdlJob.addTask(ddlTask);
-            executableDdlJob.addTaskRelationship(curDdl, ddlTask);
-            executableDdlJob.addTaskRelationship(ddlTask, lastTask);
+
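+        // The CDC mark must run last: with nothing to move it follows validation directly, otherwise every set-tablegroup sub-job feeds into it.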
+        if (sourceTables.isEmpty()) {
+            executableDdlJob.addTaskRelationship(curDdl, cdcAlterTableGroupAddTablesMarkTask);
+        } else {
+            for (String tableName : sourceTables) {
+                DdlTask ddlTask = generateSetTableGroupJob(tableName);
+                executableDdlJob.addTask(ddlTask);
+                executableDdlJob.addTaskRelationship(curDdl, ddlTask);
+                executableDdlJob.addTaskRelationship(ddlTask, cdcAlterTableGroupAddTablesMarkTask);
+            }
         }
 
         return executableDdlJob;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupBaseJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupBaseJobFactory.java
index cd0fff2be..b9fc08711 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupBaseJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupBaseJobFactory.java
@@ -16,13 +16,21 @@
 
 package com.alibaba.polardbx.executor.ddl.job.factory;
 
+import com.alibaba.polardbx.common.ddl.newengine.DdlType;
 import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException;
+import com.alibaba.polardbx.common.properties.ConnectionProperties;
 import com.alibaba.polardbx.common.utils.GeneralUtil;
 import com.alibaba.polardbx.common.utils.Pair;
 import com.alibaba.polardbx.common.utils.logger.Logger;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableGroupAddMetaTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableGroupValidateTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.EmptyTableGroupValidateTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.SubJobTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcAlterTableGroupFinalMarkTask;
 import com.alibaba.polardbx.executor.ddl.job.task.changset.ChangeSetApplyExecutorInitTask;
 import com.alibaba.polardbx.executor.ddl.job.task.changset.ChangeSetApplyFinishTask;
 import com.alibaba.polardbx.executor.ddl.job.task.shared.EmptyTask;
+import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.TableGroupsSyncTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
@@ -47,6 +55,7 @@
 import com.alibaba.polardbx.optimizer.partition.PartitionSpec;
 import com.alibaba.polardbx.optimizer.partition.common.PartitionLocation;
 import com.alibaba.polardbx.statistics.SQLRecorderLogger;
+import com.google.common.collect.ImmutableList;
 import org.apache.calcite.rel.core.DDL;
 import org.apache.commons.lang3.StringUtils;
 
@@ -60,14 +69,13 @@
 import java.util.TreeMap;
 import java.util.TreeSet;
 
-import static com.alibaba.polardbx.common.properties.ConnectionParams.CHANGE_SET_APPLY_OPTIMIZATION;
-
 /**
  * @author luoyanxin
  */
 public abstract class AlterTableGroupBaseJobFactory extends DdlJobFactory {
 
     protected static final String SET_NEW_TABLE_GROUP = "alter table `%s` set tablegroup=''";
+    protected static final String SET_TARGET_TABLE_GROUP = "alter table `%s` set tablegroup='%s'";
 
     @Deprecated
     protected final DDL ddl;
@@ -189,6 +197,9 @@ protected void excludeResources(Set<String> resources) {
         if (StringUtils.isNotEmpty(preparedData.getTargetTableGroup())) {
             resources.add(concatWithDot(preparedData.getSchemaName(), preparedData.getTargetTableGroup()));
         }
+        if (StringUtils.isNotEmpty(preparedData.getTargetImplicitTableGroupName())) {
+            resources.add(concatWithDot(preparedData.getSchemaName(), preparedData.getTargetImplicitTableGroupName()));
+        }
         for (String relatedPart : preparedData.getRelatedPartitions()) {
             resources.add(concatWithDot(concatWithDot(preparedData.getSchemaName(), preparedData.getTableGroupName()),
                 relatedPart));
@@ -236,7 +247,7 @@ protected Set<Long> getOldDatePartitionGroups(
             .getTableGroupConfigByName(alterTableSplitPartitionPreparedData.getTableGroupName());
         String logicTableName = preparedData.getTableName();
         if (StringUtils.isEmpty(logicTableName)) {
-            logicTableName = tableGroupConfig.getAllTables().get(0).getTableName();
+            logicTableName = tableGroupConfig.getAllTables().get(0);
         }
         TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(logicTableName);
         PartitionInfo partitionInfo = tableMeta.getPartitionInfo();
@@ -295,6 +306,12 @@ protected Set getOldDatePartitionGroups(
         return outdatedPartitionGroupId;
     }
 
+    protected void attacheCdcFinalMarkTask(ExecutableDdlJob executableDdlJob) {
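+        // Publish a single CDC mark covering the whole table-group change; appendTask places it at the tail of the job.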
+        CdcAlterTableGroupFinalMarkTask cdcAlterTableGroupFinalMarkTask =
+            new CdcAlterTableGroupFinalMarkTask(preparedData.getSchemaName(), preparedData.getTableGroupName());
+        executableDdlJob.appendTask(cdcAlterTableGroupFinalMarkTask);
+    }
+
     public Map<String, Set<String>> getTheDeletedPartitionsLocation(String schemaName, String tableName) {
         Map<String, Set<String>> deletedPhyTables = new HashMap<>();
 
@@ -339,4 +356,71 @@ public Map<String, Set<String>> getTheDeletedPartitionsLocation(String schemaNam
         return deletedPhyTables;
     }
 
+    public ExecutableDdlJob withImplicitTableGroup(ExecutionContext ec) {
+        executionContext.getDdlContext().setDdlType(DdlType.ALTER_TABLE_RENAME_PARTITION);
+        String implicitTableGroup = preparedData.getTargetImplicitTableGroupName();
+        assert implicitTableGroup != null;
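+        // Dispatch on the group's current state: not created yet -> create it first, created but still empty -> just bind the table; a non-empty group is unexpected on this path.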
+        TableGroupConfig tgConfig = OptimizerContext.getContext(preparedData.getSchemaName()).getTableGroupInfoManager()
+            .getTableGroupConfigByName(implicitTableGroup);
+        if (tgConfig == null) {
+            return createTableGroupAndRedo(ec);
+        } else if (tgConfig.isEmpty()) {
+            return setTableGroupAndRedo(ec);
+        } else {
+            throw new RuntimeException("unexpected");
+        }
+    }
+
+    public ExecutableDdlJob createTableGroupAndRedo(ExecutionContext ec) {
+        String implicitTableGroup = preparedData.getTargetImplicitTableGroupName();
+        List<DdlTask> taskList = new ArrayList<>();
+        ExecutableDdlJob job = new ExecutableDdlJob();
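+        // Sequential plan: validate the group name, persist its meta, sync it to all nodes, then two sub-jobs: move the table into the group and replay the original DDL.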
+        CreateTableGroupValidateTask createTableGroupValidateTask =
+            new CreateTableGroupValidateTask(preparedData.getSchemaName(),
+                ImmutableList.of(implicitTableGroup));
+        taskList.add(createTableGroupValidateTask);
+        CreateTableGroupAddMetaTask createTableGroupAddMetaTask = new CreateTableGroupAddMetaTask(
+            preparedData.getSchemaName(), implicitTableGroup, null,
+            null, false, true);
+        taskList.add(createTableGroupAddMetaTask);
+        TableGroupsSyncTask tableGroupsSyncTask =
+            new TableGroupsSyncTask(preparedData.getSchemaName(), ImmutableList.of(implicitTableGroup));
+        taskList.add(tableGroupsSyncTask);
+        SubJobTask subJobAddToImplicitTableGroup =
+            new SubJobTask(preparedData.getSchemaName(),
+                String.format(SET_TARGET_TABLE_GROUP, preparedData.getTableName(), implicitTableGroup),
+                null);
+        SubJobTask redoTask =
+            new SubJobTask(preparedData.getSchemaName(), preparedData.getSourceSql(), null);
+        subJobAddToImplicitTableGroup.setParentAcquireResource(true);
+        redoTask.setParentAcquireResource(true);
+        taskList.add(subJobAddToImplicitTableGroup);
+        taskList.add(redoTask);
+        job.addSequentialTasks(taskList);
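+        // Relax ONLY_MANUAL_TABLEGROUP_ALLOW so the sub-jobs are permitted to bind to this implicitly created table group.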
+        ec.getParamManager().getProps()
+            .put(ConnectionProperties.ONLY_MANUAL_TABLEGROUP_ALLOW, Boolean.FALSE.toString());
+        return job;
+    }
+
+    public ExecutableDdlJob setTableGroupAndRedo(ExecutionContext ec) {
+        String implicitTableGroup = preparedData.getTargetImplicitTableGroupName();
+        List<DdlTask> taskList = new ArrayList<>();
+        ExecutableDdlJob job = new ExecutableDdlJob();
+        EmptyTableGroupValidateTask emptyTableGroupValidateTask =
+            new EmptyTableGroupValidateTask(preparedData.getSchemaName(), implicitTableGroup);
+        SubJobTask subJobAddToImplicitTableGroup =
+            new SubJobTask(preparedData.getSchemaName(),
+                String.format(SET_TARGET_TABLE_GROUP, preparedData.getTableName(), implicitTableGroup),
+                null);
+        SubJobTask redoTask =
+            new SubJobTask(preparedData.getSchemaName(), preparedData.getSourceSql(), null);
+        subJobAddToImplicitTableGroup.setParentAcquireResource(true);
+        redoTask.setParentAcquireResource(true);
+        taskList.add(emptyTableGroupValidateTask);
+        taskList.add(subJobAddToImplicitTableGroup);
+        taskList.add(redoTask);
+        job.addSequentialTasks(taskList);
+        ec.getParamManager().getProps()
+            .put(ConnectionProperties.ONLY_MANUAL_TABLEGROUP_ALLOW, Boolean.FALSE.toString());
+        return job;
+    }
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupChangeSetJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupChangeSetJobFactory.java
index 8e8e8b4c1..5ca81fc57 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupChangeSetJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupChangeSetJobFactory.java
@@ -16,13 +16,18 @@
 
 package com.alibaba.polardbx.executor.ddl.job.factory;
 
+import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility;
 import com.alibaba.polardbx.common.properties.ConnectionParams;
 import com.alibaba.polardbx.common.utils.Pair;
 import com.alibaba.polardbx.executor.changeset.ChangeSetManager;
 import com.alibaba.polardbx.executor.ddl.job.converter.DdlJobDataConverter;
 import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
 import com.alibaba.polardbx.executor.ddl.job.task.backfill.AlterTableGroupBackFillTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.CloneTableDataFileTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTablePhyDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.DiscardTableSpaceDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.ImportTableSpaceDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.PhysicalBackfillTask;
 import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcTableGroupDdlMarkTask;
 import com.alibaba.polardbx.executor.ddl.job.task.changset.AlterTableGroupMovePartitionsCheckTask;
 import com.alibaba.polardbx.executor.ddl.job.task.changset.ChangeSetApplyExecutorInitTask;
@@ -33,8 +38,12 @@
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
 import com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils;
+import com.alibaba.polardbx.executor.physicalbackfill.PhysicalBackfillUtils;
 import com.alibaba.polardbx.gms.partition.TablePartitionRecord;
 import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig;
+import com.alibaba.polardbx.gms.topology.DbTopologyManager;
+import com.alibaba.polardbx.gms.topology.GroupDetailInfoExRecord;
+import com.alibaba.polardbx.gms.util.GroupInfoUtil;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
 import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager;
 import com.alibaba.polardbx.optimizer.config.table.TableMeta;
@@ -50,11 +59,13 @@
 import org.apache.commons.lang.StringUtils;
 
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
 import static com.alibaba.polardbx.common.properties.ConnectionParams.CHANGE_SET_APPLY_OPTIMIZATION;
+import static com.alibaba.polardbx.executor.ddl.newengine.meta.DdlJobManager.ID_GENERATOR;
 import static com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils.genChangeSetCatchUpTasks;
 import static com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils.genTargetTableLocations;
 
@@ -63,7 +74,16 @@ public class AlterTableGroupChangeSetJobFactory extends AlterTableGroupSubTaskJo
 
     private ChangeSetApplyFinishTask changeSetApplyFinishTask;
 
+    final List<PhyDdlTableOperation> discardTableSpaceOperations;
+    final Map<String, Pair<String, String>> ptbGroupMap;
+    protected boolean usePhysicalBackfill = false;
     protected final AlterTableGroupBasePreparedData parentPrepareData;
+    protected List<DdlTask> backfillTaskEdgeNodes = new ArrayList<>(2);
+    // item: index[0]: clone task; index[1]: PhysicalBackfillTask; index[2..end]: import tasks
+    protected List<List<DdlTask>> physicalyTaskPipeLine = new ArrayList<>();
+
+    final Map<String, String> sourceAndTarDnMap;
+    final Map<String, Pair<String, String>> storageInstAndUserInfos;
 
     public AlterTableGroupChangeSetJobFactory(DDL ddl, AlterTableGroupBasePreparedData parentPrepareData,
                                               AlterTableGroupItemPreparedData preparedData,
@@ -93,6 +113,40 @@ public AlterTableGroupChangeSetJobFactory(DDL ddl, AlterTableGroupBasePreparedDa
         this.parentPrepareData = parentPrepareData;
         this.changeSetApplyExecutorInitTask = changeSetApplyExecutorInitTask;
         this.changeSetApplyFinishTask = changeSetApplyFinishTask;
+        this.discardTableSpaceOperations = null;
+        this.ptbGroupMap = null;
+        this.sourceAndTarDnMap = null;
+        this.storageInstAndUserInfos = null;
+    }
+
+    public AlterTableGroupChangeSetJobFactory(DDL ddl, AlterTableGroupBasePreparedData parentPrepareData,
+                                              AlterTableGroupItemPreparedData preparedData,
+                                              List<PhyDdlTableOperation> phyDdlTableOperations,
+                                              List<PhyDdlTableOperation> discardTableSpaceOperations,
+                                              Map<String, Pair<String, String>> ptbGroupMap,
+                                              Map<String, String> sourceAndTarDnMap,
+                                              Map<String, Pair<String, String>> storageInstAndUserInfos,
+                                              Map<String, List<List<String>>> tableTopology,
+                                              Map<String, Set<String>> targetTableTopology,
+                                              Map<String, Set<String>> sourceTableTopology,
+                                              Map<String, Pair<String, String>> orderedTargetTableLocations,
+                                              String targetPartition,
+                                              boolean skipBackfill,
+                                              ChangeSetApplyExecutorInitTask changeSetApplyExecutorInitTask,
+                                              ChangeSetApplyFinishTask changeSetApplyFinishTask,
+                                              ComplexTaskMetaManager.ComplexTaskType taskType,
+                                              ExecutionContext executionContext) {
+        super(ddl, parentPrepareData, preparedData, phyDdlTableOperations, tableTopology, targetTableTopology,
+            sourceTableTopology,
+            orderedTargetTableLocations, targetPartition, skipBackfill, taskType, executionContext);
+        this.parentPrepareData = parentPrepareData;
+        this.changeSetApplyExecutorInitTask = changeSetApplyExecutorInitTask;
+        this.changeSetApplyFinishTask = changeSetApplyFinishTask;
+        this.discardTableSpaceOperations = discardTableSpaceOperations;
+        this.ptbGroupMap = ptbGroupMap;
+        this.sourceAndTarDnMap = sourceAndTarDnMap;
+        this.storageInstAndUserInfos = storageInstAndUserInfos;
+        this.usePhysicalBackfill = parentPrepareData.isUsePhysicalBackfill();
     }
 
     @Override
@@ -136,12 +190,22 @@ protected ExecutableDdlJob doCreate() {
         taskList.add(addMetaTask);
         //2.2 create partitioned physical table
         phyDdlTableOperations.forEach(o -> o.setPartitionInfo(newPartitionInfo));
+
         if (!tableTopology.isEmpty()) {
             PhysicalPlanData physicalPlanData =
-                DdlJobDataConverter.convertToPhysicalPlanData(tableTopology, phyDdlTableOperations);
+                DdlJobDataConverter.convertToPhysicalPlanData(tableTopology, phyDdlTableOperations, executionContext);
             DdlTask phyDdlTask =
                 new CreateTablePhyDdlTask(schemaName, physicalPlanData.getLogicalTableName(), physicalPlanData);
             taskList.add(phyDdlTask);
+            if (usePhysicalBackfill) {
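+                // With physical backfill, the freshly created physical tables must
+                // first discard their tablespaces before data files can be imported.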
+                physicalPlanData =
+                    DdlJobDataConverter.convertToPhysicalPlanData(tableTopology, discardTableSpaceOperations,
+                        executionContext);
+                phyDdlTask =
+                    new DiscardTableSpaceDdlTask(schemaName, physicalPlanData.getLogicalTableName(),
+                        physicalPlanData);
+                taskList.add(phyDdlTask);
+            }
         }
 
         List<String> relatedTables = new ArrayList<>();
@@ -154,11 +218,6 @@ protected ExecutableDdlJob doCreate() {
             relatedTables.add(tableName);
         }
 
-        AlterTableGroupBackFillTask alterTableGroupBackFillTask =
-            new AlterTableGroupBackFillTask(schemaName, tableName, sourceTableTopology, targetTableTopology,
-                isBroadcast(),
-                ComplexTaskMetaManager.ComplexTaskType.MOVE_PARTITION == taskType, true);
-
         Map<String, String> targetTableLocations = genTargetTableLocations(orderedTargetTableLocations);
         Long changeSetId = ChangeSetManager.getChangeSetId();
 
@@ -168,6 +227,7 @@ protected ExecutableDdlJob doCreate() {
         Map<String, ChangeSetCatchUpTask> catchUpTasks = genChangeSetCatchUpTasks(
             schemaName,
             tableName,
+            null,
             sourceTableTopology,
             targetTableLocations,
             taskType,
@@ -192,19 +252,116 @@ protected ExecutableDdlJob doCreate() {
             stayAtPublic =
                 StringUtils.equalsIgnoreCase(ComplexTaskMetaManager.ComplexTaskStatus.PUBLIC.name(), finalStatus);
         }
+        List<DdlTask> movePartitionTasks;
+        backfillTaskEdgeNodes.clear();
+        physicalyTaskPipeLine.clear();
 
-        List<DdlTask> movePartitionTasks = ChangeSetUtils.genChangeSetOnlineSchemaChangeTasks(
-            schemaName, tableName,
-            relatedTables,
-            finalStatus,
-            changeSetStartTask,
-            catchUpTasks,
-            alterTableGroupBackFillTask,
-            changeSetCheckTask,
-            changeSetCheckTwiceTask,
-            changeSetApplyFinishTask,
-            executionContext);
+        final boolean waitLsn = executionContext.getParamManager()
+            .getBoolean(ConnectionParams.PHYSICAL_BACKFILL_WAIT_LSN_WHEN_ROLLBACK);
+
+        boolean healthyCheck =
+            executionContext.getParamManager().getBoolean(ConnectionParams.PHYSICAL_BACKFILL_STORAGE_HEALTHY_CHECK);
+
+        if (usePhysicalBackfill) {
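+            // Build one task pipeline per physical table: clone the data files from a
+            // follower of the source DN, run the physical backfill, then import the
+            // tablespace on every node of the target DN.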
+            for (Map.Entry<String, Pair<String, String>> entry : ptbGroupMap.entrySet()) {
+                String phyTb = entry.getKey();
+                Pair<String, String> srcTarGroup = entry.getValue();
+                String sourceStorageId = sourceAndTarDnMap.computeIfAbsent(srcTarGroup.getKey(),
+                    key -> DbTopologyManager.getStorageInstIdByGroupName(schemaName, srcTarGroup.getKey()));
+                String targetStorageId = sourceAndTarDnMap.computeIfAbsent(srcTarGroup.getValue(),
+                    key -> DbTopologyManager.getStorageInstIdByGroupName(schemaName, srcTarGroup.getValue()));
+
+                Pair<String, String> srcDbAndGroup = Pair.of(
+                    GroupInfoUtil.buildPhysicalDbNameFromGroupName(srcTarGroup.getKey()).toLowerCase(),
+                    srcTarGroup.getKey());
+                Pair<String, String> tarDbAndGroup = Pair.of(
+                    GroupInfoUtil.buildPhysicalDbNameFromGroupName(srcTarGroup.getValue()).toLowerCase(),
+                    srcTarGroup.getValue());
+                Pair<String, Integer> sourceHostIpAndPort =
+                    PhysicalBackfillUtils.getMySQLOneFollowerIpAndPort(sourceStorageId);
+                List<Pair<String, Integer>> targetHostsIpAndPort =
+                    PhysicalBackfillUtils.getMySQLServerNodeIpAndPorts(targetStorageId, healthyCheck);
+                final long batchSize =
+                    executionContext.getParamManager().getLong(ConnectionParams.PHYSICAL_BACKFILL_BATCH_SIZE);
+                final long minUpdateBatch =
+                    executionContext.getParamManager()
+                        .getLong(ConnectionParams.PHYSICAL_BACKFILL_MIN_SUCCESS_BATCH_UPDATE);
+                final long parallelism =
+                    executionContext.getParamManager().getLong(ConnectionParams.PHYSICAL_BACKFILL_PARALLELISM);
+
+                List<String> phyPartNames =
+                    PhysicalBackfillUtils.getPhysicalPartitionNames(schemaName, srcDbAndGroup.getValue(),
+                        srcDbAndGroup.getKey(),
+                        phyTb);
+                CloneTableDataFileTask cloneTableDataFileTask =
+                    new CloneTableDataFileTask(schemaName, tableName, srcDbAndGroup, tarDbAndGroup, phyTb,
+                        phyPartNames, sourceStorageId, sourceHostIpAndPort, targetHostsIpAndPort, batchSize,
+                        tableMeta.isEncryption());
+                cloneTableDataFileTask.setTaskId(ID_GENERATOR.nextId());
+
+                List<ImportTableSpaceDdlTask> importTableSpaceTasks = new ArrayList<>();
 
+                PhysicalBackfillTask physicalBackfillTask =
+                    new PhysicalBackfillTask(schemaName, cloneTableDataFileTask.getTaskId(), tableName, phyTb,
+                        phyPartNames,
+                        srcTarGroup,
+                        Pair.of(sourceStorageId, targetStorageId), storageInstAndUserInfos, batchSize, parallelism,
+                        minUpdateBatch,
+                        waitLsn,
+                        tableMeta.isEncryption());
+                storageInstAndUserInfos.computeIfAbsent(sourceStorageId,
+                    key -> PhysicalBackfillUtils.getUserPasswd(sourceStorageId));
+                Pair<String, String> userAndPasswd = storageInstAndUserInfos.computeIfAbsent(targetStorageId,
+                    key -> PhysicalBackfillUtils.getUserPasswd(targetStorageId));
+
+                for (Pair<String, Integer> hostIpAndPort : targetHostsIpAndPort) {
+                    ImportTableSpaceDdlTask importTableSpaceDdlTask =
+                        new ImportTableSpaceDdlTask(schemaName, tableName, tarDbAndGroup.getKey(), phyTb, hostIpAndPort,
+                            userAndPasswd);
+                    importTableSpaceTasks.add(importTableSpaceDdlTask);
+                }
+                List<DdlTask> tasks = new ArrayList<>(importTableSpaceTasks.size() + 2);
+                tasks.add(cloneTableDataFileTask);
+                tasks.add(physicalBackfillTask);
+                tasks.addAll(importTableSpaceTasks);
+                physicalyTaskPipeLine.add(tasks);
+            }
+            Map<String, String> targetStorageIds = new HashMap<>();
+            for (GroupDetailInfoExRecord groupDetailInfoExRecord : preparedData.getGroupDetailInfoExRecords()) {
+                targetStorageIds.putIfAbsent(groupDetailInfoExRecord.getGroupName(),
+                    groupDetailInfoExRecord.storageInstId);
+            }
+
+            movePartitionTasks = ChangeSetUtils.genChangeSetOnlineSchemaChangeTasks(
+                schemaName, tableName,
+                relatedTables,
+                finalStatus,
+                changeSetStartTask,
+                catchUpTasks,
+                null,
+                changeSetCheckTask,
+                changeSetCheckTwiceTask,
+                changeSetApplyFinishTask,
+                backfillTaskEdgeNodes,
+                executionContext);
+        } else {
+            AlterTableGroupBackFillTask alterTableGroupBackFillTask =
+                new AlterTableGroupBackFillTask(schemaName, tableName, sourceTableTopology, targetTableTopology,
+                    isBroadcast(),
+                    ComplexTaskMetaManager.ComplexTaskType.MOVE_PARTITION == taskType, true, false);
+            movePartitionTasks = ChangeSetUtils.genChangeSetOnlineSchemaChangeTasks(
+                schemaName, tableName,
+                relatedTables,
+                finalStatus,
+                changeSetStartTask,
+                catchUpTasks,
+                alterTableGroupBackFillTask,
+                changeSetCheckTask,
+                changeSetCheckTwiceTask,
+                changeSetApplyFinishTask,
+                backfillTaskEdgeNodes,
+                executionContext);
+        }
         taskList.addAll(movePartitionTasks);
         executableDdlJob.addSequentialTasks(taskList);
 
@@ -214,9 +371,9 @@ protected ExecutableDdlJob doCreate() {
 
         Map<String, Set<String>> newTopology = newPartitionInfo.getTopology();
         if (stayAtPublic) {
-            cdcTableGroupDdlMarkTask =
-                new CdcTableGroupDdlMarkTask(tableGroupName, schemaName, tableName, sqlKind, newTopology,
-                    dc.getDdlStmt());
+            cdcTableGroupDdlMarkTask = new CdcTableGroupDdlMarkTask(tableGroupName, schemaName, tableName,
+                sqlKind, newTopology, dc.getDdlStmt(),
+                sqlKind == SqlKind.ALTER_TABLEGROUP ? CdcDdlMarkVisibility.Private : CdcDdlMarkVisibility.Protected);
         }
 
         if (changeSetApplyExecutorInitTask != null) {
@@ -231,4 +388,12 @@ protected ExecutableDdlJob doCreate() {
     public AlterTableGroupBasePreparedData getParentPrepareData() {
         return parentPrepareData;
     }
+
+    public List<DdlTask> getBackfillTaskEdgeNodes() {
+        return backfillTaskEdgeNodes;
+    }
+
+    public List<List<DdlTask>> getPhysicalyTaskPipeLine() {
+        return physicalyTaskPipeLine;
+    }
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupDropPartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupDropPartitionJobFactory.java
index 540adc4fa..02d3f14bb 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupDropPartitionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupDropPartitionJobFactory.java
@@ -86,7 +86,7 @@ protected ExecutableDdlJob doCreate() {
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName, alterTableGroupDropPartitionPreparedData.getTableGroupName(),
                 tablesVersion, true,
-                isBrdTg ? null : alterTableGroupDropPartitionPreparedData.getTargetPhysicalGroups());
+                isBrdTg ? null : alterTableGroupDropPartitionPreparedData.getTargetPhysicalGroups(), false);
 
         Set<Long> outdatedPartitionGroupId = new HashSet<>();
         List<String> outdatedPartitionNames = new ArrayList();
@@ -137,6 +137,7 @@ protected ExecutableDdlJob doCreate() {
 
         // TODO(luoyanxin)
         executableDdlJob.setMaxParallelism(ScaleOutUtils.getTableGroupTaskParallelism(executionContext));
+        attacheCdcFinalMarkTask(executableDdlJob);
         return executableDdlJob;
     }
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupExtractPartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupExtractPartitionJobFactory.java
index 680b4bff5..ac655b716 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupExtractPartitionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupExtractPartitionJobFactory.java
@@ -81,7 +81,7 @@ protected ExecutableDdlJob doCreate() {
 
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName, alterTableGroupExtractPartitionPreparedData.getTableGroupName(),
-                tablesVersion, true, alterTableGroupExtractPartitionPreparedData.getTargetPhysicalGroups());
+                tablesVersion, true, alterTableGroupExtractPartitionPreparedData.getTargetPhysicalGroups(), false);
         TableGroupConfig tableGroupConfig = OptimizerContext.getContext(schemaName).getTableGroupInfoManager()
             .getTableGroupConfigByName(alterTableGroupExtractPartitionPreparedData.getTableGroupName());
 
@@ -134,6 +134,7 @@ protected ExecutableDdlJob doCreate() {
 
         // TODO(luoyanxin)
         executableDdlJob.setMaxParallelism(ScaleOutUtils.getTableGroupTaskParallelism(executionContext));
+        attacheCdcFinalMarkTask(executableDdlJob);
         return executableDdlJob;
     }
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupExtractPartitionSubTaskJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupExtractPartitionSubTaskJobFactory.java
index 18b6f1bc9..fb378917b 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupExtractPartitionSubTaskJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupExtractPartitionSubTaskJobFactory.java
@@ -25,7 +25,6 @@
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupExtractPartitionPreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupItemPreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
-import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
 import com.alibaba.polardbx.optimizer.tablegroup.AlterTableGroupSnapShotUtils;
 import org.apache.calcite.rel.core.DDL;
 import org.apache.calcite.sql.SqlAlterTableGroup;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupMergePartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupMergePartitionJobFactory.java
index b5725e35c..8cfdeff20 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupMergePartitionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupMergePartitionJobFactory.java
@@ -42,12 +42,10 @@
 import org.apache.commons.lang.StringUtils;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.stream.Collectors;
 
 /**
  * @author luoyanxin
@@ -86,7 +84,7 @@ protected ExecutableDdlJob doCreate() {
 
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName, alterTableGroupMergePartitionPreparedData.getTableGroupName(),
-                tablesVersion, true, alterTableGroupMergePartitionPreparedData.getTargetPhysicalGroups());
+                tablesVersion, true, alterTableGroupMergePartitionPreparedData.getTargetPhysicalGroups(), false);
         TableGroupConfig tableGroupConfig = OptimizerContext.getContext(schemaName).getTableGroupInfoManager()
             .getTableGroupConfigByName(alterTableGroupMergePartitionPreparedData.getTableGroupName());
 
@@ -165,6 +163,7 @@ protected ExecutableDdlJob doCreate() {
                 syncTableGroupTask
             ));
         executableDdlJob.setMaxParallelism(ScaleOutUtils.getTableGroupTaskParallelism(executionContext));
+        attacheCdcFinalMarkTask(executableDdlJob);
         return executableDdlJob;
     }
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupModifyPartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupModifyPartitionJobFactory.java
index 5bdf8c8ae..1dcbd8bda 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupModifyPartitionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupModifyPartitionJobFactory.java
@@ -39,8 +39,8 @@
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupModifyPartitionPreparedData;
 import com.alibaba.polardbx.optimizer.locality.LocalityInfoUtils;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
-import com.alibaba.polardbx.optimizer.partition.common.PartitionLocation;
 import com.alibaba.polardbx.optimizer.partition.PartitionSpec;
+import com.alibaba.polardbx.optimizer.partition.common.PartitionLocation;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 import org.apache.calcite.rel.core.DDL;
@@ -60,7 +60,7 @@
  */
 public class AlterTableGroupModifyPartitionJobFactory extends AlterTableGroupBaseJobFactory {
 
-    private static String MODIFY_PARTITION_LOCK = "MODIFY_PARTITION_LOCK";
+    private static final String MODIFY_PARTITION_LOCK = "MODIFY_PARTITION_LOCK";
 
     public AlterTableGroupModifyPartitionJobFactory(DDL ddl, AlterTableGroupModifyPartitionPreparedData preparedData,
                                                     Map tablesPrepareData,
@@ -75,6 +75,28 @@ public AlterTableGroupModifyPartitionJobFactory(DDL ddl, AlterTableGroupModifyPa
             ComplexTaskMetaManager.ComplexTaskType.DROP_PARTITION, executionContext);
     }
 
+    public static ExecutableDdlJob create(@Deprecated DDL ddl,
+                                          AlterTableGroupModifyPartitionPreparedData preparedData,
+                                          ExecutionContext executionContext) {
+        AlterTableGroupModifyPartitionBuilder alterTableGroupModifyPartitionBuilder =
+            new AlterTableGroupModifyPartitionBuilder(ddl, preparedData, executionContext);
+        Map<String, Map<String, List<List<String>>>> tablesTopologyMap =
+            alterTableGroupModifyPartitionBuilder.build().getTablesTopologyMap();
+        Map<String, Map<String, Set<String>>> targetTablesTopology =
+            alterTableGroupModifyPartitionBuilder.getTargetTablesTopology();
+        Map<String, Map<String, Set<String>>> sourceTablesTopology =
+            alterTableGroupModifyPartitionBuilder.getSourceTablesTopology();
+        Map<String, AlterTableGroupItemPreparedData> tableGroupItemPreparedDataMap =
+            alterTableGroupModifyPartitionBuilder.getTablesPreparedData();
+        Map<String, List<PhyDdlTableOperation>> newPartitionsPhysicalPlansMap =
+            alterTableGroupModifyPartitionBuilder.getNewPartitionsPhysicalPlansMap();
+        Map<String, Map<String, Pair<String, String>>> orderedTargetTablesLocations =
+            alterTableGroupModifyPartitionBuilder.getOrderedTargetTablesLocations();
+        return new AlterTableGroupModifyPartitionJobFactory(ddl, preparedData, tableGroupItemPreparedDataMap,
+            newPartitionsPhysicalPlansMap, tablesTopologyMap, targetTablesTopology, sourceTablesTopology,
+            orderedTargetTablesLocations, executionContext).create();
+    }
+
     @Override
     protected void validate() {
 
@@ -92,7 +114,7 @@ protected ExecutableDdlJob doCreate() {
 
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName, alterTableGroupModifyPartitionPreparedData.getTableGroupName(),
-                tablesVersion, true, alterTableGroupModifyPartitionPreparedData.getTargetPhysicalGroups());
+                tablesVersion, true, alterTableGroupModifyPartitionPreparedData.getTargetPhysicalGroups(), false);
         TableGroupConfig tableGroupConfig = OptimizerContext.getContext(schemaName).getTableGroupInfoManager()
             .getTableGroupConfigByName(alterTableGroupModifyPartitionPreparedData.getTableGroupName());
 
@@ -140,7 +162,7 @@ protected ExecutableDdlJob doCreate() {
             ComplexTaskFactory.bringUpAlterTableGroup(schemaName, tableGroupName, null,
                 taskType, executionContext);
 
-        if (((AlterTableGroupModifyPartitionPreparedData) preparedData).isDropVal()) {
+        if (preparedData.isDropVal()) {
             AlterTableGroupRemoveTempPartitionTask alterTableGroupRemoveTempPartitionTask =
                 new AlterTableGroupRemoveTempPartitionTask(schemaName,
                     ((AlterTableGroupModifyPartitionPreparedData) preparedData).getTempPartitionNames(),
@@ -170,31 +192,10 @@ protected ExecutableDdlJob doCreate() {
 
         // TODO(luoyanxin)
         executableDdlJob.setMaxParallelism(ScaleOutUtils.getTableGroupTaskParallelism(executionContext));
+        attacheCdcFinalMarkTask(executableDdlJob);
         return executableDdlJob;
     }
 
-    public static ExecutableDdlJob create(@Deprecated DDL ddl,
-                                          AlterTableGroupModifyPartitionPreparedData preparedData,
-                                          ExecutionContext executionContext) {
-        AlterTableGroupModifyPartitionBuilder alterTableGroupModifyPartitionBuilder =
-            new AlterTableGroupModifyPartitionBuilder(ddl, preparedData, executionContext);
-        Map<String, Map<String, List<List<String>>>> tablesTopologyMap =
-            alterTableGroupModifyPartitionBuilder.build().getTablesTopologyMap();
-        Map<String, Map<String, Set<String>>> targetTablesTopology =
-            alterTableGroupModifyPartitionBuilder.getTargetTablesTopology();
-        Map<String, Map<String, Set<String>>> sourceTablesTopology =
-            alterTableGroupModifyPartitionBuilder.getSourceTablesTopology();
-        Map<String, AlterTableGroupItemPreparedData> tableGroupItemPreparedDataMap =
-            alterTableGroupModifyPartitionBuilder.getTablesPreparedData();
-        Map<String, List<PhyDdlTableOperation>> newPartitionsPhysicalPlansMap =
-            alterTableGroupModifyPartitionBuilder.getNewPartitionsPhysicalPlansMap();
-        Map<String, Map<String, Pair<String, String>>> orderedTargetTablesLocations =
-            alterTableGroupModifyPartitionBuilder.getOrderedTargetTablesLocations();
-        return new AlterTableGroupModifyPartitionJobFactory(ddl, preparedData, tableGroupItemPreparedDataMap,
-            newPartitionsPhysicalPlansMap, tablesTopologyMap, targetTablesTopology, sourceTablesTopology,
-            orderedTargetTablesLocations, executionContext).create();
-    }
-
     @Override
     public void constructSubTasks(String schemaName, ExecutableDdlJob executableDdlJob, DdlTask tailTask,
                                   List<DdlTask> bringUpAlterTableGroupTasks, String targetPartitionName) {
@@ -259,7 +260,7 @@ public Map> getTheDeletedPartitionsLocation(String schemaNam
         }
         assert num == preparedData.getNewPartitionNames().size();
 
-        if (((AlterTableGroupModifyPartitionPreparedData) preparedData).isDropVal() && !tempPartitionSpecs.isEmpty()) {
+        if (preparedData.isDropVal() && !tempPartitionSpecs.isEmpty()) {
             for (int i = 0; i < tempPartitionSpecs.size(); i++) {
                 PartitionSpec tempPartitionSpec = tempPartitionSpecs.get(i);
                 PartitionLocation location = tempPartitionSpec.getLocation();
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupModifyPartitionSubTaskJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupModifyPartitionSubTaskJobFactory.java
index 76eb0c902..b9b8d2e8e 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupModifyPartitionSubTaskJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupModifyPartitionSubTaskJobFactory.java
@@ -27,12 +27,12 @@
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupItemPreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupModifyPartitionPreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionByDefinition;
-import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
-import com.alibaba.polardbx.optimizer.partition.boundspec.PartitionBoundSpec;
-import com.alibaba.polardbx.optimizer.partition.boundspec.PartitionBoundVal;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfoBuilder;
+import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
 import com.alibaba.polardbx.optimizer.partition.PartitionSpec;
+import com.alibaba.polardbx.optimizer.partition.boundspec.PartitionBoundSpec;
+import com.alibaba.polardbx.optimizer.partition.boundspec.PartitionBoundVal;
 import com.alibaba.polardbx.optimizer.partition.pruning.PartFieldAccessType;
 import com.alibaba.polardbx.optimizer.partition.pruning.PartitionPrunerUtils;
 import com.alibaba.polardbx.optimizer.partition.pruning.SearchDatumComparator;
@@ -52,7 +52,6 @@
 import org.apache.calcite.sql.SqlSubPartition;
 
 import java.util.ArrayList;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupMovePartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupMovePartitionJobFactory.java
index f1d2b30d4..8ebf0b1dd 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupMovePartitionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupMovePartitionJobFactory.java
@@ -17,18 +17,36 @@
 package com.alibaba.polardbx.executor.ddl.job.factory;
 
 import com.alibaba.polardbx.common.properties.ConnectionParams;
+import com.alibaba.polardbx.common.utils.GeneralUtil;
 import com.alibaba.polardbx.common.utils.Pair;
+import com.alibaba.polardbx.executor.balancer.Balancer;
+import com.alibaba.polardbx.executor.balancer.stats.BalanceStats;
+import com.alibaba.polardbx.executor.balancer.stats.PartitionGroupStat;
+import com.alibaba.polardbx.executor.balancer.stats.PartitionStat;
 import com.alibaba.polardbx.executor.ddl.job.builder.tablegroup.AlterTableGroupMovePartitionBuilder;
+import com.alibaba.polardbx.executor.ddl.job.task.CostEstimableDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.DdlBackfillCostRecordTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.ImportTableSpaceDdlNormalTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.PauseCurrentJobTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.PhysicalBackfillTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.SyncLsnTask;
+import com.alibaba.polardbx.executor.ddl.job.task.changset.ChangeSetApplyExecutorInitTask;
+import com.alibaba.polardbx.executor.ddl.job.task.changset.ChangeSetApplyFinishTask;
+import com.alibaba.polardbx.executor.ddl.job.task.shared.EmptyLogTask;
+import com.alibaba.polardbx.executor.ddl.job.task.shared.EmptyTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterTableGroupAddMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterTableGroupValidateTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils;
 import com.alibaba.polardbx.executor.scaleout.ScaleOutUtils;
 import com.alibaba.polardbx.gms.tablegroup.PartitionGroupRecord;
 import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig;
+import com.alibaba.polardbx.gms.topology.DbTopologyManager;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
 import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager;
+import com.alibaba.polardbx.optimizer.config.table.TableMeta;
+import com.alibaba.polardbx.optimizer.context.DdlContext;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.PhyDdlTableOperation;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupItemPreparedData;
@@ -39,20 +57,32 @@
 import org.apache.commons.lang.StringUtils;
 
 import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Queue;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.stream.Collectors;
 
 /**
  * @author luoyanxin
  */
 public class AlterTableGroupMovePartitionJobFactory extends AlterTableGroupBaseJobFactory {
+    final Map<String, List<PhyDdlTableOperation>> discardTableSpacePhysicalPlansMap;
+    final Map<String, Map<String, Pair<String, String>>> tbPtbGroupMap;
+    final Map<String, String> sourceAndTarDnMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
+    final Map<String, Pair<String, String>> storageInstAndUserInfos = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
 
     public AlterTableGroupMovePartitionJobFactory(DDL ddl, AlterTableGroupMovePartitionPreparedData preparedData,
                                                   Map<String, AlterTableGroupItemPreparedData> tablesPrepareData,
                                                   Map<String, List<PhyDdlTableOperation>> newPartitionsPhysicalPlansMap,
+                                                  Map<String, List<PhyDdlTableOperation>> discardTableSpacePhysicalPlansMap,
+                                                  Map<String, Map<String, Pair<String, String>>> tbPtbGroupMap,
                                                   Map<String, Map<String, List<List<String>>>> tablesTopologyMap,
                                                   Map<String, Map<String, Set<String>>> targetTablesTopology,
                                                   Map<String, Map<String, Set<String>>> sourceTablesTopology,
@@ -61,6 +91,8 @@ public AlterTableGroupMovePartitionJobFactory(DDL ddl, AlterTableGroupMovePartit
         super(ddl, preparedData, tablesPrepareData, newPartitionsPhysicalPlansMap, tablesTopologyMap,
             targetTablesTopology, sourceTablesTopology, orderedTargetTablesLocations,
             ComplexTaskMetaManager.ComplexTaskType.MOVE_PARTITION, executionContext);
+        this.discardTableSpacePhysicalPlansMap = discardTableSpacePhysicalPlansMap;
+        this.tbPtbGroupMap = tbPtbGroupMap;
     }
 
     @Override
@@ -70,10 +102,10 @@ protected void validate() {
 
     @Override
     protected ExecutableDdlJob doCreate() {
+
         AlterTableGroupMovePartitionPreparedData alterTableGroupMovePartitionPreparedData =
             (AlterTableGroupMovePartitionPreparedData) preparedData;
         String schemaName = alterTableGroupMovePartitionPreparedData.getSchemaName();
-        String tableName = alterTableGroupMovePartitionPreparedData.getTableName();
         String tableGroupName = alterTableGroupMovePartitionPreparedData.getTableGroupName();
 
         ExecutableDdlJob executableDdlJob = new ExecutableDdlJob();
@@ -81,10 +113,30 @@ protected ExecutableDdlJob doCreate() {
 
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName, alterTableGroupMovePartitionPreparedData.getTableGroupName(),
-                tablesVersion, true, alterTableGroupMovePartitionPreparedData.getTargetPhysicalGroups());
+                tablesVersion, true, alterTableGroupMovePartitionPreparedData.getTargetPhysicalGroups(), false);
+
         TableGroupConfig tableGroupConfig = OptimizerContext.getContext(schemaName).getTableGroupInfoManager()
             .getTableGroupConfigByName(alterTableGroupMovePartitionPreparedData.getTableGroupName());
 
+        DdlContext ddlContext = executionContext.getDdlContext();
+        DdlBackfillCostRecordTask costRecordTask = null;
+        if (ddlContext != null && !ddlContext.isSubJob()) {
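+            // Top-level job only: sum up the rows and disk size of the partitions
+            // being moved and record them as the estimated cost of this backfill.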
+            costRecordTask = new DdlBackfillCostRecordTask(schemaName);
+            final BalanceStats balanceStats = Balancer.collectBalanceStatsOfTableGroup(schemaName, tableGroupName);
+            List<PartitionGroupStat> partitionStats = balanceStats.getPartitionGroupStats();
+            Long diskSize = 0L;
+            Long rows = 0L;
+            Set<String> partitionNamesSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
+            preparedData.getOldPartitionNames().forEach(o -> partitionNamesSet.add(o));
+            for (PartitionGroupStat partitionStat : partitionStats) {
+                if (partitionNamesSet.contains(partitionStat.getFirstPartition().getPartitionName())) {
+                    diskSize += partitionStat.getTotalDiskSize();
+                    rows += partitionStat.getDataRows();
+                }
+            }
+            costRecordTask.setCostInfo(
+                CostEstimableDdlTask.createCostInfo(rows, diskSize, (long) tableGroupConfig.getTableCount()));
+        }
         Set<Long> outdatedPartitionGroupId = new HashSet<>();
         for (String mergePartitionName : alterTableGroupMovePartitionPreparedData.getOldPartitionNames()) {
             for (PartitionGroupRecord record : tableGroupConfig.getPartitionGroupRecords()) {
@@ -115,15 +167,15 @@ protected ExecutableDdlJob doCreate() {
             newPartitions,
             localities);
 
-        executableDdlJob.addSequentialTasks(Lists.newArrayList(
-            validateTask,
-            addMetaTask
-        ));
+        if (costRecordTask != null) {
+            executableDdlJob.addSequentialTasks(Lists.newArrayList(validateTask, costRecordTask, addMetaTask));
+        } else {
+            executableDdlJob.addSequentialTasks(Lists.newArrayList(validateTask, addMetaTask));
+        }
         executableDdlJob.labelAsHead(validateTask);
 
         List<DdlTask> bringUpAlterTableGroupTasks =
-            ComplexTaskFactory.bringUpAlterTableGroup(schemaName, tableGroupName, null,
-                taskType, executionContext);
+            ComplexTaskFactory.bringUpAlterTableGroup(schemaName, tableGroupName, null, taskType, executionContext);
 
         final String finalStatus =
             executionContext.getParamManager().getString(ConnectionParams.TABLEGROUP_REORG_FINAL_TABLE_STATUS_DEBUG);
@@ -144,12 +196,11 @@ protected ExecutableDdlJob doCreate() {
         }
 
         executableDdlJob.setMaxParallelism(ScaleOutUtils.getTableGroupTaskParallelism(executionContext));
-
+        //attacheCdcFinalMarkTask(executableDdlJob); // replication not supported yet
         return executableDdlJob;
     }
 
-    public static ExecutableDdlJob create(@Deprecated DDL ddl,
-                                          AlterTableGroupMovePartitionPreparedData preparedData,
+    public static ExecutableDdlJob create(@Deprecated DDL ddl, AlterTableGroupMovePartitionPreparedData preparedData,
                                           ExecutionContext executionContext) {
         AlterTableGroupMovePartitionBuilder alterTableGroupMovePartitionBuilder =
             new AlterTableGroupMovePartitionBuilder(ddl, preparedData, executionContext);
@@ -161,13 +212,149 @@ public static ExecutableDdlJob create(@Deprecated DDL ddl,
             alterTableGroupMovePartitionBuilder.getSourceTablesTopology();
         Map<String, AlterTableGroupItemPreparedData> tableGroupItemPreparedDataMap =
             alterTableGroupMovePartitionBuilder.getTablesPreparedData();
+        Map<String, List<PhyDdlTableOperation>> discardTableSpacePhysicalPlansMap =
+            alterTableGroupMovePartitionBuilder.getDiscardTableSpacePhysicalPlansMap();
         Map<String, List<PhyDdlTableOperation>> newPartitionsPhysicalPlansMap =
             alterTableGroupMovePartitionBuilder.getNewPartitionsPhysicalPlansMap();
         Map<String, Map<String, Pair<String, String>>> orderedTargetTablesLocations =
             alterTableGroupMovePartitionBuilder.getOrderedTargetTablesLocations();
+        Map<String, Map<String, Pair<String, String>>> tbPtbGroup =
+            alterTableGroupMovePartitionBuilder.getTbPtbGroupMap();
+
         return new AlterTableGroupMovePartitionJobFactory(ddl, preparedData, tableGroupItemPreparedDataMap,
-            newPartitionsPhysicalPlansMap, tablesTopologyMap, targetTablesTopology, sourceTablesTopology,
-            orderedTargetTablesLocations, executionContext).create();
+            newPartitionsPhysicalPlansMap, discardTableSpacePhysicalPlansMap, tbPtbGroup, tablesTopologyMap,
+            targetTablesTopology, sourceTablesTopology, orderedTargetTablesLocations,
+            executionContext).create();
+    }
+
+    public void constructSubTasks(String schemaName, ExecutableDdlJob executableDdlJob, DdlTask tailTask,
+                                  List<DdlTask> bringUpAlterTableGroupTasks, String targetPartitionName) {
+        EmptyTask emptyTask = new EmptyTask(schemaName);
+        ChangeSetApplyExecutorInitTask changeSetApplyExecutorInitTask =
+            new ChangeSetApplyExecutorInitTask(schemaName,
+                ScaleOutUtils.getTableGroupTaskParallelism(executionContext));
+        ChangeSetApplyFinishTask changeSetApplyFinishTask = new ChangeSetApplyFinishTask(preparedData.getSchemaName(),
+            String.format("schema %s group %s start double write ", preparedData.getSchemaName(),
+                preparedData.getTableGroupName()));
+        SyncLsnTask syncLsnTask = null;
+        boolean syncLsnTaskAdded = false;
+        boolean emptyTaskAdded = false;
+
+        final boolean useChangeSet = ChangeSetUtils.isChangeSetProcedure(executionContext);
+
+        int parallelism = ScaleOutUtils.getTableGroupTaskParallelism(executionContext);
+        Queue<DdlTask> leavePipeLineQueue = new LinkedList<>();
+        for (Map.Entry<String, Map<String, List<List<String>>>> entry : tablesTopologyMap.entrySet()) {
+
+            AlterTableGroupSubTaskJobFactory subTaskJobFactory;
+            String logicalTableName = tablesPrepareData.get(entry.getKey()).getTableName();
+            TableMeta tm = OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(logicalTableName);
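+            // Use the change-set based sub-job when change-set is enabled and the table
+            // (with a primary key) supports it; otherwise fall back to the plain sub-task factory.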
+            if (useChangeSet && tm.isHasPrimaryKey() && ChangeSetUtils.supportUseChangeSet(taskType, tm)) {
+                subTaskJobFactory =
+                    new AlterTableGroupChangeSetJobFactory(ddl, preparedData, tablesPrepareData.get(entry.getKey()),
+                        newPartitionsPhysicalPlansMap.get(entry.getKey()),
+                        discardTableSpacePhysicalPlansMap.get(entry.getKey()),
+                        tbPtbGroupMap.get(entry.getKey()),
+                        sourceAndTarDnMap,
+                        storageInstAndUserInfos,
+                        tablesTopologyMap.get(entry.getKey()),
+                        targetTablesTopology.get(entry.getKey()), sourceTablesTopology.get(entry.getKey()),
+                        orderedTargetTablesLocations.get(entry.getKey()), targetPartitionName, false,
+                        changeSetApplyExecutorInitTask,
+                        changeSetApplyFinishTask,
+                        taskType, executionContext);
+            } else {
+                subTaskJobFactory =
+                    new AlterTableGroupSubTaskJobFactory(ddl, preparedData, tablesPrepareData.get(entry.getKey()),
+                        newPartitionsPhysicalPlansMap.get(entry.getKey()), tablesTopologyMap.get(entry.getKey()),
+                        targetTablesTopology.get(entry.getKey()), sourceTablesTopology.get(entry.getKey()),
+                        orderedTargetTablesLocations.get(entry.getKey()), targetPartitionName, false, taskType,
+                        executionContext);
+            }
+            ExecutableDdlJob subTask = subTaskJobFactory.create();
+            executableDdlJob.combineTasks(subTask);
+            executableDdlJob.addTaskRelationship(tailTask, subTask.getHead());
+
+            if (!syncLsnTaskAdded && preparedData.isUsePhysicalBackfill()) {
+
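+                // Added once per job: map every source/target group to its storage
+                // instance so the SyncLsnTask covers all DNs involved in the move.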
+                Map<String, Set<String>> sourceTableTopology = sourceTablesTopology.get(entry.getKey());
+                Map<String, Set<String>> targetTableTopology = targetTablesTopology.get(entry.getKey());
+                Map<String, String> targetGroupAndStorageIdMap = new HashMap<>();
+                Map<String, String> sourceGroupAndStorageIdMap = new HashMap<>();
+                for (String groupName : sourceTableTopology.keySet()) {
+                    sourceGroupAndStorageIdMap.put(groupName,
+                        DbTopologyManager.getStorageInstIdByGroupName(schemaName, groupName));
+                }
+                for (String groupName : targetTableTopology.keySet()) {
+                    targetGroupAndStorageIdMap.put(groupName,
+                        DbTopologyManager.getStorageInstIdByGroupName(schemaName, groupName));
+                }
+
+                syncLsnTask = new SyncLsnTask(schemaName, sourceGroupAndStorageIdMap, targetGroupAndStorageIdMap);
+                executableDdlJob.addTask(syncLsnTask);
+                syncLsnTaskAdded = true;
+            }
+
+            if (preparedData.isUsePhysicalBackfill()) {
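+                // Re-wire the job graph: detach the default backfill edge, route it
+                // through the SyncLsnTask, and chain the physical backfill pipelines
+                // behind it, keeping at most `parallelism` pipelines running at once.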
+                for (List<DdlTask> pipeLine : GeneralUtil.emptyIfNull(subTaskJobFactory.getPhysicalyTaskPipeLine())) {
+                    DdlTask parentLeaveNode;
+                    if (leavePipeLineQueue.size() < parallelism) {
+                        parentLeaveNode = syncLsnTask;
+                    } else {
+                        parentLeaveNode = leavePipeLineQueue.poll();
+                    }
+                    executableDdlJob.removeTaskRelationship(subTaskJobFactory.getBackfillTaskEdgeNodes().get(0),
+                        subTaskJobFactory.getBackfillTaskEdgeNodes().get(1));
+                    executableDdlJob.addTaskRelationship(subTaskJobFactory.getBackfillTaskEdgeNodes().get(0),
+                        syncLsnTask);
+                    executableDdlJob.addTaskRelationship(parentLeaveNode,
+                        pipeLine.get(0));
+                    executableDdlJob.addTaskRelationship(pipeLine.get(0),
+                        pipeLine.get(1));
+                    PhysicalBackfillTask physicalBackfillTask = (PhysicalBackfillTask) pipeLine.get(1);
+                    Map<String, List<List<String>>> targetTables = new HashMap<>();
+                    String tarGroupKey = physicalBackfillTask.getSourceTargetGroup().getValue();
+                    String phyTableName = physicalBackfillTask.getPhysicalTableName();
+
+                    targetTables.computeIfAbsent(tarGroupKey, k -> new ArrayList<>())
+                        .add(Collections.singletonList(phyTableName));
+
+                    ImportTableSpaceDdlNormalTask importTableSpaceDdlNormalTask = new ImportTableSpaceDdlNormalTask(
+                        preparedData.getSchemaName(), entry.getKey(), targetTables);
+                    for (int i = 2; i < pipeLine.size(); i++) {
+                        executableDdlJob.addTaskRelationship(pipeLine.get(1),
+                            pipeLine.get(i));
+                        executableDdlJob.addTaskRelationship(pipeLine.get(i),
+                            importTableSpaceDdlNormalTask);
+                    }
+                    executableDdlJob.addTaskRelationship(importTableSpaceDdlNormalTask,
+                        subTaskJobFactory.getBackfillTaskEdgeNodes().get(1));
+                    leavePipeLineQueue.add(importTableSpaceDdlNormalTask);
+                }
+            }
+
+            if (subTaskJobFactory.getCdcTableGroupDdlMarkTask() != null) {
+                if (!emptyTaskAdded) {
+                    executableDdlJob.addTask(emptyTask);
+                    emptyTaskAdded = true;
+                }
+                executableDdlJob.addTask(subTaskJobFactory.getCdcTableGroupDdlMarkTask());
+                executableDdlJob.addTaskRelationship(subTask.getTail(), emptyTask);
+                executableDdlJob.addTaskRelationship(emptyTask, subTaskJobFactory.getCdcTableGroupDdlMarkTask());
+                executableDdlJob.addTaskRelationship(subTaskJobFactory.getCdcTableGroupDdlMarkTask(),
+                    bringUpAlterTableGroupTasks.get(0));
+            } else {
+                executableDdlJob.addTaskRelationship(subTask.getTail(), bringUpAlterTableGroupTasks.get(0));
+            }
+
+            DdlTask dropUselessTableTask = ComplexTaskFactory.CreateDropUselessPhyTableTask(schemaName, entry.getKey(),
+                sourceTablesTopology.get(entry.getKey()), executionContext);
+            executableDdlJob.addTask(dropUselessTableTask);
+            executableDdlJob.labelAsTail(dropUselessTableTask);
+            executableDdlJob.addTaskRelationship(
+                bringUpAlterTableGroupTasks.get(bringUpAlterTableGroupTasks.size() - 1), dropUselessTableTask);
+            executableDdlJob.getExcludeResources().addAll(subTask.getExcludeResources());
+        }
     }
 
     @Override
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupRenamePartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupRenamePartitionJobFactory.java
index 583894b24..a7812b2c5 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupRenamePartitionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupRenamePartitionJobFactory.java
@@ -18,12 +18,14 @@
 
 import com.alibaba.polardbx.common.properties.ConnectionParams;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.TablesSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcAlterTableGroupRenamePartitionMarkTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterTableGroupRenamePartitionChangeMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterTableGroupValidateTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.TableGroupSyncTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.executor.ddl.newengine.job.TransientDdlJob;
 import com.alibaba.polardbx.gms.partition.TablePartRecordInfoContext;
 import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
@@ -64,6 +66,9 @@ protected void validate() {
 
     @Override
     protected ExecutableDdlJob doCreate() {
+        if (preparedData.isRenameNothing()) {
+            return new TransientDdlJob();
+        }
         boolean enablePreemptiveMdl =
             executionContext.getParamManager().getBoolean(ConnectionParams.ENABLE_PREEMPTIVE_MDL);
         Long initWait = executionContext.getParamManager().getLong(ConnectionParams.PREEMPTIVE_MDL_INITWAIT);
@@ -75,8 +80,7 @@ protected ExecutableDdlJob doCreate() {
         TableGroupConfig tableGroupConfig =
             OptimizerContext.getContext(preparedData.getSchemaName()).getTableGroupInfoManager()
                 .getTableGroupConfigByName(preparedData.getTableGroupName());
-        for (TablePartRecordInfoContext tablePartRecordInfoContext : tableGroupConfig.getAllTables()) {
-            String tableName = tablePartRecordInfoContext.getLogTbRec().getTableName();
+        for (String tableName : tableGroupConfig.getAllTables()) {
             String primaryTableName;
             TableMeta tableMeta = executionContext.getSchemaManager(preparedData.getSchemaName()).getTable(tableName);
             if (tableMeta.isGsi()) {
@@ -104,13 +108,18 @@ protected ExecutableDdlJob doCreate() {
         DdlTask validateTask =
             new AlterTableGroupValidateTask(preparedData.getSchemaName(), preparedData.getTableGroupName(),
                 tablesVersion,
-                true, null);
+                true, null, false);
+
+        CdcAlterTableGroupRenamePartitionMarkTask cdcAlterTableGroupRenamePartitionMarkTask =
+            new CdcAlterTableGroupRenamePartitionMarkTask(preparedData.getSchemaName(),
+                preparedData.getTableGroupName());
 
         DdlTask reloadTableGroup =
             new TableGroupSyncTask(preparedData.getSchemaName(), preparedData.getTableGroupName());
         executableDdlJob.addSequentialTasks(Lists.newArrayList(
             validateTask,
             changeMetaTask,
+            cdcAlterTableGroupRenamePartitionMarkTask,
             syncTask,
             reloadTableGroup
         ));
@@ -133,8 +142,7 @@ protected void excludeResources(Set resources) {
         TableGroupConfig tableGroupConfig =
             OptimizerContext.getContext(preparedData.getSchemaName()).getTableGroupInfoManager()
                 .getTableGroupConfigByName(preparedData.getTableGroupName());
-        for (TablePartRecordInfoContext tablePartRecordInfoContext : tableGroupConfig.getAllTables()) {
-            String tableName = tablePartRecordInfoContext.getLogTbRec().getTableName();
+        for (String tableName : tableGroupConfig.getAllTables()) {
             resources.add(concatWithDot(preparedData.getSchemaName(), tableName));
         }
     }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupReorgPartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupReorgPartitionJobFactory.java
index ce8d142ae..17d2ecb68 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupReorgPartitionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupReorgPartitionJobFactory.java
@@ -82,7 +82,7 @@ protected ExecutableDdlJob doCreate() {
         Map<String, Long> tablesVersion = getTablesVersion();
 
         DdlTask validateTask = new AlterTableGroupValidateTask(schemaName, tableGroupName, tablesVersion, true,
-            reorgPreparedData.getTargetPhysicalGroups());
+            reorgPreparedData.getTargetPhysicalGroups(), false);
 
         Set<Long> outdatedPartitionGroupId = new HashSet<>();
         List<String> outdatedPartitionGroupLocalities = new ArrayList<>();
@@ -166,7 +166,7 @@ protected ExecutableDdlJob doCreate() {
             ));
 
         executableDdlJob.setMaxParallelism(ScaleOutUtils.getTableGroupTaskParallelism(executionContext));
-
+        attacheCdcFinalMarkTask(executableDdlJob);
         return executableDdlJob;
     }
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSetLocalityJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSetLocalityJobFactory.java
index 3f5b282aa..ddabe748f 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSetLocalityJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSetLocalityJobFactory.java
@@ -20,7 +20,10 @@
 import com.alibaba.polardbx.executor.balancer.policy.PolicyUtils;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.SubJobTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.TablesSyncTask;
-import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.*;
+import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterTableGroupSetLocalityChangeMetaTask;
+import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterTableGroupValidateTask;
+import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.BackgroupRebalanceTask;
+import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.TableGroupSyncTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
@@ -75,8 +78,7 @@ protected ExecutableDdlJob doCreate() {
         TableGroupConfig tableGroupConfig =
             OptimizerContext.getContext(preparedData.getSchemaName()).getTableGroupInfoManager()
                 .getTableGroupConfigByName(preparedData.getTableGroupName());
-        for (TablePartRecordInfoContext tablePartRecordInfoContext : tableGroupConfig.getAllTables()) {
-            String tableName = tablePartRecordInfoContext.getLogTbRec().getTableName();
+        for (String tableName : tableGroupConfig.getAllTables()) {
             String primaryTableName;
             TableMeta tableMeta = executionContext.getSchemaManager(preparedData.getSchemaName()).getTable(tableName);
             if (tableMeta.isGsi()) {
@@ -105,7 +107,7 @@ protected ExecutableDdlJob doCreate() {
         DdlTask validateTask =
             new AlterTableGroupValidateTask(preparedData.getSchemaName(), preparedData.getTableGroupName(),
                 tablesVersion,
-                true, null);
+                true, null, false);
 
         DdlTask rebalanceTask =
             new BackgroupRebalanceTask(preparedData.getSchemaName(), preparedData.getRebalanceSql());
@@ -156,8 +158,7 @@ protected void excludeResources(Set resources) {
             TableGroupConfig tableGroupConfig =
                 OptimizerContext.getContext(preparedData.getSchemaName()).getTableGroupInfoManager()
                     .getTableGroupConfigByName(preparedData.getTableGroupName());
-            for (TablePartRecordInfoContext tablePartRecordInfoContext : tableGroupConfig.getAllTables()) {
-                String tableName = tablePartRecordInfoContext.getLogTbRec().getTableName();
+            for (String tableName : tableGroupConfig.getAllTables()) {
                 resources.add(concatWithDot(preparedData.getSchemaName(), tableName));
             }
         }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSetPartitionsLocalityJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSetPartitionsLocalityJobFactory.java
index 26bbb5ade..93dc0384d 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSetPartitionsLocalityJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSetPartitionsLocalityJobFactory.java
@@ -18,7 +18,6 @@
 
 import com.alibaba.polardbx.common.properties.ConnectionParams;
 import com.alibaba.polardbx.executor.balancer.policy.PolicyUtils;
-import com.alibaba.polardbx.executor.ddl.job.task.basic.SubJobTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.TablesSyncTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterTableGroupSetPartitionsLocalityChangeMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterTableGroupValidateTask;
@@ -82,8 +81,7 @@ protected ExecutableDdlJob doCreate() {
         TableGroupConfig tableGroupConfig =
             OptimizerContext.getContext(preparedData.getSchemaName()).getTableGroupInfoManager()
                 .getTableGroupConfigByName(preparedData.getTableGroupName());
-        for (TablePartRecordInfoContext tablePartRecordInfoContext : tableGroupConfig.getAllTables()) {
-            String tableName = tablePartRecordInfoContext.getLogTbRec().getTableName();
+        for (String tableName : tableGroupConfig.getAllTables()) {
             String primaryTableName;
             TableMeta tableMeta = executionContext.getSchemaManager(preparedData.getSchemaName()).getTable(tableName);
             if (tableMeta.isGsi()) {
@@ -115,7 +113,7 @@ protected ExecutableDdlJob doCreate() {
         DdlTask validateTask =
             new AlterTableGroupValidateTask(preparedData.getSchemaName(), preparedData.getTableGroupName(),
                 tablesVersion,
-                true, null);
+                true, null, false);
 
         BackgroupRebalanceTask rebalanceTask =
             new BackgroupRebalanceTask(preparedData.getSchemaName(), preparedData.getRebalanceSql());
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSplitPartitionByHotValueJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSplitPartitionByHotValueJobFactory.java
index d3027aae2..199d5c53a 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSplitPartitionByHotValueJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSplitPartitionByHotValueJobFactory.java
@@ -36,14 +36,12 @@
 import com.alibaba.polardbx.optimizer.core.rel.PhyDdlTableOperation;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupItemPreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupSplitPartitionByHotValuePreparedData;
-import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableSplitPartitionByHotValuePreparedData;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 import org.apache.calcite.rel.core.DDL;
 import org.apache.commons.lang.StringUtils;
 
 import java.util.ArrayList;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -86,7 +84,7 @@ protected ExecutableDdlJob doCreate() {
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName,
                 alterTableGroupSplitPartitionByHotValuePreparedData.getTableGroupName(), tablesVersion, true,
-                alterTableGroupSplitPartitionByHotValuePreparedData.getTargetPhysicalGroups());
+                alterTableGroupSplitPartitionByHotValuePreparedData.getTargetPhysicalGroups(), false);
         TableGroupConfig tableGroupConfig = OptimizerContext.getContext(schemaName).getTableGroupInfoManager()
             .getTableGroupConfigByName(alterTableGroupSplitPartitionByHotValuePreparedData.getTableGroupName());
 
@@ -149,6 +147,7 @@ protected ExecutableDdlJob doCreate() {
             return new TransientDdlJob();
         } else {
             executableDdlJob.setMaxParallelism(ScaleOutUtils.getTableGroupTaskParallelism(executionContext));
+            attacheCdcFinalMarkTask(executableDdlJob);
             return executableDdlJob;
         }
     }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSplitPartitionByHotValueSubTaskJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSplitPartitionByHotValueSubTaskJobFactory.java
index 9e14070d1..bff0fb6bb 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSplitPartitionByHotValueSubTaskJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSplitPartitionByHotValueSubTaskJobFactory.java
@@ -27,7 +27,6 @@
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupItemPreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupSplitPartitionByHotValuePreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
-import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
 import com.alibaba.polardbx.optimizer.tablegroup.AlterTableGroupSnapShotUtils;
 import org.apache.calcite.rel.core.DDL;
 import org.apache.calcite.sql.SqlAlterTableGroup;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSplitPartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSplitPartitionJobFactory.java
index 54ab8903f..40dbe5903 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSplitPartitionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSplitPartitionJobFactory.java
@@ -36,7 +36,6 @@
 import com.alibaba.polardbx.optimizer.core.rel.PhyDdlTableOperation;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupItemPreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupSplitPartitionPreparedData;
-import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableSplitPartitionPreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionByDefinition;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
 import com.alibaba.polardbx.optimizer.partition.PartitionSpec;
@@ -47,7 +46,6 @@
 
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -90,10 +88,10 @@ protected ExecutableDdlJob doCreate() {
 
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName, alterTableGroupSplitPartitionPreparedData.getTableGroupName(),
-                tablesVersion, true, alterTableGroupSplitPartitionPreparedData.getTargetPhysicalGroups());
+                tablesVersion, true, alterTableGroupSplitPartitionPreparedData.getTargetPhysicalGroups(), false);
         TableGroupConfig tableGroupConfig = OptimizerContext.getContext(schemaName).getTableGroupInfoManager()
             .getTableGroupConfigByName(alterTableGroupSplitPartitionPreparedData.getTableGroupName());
-        String firstTbInTg = tableGroupConfig.getAllTables().get(0).getLogTbRec().getTableName();
+        String firstTbInTg = tableGroupConfig.getAllTables().get(0);
         TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(firstTbInTg);
         PartitionInfo partitionInfo = tableMeta.getPartitionInfo();
 
@@ -170,6 +168,7 @@ protected ExecutableDdlJob doCreate() {
 
         executableDdlJob.setMaxParallelism(ScaleOutUtils.getTableGroupTaskParallelism(executionContext));
 
+        attacheCdcFinalMarkTask(executableDdlJob);
         return executableDdlJob;
     }
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSubTaskJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSubTaskJobFactory.java
index e5b9d5f71..587a61fbc 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSubTaskJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupSubTaskJobFactory.java
@@ -17,6 +17,7 @@
 package com.alibaba.polardbx.executor.ddl.job.factory;
 
 import com.alibaba.polardbx.common.TddlConstants;
+import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility;
 import com.alibaba.polardbx.common.ddl.foreignkey.ForeignKeyData;
 import com.alibaba.polardbx.common.exception.TddlRuntimeException;
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
@@ -151,7 +152,7 @@ protected ExecutableDdlJob doCreate() {
         phyDdlTableOperations.forEach(o -> o.setPartitionInfo(newPartitionInfo));
         if (!tableTopology.isEmpty()) {
             PhysicalPlanData physicalPlanData =
-                DdlJobDataConverter.convertToPhysicalPlanData(tableTopology, phyDdlTableOperations);
+                DdlJobDataConverter.convertToPhysicalPlanData(tableTopology, phyDdlTableOperations, executionContext);
             DdlTask phyDdlTask =
                 new CreateTablePhyDdlTask(schemaName, physicalPlanData.getLogicalTableName(), physicalPlanData);
             taskList.add(phyDdlTask);
@@ -193,7 +194,7 @@ protected ExecutableDdlJob doCreate() {
             List<PhyDdlTableOperation> phyDdlTableOperations = builder.build().getPhysicalPlans();
 
             PhysicalPlanData physicalPlanData =
-                DdlJobDataConverter.convertToPhysicalPlanData(tableTopology, phyDdlTableOperations);
+                DdlJobDataConverter.convertToPhysicalPlanData(tableTopology, phyDdlTableOperations, executionContext);
             DdlTask phyDdlTask =
                 new DropIndexPhyDdlTask(schemaName, physicalPlanData);
             taskList.add(phyDdlTask);
@@ -209,7 +210,9 @@ protected ExecutableDdlJob doCreate() {
 
         Map<String, Set<String>> newTopology = newPartitionInfo.getTopology();
         DdlTask cdcDdlMarkTask =
-            new CdcTableGroupDdlMarkTask(tableGroupName, schemaName, tableName, sqlKind, newTopology, dc.getDdlStmt());
+            new CdcTableGroupDdlMarkTask(tableGroupName, schemaName, tableName, sqlKind, newTopology,
+                dc.getDdlStmt(),
+                sqlKind == SqlKind.ALTER_TABLEGROUP ? CdcDdlMarkVisibility.Private : CdcDdlMarkVisibility.Protected);
         if (stayAtPublic) {
             cdcTableGroupDdlMarkTask = cdcDdlMarkTask;
         }
@@ -368,4 +371,12 @@ DdlTask getPushDownForeignKeysTask(String schemaName, String tableName, boolean
             return new DropLogicalForeignKeyTask(schemaName, tableName, pushDownForeignKeys);
         }
     }
+
+    public List<DdlTask> getBackfillTaskEdgeNodes() {
+        return null;
+    }
+
+    public List<List<DdlTask>> getPhysicalyTaskPipeLine() {
+        return null;
+    }
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupTruncatePartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupTruncatePartitionJobFactory.java
index fdfa9a02a..5f5d626c4 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupTruncatePartitionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableGroupTruncatePartitionJobFactory.java
@@ -45,6 +45,8 @@
 import java.util.Set;
 import java.util.TreeMap;
 
+import static com.alibaba.polardbx.common.cdc.ICdcManager.DEFAULT_DDL_VERSION_ID;
+
 public class AlterTableGroupTruncatePartitionJobFactory extends DdlJobFactory {
 
     private final static Logger LOG = SQLRecorderLogger.ddlEngineLogger;
@@ -81,7 +83,7 @@ protected ExecutableDdlJob doCreate() {
 
         boolean isBrdTg = tableGroupConfig.getTableGroupRecord().isBroadCastTableGroup();
         DdlTask validateTask = new AlterTableGroupValidateTask(schemaName, tableGroupName, tableVersions, true,
-            isBrdTg ? null : preparedData.getTargetPhysicalGroups());
+            isBrdTg ? null : preparedData.getTargetPhysicalGroups(), false);
 
         executableDdlJob.labelAsHead(validateTask);
 
@@ -118,7 +120,7 @@ private ExecutableDdlJob buildSubTasks(String schemaName, String tableName,
         ValidateTableVersionTask validateTableVersionTask = new ValidateTableVersionTask(schemaName, tableVersions);
 
         DdlTask phyDdlTask = new TruncateTablePhyDdlTask(schemaName, physicalPlanData);
-        DdlTask cdcDdlMarkTask = new CdcDdlMarkTask(schemaName, physicalPlanData, false, false);
+        DdlTask cdcDdlMarkTask = new CdcDdlMarkTask(schemaName, physicalPlanData, false, false, DEFAULT_DDL_VERSION_ID);
 
         subTasks.addSequentialTasks(Lists.newArrayList(
             validateTableVersionTask,
@@ -136,9 +138,8 @@ protected Map getTableVersions(TableGroupConfig tableGroupConfig)
         Map tableVersions = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
 
         tableGroupConfig.getAllTables().forEach(t -> {
-            String tableName = t.getTableName();
-            Long tableVersion = executionContext.getSchemaManager(schemaName).getTable(tableName).getVersion();
-            tableVersions.put(tableName, tableVersion);
+            Long tableVersion = executionContext.getSchemaManager(schemaName).getTable(t).getVersion();
+            tableVersions.put(t, tableVersion);
         });
 
         try (Connection conn = MetaDbDataSource.getInstance().getConnection()) {
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableJobFactory.java
index 3d64e9809..ab9cc5835 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableJobFactory.java
@@ -16,8 +16,15 @@
 
 package com.alibaba.polardbx.executor.ddl.job.factory;
 
+import com.alibaba.polardbx.common.Engine;
 import com.alibaba.polardbx.common.ddl.foreignkey.ForeignKeyData;
+import com.alibaba.polardbx.common.properties.ConnectionParams;
 import com.alibaba.polardbx.common.utils.Pair;
+import com.alibaba.polardbx.druid.sql.ast.expr.SQLIdentifierExpr;
+import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableStatement;
+import com.alibaba.polardbx.druid.sql.ast.statement.SQLCreateTableStatement;
+import com.alibaba.polardbx.druid.sql.ast.statement.SQLExprTableSource;
+import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlAlterTableOption;
 import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
 import com.alibaba.polardbx.executor.ddl.job.factory.util.FactoryUtils;
 import com.alibaba.polardbx.executor.ddl.job.task.BaseValidateTask;
@@ -28,30 +35,53 @@
 import com.alibaba.polardbx.executor.ddl.job.task.basic.AlterTableInsertColumnsMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.AlterTablePhyDdlTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.AlterTableValidateTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.DropEntitySecurityAttrTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.spec.AlterTableRollbacker;
 import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcAlterTableRewrittenDdlMarkTask;
 import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcDdlMarkTask;
 import com.alibaba.polardbx.executor.ddl.job.task.factory.GsiTaskFactory;
 import com.alibaba.polardbx.executor.ddl.job.task.shared.EmptyTask;
+import com.alibaba.polardbx.executor.ddl.job.task.twophase.CommitTwoPhaseDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.twophase.CompensationPhyDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.twophase.EmitPhysicalDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.twophase.FinishTwoPhaseDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.twophase.InitTwoPhaseDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.twophase.LogTwoPhaseDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.twophase.PrepareTwoPhaseDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.twophase.WaitTwoPhaseDdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlExceptionAction;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
 import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4AlterTable;
+import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlHelper;
+import com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlManager;
+import com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils;
+import com.alibaba.polardbx.executor.shadowtable.ShadowTableUtils;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityEntity;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityManager;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityLabel;
 import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig;
 import com.alibaba.polardbx.gms.topology.DbInfoManager;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
+import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager;
 import com.alibaba.polardbx.optimizer.config.table.SchemaManager;
 import com.alibaba.polardbx.optimizer.config.table.TableMeta;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.planner.rule.util.CBOUtil;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterTable;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTablePreparedData;
+import com.alibaba.polardbx.optimizer.parse.FastsqlUtils;
+import com.alibaba.polardbx.optimizer.partition.common.LocalPartitionDefinitionInfo;
+import com.alibaba.polardbx.statistics.SQLRecorderLogger;
 import com.google.common.collect.Lists;
+import org.apache.calcite.sql.SqlIdentifier;
 import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.lang.StringUtils;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -59,6 +89,8 @@
 import java.util.Set;
 import java.util.stream.Collectors;
 
+import static com.alibaba.polardbx.common.properties.ConnectionParams.CHECK_TABLE_BEFORE_PHY_DDL;
+
 public class AlterTableJobFactory extends DdlJobFactory {
 
     protected final PhysicalPlanData physicalPlanData;
@@ -83,6 +115,35 @@ public class AlterTableJobFactory extends DdlJobFactory {
      */
     protected boolean validateExistence = true;
 
+    private Boolean supportTwoPhaseDdl = false;
+
+    private String finalStatus = "FINISH";
+
+    static final String INPLACE_ALGORITHM = "INPLACE";
+
+    static final String COPY_ALGORITHM = "COPY";
+
+    static final String DEFAULT_ALGORITHM = "DEFAULT";
+
+    static final String INSTANT_ALGORITHM = "INSTANT";
+
+    static final String ALGORITHM = "ALGORITHM";
+
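+    // Classifies how the physical ALTER statement runs on the data nodes; most values
+    // are assigned by the shadow-table probes below (INPLACE vs COPY per statement
+    // family), while the OMC / idempotent values cover online MODIFY/CHANGE COLUMN.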
+    private enum DdlAlgorithmType {
+        INPLACE_ADD_DROP_COLUMN_INDEX,
+        COPY_ADD_DROP_COLUMN_INDEX,
+        INPLACE_MODIFY_CHANGE_COLUMN,
+        COPY_MODIFY_CHANGE_COLUMN,
+
+        OMC_MODIFY_CHANGE_COLUMN,
+        IDEMPOTENT_MODIFY_CHANGE_COLUMN,
+
+        UNKNOWN_MODIFY_CHANGE_COLUMN_ALGORITHM,
+
+        UNKNOWN_ALGORITHM
+    }
+
     protected ExecutionContext executionContext;
 
     public AlterTableJobFactory(PhysicalPlanData physicalPlanData,
@@ -97,6 +158,14 @@ public AlterTableJobFactory(PhysicalPlanData physicalPlanData,
         this.executionContext = executionContext;
     }
 
+    public void setSupportTwoPhaseDdl(boolean supportTwoPhaseDdl) {
+        this.supportTwoPhaseDdl = supportTwoPhaseDdl;
+    }
+
+    public void setFinalStatus(String finalStatus) {
+        this.finalStatus = finalStatus;
+    }
+
     public void withAlterGsi(boolean alterGsi, String primaryTableName) {
         this.alterGsiTable = alterGsi;
         this.primaryTableName = primaryTableName;
@@ -116,16 +185,19 @@ protected ExecutableDdlJob doCreate() {
         boolean isNewPart = DbInfoManager.getInstance().isNewPartitionDb(schemaName);
 
         TableGroupConfig tableGroupConfig = isNewPart ? physicalPlanData.getTableGroupConfig() : null;
-        DdlTask validateTask =
-            this.validateExistence ?
-                new AlterTableValidateTask(schemaName, logicalTableName,
-                    logicalAlterTable.getSqlAlterTable().getSourceSql(), prepareData.getTableVersion(),
-                    tableGroupConfig) :
-                new EmptyTask(schemaName);
+        Boolean isPushDownMultipleStatement = ((AlterTablePreparedData) prepareData).isPushDownMultipleStatement();
+        DdlTask validateTask = this.validateExistence ? new AlterTableValidateTask(schemaName, logicalTableName,
+            logicalAlterTable.getSqlAlterTable().getSourceSql(), prepareData.getTableVersion(),
+            isPushDownMultipleStatement, tableGroupConfig) : new EmptyTask(schemaName);
 
         final boolean isDropColumnOrDropIndex =
             CollectionUtils.isNotEmpty(prepareData.getDroppedColumns())
                 || CollectionUtils.isNotEmpty(prepareData.getDroppedIndexes());
+        final boolean isAddColumnOrAddIndex =
+            CollectionUtils.isNotEmpty(prepareData.getAddedColumns())
+                || CollectionUtils.isNotEmpty(prepareData.getAddedIndexes());
+        final boolean isModifyColumn =
+            CollectionUtils.isNotEmpty(prepareData.getUpdatedColumns());
 
         List<DdlTask> alterGsiMetaTasks = new ArrayList<>();
         if (this.alterGsiTable) {
@@ -163,10 +235,19 @@ protected ExecutableDdlJob doCreate() {
             !prepareData.getAddedForeignKeys().isEmpty() || !prepareData.getDroppedForeignKeys().isEmpty();
         boolean isForeignKeyCdcMark = isForeignKeysDdl && !executionContext.getDdlContext().isFkRepartition();
 
+        Boolean generateTwoPhaseDdlTask = supportTwoPhaseDdl;
         DdlTask phyDdlTask = new AlterTablePhyDdlTask(schemaName, logicalTableName, physicalPlanData);
         if (this.repartition) {
             ((AlterTablePhyDdlTask) phyDdlTask).setSourceSql(logicalAlterTable.getNativeSql());
+            generateTwoPhaseDdlTask = false;
         }
+        if (generateTwoPhaseDdlTask) {
+            generateTwoPhaseDdlTask = checkIfGenerateTwoPhaseDdl(prepareData);
+        }
+        // TODO: exclude Physical Partition Table on Two phase ddl.
+        // TODO: exclude foreign key table.
+
+        Boolean withForeignKey = false;
 
         physicalPlanData.setAlterTablePreparedData(prepareData);
         DdlTask cdcDdlMarkTask;
@@ -181,7 +262,8 @@ protected ExecutableDdlJob doCreate() {
             if (ignoreMarkCdcDDL()) {
                 cdcDdlMarkTask = null;
             } else {
-                cdcDdlMarkTask = new CdcDdlMarkTask(schemaName, physicalPlanData, false, isForeignKeyCdcMark);
+                cdcDdlMarkTask = new CdcDdlMarkTask(schemaName, physicalPlanData, false, isForeignKeyCdcMark,
+                    prepareData.getDdlVersionId());
             }
         }
 
@@ -212,7 +294,8 @@ protected ExecutableDdlJob doCreate() {
                 prepareData.getTableComment(),
                 prepareData.getTableRowFormat(),
                 physicalPlanData.getSequence(),
-                prepareData.isOnlineModifyColumnIndexTask());
+                prepareData.isOnlineModifyColumnIndexTask(),
+                prepareData.getDdlVersionId());
         } else {
             // only add columns
             updateMetaTask = new AlterTableInsertColumnsMetaTask(
@@ -234,15 +317,38 @@ protected ExecutableDdlJob doCreate() {
                 new AlterTableHideMetaTask(schemaName, logicalTableName,
                     prepareData.getDroppedColumns(),
                     prepareData.getDroppedIndexes());
+            DropEntitySecurityAttrTask dropESATask =
+                createDropESATask(schemaName, logicalTableName, prepareData.getDroppedColumns());
             DdlTask tableSyncTaskAfterHiding = new TableSyncTask(schemaName, logicalTableName);
-            taskList = Lists.newArrayList(
-                validateTask,
-                hideMetaTask,
-                tableSyncTaskAfterHiding,
-                phyDdlTask,
-                cdcDdlMarkTask,
-                updateMetaTask
-            ).stream().filter(Objects::nonNull).collect(Collectors.toList());
+            Pair<DdlAlgorithmType, Long> alterCheckResult =
+                alterTableViaDefaultAlgorithm(generateTwoPhaseDdlTask, isModifyColumn);
+            DdlAlgorithmType ddlAlgorithmType = alterCheckResult.getKey();
+            Long twoPhaseDdlId = alterCheckResult.getValue();
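+            // COPY ADD/DROP and unclassified algorithms keep the classic single-shot
+            // physical DDL chain instead of the two-phase pipeline.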
+            if (!generateTwoPhaseDdlTask || ddlAlgorithmType == DdlAlgorithmType.COPY_ADD_DROP_COLUMN_INDEX
+                || ddlAlgorithmType == DdlAlgorithmType.UNKNOWN_ALGORITHM) {
+                taskList = Lists.newArrayList(
+                    validateTask,
+                    hideMetaTask,
+                    tableSyncTaskAfterHiding,
+                    beginAlterColumnDefault,
+                    beginAlterColumnDefaultSyncTask,
+                    phyDdlTask,
+                    cdcDdlMarkTask,
+                    dropESATask,
+                    updateMetaTask
+                ).stream().filter(Objects::nonNull).collect(Collectors.toList());
+            } else {
+                taskList = Lists.newArrayList(
+                    validateTask,
+                    hideMetaTask,
+                    dropESATask,
+                    tableSyncTaskAfterHiding
+                );
+                taskList.addAll(generateTwoPhaseDdlTask(ddlAlgorithmType, twoPhaseDdlId));
+                taskList.add(cdcDdlMarkTask);
+                taskList.add(updateMetaTask);
+                taskList = taskList.stream().filter(Objects::nonNull).collect(Collectors.toList());
+            }
         } else {
             // 1. physical DDL
             // 2. alter GSI meta if necessary
@@ -252,23 +358,41 @@ protected ExecutableDdlJob doCreate() {
             if (AlterTableRollbacker.checkIfRollbackable(originDdl)) {
                 phyDdlTask = phyDdlTask.onExceptionTryRecoveryThenRollback();
             }
-            taskList = Lists.newArrayList(
-                validateTask,
-                beginAlterColumnDefault,
-                beginAlterColumnDefaultSyncTask,
-                phyDdlTask,
-                cdcDdlMarkTask,
-                updateMetaTask
-            ).stream().filter(Objects::nonNull).collect(Collectors.toList());
+            Pair<DdlAlgorithmType, Long> alterCheckResult =
+                alterTableViaDefaultAlgorithm(generateTwoPhaseDdlTask, isModifyColumn);
+            DdlAlgorithmType algorithmType = alterCheckResult.getKey();
+            Long twoPhaseDdlId = alterCheckResult.getValue();
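+            // Same gating as the drop-column branch: COPY ADD/DROP and unclassified
+            // algorithms keep the classic chain.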
+            if (!generateTwoPhaseDdlTask || algorithmType == DdlAlgorithmType.COPY_ADD_DROP_COLUMN_INDEX
+                || algorithmType == DdlAlgorithmType.UNKNOWN_MODIFY_CHANGE_COLUMN_ALGORITHM
+                || algorithmType == DdlAlgorithmType.UNKNOWN_ALGORITHM) {
+                taskList = Lists.newArrayList(
+                    validateTask,
+                    beginAlterColumnDefault,
+                    beginAlterColumnDefaultSyncTask,
+                    phyDdlTask,
+                    cdcDdlMarkTask,
+                    updateMetaTask
+                ).stream().filter(Objects::nonNull).collect(Collectors.toList());
+            } else {
+                taskList = Lists.newArrayList(
+                    validateTask,
+                    beginAlterColumnDefault,
+                    beginAlterColumnDefaultSyncTask
+                );
+
+                taskList.addAll(generateTwoPhaseDdlTask(algorithmType, twoPhaseDdlId));
+                taskList.add(cdcDdlMarkTask);
+                taskList.add(updateMetaTask);
+                taskList = taskList.stream().filter(Objects::nonNull).collect(Collectors.toList());
+            }
         }
-
         taskList.addAll(alterGsiMetaTasks);
 
         if (isForeignKeysDdl) {
             DdlTask updateForeignKeysTask =
                 new AlterForeignKeyTask(schemaName, logicalTableName, physicalPlanData.getDefaultDbIndex(),
                     physicalPlanData.getDefaultPhyTableName(), prepareData.getAddedForeignKeys(),
-                    prepareData.getDroppedForeignKeys());
+                    prepareData.getDroppedForeignKeys(), false);
             taskList.add(updateForeignKeysTask);
         }
 
@@ -277,6 +401,11 @@ protected ExecutableDdlJob doCreate() {
 
         taskList.add(tableSyncTaskAfterShowing);
 
+//        if (StringUtils.equalsIgnoreCase(finalStatus, "ONLY_FINISH")) {
+//            taskList = generateTwoPhaseDdlTask(isPhysicalOnline);
+//            executableDdlJob.addSequentialTasks(taskList);
+//            return executableDdlJob;
+//        }
         executableDdlJob.addSequentialTasks(taskList);
 
         executableDdlJob.labelAsHead(validateTask);
@@ -288,6 +417,25 @@ protected ExecutableDdlJob doCreate() {
         return executableDdlJob;
     }
 
+    private DropEntitySecurityAttrTask createDropESATask(
+        String schemaName, String tableName, List<String> droppedColumns) {
+        if (droppedColumns == null || droppedColumns.size() == 0) {
+            return null;
+        }
+        List<LBACSecurityEntity> esaList = new ArrayList<>();
+        for (String col : droppedColumns) {
+            LBACSecurityLabel label = LBACSecurityManager.getInstance().getColumnLabel(schemaName, tableName, col);
+            if (label != null) {
+                esaList.add(new LBACSecurityEntity(
+                    LBACSecurityEntity.EntityKey.createColumnKey(schemaName, tableName, col),
+                    LBACSecurityEntity.EntityType.COLUMN,
+                    label.getLabelName()
+                ));
+            }
+        }
+        return esaList.isEmpty() ? null : new DropEntitySecurityAttrTask(schemaName, tableName, esaList);
+    }
+
     @Override
     protected void excludeResources(Set resources) {
         resources.add(concatWithDot(schemaName, logicalTableName));
@@ -296,6 +444,275 @@ protected void excludeResources(Set resources) {
         if (tgName != null) {
             resources.add(concatWithDot(schemaName, tgName));
         }
+
+        // exclude foreign key tables
+        excludeFkTables(resources);
+    }
+
+    protected Boolean checkAlgorithmSpecificationCopy(SQLAlterTableStatement alterTable) {
+        List<MySqlAlterTableOption> alterTableItems = alterTable.getItems().stream()
+            .filter(o -> o instanceof MySqlAlterTableOption)
+            .map(o -> (MySqlAlterTableOption) o)
+            .filter(o -> o.getName().equalsIgnoreCase(ALGORITHM))
+            .collect(Collectors.toList());
+        return !(alterTableItems.isEmpty()) && alterTableItems.stream()
+            .allMatch(o -> o.getValue().toString().equalsIgnoreCase(COPY_ALGORITHM));
+    }
+
+    protected Boolean checkAlgorithmSpecificationOthers(SQLAlterTableStatement alterTable) {
+        List<String> supportedAlgorithms =
+            Arrays.asList(INPLACE_ALGORITHM, COPY_ALGORITHM, INSTANT_ALGORITHM, DEFAULT_ALGORITHM);
+        List<MySqlAlterTableOption> alterTableItems = alterTable.getItems().stream()
+            .filter(o -> o instanceof MySqlAlterTableOption)
+            .map(o -> (MySqlAlterTableOption) o)
+            .filter(o -> o.getName().equalsIgnoreCase(ALGORITHM))
+            .collect(Collectors.toList());
+        return !(alterTableItems.isEmpty()) && alterTableItems.stream()
+            .anyMatch(o -> !supportedAlgorithms.contains(o.getValue().toString().toUpperCase()));
+    }
+
+    protected DdlAlgorithmType determineModifyColumnAlgorithmByReachedPoints(Pair<Boolean, Boolean> reachedPoints) {
+        if (reachedPoints.getKey() && reachedPoints.getValue()) {
+            return DdlAlgorithmType.INPLACE_MODIFY_CHANGE_COLUMN;
+        } else if (reachedPoints.getKey() && !reachedPoints.getValue()) {
+            return DdlAlgorithmType.IDEMPOTENT_MODIFY_CHANGE_COLUMN;
+        } else if (!reachedPoints.getKey() && reachedPoints.getValue()) {
+            return DdlAlgorithmType.COPY_MODIFY_CHANGE_COLUMN;
+        } else {
+            return DdlAlgorithmType.UNKNOWN_MODIFY_CHANGE_COLUMN_ALGORITHM;
+        }
+    }
+
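+    /**
+     * Probes which algorithm a MODIFY/CHANGE COLUMN statement would use: replays it
+     * with ALGORITHM=INPLACE on a freshly created shadow table and classifies the
+     * outcome from the trace points reached; an "ALGORITHM=INPLACE is not supported"
+     * error downgrades the statement to COPY.
+     */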
+    protected DdlAlgorithmType determineModifyColumnAlgorithm(String origSqlTemplate, Long id) {
+        DdlAlgorithmType algorithmType;
+        String shadowTableName = ShadowTableUtils.generateShadowTableName(logicalTableName, id);
+        String groupName = physicalPlanData.getDefaultDbIndex();
+        String originalTableName = physicalPlanData.getDefaultPhyTableName();
+        ShadowTableUtils.createShadowTable(executionContext, schemaName, logicalTableName, groupName, originalTableName,
+            shadowTableName);
+        SQLAlterTableStatement alterTable = (SQLAlterTableStatement) FastsqlUtils.parseSql(origSqlTemplate).get(0);
+        alterTable.setTargetImplicitTableGroup(null);
+        alterTable.getIndexTableGroupPair().clear();
+        // If COPY is specified explicitly, the algorithm is COPY.
+        if (checkAlgorithmSpecificationCopy(alterTable)) {
+            algorithmType = DdlAlgorithmType.COPY_MODIFY_CHANGE_COLUMN;
+            return algorithmType;
+        } else if (checkAlgorithmSpecificationOthers(alterTable)) {
+            algorithmType = DdlAlgorithmType.UNKNOWN_ALGORITHM;
+            return algorithmType;
+        }
+        alterTable.setTableSource(
+            new SQLExprTableSource(new SQLIdentifierExpr(SqlIdentifier.surroundWithBacktick(shadowTableName))));
+        alterTable.addItem(new MySqlAlterTableOption(ALGORITHM, INPLACE_ALGORITHM));
+        String alterTableStmt = alterTable.toString();
+        try {
+            SQLRecorderLogger.ddlLogger.info(
+                String.format("trace physical table %s with ddl %s", shadowTableName, alterTableStmt));
+            ShadowTableUtils.initTraceShadowTable(executionContext, schemaName, logicalTableName, groupName,
+                shadowTableName, id);
+            ShadowTableUtils.alterShadowTable(executionContext, schemaName, logicalTableName, groupName,
+                shadowTableName,
+                alterTableStmt);
+            Pair<Boolean, Boolean> reachedPoints =
+                ShadowTableUtils.fetchTraceTableDdl(executionContext, schemaName, logicalTableName, groupName,
+                    shadowTableName, id);
+            algorithmType = determineModifyColumnAlgorithmByReachedPoints(reachedPoints);
+        } catch (Exception exception) {
+            // If INPLACE is not supported, fall back to COPY.
+            if (exception.getMessage() != null && exception.getMessage()
+                .contains("ALGORITHM=INPLACE is not supported")) {
+                algorithmType = DdlAlgorithmType.COPY_MODIFY_CHANGE_COLUMN;
+            } else {
+                // Unknown error: propagate to the caller.
+                throw exception;
+            }
+        } finally {
+            ShadowTableUtils.clearShadowTable(executionContext, schemaName, logicalTableName, groupName,
+                shadowTableName);
+            ShadowTableUtils.finishTraceShadowTable(executionContext, schemaName, logicalTableName, groupName,
+                shadowTableName, id);
+        }
+        return algorithmType;
+    }
+
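+    // Compares two CREATE TABLE statements after a parse/re-print round trip, so that
+    // purely textual formatting differences do not count as a mismatch.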
+    protected Boolean compareTheSame(String originalCreateTableSql, String afterCreateTableSql) {
+        SQLCreateTableStatement originalCreateTableStmt = (SQLCreateTableStatement)
+            FastsqlUtils.parseSql(originalCreateTableSql).get(0);
+        SQLCreateTableStatement afterCreateTableStmt = (SQLCreateTableStatement)
+            FastsqlUtils.parseSql(afterCreateTableSql).get(0);
+        originalCreateTableSql = originalCreateTableStmt.toString();
+        afterCreateTableSql = afterCreateTableStmt.toString();
+        return originalCreateTableSql.equals(afterCreateTableSql);
+    }
+
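+    /**
+     * Shadow-table probe for ADD/DROP COLUMN or INDEX: a successful ALGORITHM=INPLACE
+     * replay classifies the statement as INPLACE, otherwise it falls back to COPY.
+     */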
+    protected DdlAlgorithmType determineOnlineDdlAlgorithm(String origSql, Long id) {
+        DdlAlgorithmType algorithmType;
+        String shadowTableName = ShadowTableUtils.generateShadowTableName(logicalTableName, id);
+        String groupName = physicalPlanData.getDefaultDbIndex();
+        String originalTableName = physicalPlanData.getDefaultPhyTableName();
+        ShadowTableUtils.createShadowTable(executionContext, schemaName, logicalTableName, groupName, originalTableName,
+            shadowTableName);
+        // If COPY is specified explicitly, the algorithm is COPY.
+        SQLAlterTableStatement alterTable = (SQLAlterTableStatement) FastsqlUtils.parseSql(origSql).get(0);
+        alterTable.setTableSource(
+            new SQLExprTableSource(new SQLIdentifierExpr(SqlIdentifier.surroundWithBacktick(shadowTableName))));
+        if (checkAlgorithmSpecificationCopy(alterTable)) {
+            algorithmType = DdlAlgorithmType.COPY_ADD_DROP_COLUMN_INDEX;
+            return algorithmType;
+        } else if (checkAlgorithmSpecificationOthers(alterTable)) {
+            algorithmType = DdlAlgorithmType.UNKNOWN_ALGORITHM;
+            return algorithmType;
+        }
+        alterTable.addItem(new MySqlAlterTableOption(ALGORITHM, INPLACE_ALGORITHM));
+        alterTable.setTargetImplicitTableGroup(null);
+        alterTable.getIndexTableGroupPair().clear();
+        String alterTableStmt = alterTable.toString();
+        try {
+            SQLRecorderLogger.ddlLogger.info(
+                String.format("trace physical table %s with ddl %s", shadowTableName, alterTableStmt));
+            ShadowTableUtils.alterShadowTable(executionContext, schemaName, logicalTableName, groupName,
+                shadowTableName,
+                alterTableStmt);
+            algorithmType = DdlAlgorithmType.INPLACE_ADD_DROP_COLUMN_INDEX;
+        } catch (Exception exception) {
+            // If INPLACE is not supported, fall back to COPY.
+            if (exception.getMessage() != null && exception.getMessage()
+                .contains("ALGORITHM=INPLACE is not supported")) {
+                algorithmType = DdlAlgorithmType.COPY_ADD_DROP_COLUMN_INDEX;
+            } else {
+                // Unknown error: propagate to the caller.
+                throw exception;
+            }
+        } finally {
+            ShadowTableUtils.clearShadowTable(executionContext, schemaName, logicalTableName, groupName,
+                shadowTableName);
+        }
+        return algorithmType;
+    }
+
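+    /**
+     * Allocates the two-phase DDL id and decides the algorithm: skips probing when
+     * two-phase DDL is disabled on the DN or by the caller, otherwise runs the
+     * shadow-table probe that matches the statement type.
+     */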
+    protected Pair<DdlAlgorithmType, Long> alterTableViaDefaultAlgorithm(Boolean generateTwoPhaseDdlTask,
+                                                                         Boolean isModifyColumn) {
+        Boolean supportTwoPhaseDdlOnDn =
+            TwoPhaseDdlManager.checkEnableTwoPhaseDdlOnDn(schemaName, logicalTableName, executionContext);
+        Long twoPhaseDdlId = TwoPhaseDdlManager.generateTwoPhaseDdlManagerId(schemaName, logicalTableName);
+        String origSqlTemplate = logicalAlterTable.getNativeSql();
+        DdlAlgorithmType algorithmType;
+        if (!supportTwoPhaseDdlOnDn || !generateTwoPhaseDdlTask) {
+            algorithmType = DdlAlgorithmType.UNKNOWN_ALGORITHM;
+        } else if (isModifyColumn) {
+            algorithmType = determineModifyColumnAlgorithm(origSqlTemplate, twoPhaseDdlId);
+        } else {
+            algorithmType = determineOnlineDdlAlgorithm(origSqlTemplate, twoPhaseDdlId);
+        }
+        return Pair.of(algorithmType, twoPhaseDdlId);
+    }
+
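+    /**
+     * Builds the two-phase DDL pipeline: INIT -> EMIT, then WAIT_PREPARE/PREPARE for
+     * INPLACE-capable statements, WAIT_COMMIT/COMMIT for both INPLACE and COPY paths,
+     * and finally LOG -> FINISH plus a compensation physical DDL. The finalStatus
+     * knob ("INIT", "EMIT", ...) stops the chain early at a named stage for testing.
+     */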
+    protected List<DdlTask> generateTwoPhaseDdlTask(DdlAlgorithmType algorithmType, Long twoPhaseDdlId) {
+        String origSqlTemplate = logicalAlterTable.getSqlAlterTable().getSourceSql();
+        int waitPreparedDelay =
+            executionContext.getParamManager().getInt(ConnectionParams.MULTI_PHASE_WAIT_PREPARED_DELAY);
+        int waitCommitDelay = executionContext.getParamManager().getInt(ConnectionParams.MULTI_PHASE_WAIT_COMMIT_DELAY);
+        int prepareDelay = executionContext.getParamManager().getInt(ConnectionParams.MULTI_PHASE_PREPARE_DELAY);
+        int commitDelay = executionContext.getParamManager().getInt(ConnectionParams.MULTI_PHASE_COMMIT_DELAY);
+//        if (nondefaultAlgorithm) {
+//            throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS,
+//                "We don't support set specified algorithm under two phase ddl,"
+//                    + " you can use /*+TDDL:CMD_EXTRA(ENABLE_DRDS_MULTI_PHASE_DDL=false)*/ to avoid this error.");
+//        }
+        List<DdlTask> taskList = new ArrayList<>();
+        final boolean preClear =
+            StringUtils.equalsIgnoreCase(finalStatus, "PRE_CLEAR");
+        final boolean stayAtInit =
+            StringUtils.equalsIgnoreCase(finalStatus, "INIT");
+        final boolean stayAtEmit =
+            StringUtils.equalsIgnoreCase(finalStatus, "EMIT");
+        final boolean stayAtWaitPrepared =
+            StringUtils.equalsIgnoreCase(finalStatus, "WAIT_PREPARE");
+        final boolean stayAtPrepare =
+            StringUtils.equalsIgnoreCase(finalStatus, "PREPARE");
+        final boolean stayAtWaitCommit =
+            StringUtils.equalsIgnoreCase(finalStatus, "WAIT_COMMIT");
+        final boolean skipFinish =
+            StringUtils.equalsIgnoreCase(finalStatus, "SKIP_FINISH");
+        final boolean stayAtCommit =
+            StringUtils.equalsIgnoreCase(finalStatus, "COMMIT");
+        Map<String, Set<String>> tableTopology = new HashMap<>();
+        for (String phyDbName : physicalPlanData.getTableTopology().keySet()) {
+            tableTopology.put(phyDbName, physicalPlanData.getTableTopology().get(phyDbName).stream()
+                .flatMap(List::stream).collect(Collectors.toSet()));
+        }
+        DdlTask clearTwoPhaseDdlTask =
+            new FinishTwoPhaseDdlTask(schemaName, logicalTableName, tableTopology, origSqlTemplate
+                , ComplexTaskMetaManager.ComplexTaskType.TWO_PHASE_ALTER_TABLE, twoPhaseDdlId);
+        DdlTask initTwoPhaseDdlTask =
+            new InitTwoPhaseDdlTask(schemaName, logicalTableName, tableTopology, origSqlTemplate
+                , ComplexTaskMetaManager.ComplexTaskType.TWO_PHASE_ALTER_TABLE, twoPhaseDdlId, new HashMap<>());
+        DdlTask emitPhysicalDdlTask =
+            new EmitPhysicalDdlTask(schemaName, logicalTableName, tableTopology, origSqlTemplate
+                , ComplexTaskMetaManager.ComplexTaskType.TWO_PHASE_ALTER_TABLE, twoPhaseDdlId, null);
+        DdlTask waitPreparedDdlTask =
+            new WaitTwoPhaseDdlTask(schemaName, logicalTableName, tableTopology,
+                TwoPhaseDdlUtils.TWO_PHASE_DDL_WAIT_PREPARE_TASK_NAME
+                , ComplexTaskMetaManager.ComplexTaskType.TWO_PHASE_ALTER_TABLE, twoPhaseDdlId, waitPreparedDelay);
+        DdlTask prepareTwoPhaseDdlTask =
+            new PrepareTwoPhaseDdlTask(schemaName, logicalTableName, tableTopology
+                , ComplexTaskMetaManager.ComplexTaskType.TWO_PHASE_ALTER_TABLE, twoPhaseDdlId, prepareDelay);
+        DdlTask waitCommitDdlTask =
+            new WaitTwoPhaseDdlTask(schemaName, logicalTableName, tableTopology,
+                TwoPhaseDdlUtils.TWO_PHASE_DDL_WAIT_COMMIT_TASK_NAME
+                , ComplexTaskMetaManager.ComplexTaskType.TWO_PHASE_ALTER_TABLE, twoPhaseDdlId, waitCommitDelay);
+        DdlTask commitTwoPhaseDdlTask =
+            new CommitTwoPhaseDdlTask(schemaName, logicalTableName, tableTopology
+                , ComplexTaskMetaManager.ComplexTaskType.TWO_PHASE_ALTER_TABLE, twoPhaseDdlId,
+                origSqlTemplate, commitDelay);
+        DdlTask finishTwoPhaseDdlTask =
+            new FinishTwoPhaseDdlTask(schemaName, logicalTableName, tableTopology, origSqlTemplate
+                , ComplexTaskMetaManager.ComplexTaskType.TWO_PHASE_ALTER_TABLE, twoPhaseDdlId);
+        DdlTask logTwoPhaseDdlTask =
+            new LogTwoPhaseDdlTask(schemaName, logicalTableName, tableTopology, origSqlTemplate
+                , ComplexTaskMetaManager.ComplexTaskType.TWO_PHASE_ALTER_TABLE, twoPhaseDdlId);
+        DdlTask compensationPhysicalDdlTask =
+            new CompensationPhyDdlTask(schemaName, logicalTableName, physicalPlanData);
+        if (preClear) {
+            taskList.add(clearTwoPhaseDdlTask);
+        }
+        taskList.add(initTwoPhaseDdlTask);
+        if (stayAtInit) {
+            return taskList;
+        }
+        taskList.add(emitPhysicalDdlTask);
+        if (stayAtEmit) {
+            return taskList;
+        }
+        if (algorithmType == DdlAlgorithmType.INPLACE_ADD_DROP_COLUMN_INDEX
+            || algorithmType == DdlAlgorithmType.INPLACE_MODIFY_CHANGE_COLUMN
+            || algorithmType == DdlAlgorithmType.IDEMPOTENT_MODIFY_CHANGE_COLUMN) {
+            taskList.add(waitPreparedDdlTask);
+            if (stayAtWaitPrepared) {
+                return taskList;
+            }
+            taskList.add(prepareTwoPhaseDdlTask);
+            if (stayAtPrepare) {
+                return taskList;
+            }
+        }
+        if (algorithmType == DdlAlgorithmType.INPLACE_MODIFY_CHANGE_COLUMN
+            || algorithmType == DdlAlgorithmType.INPLACE_ADD_DROP_COLUMN_INDEX
+            || algorithmType == DdlAlgorithmType.COPY_ADD_DROP_COLUMN_INDEX
+            || algorithmType == DdlAlgorithmType.COPY_MODIFY_CHANGE_COLUMN) {
+            taskList.add(waitCommitDdlTask);
+            if (stayAtWaitCommit) {
+                return taskList;
+            }
+            taskList.add(commitTwoPhaseDdlTask);
+            if (stayAtCommit || skipFinish) {
+                return taskList;
+            }
+        }
+        taskList.add(logTwoPhaseDdlTask);
+        taskList.add(finishTwoPhaseDdlTask);
+        taskList.add(compensationPhysicalDdlTask);
+        return taskList;
     }
 
     @Override
@@ -320,29 +737,35 @@ private boolean ignoreMarkCdcDDL() {
             return false;
         }
 
-        AlterTablePreparedData alterTablePreparedData = logicalAlterTable.getAlterTablePreparedData();
-        boolean renameIndex = false;
-        if (alterTablePreparedData != null) {
-            List<Pair<String, String>> renameIndexesList = alterTablePreparedData.getRenamedIndexes();
-            if (CollectionUtils.isNotEmpty(renameIndexesList)) {
-                renameIndex = true;
-            }
+        return this.alterGsiTable;
+    }
+
+    public void syncFkTables(List<DdlTask> taskList) {
+        List<Pair<String, String>> relatedTables = fkRelatedTables();
+        for (Pair relatedTable : relatedTables) {
+            taskList.add(new TableSyncTask(relatedTable.getKey(), relatedTable.getValue()));
         }
+    }
 
-        return this.alterGsiTable || renameIndex;
+    public void excludeFkTables(Set resources) {
+        List<Pair<String, String>> relatedTables = fkRelatedTables();
+        for (Pair relatedTable : relatedTables) {
+            resources.add(concatWithDot(relatedTable.getKey(), relatedTable.getValue()));
+        }
     }
 
-    public void syncFkTables(List<DdlTask> taskList) {
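+    // Collects the (schema, table) pairs referenced by the added/dropped foreign keys;
+    // used both for sync tasks and for DDL resource exclusion.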
+    public List<Pair<String, String>> fkRelatedTables() {
+        List<Pair<String, String>> relatedTables = new ArrayList<>();
         if (!prepareData.getAddedForeignKeys().isEmpty()) {
             ForeignKeyData data = prepareData.getAddedForeignKeys().get(0);
-            taskList.add(new TableSyncTask(data.refSchema, data.refTableName));
+            relatedTables.add(new Pair<>(data.refSchema, data.refTableName));
         }
         if (!prepareData.getDroppedForeignKeys().isEmpty()) {
             TableMeta tableMeta =
                 OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(logicalTableName);
             for (ForeignKeyData data : tableMeta.getForeignKeys().values()) {
                 if (data.constraint.equals(prepareData.getDroppedForeignKeys().get(0))) {
-                    taskList.add(new TableSyncTask(data.refSchema, data.refTableName));
+                    relatedTables.add(new Pair<>(data.refSchema, data.refTableName));
                 }
             }
         }
@@ -366,7 +789,7 @@ public void syncFkTables(List taskList) {
                 if (sync) {
                     String referencedSchemaName = e.getValue().schema;
                     String referredTableName = e.getValue().tableName;
-                    taskList.add(new TableSyncTask(referencedSchemaName, referredTableName));
+                    relatedTables.add(new Pair<>(referencedSchemaName, referredTableName));
                 }
             }
 
@@ -382,10 +805,73 @@ public void syncFkTables(List taskList) {
                         }
                     }
                     if (sync) {
-                        taskList.add(new TableSyncTask(e.getValue().refSchema, e.getValue().refTableName));
+                        relatedTables.add(new Pair<>(e.getValue().refSchema, e.getValue().refTableName));
                     }
                 }
             }
         }
+        return relatedTables;
+    }
+
+    private int fetchSize(List lst) {
+        return (lst == null) ? 0 : lst.size();
+    }
+
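+    // Runs CHECK TABLE via the server config manager and requires every returned
+    // MSG_TEXT to be "OK" before two-phase DDL is allowed.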
+    public Boolean checkTableOk(String schemaName, String tableName) {
+        String sql = String.format("check table %s", SqlIdentifier.surroundWithBacktick(tableName));
+        List<Map<String, Object>> result = DdlHelper.getServerConfigManager().executeQuerySql(
+            sql,
+            schemaName,
+            null
+        );
+        if (result.stream().allMatch(o -> o.get("MSG_TEXT").toString().equalsIgnoreCase("OK"))) {
+            return true;
+        } else {
+            SQLRecorderLogger.ddlLogger.info(
+                String.format("check table for %s.%s failed, the result is %s, we will execute ddl in legacy method.",
+                    schemaName, tableName, result));
+            return false;
+        }
+    }
+
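+    /**
+     * Two-phase DDL is attempted only for a single ADD/DROP/MODIFY item on tables
+     * without a file-store engine, local partitions, foreign keys, or CCI, and only
+     * when the optional CHECK TABLE pre-flight passes.
+     */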
+    public Boolean checkIfGenerateTwoPhaseDdl(AlterTablePreparedData prepareData) {
+        final boolean isDropColumnOrDropIndex =
+            CollectionUtils.isNotEmpty(prepareData.getDroppedColumns())
+                || CollectionUtils.isNotEmpty(prepareData.getDroppedIndexes());
+        final boolean isAddColumnOrAddIndex =
+            CollectionUtils.isNotEmpty(prepareData.getAddedColumns())
+                || CollectionUtils.isNotEmpty(prepareData.getAddedIndexes())
+                || CollectionUtils.isNotEmpty(prepareData.getAddedIndexesWithoutNames());
+        final boolean isModifyColumn =
+            CollectionUtils.isNotEmpty(prepareData.getUpdatedColumns());
+        final int alterNums =
+            fetchSize(prepareData.getDroppedColumns()) + fetchSize(prepareData.getDroppedIndexes())
+                + fetchSize(prepareData.getAddedColumns()) + fetchSize(prepareData.getAddedIndexes())
+                + fetchSize(prepareData.getAddedIndexesWithoutNames()) + fetchSize(prepareData.getUpdatedColumns());
+        // Fetch the table meta once instead of four separate lookups.
+        TableMeta tableMeta =
+            OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(logicalTableName);
+        Engine engine = tableMeta.getEngine();
+        LocalPartitionDefinitionInfo localPartitionDefinitionInfo = tableMeta.getLocalPartitionDefinitionInfo();
+        Boolean withPhysicalPartition = Engine.isFileStore(engine) || (localPartitionDefinitionInfo != null);
+        Boolean withForeignKey = tableMeta.hasForeignKey();
+        Boolean withCci = tableMeta.withCci();
+        Boolean supportTwoPhaseDdl = false;
+        Boolean checkTableOk = true;
+        Boolean checkTable = executionContext.getParamManager().getBoolean(CHECK_TABLE_BEFORE_PHY_DDL);
+        if (isAddColumnOrAddIndex || isModifyColumn || isDropColumnOrDropIndex) {
+            if (alterNums <= 1 && !withPhysicalPartition && !withForeignKey && !withCci) {
+                if (checkTable) {
+                    checkTableOk = checkTableOk(schemaName, logicalTableName);
+                }
+                if (checkTableOk) {
+                    supportTwoPhaseDdl = true;
+                }
+            }
+        }
+        return supportTwoPhaseDdl;
     }
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMergePartitionChangeSetSubJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMergePartitionChangeSetSubJobFactory.java
index eef7a4ec9..cdda8a0d2 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMergePartitionChangeSetSubJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMergePartitionChangeSetSubJobFactory.java
@@ -16,7 +16,6 @@
 
 package com.alibaba.polardbx.executor.ddl.job.factory;
 
-import com.alibaba.polardbx.common.utils.CaseInsensitive;
 import com.alibaba.polardbx.common.utils.Pair;
 import com.alibaba.polardbx.executor.ddl.job.task.changset.ChangeSetApplyFinishTask;
 import com.alibaba.polardbx.executor.ddl.job.task.shared.EmptyTask;
@@ -24,25 +23,17 @@
 import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.PhyDdlTableOperation;
-import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupBasePreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupItemPreparedData;
-import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupMergePartitionPreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableMergePartitionPreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
-import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
 import com.alibaba.polardbx.optimizer.tablegroup.AlterTableGroupSnapShotUtils;
 import org.apache.calcite.rel.core.DDL;
 import org.apache.calcite.sql.SqlAlterTable;
-import org.apache.calcite.sql.SqlAlterTableMergePartition;
-import org.apache.calcite.sql.SqlIdentifier;
 import org.apache.calcite.sql.SqlNode;
-import org.apache.calcite.util.Util;
 
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.TreeSet;
-import java.util.stream.Collectors;
 
 public class AlterTableMergePartitionChangeSetSubJobFactory extends AlterTableGroupChangeSetJobFactory {
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMergePartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMergePartitionJobFactory.java
index b6c83ff58..ecf123048 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMergePartitionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMergePartitionJobFactory.java
@@ -19,11 +19,14 @@
 import com.alibaba.polardbx.common.properties.ConnectionParams;
 import com.alibaba.polardbx.common.utils.Pair;
 import com.alibaba.polardbx.executor.ddl.job.builder.tablegroup.AlterTableMergePartitionBuilder;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableGroupAddMetaTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableGroupValidateTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.PauseCurrentJobTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.SubJobTask;
 import com.alibaba.polardbx.executor.ddl.job.task.shared.EmptyTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterTableGroupAddMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterTableGroupValidateTask;
+import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.TableGroupsSyncTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
 import com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils;
@@ -79,6 +82,8 @@ protected ExecutableDdlJob doCreate() {
             return mergeAndMoveToExistTableGroup();
         } else if (preparedData.isCreateNewTableGroup()) {
             return mergeInNewTableGroup();
+        } else if (StringUtils.isNotEmpty(preparedData.getTargetImplicitTableGroupName())) {
+            return withImplicitTableGroup(executionContext);
         } else {
             throw new RuntimeException("unexpected");
         }
@@ -98,11 +103,11 @@ private ExecutableDdlJob mergeAndMoveToExistTableGroup() {
         DdlTask validateSourceTableGroup =
             new AlterTableGroupValidateTask(schemaName,
                 sourceTableGroup, tablesVersion, false,
-                /*todo*/null);
+                /*todo*/null, false);
         DdlTask validateTargetTableGroup =
             new AlterTableGroupValidateTask(schemaName,
                 targetTableGroup, preparedData.getFirstTableVersionInTargetTableGroup(), false,
-                preparedData.getTargetPhysicalGroups());
+                preparedData.getTargetPhysicalGroups(), false);
 
         executableDdlJob.addTask(emptyTask);
         executableDdlJob.addTask(validateSourceTableGroup);
@@ -172,7 +177,7 @@ private ExecutableDdlJob mergeInNewTableGroup() {
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName,
                 preparedData.getTableGroupName(), tablesVersion, false,
-                preparedData.getTargetPhysicalGroups());
+                preparedData.getTargetPhysicalGroups(), false);
 
         SubJobTask subJobMoveTableToNewGroup =
             new SubJobTask(schemaName, String.format(SET_NEW_TABLE_GROUP, preparedData.getTableName()), null);
@@ -200,7 +205,7 @@ private ExecutableDdlJob mergeInOriginTableGroup() {
 
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName, alterTableMergePartitionPreparedData.getTableGroupName(),
-                tablesVersion, true, alterTableMergePartitionPreparedData.getTargetPhysicalGroups());
+                tablesVersion, true, alterTableMergePartitionPreparedData.getTargetPhysicalGroups(), false);
         TableGroupConfig tableGroupConfig = OptimizerContext.getContext(schemaName).getTableGroupInfoManager()
             .getTableGroupConfigByName(alterTableMergePartitionPreparedData.getTableGroupName());
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMergePartitionSubTaskJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMergePartitionSubTaskJobFactory.java
index a5c487633..c9926d578 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMergePartitionSubTaskJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMergePartitionSubTaskJobFactory.java
@@ -24,23 +24,16 @@
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupBasePreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupItemPreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupMergePartitionPreparedData;
-import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableMergePartitionPreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
-import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
 import com.alibaba.polardbx.optimizer.tablegroup.AlterTableGroupSnapShotUtils;
 import org.apache.calcite.rel.core.DDL;
 import org.apache.calcite.sql.SqlAlterTable;
 import org.apache.calcite.sql.SqlAlterTableGroup;
-import org.apache.calcite.sql.SqlAlterTableMergePartition;
-import org.apache.calcite.sql.SqlIdentifier;
 import org.apache.calcite.sql.SqlNode;
-import org.apache.calcite.util.Util;
 
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.TreeSet;
-import java.util.stream.Collectors;
 
 public class AlterTableMergePartitionSubTaskJobFactory extends AlterTableGroupSubTaskJobFactory {
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableModifyPartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableModifyPartitionJobFactory.java
index 3d2bc19f1..51be69f52 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableModifyPartitionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableModifyPartitionJobFactory.java
@@ -91,6 +91,8 @@ protected ExecutableDdlJob doCreate() {
             return modifyAndMoveToExistTableGroup();
         } else if (preparedData.isCreateNewTableGroup()) {
             return modifyInNewTableGroup();
+        } else if (StringUtils.isNotEmpty(preparedData.getTargetImplicitTableGroupName())) {
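+            // a target implicit table group was resolved for this DDL, so build the
+            // job through the shared withImplicitTableGroup path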
+            return withImplicitTableGroup(executionContext);
         } else {
             throw new RuntimeException("unexpected");
         }
@@ -107,7 +109,7 @@ protected ExecutableDdlJob modifyInOriginTableGroup() {
 
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName, alterTableGroupModifyPartitionPreparedData.getTableGroupName(),
-                tablesVersion, true, alterTableGroupModifyPartitionPreparedData.getTargetPhysicalGroups());
+                tablesVersion, true, alterTableGroupModifyPartitionPreparedData.getTargetPhysicalGroups(), false);
         TableGroupConfig tableGroupConfig = OptimizerContext.getContext(schemaName).getTableGroupInfoManager()
             .getTableGroupConfigByName(alterTableGroupModifyPartitionPreparedData.getTableGroupName());
 
@@ -206,11 +208,11 @@ protected ExecutableDdlJob modifyAndMoveToExistTableGroup() {
         DdlTask validateSourceTableGroup =
             new AlterTableGroupValidateTask(schemaName,
                 sourceTableGroup, tablesVersion, false,
-                /*todo*/null);
+                /*todo*/null, false);
         DdlTask validateTargetTableGroup =
             new AlterTableGroupValidateTask(schemaName,
                 targetTableGroup, preparedData.getFirstTableVersionInTargetTableGroup(), false,
-                preparedData.getTargetPhysicalGroups());
+                preparedData.getTargetPhysicalGroups(), false);
 
         executableDdlJob.addTask(emptyTask);
         executableDdlJob.addTask(validateSourceTableGroup);
@@ -288,7 +290,7 @@ protected ExecutableDdlJob modifyInNewTableGroup() {
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName,
                 preparedData.getTableGroupName(), tablesVersion, false,
-                preparedData.getTargetPhysicalGroups());
+                preparedData.getTargetPhysicalGroups(), false);
 
         SubJobTask subJobMoveTableToNewGroup =
             new SubJobTask(schemaName, String.format(SET_NEW_TABLE_GROUP, preparedData.getTableName()), null);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMovePartitionChangeSetSubTaskJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMovePartitionChangeSetSubTaskJobFactory.java
index 4143b0912..4217098c6 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMovePartitionChangeSetSubTaskJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMovePartitionChangeSetSubTaskJobFactory.java
@@ -24,11 +24,9 @@
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupItemPreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableMovePartitionPreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
-import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
 import com.alibaba.polardbx.optimizer.tablegroup.AlterTableGroupSnapShotUtils;
 import org.apache.calcite.rel.core.DDL;
 import org.apache.calcite.sql.SqlAlterTable;
-import org.apache.calcite.sql.SqlAlterTableMovePartition;
 import org.apache.calcite.sql.SqlNode;
 
 import java.util.List;
@@ -43,6 +41,10 @@ public AlterTableMovePartitionChangeSetSubTaskJobFactory(DDL ddl,
                                                              AlterTableMovePartitionPreparedData parentPrepareData,
                                                              AlterTableGroupItemPreparedData preparedData,
                                                              List<PhyDdlTableOperation> phyDdlTableOperations,
+                                                             List<PhyDdlTableOperation> discardTableSpaceOperations,
+                                                             Map<String, Pair<String, String>> ptbGroupMap,
+                                                             Map<String, String> sourceAndTarDnMap,
+                                                             Map<String, Pair<String, String>> storageInstAndUserInfos,
                                                              Map<String, List<List<String>>> tableTopology,
                                                              Map<String, Set<String>> targetTableTopology,
                                                              Map<String, Set<String>> sourceTableTopology,
@@ -52,10 +54,10 @@ public AlterTableMovePartitionChangeSetSubTaskJobFactory(DDL ddl,
                                                              boolean skipBackfill,
                                                              ComplexTaskMetaManager.ComplexTaskType taskType,
                                                              ExecutionContext executionContext) {
-        super(ddl, parentPrepareData, preparedData, phyDdlTableOperations, tableTopology, targetTableTopology,
-            sourceTableTopology,
-            orderedTargetTableLocations, targetPartition, skipBackfill,
-            null, null, taskType, executionContext);
+        super(ddl, parentPrepareData, preparedData, phyDdlTableOperations, discardTableSpaceOperations,
+            ptbGroupMap, sourceAndTarDnMap, storageInstAndUserInfos,
+            tableTopology, targetTableTopology, sourceTableTopology,
+            orderedTargetTableLocations, targetPartition, skipBackfill, null, null, taskType, executionContext);
         this.parentPrepareData = parentPrepareData;
     }
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMovePartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMovePartitionJobFactory.java
index 5d138b6bf..2d34988cc 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMovePartitionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMovePartitionJobFactory.java
@@ -17,10 +17,19 @@
 package com.alibaba.polardbx.executor.ddl.job.factory;
 
 import com.alibaba.polardbx.common.properties.ConnectionParams;
+import com.alibaba.polardbx.common.utils.GeneralUtil;
 import com.alibaba.polardbx.common.utils.Pair;
+import com.alibaba.polardbx.executor.balancer.Balancer;
+import com.alibaba.polardbx.executor.balancer.stats.BalanceStats;
+import com.alibaba.polardbx.executor.balancer.stats.PartitionStat;
 import com.alibaba.polardbx.executor.ddl.job.builder.tablegroup.AlterTableMovePartitionBuilder;
+import com.alibaba.polardbx.executor.ddl.job.task.CostEstimableDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.DdlBackfillCostRecordTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.ImportTableSpaceDdlNormalTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.PauseCurrentJobTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.PhysicalBackfillTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.SubJobTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.SyncLsnTask;
 import com.alibaba.polardbx.executor.ddl.job.task.shared.EmptyTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterTableGroupAddMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterTableGroupValidateTask;
@@ -30,9 +39,11 @@
 import com.alibaba.polardbx.executor.scaleout.ScaleOutUtils;
 import com.alibaba.polardbx.gms.tablegroup.PartitionGroupRecord;
 import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig;
+import com.alibaba.polardbx.gms.topology.DbTopologyManager;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
 import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager;
 import com.alibaba.polardbx.optimizer.config.table.TableMeta;
+import com.alibaba.polardbx.optimizer.context.DdlContext;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.PhyDdlTableOperation;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupItemPreparedData;
@@ -43,19 +54,32 @@
 import org.apache.commons.lang.StringUtils;
 
 import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Queue;
 import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
 
 /**
  * @author luoyanxin
  */
 public class AlterTableMovePartitionJobFactory extends AlterTableGroupBaseJobFactory {
 
+    final Map<String, List<PhyDdlTableOperation>> discardTableSpacePhysicalPlansMap;
+    final Map<String, Map<String, Pair<String, String>>> tbPtbGroupMap;
+    final Map<String, String> sourceAndTarDnMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
+    final Map<String, Pair<String, String>> storageInstAndUserInfos = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
+
     public AlterTableMovePartitionJobFactory(DDL ddl, AlterTableMovePartitionPreparedData preparedData,
                                              Map<String, AlterTableGroupItemPreparedData> tablesPrepareData,
                                              Map<String, List<PhyDdlTableOperation>> newPartitionsPhysicalPlansMap,
+                                             Map<String, List<PhyDdlTableOperation>> discardTableSpacePhysicalPlansMap,
+                                             Map<String, Map<String, Pair<String, String>>> tbPtbGroupMap,
                                              Map<String, Map<String, List<List<String>>>> tablesTopologyMap,
                                              Map<String, Map<String, Set<String>>> targetTablesTopology,
                                              Map<String, Map<String, Set<String>>> sourceTablesTopology,
@@ -64,6 +88,8 @@ public AlterTableMovePartitionJobFactory(DDL ddl, AlterTableMovePartitionPrepare
         super(ddl, preparedData, tablesPrepareData, newPartitionsPhysicalPlansMap, tablesTopologyMap,
             targetTablesTopology, sourceTablesTopology, orderedTargetTablesLocations,
             ComplexTaskMetaManager.ComplexTaskType.MOVE_PARTITION, executionContext);
+        this.discardTableSpacePhysicalPlansMap = discardTableSpacePhysicalPlansMap;
+        this.tbPtbGroupMap = tbPtbGroupMap;
     }
 
     @Override
@@ -79,6 +105,8 @@ protected ExecutableDdlJob doCreate() {
             return doMoveAndMoveToExistTableGroup();
         } else if (preparedData.isCreateNewTableGroup()) {
             return doMoveInNewTableGroup();
+        } else if (StringUtils.isNotEmpty(preparedData.getTargetImplicitTableGroupName())) {
+            return withImplicitTableGroup(executionContext);
         } else {
             throw new RuntimeException("unexpected");
         }
@@ -96,13 +124,11 @@ private ExecutableDdlJob doMoveAndMoveToExistTableGroup() {
 
         DdlTask emptyTask = new EmptyTask(schemaName);
         DdlTask validateSourceTableGroup =
-            new AlterTableGroupValidateTask(schemaName,
-                sourceTableGroup, tablesVersion, false,
-                /*todo*/null);
-        DdlTask validateTargetTableGroup =
-            new AlterTableGroupValidateTask(schemaName,
-                targetTableGroup, preparedData.getFirstTableVersionInTargetTableGroup(), false,
-                preparedData.getTargetPhysicalGroups());
+            new AlterTableGroupValidateTask(schemaName, sourceTableGroup, tablesVersion, false,
+                /*todo*/null, false);
+        DdlTask validateTargetTableGroup = new AlterTableGroupValidateTask(schemaName, targetTableGroup,
+            preparedData.getFirstTableVersionInTargetTableGroup(), false, preparedData.getTargetPhysicalGroups(),
+            false);
 
         executableDdlJob.addTask(emptyTask);
         executableDdlJob.addTask(validateSourceTableGroup);
@@ -141,13 +167,40 @@ private ExecutableDdlJob doMoveAndMoveToExistTableGroup() {
             newPartitions,
             localities);
 
+        DdlContext ddlContext = executionContext.getDdlContext();
+        DdlBackfillCostRecordTask costRecordTask = null;
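+        // for top-level jobs, record an estimated backfill cost: sum rows and disk size
+        // of the partitions being moved, taken from the collected balance stats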
+        if (ddlContext != null && !ddlContext.isSubJob()) {
+            costRecordTask = new DdlBackfillCostRecordTask(schemaName);
+            final BalanceStats balanceStats = Balancer.collectBalanceStatsOfTable(schemaName, tableName);
+            List<PartitionStat> partitionStats = balanceStats.getPartitionStats();
+            Long diskSize = 0L;
+            Long rows = 0L;
+            Set<String> partitionNamesSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
+            preparedData.getOldPartitionNames().forEach(o -> partitionNamesSet.add(o));
+            for (PartitionStat partitionStat : partitionStats) {
+                if (partitionNamesSet.contains(partitionStat.getPartitionName())) {
+                    diskSize += partitionStat.getPartitionDiskSize();
+                    rows += partitionStat.getPartitionRows();
+                }
+            }
+            costRecordTask.setCostInfo(CostEstimableDdlTask.createCostInfo(rows, diskSize, 1L));
+        }
+        if (costRecordTask != null) {
+            executableDdlJob.addTask(costRecordTask);
+        }
         executableDdlJob.addTask(addMetaTask);
-        executableDdlJob.addTaskRelationship(validateSourceTableGroup, addMetaTask);
-        executableDdlJob.addTaskRelationship(validateTargetTableGroup, addMetaTask);
+        if (costRecordTask != null) {
+            executableDdlJob.addTaskRelationship(validateSourceTableGroup, costRecordTask);
+            executableDdlJob.addTaskRelationship(validateTargetTableGroup, costRecordTask);
+            executableDdlJob.addTaskRelationship(costRecordTask, addMetaTask);
+        } else {
+            executableDdlJob.addTaskRelationship(validateSourceTableGroup, addMetaTask);
+            executableDdlJob.addTaskRelationship(validateTargetTableGroup, addMetaTask);
+        }
 
         List<DdlTask> bringUpAlterTableGroupTasks =
-            ComplexTaskFactory.bringUpAlterTableChangeTopology(schemaName, targetTableGroup, tableName,
-                taskType, executionContext);
+            ComplexTaskFactory.bringUpAlterTableChangeTopology(schemaName, targetTableGroup, tableName, taskType,
+                executionContext);
 
         final String finalStatus =
             executionContext.getParamManager().getString(ConnectionParams.TABLEGROUP_REORG_FINAL_TABLE_STATUS_DEBUG);
@@ -176,20 +229,37 @@ private ExecutableDdlJob doMoveInNewTableGroup() {
         Map<String, Long> tablesVersion = getTablesVersion();
         String schemaName = preparedData.getSchemaName();
         DdlTask validateTask =
-            new AlterTableGroupValidateTask(schemaName,
-                preparedData.getTableGroupName(), tablesVersion, false,
-                preparedData.getTargetPhysicalGroups());
+            new AlterTableGroupValidateTask(schemaName, preparedData.getTableGroupName(), tablesVersion, false,
+                preparedData.getTargetPhysicalGroups(), false);
 
         SubJobTask subJobMoveTableToNewGroup =
             new SubJobTask(schemaName, String.format(SET_NEW_TABLE_GROUP, preparedData.getTableName()), null);
-        SubJobTask subJobSplitTable = new SubJobTask(schemaName, preparedData.getSourceSql(), null);
+        SubJobTask subJobMoveTable = new SubJobTask(schemaName, preparedData.getSourceSql(), null);
+        DdlContext ddlContext = executionContext.getDdlContext();
+        CostEstimableDdlTask.CostInfo costInfo = null;
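+        // same cost estimation as above, but the cost is attached to the move sub-job
+        // itself instead of a separate DdlBackfillCostRecordTask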
+        if (ddlContext != null && !ddlContext.isSubJob()) {
+            final BalanceStats balanceStats =
+                Balancer.collectBalanceStatsOfTable(schemaName, preparedData.getTableName());
+            List<PartitionStat> partitionStats = balanceStats.getPartitionStats();
+            Long diskSize = 0L;
+            Long rows = 0L;
+            Set<String> partitionNamesSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
+            preparedData.getOldPartitionNames().forEach(o -> partitionNamesSet.add(o));
+            for (PartitionStat partitionStat : partitionStats) {
+                if (partitionNamesSet.contains(partitionStat.getPartitionName())) {
+                    diskSize += partitionStat.getPartitionDiskSize();
+                    rows += partitionStat.getPartitionRows();
+                }
+            }
+            costInfo = CostEstimableDdlTask.createCostInfo(rows, diskSize, 1L);
+        }
+        if (costInfo != null) {
+            subJobMoveTable.setCostInfo(costInfo);
+        }
         subJobMoveTableToNewGroup.setParentAcquireResource(true);
-        subJobSplitTable.setParentAcquireResource(true);
-        executableDdlJob.addSequentialTasks(Lists.newArrayList(
-            validateTask,
-            subJobMoveTableToNewGroup,
-            subJobSplitTable
-        ));
+        subJobMoveTable.setParentAcquireResource(true);
+        executableDdlJob.addSequentialTasks(
+            Lists.newArrayList(validateTask, subJobMoveTableToNewGroup, subJobMoveTable));
         return executableDdlJob;
     }
 
@@ -205,7 +275,26 @@ private ExecutableDdlJob doMoveInOriginTableGroup() {
 
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName, alterTableGroupMovePartitionPreparedData.getTableGroupName(),
-                tablesVersion, true, alterTableGroupMovePartitionPreparedData.getTargetPhysicalGroups());
+                tablesVersion, true, alterTableGroupMovePartitionPreparedData.getTargetPhysicalGroups(), false);
+        DdlContext ddlContext = executionContext.getDdlContext();
+        DdlBackfillCostRecordTask costRecordTask = null;
+        if (ddlContext != null && !ddlContext.isSubJob()) {
+            costRecordTask = new DdlBackfillCostRecordTask(schemaName);
+            final BalanceStats balanceStats = Balancer.collectBalanceStatsOfTable(schemaName, tableName);
+            List<PartitionStat> partitionStats = balanceStats.getPartitionStats();
+            Long diskSize = 0L;
+            Long rows = 0L;
+            Set<String> partitionNamesSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
+            preparedData.getOldPartitionNames().forEach(o -> partitionNamesSet.add(o));
+            for (PartitionStat partitionStat : partitionStats) {
+                if (partitionNamesSet.contains(partitionStat.getPartitionName())) {
+                    diskSize += partitionStat.getPartitionDiskSize();
+                    rows += partitionStat.getPartitionRows();
+                }
+            }
+            costRecordTask.setCostInfo(CostEstimableDdlTask.createCostInfo(rows, diskSize, 1L));
+        }
+
         TableGroupConfig tableGroupConfig = OptimizerContext.getContext(schemaName).getTableGroupInfoManager()
             .getTableGroupConfigByName(alterTableGroupMovePartitionPreparedData.getTableGroupName());
 
@@ -239,15 +328,15 @@ private ExecutableDdlJob doMoveInOriginTableGroup() {
             newPartitions,
             localities);
 
-        executableDdlJob.addSequentialTasks(Lists.newArrayList(
-            validateTask,
-            addMetaTask
-        ));
+        if (costRecordTask != null) {
+            executableDdlJob.addSequentialTasks(Lists.newArrayList(validateTask, costRecordTask, addMetaTask));
+        } else {
+            executableDdlJob.addSequentialTasks(Lists.newArrayList(validateTask, addMetaTask));
+        }
         executableDdlJob.labelAsHead(validateTask);
 
         List<DdlTask> bringUpAlterTableGroupTasks =
-            ComplexTaskFactory.bringUpAlterTableGroup(schemaName, tableGroupName, null,
-                taskType, executionContext);
+            ComplexTaskFactory.bringUpAlterTableGroup(schemaName, tableGroupName, null, taskType, executionContext);
 
         final String finalStatus =
             executionContext.getParamManager().getString(ConnectionParams.TABLEGROUP_REORG_FINAL_TABLE_STATUS_DEBUG);
@@ -285,6 +374,10 @@ public void constructSubTasks(String schemaName, ExecutableDdlJob executableDdlJ
                     (AlterTableMovePartitionPreparedData) preparedData,
                     tablesPrepareData.get(preparedData.getTableName()),
                     newPartitionsPhysicalPlansMap.get(preparedData.getTableName()),
+                    discardTableSpacePhysicalPlansMap.get(preparedData.getTableName()),
+                    tbPtbGroupMap.get(preparedData.getTableName()),
+                    sourceAndTarDnMap,
+                    storageInstAndUserInfos,
                     tablesTopologyMap.get(preparedData.getTableName()),
                     targetTablesTopology.get(preparedData.getTableName()),
                     sourceTablesTopology.get(preparedData.getTableName()),
@@ -306,6 +399,69 @@ public void constructSubTasks(String schemaName, ExecutableDdlJob executableDdlJ
         executableDdlJob.combineTasks(subTask);
         executableDdlJob.addTaskRelationship(tailTask, subTask.getHead());
 
+        SyncLsnTask syncLsnTask = null;
+        int parallelism = ScaleOutUtils.getTableGroupTaskParallelism(executionContext);
+        Queue<DdlTask> leavePipeLineQueue = new LinkedList<>();
+
+        if (preparedData.isUsePhysicalBackfill()) {
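+            // physical backfill copies data files between storage nodes directly: map
+            // every source/target group to its storage instance and add a SyncLsnTask
+            // so both sides agree on an LSN before the pipelines start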
+            Map<String, Set<String>> sourceTableTopology = sourceTablesTopology.get(preparedData.getTableName());
+            Map<String, Set<String>> targetTableTopology = targetTablesTopology.get(preparedData.getTableName());
+            Map<String, String> targetGroupAndStorageIdMap = new HashMap<>();
+            Map<String, String> sourceGroupAndStorageIdMap = new HashMap<>();
+            for (String groupName : sourceTableTopology.keySet()) {
+                sourceGroupAndStorageIdMap.put(groupName,
+                    DbTopologyManager.getStorageInstIdByGroupName(schemaName, groupName));
+            }
+            for (String groupName : targetTableTopology.keySet()) {
+                targetGroupAndStorageIdMap.put(groupName,
+                    DbTopologyManager.getStorageInstIdByGroupName(schemaName, groupName));
+            }
+
+            syncLsnTask =
+                new SyncLsnTask(schemaName, sourceGroupAndStorageIdMap, targetGroupAndStorageIdMap);
+            executableDdlJob.addTask(syncLsnTask);
+
+            for (List<DdlTask> pipeLine : GeneralUtil.emptyIfNull(subTaskJobFactory.getPhysicalyTaskPipeLine())) {
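+                // cap the number of concurrently running pipelines at `parallelism`:
+                // the first batch hangs off the SyncLsnTask, each later pipeline is
+                // chained behind the import task of an earlier one polled from the queue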
+                DdlTask parentLeaveNode;
+                if (leavePipeLineQueue.size() < parallelism) {
+                    parentLeaveNode = syncLsnTask;
+                } else {
+                    parentLeaveNode = leavePipeLineQueue.poll();
+                }
+                executableDdlJob.removeTaskRelationship(subTaskJobFactory.getBackfillTaskEdgeNodes().get(0),
+                    subTaskJobFactory.getBackfillTaskEdgeNodes().get(1));
+                executableDdlJob.addTaskRelationship(subTaskJobFactory.getBackfillTaskEdgeNodes().get(0),
+                    syncLsnTask);
+                executableDdlJob.addTaskRelationship(parentLeaveNode,
+                    pipeLine.get(0));
+                executableDdlJob.addTaskRelationship(pipeLine.get(0),
+                    pipeLine.get(1));
+
+                PhysicalBackfillTask physicalBackfillTask = (PhysicalBackfillTask) pipeLine.get(1);
+                Map<String, List<List<String>>> targetTables = new HashMap<>();
+                String tarGroupKey = physicalBackfillTask.getSourceTargetGroup().getValue();
+                String phyTableName = physicalBackfillTask.getPhysicalTableName();
+
+                targetTables.computeIfAbsent(tarGroupKey, k -> new ArrayList<>())
+                    .add(Collections.singletonList(phyTableName));
+
+                ImportTableSpaceDdlNormalTask importTableSpaceDdlNormalTask = new ImportTableSpaceDdlNormalTask(
+                    preparedData.getSchemaName(), preparedData.getTableName(),
+                    targetTables);
+
+                for (int i = 2; i < pipeLine.size(); i++) {
+                    executableDdlJob.addTaskRelationship(pipeLine.get(1),
+                        pipeLine.get(i));
+                    executableDdlJob.addTaskRelationship(pipeLine.get(i),
+                        importTableSpaceDdlNormalTask);
+                }
+                executableDdlJob.addTaskRelationship(importTableSpaceDdlNormalTask,
+                    subTaskJobFactory.getBackfillTaskEdgeNodes().get(1));
+                leavePipeLineQueue.add(importTableSpaceDdlNormalTask);
+            }
+
+        }
+
         if (subTaskJobFactory.getCdcTableGroupDdlMarkTask() != null) {
             executableDdlJob.addTask(emptyTask);
             executableDdlJob.addTask(subTaskJobFactory.getCdcTableGroupDdlMarkTask());
@@ -319,22 +475,19 @@ public void constructSubTasks(String schemaName, ExecutableDdlJob executableDdlJ
 
         if (bringUpAlterTableGroupTasks.size() > 1 && !(bringUpAlterTableGroupTasks.get(
             0) instanceof PauseCurrentJobTask)) {
-            DdlTask dropUselessTableTask = ComplexTaskFactory
-                .CreateDropUselessPhyTableTask(schemaName, preparedData.getTableName(),
-                    sourceTablesTopology.get(preparedData.getTableName()),
-                    executionContext);
+            DdlTask dropUselessTableTask =
+                ComplexTaskFactory.CreateDropUselessPhyTableTask(schemaName, preparedData.getTableName(),
+                    sourceTablesTopology.get(preparedData.getTableName()), executionContext);
             executableDdlJob.addTask(dropUselessTableTask);
             executableDdlJob.labelAsTail(dropUselessTableTask);
-            executableDdlJob
-                .addTaskRelationship(bringUpAlterTableGroupTasks.get(bringUpAlterTableGroupTasks.size() - 1),
-                    dropUselessTableTask);
+            executableDdlJob.addTaskRelationship(
+                bringUpAlterTableGroupTasks.get(bringUpAlterTableGroupTasks.size() - 1), dropUselessTableTask);
         }
         executableDdlJob.getExcludeResources().addAll(subTask.getExcludeResources());
 
     }
 
-    public static ExecutableDdlJob create(@Deprecated DDL ddl,
-                                          AlterTableMovePartitionPreparedData preparedData,
+    public static ExecutableDdlJob create(@Deprecated DDL ddl, AlterTableMovePartitionPreparedData preparedData,
                                           ExecutionContext executionContext) {
         AlterTableMovePartitionBuilder alterTableMovePartitionBuilder =
             new AlterTableMovePartitionBuilder(ddl, preparedData, executionContext);
@@ -350,9 +503,14 @@ public static ExecutableDdlJob create(@Deprecated DDL ddl,
             alterTableMovePartitionBuilder.getNewPartitionsPhysicalPlansMap();
         Map<String, Map<String, Pair<String, String>>> orderedTargetTablesLocations =
             alterTableMovePartitionBuilder.getOrderedTargetTablesLocations();
+        Map<String, List<PhyDdlTableOperation>> discardTableSpacePhysicalPlansMap =
+            alterTableMovePartitionBuilder.getDiscardTableSpacePhysicalPlansMap();
+        Map<String, Map<String, Pair<String, String>>> tbPtbGroupMap =
+            alterTableMovePartitionBuilder.getTbPtbGroupMap();
         return new AlterTableMovePartitionJobFactory(ddl, preparedData, tableGroupItemPreparedDataMap,
-            newPartitionsPhysicalPlansMap, tablesTopologyMap, targetTablesTopology, sourceTablesTopology,
-            orderedTargetTablesLocations, executionContext).create();
+            newPartitionsPhysicalPlansMap, discardTableSpacePhysicalPlansMap, tbPtbGroupMap,
+            tablesTopologyMap, targetTablesTopology, sourceTablesTopology, orderedTargetTablesLocations,
+            executionContext).create();
     }
 
     @Override
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMovePartitionSubTaskJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMovePartitionSubTaskJobFactory.java
index 832828469..945bae648 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMovePartitionSubTaskJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableMovePartitionSubTaskJobFactory.java
@@ -25,11 +25,9 @@
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupItemPreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableMovePartitionPreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
-import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
 import com.alibaba.polardbx.optimizer.tablegroup.AlterTableGroupSnapShotUtils;
 import org.apache.calcite.rel.core.DDL;
 import org.apache.calcite.sql.SqlAlterTable;
-import org.apache.calcite.sql.SqlAlterTableMovePartition;
 import org.apache.calcite.sql.SqlNode;
 
 import java.util.List;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableOnlineModifyColumnJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableOnlineModifyColumnJobFactory.java
index 644dbc85c..95ebf8df0 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableOnlineModifyColumnJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableOnlineModifyColumnJobFactory.java
@@ -388,7 +388,8 @@ protected ExecutableDdlJob doCreate() {
                 coveringGsi, gsiDbIndex, gsiPhyTableName, newColumnNullable);
         DdlTask swapColumnTableSyncTask = new TableSyncTask(schemaName, logicalTableName);
 
-        DdlTask cdcDdlMarkTask = new CdcAlterTableColumnDdlMarkTask(schemaName, physicalPlanData, true);
+        DdlTask cdcDdlMarkTask =
+            new CdcAlterTableColumnDdlMarkTask(schemaName, physicalPlanData, true, prepareData.getDdlVersionId());
 
         DdlTask stopMultiWriteTask =
             new OnlineModifyColumnStopMultiWriteTask(schemaName, logicalTableName, isChange, newColumnName,
@@ -564,6 +565,8 @@ private DdlTask genAddColumnPhyTask(String tableName, boolean isGsi) {
         alterTableStmt.getItems().clear();
         alterTableStmt.getItems().add(addColumn);
         alterTableStmt.setAfterSemi(false);
+        alterTableStmt.setTargetImplicitTableGroup(null);
+        alterTableStmt.getIndexTableGroupPair().clear();
         String addColumnSql = alterTableStmt.toString();
         alterTableStmt.getTableSource().setExpr("?");
         String addColumnSqlTemplate = alterTableStmt.toString();
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableRenamePartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableRenamePartitionJobFactory.java
index 4ae170733..a5dbf08fd 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableRenamePartitionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableRenamePartitionJobFactory.java
@@ -16,25 +16,30 @@
 
 package com.alibaba.polardbx.executor.ddl.job.factory;
 
+import com.alibaba.polardbx.common.ddl.newengine.DdlType;
 import com.alibaba.polardbx.common.properties.ConnectionParams;
 import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.SubJobTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.TablesSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcAlterTableRenamePartitionMarkTask;
 import com.alibaba.polardbx.executor.ddl.job.task.shared.EmptyTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterTableGroupRenamePartitionChangeMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterTableGroupValidateTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterTableRenamePartitionChangeMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.CleanupEmptyTableGroupTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.TableGroupSyncTask;
-import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.executor.ddl.newengine.job.TransientDdlJob;
 import com.alibaba.polardbx.gms.partition.TablePartRecordInfoContext;
 import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
 import com.alibaba.polardbx.optimizer.config.table.TableMeta;
+import com.alibaba.polardbx.optimizer.context.DdlContext;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupBasePreparedData;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupRenamePartitionPreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableRenamePartitionPreparedData;
 import com.google.common.collect.Lists;
 import org.apache.calcite.rel.core.DDL;
@@ -50,18 +55,13 @@
 /**
  * @author luoyanxin
  */
-public class AlterTableRenamePartitionJobFactory extends DdlJobFactory {
-
-    @Deprecated
-    protected final DDL ddl;
-    protected final AlterTableRenamePartitionPreparedData preparedData;
-    protected final ExecutionContext executionContext;
+public class AlterTableRenamePartitionJobFactory extends AlterTableGroupBaseJobFactory {
 
     public AlterTableRenamePartitionJobFactory(DDL ddl, AlterTableRenamePartitionPreparedData preparedData,
                                                ExecutionContext executionContext) {
-        this.preparedData = preparedData;
-        this.ddl = ddl;
-        this.executionContext = executionContext;
+        super(ddl, preparedData, null, null, null,
+            null, null, null,
+            null, executionContext);
     }
 
     @Override
@@ -71,12 +71,19 @@ protected void validate() {
 
     @Override
     protected ExecutableDdlJob doCreate() {
+        AlterTableGroupRenamePartitionPreparedData renamePartitionPreparedData =
+            (AlterTableGroupRenamePartitionPreparedData) preparedData;
+        if (renamePartitionPreparedData.isRenameNothing()) {
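+            // nothing would actually be renamed, so return a no-op TransientDdlJob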
+            return new TransientDdlJob();
+        }
         if (preparedData.isRemainInOriginalTableGroup()) {
             return renameInOriginTableGroup();
         } else if (preparedData.isMoveToExistTableGroup()) {
             return renameAndMoveToExistTableGroup();
         } else if (preparedData.isCreateNewTableGroup()) {
             return renameInNewTableGroup();
+        } else if (org.apache.commons.lang.StringUtils.isNotEmpty(preparedData.getTargetImplicitTableGroupName())) {
+            return withImplicitTableGroup(executionContext);
         } else {
             throw new RuntimeException("unexpected");
         }
@@ -90,9 +97,12 @@ protected ExecutableDdlJob renameAndMoveToExistTableGroup() {
 
         Map<String, Long> tablesVersion = getTablesVersion();
 
+        AlterTableRenamePartitionPreparedData alterTableRenamePartitionPreparedData =
+            (AlterTableRenamePartitionPreparedData) preparedData;
         DdlTask changeMetaTask = new AlterTableRenamePartitionChangeMetaTask(preparedData.getSchemaName(),
-            preparedData.getTargetTableGroup(), preparedData.getTableName(), preparedData.getChangePartitionsPair(),
-            preparedData.isSubPartitionRename());
+            preparedData.getTargetTableGroup(), preparedData.getTableName(),
+            alterTableRenamePartitionPreparedData.getChangePartitionsPair(),
+            alterTableRenamePartitionPreparedData.isSubPartitionRename());
         DdlTask syncTask = new TableSyncTask(preparedData.getSchemaName(), tablesVersion.keySet().iterator().next(),
             enablePreemptiveMdl, initWait, interval,
             TimeUnit.MILLISECONDS);
@@ -106,11 +116,11 @@ protected ExecutableDdlJob renameAndMoveToExistTableGroup() {
         DdlTask validateSourceTableGroup =
             new AlterTableGroupValidateTask(schemaName,
                 sourceTableGroup, tablesVersion, false,
-                /*todo*/null);
+                /*todo*/null, false);
         DdlTask validateTargetTableGroup =
             new AlterTableGroupValidateTask(schemaName,
                 targetTableGroup, preparedData.getFirstTableVersionInTargetTableGroup(), false,
-                preparedData.getTargetPhysicalGroups());
+                preparedData.getTargetPhysicalGroups(), false);
 
         executableDdlJob.addTask(emptyTask);
         executableDdlJob.addTask(validateSourceTableGroup);
@@ -129,25 +139,29 @@ protected ExecutableDdlJob renameAndMoveToExistTableGroup() {
         executableDdlJob.addTaskRelationship(validateSourceTableGroup, changeMetaTask);
         executableDdlJob.addTaskRelationship(validateTargetTableGroup, changeMetaTask);
 
+        DdlTask cdcAlterTableRenamePartitionMarkTask = buildCdcDdlMarkTask();
+
         executableDdlJob.addSequentialTasks(Lists.newArrayList(
+            cdcAlterTableRenamePartitionMarkTask,
             syncTask,
             cleanupEmptyTableGroupTask,
             synTargetTableGroup,
             synSourceTableGroup
         ));
-        executableDdlJob.addTaskRelationship(changeMetaTask, syncTask);
+        executableDdlJob.addTaskRelationship(changeMetaTask, cdcAlterTableRenamePartitionMarkTask);
 
         return executableDdlJob;
     }
 
     protected ExecutableDdlJob renameInNewTableGroup() {
+        executionContext.getDdlContext().setDdlType(DdlType.ALTER_TABLE_RENAME_PARTITION);
         ExecutableDdlJob executableDdlJob = new ExecutableDdlJob();
         Map<String, Long> tablesVersion = getTablesVersion();
         String schemaName = preparedData.getSchemaName();
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName,
                 preparedData.getTableGroupName(), tablesVersion, false,
-                preparedData.getTargetPhysicalGroups());
+                preparedData.getTargetPhysicalGroups(), false);
 
         SubJobTask subJobMoveTableToNewGroup = new SubJobTask(schemaName,
             String.format(AlterTableGroupBaseJobFactory.SET_NEW_TABLE_GROUP, preparedData.getTableName()), null);
@@ -174,8 +188,7 @@ protected ExecutableDdlJob renameInOriginTableGroup() {
         TableGroupConfig tableGroupConfig =
             OptimizerContext.getContext(preparedData.getSchemaName()).getTableGroupInfoManager()
                 .getTableGroupConfigByName(preparedData.getTableGroupName());
-        for (TablePartRecordInfoContext tablePartRecordInfoContext : tableGroupConfig.getAllTables()) {
-            String tableName = tablePartRecordInfoContext.getLogTbRec().getTableName();
+        for (String tableName : tableGroupConfig.getAllTables()) {
             String primaryTableName;
             TableMeta tableMeta = executionContext.getSchemaManager(preparedData.getSchemaName()).getTable(tableName);
             if (tableMeta.isGsi()) {
@@ -191,9 +204,11 @@ protected ExecutableDdlJob renameInOriginTableGroup() {
             tablesVersion.put(primaryTableName, tableMeta.getVersion());
         }
 
+        AlterTableRenamePartitionPreparedData alterTableRenamePartitionPreparedData =
+            (AlterTableRenamePartitionPreparedData) preparedData;
         DdlTask changeMetaTask = new AlterTableGroupRenamePartitionChangeMetaTask(preparedData.getSchemaName(),
-            preparedData.getTableGroupName(), preparedData.getChangePartitionsPair(),
-            preparedData.isSubPartitionRename());
+            preparedData.getTableGroupName(), alterTableRenamePartitionPreparedData.getChangePartitionsPair(),
+            alterTableRenamePartitionPreparedData.isSubPartitionRename());
         DdlTask syncTask =
             new TablesSyncTask(preparedData.getSchemaName(), logicalTableNames, enablePreemptiveMdl, initWait, interval,
                 TimeUnit.MILLISECONDS);
@@ -203,16 +218,21 @@ protected ExecutableDdlJob renameInOriginTableGroup() {
         DdlTask validateTask =
             new AlterTableGroupValidateTask(preparedData.getSchemaName(), preparedData.getTableGroupName(),
                 tablesVersion,
-                true, null);
+                true, null, false);
 
         DdlTask reloadTableGroup =
             new TableGroupSyncTask(preparedData.getSchemaName(), preparedData.getTableGroupName());
+
+        DdlTask cdcAlterTableRenamePartitionMarkTask = buildCdcDdlMarkTask();
+
         executableDdlJob.addSequentialTasks(Lists.newArrayList(
             validateTask,
             changeMetaTask,
+            cdcAlterTableRenamePartitionMarkTask,
             syncTask,
             reloadTableGroup
         ));
+
         return executableDdlJob;
     }
 
@@ -230,6 +250,9 @@ protected void excludeResources(Set resources) {
         if (preparedData.isMoveToExistTableGroup() && StringUtils.isNotBlank(preparedData.getTargetTableGroup())) {
             resources.add(concatWithDot(preparedData.getSchemaName(), preparedData.getTargetTableGroup()));
         }
+        if (StringUtils.isNotEmpty(preparedData.getTargetImplicitTableGroupName())) {
+            resources.add(concatWithDot(preparedData.getSchemaName(), preparedData.getTargetImplicitTableGroupName()));
+        }
         for (String relatedPart : preparedData.getRelatedPartitions()) {
             resources.add(concatWithDot(concatWithDot(preparedData.getSchemaName(), preparedData.getTableGroupName()),
                 relatedPart));
@@ -254,4 +277,46 @@ protected Map<String, Long> getTablesVersion() {
         return tablesVersion;
     }
 
+    private DdlTask buildCdcDdlMarkTask() {
+        boolean placeHolder;
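+        // when this rename runs as a sub-job of SET TABLEGROUP or ALTER TABLEGROUP,
+        // the CDC mark emitted here is only a placeholder; the parent job produces
+        // the visible mark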
+
+        if (executionContext.getDdlContext().isSubJob()) {
+            if (isFromSetTableGroup(executionContext) || isFromAlterTableGroup(executionContext)) {
+                placeHolder = true;
+            } else if (isFromRenamePartition(executionContext)) {
+                placeHolder = false;
+            } else {
+                DdlType parentDdlType = getRootParentDdlContext(executionContext.getDdlContext()).getDdlType();
+                throw new RuntimeException("unexpected parent ddl job , " + parentDdlType);
+            }
+        } else {
+            placeHolder = false;
+        }
+
+        return new CdcAlterTableRenamePartitionMarkTask(preparedData.getSchemaName(),
+            preparedData.getTableName(), placeHolder);
+    }
+
+    private boolean isFromSetTableGroup(ExecutionContext executionContext) {
+        DdlType parentDdlType = getRootParentDdlContext(executionContext.getDdlContext()).getDdlType();
+        return parentDdlType == DdlType.ALTER_TABLE_SET_TABLEGROUP;
+    }
+
+    private boolean isFromAlterTableGroup(ExecutionContext executionContext) {
+        DdlType parentDdlType = getRootParentDdlContext(executionContext.getDdlContext()).getDdlType();
+        return parentDdlType == DdlType.ALTER_TABLEGROUP;
+    }
+
+    private boolean isFromRenamePartition(ExecutionContext executionContext) {
+        DdlType parentDdlType = getRootParentDdlContext(executionContext.getDdlContext()).getDdlType();
+        return parentDdlType == DdlType.ALTER_TABLE_RENAME_PARTITION;
+    }
+
+    private DdlContext getRootParentDdlContext(DdlContext ddlContext) {
+        if (ddlContext.getParentDdlContext() != null) {
+            return getRootParentDdlContext(ddlContext.getParentDdlContext());
+        } else {
+            return ddlContext;
+        }
+    }
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableReorgPartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableReorgPartitionJobFactory.java
index ddee30517..816ec9ed6 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableReorgPartitionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableReorgPartitionJobFactory.java
@@ -74,6 +74,8 @@ protected ExecutableDdlJob doCreate() {
             return reorgAndMoveToExistTableGroup();
         } else if (preparedData.isCreateNewTableGroup()) {
             return reorgInNewTableGroup();
+        } else if (StringUtils.isNotEmpty(preparedData.getTargetImplicitTableGroupName())) {
+            return withImplicitTableGroup(executionContext);
         } else {
             throw new RuntimeException("unexpected");
         }
@@ -93,11 +95,11 @@ private ExecutableDdlJob reorgAndMoveToExistTableGroup() {
         DdlTask validateSourceTableGroup =
             new AlterTableGroupValidateTask(schemaName,
                 sourceTableGroup, tablesVersion, false,
-                /*todo*/null);
+                /*todo*/null, false);
         DdlTask validateTargetTableGroup =
             new AlterTableGroupValidateTask(schemaName,
                 targetTableGroup, preparedData.getFirstTableVersionInTargetTableGroup(), false,
-                preparedData.getTargetPhysicalGroups());
+                preparedData.getTargetPhysicalGroups(), false);
 
         executableDdlJob.addTask(emptyTask);
         executableDdlJob.addTask(validateSourceTableGroup);
@@ -169,7 +171,7 @@ private ExecutableDdlJob reorgInNewTableGroup() {
 
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName, preparedData.getTableGroupName(), tablesVersion, false,
-                preparedData.getTargetPhysicalGroups());
+                preparedData.getTargetPhysicalGroups(), false);
 
         SubJobTask subJobMoveTableToNewGroup =
             new SubJobTask(schemaName, String.format(SET_NEW_TABLE_GROUP, preparedData.getTableName()), null);
@@ -199,7 +201,7 @@ private ExecutableDdlJob reorgInOriginTableGroup() {
 
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName, reorgPreparedData.getTableGroupName(), tablesVersion, true,
-                reorgPreparedData.getTargetPhysicalGroups());
+                reorgPreparedData.getTargetPhysicalGroups(), false);
 
         TableGroupConfig tableGroupConfig = OptimizerContext.getContext(schemaName).getTableGroupInfoManager()
             .getTableGroupConfigByName(reorgPreparedData.getTableGroupName());
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableReorgPartitionSubTaskJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableReorgPartitionSubTaskJobFactory.java
index 215d0771a..d7fff2afb 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableReorgPartitionSubTaskJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableReorgPartitionSubTaskJobFactory.java
@@ -83,7 +83,7 @@ protected PartitionInfo generateNewPartitionInfo() {
             curPartitionInfo = oc.getPartitionInfoManager().getPartitionInfo(tableName);
         } else if (TStringUtil.isNotEmpty(tableGroupName)) {
             TableGroupConfig tableGroupConfig = oc.getTableGroupInfoManager().getTableGroupConfigByName(tableGroupName);
-            String firstTableName = tableGroupConfig.getAllTables().get(0).getTableName();
+            String firstTableName = tableGroupConfig.getAllTables().get(0);
             curPartitionInfo = oc.getPartitionInfoManager().getPartitionInfo(firstTableName);
         } else {
             throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT,
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSetTableGroupJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSetTableGroupJobFactory.java
index 2154262f5..c24daa5b8 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSetTableGroupJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSetTableGroupJobFactory.java
@@ -16,6 +16,7 @@
 
 package com.alibaba.polardbx.executor.ddl.job.factory;
 
+import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility;
 import com.alibaba.polardbx.common.exception.TddlRuntimeException;
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
 import com.alibaba.polardbx.common.properties.ConnectionParams;
@@ -27,7 +28,9 @@
 import com.alibaba.polardbx.executor.ddl.job.task.basic.SubJobTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.UpdateTablesVersionTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcAlterTableSetTableGroupMarkTask;
 import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcTableGroupDdlMarkTask;
+import com.alibaba.polardbx.executor.ddl.job.task.gsi.ValidateTableVersionTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterTableGroupAddMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterTableGroupValidateTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterTableSetGroupAddSubTaskMetaTask;
@@ -42,8 +45,6 @@
 import com.alibaba.polardbx.executor.ddl.newengine.job.TransientDdlJob;
 import com.alibaba.polardbx.executor.partitionmanagement.AlterTableGroupUtils;
 import com.alibaba.polardbx.executor.scaleout.ScaleOutUtils;
-import com.alibaba.polardbx.gms.locality.LocalityDesc;
-import com.alibaba.polardbx.gms.partition.TablePartRecordInfoContext;
 import com.alibaba.polardbx.gms.partition.TablePartitionRecord;
 import com.alibaba.polardbx.gms.tablegroup.PartitionGroupRecord;
 import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig;
@@ -53,6 +54,7 @@
 import com.alibaba.polardbx.optimizer.config.table.SchemaManager;
 import com.alibaba.polardbx.optimizer.context.DdlContext;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.planner.rule.util.CBOUtil;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableSetTableGroupPreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
@@ -63,7 +65,6 @@
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 import org.apache.calcite.rel.core.DDL;
-import org.apache.calcite.sql.SqlAlterTableSetTableGroup;
 import org.apache.calcite.sql.SqlKind;
 import org.apache.commons.lang.StringUtils;
 
@@ -137,15 +138,32 @@ protected ExecutableDdlJob doCreate() {
         JoinGroupValidateTask joinGroupValidateTask =
             new JoinGroupValidateTask(schemaName, ImmutableList.of(targetTableGroupName), tableName, false);
 
+        Map<String, Long> tableVersions = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
+
+        tableVersions.put(preparedData.getPrimaryTableName(), preparedData.getTableVersion());
+
         if (flag[1]) {
             return new TransientDdlJob();
         } else if (flag[0]) {
+
+            DdlTask validateTask = null;
+
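+            // an explicitly named table group is validated as a whole; an implicit one
+            // only needs the table's meta version checked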
+            if (StringUtils.isNotEmpty(preparedData.getTableGroupName()) && !preparedData.isImplicit()) {
+                validateTask =
+                    new AlterTableGroupValidateTask(schemaName, preparedData.getTableGroupName(), tableVersions, false,
+                        null, true);
+            } else {
+                validateTask =
+                    new ValidateTableVersionTask(schemaName, tableVersions);
+            }
+
             AlterTableSetTableGroupChangeMetaOnlyTask tableSetTableGroupChangeMetaOnlyTask =
                 new AlterTableSetTableGroupChangeMetaOnlyTask(preparedData.getSchemaName(), preparedData.getTableName(),
                     curTableGroupConfig.getTableGroupRecord().getTg_name(), preparedData.getTableGroupName(),
                     false,
                     false,
-                    preparedData.getOriginalJoinGroup());
+                    preparedData.getOriginalJoinGroup(),
+                    preparedData.isImplicit());
 
             CleanupEmptyTableGroupTask cleanupEmptyTableGroupTask =
                 new CleanupEmptyTableGroupTask(schemaName, curTableGroupConfig.getTableGroupRecord().getTg_name());
@@ -156,8 +174,8 @@ protected ExecutableDdlJob doCreate() {
             BaseDdlTask syncSourceTableGroup =
                 new TableGroupSyncTask(schemaName, curTableGroupConfig.getTableGroupRecord().getTg_name());
 
-            DdlTask updateTablesVersionTask = new UpdateTablesVersionTask(schemaName, ImmutableList.of(
-                preparedData.getPrimaryTableName()));
+            DdlTask updateTablesVersionTask =
+                new UpdateTablesVersionTask(schemaName, ImmutableList.of(preparedData.getPrimaryTableName()));
 
             boolean enablePreemptiveMdl =
                 executionContext.getParamManager().getBoolean(ConnectionParams.ENABLE_PREEMPTIVE_MDL);
@@ -166,25 +184,40 @@ protected ExecutableDdlJob doCreate() {
             // make sure the tablegroup is reloaded before the table; we can't update the table version inside TablesSyncTask
             DdlTask syncTable =
                 new TableSyncTask(schemaName, preparedData.getPrimaryTableName(), enablePreemptiveMdl, initWait,
-                    interval,
-                    TimeUnit.MILLISECONDS);
-
-            executableDdlJob.addSequentialTasks(Lists.newArrayList(
-                tableSetTableGroupChangeMetaOnlyTask,
-                cleanupEmptyTableGroupTask,
-                reloadTargetTableGroup,
-                syncSourceTableGroup,
-                updateTablesVersionTask,
-                syncTable
-            ));
+                    interval, TimeUnit.MILLISECONDS);
+
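+            // only top-level jobs add the CDC set-tablegroup mark task; sub-jobs run
+            // the same sequence without it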
+            if (!executionContext.getDdlContext().isSubJob()) {
+                boolean isGsi = isGsi(schemaName, preparedData.getPrimaryTableName(), preparedData.getTableName());
+                CdcAlterTableSetTableGroupMarkTask cdcAlterTableSetTableGroupMarkTask =
+                    new CdcAlterTableSetTableGroupMarkTask(schemaName, preparedData.getPrimaryTableName(),
+                        preparedData.getTableName(), isGsi);
+                executableDdlJob.addSequentialTasks(Lists.newArrayList(
+                    validateTask,
+                    tableSetTableGroupChangeMetaOnlyTask,
+                    cleanupEmptyTableGroupTask,
+                    reloadTargetTableGroup,
+                    syncSourceTableGroup,
+                    updateTablesVersionTask,
+                    cdcAlterTableSetTableGroupMarkTask,
+                    syncTable
+                ));
+            } else {
+                executableDdlJob.addSequentialTasks(Lists.newArrayList(
+                    validateTask,
+                    tableSetTableGroupChangeMetaOnlyTask,
+                    cleanupEmptyTableGroupTask,
+                    reloadTargetTableGroup,
+                    syncSourceTableGroup,
+                    updateTablesVersionTask,
+                    syncTable
+                ));
+            }
+
             return executableDdlJob;
         }
 
-        Map<String, Long> tableVersions = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
-
-        tableVersions.put(preparedData.getPrimaryTableName(), preparedData.getTableVersion());
         DdlTask validateTask =
-            new AlterTableGroupValidateTask(schemaName, targetTableGroupName, tableVersions, false, null);
+            new AlterTableGroupValidateTask(schemaName, targetTableGroupName, tableVersions, false, null, false);
 
         Set<Long> outdatedPartitionGroupId = new HashSet<>();
         // the old and new partition names are identical, so newPartitionRecords is used here
@@ -202,29 +235,18 @@ protected ExecutableDdlJob doCreate() {
             targetDbList.add(newRecord.getPhy_db());
             newPartitions.add(newRecord.getPartition_name());
         }
-        DdlTask addMetaTask = new AlterTableGroupAddMetaTask(schemaName,
-            targetTableGroupName,
-            curTableGroupConfig.getTableGroupRecord().getId(),
-            preparedData.getSourceSql(),
+        DdlTask addMetaTask = new AlterTableGroupAddMetaTask(schemaName, targetTableGroupName,
+            curTableGroupConfig.getTableGroupRecord().getId(), preparedData.getSourceSql(),
             ComplexTaskMetaManager.ComplexTaskStatus.DOING_REORG.getValue(),
-            ComplexTaskMetaManager.ComplexTaskType.SET_TABLEGROUP.getValue(),
-            outdatedPartitionGroupId,
-            targetDbList,
+            ComplexTaskMetaManager.ComplexTaskType.SET_TABLEGROUP.getValue(), outdatedPartitionGroupId, targetDbList,
             newPartitions);
 
         boolean skipValidator =
             executionContext.getParamManager().getBoolean(ConnectionParams.SKIP_TABLEGROUP_VALIDATOR);
         if (skipValidator) {
-            executableDdlJob.addSequentialTasks(Lists.newArrayList(
-                joinGroupValidateTask,
-                addMetaTask
-            ));
+            executableDdlJob.addSequentialTasks(Lists.newArrayList(joinGroupValidateTask, addMetaTask));
         } else {
-            executableDdlJob.addSequentialTasks(Lists.newArrayList(
-                validateTask,
-                joinGroupValidateTask,
-                addMetaTask
-            ));
+            executableDdlJob.addSequentialTasks(Lists.newArrayList(validateTask, joinGroupValidateTask, addMetaTask));
         }
         List<DdlTask> bringUpAlterTableGroupTasks =
             ComplexTaskFactory.bringUpAlterTableGroup(schemaName, targetTableGroupName, tableName,
@@ -245,25 +267,37 @@ private ExecutableDdlJob repartitionWithTableGroup() {
 
         tableVersions.put(preparedData.getPrimaryTableName(), preparedData.getTableVersion());
         String targetTableGroupName = preparedData.getTableGroupName();
-        ParamManager.setBooleanVal(executionContext.getParamManager().getProps(),
-            ConnectionParams.DDL_ON_GSI, true, false);
+        ParamManager.setBooleanVal(executionContext.getParamManager().getProps(), ConnectionParams.DDL_ON_GSI, true,
+            false);
         String schemaName = preparedData.getSchemaName();
         DdlTask validateTask =
-            new AlterTableGroupValidateTask(schemaName, targetTableGroupName, tableVersions, false, null);
+            new AlterTableGroupValidateTask(schemaName, targetTableGroupName, tableVersions, false, null, false);
 
-        SubJobTask reparitionSubJob =
-            new SubJobTask(schemaName,
-                String.format("alter table %s partition align to %s", preparedData.getTableName(),
-                    targetTableGroupName),
-                null);
+        // The SQL of this sub-job is routed to LogicalAlterTableRepartitionHandler,
+        // which notifies CDC of the changed topology meta info.
+        SubJobTask reparitionSubJob = new SubJobTask(schemaName,
+            String.format("alter table %s partition align to %s", preparedData.getTableName(), targetTableGroupName),
+            null);
         reparitionSubJob.setParentAcquireResource(true);
-        executableDdlJob.addSequentialTasks(Lists.newArrayList(
-            validateTask,
-            reparitionSubJob
-        ));
+        executableDdlJob.addSequentialTasks(Lists.newArrayList(validateTask, reparitionSubJob));
+
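+        // As in doCreate(): only the top-level job appends the CDC set-tablegroup mark.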
+        if (!executionContext.getDdlContext().isSubJob()) {
+            boolean isGsi = isGsi(schemaName, preparedData.getPrimaryTableName(), preparedData.getTableName());
+
+            CdcAlterTableSetTableGroupMarkTask cdcAlterTableSetTableGroupMarkTask =
+                new CdcAlterTableSetTableGroupMarkTask(schemaName, preparedData.getPrimaryTableName(),
+                    preparedData.getTableName(), isGsi);
+            executableDdlJob.appendTask(cdcAlterTableSetTableGroupMarkTask);
+        }
+
         return executableDdlJob;
     }
 
+    private boolean isGsi(String schemaName, String primaryTableName, String tableName) {
+        return !StringUtils.equalsIgnoreCase(primaryTableName, tableName)
+            && CBOUtil.isGsi(schemaName, tableName);
+    }
+
     private ExecutableDdlJob alignPartitionNameAndSetTableGroup() {
         ExecutableDdlJob executableDdlJob = new ExecutableDdlJob();
         Map<String, Long> tableVersions = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
@@ -273,11 +307,10 @@ private ExecutableDdlJob alignPartitionNameAndSetTableGroup() {
 
         String schemaName = preparedData.getSchemaName();
         DdlTask validateTask =
-            new AlterTableGroupValidateTask(schemaName, targetTableGroupName, tableVersions, false, null);
+            new AlterTableGroupValidateTask(schemaName, targetTableGroupName, tableVersions, false, null, false);
 
         if (GeneralUtil.isEmpty(preparedData.getPartitionNamesMap())) {
-            throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT,
-                "unexpect error");
+            throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT, "unexpected error: partitionNamesMap is empty");
         }
 
         StringBuilder sb = new StringBuilder();
@@ -294,16 +327,30 @@ private ExecutableDdlJob alignPartitionNameAndSetTableGroup() {
             sb.append("`");
             i++;
         }
+
+        // The SQL of this sub-job is routed to LogicalAlterTableRenamePartitionHandler
+        // and does not trigger a CDC DDL mark.
         SubJobTask subJobAlignPartitionName = new SubJobTask(schemaName,
             String.format("alter table %s rename partition %s", preparedData.getTableName(), sb), null);
+
+        // The SQL of this sub-job is routed to LogicalAlterTableSetTableGroupHandler,
+        // which notifies CDC of the changed topology meta info.
         SubJobTask subJobSetTableGroup = new SubJobTask(schemaName, preparedData.getSourceSql(), null);
+
         subJobAlignPartitionName.setParentAcquireResource(true);
         subJobSetTableGroup.setParentAcquireResource(true);
-        executableDdlJob.addSequentialTasks(Lists.newArrayList(
-            validateTask,
-            subJobAlignPartitionName,
-            subJobSetTableGroup
-        ));
+        executableDdlJob.addSequentialTasks(
+            Lists.newArrayList(validateTask, subJobAlignPartitionName, subJobSetTableGroup));
+
+        if (!executionContext.getDdlContext().isSubJob()) {
+            boolean isGsi = isGsi(schemaName, preparedData.getPrimaryTableName(), preparedData.getTableName());
+
+            CdcAlterTableSetTableGroupMarkTask cdcAlterTableSetTableGroupMarkTask =
+                new CdcAlterTableSetTableGroupMarkTask(schemaName, preparedData.getPrimaryTableName(),
+                    preparedData.getTableName(), isGsi);
+            executableDdlJob.appendTask(cdcAlterTableSetTableGroupMarkTask);
+        }
+
         return executableDdlJob;
     }
 
@@ -332,10 +379,9 @@ protected void constructSubTasks(String schemaName, PartitionInfo curPartitionIn
         PartitionInfo newPartitionInfo = generateNewPartitionInfo();
         TablePartitionRecord logTableRec = PartitionInfoUtil.prepareRecordForLogicalTable(newPartitionInfo);
         logTableRec.partStatus = TablePartitionRecord.PARTITION_STATUS_LOGICAL_TABLE_PUBLIC;
-        List partRecList =
-            PartitionInfoUtil.prepareRecordForAllPartitions(newPartitionInfo);
-        Map> subPartRecInfos = PartitionInfoUtil
-            .prepareRecordForAllSubpartitions(partRecList, newPartitionInfo,
+        List<TablePartitionRecord> partRecList = PartitionInfoUtil.prepareRecordForAllPartitions(newPartitionInfo);
+        Map<String, List<TablePartitionRecord>> subPartRecInfos =
+            PartitionInfoUtil.prepareRecordForAllSubpartitions(partRecList, newPartitionInfo,
                 newPartitionInfo.getPartitionBy().getPartitions());
 
         OptimizerContext oc =
@@ -343,11 +389,10 @@ protected void constructSubTasks(String schemaName, PartitionInfo curPartitionIn
         TableGroupConfig curTableGroupConfig =
             oc.getTableGroupInfoManager().getTableGroupConfigById(curPartitionInfo.getTableGroupId());
         //DdlTask validateTask = new AlterTableGroupValidateTask(schemaName, preparedData.getTableGroupName());
-        DdlTask addMetaTask =
-            new AlterTableSetGroupAddSubTaskMetaTask(schemaName, tableName,
-                curTableGroupConfig.getTableGroupRecord().tg_name, curPartitionInfo.getTableGroupId(), "",
-                ComplexTaskMetaManager.ComplexTaskStatus.CREATING.getValue(), 0, logTableRec, partRecList,
-                subPartRecInfos, preparedData.getTableGroupName(), preparedData.getOriginalJoinGroup());
+        DdlTask addMetaTask = new AlterTableSetGroupAddSubTaskMetaTask(schemaName, tableName,
+            curTableGroupConfig.getTableGroupRecord().tg_name, curPartitionInfo.getTableGroupId(), "",
+            ComplexTaskMetaManager.ComplexTaskStatus.CREATING.getValue(), 0, logTableRec, partRecList, subPartRecInfos,
+            preparedData.getTableGroupName(), preparedData.getOriginalJoinGroup());
 
         List<DdlTask> taskList = new ArrayList<>();
         //1. validate
@@ -372,10 +417,10 @@ protected void constructSubTasks(String schemaName, PartitionInfo curPartitionIn
         final boolean stayAtWriteReOrg =
             StringUtils.equalsIgnoreCase(ComplexTaskMetaManager.ComplexTaskStatus.WRITE_REORG.name(), finalStatus);
 
-        List bringUpNewPartitions = ComplexTaskFactory
-            .addPartitionTasks(schemaName, tableName, sourceTableTopology, targetTableTopology, stayAtCreating,
-                stayAtDeleteOnly, stayAtWriteOnly, stayAtWriteReOrg, false,
-                executionContext, false, ComplexTaskMetaManager.ComplexTaskType.SET_TABLEGROUP);
+        List<DdlTask> bringUpNewPartitions =
+            ComplexTaskFactory.addPartitionTasks(schemaName, tableName, sourceTableTopology, targetTableTopology,
+                stayAtCreating, stayAtDeleteOnly, stayAtWriteOnly, stayAtWriteReOrg, false, executionContext, false,
+                ComplexTaskMetaManager.ComplexTaskType.SET_TABLEGROUP);
         //3.2 status: CREATING -> DELETE_ONLY -> WRITE_ONLY -> WRITE_REORG -> READY_TO_PUBLIC
         taskList.addAll(bringUpNewPartitions);
 
@@ -397,17 +442,20 @@ protected void constructSubTasks(String schemaName, PartitionInfo curPartitionIn
 
         Map> newTopology = newPartitionInfo.getTopology();
 
+        CdcDdlMarkVisibility cdcDdlMarkVisibility =
+            (executionContext.getDdlContext().isSubJob()) ? CdcDdlMarkVisibility.Private :
+                CdcDdlMarkVisibility.Protected;
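+        // A sub-job marks the DDL as Private, a top-level job as Protected; presumably
+        // this keeps sub-job marks invisible to external CDC consumers (assumption).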
         CdcTableGroupDdlMarkTask cdcTableGroupDdlMarkTask =
             new CdcTableGroupDdlMarkTask(preparedData.getTableGroupName(), schemaName, tableName, sqlKind, newTopology,
-                dc.getDdlStmt());
+                dc.getDdlStmt(), cdcDdlMarkVisibility);
 
         executableDdlJob.addTask(cdcTableGroupDdlMarkTask);
         executableDdlJob.addTaskRelationship(taskList.get(taskList.size() - 1), cdcTableGroupDdlMarkTask);
-        executableDdlJob.addTaskRelationship(cdcTableGroupDdlMarkTask,
-            bringUpAlterTableGroupTasks.get(0));
+        executableDdlJob.addTaskRelationship(cdcTableGroupDdlMarkTask, bringUpAlterTableGroupTasks.get(0));
 
-        DdlTask dropUselessTableTask = ComplexTaskFactory
-            .CreateDropUselessPhyTableTask(schemaName, tableName, sourceTableTopology, executionContext);
+        DdlTask dropUselessTableTask =
+            ComplexTaskFactory.CreateDropUselessPhyTableTask(schemaName, tableName, sourceTableTopology,
+                executionContext);
         executableDdlJob.addTask(dropUselessTableTask);
         executableDdlJob.addTaskRelationship(bringUpAlterTableGroupTasks.get(bringUpAlterTableGroupTasks.size() - 1),
             dropUselessTableTask);
@@ -421,19 +469,8 @@ private PartitionInfo generateNewPartitionInfo() {
         PartitionInfo curPartitionInfo =
             OptimizerContext.getContext(schemaName).getPartitionInfoManager().getPartitionInfo(tableName);
 
-        return AlterTableGroupSnapShotUtils
-            .getNewPartitionInfo(
-                null,
-                curPartitionInfo,
-                false,
-                ddl.getSqlNode(),
-                null,
-                null,
-                null,
-                null,
-                null,
-                null,
-                executionContext);
+        return AlterTableGroupSnapShotUtils.getNewPartitionInfo(null, curPartitionInfo, false, ddl.getSqlNode(), null,
+            null, null, null, null, null, executionContext);
     }
 
     private void changeMetaInfoCheck(boolean[] flag) {
@@ -465,12 +502,12 @@ private void changeMetaInfoCheck(boolean[] flag) {
             // do nothing;
             flag[0] = false;
             flag[1] = true;
-        } else if (GeneralUtil.isEmpty(targetTableGroupConfig.getAllTables())) {
+        } else if ((targetTableGroupConfig != null && GeneralUtil.isEmpty(targetTableGroupConfig.getAllTables()))
+            || (preparedData.isImplicit() && targetTableGroupConfig == null)) {
             flag[0] = true;
             flag[1] = false;
         } else {
-            TablePartRecordInfoContext tablePartRecordInfoContext = targetTableGroupConfig.getAllTables().get(0);
-            String tableInTbGrp = tablePartRecordInfoContext.getLogTbRec().tableName;
+            String tableInTbGrp = targetTableGroupConfig.getAllTables().get(0);
             PartitionInfo targetPartitionInfo = schemaManager.getTable(tableInTbGrp).getPartitionInfo();
 
             PartitionStrategy strategy = sourcePartitionInfo.getPartitionBy().getStrategy();
@@ -478,10 +515,9 @@ private void changeMetaInfoCheck(boolean[] flag) {
                 (strategy == PartitionStrategy.KEY || strategy == PartitionStrategy.RANGE_COLUMNS);
             boolean match = false;
             if (isVectorStrategy) {
-                if (PartitionInfoUtil
-                    .actualPartColsEquals(sourcePartitionInfo, targetPartitionInfo,
-                        PartitionInfoUtil.fetchAllLevelMaxActualPartColsFromPartInfos(sourcePartitionInfo,
-                            targetPartitionInfo))) {
+                if (PartitionInfoUtil.actualPartColsEquals(sourcePartitionInfo, targetPartitionInfo,
+                    PartitionInfoUtil.fetchAllLevelMaxActualPartColsFromPartInfos(sourcePartitionInfo,
+                        targetPartitionInfo))) {
                     match = true;
                 }
             } else if (sourcePartitionInfo.equals(targetPartitionInfo)) {
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSplitPartitionByHotValueJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSplitPartitionByHotValueJobFactory.java
index 622b4f7cb..0716a434e 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSplitPartitionByHotValueJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSplitPartitionByHotValueJobFactory.java
@@ -81,6 +81,8 @@ protected ExecutableDdlJob doCreate() {
             return splitAndMoveToExistTableGroup();
         } else if (preparedData.isCreateNewTableGroup()) {
             return splitInNewTableGroup();
+        } else if (org.apache.commons.lang.StringUtils.isNotEmpty(preparedData.getTargetImplicitTableGroupName())) {
+            return withImplicitTableGroup(executionContext);
         } else {
             throw new RuntimeException("unexpected");
         }
@@ -96,7 +98,7 @@ private ExecutableDdlJob splitInOriginTableGroup() {
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName,
                 preparedData.getTableGroupName(), tablesVersion, true,
-                preparedData.getTargetPhysicalGroups());
+                preparedData.getTargetPhysicalGroups(), false);
 
         Set<Long> outdatedPartitionGroupId =
             getOldDatePartitionGroups(preparedData, preparedData.getOldPartitionNames(),
@@ -178,11 +180,11 @@ private ExecutableDdlJob splitAndMoveToExistTableGroup() {
         DdlTask validateSourceTableGroup =
             new AlterTableGroupValidateTask(schemaName,
                 sourceTableGroup, tablesVersion, false,
-                /*todo*/null);
+                /*todo*/null, false);
         DdlTask validateTargetTableGroup =
             new AlterTableGroupValidateTask(schemaName,
                 targetTableGroup, preparedData.getFirstTableVersionInTargetTableGroup(), false,
-                preparedData.getTargetPhysicalGroups());
+                preparedData.getTargetPhysicalGroups(), false);
 
         executableDdlJob.addTask(emptyTask);
         executableDdlJob.addTask(validateSourceTableGroup);
@@ -261,7 +263,7 @@ private ExecutableDdlJob splitInNewTableGroup() {
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName,
                 preparedData.getTableGroupName(), tablesVersion, false,
-                preparedData.getTargetPhysicalGroups());
+                preparedData.getTargetPhysicalGroups(), false);
 
         SubJobTask subJobMoveTableToNewGroup =
             new SubJobTask(schemaName, String.format(SET_NEW_TABLE_GROUP, preparedData.getTableName()), null);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSplitPartitionByHotValueSubTaskJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSplitPartitionByHotValueSubTaskJobFactory.java
index 09003780f..86b7becc5 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSplitPartitionByHotValueSubTaskJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSplitPartitionByHotValueSubTaskJobFactory.java
@@ -27,7 +27,6 @@
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupItemPreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableSplitPartitionByHotValuePreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
-import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
 import com.alibaba.polardbx.optimizer.tablegroup.AlterTableGroupSnapShotUtils;
 import org.apache.calcite.rel.core.DDL;
 import org.apache.calcite.sql.SqlAlterTable;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSplitPartitionChangeSetSubJobTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSplitPartitionChangeSetSubJobTask.java
index df9485476..58d67d400 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSplitPartitionChangeSetSubJobTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSplitPartitionChangeSetSubJobTask.java
@@ -26,11 +26,9 @@
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupItemPreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableSplitPartitionPreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
-import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
 import com.alibaba.polardbx.optimizer.tablegroup.AlterTableGroupSnapShotUtils;
 import org.apache.calcite.rel.core.DDL;
 import org.apache.calcite.sql.SqlAlterTable;
-import org.apache.calcite.sql.SqlAlterTableSplitPartition;
 import org.apache.calcite.sql.SqlNode;
 
 import java.util.List;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSplitPartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSplitPartitionJobFactory.java
index 97e4275a4..997222239 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSplitPartitionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSplitPartitionJobFactory.java
@@ -84,6 +84,8 @@ protected ExecutableDdlJob doCreate() {
             return splitAndMoveToExistTableGroup();
         } else if (preparedData.isCreateNewTableGroup()) {
             return splitInNewTableGroup();
+        } else if (org.apache.commons.lang.StringUtils.isNotEmpty(preparedData.getTargetImplicitTableGroupName())) {
+            return withImplicitTableGroup(executionContext);
         } else {
             throw new RuntimeException("unexpected");
         }
@@ -104,11 +106,11 @@ private ExecutableDdlJob splitAndMoveToExistTableGroup() {
         DdlTask validateSourceTableGroup =
             new AlterTableGroupValidateTask(schemaName,
                 sourceTableGroup, tablesVersion, false,
-                /*todo*/null);
+                /*todo*/null, false);
         DdlTask validateTargetTableGroup =
             new AlterTableGroupValidateTask(schemaName,
                 targetTableGroup, preparedData.getFirstTableVersionInTargetTableGroup(), false,
-                preparedData.getTargetPhysicalGroups());
+                preparedData.getTargetPhysicalGroups(), false);
 
         executableDdlJob.addTask(emptyTask);
         executableDdlJob.addTask(validateSourceTableGroup);
@@ -124,7 +126,7 @@ private ExecutableDdlJob splitAndMoveToExistTableGroup() {
         List<String> targetDbList = new ArrayList<>();
         List<String> localities = new ArrayList<>();
         AlterTableGroupSplitPartitionPreparedData splitData = (AlterTableGroupSplitPartitionPreparedData) preparedData;
-        String firstTable = tableGroupConfig.getAllTables().get(0).getTableName();
+        String firstTable = tableGroupConfig.getAllTables().get(0);
         PartitionInfo partitionInfo =
             executionContext.getSchemaManager(preparedData.getSchemaName()).getTable(firstTable).getPartitionInfo();
         List newPartitions = getNewPartitions(partitionInfo);
@@ -188,7 +190,7 @@ private ExecutableDdlJob splitInNewTableGroup() {
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName,
                 preparedData.getTableGroupName(), tablesVersion, false,
-                preparedData.getTargetPhysicalGroups());
+                preparedData.getTargetPhysicalGroups(), false);
 
         SubJobTask subJobMoveTableToNewGroup =
             new SubJobTask(schemaName, String.format(SET_NEW_TABLE_GROUP, preparedData.getTableName()), null);
@@ -214,7 +216,7 @@ private ExecutableDdlJob splitInOriginTableGroup() {
 
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName, alterTableSplitPartitionPreparedData.getTableGroupName(),
-                tablesVersion, true, alterTableSplitPartitionPreparedData.getTargetPhysicalGroups());
+                tablesVersion, true, alterTableSplitPartitionPreparedData.getTargetPhysicalGroups(), false);
         TableGroupConfig tableGroupConfig = OptimizerContext.getContext(schemaName).getTableGroupInfoManager()
             .getTableGroupConfigByName(alterTableSplitPartitionPreparedData.getTableGroupName());
 
@@ -225,7 +227,7 @@ private ExecutableDdlJob splitInOriginTableGroup() {
         List<String> targetDbList = new ArrayList<>();
         int targetDbCnt = alterTableSplitPartitionPreparedData.getTargetGroupDetailInfoExRecords().size();
 
-        String firstTable = tableGroupConfig.getAllTables().get(0).getTableName();
+        String firstTable = tableGroupConfig.getAllTables().get(0);
         PartitionInfo partitionInfo =
             executionContext.getSchemaManager(preparedData.getSchemaName()).getTable(firstTable).getPartitionInfo();
         List newPartitions = getNewPartitions(partitionInfo);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSplitPartitionSubTaskJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSplitPartitionSubTaskJobFactory.java
index c199f0aac..aa15a9297 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSplitPartitionSubTaskJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableSplitPartitionSubTaskJobFactory.java
@@ -25,11 +25,9 @@
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupItemPreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableSplitPartitionPreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
-import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil;
 import com.alibaba.polardbx.optimizer.tablegroup.AlterTableGroupSnapShotUtils;
 import org.apache.calcite.rel.core.DDL;
 import org.apache.calcite.sql.SqlAlterTable;
-import org.apache.calcite.sql.SqlAlterTableSplitPartition;
 import org.apache.calcite.sql.SqlNode;
 
 import java.util.List;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableWithFileStoreJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableWithFileStoreJobFactory.java
index c67839ac9..b6263658b 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableWithFileStoreJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/AlterTableWithFileStoreJobFactory.java
@@ -50,6 +50,8 @@
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
+import static com.alibaba.polardbx.common.cdc.ICdcManager.DEFAULT_DDL_VERSION_ID;
+
 /**
  * alter table which binding to an oss table
  */
@@ -115,7 +117,7 @@ protected ExecutableDdlJob doCreate() {
         physicalPlanData.setAlterTablePreparedData(prepareData);
         physicalPlanFileStoreData.setAlterTablePreparedData(preparedFileStoreData);
         DdlTask cdcDdlMarkTask = this.prepareData.isOnlineModifyColumnIndexTask() ? null :
-            new CdcDdlMarkTask(schemaName, physicalPlanData, false, false);
+            new CdcDdlMarkTask(schemaName, physicalPlanData, false, false, DEFAULT_DDL_VERSION_ID);
 
         List<String> schemas = Lists.newArrayList(schemaName, fileStoreSchema);
         List<String> tables = Lists.newArrayList(logicalTableName, fileStoreTable);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/ClearFileStorageJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/ClearFileStorageJobFactory.java
new file mode 100644
index 000000000..bb4c7faa1
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/ClearFileStorageJobFactory.java
@@ -0,0 +1,119 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.factory;
+
+import com.alibaba.polardbx.common.Engine;
+import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException;
+import com.alibaba.polardbx.executor.common.RecycleBin;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.CloseFileStorageTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.OSSTaskUtils;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.UnBindingArchiveTableMetaByArchiveTableTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.gms.engine.FileSystemGroup;
+import com.alibaba.polardbx.gms.engine.FileSystemManager;
+import com.alibaba.polardbx.gms.metadb.MetaDbDataSource;
+import com.alibaba.polardbx.gms.metadb.table.TableInfoManager;
+import com.alibaba.polardbx.gms.metadb.table.TablesRecord;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.data.ClearFileStoragePreparedData;
+
+import java.sql.Connection;
+import java.sql.Timestamp;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+public class ClearFileStorageJobFactory extends DdlJobFactory {
+
+    private final ExecutionContext executionContext;
+    private final ClearFileStoragePreparedData clearFileStoragePreparedData;
+
+    public ClearFileStorageJobFactory(ClearFileStoragePreparedData clearFileStoragePreparedData,
+                                      ExecutionContext executionContext) {
+        this.executionContext = executionContext;
+        this.clearFileStoragePreparedData = clearFileStoragePreparedData;
+    }
+
+    @Override
+    protected void validate() {
+        Engine engine = Engine.of(clearFileStoragePreparedData.getFileStorageName());
+
+        if (!Engine.isFileStore(engine)) {
+            throw new TddlNestableRuntimeException(engine.name() + " is not a file storage engine");
+        }
+
+        // validate file storage exists
+        FileSystemGroup fileSystemGroup = FileSystemManager.getFileSystemGroup(engine, false);
+        if (fileSystemGroup == null) {
+            throw new TddlNestableRuntimeException("file storage " + engine.name() + " is not exists ");
+        }
+    }
+
+    @Override
+    protected ExecutableDdlJob doCreate() {
+        Engine engine = Engine.of(clearFileStoragePreparedData.getFileStorageName());
+
+        ExecutableDdlJob executableDdlJob = new ExecutableDdlJob();
+        List<DdlTask> taskList = new ArrayList<>();
+
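+        // First purge everything already in this engine's recycle bin, up to the
+        // current timestamp, before unbinding and dropping the remaining cold-data tables.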
+        taskList.addAll(AlterFileStoragePurgeBeforeTimestampJobFactory.buildRecycleBinPurgeBeforeTimestamp(engine,
+            new Timestamp(System.currentTimeMillis()), executionContext));
+
+        List<TablesRecord> tablesRecordList;
+        try (Connection metaConn = MetaDbDataSource.getInstance().getConnection()) {
+            TableInfoManager tableInfoManager = new TableInfoManager();
+            tableInfoManager.setConnection(metaConn);
+            tablesRecordList = tableInfoManager.queryTablesByEngineAndTableType(engine.name(), "ORC TABLE");
+        } catch (Throwable t) {
+            throw new TddlNestableRuntimeException(t);
+        }
+
+        // drop cold data table
+        for (TablesRecord tablesRecord : tablesRecordList) {
+            String logicalSchemaName = tablesRecord.tableSchema;
+            String logicalTableName = tablesRecord.tableName;
+
+            if (logicalTableName.startsWith(RecycleBin.FILE_STORAGE_PREFIX)) {
+                continue;
+            }
+
+            taskList.add(new UnBindingArchiveTableMetaByArchiveTableTask(logicalSchemaName, logicalTableName));
+
+            taskList.addAll(
+                OSSTaskUtils.dropTableTasks(engine, logicalSchemaName, logicalTableName, true, executionContext));
+        }
+
+        // only clean up cold data metadata
+        taskList.add(new CloseFileStorageTask(engine.name(), true));
+
+        executableDdlJob.addSequentialTasks(taskList);
+
+        return executableDdlJob;
+    }
+
+    @Override
+    protected void excludeResources(Set<String> resources) {
+
+    }
+
+    @Override
+    protected void sharedResources(Set<String> resources) {
+
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/ComplexTaskFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/ComplexTaskFactory.java
index cb8394908..113d6635a 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/ComplexTaskFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/ComplexTaskFactory.java
@@ -164,8 +164,7 @@ public static List addPartitionTasks(String schemaName,
         if (!skipBackFill) {
             taskList
                 .add(new AlterTableGroupBackFillTask(schemaName, logicalTableName, sourcePhyTables, targetPhyTables,
-
-                    isBroadcast, ComplexTaskMetaManager.ComplexTaskType.MOVE_PARTITION == taskType, false));
+                    isBroadcast, ComplexTaskMetaManager.ComplexTaskType.MOVE_PARTITION == taskType, false, false));
         }
         taskList.add(writeReOrgTask);
         taskList.add(
@@ -195,8 +194,7 @@ public static List bringUpAlterTableGroup(String schemaName,
         TableGroupConfig tableGroupConfig = OptimizerContext.getContext(schemaName).getTableGroupInfoManager()
             .getTableGroupConfigByName(tableGroupName);
         if (complexTaskType != ComplexTaskMetaManager.ComplexTaskType.SET_TABLEGROUP) {
-            for (TablePartRecordInfoContext tablePartRecordInfoContext : tableGroupConfig.getAllTables()) {
-                String logicalTable = tablePartRecordInfoContext.getLogTbRec().getTableName();
+            for (String logicalTable : tableGroupConfig.getAllTables()) {
                 TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(logicalTable);
                 if (tableMeta.isGsi()) {
                     // all GSI table version changes are handled via the primary table
@@ -610,7 +608,7 @@ public static List moveTableTasks(String schemaName,
 
         taskList
             .add(new MoveTableBackFillTask(schemaName, logicalTableName, sourcePhyTables, targetPhyTables,
-                sourceAndTargetGroupMap, false));
+                sourceAndTargetGroupMap, false, false));
         taskList.add(writeReOrgTask);
         taskList.add(
             new TableSyncTask(schemaName, relatedTables.get(0), enablePreemptiveMdl, initWait, interval,
@@ -649,7 +647,8 @@ public static DdlTask CreateDropUselessPhyTableTask(String schemaName, String lo
         List<PhyDdlTableOperation> physicalPlans = dropPhyTableBuilder.getPhysicalPlans();
         physicalPlans.forEach(o -> o.setPartitionInfo(partitionInfo));
 
-        PhysicalPlanData physicalPlanData = DdlJobDataConverter.convertToPhysicalPlanData(tableTopology, physicalPlans);
+        PhysicalPlanData physicalPlanData =
+            DdlJobDataConverter.convertToPhysicalPlanData(tableTopology, physicalPlans, executionContext);
 
         return new DropTablePhyDdlTask(schemaName, physicalPlanData);
     }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/ConvertAllSequencesJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/ConvertAllSequencesJobFactory.java
new file mode 100644
index 000000000..4f8783949
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/ConvertAllSequencesJobFactory.java
@@ -0,0 +1,92 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.factory;
+
+import com.alibaba.polardbx.common.constants.SequenceAttribute;
+import com.alibaba.polardbx.common.constants.SequenceAttribute.Type;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.ConvertAllSequenceValidateTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.ConvertSequenceInSchemasTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcLogicalSequenceMarkTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.gms.topology.SystemDbHelper;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.google.common.collect.ImmutableList;
+import org.apache.calcite.sql.SqlKind;
+
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Created by zhuqiwei.
+ *
+ * @author zhuqiwei
+ */
+public class ConvertAllSequencesJobFactory extends DdlJobFactory {
+    final List<String> schemaNames;
+    final Type fromType;
+    final Type toType;
+    final boolean onlySingleSchema;
+
+    final ExecutionContext executionContext;
+
+    public ConvertAllSequencesJobFactory(List<String> schemaNames, Type fromType, Type toType, boolean onlySingleSchema,
+                                         ExecutionContext executionContext) {
+        this.schemaNames = schemaNames;
+        this.fromType = fromType;
+        this.toType = toType;
+        this.onlySingleSchema = onlySingleSchema;
+        this.executionContext = executionContext;
+    }
+
+    @Override
+    protected void validate() {
+    }
+
+    @Override
+    protected void sharedResources(Set<String> resources) {
+        resources.addAll(schemaNames);
+    }
+
+    @Override
+    protected void excludeResources(Set<String> resources) {
+    }
+
+    @Override
+    protected ExecutableDdlJob doCreate() {
+        ConvertAllSequenceValidateTask validateTask = new ConvertAllSequenceValidateTask(schemaNames, onlySingleSchema);
+        ConvertSequenceInSchemasTask convertSequenceTask =
+            new ConvertSequenceInSchemasTask(schemaNames, fromType, toType);
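+        // CDC mark: attribute the conversion to the single schema when exactly one is
+        // converted, otherwise fall back to the default system schema for a global mark.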
+        CdcLogicalSequenceMarkTask cdcLogicalSequenceMarkTask = new CdcLogicalSequenceMarkTask(
+            schemaNames.size() == 1 ? schemaNames.get(0) : SystemDbHelper.DEFAULT_DB_NAME,
+            "*",
+            executionContext.getOriginSql(),
+            SqlKind.CONVERT_ALL_SEQUENCES
+        );
+        List<DdlTask> ddlTaskList = ImmutableList.of(
+            validateTask,
+            convertSequenceTask,
+            cdcLogicalSequenceMarkTask
+        );
+
+        ExecutableDdlJob executableDdlJob = new ExecutableDdlJob();
+        executableDdlJob.addSequentialTasks(ddlTaskList);
+        return executableDdlJob;
+    }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateDatabaseJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateDatabaseJobFactory.java
index e39bef1ca..2723ffa47 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateDatabaseJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateDatabaseJobFactory.java
@@ -30,13 +30,10 @@
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
 import com.alibaba.polardbx.gms.topology.DbInfoRecord;
 import com.alibaba.polardbx.gms.topology.SystemDbHelper;
-import com.alibaba.polardbx.optimizer.core.function.calc.scalar.filter.In;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.CreateDatabasePreparedData;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
-import jdk.nashorn.internal.codegen.MapCreator;
-import org.checkerframework.checker.units.qual.C;
 
 import java.util.ArrayList;
 import java.util.List;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateFileStorageJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateFileStorageJobFactory.java
index 1e1390e47..aeced71ff 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateFileStorageJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateFileStorageJobFactory.java
@@ -31,27 +31,35 @@
 import java.util.Map;
 import java.util.Set;
 
+import static com.alibaba.polardbx.gms.topology.SystemDbHelper.DEFAULT_DB_NAME;
+
 public class CreateFileStorageJobFactory extends DdlJobFactory {
     private static final Logger logger = LoggerFactory.getLogger("oss");
 
     private ExecutionContext executionContext;
     private Engine engine;
     private Map<FileStorageInfoKey, String> items;
+    private Map<FileStorageInfoKey.AzureConnectionStringKey, String> azureItems;
 
     public CreateFileStorageJobFactory(
         Engine engine, Map<FileStorageInfoKey, String> items,
+        Map<FileStorageInfoKey.AzureConnectionStringKey, String> azureItems,
         ExecutionContext executionContext) {
         this.executionContext = executionContext;
         this.engine = engine;
         this.items = items;
+        this.azureItems = azureItems;
     }
 
     @Override
     protected void validate() {
         if (!items.containsKey(FileStorageInfoKey.FILE_URI)) {
-            throw new TddlRuntimeException(ErrorCode.ERR_EXECUTE_ON_OSS, "Should contain FILE_URI in with!");
+            if (Engine.ABS != engine) {
+                throw new TddlRuntimeException(ErrorCode.ERR_EXECUTE_ON_OSS, "Should contain FILE_URI in with!");
+            }
         }
-        if (engine.name().equalsIgnoreCase("OSS")) {
+        switch (engine) {
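+        // Each storage engine expects a different set of WITH options; validate per engine.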
+        case OSS: {
             if (!items.containsKey(FileStorageInfoKey.ENDPOINT)) {
                 throw new TddlRuntimeException(ErrorCode.ERR_EXECUTE_ON_OSS, "Should contain ENDPOINT in with!");
             }
@@ -69,13 +77,58 @@ protected void validate() {
                 throw new TddlRuntimeException(ErrorCode.ERR_EXECUTE_ON_OSS,
                     "Should contain ACCESS_KEY_SECRET in with!");
             }
+            break;
+        }
+        case ABS: {
+            if (!items.containsKey(FileStorageInfoKey.AZURE_CONNECTION_STRING)) {
+                throw new TddlRuntimeException(ErrorCode.ERR_EXECUTE_ON_OSS,
+                    "Should contain AZURE_CONNECTION_STRING in with!");
+            }
+            if (!items.containsKey(FileStorageInfoKey.AZURE_CONTAINER_NAME)) {
+                throw new TddlRuntimeException(ErrorCode.ERR_EXECUTE_ON_OSS,
+                    "Should contain AZURE_CONTAINER_NAME in with!");
+            }
+
+            // check connection string
+            if (!azureItems.containsKey(FileStorageInfoKey.AzureConnectionStringKey.DefaultEndpointsProtocol)) {
+                throw new TddlRuntimeException(ErrorCode.ERR_EXECUTE_ON_OSS,
+                    "Should contain DefaultEndpointsProtocol in connection string!");
+            }
+            if (!azureItems.containsKey(FileStorageInfoKey.AzureConnectionStringKey.AccountName)) {
+                throw new TddlRuntimeException(ErrorCode.ERR_EXECUTE_ON_OSS,
+                    "Should contain AccountName in connection string!");
+            }
+            if (!azureItems.containsKey(FileStorageInfoKey.AzureConnectionStringKey.AccountKey)) {
+                throw new TddlRuntimeException(ErrorCode.ERR_EXECUTE_ON_OSS,
+                    "Should contain AccountKey in connection string!");
+            }
+            if (!azureItems.containsKey(FileStorageInfoKey.AzureConnectionStringKey.EndpointSuffix)) {
+                throw new TddlRuntimeException(ErrorCode.ERR_EXECUTE_ON_OSS,
+                    "Should contain EndpointSuffix in connection string!");
+            }
+            break;
+        }
+        case S3: {
+            if (!items.containsKey(FileStorageInfoKey.ACCESS_KEY_ID)) {
+                throw new TddlRuntimeException(ErrorCode.ERR_EXECUTE_ON_OSS, "Should contain ACCESS_KEY_ID in with!");
+            }
+            if (!items.containsKey(FileStorageInfoKey.ACCESS_KEY_SECRET)) {
+                throw new TddlRuntimeException(ErrorCode.ERR_EXECUTE_ON_OSS,
+                    "Should contain ACCESS_KEY_SECRET in with!");
+            }
+            break;
+        }
+        default:
+            break;
         }
     }
 
     @Override
     protected ExecutableDdlJob doCreate() {
         ExecutableDdlJob executableDdlJob = new ExecutableDdlJob();
-        executableDdlJob.addTask(new CreateFileStorageTask(executionContext.getSchemaName(), engine.name(), items));
+        executableDdlJob.addTask(
+            new CreateFileStorageTask(DEFAULT_DB_NAME, engine.name(), items, azureItems)
+        );
         return executableDdlJob;
     }
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateFunctionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateFunctionJobFactory.java
index 13736d84c..a2160276d 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateFunctionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateFunctionJobFactory.java
@@ -21,10 +21,12 @@
 import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.udf.CreateFunctionOnAllDnTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.udf.CreateFunctionRegisterMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.udf.CreateFunctionSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcCreateFunctionMarkTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
 import com.alibaba.polardbx.executor.pl.PLUtils;
 import com.alibaba.polardbx.executor.pl.StoredFunctionManager;
+import com.alibaba.polardbx.executor.pl.UdfUtils;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalCreateFunction;
 import com.google.common.collect.Lists;
@@ -48,6 +50,7 @@ protected void validate() {
             throw new TddlRuntimeException(ErrorCode.ERR_UDF_ALREADY_EXISTS,
                 String.format("function: %s already exist", udfName));
         }
+        UdfUtils.validateContent(createFunction.getSqlCreateFunction().getText());
     }
 
     @Override
@@ -60,13 +63,14 @@ List createTasksForOneJob() {
         DdlTask addMetaTask = new CreateFunctionRegisterMetaTask(schema, null,
             functionName, createContent);
         DdlTask syncTask = new CreateFunctionSyncTask(schema, functionName, createContent, canPush);
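+        // New: CDC mark for the CREATE FUNCTION statement so that downstream
+        // subscribers can presumably observe and replay it (assumption).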
+        CdcCreateFunctionMarkTask cdcCreateFunctionMarkTask = new CdcCreateFunctionMarkTask(schema, functionName);
 
         if (canPush) {
             String createFunctionOnDn = PLUtils.getCreateFunctionOnDn(createContent);
             DdlTask createOnAllDbTask = new CreateFunctionOnAllDnTask(schema, functionName, createFunctionOnDn);
-            return Lists.newArrayList(addMetaTask, createOnAllDbTask, syncTask);
+            return Lists.newArrayList(addMetaTask, createOnAllDbTask, syncTask, cdcCreateFunctionMarkTask);
         } else {
-            return Lists.newArrayList(addMetaTask, syncTask);
+            return Lists.newArrayList(addMetaTask, syncTask, cdcCreateFunctionMarkTask);
         }
     }
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateIndexJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateIndexJobFactory.java
index 70826d71a..6f5e9b7bf 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateIndexJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateIndexJobFactory.java
@@ -43,6 +43,8 @@
 import java.util.Set;
 import java.util.stream.Collectors;
 
+import static com.alibaba.polardbx.common.cdc.ICdcManager.DEFAULT_DDL_VERSION_ID;
+
 public class CreateIndexJobFactory extends DdlJobFactory {
 
     private final List physicalPlanDataList;
@@ -89,7 +91,7 @@ private List createTasksForOneJob(PhysicalPlanData physicalPlanData) {
             new CreateIndexPhyDdlTask(schemaName, physicalPlanData).onExceptionTryRecoveryThenRollback();
         DdlTask cdcDdlMarkTask =
             CBOUtil.isOss(schemaName, logicalTableName) || CBOUtil.isGsi(schemaName, logicalTableName) ? null :
-                new CdcDdlMarkTask(schemaName, physicalPlanData, false, false);
+                new CdcDdlMarkTask(schemaName, physicalPlanData, false, false, DEFAULT_DDL_VERSION_ID);
         DdlTask showMetaTask = new CreateIndexShowMetaTask(schemaName, logicalTableName, indexName,
             physicalPlanData.getDefaultDbIndex(), physicalPlanData.getDefaultPhyTableName());
         DdlTask tableSyncTask = new TableSyncTask(schemaName, logicalTableName);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateJavaFunctionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateJavaFunctionJobFactory.java
index fd2c614c1..61a59869b 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateJavaFunctionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateJavaFunctionJobFactory.java
@@ -19,22 +19,28 @@
 import com.alibaba.polardbx.common.exception.TddlRuntimeException;
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
 import com.alibaba.polardbx.common.properties.ConnectionParams;
+import com.alibaba.polardbx.druid.sql.ast.SQLDataType;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.udf.CreateJavaFunctionRegisterMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.udf.CreateJavaFunctionSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcCreateJavaFunctionMarkTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
 import com.alibaba.polardbx.executor.utils.StringUtils;
 import com.alibaba.polardbx.gms.config.impl.InstConfUtil;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.TddlRelDataTypeSystemImpl;
+import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil;
 import com.alibaba.polardbx.optimizer.core.expression.ExtraFunctionManager;
 import com.alibaba.polardbx.optimizer.core.expression.JavaFunctionManager;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalCreateJavaFunction;
+import com.alibaba.polardbx.optimizer.parse.FastsqlUtils;
 import com.alibaba.polardbx.optimizer.parse.privilege.PrivilegeContext;
 import com.alibaba.polardbx.optimizer.utils.CompileUtils;
 import com.google.common.collect.Lists;
 import org.apache.calcite.sql.SqlCreateJavaFunction;
 
 import java.util.List;
+import java.util.Optional;
 
 public class CreateJavaFunctionJobFactory extends AbstractFunctionJobFactory {
 
@@ -58,6 +64,7 @@ protected void validate() {
                 String.format("internal function: %s already exist, please choose another name", udfName));
         }
         checkJavaCodeValid();
+        checkDataTypeValid();
     }
 
     private void checkReachedMaxNum() {
@@ -76,8 +83,23 @@ private void checkJavaCodeValid() {
         String javaCode = sqlCreateFunction.getJavaCode();
         String functionName = sqlCreateFunction.getFuncName();
         String className = StringUtils.funcNameToClassName(functionName);
-        if (InstConfUtil.getBool(ConnectionParams.CHECK_INVALID_JAVA_UDF)) {
-            CompileUtils.checkInvalidJavaCode(javaCode, className);
+        CompileUtils.checkInvalidJavaCode(javaCode, className);
+    }
+
+    private void checkDataTypeValid() {
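+        // Validation works by actually constructing the RelDataType for the declared
+        // return and input types; an unsupported type is expected to throw here (assumption).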
+        SqlCreateJavaFunction sqlCreateFunction = createFunction.getSqlCreateFunction();
+        String returnType = sqlCreateFunction.getReturnType();
+        // validate return type
+        SQLDataType returnDataType = FastsqlUtils.parseDataType(returnType).get(0);
+        DataTypeUtil.createBasicSqlType(TddlRelDataTypeSystemImpl.getInstance(), returnDataType);
+        String inputTypes = Optional.ofNullable(sqlCreateFunction.getInputTypes())
+            .map(types -> String.join(",", types)).orElse("");
+        if (!org.apache.commons.lang.StringUtils.isEmpty(inputTypes)) {
+            // validate input types
+            List<SQLDataType> inputDataTypes = FastsqlUtils.parseDataType(inputTypes);
+            for (SQLDataType type : inputDataTypes) {
+                DataTypeUtil.createBasicSqlType(TddlRelDataTypeSystemImpl.getInstance(), type);
+            }
         }
     }
 
@@ -92,9 +114,10 @@ List createTasksForOneJob() {
 
         DdlTask addMetaTask = new CreateJavaFunctionRegisterMetaTask(schema, null,
             functionName, javaCode, returnType, inputTypes == null ? "" : String.join(",", inputTypes), noState);
+        DdlTask cdcMarkTask = new CdcCreateJavaFunctionMarkTask(schema, functionName);
         DdlTask syncTask = new CreateJavaFunctionSyncTask(schema, functionName);
 
-        return Lists.newArrayList(addMetaTask, syncTask);
+        return Lists.newArrayList(addMetaTask, syncTask, cdcMarkTask);
     }
 
     public static ExecutableDdlJob createFunction(LogicalCreateJavaFunction logicalCreateFunction,
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateJoinGroupJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateJoinGroupJobFactory.java
new file mode 100644
index 000000000..c0749a4ee
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateJoinGroupJobFactory.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.factory;
+
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateJoinGroupTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcCreateJoinGroupMarkTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
+import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.gms.topology.DbInfoManager;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+
+import java.util.Set;
+
+/**
+ * Created by ziyang.lb.
+ *
+ * @author ziyang.lb
+ */
+public class CreateJoinGroupJobFactory extends DdlJobFactory {
+
+    private String schemaName;
+    private String joinGroupName;
+    private String locality;
+    private boolean isIfNotExists;
+    protected final ExecutionContext executionContext;
+
+    public CreateJoinGroupJobFactory(String schemaName, String joinGroupName, String locality, boolean isIfNotExists,
+                                     ExecutionContext executionContext) {
+        this.schemaName = schemaName;
+        this.joinGroupName = joinGroupName;
+        this.locality = locality;
+        this.isIfNotExists = isIfNotExists;
+        this.executionContext = executionContext;
+    }
+
+    @Override
+    protected void validate() {
+        boolean isNewPart = DbInfoManager.getInstance().isNewPartitionDb(schemaName);
+        if (!isNewPart) {
+            throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT,
+                "it's not allow to execute create joingroup for non-partitioning databases");
+        }
+    }
+
+    @Override
+    protected ExecutableDdlJob doCreate() {
+        ExecutableDdlJob executableDdlJob = new ExecutableDdlJob();
+        CreateJoinGroupTask createJoinGroupTask =
+            new CreateJoinGroupTask(schemaName, joinGroupName, locality, isIfNotExists);
+        executableDdlJob.addTask(createJoinGroupTask);
+
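+        // Chain the CDC mark strictly after the metadata task, so the join group
+        // exists before the mark becomes visible.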
+        CdcCreateJoinGroupMarkTask cdcCreateJoinGroupMarkTask =
+            new CdcCreateJoinGroupMarkTask(schemaName, joinGroupName);
+        executableDdlJob.addTask(cdcCreateJoinGroupMarkTask);
+        executableDdlJob.addTaskRelationship(createJoinGroupTask, cdcCreateJoinGroupMarkTask);
+        return executableDdlJob;
+    }
+
+    public static ExecutableDdlJob create(String schemaName, String joinGroupName, String locality,
+                                          boolean isIfNotExists,
+                                          ExecutionContext executionContext) {
+        return new CreateJoinGroupJobFactory(schemaName, joinGroupName, locality, isIfNotExists,
+            executionContext).create();
+    }
+
+    @Override
+    protected void excludeResources(Set<String> resources) {
+        resources.add(concatWithDot(schemaName, joinGroupName));
+    }
+
+    @Override
+    protected void sharedResources(Set<String> resources) {
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreatePartitionTableJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreatePartitionTableJobFactory.java
index f21406cc4..9b64b5448 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreatePartitionTableJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreatePartitionTableJobFactory.java
@@ -25,15 +25,19 @@
 import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
 import com.alibaba.polardbx.executor.ddl.job.factory.util.FactoryUtils;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateArchiveTableEventLogTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateEntitySecurityAttrTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.CreatePartitionTableValidateTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableAddTablesMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableAddTablesPartitionInfoMetaTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableGroupAddMetaTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableGroupValidateTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTablePhyDdlTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableShowTableMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.InsertIntoTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.SubJobTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask;
 import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcDdlMarkTask;
+import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.TableGroupsSyncTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlExceptionAction;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
@@ -41,11 +45,13 @@
 import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4CreateSelect;
 import com.alibaba.polardbx.gms.partition.TablePartRecordInfoContext;
 import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig;
+import com.alibaba.polardbx.gms.tablegroup.TableGroupDetailConfig;
 import com.alibaba.polardbx.gms.tablegroup.TableGroupRecord;
 import com.alibaba.polardbx.gms.util.TableGroupNameUtil;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.CreateTablePreparedData;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.data.LikeTableInfo;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
 import com.alibaba.polardbx.optimizer.partition.common.LocalPartitionDefinitionInfo;
 import com.alibaba.polardbx.optimizer.utils.RelUtils;
@@ -80,9 +86,10 @@ public CreatePartitionTableJobFactory(boolean autoPartition, boolean hasTimestam
                                           Map<String, Long> specialDefaultValueFlags,
                                           List<ForeignKeyData> addedForeignKeys,
                                           PhysicalPlanData physicalPlanData, ExecutionContext executionContext,
-                                          CreateTablePreparedData preparedData, PartitionInfo partitionInfo) {
-        super(autoPartition, hasTimestampColumnDefault, specialDefaultValues,
-            specialDefaultValueFlags, addedForeignKeys, physicalPlanData, executionContext);
+                                          CreateTablePreparedData preparedData, PartitionInfo partitionInfo,
+                                          LikeTableInfo likeTableInfo) {
+        super(autoPartition, hasTimestampColumnDefault, specialDefaultValues, specialDefaultValueFlags,
+            addedForeignKeys, physicalPlanData, preparedData.getDdlVersionId(), executionContext, likeTableInfo);
         this.preparedData = preparedData;
         this.partitionInfo = partitionInfo;
     }
@@ -93,55 +100,70 @@ protected void validate() {
 
     @Override
     protected void excludeResources(Set<String> resources) {
-        if (isNeedToGetCreateTableGroupLock(true)) {
+        if (!preparedData.isWithImplicitTableGroup() && isNeedToGetCreateTableGroupLock(true)) {
             resources.add(concatWithDot(schemaName, ConnectionProperties.ACQUIRE_CREATE_TABLE_GROUP_LOCK));
             executionContext.getExtraCmds().put(ConnectionParams.ACQUIRE_CREATE_TABLE_GROUP_LOCK.getName(), false);
+        } else if (preparedData.isWithImplicitTableGroup()) {
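+            // The implicit table group is created by this job itself, so take the
+            // exclusive lock on its name only if the group does not exist yet.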
+            if (preparedData.getTableGroupName() != null) {
+                String tgName = RelUtils.stringValue(preparedData.getTableGroupName());
+                TableGroupConfig tgConfig =
+                    OptimizerContext.getContext(preparedData.getSchemaName()).getTableGroupInfoManager()
+                        .getTableGroupConfigByName(tgName);
+                if (tgConfig == null) {
+                    resources.add(concatWithDot(schemaName, tgName));
+                }
+            }
+            super.excludeResources(resources);
         } else {
             super.excludeResources(resources);
-            boolean isSigleTable = false;
-            boolean isBroadCastTable = false;
-            if (partitionInfo != null) {
-                isSigleTable = partitionInfo.isGsiSingleOrSingleTable();
-                isBroadCastTable = partitionInfo.isGsiBroadcastOrBroadcast();
-            }
+        }
+    }
 
-            boolean matchTg = false;
-            TableGroupConfig tgConfig = physicalPlanData.getTableGroupConfig();
-            for (TablePartRecordInfoContext entry : tgConfig.getTables()) {
-                Long tableGroupId = entry.getLogTbRec().getGroupId();
-                if (tableGroupId != null && tableGroupId != -1) {
-                    OptimizerContext oc =
-                        Objects.requireNonNull(OptimizerContext.getContext(schemaName), schemaName + " corrupted");
-                    TableGroupConfig tableGroupConfig =
-                        oc.getTableGroupInfoManager().getTableGroupConfigById(tableGroupId);
-                    TableGroupRecord record = tableGroupConfig.getTableGroupRecord();
-                    String tgName = record.getTg_name();
-                    resources.add(concatWithDot(schemaName, tgName));
-                    tableGroupIds.add(tableGroupId);
-                    matchTg = true;
-                }
+    @Override
+    protected void sharedResources(Set<String> resources) {
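+        // Moved here from excludeResources: these group names now only need a
+        // shared lock while the table is being created.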
+        boolean isSingleTable = false;
+        boolean isBroadCastTable = false;
+        if (partitionInfo != null) {
+            isSingleTable = partitionInfo.isGsiSingleOrSingleTable();
+            isBroadCastTable = partitionInfo.isGsiBroadcastOrBroadcast();
+        }
+
+        boolean matchTg = false;
+        TableGroupDetailConfig tgConfig = physicalPlanData.getTableGroupConfig();
+        for (TablePartRecordInfoContext entry : tgConfig.getTablesPartRecordInfoContext()) {
+            Long tableGroupId = entry.getLogTbRec().getGroupId();
+            if (tableGroupId != null && tableGroupId != -1) {
+                OptimizerContext oc =
+                    Objects.requireNonNull(OptimizerContext.getContext(schemaName), schemaName + " corrupted");
+                TableGroupConfig tableGroupConfig =
+                    oc.getTableGroupInfoManager().getTableGroupConfigById(tableGroupId);
+                TableGroupRecord record = tableGroupConfig.getTableGroupRecord();
+                String tgName = record.getTg_name();
+                resources.add(concatWithDot(schemaName, tgName));
+                tableGroupIds.add(tableGroupId);
+                matchTg = true;
             }
+        }
 
-            if (preparedData.getTableGroupName() == null) {
-                if (isSigleTable && !matchTg) {
-                    resources.add(concatWithDot(schemaName, TableGroupNameUtil.SINGLE_DEFAULT_TG_NAME_TEMPLATE));
-                } else if (isBroadCastTable) {
-                    resources.add(concatWithDot(schemaName, TableGroupNameUtil.BROADCAST_TG_NAME_TEMPLATE));
-                }
+        if (preparedData.getTableGroupName() == null) {
+            if (isSigleTable && !matchTg) {
+                resources.add(concatWithDot(schemaName, TableGroupNameUtil.SINGLE_DEFAULT_TG_NAME_TEMPLATE));
+            } else if (isBroadCastTable) {
+                resources.add(concatWithDot(schemaName, TableGroupNameUtil.BROADCAST_TG_NAME_TEMPLATE));
             }
+        }
 
-            if (preparedData != null && preparedData.getTableGroupName() != null) {
-                String tgName = RelUtils.stringValue(preparedData.getTableGroupName());
-                if (TStringUtil.isNotBlank(tgName)) {
-                    resources.add(concatWithDot(schemaName, tgName));
-                }
+        if (preparedData != null && preparedData.getTableGroupName() != null) {
+            String tgName = RelUtils.stringValue(preparedData.getTableGroupName());
+            if (TStringUtil.isNotBlank(tgName)) {
+                resources.add(concatWithDot(schemaName, tgName));
             }
+        }
 
-            if (preparedData != null && preparedData.getJoinGroupName() != null) {
-                String jgName = RelUtils.stringValue(preparedData.getJoinGroupName());
-                if (TStringUtil.isNotBlank(jgName)) {
-                    resources.add(concatWithDot(schemaName, jgName));
-                }
+        if (preparedData != null && preparedData.getJoinGroupName() != null) {
+            String jgName = RelUtils.stringValue(preparedData.getJoinGroupName());
+            if (TStringUtil.isNotBlank(jgName)) {
+                resources.add(concatWithDot(schemaName, jgName));
             }
         }
     }
@@ -149,7 +171,37 @@ protected void excludeResources(Set resources) {
     @Override
     protected ExecutableDdlJob doCreate() {
         String schemaName = physicalPlanData.getSchemaName();
-        if (isNeedToGetCreateTableGroupLock(false)) {
+
+        List tableGroups = new ArrayList<>();
+        if (needCreateImplicitTableGroup(tableGroups)) {
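+            // Create the missing implicit table groups first: validate all names,
+            // add meta for each group in parallel, sync them, then run the actual
+            // CREATE TABLE as a sub-job.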
+            DdlTask subJobTask = generateCreateTableJob();
+            List taskList = new ArrayList<>();
+            ExecutableDdlJob job = new ExecutableDdlJob();
+            CreateTableGroupValidateTask createTableGroupValidateTask =
+                new CreateTableGroupValidateTask(preparedData.getSchemaName(),
+                    tableGroups);
+            taskList.add(createTableGroupValidateTask);
+            List createTableGroupAddMetaTasks = new ArrayList<>();
+            for (int i = 0; i < tableGroups.size(); i++) {
+                String tableGroupName = tableGroups.get(i);
+                CreateTableGroupAddMetaTask createTableGroupAddMetaTask = new CreateTableGroupAddMetaTask(
+                    preparedData.getSchemaName(), tableGroupName, null,
+                    null, false, true);
+                createTableGroupAddMetaTasks.add(createTableGroupAddMetaTask);
+            }
+
+            TableGroupsSyncTask tableGroupsSyncTask =
+                new TableGroupsSyncTask(preparedData.getSchemaName(), tableGroups);
+            taskList.add(tableGroupsSyncTask);
+            taskList.add(subJobTask);
+            job.addSequentialTasks(taskList);
+            for (int i = 0; i < createTableGroupAddMetaTasks.size(); i++) {
+                job.addTaskRelationship(createTableGroupValidateTask, createTableGroupAddMetaTasks.get(i));
+                job.addTaskRelationship(createTableGroupAddMetaTasks.get(i), tableGroupsSyncTask);
+            }
+            preparedData.setNeedToGetTableGroupLock(true);
+            return job;
+        } else if (!preparedData.isWithImplicitTableGroup() && isNeedToGetCreateTableGroupLock(false)) {
             DdlTask ddl = generateCreateTableJob();
             ExecutableDdlJob job = new ExecutableDdlJob();
             job.addSequentialTasks(Lists.newArrayList(ddl));
@@ -182,10 +234,6 @@ protected ExecutableDdlJob doCreate() {
             if (preparedData != null && preparedData.getJoinGroupName() != null) {
                 joinGroup = RelUtils.stringValue(preparedData.getJoinGroupName());
             }
-            String tableGroup = null;
-            if (preparedData != null && preparedData.getTableGroupName() != null) {
-                tableGroup = RelUtils.stringValue(preparedData.getTableGroupName());
-            }
             CreatePartitionTableValidateTask validateTask =
                 new CreatePartitionTableValidateTask(schemaName, logicalTableName,
                     physicalPlanData.isIfNotExists(), physicalPlanData.getTableGroupConfig(),
@@ -193,11 +241,14 @@ protected ExecutableDdlJob doCreate() {
                     joinGroup, checkSingleTgNotExists, checkBroadcastTgNotExists);
 
             LocalPartitionDefinitionInfo localPartitionDefinitionInfo = preparedData.getLocalPartitionDefinitionInfo();
+            boolean autoCreateTg =
+                executionContext.getParamManager().getBoolean(ConnectionParams.ALLOW_AUTO_CREATE_TABLEGROUP);
+
             CreateTableAddTablesPartitionInfoMetaTask addPartitionInfoTask =
                 new CreateTableAddTablesPartitionInfoMetaTask(schemaName, logicalTableName,
                     physicalPlanData.isTemporary(),
-                    physicalPlanData.getTableGroupConfig(), localPartitionDefinitionInfo, false, null,
-                    joinGroup);
+                    physicalPlanData.getTableGroupConfig(), localPartitionDefinitionInfo, null, null,
+                    joinGroup, false, preparedData.isWithImplicitTableGroup(), autoCreateTg);
 
             CreateTablePhyDdlTask phyDdlTask =
                 new CreateTablePhyDdlTask(schemaName, logicalTableName, physicalPlanData);
@@ -207,13 +258,15 @@ protected ExecutableDdlJob doCreate() {
                     physicalPlanData.getDefaultPhyTableName(), physicalPlanData.getSequence(),
                     physicalPlanData.getTablesExtRecord(), physicalPlanData.isPartitioned(),
                     physicalPlanData.isIfNotExists(), physicalPlanData.getKind(), preparedData.getAddedForeignKeys(),
-                    hasTimestampColumnDefault, specialDefaultValues, specialDefaultValueFlags);
+                    hasTimestampColumnDefault, specialDefaultValues, specialDefaultValueFlags, null, null);
 
             CreateTableShowTableMetaTask showTableMetaTask =
                 new CreateTableShowTableMetaTask(schemaName, logicalTableName);
 
+            CreateEntitySecurityAttrTask cesaTask = createCESATask();
+
             CdcDdlMarkTask cdcDdlMarkTask = new CdcDdlMarkTask(schemaName, physicalPlanData, false,
-                CollectionUtils.isNotEmpty(addedForeignKeys));
+                CollectionUtils.isNotEmpty(addedForeignKeys), versionId);
 
             CreateArchiveTableEventLogTask createArchiveTableEventLogTask = null;
             // TTL table
@@ -229,15 +282,35 @@ protected ExecutableDdlJob doCreate() {
             }
             ExecutableDdlJob4CreatePartitionTable result = new ExecutableDdlJob4CreatePartitionTable();
 
-            List<DdlTask> taskList = Lists.newArrayList(
-                validateTask,
-                addPartitionInfoTask,
-                phyDdlTask,
-                createTableAddTablesMetaTask,
-                cdcDdlMarkTask,
-                showTableMetaTask,
-                createArchiveTableEventLogTask,
-                tableSyncTask);
+            List taskList = new ArrayList<>();
+            if (preparedData.isImportTable()) {
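+                // Import-table jobs skip the physical DDL task; the task chain is
+                // otherwise identical to the regular branch below.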
+                taskList.addAll(
+                    Lists.newArrayList(
+                        validateTask,
+                        addPartitionInfoTask,
+                        createTableAddTablesMetaTask,
+                        cdcDdlMarkTask,
+                        showTableMetaTask,
+                        createArchiveTableEventLogTask,
+                        cesaTask,
+                        tableSyncTask
+                    ).stream().filter(Objects::nonNull).collect(Collectors.toList())
+                );
+            } else {
+                taskList.addAll(
+                    Lists.newArrayList(
+                        validateTask,
+                        addPartitionInfoTask,
+                        phyDdlTask,
+                        createTableAddTablesMetaTask,
+                        cdcDdlMarkTask,
+                        showTableMetaTask,
+                        createArchiveTableEventLogTask,
+                        cesaTask,
+                        tableSyncTask
+                    ).stream().filter(Objects::nonNull).collect(Collectors.toList())
+                );
+            }
 
             if (!GeneralUtil.isEmpty(preparedData.getAddedForeignKeys())) {
                 // sync foreign key table meta
@@ -346,4 +419,15 @@ private boolean isNeedToGetCreateTableGroupLock(boolean printLog) {
         return lock;
     }
 
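+    /**
+     * Collects the table groups that must be created implicitly (map value is
+     * true) into {@code tableGroups} and returns true if any were found.
+     */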
+    private boolean needCreateImplicitTableGroup(List<String> tableGroups) {
+        boolean ret = false;
+        for (Map.Entry<String, Boolean> entry : preparedData.getRelatedTableGroupInfo().entrySet()) {
+            if (entry.getValue()) {
+                tableGroups.add(entry.getKey());
+                ret = true;
+            }
+        }
+        return ret;
+    }
+
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreatePartitionTableWithGsiJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreatePartitionTableWithGsiJobFactory.java
index 79d1bd5e4..74cf39135 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreatePartitionTableWithGsiJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreatePartitionTableWithGsiJobFactory.java
@@ -16,35 +16,55 @@
 
 package com.alibaba.polardbx.executor.ddl.job.factory;
 
+import com.alibaba.polardbx.common.ColumnarTableOptions;
 import com.alibaba.polardbx.executor.ddl.job.builder.gsi.CreatePartitionTableWithGsiBuilder;
 import com.alibaba.polardbx.executor.ddl.job.converter.DdlJobDataConverter;
 import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
 import com.alibaba.polardbx.executor.ddl.job.factory.gsi.CreatePartitionGsiJobFactory;
+import com.alibaba.polardbx.executor.ddl.job.factory.gsi.columnar.CreateColumnarIndexJobFactory;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.InsertIntoTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcDdlMarkTask;
+import com.alibaba.polardbx.executor.ddl.job.task.columnar.CciSchemaEvolutionTask;
+import com.alibaba.polardbx.executor.ddl.job.task.columnar.WaitColumnarTableCreationTask;
 import com.alibaba.polardbx.executor.ddl.job.task.gsi.GsiStatisticsInfoSyncTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlExceptionAction;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4CreateColumnarIndex;
 import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4CreatePartitionGsi;
 import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4CreatePartitionTable;
 import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4CreateSelect;
-import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4InsertOverwrite;
 import com.alibaba.polardbx.executor.sync.GsiStatisticsSyncAction;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.PhyDdlTableOperation;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.data.LikeTableInfo;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.CreateGlobalIndexPreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.CreateTableWithGsiPreparedData;
 import org.apache.calcite.rel.core.DDL;
+import org.apache.calcite.sql.SqlCreateTable;
+import org.apache.calcite.sql.SqlIdentifier;
+import org.apache.calcite.sql.SqlIndexDefinition;
+import org.apache.calcite.sql.SqlNode;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.calcite.sql.dialect.MysqlSqlDialect;
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.TreeMap;
 
 /**
  * @author guxu
  */
 public class CreatePartitionTableWithGsiJobFactory extends DdlJobFactory {
+    private static final Logger logger = LoggerFactory.getLogger(CreatePartitionTableWithGsiJobFactory.class);
 
     @Deprecated
     private final DDL ddl;
@@ -58,11 +78,23 @@ public class CreatePartitionTableWithGsiJobFactory extends DdlJobFactory {
     private final String primaryTableName;
 
     private final ExecutionContext executionContext;
+
     private String selectSql;
 
+    private final SqlCreateTable normalizedOriginalDdl;
+
     public CreatePartitionTableWithGsiJobFactory(@Deprecated DDL ddl,
                                                  CreateTableWithGsiPreparedData preparedData,
                                                  ExecutionContext executionContext) {
+        // ddl.sqlNode will be modified in ReplaceTableNameWithQuestionMarkVisitor:
+        // afterwards the table name of the CREATE TABLE statement is replaced with a
+        // question mark, which breaks CHECK COLUMNAR META and CDC.
+        // The proper fix might be to copy a new SqlNode in
+        // ReplaceTableNameWithQuestionMarkVisitor every time a table name is replaced
+        // (as a SqlShuttle should do), but changing that visitor affects all kinds of
+        // DDL statements, so it should be done later and tested carefully.
+        this.normalizedOriginalDdl = (SqlCreateTable) SqlNode.clone(ddl.sqlNode);
+
         CreatePartitionTableWithGsiBuilder createTableWithGsiBuilder =
             new CreatePartitionTableWithGsiBuilder(ddl, preparedData, executionContext);
 
@@ -80,7 +112,6 @@ public CreatePartitionTableWithGsiJobFactory(@Deprecated DDL ddl,
 
         this.schemaName = preparedData.getPrimaryTablePreparedData().getSchemaName();
         this.primaryTableName = preparedData.getPrimaryTablePreparedData().getTableName();
-
     }
 
     @Override
@@ -98,14 +129,16 @@ protected ExecutableDdlJob doCreate() {
                 primaryTableTopology,
                 primaryTablePhysicalPlans,
                 false,
-                isAutoPartition);
+                isAutoPartition,
+                executionContext);
         CreatePartitionTableJobFactory ret =
             new CreatePartitionTableJobFactory(preparedData.getPrimaryTablePreparedData().isAutoPartition(),
                 preparedData.getPrimaryTablePreparedData().isTimestampColumnDefault(),
                 preparedData.getPrimaryTablePreparedData().getSpecialDefaultValues(),
                 preparedData.getPrimaryTablePreparedData().getSpecialDefaultValueFlags(),
                 preparedData.getPrimaryTablePreparedData().getAddedForeignKeys(),
-                physicalPlanData, executionContext, preparedData.getPrimaryTablePreparedData(), null);
+                physicalPlanData, executionContext, preparedData.getPrimaryTablePreparedData(), null,
+                preparedData.getPrimaryTablePreparedData().getLikeTableInfo());
 //        ret.setSelectSql(selectSql);
         ExecutableDdlJob thisParentJob = ret.create();
         if (preparedData.getPrimaryTablePreparedData().isNeedToGetTableGroupLock()) {
@@ -121,35 +154,104 @@ protected ExecutableDdlJob doCreate() {
         result.combineTasks(createTableJob);
         result.addExcludeResources(createTableJob.getExcludeResources());
 
+        final List cciWaitList = new ArrayList<>();
+        final List schemaEvolutionInitializer = new ArrayList<>();
         Map<String, CreateGlobalIndexPreparedData> gsiPreparedDataMap = preparedData.getIndexTablePreparedDataMap();
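+        // Index table name -> its add-partition-meta task; used by addDependence to
+        // order indexes whose table group must align with another index table.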
+        Map indexAddPartitionMetaTasks = new TreeMap<>(String::compareToIgnoreCase);
         for (Map.Entry<String, CreateGlobalIndexPreparedData> entry : gsiPreparedDataMap.entrySet()) {
-            final CreateGlobalIndexPreparedData gsiPreparedData = entry.getValue();
-            ExecutableDdlJob thisJob =
-                CreatePartitionGsiJobFactory.create4CreateTableWithGsi(ddl, gsiPreparedData, executionContext);
-            DdlTask gsiStatisticsInfoTask = new GsiStatisticsInfoSyncTask(
-                gsiPreparedData.getSchemaName(),
-                gsiPreparedData.getPrimaryTableName(),
-                gsiPreparedData.getIndexTableName(),
-                GsiStatisticsSyncAction.INSERT_RECORD,
-                null);
-            thisJob.appendTask(gsiStatisticsInfoTask);
-            if (gsiPreparedData.isNeedToGetTableGroupLock()) {
-                return thisJob;
+            final CreateGlobalIndexPreparedData indexPreparedData = entry.getValue();
+            if (indexPreparedData.isColumnarIndex()) {
+                // Clustered columnar index
+                final ExecutableDdlJob4CreateColumnarIndex cciJob = (ExecutableDdlJob4CreateColumnarIndex)
+                    CreateColumnarIndexJobFactory.create4CreateCci(ddl, indexPreparedData, executionContext);
+                if (indexPreparedData.isNeedToGetTableGroupLock()) {
+                    return cciJob;
+                }
+                cciJob.removeTaskRelationship(
+                    cciJob.getInsertColumnarIndexMetaTask(),
+                    cciJob.getWaitColumnarTableCreationTask()
+                );
+
+                cciWaitList.add(cciJob.getWaitColumnarTableCreationTask());
+
+                // Add cci tasks
+                result.combineTasks(cciJob);
+
+                // Add relationship with before tasks
+                result.addTaskRelationship(
+                    createTableJob.getCreateTableAddTablesMetaTask(),
+                    cciJob.getCreateColumnarIndexValidateTask()
+                );
+
+                // Add relationship with after tasks
+                result.addTaskRelationship(cciJob.getInsertColumnarIndexMetaTask(), createTableJob.getCdcDdlMarkTask());
+
+                // Add exclusive resources
+                result.addExcludeResources(cciJob.getExcludeResources());
+
+                // Replace index definition in normalizedOriginalDdl
+                normalizedOriginalDdl.replaceCciDef(
+                    entry.getKey(),
+                    indexPreparedData.getIndexDefinition());
+
+                schemaEvolutionInitializer.add(CciSchemaEvolutionTask.createCci(schemaName,
+                    indexPreparedData.getPrimaryTableName(),
+                    indexPreparedData.getIndexTableName(),
+                    buildColumnarOptions(indexPreparedData.getIndexDefinition()),
+                    indexPreparedData.getDdlVersionId()));
+
+                // TODO: is CreateGsiPreCheckTask necessary?
+            } else {
+                // Global secondary index
+                ExecutableDdlJob thisJob =
+                    CreatePartitionGsiJobFactory.create4CreateTableWithGsi(ddl, indexPreparedData, executionContext);
+                DdlTask gsiStatisticsInfoTask = new GsiStatisticsInfoSyncTask(
+                    indexPreparedData.getSchemaName(),
+                    indexPreparedData.getPrimaryTableName(),
+                    indexPreparedData.getIndexTableName(),
+                    GsiStatisticsSyncAction.INSERT_RECORD,
+                    null);
+                thisJob.appendTask(gsiStatisticsInfoTask);
+                if (indexPreparedData.isNeedToGetTableGroupLock()) {
+                    return thisJob;
+                }
+                ExecutableDdlJob4CreatePartitionGsi gsiJob = (ExecutableDdlJob4CreatePartitionGsi) thisJob;
+                result.combineTasks(gsiJob);
+                result.addTaskRelationship(
+                    createTableJob.getCreateTableAddTablesMetaTask(),
+                    gsiJob.getCreateGsiValidateTask()
+                );
+                result.addTaskRelationship(gsiJob.getLastTask(), createTableJob.getCdcDdlMarkTask());
+                result.addExcludeResources(gsiJob.getExcludeResources());
+                result.addTask(gsiJob.getCreateGsiPreCheckTask());
+                result.addTaskRelationship(createTableJob.getCreatePartitionTableValidateTask(),
+                    gsiJob.getCreateGsiPreCheckTask());
+                result.addTaskRelationship(gsiJob.getCreateGsiPreCheckTask(),
+                    createTableJob.getCreateTableAddTablesPartitionInfoMetaTask());
+                indexAddPartitionMetaTasks.put(indexPreparedData.getIndexTableName(),
+                    gsiJob.getCreateTableAddTablesPartitionInfoMetaTask());
+            }
+        }
+        addDependence(indexAddPartitionMetaTasks, result);
+
+        if (!cciWaitList.isEmpty()) {
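+            // Chain every WaitColumnarTableCreationTask between the CDC mark task and
+            // the show-table-meta task, so the table only becomes visible after all
+            // columnar indexes are ready.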
+            final CdcDdlMarkTask cdcDdlMarkTask = createTableJob.getCdcDdlMarkTask();
+
+            // Try to set CDC_ORIGINAL_DDL with this.normalizedOriginalDdl
+            updateOriginalDdlSql(cdcDdlMarkTask);
+            cdcDdlMarkTask.addSchemaEvolutionInitializers(schemaEvolutionInitializer);
+
+            result.removeTaskRelationship(
+                cdcDdlMarkTask,
+                createTableJob.getCreateTableShowTableMetaTask());
+            DdlTask last = cdcDdlMarkTask;
+            for (WaitColumnarTableCreationTask cciWait : cciWaitList) {
+                result.addTaskRelationship(last, cciWait);
+                last = cciWait;
             }
-            ExecutableDdlJob4CreatePartitionGsi gsiJob = (ExecutableDdlJob4CreatePartitionGsi) thisJob;
-            result.combineTasks(gsiJob);
-            result.addTaskRelationship(
-                createTableJob.getCreateTableAddTablesMetaTask(),
-                gsiJob.getCreateGsiValidateTask()
-            );
-            result.addTaskRelationship(gsiJob.getLastTask(), createTableJob.getCdcDdlMarkTask());
-            result.addExcludeResources(gsiJob.getExcludeResources());
-            result.addTask(gsiJob.getCreateGsiPreCheckTask());
-            result.addTaskRelationship(createTableJob.getCreatePartitionTableValidateTask(),
-                gsiJob.getCreateGsiPreCheckTask());
-            result.addTaskRelationship(gsiJob.getCreateGsiPreCheckTask(),
-                createTableJob.getCreateTableAddTablesPartitionInfoMetaTask());
+            result.addTaskRelationship(last, createTableJob.getCreateTableShowTableMetaTask());
         }
+
         if (selectSql != null) {
             InsertIntoTask
                 insertIntoTask = new InsertIntoTask(schemaName, primaryTableName, selectSql, null, 0);
@@ -166,6 +268,51 @@ protected ExecutableDdlJob doCreate() {
         return result;
     }
 
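+    /**
+     * Builds the columnar options map; dictionary columns are rendered as a
+     * comma-separated list of backtick-quoted column names.
+     */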
+    private Map<String, String> buildColumnarOptions(SqlIndexDefinition indexDefinition) {
+        Map<String, String> options = new HashMap<>();
+        if (indexDefinition != null && CollectionUtils.isNotEmpty(indexDefinition.getDictColumns())) {
+            String columns = indexDefinition.getDictColumns().stream()
+                .map(sqlIndexColumnName -> SqlIdentifier.surroundWithBacktick(sqlIndexColumnName.getColumnNameStr()))
+                .collect(Collectors.joining(","));
+            options.put(ColumnarTableOptions.DICTIONARY_COLUMNS, columns);
+        }
+        return options;
+    }
+
+    @SuppressWarnings("CatchMayIgnoreException")
+    private void updateOriginalDdlSql(CdcDdlMarkTask cdcDdlMarkTask) {
+        try {
+            // Calling SqlCreateTable.toSqlString(SqlDialect dialect) yields the user-input
+            // CREATE TABLE statement; calling SqlCreateTable.unparse(SqlWriter, int, int)
+            // yields the statement with the cci name replaced (suffix added).
+            // For GDN use, we need the cci name without the suffix.
+            final String normalizedOriginalDdl = this.normalizedOriginalDdl
+//                .toSqlString(MysqlSqlDialect.DEFAULT, false)
+                .toString();
+            cdcDdlMarkTask.setNormalizedOriginalDdl(normalizedOriginalDdl);
+        } catch (Exception ignored) {
+            logger.error(
+                "Failed to get normalized original ddl statement, might cause CHECK COLUMNAR META report an error",
+                ignored);
+        }
+    }
+
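+    /**
+     * If an index table is declared to align its table group with another index
+     * table, make its add-partition-meta task run after that table's task.
+     */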
+    private void addDependence(Map<String, DdlTask> indexAddPartitionMetaTasks, ExecutableDdlJob result) {
+        Map<String, CreateGlobalIndexPreparedData> gsiPreparedDataMap = preparedData.getIndexTablePreparedDataMap();
+        for (Map.Entry<String, CreateGlobalIndexPreparedData> entry : gsiPreparedDataMap.entrySet()) {
+            final CreateGlobalIndexPreparedData gsiPreparedData = entry.getValue();
+            if (primaryTableName.equalsIgnoreCase(gsiPreparedData.getTableGroupAlignWithTargetTable())) {
+                // do nothing
+            } else if (StringUtils.isNotEmpty(gsiPreparedData.getTableGroupAlignWithTargetTable())) {
+                DdlTask curIndexAddMeta = indexAddPartitionMetaTasks.get(gsiPreparedData.getIndexTableName());
+                DdlTask dependIndexAddMeta =
+                    indexAddPartitionMetaTasks.get(gsiPreparedData.getTableGroupAlignWithTargetTable());
+                result.addTaskRelationship(dependIndexAddMeta, curIndexAddMeta);
+            }
+        }
+    }
+
     @Override
     protected void excludeResources(Set<String> resources) {
         resources.add(concatWithDot(schemaName, primaryTableName));
@@ -174,6 +321,11 @@ protected void excludeResources(Set resources) {
                 resources.add(concatWithDot(schemaName, indexTableName));
             });
         }
+        if (preparedData.getPrimaryTablePreparedData() != null
+            && preparedData.getPrimaryTablePreparedData().getLikeTableInfo() != null) {
+            LikeTableInfo likeTableInfo = preparedData.getPrimaryTablePreparedData().getLikeTableInfo();
+            resources.add(concatWithDot(likeTableInfo.getSchemaName(), likeTableInfo.getTableName()));
+        }
     }
 
     @Override
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateProcedureJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateProcedureJobFactory.java
index 3aaf4500d..cfc97ce6c 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateProcedureJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateProcedureJobFactory.java
@@ -22,6 +22,7 @@
 import com.alibaba.polardbx.druid.sql.ast.SQLName;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.procedure.CreateProcedureRegisterMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.procedure.CreateProcedureSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcCreateProcedureMarkTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
 import com.alibaba.polardbx.executor.pl.PLUtils;
@@ -68,13 +69,18 @@ List createTasksForOneJob() {
             procedureSchema, SQLUtils.normalize(procedureName.getSimpleName()),
             createProcedure.getSqlCreateProcedure().getText());
 
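+        // Mark the CREATE PROCEDURE statement for CDC so downstream subscribers can replay it.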
+        CdcCreateProcedureMarkTask cdcCreateProcedureMarkTask = new CdcCreateProcedureMarkTask(
+            procedureSchema, SQLUtils.normalize(procedureName.getSimpleName())
+        );
+
         DdlTask syncTask =
             new CreateProcedureSyncTask(executionSchema, procedureSchema,
                 SQLUtils.normalize(procedureName.getSimpleName()));
 
         return Lists.newArrayList(
             addMetaTask,
-            syncTask
+            syncTask,
+            cdcCreateProcedureMarkTask
         );
     }
 
@@ -83,4 +89,4 @@ public static ExecutableDdlJob createProcedure(LogicalCreateProcedure logicalCre
 
         return new CreateProcedureJobFactory(logicalCreateProcedure, ec.getSchemaName()).create();
     }
-}
\ No newline at end of file
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateTableGroupJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateTableGroupJobFactory.java
index 5b2ec516e..c4f692cc4 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateTableGroupJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateTableGroupJobFactory.java
@@ -18,6 +18,7 @@
 
 import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableGroupAddMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableGroupValidateTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcCreateTableGroupMarkTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.TableGroupSyncTask;
 import com.alibaba.polardbx.executor.ddl.job.validator.TableGroupValidator;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
@@ -29,6 +30,7 @@
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.CreateTableGroupPreparedData;
 import com.alibaba.polardbx.optimizer.tablegroup.TableGroupInfoManager;
+import com.google.common.collect.Lists;
 import org.apache.calcite.rel.core.DDL;
 import org.apache.calcite.sql.SqlCreateTableGroup;
 
@@ -75,12 +77,16 @@ protected ExecutableDdlJob doCreate() {
         }
         CreateTableGroupValidateTask createTableGroupValidateTask =
             new CreateTableGroupValidateTask(preparedData.getSchemaName(),
-                preparedData.getTableGroupName());
+                Lists.newArrayList(preparedData.getTableGroupName()));
         taskList.add(createTableGroupValidateTask);
         CreateTableGroupAddMetaTask createTableGroupAddMetaTask = new CreateTableGroupAddMetaTask(
             preparedData.getSchemaName(), preparedData.getTableGroupName(), preparedData.getLocality(),
-            preparedData.getPartitionBy());
+            preparedData.getPartitionBy(), preparedData.isSingle(), false);
         taskList.add(createTableGroupAddMetaTask);
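+        // Mark the table group creation for CDC before the sync task runs.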
+        CdcCreateTableGroupMarkTask cdcCreateTableGroupMarkTask = new CdcCreateTableGroupMarkTask(
+            preparedData.getSchemaName(), preparedData.getTableGroupName()
+        );
+        taskList.add(cdcCreateTableGroupMarkTask);
         TableGroupSyncTask tableGroupSyncTask =
             new TableGroupSyncTask(preparedData.getSchemaName(), preparedData.getTableGroupName());
         taskList.add(tableGroupSyncTask);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateTableJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateTableJobFactory.java
index 5a0033623..0ab788629 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateTableJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateTableJobFactory.java
@@ -17,10 +17,12 @@
 package com.alibaba.polardbx.executor.ddl.job.factory;
 
 import com.alibaba.polardbx.common.ddl.foreignkey.ForeignKeyData;
 import com.alibaba.polardbx.common.utils.GeneralUtil;
 import com.alibaba.polardbx.common.properties.ConnectionParams;
 import com.alibaba.polardbx.common.properties.ConnectionParams;
 import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateEntitySecurityAttrTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableAddTablesExtMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableAddTablesMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTablePhyDdlTask;
@@ -37,12 +39,16 @@
 import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4CreateSelect;
 import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4CreateTable;
 import com.alibaba.polardbx.gms.locality.LocalityDesc;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityEntity;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityManager;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.data.LikeTableInfo;
 import com.google.common.collect.Lists;
 import org.apache.commons.collections.CollectionUtils;
 
 import java.sql.Connection;
 import java.util.List;
+import java.util.ArrayList;
 import java.sql.Connection;
 import java.util.Map;
 import java.util.Objects;
@@ -66,6 +72,8 @@ public class CreateTableJobFactory extends DdlJobFactory {
     protected final List<ForeignKeyData> addedForeignKeys;
     protected final boolean fromTruncateTable;
     protected String selectSql;
+    protected final Long versionId;
+    protected LikeTableInfo likeTableInfo;
 
     public CreateTableJobFactory(boolean autoPartition,
                                  boolean hasTimestampColumnDefault,
@@ -73,15 +81,19 @@ public CreateTableJobFactory(boolean autoPartition,
                                  Map<String, Long> specialDefaultValueFlags,
                                  List<ForeignKeyData> addedForeignKeys,
                                  PhysicalPlanData physicalPlanData,
-                                 ExecutionContext executionContext) {
+                                 Long versionId,
+                                 ExecutionContext executionContext,
+                                 LikeTableInfo likeTableInfo) {
         this(autoPartition,
             hasTimestampColumnDefault,
             specialDefaultValues,
             specialDefaultValueFlags,
             addedForeignKeys,
             physicalPlanData,
+            versionId,
             executionContext,
-            false);
+            false,
+            likeTableInfo);
     }
 
     public CreateTableJobFactory(boolean autoPartition,
@@ -90,8 +102,10 @@ public CreateTableJobFactory(boolean autoPartition,
                                  Map<String, Long> specialDefaultValueFlags,
                                  List<ForeignKeyData> addedForeignKeys,
                                  PhysicalPlanData physicalPlanData,
+                                 Long versionId,
                                  ExecutionContext executionContext,
-                                 boolean fromTruncateTable) {
+                                 boolean fromTruncateTable,
+                                 LikeTableInfo likeTableInfo) {
         this.autoPartition = autoPartition;
         this.hasTimestampColumnDefault = hasTimestampColumnDefault;
         this.physicalPlanData = physicalPlanData;
@@ -102,6 +116,8 @@ public CreateTableJobFactory(boolean autoPartition,
         this.specialDefaultValueFlags = specialDefaultValueFlags;
         this.addedForeignKeys = addedForeignKeys;
         this.fromTruncateTable = fromTruncateTable;
+        this.versionId = versionId;
+        this.likeTableInfo = likeTableInfo;
     }
 
     @Override
@@ -115,7 +131,8 @@ public void setSelectSql(String sql) {
     @Override
     protected ExecutableDdlJob doCreate() {
         CreateTableValidateTask validateTask =
-            new CreateTableValidateTask(schemaName, logicalTableName, physicalPlanData.getTablesExtRecord());
+            new CreateTableValidateTask(schemaName, logicalTableName, physicalPlanData.getTablesExtRecord(),
+                likeTableInfo);
 
         CreateTableAddTablesExtMetaTask addExtMetaTask =
             new CreateTableAddTablesExtMetaTask(schemaName, logicalTableName, physicalPlanData.isTemporary(),
@@ -124,14 +141,16 @@ protected ExecutableDdlJob doCreate() {
         CreateTablePhyDdlTask phyDdlTask = new CreateTablePhyDdlTask(schemaName, logicalTableName, physicalPlanData);
 
         CdcDdlMarkTask cdcDdlMarkTask = new CdcDdlMarkTask(schemaName, physicalPlanData, !fromTruncateTable,
-            CollectionUtils.isNotEmpty(addedForeignKeys));
-
+            CollectionUtils.isNotEmpty(addedForeignKeys), versionId);
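+        // Reuse the user's original DDL text for the CDC mark unless this job was
+        // spawned by TRUNCATE TABLE.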
+        cdcDdlMarkTask.setUseOriginalDDl(!fromTruncateTable);
         CreateTableAddTablesMetaTask addTableMetaTask =
             new CreateTableAddTablesMetaTask(schemaName, logicalTableName, physicalPlanData.getDefaultDbIndex(),
                 physicalPlanData.getDefaultPhyTableName(), physicalPlanData.getSequence(),
                 physicalPlanData.getTablesExtRecord(), physicalPlanData.isPartitioned(),
                 physicalPlanData.isIfNotExists(), physicalPlanData.getKind(), addedForeignKeys,
-                hasTimestampColumnDefault, specialDefaultValues, specialDefaultValueFlags);
+                hasTimestampColumnDefault, specialDefaultValues, specialDefaultValueFlags, null, null);
+
+        CreateEntitySecurityAttrTask cesaTask = createCESATask();
 
         //Renew this one.
         LocalityDesc locality = physicalPlanData.getLocalityDesc();
@@ -160,6 +179,7 @@ protected ExecutableDdlJob doCreate() {
             cdcDdlMarkTask,
             showTableMetaTask,
             storeLocalityTask,
+            cesaTask,
             tableSyncTask);
 
         if (!GeneralUtil.isEmpty(addedForeignKeys)) {
@@ -207,9 +227,41 @@ protected ExecutableDdlJob doCreate() {
         return result;
     }
 
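+    /**
+     * Validates the LBAC security attributes declared for the table and its
+     * columns and wraps them into a CreateEntitySecurityAttrTask; returns null
+     * when no security attributes are present.
+     */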
+    protected CreateEntitySecurityAttrTask createCESATask() {
+        List esaList = new ArrayList<>();
+        if (physicalPlanData.getTableESA() != null) {
+            LBACSecurityManager.getInstance().validateSecurityEntity(physicalPlanData.getTableESA(),
+                physicalPlanData.getTableESA().getSecurityAttr());
+            esaList.add(physicalPlanData.getTableESA());
+            if (physicalPlanData.getColEsaList() != null) {
+                for (LBACSecurityEntity esa : physicalPlanData.getColEsaList()) {
+                    LBACSecurityManager.getInstance()
+                        .validateSecurityEntity(esa, physicalPlanData.getTableESA().getSecurityAttr());
+                    esaList.add(esa);
+                }
+            }
+        }
+        return esaList.isEmpty() ? null : new CreateEntitySecurityAttrTask(schemaName, logicalTableName, esaList);
+    }
+
     @Override
     protected void excludeResources(Set<String> resources) {
         resources.add(concatWithDot(schemaName, logicalTableName));
+        if (likeTableInfo != null) {
+            resources.add(concatWithDot(likeTableInfo.getSchemaName(), likeTableInfo.getTableName()));
+        }
+
+        // exclude foreign key tables
+        if (!GeneralUtil.isEmpty(addedForeignKeys)) {
+            // lock each referenced table; skip self references
+            for (ForeignKeyData addedForeignKey : addedForeignKeys) {
+                if (schemaName.equalsIgnoreCase(addedForeignKey.refSchema) &&
+                    logicalTableName.equalsIgnoreCase(addedForeignKey.refTableName)) {
+                    continue;
+                }
+                resources.add(concatWithDot(addedForeignKey.refSchema, addedForeignKey.refTableName));
+            }
+        }
     }
 
     @Override
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateTableWithGsiJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateTableWithGsiJobFactory.java
index 4bfccdae5..129b3556f 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateTableWithGsiJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateTableWithGsiJobFactory.java
@@ -21,23 +21,26 @@
 import com.alibaba.polardbx.executor.ddl.job.converter.DdlJobDataConverter;
 import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
 import com.alibaba.polardbx.executor.ddl.job.factory.gsi.CreateGsiJobFactory;
+import com.alibaba.polardbx.executor.ddl.job.factory.gsi.columnar.CreateColumnarIndexJobFactory;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.InsertIntoTask;
 import com.alibaba.polardbx.executor.ddl.job.task.gsi.GsiStatisticsInfoSyncTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlExceptionAction;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4CreateColumnarIndex;
 import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4CreateGsi;
 import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4CreateSelect;
 import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4CreateTable;
-import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4InsertOverwrite;
 import com.alibaba.polardbx.executor.sync.GsiStatisticsSyncAction;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.PhyDdlTableOperation;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.data.LikeTableInfo;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.CreateGlobalIndexPreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.CreateTableWithGsiPreparedData;
 import org.apache.calcite.rel.core.DDL;
 
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -114,7 +117,8 @@ protected ExecutableDdlJob doCreate() {
                 primaryTableTopology,
                 primaryTablePhysicalPlans,
                 false,
-                isAutoPartition);
+                isAutoPartition,
+                executionContext);
         CreateTableJobFactory executableDdlJob4CreateTable = new CreateTableJobFactory(
             false,
             hasTimestampColumnDefault,
@@ -122,7 +126,10 @@ protected ExecutableDdlJob doCreate() {
             specialDefaultValueFlags,
             addedForeignKeys,
             physicalPlanData,
-            executionContext);
+            preparedData.getDdlVersionId(),
+            executionContext,
+            preparedData.getPrimaryTablePreparedData().getLikeTableInfo()
+        );
 //        executableDdlJob4CreateTable.setSelectSql(selectSql);
         ExecutableDdlJob4CreateTable createTableJob =
             (ExecutableDdlJob4CreateTable) executableDdlJob4CreateTable.create();
@@ -131,31 +138,57 @@ protected ExecutableDdlJob doCreate() {
             createTableJob.getCreateTableAddTablesMetaTask(),
             createTableJob.getCdcDdlMarkTask());
 
+        final Map cciPreparedDataMap = new HashMap<>();
+
+        // Create global secondary index
         Map<String, CreateGlobalIndexPreparedData> gsiPreparedDataMap = preparedData.getIndexTablePreparedDataMap();
         for (Map.Entry<String, CreateGlobalIndexPreparedData> entry : gsiPreparedDataMap.entrySet()) {
-            final CreateGlobalIndexPreparedData gsiPreparedData = entry.getValue();
-            ExecutableDdlJob4CreateGsi gsiJob = (ExecutableDdlJob4CreateGsi)
-                CreateGsiJobFactory.create4CreateTableWithGsi(ddl, gsiPreparedData, executionContext);
-            DdlTask gsiStatisticsInfoTask = new GsiStatisticsInfoSyncTask(
-                gsiPreparedData.getSchemaName(),
-                gsiPreparedData.getPrimaryTableName(),
-                gsiPreparedData.getIndexTableName(),
-                GsiStatisticsSyncAction.INSERT_RECORD,
-                null);
-            gsiJob.appendTask(gsiStatisticsInfoTask);
-            result.combineTasks(gsiJob);
-            result.removeTaskRelationship(
-                gsiJob.getCreateGsiValidateTask(), gsiJob.getCreateTableAddTablesExtMetaTask());
-            result.addTaskRelationship(
-                createTableJob.getCreateTableAddTablesExtMetaTask(), gsiJob.getCreateGsiValidateTask());
-            result.addTaskRelationship(
-                gsiJob.getCreateGsiValidateTask(), createTableJob.getCreateTablePhyDdlTask());
-            result.removeTaskRelationship(
-                createTableJob.getCreateTableAddTablesExtMetaTask(), createTableJob.getCreateTablePhyDdlTask());
-            result.addTaskRelationship(
-                createTableJob.getCreateTableAddTablesMetaTask(), gsiJob.getCreateTableAddTablesExtMetaTask());
-            result.addTaskRelationship(
-                gsiJob.getLastTask(), createTableJob.getCdcDdlMarkTask());
+            final CreateGlobalIndexPreparedData indexPreparedData = entry.getValue();
+            if (indexPreparedData.isColumnarIndex()) {
+                // Clustered columnar index
+                final ExecutableDdlJob4CreateColumnarIndex cciJob = (ExecutableDdlJob4CreateColumnarIndex)
+                    CreateColumnarIndexJobFactory.create4CreateCci(ddl, indexPreparedData, executionContext);
+                if (indexPreparedData.isNeedToGetTableGroupLock()) {
+                    return cciJob;
+                }
+                // Add cci tasks
+                result.combineTasks(cciJob);
+
+                // Add relationship with before tasks
+                result.addTaskRelationship(
+                    createTableJob.getCreateTableAddTablesMetaTask(),
+                    cciJob.getCreateColumnarIndexValidateTask()
+                );
+
+                // Add relationship with after tasks
+                result.addTaskRelationship(cciJob.getLastTask(), createTableJob.getCdcDdlMarkTask());
+
+                // Add exclusive resources
+                result.addExcludeResources(cciJob.getExcludeResources());
+            } else {
+                // Global secondary index
+                ExecutableDdlJob4CreateGsi gsiJob = (ExecutableDdlJob4CreateGsi)
+                    CreateGsiJobFactory.create4CreateTableWithGsi(ddl, indexPreparedData, executionContext);
+                DdlTask gsiStatisticsInfoTask = new GsiStatisticsInfoSyncTask(
+                    indexPreparedData.getSchemaName(),
+                    indexPreparedData.getPrimaryTableName(),
+                    indexPreparedData.getIndexTableName(),
+                    GsiStatisticsSyncAction.INSERT_RECORD,
+                    null);
+                gsiJob.appendTask(gsiStatisticsInfoTask);
+                result.combineTasks(gsiJob);
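+                // Rewire the combined DAG: the GSI validate task runs between the
+                // primary table's ext-meta task and its physical DDL, and the whole
+                // GSI job must finish before the CDC mark task.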
+                result.removeTaskRelationship(
+                    gsiJob.getCreateGsiValidateTask(), gsiJob.getCreateTableAddTablesExtMetaTask());
+                result.addTaskRelationship(
+                    createTableJob.getCreateTableAddTablesExtMetaTask(), gsiJob.getCreateGsiValidateTask());
+                result.addTaskRelationship(
+                    gsiJob.getCreateGsiValidateTask(), createTableJob.getCreateTablePhyDdlTask());
+                result.removeTaskRelationship(
+                    createTableJob.getCreateTableAddTablesExtMetaTask(), createTableJob.getCreateTablePhyDdlTask());
+                result.addTaskRelationship(
+                    createTableJob.getCreateTableAddTablesMetaTask(), gsiJob.getCreateTableAddTablesExtMetaTask());
+                result.addTaskRelationship(gsiJob.getLastTask(), createTableJob.getCdcDdlMarkTask());
+            }
         }
 
         result.setExceptionActionForAllSuccessor(
@@ -186,6 +219,11 @@ protected void excludeResources(Set resources) {
                 resources.add(concatWithDot(schemaName, indexTableName));
             });
         }
+        if (preparedData.getPrimaryTablePreparedData() != null
+            && preparedData.getPrimaryTablePreparedData().getLikeTableInfo() != null) {
+            LikeTableInfo likeTableInfo = preparedData.getPrimaryTablePreparedData().getLikeTableInfo();
+            resources.add(concatWithDot(likeTableInfo.getSchemaName(), likeTableInfo.getTableName()));
+        }
     }
 
     @Override
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateViewJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateViewJobFactory.java
new file mode 100644
index 000000000..8d53426da
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/CreateViewJobFactory.java
@@ -0,0 +1,107 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.factory;
+
+import com.alibaba.polardbx.common.properties.ConnectionProperties;
+import com.alibaba.polardbx.druid.sql.ast.statement.SQLCreateViewStatement;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateViewAddMetaTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcCreateViewMarkTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateViewSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.ValidateCreateViewTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.dialect.DbType;
+import com.alibaba.polardbx.optimizer.core.planner.ExecutionPlan;
+import com.alibaba.polardbx.optimizer.core.planner.Planner;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalCreateView;
+import com.alibaba.polardbx.optimizer.parse.FastsqlUtils;
+import com.alibaba.polardbx.optimizer.planmanager.PlanManagerUtil;
+import com.alibaba.polardbx.optimizer.utils.RelUtils;
+import com.google.common.collect.Lists;
+import org.apache.calcite.sql.TDDLSqlSelect;
+
+import java.util.List;
+import java.util.Set;
+
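+/**
+ * Builds the DDL job for CREATE VIEW (including the OR REPLACE and ALTER VIEW variants):
+ * validate the target name, persist the view definition (plus a pre-planned SMP plan when
+ * the definition carries optimizer hints), mark the DDL in CDC, and sync the new view meta.
+ */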
+public class CreateViewJobFactory extends DdlJobFactory {
+
+    private final LogicalCreateView logicalCreateView;
+
+    private final ExecutionContext executionContext;
+
+    public CreateViewJobFactory(LogicalCreateView logicalCreateView, ExecutionContext ec) {
+        this.logicalCreateView = logicalCreateView;
+        this.executionContext = ec;
+    }
+
+    @Override
+    protected void validate() {
+    }
+
+    @Override
+    protected ExecutableDdlJob doCreate() {
+
+        String schemaName = logicalCreateView.getSchemaName();
+        String viewName = logicalCreateView.getViewName();
+        boolean isReplace = logicalCreateView.isReplace();
+        boolean isAlter = logicalCreateView.isAlter();
+        List<String> columnList = logicalCreateView.getColumnList();
+        String viewDefinition = RelUtils.toNativeSql(logicalCreateView.getDefinition(), DbType.MYSQL);
+        String planString = null;
+        String planType = null;
+
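+        // When the view definition carries optimizer hints, build the plan once up front and
+        // persist its JSON form together with the view meta (SMP plans only, see FIXME below).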
+        if (logicalCreateView.getDefinition() instanceof TDDLSqlSelect) {
+            TDDLSqlSelect tddlSqlSelect = (TDDLSqlSelect) logicalCreateView.getDefinition();
+            if (tddlSqlSelect.getHints() != null && tddlSqlSelect.getHints().size() != 0) {
+                String withHintSql =
+                    ((SQLCreateViewStatement) FastsqlUtils.parseSql(executionContext.getSql()).get(0)).getSubQuery()
+                        .toString();
+                // FIXME: only SMP plans are supported for now.
+                executionContext.getExtraCmds().put(ConnectionProperties.ENABLE_MPP, false);
+                executionContext.getExtraCmds().put(ConnectionProperties.ENABLE_PARAMETER_PLAN, false);
+                ExecutionPlan executionPlan =
+                    Planner.getInstance().plan(withHintSql, executionContext.copy());
+                if (PlanManagerUtil.canConvertToJson(executionPlan, executionContext.getParamManager())) {
+                    planString = PlanManagerUtil.relNodeToJson(executionPlan.getPlan());
+                    planType = "SMP";
+                }
+            }
+        }
+
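+        // Linear pipeline: validate -> persist view meta -> CDC mark -> sync.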
+        DdlTask validateTask = new ValidateCreateViewTask(schemaName, viewName, isReplace);
+        DdlTask addMetaTask = new CreateViewAddMetaTask(schemaName, viewName,
+            isReplace, columnList, viewDefinition, planString, planType);
+
+        DdlTask cdcMarkTask = new CdcCreateViewMarkTask(schemaName, viewName, isAlter);
+        DdlTask syncTask = new CreateViewSyncTask(schemaName, viewName);
+
+        ExecutableDdlJob executableDdlJob = new ExecutableDdlJob();
+        executableDdlJob.addSequentialTasks(Lists.newArrayList(validateTask, addMetaTask, cdcMarkTask, syncTask));
+        return executableDdlJob;
+    }
+
+    @Override
+    protected void excludeResources(Set<String> resources) {
+        resources.add(concatWithDot(logicalCreateView.getSchemaName(), logicalCreateView.getViewName()));
+    }
+
+    @Override
+    protected void sharedResources(Set<String> resources) {
+    }
+}
\ No newline at end of file
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropFileStorageJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropFileStorageJobFactory.java
index f322423bd..0134c07d7 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropFileStorageJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropFileStorageJobFactory.java
@@ -113,7 +113,7 @@ protected ExecutableDdlJob doCreate() {
         taskList.add(new DeleteFileStorageDirectoryTask(engine.name(), "/"));
 
         // close file storage
-        taskList.add(new CloseFileStorageTask(engine.name()));
+        taskList.add(new CloseFileStorageTask(engine.name(), false));
 
         executableDdlJob.addSequentialTasks(taskList);
         return executableDdlJob;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropFunctionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropFunctionJobFactory.java
index f3afe7813..17e6d6573 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropFunctionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropFunctionJobFactory.java
@@ -23,7 +23,9 @@
 import com.alibaba.polardbx.druid.sql.ast.statement.SqlDataAccess;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.udf.DropFunctionOnAllDnTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.udf.DropFunctionDropMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.udf.DropFunctionSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcDropFunctionMarkTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
 import com.alibaba.polardbx.executor.pl.StoredFunctionManager;
@@ -73,11 +75,12 @@ List<DdlTask> createTasksForOneJob() {
 
         DdlTask dropMetaTask = new DropFunctionDropMetaTask(schema, null, functionName);
         DdlTask syncTask = new DropFunctionSyncTask(schema, functionName);
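+        // Record the function drop in CDC so the DDL can be replayed downstream.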
+        CdcDropFunctionMarkTask cdcDropFunctionMarkTask = new CdcDropFunctionMarkTask(schema, functionName);
         if (statement.getSqlDataAccess() == SqlDataAccess.NO_SQL) {
             DdlTask dropFuncOnAllDbTask = new DropFunctionOnAllDnTask(schema, functionName);
-            return Lists.newArrayList(dropFuncOnAllDbTask, dropMetaTask, syncTask);
+            return Lists.newArrayList(dropFuncOnAllDbTask, dropMetaTask, cdcDropFunctionMarkTask, syncTask);
         } else {
-            return Lists.newArrayList(dropMetaTask, syncTask);
+            return Lists.newArrayList(dropMetaTask, cdcDropFunctionMarkTask, syncTask);
         }
     }
 
@@ -86,4 +89,4 @@ public static ExecutableDdlJob dropFunction(LogicalDropFunction dropFunction, Ex
         return new DropFunctionJobFactory(dropFunction, ec.getSchemaName(), ec.getParamManager().getBoolean(
             ConnectionParams.FORCE_DROP_SQL_UDF)).create();
     }
-}
\ No newline at end of file
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropIndexJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropIndexJobFactory.java
index 100196d8f..5778287c3 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropIndexJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropIndexJobFactory.java
@@ -43,6 +43,8 @@
 import java.util.Set;
 import java.util.stream.Collectors;
 
+import static com.alibaba.polardbx.common.cdc.ICdcManager.DEFAULT_DDL_VERSION_ID;
+
 public class DropIndexJobFactory extends DdlJobFactory {
 
     private final List<PhysicalPlanData> physicalPlanDataList;
@@ -91,7 +93,7 @@ private List<DdlTask> createTasksForOneJob(PhysicalPlanData physicalPlanData) {
         DdlTask phyDdlTask = new DropIndexPhyDdlTask(schemaName, physicalPlanData);
         DdlTask cdcMarkDdlTask =
             CBOUtil.isOss(schemaName, logicalTableName) || CBOUtil.isGsi(schemaName, logicalTableName) ? null :
-                new CdcDdlMarkTask(schemaName, physicalPlanData, false, false);
+                new CdcDdlMarkTask(schemaName, physicalPlanData, false, false, DEFAULT_DDL_VERSION_ID);
         DdlTask removeMetaTask = new DropIndexRemoveMetaTask(schemaName, logicalTableName, indexName);
         TableSyncTask removeMetaSyncTask = new TableSyncTask(schemaName, logicalTableName);
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropJavaFunctionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropJavaFunctionJobFactory.java
index 4d3af6610..e31e33898 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropJavaFunctionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropJavaFunctionJobFactory.java
@@ -21,6 +21,7 @@
 import com.alibaba.polardbx.common.properties.ConnectionParams;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.udf.DropJavaFunctionDropMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.udf.DropJavaFunctionSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcDropJavaFunctionMarkTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
@@ -30,7 +31,6 @@
 import com.google.common.collect.Lists;
 import org.apache.calcite.sql.SqlDropJavaFunction;
 
-import java.sql.Connection;
 import java.util.List;
 
 public class DropJavaFunctionJobFactory extends AbstractFunctionJobFactory {
@@ -61,8 +61,9 @@ List<DdlTask> createTasksForOneJob() {
         String functionName = dropFunction.getSqlDropFunction().getFuncName();
 
         DdlTask dropMetaTask = new DropJavaFunctionDropMetaTask(schema, null, functionName);
+        DdlTask cdcMarkTask = new CdcDropJavaFunctionMarkTask(schema, functionName);
         DdlTask syncTask = new DropJavaFunctionSyncTask(schema, functionName);
-        return Lists.newArrayList(dropMetaTask, syncTask);
+        return Lists.newArrayList(dropMetaTask, cdcMarkTask, syncTask);
     }
 
     public static ExecutableDdlJob dropFunction(LogicalDropJavaFunction dropFunction, ExecutionContext ec) {
@@ -74,4 +75,4 @@ public static ExecutableDdlJob dropFunction(LogicalDropJavaFunction dropFunction
         return new DropJavaFunctionJobFactory(dropFunction, ec.getSchemaName(), ec.getParamManager().getBoolean(
             ConnectionParams.FORCE_DROP_JAVA_UDF)).create();
     }
-}
\ No newline at end of file
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropJoinGroupJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropJoinGroupJobFactory.java
index c517c01fe..4e5d9035e 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropJoinGroupJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropJoinGroupJobFactory.java
@@ -19,6 +19,7 @@
 import com.alibaba.polardbx.common.exception.TddlRuntimeException;
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.DropEmptyJoinGroupTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcDropJoinGroupMarkTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
 import com.alibaba.polardbx.gms.topology.DbInfoManager;
@@ -61,6 +62,11 @@ protected ExecutableDdlJob doCreate() {
             new DropEmptyJoinGroupTask(preparedData.getSchemaName(), preparedData.getJoinGroupName(),
                 preparedData.isIfExists());
         executableDdlJob.addTask(dropEmptyJoinGroupTask);
+
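+        // Mark the join group drop in CDC, ordered after the meta change.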
+        CdcDropJoinGroupMarkTask cdcDropJoinGroupMarkTask =
+            new CdcDropJoinGroupMarkTask(preparedData.getSchemaName(), preparedData.getJoinGroupName());
+        executableDdlJob.addTask(cdcDropJoinGroupMarkTask);
+        executableDdlJob.addTaskRelationship(dropEmptyJoinGroupTask, cdcDropJoinGroupMarkTask);
         return executableDdlJob;
     }
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropPartitionTableJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropPartitionTableJobFactory.java
index 476e432e4..1df75f2cc 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropPartitionTableJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropPartitionTableJobFactory.java
@@ -19,6 +19,7 @@
 import com.alibaba.polardbx.common.Engine;
 import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
 import com.alibaba.polardbx.executor.ddl.job.factory.util.FactoryUtils;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.DropEntitySecurityAttrTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.DropPartitionTableRemoveMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.DropPartitionTableValidateTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.DropTableHideTableMetaTask;
@@ -33,6 +34,7 @@
 import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.data.DropTablePreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
 import com.alibaba.polardbx.optimizer.utils.ITimestampOracle;
 
@@ -41,15 +43,21 @@
 import java.util.Objects;
 import java.util.Set;
 
+import static com.alibaba.polardbx.common.cdc.ICdcManager.DEFAULT_DDL_VERSION_ID;
+
 public class DropPartitionTableJobFactory extends DropTableJobFactory {
 
     private List<Long> tableGroupIds = new ArrayList<>();
 
     private ExecutionContext executionContext;
 
-    public DropPartitionTableJobFactory(PhysicalPlanData physicalPlanData, ExecutionContext executionContext) {
+    private DropTablePreparedData dropTablePreparedData = null;
+
+    public DropPartitionTableJobFactory(PhysicalPlanData physicalPlanData, ExecutionContext executionContext,
+                                        DropTablePreparedData dropTablePreparedData) {
         super(physicalPlanData);
         this.executionContext = executionContext;
+        this.dropTablePreparedData = dropTablePreparedData;
     }
 
     @Override
@@ -73,7 +81,8 @@ protected ExecutableDdlJob doCreate() {
         DropTableHideTableMetaTask dropTableHideTableMetaTask =
             new DropTableHideTableMetaTask(schemaName, logicalTableName);
         DropTablePhyDdlTask phyDdlTask = new DropTablePhyDdlTask(schemaName, physicalPlanData);
-        CdcDdlMarkTask cdcDdlMarkTask = new CdcDdlMarkTask(schemaName, physicalPlanData, false, false);
+        CdcDdlMarkTask cdcDdlMarkTask =
+            new CdcDdlMarkTask(schemaName, physicalPlanData, false, false, DEFAULT_DDL_VERSION_ID);
         DropPartitionTableRemoveMetaTask removeMetaTask =
             new DropPartitionTableRemoveMetaTask(schemaName, logicalTableName);
 
@@ -111,7 +120,16 @@ protected ExecutableDdlJob doCreate() {
                 new UpdateTableRemoveTsTask(engine.name(), schemaName, logicalTableName, ts);
             tasks.add(updateTableRemoveTsTask);
         }
-        tasks.add(phyDdlTask);
+
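+        // For imported tables the physical tables are managed externally, so the physical
+        // drop is skipped and only the meta is removed.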
+        if (!dropTablePreparedData.isImportTable()) {
+            tasks.add(phyDdlTask);
+        }
+
+        DropEntitySecurityAttrTask desaTask = createDESATask();
+        if (desaTask != null) {
+            tasks.add(desaTask);
+        }
+
         if (!Engine.isFileStore(engine)) {
             tasks.add(cdcDdlMarkTask);
         }
@@ -131,7 +149,9 @@ protected ExecutableDdlJob doCreate() {
 
         executableDdlJob.setValidateTask(validateTask);
         executableDdlJob.setDropTableHideTableMetaTask(dropTableHideTableMetaTask);
-        executableDdlJob.setPhyDdlTask(phyDdlTask);
+        if (!dropTablePreparedData.isImportTable()) {
+            executableDdlJob.setPhyDdlTask(phyDdlTask);
+        }
         if (!Engine.isFileStore(engine)) {
             executableDdlJob.setCdcDdlMarkTask(cdcDdlMarkTask);
         }
@@ -157,6 +177,9 @@ protected void excludeResources(Set<String> resources) {
             String tgName = tableGroupConfig.getTableGroupRecord().getTg_name();
             resources.add(concatWithDot(schemaName, tgName));
         }
+
+        // exclude foreign key tables
+        FactoryUtils.getFkTableExcludeResources(schemaName, logicalTableName, resources);
     }
 
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropPartitionTableWithGsiJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropPartitionTableWithGsiJobFactory.java
index 05a5e7420..93245a016 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropPartitionTableWithGsiJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropPartitionTableWithGsiJobFactory.java
@@ -21,6 +21,7 @@
 import com.alibaba.polardbx.executor.ddl.job.converter.DdlJobDataConverter;
 import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
 import com.alibaba.polardbx.executor.ddl.job.factory.gsi.DropPartitionGsiJobFactory;
+import com.alibaba.polardbx.executor.ddl.job.factory.gsi.columnar.DropColumnarIndexJobFactory;
 import com.alibaba.polardbx.executor.ddl.job.factory.util.FactoryUtils;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask;
 import com.alibaba.polardbx.executor.ddl.job.task.gsi.DropPartitionTableWithGsiValidateTask;
@@ -29,6 +30,7 @@
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4DropColumnarIndex;
 import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4DropPartitionGsi;
 import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4DropPartitionTable;
 import com.alibaba.polardbx.executor.sync.GsiStatisticsSyncAction;
@@ -99,10 +101,12 @@ protected ExecutableDdlJob doCreate() {
             preparedData.getPrimaryTablePreparedData().getTableVersion());
 
         PhysicalPlanData physicalPlanData =
-            DdlJobDataConverter.convertToPhysicalPlanData(primaryTableTopology, primaryTablePhysicalPlans);
+            DdlJobDataConverter.convertToPhysicalPlanData(primaryTableTopology, primaryTablePhysicalPlans,
+                executionContext);
 
         ExecutableDdlJob4DropPartitionTable dropPrimaryTableJob = (ExecutableDdlJob4DropPartitionTable)
-            new DropPartitionTableJobFactory(physicalPlanData, executionContext).create();
+            new DropPartitionTableJobFactory(physicalPlanData, executionContext,
+                preparedData.getPrimaryTablePreparedData()).create();
 
         DdlTask validateTask = dropPrimaryTableJob.getValidateTask();
         DdlTask dropPrimaryTableSyncTask = dropPrimaryTableJob.getTableSyncTask();
@@ -110,40 +114,71 @@ protected ExecutableDdlJob doCreate() {
 
         result.addExcludeResources(dropPrimaryTableJob.getExcludeResources());
 
+        ValidateTableVersionTask validateTableVersionTask =
+            new ValidateTableVersionTask(preparedData.getPrimaryTablePreparedData().getSchemaName(), tableVersions);
+
         Map<String, DropGlobalIndexPreparedData> gsiPreparedDataMap = preparedData.getIndexTablePreparedDataMap();
         for (Map.Entry<String, DropGlobalIndexPreparedData> entry : gsiPreparedDataMap.entrySet()) {
             final DropGlobalIndexPreparedData gsiPreparedData = entry.getValue();
             final String indexTableName = gsiPreparedData.getIndexTableName();
-            ExecutableDdlJob4DropPartitionGsi dropGsiJob = (ExecutableDdlJob4DropPartitionGsi)
-                DropPartitionGsiJobFactory.create(gsiPreparedData, executionContext, true, false);
-
-            DdlTask gsiStatisticsInfoTask = new GsiStatisticsInfoSyncTask(
-                gsiPreparedData.getSchemaName(),
-                gsiPreparedData.getPrimaryTableName(),
-                gsiPreparedData.getIndexTableName(),
-                GsiStatisticsSyncAction.DELETE_RECORD,
-                null);
-            dropGsiJob.appendTask(gsiStatisticsInfoTask);
-
-            result.addTaskRelationship(validateTask, dropGsiJob.getValidateTask());
-            result.addTaskRelationship(dropPrimaryTableSyncTask, dropGsiJob.getDropGsiTableHideTableMetaTask());
-            result.addTaskRelationship(
-                dropGsiJob.getDropGsiTableHideTableMetaTask(), dropGsiJob.getDropGsiPhyDdlTask());
-            result.addTaskRelationship(
-                dropGsiJob.getDropGsiPhyDdlTask(), dropGsiJob.getGsiDropCleanUpTask());
-            result.addTaskRelationship(
-                dropGsiJob.getGsiDropCleanUpTask(), dropGsiJob.getDropGsiTableRemoveMetaTask());
-            result.addTaskRelationship(
-                dropGsiJob.getDropGsiTableRemoveMetaTask(), new TableSyncTask(schemaName, indexTableName));
-            result.addExcludeResources(dropGsiJob.getExcludeResources());
-            tableVersions.put(gsiPreparedData.getTableName(),
-                gsiPreparedData.getTableVersion());
-        }
-        ValidateTableVersionTask validateTableVersionTask =
-            new ValidateTableVersionTask(preparedData.getPrimaryTablePreparedData().getSchemaName(), tableVersions);
 
-        result.addTask(validateTableVersionTask);
-        result.addTaskRelationship(validateTableVersionTask, dropPrimaryTableJob.getHead());
+            if (gsiPreparedData.isColumnar()) {
+                // columnar index will be destroyed automatically
+                ExecutableDdlJob4DropColumnarIndex dropCciJob = (ExecutableDdlJob4DropColumnarIndex)
+                    DropColumnarIndexJobFactory.create(gsiPreparedData, executionContext, true, false);
+                result.addTask(dropCciJob.getValidateTask());
+                result.addTaskRelationship(dropCciJob.getValidateTask(),
+                    dropCciJob.getDropColumnarTableHideTableMetaTask());
+                result.addTaskRelationship(
+                    dropCciJob.getDropColumnarTableHideTableMetaTask(), dropCciJob.getCciSchemaEvolutionTask());
+                result.addTaskRelationship(
+                    dropCciJob.getCciSchemaEvolutionTask(), dropCciJob.getGsiDropCleanUpTask());
+                result.addTaskRelationship(
+                    dropCciJob.getGsiDropCleanUpTask(), dropCciJob.getDropColumnarTableRemoveMetaTask());
+                TableSyncTask indexTableSyncTask = new TableSyncTask(schemaName, indexTableName);
+                TableSyncTask tableSyncTask = new TableSyncTask(schemaName, primaryTableName);
+                result.addTaskRelationship(
+                    dropCciJob.getDropColumnarTableRemoveMetaTask(), indexTableSyncTask);
+                result.addTaskRelationship(indexTableSyncTask, tableSyncTask);
+
+                // Add relationship before the CDC mark tasks
+                result.addTaskRelationship(tableSyncTask, dropPrimaryTableJob.getHead());
+                result.addTaskRelationship(validateTableVersionTask, dropCciJob.getValidateTask());
+
+                result.addExcludeResources(dropCciJob.getExcludeResources());
+                tableVersions.put(gsiPreparedData.getTableName(),
+                    gsiPreparedData.getTableVersion());
+
+                result.addTask(validateTableVersionTask);
+            } else {
+                ExecutableDdlJob4DropPartitionGsi dropGsiJob = (ExecutableDdlJob4DropPartitionGsi)
+                    DropPartitionGsiJobFactory.create(gsiPreparedData, executionContext, true, false);
+
+                DdlTask gsiStatisticsInfoTask = new GsiStatisticsInfoSyncTask(
+                    gsiPreparedData.getSchemaName(),
+                    gsiPreparedData.getPrimaryTableName(),
+                    gsiPreparedData.getIndexTableName(),
+                    GsiStatisticsSyncAction.DELETE_RECORD,
+                    null);
+                dropGsiJob.appendTask(gsiStatisticsInfoTask);
+                result.addTaskRelationship(validateTask, dropGsiJob.getValidateTask());
+                result.addTaskRelationship(dropPrimaryTableSyncTask, dropGsiJob.getDropGsiTableHideTableMetaTask());
+                result.addTaskRelationship(
+                    dropGsiJob.getDropGsiTableHideTableMetaTask(), dropGsiJob.getDropGsiPhyDdlTask());
+                result.addTaskRelationship(
+                    dropGsiJob.getDropGsiPhyDdlTask(), dropGsiJob.getGsiDropCleanUpTask());
+                result.addTaskRelationship(
+                    dropGsiJob.getGsiDropCleanUpTask(), dropGsiJob.getDropGsiTableRemoveMetaTask());
+                result.addTaskRelationship(
+                    dropGsiJob.getDropGsiTableRemoveMetaTask(), new TableSyncTask(schemaName, indexTableName));
+                result.addExcludeResources(dropGsiJob.getExcludeResources());
+                tableVersions.put(gsiPreparedData.getTableName(),
+                    gsiPreparedData.getTableVersion());
+
+                result.addTask(validateTableVersionTask);
+                result.addTaskRelationship(validateTableVersionTask, dropPrimaryTableJob.getHead());
+            }
+        }
 
         List<String> tableNames = new ArrayList<>();
         tableNames.add(primaryTableName);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropProcedureJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropProcedureJobFactory.java
index 6f2b313f5..91a8c66b6 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropProcedureJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropProcedureJobFactory.java
@@ -23,6 +23,7 @@
 import com.alibaba.polardbx.druid.sql.ast.SQLName;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.procedure.DropProcedureDropMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.procedure.DropProcedureSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcDropProcedureMarkTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
 import com.alibaba.polardbx.executor.pl.PLUtils;
@@ -70,8 +71,10 @@ List<DdlTask> createTasksForOneJob() {
         }
 
         DdlTask dropMetaTask = new DropProcedureDropMetaTask(executionSchema, null, procedureSchema, simpleName);
+        CdcDropProcedureMarkTask cdcDropProcedureMarkTask =
+            new CdcDropProcedureMarkTask(procedureSchema, simpleName);
         DdlTask syncTask = new DropProcedureSyncTask(executionSchema, procedureSchema, simpleName);
-        return Lists.newArrayList(dropMetaTask, syncTask);
+        return Lists.newArrayList(dropMetaTask, cdcDropProcedureMarkTask, syncTask);
     }
 
     public static ExecutableDdlJob dropProcedure(LogicalDropProcedure logicalDropProcedure,
@@ -80,4 +83,4 @@ public static ExecutableDdlJob dropProcedure(LogicalDropProcedure logicalDropPro
         return new DropProcedureJobFactory(logicalDropProcedure, ec.getSchemaName(), ec.getParamManager().getBoolean(
             ConnectionParams.FORCE_DROP_PROCEDURE)).create();
     }
-}
\ No newline at end of file
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropTableGroupJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropTableGroupJobFactory.java
index 78ccbb757..743c3cf02 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropTableGroupJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropTableGroupJobFactory.java
@@ -17,7 +17,8 @@
 package com.alibaba.polardbx.executor.ddl.job.factory;
 
 import com.alibaba.polardbx.executor.ddl.job.task.basic.DropTableGroupRemoveMetaTask;
-import com.alibaba.polardbx.executor.ddl.job.task.basic.DropTableGroupValidateTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.EmptyTableGroupValidateTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcDropTableGroupMarkTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.TableGroupSyncTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
@@ -71,13 +72,17 @@ protected ExecutableDdlJob doCreate() {
         if (preparedData.isIfExists() && tableGroupConfig == null) {
             return new TransientDdlJob();
         }
-        DropTableGroupValidateTask dropTableGroupValidateTask =
-            new DropTableGroupValidateTask(preparedData.getSchemaName(),
+        EmptyTableGroupValidateTask dropTableGroupValidateTask =
+            new EmptyTableGroupValidateTask(preparedData.getSchemaName(),
                 preparedData.getTableGroupName());
         taskList.add(dropTableGroupValidateTask);
         DropTableGroupRemoveMetaTask dropTableGroupRemoveMetaTask = new DropTableGroupRemoveMetaTask(
             preparedData.getSchemaName(), preparedData.getTableGroupName());
         taskList.add(dropTableGroupRemoveMetaTask);
+        CdcDropTableGroupMarkTask cdcDropTableGroupMarkTask = new CdcDropTableGroupMarkTask(
+            preparedData.getSchemaName(), preparedData.getTableGroupName()
+        );
+        taskList.add(cdcDropTableGroupMarkTask);
         TableGroupSyncTask tableGroupSyncTask =
             new TableGroupSyncTask(preparedData.getSchemaName(), preparedData.getTableGroupName());
         taskList.add(tableGroupSyncTask);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropTableJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropTableJobFactory.java
index 5c93c1089..546d788e7 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropTableJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropTableJobFactory.java
@@ -18,6 +18,7 @@
 
 import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
 import com.alibaba.polardbx.executor.ddl.job.factory.util.FactoryUtils;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.DropEntitySecurityAttrTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.DropTablePhyDdlTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.DropTableRemoveMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.DropTableValidateTask;
@@ -28,10 +29,21 @@
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
 import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4DropTable;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityEntity;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityManager;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityLabel;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityPolicy;
+import com.alibaba.polardbx.optimizer.OptimizerContext;
+import com.alibaba.polardbx.optimizer.config.table.ColumnMeta;
 import com.google.common.collect.Lists;
 
+import java.util.ArrayList;
 import java.util.List;
+import java.util.Objects;
 import java.util.Set;
+import java.util.stream.Collectors;
+
+import static com.alibaba.polardbx.common.cdc.ICdcManager.DEFAULT_DDL_VERSION_ID;
 
 public class DropTableJobFactory extends DdlJobFactory {
 
@@ -58,7 +70,9 @@ protected ExecutableDdlJob doCreate() {
             new StoreTableLocalityTask(schemaName, logicalTableName, "", false);
         TableSyncTask tableSyncTask = new TableSyncTask(schemaName, logicalTableName);
         DropTablePhyDdlTask phyDdlTask = new DropTablePhyDdlTask(schemaName, physicalPlanData);
-        CdcDdlMarkTask cdcDdlMarkTask = new CdcDdlMarkTask(schemaName, physicalPlanData, false, false);
+        DropEntitySecurityAttrTask desaTask = createDESATask();
+        CdcDdlMarkTask cdcDdlMarkTask =
+            new CdcDdlMarkTask(schemaName, physicalPlanData, false, false, DEFAULT_DDL_VERSION_ID);
         ExecutableDdlJob4DropTable executableDdlJob = new ExecutableDdlJob4DropTable();
 
         List<DdlTask> taskList = Lists.newArrayList(
@@ -67,12 +81,13 @@ protected ExecutableDdlJob doCreate() {
             removeMetaTask,
             tableSyncTask,
             phyDdlTask,
+            desaTask,
             cdcDdlMarkTask);
 
         // sync foreign key table meta
         taskList.addAll(FactoryUtils.getFkTableSyncTasks(schemaName, logicalTableName));
 
-        executableDdlJob.addSequentialTasks(taskList);
+        executableDdlJob.addSequentialTasks(taskList.stream().filter(Objects::nonNull).collect(Collectors.toList()));
 
         //labels should be replaced by fields in ExecutableDdlJob4DropTable
         executableDdlJob.labelAsHead(validateTask);
@@ -90,10 +105,41 @@ protected ExecutableDdlJob doCreate() {
     @Override
     protected void excludeResources(Set<String> resources) {
         resources.add(concatWithDot(schemaName, logicalTableName));
+
+        // exclude foreign key tables
+        FactoryUtils.getFkTableExcludeResources(schemaName, logicalTableName, resources);
     }
 
     @Override
     protected void sharedResources(Set<String> resources) {
     }
 
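+    /**
+     * Collect the LBAC security attributes bound to this table (its table policy plus any
+     * column labels) so they can be dropped together with the table meta. Returns null
+     * when the table carries no security policy.
+     */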
+    protected DropEntitySecurityAttrTask createDESATask() {
+        List<LBACSecurityEntity> esaList = new ArrayList<>();
+        LBACSecurityPolicy policy = LBACSecurityManager.getInstance().getTablePolicy(schemaName, logicalTableName);
+        if (policy == null) {
+            return null;
+        }
+        esaList.add(new LBACSecurityEntity(
+            LBACSecurityEntity.EntityKey.createTableKey(schemaName, logicalTableName),
+            LBACSecurityEntity.EntityType.TABLE,
+            policy.getPolicyName()));
+        List<ColumnMeta> columnMetas =
+            OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(logicalTableName).getAllColumns();
+        for (ColumnMeta columnMeta : columnMetas) {
+            LBACSecurityLabel label =
+                LBACSecurityManager.getInstance().getColumnLabel(schemaName, logicalTableName, columnMeta.getName());
+            if (label == null) {
+                continue;
+            }
+            esaList.add(new LBACSecurityEntity(
+                LBACSecurityEntity.EntityKey.createColumnKey(schemaName, logicalTableName, columnMeta.getName()),
+                LBACSecurityEntity.EntityType.COLUMN,
+                label.getLabelName()
+            ));
+        }
+        return esaList.isEmpty() ? null : new DropEntitySecurityAttrTask(schemaName, logicalTableName, esaList);
+    }
+
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropTableWithGsiJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropTableWithGsiJobFactory.java
index 61a8c6ef1..540a754e2 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropTableWithGsiJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropTableWithGsiJobFactory.java
@@ -95,7 +95,8 @@ protected ExecutableDdlJob doCreate() {
             preparedData.getPrimaryTablePreparedData().getTableVersion());
 
         PhysicalPlanData physicalPlanData =
-            DdlJobDataConverter.convertToPhysicalPlanData(primaryTableTopology, primaryTablePhysicalPlans);
+            DdlJobDataConverter.convertToPhysicalPlanData(primaryTableTopology, primaryTablePhysicalPlans,
+                executionContext);
         ExecutableDdlJob4DropTable dropPrimaryTableJob =
             (ExecutableDdlJob4DropTable) new DropTableJobFactory(physicalPlanData).create();
         result.combineTasks(dropPrimaryTableJob);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropViewJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropViewJobFactory.java
new file mode 100644
index 000000000..68f9c164e
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/DropViewJobFactory.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.factory;
+
+import com.alibaba.polardbx.executor.ddl.job.task.basic.DropViewRemoveMetaTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.DropViewSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcDropViewMarkTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.ValidateDropViewTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalDropView;
+import com.google.common.collect.Lists;
+
+import java.util.Set;
+
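+/**
+ * Builds the DDL job for DROP VIEW: validate, remove the view meta, mark the DDL in CDC,
+ * and finally sync the removal.
+ */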
+public class DropViewJobFactory extends DdlJobFactory {
+
+    private final LogicalDropView logicalDropView;
+
+    public DropViewJobFactory(LogicalDropView logicalDropView) {
+        this.logicalDropView = logicalDropView;
+    }
+
+    @Override
+    protected void validate() {
+    }
+
+    @Override
+    protected ExecutableDdlJob doCreate() {
+        String schemaName = logicalDropView.getSchemaName();
+        String viewName = logicalDropView.getViewName();
+        boolean ifExists = logicalDropView.isIfExists();
+
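+        // Linear pipeline: validate -> remove view meta -> CDC mark -> sync.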
+        DdlTask validateTask = new ValidateDropViewTask(schemaName, viewName, ifExists);
+        DdlTask removeMetaTask = new DropViewRemoveMetaTask(schemaName, viewName);
+        DdlTask cdcMarkTask = new CdcDropViewMarkTask(schemaName, viewName);
+        DdlTask syncTask = new DropViewSyncTask(schemaName, viewName);
+
+        ExecutableDdlJob executableDdlJob = new ExecutableDdlJob();
+        executableDdlJob.addSequentialTasks(Lists.newArrayList(validateTask, removeMetaTask, cdcMarkTask, syncTask));
+        return executableDdlJob;
+    }
+
+    @Override
+    protected void excludeResources(Set<String> resources) {
+        resources.add(concatWithDot(logicalDropView.getSchemaName(), logicalDropView.getViewName()));
+    }
+
+    @Override
+    protected void sharedResources(Set<String> resources) {
+    }
+}
\ No newline at end of file
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/InsertOverwriteJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/InsertOverwriteJobFactory.java
index be4c9ebcd..174f519b1 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/InsertOverwriteJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/InsertOverwriteJobFactory.java
@@ -39,8 +39,6 @@
 import java.util.Map;
 import java.util.Set;
 
-import static org.apache.calcite.sql.SqlIdentifier.surroundWithBacktick;
-
 /**
  * @author lijiu.lzw
  */
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/LogicalAlterDatabaseSetLocalityFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/LogicalAlterDatabaseSetLocalityFactory.java
index 879c529ff..bcd355b59 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/LogicalAlterDatabaseSetLocalityFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/LogicalAlterDatabaseSetLocalityFactory.java
@@ -18,6 +18,8 @@
 
 import com.alibaba.polardbx.common.exception.TddlRuntimeException;
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.common.model.privilege.DbInfo;
+import com.alibaba.polardbx.executor.ddl.job.task.storagepool.AlterDatabaseLocalityTask;
 import com.alibaba.polardbx.executor.ddl.job.task.storagepool.AlterDatabaseModifyStorageInfoTask;
 import com.alibaba.polardbx.executor.ddl.job.task.storagepool.AlterDatabaseStorageInstValidateTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.BackgroupRebalanceTask;
@@ -25,6 +27,11 @@
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
 import com.alibaba.polardbx.gms.locality.LocalityDesc;
+import com.alibaba.polardbx.gms.metadb.MetaDbDataSource;
+import com.alibaba.polardbx.gms.topology.DbInfoManager;
+import com.alibaba.polardbx.gms.topology.DbTopologyManager;
+import com.alibaba.polardbx.gms.topology.StorageInfoAccessor;
+import com.alibaba.polardbx.gms.topology.StorageInfoRecord;
 import com.alibaba.polardbx.gms.util.InstIdUtil;
 import com.alibaba.polardbx.optimizer.locality.LocalityInfoUtils;
 import com.alibaba.polardbx.optimizer.locality.LocalityManager;
@@ -32,7 +39,9 @@
 import com.google.common.collect.ImmutableList;
 import org.apache.commons.lang.StringUtils;
 
+import java.sql.Connection;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import java.util.stream.Collectors;
@@ -55,92 +64,192 @@ public LogicalAlterDatabaseSetLocalityFactory(String schemaName, String targetLo
     protected void validate() {
     }
 
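+    /**
+     * Storage instances flagged non-deletable in metadb; a target dn list that would drop
+     * any of them is rejected below.
+     */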
+    protected Set<String> getUndeletableDnIds() {
+        Set<String> nonDeletableStorage;
+        try (Connection metaDbConn = MetaDbDataSource.getInstance().getConnection()) {
+            nonDeletableStorage = DbTopologyManager.getNonDeletableStorageInst(metaDbConn);
+        } catch (Throwable ex) {
+            throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS,
+                "failed to fetch the non-deletable storage list");
+        }
+        return nonDeletableStorage;
+    }
+
+    protected Set<String> getAllStorageInstIds(String instId) {
+        StorageInfoAccessor storageInfoAccessor = new StorageInfoAccessor();
+        try (Connection metaDbConn = MetaDbDataSource.getInstance().getConnection()) {
+            storageInfoAccessor.setConnection(metaDbConn);
+            List<StorageInfoRecord> storageInfoRecords = storageInfoAccessor.getStorageInfosByInstId(instId);
+            return storageInfoRecords.stream().map(o -> o.storageInstId).collect(Collectors.toSet());
+        } catch (Throwable ex) {
+            throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS,
+                "failed to fetch the storage info list");
+        }
+    }
+
     @Override
     protected ExecutableDdlJob doCreate() {
         Boolean appendStoragePool = true;
+        Boolean appendDn = true;
         List<String> storagePoolNames = new ArrayList<>();
         String instId = InstIdUtil.getInstId();
         ExecutableDdlJob executableDdlJob = new ExecutableDdlJob();
         LocalityDesc targetLocalityDesc = LocalityInfoUtils.parse(targetLocality);
         String originalLocality = LocalityManager.getInstance().getLocalityOfDb(schemaName).getLocality();
         LocalityDesc originalLocalityDesc = LocalityInfoUtils.parse(originalLocality);
-        if (originalLocalityDesc.getPrimaryStoragePoolName()
-            .equalsIgnoreCase(targetLocalityDesc.getPrimaryStoragePoolName())) {
-            List originalStoragePoolNames = originalLocalityDesc.getStoragePoolNames();
-            List targetStoragePoolNames = targetLocalityDesc.getStoragePoolNames();
-            if (originalStoragePoolNames.containsAll(targetStoragePoolNames)) {
-                if (targetStoragePoolNames.containsAll(originalStoragePoolNames)) {
-                    String errMsg = String.format(
-                        "invalid storage pool name list! '%s', the same with before.",
-                        StringUtils.join(targetStoragePoolNames, ","),
-                        originalLocalityDesc.getPrimaryStoragePoolName());
-                    throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS, errMsg);
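+        // Locality expressed as a plain dn list (no storage pool definition): validate the
+        // target dn set against the instance topology, then rebalance onto the added dns or
+        // drain the removed ones.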
+        if (!originalLocalityDesc.hasStoragePoolDefinition()) {
+            Set<String> originalDnSet = originalLocalityDesc.getDnSet();
+            Set<String> targetDnSet = targetLocalityDesc.getDnSet();
+            Set<String> fullDnSet = getAllStorageInstIds(instId);
+            Set<String> dnSet = new HashSet<>();
+            Set<String> undeletableDnSet = getUndeletableDnIds();
+            if (!fullDnSet.containsAll(targetDnSet)) {
+                Set<String> invalidDnSet =
+                    targetDnSet.stream().filter(o -> !fullDnSet.contains(o)).collect(Collectors.toSet());
+                String errMsg =
+                    String.format(
+                        "invalid dn list! dn_id '%s' is not a dn of this instance",
+                        StringUtils.join(invalidDnSet, ","));
+                throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS, errMsg);
+            }
 
-                }
-                storagePoolNames = originalStoragePoolNames.stream().filter(o -> !targetStoragePoolNames.contains(o))
-                    .collect(Collectors.toList());
-                appendStoragePool = false;
-                if (storagePoolNames.contains(originalLocalityDesc.getPrimaryStoragePoolName())) {
-                    String errMsg = String.format(
-                        "invalid storage pool name list! '%s', must contain primary storage pool name '%s'.",
-                        StringUtils.join(targetStoragePoolNames, ","),
-                        originalLocalityDesc.getPrimaryStoragePoolName());
-                    throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS, errMsg);
+            if (originalDnSet.containsAll(targetDnSet) && targetDnSet.containsAll(originalDnSet)) {
+                String errMsg =
+                    String.format(
+                        "invalid dn list! '%s', the same as before",
+                        StringUtils.join(targetDnSet, ","));
+                throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS, errMsg);
 
+            }
+            if (originalDnSet.containsAll(targetDnSet)) {
+                appendDn = false;
+                dnSet = originalDnSet.stream().filter(o -> !targetDnSet.contains(o)).collect(Collectors.toSet());
+                List<String> undeletableDnList =
+                    originalDnSet.stream().filter(o -> undeletableDnSet.contains(o)).collect(Collectors.toList());
+                if (!targetDnSet.containsAll(undeletableDnList)) {
+                    String errMsg =
+                        String.format(
+                            "invalid dn list! '%s', must contain undeletable dn_id '%s'",
+                            StringUtils.join(targetDnSet, ","), StringUtils.join(undeletableDnSet, ","));
+                    throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS, errMsg);
                 }
-            } else if (targetStoragePoolNames.containsAll(originalStoragePoolNames)) {
-                storagePoolNames = targetStoragePoolNames.stream().filter(o -> !originalStoragePoolNames.contains(o))
-                    .collect(Collectors.toList());
-                appendStoragePool = true;
+            } else if (targetDnSet.containsAll(originalDnSet)) {
+                appendDn = true;
+                dnSet = targetDnSet.stream().filter(o -> !originalDnSet.contains(o)).collect(Collectors.toSet());
             } else {
                 String errMsg =
-                    String.format("invalid storage pool name list! '%s', must be consistency with original settings.",
-                        StringUtils.join(targetStoragePoolNames, ","));
+                    String.format(
+                        "invalid dn list! '%s', must be a subset or a superset of the original dn list",
+                        StringUtils.join(targetDnSet, ","));
                 throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS, errMsg);
             }
-        } else {
-            String errMsg = String.format("invalid primary storage pool name! '%s', must equal to original settings.",
-                targetLocalityDesc.getPrimaryStoragePoolName());
-            throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS, errMsg);
-        }
+            if (appendDn) {
 
-        if (appendStoragePool) {
-
-            DdlTask alterDatabaseStorageInstValidateTask = new AlterDatabaseStorageInstValidateTask(schemaName, instId,
-                "append", storagePoolNames
-            );
-            DdlTask alterDatabaseModifyStorageInfoTask =
-                new AlterDatabaseModifyStorageInfoTask(schemaName, instId, targetLocalityDesc.toString(),
-                    storagePoolNames);
-            String rebalanceSql = "refresh topology";
-            DdlTask backgroundAppendStoragePoolTask = new BackgroupRebalanceTask(schemaName, rebalanceSql);
-            executableDdlJob.addSequentialTasks(ImmutableList.of(
-                alterDatabaseStorageInstValidateTask,
-                alterDatabaseModifyStorageInfoTask,
-                backgroundAppendStoragePoolTask
-            ));
+                String rebalanceSql = "SCHEDULE REBALANCE DATABASE";
+                DdlTask alterDatabaseModifyLocalityTask =
+                    new AlterDatabaseLocalityTask(schemaName, instId, targetLocality);
+                DdlTask backgroundAppendStoragePoolTask = new BackgroupRebalanceTask(schemaName, rebalanceSql);
+                executableDdlJob.addSequentialTasks(ImmutableList.of(
+                    alterDatabaseModifyLocalityTask,
+                    backgroundAppendStoragePoolTask
+                ));
+            } else {
+                String drainNodeListStr = StringUtils.join(dnSet, ",");
+                String rebalanceSql = String.format("SCHEDULE REBALANCE DATABASE DRAIN_NODE = '%s'", drainNodeListStr);
+                DdlTask alterDatabaseModifyLocalityTask =
+                    new AlterDatabaseLocalityTask(schemaName, instId, targetLocality);
+                DdlTask backgroundAppendStoragePoolTask = new BackgroupRebalanceTask(schemaName, rebalanceSql);
+                executableDdlJob.addSequentialTasks(ImmutableList.of(
+                    alterDatabaseModifyLocalityTask,
+                    backgroundAppendStoragePoolTask
+                ));
+            }
         } else {
-            DdlTask alterDatabaseStorageInstValidateTask = new AlterDatabaseStorageInstValidateTask(schemaName, instId,
-                "append", storagePoolNames
-            );
-            DdlTask alterDatabaseModifyStorageInfoTask =
-                new AlterDatabaseModifyStorageInfoTask(schemaName, instId, targetLocalityDesc.toString(),
-                    storagePoolNames);
-            String rebalanceSqlStmt = "rebalance database drain_node = '%s' drain_storage_pool='%s'";
-            String storagePoolNamesStr = StringUtils.join(storagePoolNames, ",");
-            List drainNodeList = storagePoolNames.stream()
-                .map(o -> StoragePoolManager.getInstance().getStoragePoolInfo(o).getDnLists()).flatMap(
-                    o -> o.stream()).collect(
-                    Collectors.toList());
-            String drainNodesStr = StringUtils.join(drainNodeList, ",");
-            String rebalanceSql = String.format(rebalanceSqlStmt, drainNodesStr, storagePoolNamesStr);
-            DdlTask backgroundAppendStoragePoolTask = new BackgroupRebalanceTask(schemaName, rebalanceSql);
-            executableDdlJob.addSequentialTasks(ImmutableList.of(
-                alterDatabaseStorageInstValidateTask,
-                alterDatabaseModifyStorageInfoTask,
-                backgroundAppendStoragePoolTask
-            ));
+            if (originalLocalityDesc.getPrimaryStoragePoolName()
+                .equalsIgnoreCase(targetLocalityDesc.getPrimaryStoragePoolName())) {
+                List<String> originalStoragePoolNames = originalLocalityDesc.getStoragePoolNames();
+                List<String> targetStoragePoolNames = targetLocalityDesc.getStoragePoolNames();
+                if (originalStoragePoolNames.containsAll(targetStoragePoolNames)) {
+                    if (targetStoragePoolNames.containsAll(originalStoragePoolNames)) {
+                        String errMsg = String.format(
+                            "invalid storage pool name list! '%s', the same as before.",
+                            StringUtils.join(targetStoragePoolNames, ","));
+                        throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS, errMsg);
+
+                    }
+                    storagePoolNames =
+                        originalStoragePoolNames.stream().filter(o -> !targetStoragePoolNames.contains(o))
+                            .collect(Collectors.toList());
+                    appendStoragePool = false;
+                    if (storagePoolNames.contains(originalLocalityDesc.getPrimaryStoragePoolName())) {
+                        String errMsg = String.format(
+                            "invalid storage pool name list! '%s', must contain primary storage pool name '%s'.",
+                            StringUtils.join(targetStoragePoolNames, ","),
+                            originalLocalityDesc.getPrimaryStoragePoolName());
+                        throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS, errMsg);
+
+                    }
+                } else if (targetStoragePoolNames.containsAll(originalStoragePoolNames)) {
+                    storagePoolNames =
+                        targetStoragePoolNames.stream().filter(o -> !originalStoragePoolNames.contains(o))
+                            .collect(Collectors.toList());
+                    appendStoragePool = true;
+                } else {
+                    String errMsg =
+                        String.format(
+                            "invalid storage pool name list! '%s', must be consistent with the original settings.",
+                            StringUtils.join(targetStoragePoolNames, ","));
+                    throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS, errMsg);
+                }
+            } else {
+                String errMsg =
+                    String.format("invalid primary storage pool name! '%s', must be equal to the original setting.",
+                        targetLocalityDesc.getPrimaryStoragePoolName());
+                throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS, errMsg);
+            }
 
+            if (appendStoragePool) {
+
+                DdlTask alterDatabaseStorageInstValidateTask =
+                    new AlterDatabaseStorageInstValidateTask(schemaName, instId,
+                        "append", storagePoolNames
+                    );
+                DdlTask alterDatabaseModifyStorageInfoTask =
+                    new AlterDatabaseModifyStorageInfoTask(schemaName, instId, targetLocalityDesc.toString(),
+                        storagePoolNames);
+                String rebalanceSql = "refresh topology";
+                DdlTask backgroundAppendStoragePoolTask = new BackgroupRebalanceTask(schemaName, rebalanceSql);
+                executableDdlJob.addSequentialTasks(ImmutableList.of(
+                    alterDatabaseStorageInstValidateTask,
+                    alterDatabaseModifyStorageInfoTask,
+                    backgroundAppendStoragePoolTask
+                ));
+            } else {
+                DdlTask alterDatabaseStorageInstValidateTask =
+                    new AlterDatabaseStorageInstValidateTask(schemaName, instId,
+                        "append", storagePoolNames
+                    );
+                DdlTask alterDatabaseModifyStorageInfoTask =
+                    new AlterDatabaseModifyStorageInfoTask(schemaName, instId, targetLocalityDesc.toString(),
+                        storagePoolNames);
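+                // Every DN in the drained pools becomes a drain_node target of the background rebalance.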
+                String rebalanceSqlStmt = "rebalance database drain_node = '%s' drain_storage_pool='%s'";
+                String storagePoolNamesStr = StringUtils.join(storagePoolNames, ",");
+                List<String> drainNodeList = storagePoolNames.stream()
+                    .map(o -> StoragePoolManager.getInstance().getStoragePoolInfo(o).getDnLists())
+                    .flatMap(o -> o.stream())
+                    .collect(Collectors.toList());
+                String drainNodesStr = StringUtils.join(drainNodeList, ",");
+                String rebalanceSql = String.format(rebalanceSqlStmt, drainNodesStr, storagePoolNamesStr);
+                DdlTask backgroundDrainStoragePoolTask = new BackgroupRebalanceTask(schemaName, rebalanceSql);
+                executableDdlJob.addSequentialTasks(ImmutableList.of(
+                    alterDatabaseStorageInstValidateTask,
+                    alterDatabaseModifyStorageInfoTask,
+                    backgroundDrainStoragePoolTask
+                ));
+            }
         }
 
         return executableDdlJob;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/LogicalAlterInstanceReadonlyStatusFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/LogicalAlterInstanceReadonlyStatusFactory.java
new file mode 100644
index 000000000..533707b41
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/LogicalAlterInstanceReadonlyStatusFactory.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.factory;
+
+import com.alibaba.polardbx.executor.ddl.job.task.basic.ChangeInstanceReadonlyStatusTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.GlobalAcquireMdlLockInDbSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.GlobalReleaseMdlLockInDbSyncTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.gms.topology.DbInfoManager;
+import com.alibaba.polardbx.gms.topology.SystemDbHelper;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
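+ * DDL job factory that switches the whole instance between read-only and read-write status.
+ *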
+ * Created by zhuqiwei.
+ *
+ * @author zhuqiwei
+ */
+public class LogicalAlterInstanceReadonlyStatusFactory extends DdlJobFactory {
+    protected final boolean readonly;
+
+    public LogicalAlterInstanceReadonlyStatusFactory(boolean readonly) {
+        this.readonly = readonly;
+    }
+
+    @Override
+    protected void validate() {
+    }
+
+    @Override
+    protected ExecutableDdlJob doCreate() {
+
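+        // Instance-scoped DDL has no user schema of its own, so the job is anchored on the built-in cdc schema.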
+        final String defaultSchema = "__cdc__";
+        DbInfoManager dbInfoManager = DbInfoManager.getInstance();
+        List<String> allSchemaList = dbInfoManager
+            .getDbList().stream().filter(s -> !SystemDbHelper.isDBBuildIn(s))
+            .collect(Collectors.toList());
+
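+        // Acquire the global MDL lock in every user schema, flip the read-only flag, then release the locks.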
+        DdlTask globalAcquireMdlLockInDbTask = new GlobalAcquireMdlLockInDbSyncTask(
+            defaultSchema,
+            ImmutableSet.copyOf(allSchemaList)
+        );
+        DdlTask globalReleaseMdlLockInDbTask = new GlobalReleaseMdlLockInDbSyncTask(
+            defaultSchema,
+            ImmutableSet.copyOf(allSchemaList)
+        );
+        DdlTask changeDatabaseReadWriteStatus = new ChangeInstanceReadonlyStatusTask(defaultSchema, readonly);
+
+        ExecutableDdlJob executableDdlJob = new ExecutableDdlJob();
+        executableDdlJob.addSequentialTasks(ImmutableList.of(
+            globalAcquireMdlLockInDbTask,
+            changeDatabaseReadWriteStatus,
+            globalReleaseMdlLockInDbTask
+        ));
+
+        return executableDdlJob;
+    }
+
+    @Override
+    protected void excludeResources(Set<String> resources) {
+        // forbid DDL on all databases while the instance status is being changed
+        resources.add("__cdc__");
+    }
+
+    @Override
+    protected void sharedResources(Set<String> resources) {
+    }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/LogicalSequenceDdlJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/LogicalSequenceDdlJobFactory.java
new file mode 100644
index 000000000..16e739e48
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/LogicalSequenceDdlJobFactory.java
@@ -0,0 +1,103 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.factory;
+
+import com.alibaba.polardbx.executor.ddl.job.task.basic.LogicalHandleSequenceTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.LogicalSequenceValidateTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.SequenceClearPlanCacheSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.SequenceSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcLogicalSequenceMarkTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.google.common.collect.ImmutableList;
+import org.apache.calcite.sql.SequenceBean;
+import org.eclipse.jetty.util.StringUtil;
+
+import java.util.List;
+import java.util.Set;
+
+/**
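+ * DDL job factory for logical sequence DDL: validate the statement, apply it, write the CDC mark,
+ * sync the change to all nodes, and flush the related plan caches.
+ *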
+ * Created by zhuqiwei.
+ *
+ * @author zhuqiwei
+ */
+public class LogicalSequenceDdlJobFactory extends DdlJobFactory {
+    protected final SequenceBean sequenceBean;
+    protected final ExecutionContext executionContext;
+    protected final String schemaName;
+
+    protected final String tableName;
+
+    public LogicalSequenceDdlJobFactory(String schemaName, String tableName, SequenceBean sequenceBean,
+                                        ExecutionContext executionContext) {
+        this.sequenceBean = sequenceBean;
+        this.executionContext = executionContext;
+        this.schemaName = schemaName;
+        this.tableName = tableName;
+    }
+
+    @Override
+    protected void validate() {
+    }
+
+    @Override
+    protected void excludeResources(Set<String> resources) {
+        resources.add(concatWithDot(schemaName, sequenceBean.getName()));
+    }
+
+    @Override
+    protected void sharedResources(Set<String> resources) {
+        if (!StringUtil.isEmpty(tableName)) {
+            resources.add(concatWithDot(schemaName, tableName));
+        }
+    }
+
+    @Override
+    protected ExecutableDdlJob doCreate() {
+        LogicalSequenceValidateTask logicalSequenceValidateTask =
+            new LogicalSequenceValidateTask(schemaName, sequenceBean);
+        LogicalHandleSequenceTask logicalHandleSequenceTask =
+            new LogicalHandleSequenceTask(schemaName, tableName, sequenceBean);
+        CdcLogicalSequenceMarkTask cdcMarkTask =
+            new CdcLogicalSequenceMarkTask(schemaName, sequenceBean.getName(), executionContext.getOriginSql(),
+                sequenceBean.getKind());
+        SequenceSyncTask sequenceSyncTask =
+            new SequenceSyncTask(schemaName, sequenceBean.getName(), sequenceBean.getKind());
+        SequenceClearPlanCacheSyncTask sequenceClearPlanCacheSyncTask =
+            new SequenceClearPlanCacheSyncTask(schemaName, sequenceBean.getName(), sequenceBean.getKind());
+
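+        // Run strictly in order: validate -> apply -> CDC mark -> sync -> plan-cache flush.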
+        List<DdlTask> taskList = ImmutableList.of(
+            logicalSequenceValidateTask,
+            logicalHandleSequenceTask,
+            cdcMarkTask,
+            sequenceSyncTask,
+            sequenceClearPlanCacheSyncTask
+        );
+
+        ExecutableDdlJob executableDdlJob = new ExecutableDdlJob();
+        executableDdlJob.addSequentialTasks(taskList);
+        return executableDdlJob;
+    }
+
+    public static ExecutableDdlJob create(String schemaName, String tableName, SequenceBean sequenceBean,
+                                          ExecutionContext executionContext) {
+        return new LogicalSequenceDdlJobFactory(schemaName, tableName, sequenceBean, executionContext).create();
+    }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/MergeTableGroupJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/MergeTableGroupJobFactory.java
index 248d0a961..f25094057 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/MergeTableGroupJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/MergeTableGroupJobFactory.java
@@ -21,20 +21,15 @@
 import com.alibaba.polardbx.common.properties.ConnectionParams;
 import com.alibaba.polardbx.common.utils.GeneralUtil;
 import com.alibaba.polardbx.common.utils.Pair;
-import com.alibaba.polardbx.common.utils.TStringUtil;
-import com.alibaba.polardbx.executor.balancer.action.ActionMovePartition;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.SubJobTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.TablesSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMergeTableGroupMarkTask;
 import com.alibaba.polardbx.executor.ddl.job.task.shared.EmptyTask;
-import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterJoinGroupAddMetaTask;
-import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterJoinGroupValidateTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterTableGroupValidateTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.CleanupEmptyTableGroupTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.JoinGroupValidateTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.MergeTableGroupChangeTablesMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.TableGroupSyncTask;
-import com.alibaba.polardbx.executor.ddl.job.validator.TableGroupValidator;
-import com.alibaba.polardbx.executor.ddl.newengine.dag.DirectedAcyclicGraph;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
@@ -45,10 +40,8 @@
 import com.alibaba.polardbx.optimizer.OptimizerContext;
 import com.alibaba.polardbx.optimizer.config.table.TableMeta;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
-import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterJoinGroupPreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.MergeTableGroupPreparedData;
 import com.alibaba.polardbx.optimizer.tablegroup.TableGroupInfoManager;
-import com.google.common.collect.Lists;
 import org.apache.commons.lang.StringUtils;
 
 import java.util.ArrayList;
@@ -72,7 +65,6 @@ public class MergeTableGroupJobFactory extends DdlJobFactory {
     private static String MOVE_PARTITION_SQL = "alter tablegroup %s move partitions %s";
     private static String MOVE_PARTITIONS = "(%s) to %s";
 
-
     public MergeTableGroupJobFactory(MergeTableGroupPreparedData preparedData,
                                      ExecutionContext executionContext) {
         this.preparedData = preparedData;
@@ -99,13 +91,14 @@ public static ExecutableDdlJob create(MergeTableGroupPreparedData preparedData,
         return new MergeTableGroupJobFactory(preparedData, executionContext).create();
     }
 
-    private SubJobTask generateMovePartitionJob(TableGroupConfig sourceTableGroupConfig, Map<String, String> targetLocations) {
+    private SubJobTask generateMovePartitionJob(TableGroupConfig sourceTableGroupConfig,
+                                                Map<String, String> targetLocations) {
         String tableGroupName = sourceTableGroupConfig.getTableGroupRecord().getTg_name();
         sourceTableGroupConfig.getPartitionGroupRecords();
         Map<String, Set<String>> moveActions = new TreeMap<>(String::compareToIgnoreCase);
 
         boolean needMove = false;
-        for(PartitionGroupRecord record: sourceTableGroupConfig.getPartitionGroupRecords()) {
+        for (PartitionGroupRecord record : sourceTableGroupConfig.getPartitionGroupRecords()) {
             String targetDb = targetLocations.get(record.partition_name);
             String targetInst = preparedData.getDbInstMap().get(targetDb);
             if (StringUtils.isEmpty(targetInst)) {
@@ -114,16 +107,17 @@ private SubJobTask generateMovePartitionJob(TableGroupConfig sourceTableGroupCon
             }
             if (!record.getPhy_db().equalsIgnoreCase(targetDb)) {
                 needMove = true;
-                moveActions.computeIfAbsent(targetInst, o-> new TreeSet<>(String::compareToIgnoreCase)).add(record.getPartition_name());
+                moveActions.computeIfAbsent(targetInst, o -> new TreeSet<>(String::compareToIgnoreCase))
+                    .add(record.getPartition_name());
             }
         }
         if (!needMove) {
-             return null;
+            return null;
         }
 
         StringBuilder sb = new StringBuilder();
-        int i=0;
-        for(Map.Entry<String, Set<String>> entry:moveActions.entrySet()) {
+        int i = 0;
+        for (Map.Entry<String, Set<String>> entry : moveActions.entrySet()) {
             if (i > 0) {
                 sb.append(", ");
             }
@@ -148,8 +142,8 @@ protected void excludeResources(Set resources) {
         for (String tableGroup : preparedData.getSourceTableGroups()) {
             resources.add(concatWithDot(preparedData.getSchemaName(), tableGroup));
         }
-        for(Map.Entry<String, Map<String, Long>> entry : preparedData.getTablesVersion().entrySet()) {
-            for(Map.Entry<String, Long> tableEntry:entry.getValue().entrySet()) {
+        for (Map.Entry<String, Map<String, Long>> entry : preparedData.getTablesVersion().entrySet()) {
+            for (Map.Entry<String, Long> tableEntry : entry.getValue().entrySet()) {
                 resources.add(concatWithDot(preparedData.getSchemaName(), tableEntry.getKey()));
             }
         }
@@ -180,7 +174,7 @@ public ExecutableDdlJob toDdlJob() {
         job.addTask(tailTask);
         AlterTableGroupValidateTask targetTableGroupValidateTask = new AlterTableGroupValidateTask(
             preparedData.getSchemaName(), targetTableGroup, preparedData.getTablesVersion().get(targetTableGroup),
-            false, preparedData.getPhysicalGroups());
+            false, preparedData.getPhysicalGroups(), false);
         job.addTask(targetTableGroupValidateTask);
         job.addTaskRelationship(headTask, targetTableGroupValidateTask);
         job.addTaskRelationship(targetTableGroupValidateTask, midTask);
@@ -188,7 +182,8 @@ public ExecutableDdlJob toDdlJob() {
         List<String> tableGroups = new ArrayList<>();
         tableGroups.add(targetTableGroup);
         tableGroups.addAll(preparedData.getSourceTableGroups());
-        JoinGroupValidateTask joinGroupValidateTask = new JoinGroupValidateTask(preparedData.getSchemaName(),tableGroups,null,true );
+        JoinGroupValidateTask joinGroupValidateTask =
+            new JoinGroupValidateTask(preparedData.getSchemaName(), tableGroups, null, true);
         job.addTask(joinGroupValidateTask);
         job.addTaskRelationship(headTask, joinGroupValidateTask);
         job.addTaskRelationship(joinGroupValidateTask, midTask);
@@ -197,7 +192,7 @@ public ExecutableDdlJob toDdlJob() {
             AlterTableGroupValidateTask validateTask =
                 new AlterTableGroupValidateTask(preparedData.getSchemaName(), sourceTableGroup,
                     preparedData.getTablesVersion()
-                        .get(sourceTableGroup), true, null);
+                        .get(sourceTableGroup), true, null, false);
             job.addTask(validateTask);
             job.addTaskRelationship(headTask, validateTask);
             job.addTaskRelationship(validateTask, midTask);
@@ -220,7 +215,7 @@ public ExecutableDdlJob toDdlJob() {
                 subJobTask.setParentAcquireResource(true);
             }
             EmptyTask emptyTask = new EmptyTask(preparedData.getSchemaName());
-            DdlTask subJobOrEmptyTask = subJobTask == null? emptyTask:subJobTask;
+            DdlTask subJobOrEmptyTask = subJobTask == null ? emptyTask : subJobTask;
             job.addTask(subJobOrEmptyTask);
             job.addTaskRelationship(midTask, subJobOrEmptyTask);
             job.addTaskRelationship(subJobOrEmptyTask, changeTablesMetaTask);
@@ -251,6 +246,9 @@ public ExecutableDdlJob toDdlJob() {
             job.addTaskRelationship(cleanupEmptyTableGroupTask, tablesSyncTask);
         }
 
+        CdcMergeTableGroupMarkTask cdcMergeTableGroupMarkTask = new CdcMergeTableGroupMarkTask(
+            preparedData.getSchemaName(), targetTableGroup);
+
         List<DdlTask> tableGroupSyncTasks = new ArrayList<>();
         TableGroupSyncTask targetTableGroupSyncTask =
             new TableGroupSyncTask(preparedData.getSchemaName(), targetTableGroup);
@@ -263,8 +261,9 @@ public ExecutableDdlJob toDdlJob() {
         for (DdlTask tableGroupSyncTask : tableGroupSyncTasks) {
             job.addTask(tableGroupSyncTask);
             job.addTaskRelationship(tablesSyncTask, tableGroupSyncTask);
-            job.addTaskRelationship(tableGroupSyncTask, tailTask);
+            job.addTaskRelationship(tableGroupSyncTask, cdcMergeTableGroupMarkTask);
         }
+
         job.labelAsHead(headTask);
         job.labelAsTail(tailTask);
         return job;
@@ -275,18 +274,21 @@ private Set getRelatedTableGroupNames(String tableGroup, TableGroupInfoM
         tableGroups.add(tableGroup);
         TableGroupConfig tableGroupConfig = tableGroupInfoManager.getTableGroupConfigByName(tableGroup);
         if (tableGroupConfig != null) {
-            for (TablePartRecordInfoContext tablePartCon :GeneralUtil.emptyIfNull(tableGroupConfig.getAllTables())) {
-                TableMeta tableMeta = executionContext.getSchemaManager(preparedData.getSchemaName()).getTable(tablePartCon.getTableName());
+            for (String tableName : GeneralUtil.emptyIfNull(tableGroupConfig.getAllTables())) {
+                TableMeta tableMeta =
+                    executionContext.getSchemaManager(preparedData.getSchemaName()).getTable(tableName);
                 if (tableMeta.isGsi()) {
                     String primaryTableName = tableMeta.getGsiTableMetaBean().gsiMetaBean.tableName;
-                    tableMeta = OptimizerContext.getContext(preparedData.getSchemaName()).getLatestSchemaManager().getTable(primaryTableName);
-                    TableGroupConfig curTableConfig = tableGroupInfoManager.getTableGroupConfigById(tableMeta.getPartitionInfo().getTableGroupId());
-                    if (curTableConfig !=null) {
+                    tableMeta = OptimizerContext.getContext(preparedData.getSchemaName()).getLatestSchemaManager()
+                        .getTable(primaryTableName);
+                    TableGroupConfig curTableConfig =
+                        tableGroupInfoManager.getTableGroupConfigById(tableMeta.getPartitionInfo().getTableGroupId());
+                    if (curTableConfig != null) {
                         tableGroups.add(curTableConfig.getTableGroupRecord().getTg_name());
                     }
                 }
             }
         }
-        return  tableGroups;
+        return tableGroups;
     }
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/MoveDatabaseChangeSetJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/MoveDatabaseChangeSetJobFactory.java
index bda31e648..5518f8e0c 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/MoveDatabaseChangeSetJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/MoveDatabaseChangeSetJobFactory.java
@@ -17,21 +17,28 @@
 package com.alibaba.polardbx.executor.ddl.job.factory;
 
 import com.alibaba.polardbx.common.properties.ConnectionParams;
+import com.alibaba.polardbx.common.utils.Pair;
 import com.alibaba.polardbx.executor.changeset.ChangeSetManager;
 import com.alibaba.polardbx.executor.ddl.job.converter.DdlJobDataConverter;
 import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
 import com.alibaba.polardbx.executor.ddl.job.task.backfill.MoveTableBackFillTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.CloneTableDataFileTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTablePhyDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.DiscardTableSpaceDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.ImportTableSpaceDdlTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.MoveDatabaseAddMetaTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.PhysicalBackfillTask;
 import com.alibaba.polardbx.executor.ddl.job.task.changset.ChangeSetApplyExecutorInitTask;
 import com.alibaba.polardbx.executor.ddl.job.task.changset.ChangeSetApplyFinishTask;
-import com.alibaba.polardbx.executor.ddl.job.task.changset.MoveTableCheckTask;
 import com.alibaba.polardbx.executor.ddl.job.task.changset.ChangeSetCatchUpTask;
 import com.alibaba.polardbx.executor.ddl.job.task.changset.ChangeSetStartTask;
-import com.alibaba.polardbx.executor.ddl.job.task.shared.EmptyTask;
+import com.alibaba.polardbx.executor.ddl.job.task.changset.MoveTableCheckTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
 import com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils;
+import com.alibaba.polardbx.executor.physicalbackfill.PhysicalBackfillUtils;
+import com.alibaba.polardbx.gms.topology.DbTopologyManager;
+import com.alibaba.polardbx.gms.util.GroupInfoUtil;
 import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager;
 import com.alibaba.polardbx.optimizer.config.table.TableMeta;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
@@ -44,33 +51,54 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.TreeMap;
 
 import static com.alibaba.polardbx.common.properties.ConnectionParams.CHANGE_SET_APPLY_OPTIMIZATION;
+import static com.alibaba.polardbx.executor.ddl.newengine.meta.DdlJobManager.ID_GENERATOR;
 import static com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils.genChangeSetCatchUpTasks;
 
 public class MoveDatabaseChangeSetJobFactory extends MoveDatabaseSubTaskJobFactory {
     private ChangeSetApplyExecutorInitTask changeSetApplyExecutorInitTask;
     private ChangeSetApplyFinishTask changeSetApplyFinishTask;
 
+    protected final List<PhyDdlTableOperation> discardTableSpaceOperations;
+    protected final Map<String, String> tarGroupAndStorageIds;
+    protected final boolean usePhysicalBackfill;
+    protected List<DdlTask> backfillTaskEdgeNodes = new ArrayList<>(2);
+    protected List<List<DdlTask>> physicalTaskPipeLine = new ArrayList<>();
+    protected final Map<String, String> sourceAndTarDnMap;
+    protected final Map<String, Pair<String, String>> storageInstAndUserInfos;
+
     public MoveDatabaseChangeSetJobFactory(DDL ddl, MoveDatabaseItemPreparedData preparedData,
                                            List<PhyDdlTableOperation> phyDdlTableOperations,
+                                           List<PhyDdlTableOperation> discardTableSpaceOperations,
+                                           Map<String, String> sourceAndTarDnMap,
+                                           Map<String, Pair<String, String>> storageInstAndUserInfos,
                                            Map<String, List<List<String>>> tableTopology,
                                            Map<String, Set<String>> targetTableTopology,
                                            Map<String, Set<String>> sourceTableTopology,
                                            ChangeSetApplyExecutorInitTask changeSetApplyExecutorInitTask,
                                            ChangeSetApplyFinishTask changeSetApplyFinishTask,
+                                           Map<String, String> tarGroupAndStorageIds,
+                                           boolean usePhysicalBackfill,
                                            ExecutionContext executionContext) {
         super(ddl, preparedData, phyDdlTableOperations, tableTopology, targetTableTopology, sourceTableTopology,
             executionContext);
         this.changeSetApplyExecutorInitTask = changeSetApplyExecutorInitTask;
         this.changeSetApplyFinishTask = changeSetApplyFinishTask;
+        this.discardTableSpaceOperations = discardTableSpaceOperations;
+        this.tarGroupAndStorageIds = tarGroupAndStorageIds;
+        this.storageInstAndUserInfos = storageInstAndUserInfos;
+        this.sourceAndTarDnMap = sourceAndTarDnMap;
+        this.usePhysicalBackfill = usePhysicalBackfill;
     }
 
     @Override
     protected ExecutableDdlJob doCreate() {
         String schemaName = preparedData.getSchemaName();
         String tableName = preparedData.getTableName();
-
+        final boolean shareStorageMode =
+            executionContext.getParamManager().getBoolean(ConnectionParams.SHARE_STORAGE_MODE);
         DdlTask addMetaTask =
             new MoveDatabaseAddMetaTask(schemaName, ImmutableList.of(tableName), "",
                 ComplexTaskMetaManager.ComplexTaskStatus.CREATING.getValue(),
@@ -89,11 +117,24 @@ protected ExecutableDdlJob doCreate() {
         taskList.add(addMetaTask);
         //2.2 create partitioned physical table
         PhysicalPlanData physicalPlanData =
-            DdlJobDataConverter.convertToPhysicalPlanData(tableTopology, phyDdlTableOperations);
+            DdlJobDataConverter.convertToPhysicalPlanData(tableTopology, phyDdlTableOperations, executionContext);
         DdlTask phyDdlTask =
             new CreateTablePhyDdlTask(schemaName, physicalPlanData.getLogicalTableName(), physicalPlanData);
         taskList.add(phyDdlTask);
-
+        MoveTableBackFillTask moveTableBackFillTask = null;
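+        // Physical backfill ships data files directly, so the freshly created tablespaces must be discarded
+        // first; otherwise fall back to the logical row-copy backfill.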
+        if (usePhysicalBackfill) {
+            physicalPlanData =
+                DdlJobDataConverter.convertToPhysicalPlanData(tableTopology, discardTableSpaceOperations,
+                    executionContext);
+            phyDdlTask =
+                new DiscardTableSpaceDdlTask(schemaName, physicalPlanData.getLogicalTableName(),
+                    physicalPlanData);
+            taskList.add(phyDdlTask);
+        } else {
+            moveTableBackFillTask =
+                new MoveTableBackFillTask(schemaName, tableName, sourceTableTopology, targetTableTopology,
+                    preparedData.getSourceTargetGroupMap(), false, true);
+        }
+        List<String> relatedTables = new ArrayList<>();
         TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(tableName);
         if (tableMeta.isGsi()) {
@@ -104,10 +145,6 @@ protected ExecutableDdlJob doCreate() {
             relatedTables.add(tableName);
         }
 
-        MoveTableBackFillTask moveTableBackFillTask =
-            new MoveTableBackFillTask(schemaName, tableName, sourceTableTopology, targetTableTopology,
-                preparedData.getSourceTargetGroupMap(), true);
-
         Long changeSetId = ChangeSetManager.getChangeSetId();
 
         ChangeSetStartTask changeSetStartTask = new ChangeSetStartTask(
@@ -121,6 +158,7 @@ protected ExecutableDdlJob doCreate() {
         Map catchUpTasks = genChangeSetCatchUpTasks(
             schemaName,
             tableName,
+            null,
             sourceTableTopology,
             preparedData.getSourceTargetGroupMap(),
             ComplexTaskMetaManager.ComplexTaskType.MOVE_DATABASE,
@@ -139,18 +177,121 @@ protected ExecutableDdlJob doCreate() {
         final String finalStatus =
             executionContext.getParamManager().getString(ConnectionParams.SCALE_OUT_FINAL_TABLE_STATUS_DEBUG);
 
+        List<DdlTask> moveDatabaseTasks;
+        backfillTaskEdgeNodes.clear();
+        physicalTaskPipeLine.clear();
+        Map<String, String> groupAndDbMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
+        final boolean waitLsn = executionContext.getParamManager()
+            .getBoolean(ConnectionParams.PHYSICAL_BACKFILL_WAIT_LSN_WHEN_ROLLBACK);
+
+        boolean healthyCheck =
+            executionContext.getParamManager().getBoolean(ConnectionParams.PHYSICAL_BACKFILL_STORAGE_HEALTHY_CHECK);
+
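+        // Build one pipeline per physical table: clone the data files from a follower of the source DN,
+        // transfer them to the target DN, then import the tablespace on every target host.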
+        if (usePhysicalBackfill) {
+            for (Map.Entry<String, Set<String>> entry : sourceTableTopology.entrySet()) {
+                String srcGroupName = entry.getKey();
+                String tarGroupName = GroupInfoUtil.buildScaleOutGroupName(srcGroupName);
+                Pair<String, String> srcTarGroup = Pair.of(srcGroupName, tarGroupName);
+                String sourceStorageId = sourceAndTarDnMap.computeIfAbsent(srcTarGroup.getKey(),
+                    key -> DbTopologyManager.getStorageInstIdByGroupName(schemaName, srcTarGroup.getKey()));
+                String targetStorageId = tarGroupAndStorageIds.get(srcTarGroup.getValue());
+
+                String srcDbName = groupAndDbMap.computeIfAbsent(srcTarGroup.getKey(),
+                    key -> DbTopologyManager.getPhysicalDbNameByGroupKeyFromMetaDb(schemaName, srcTarGroup.getKey()));
+
+                Pair<String, String> srcDbAndGroup = Pair.of(srcDbName.toLowerCase(), srcTarGroup.getKey());
+                String tarDbName;
+                if (shareStorageMode) {
+                    tarDbName = groupAndDbMap.computeIfAbsent(srcTarGroup.getValue(),
+                        key -> GroupInfoUtil.buildScaleOutPhyDbName(schemaName, srcTarGroup.getKey()));
+                } else {
+                    tarDbName = groupAndDbMap.computeIfAbsent(srcTarGroup.getValue(), key -> srcDbName);
+                }
+                Pair<String, String> tarDbAndGroup = Pair.of(tarDbName.toLowerCase(),
+                    srcTarGroup.getValue());
+                Pair<String, Integer> sourceHostIpAndPort =
+                    PhysicalBackfillUtils.getMySQLOneFollowerIpAndPort(sourceStorageId);
+                List<Pair<String, Integer>> targetHostsIpAndPort =
+                    PhysicalBackfillUtils.getMySQLServerNodeIpAndPorts(targetStorageId, healthyCheck);
+                final long batchSize =
+                    executionContext.getParamManager().getLong(ConnectionParams.PHYSICAL_BACKFILL_BATCH_SIZE);
+                final long minUpdateBatch =
+                    executionContext.getParamManager()
+                        .getLong(ConnectionParams.PHYSICAL_BACKFILL_MIN_SUCCESS_BATCH_UPDATE);
+                final long parallelism =
+                    executionContext.getParamManager().getLong(ConnectionParams.PHYSICAL_BACKFILL_PARALLELISM);
+
+                for (String phyTb : entry.getValue()) {
+                    List<String> phyPartNames =
+                        PhysicalBackfillUtils.getPhysicalPartitionNames(schemaName, srcDbAndGroup.getValue(),
+                            srcDbAndGroup.getKey(),
+                            phyTb);
+                    CloneTableDataFileTask cloneTableDataFileTask =
+                        new CloneTableDataFileTask(schemaName, tableName, srcDbAndGroup, tarDbAndGroup, phyTb,
+                            phyPartNames, sourceStorageId, sourceHostIpAndPort, targetHostsIpAndPort, batchSize,
+                            tableMeta.isEncryption());
+                    cloneTableDataFileTask.setTaskId(ID_GENERATOR.nextId());
+
+                    List<DdlTask> importTableSpaceTasks = new ArrayList<>();
+
+                    PhysicalBackfillTask physicalBackfillTask =
+                        new PhysicalBackfillTask(schemaName, cloneTableDataFileTask.getTaskId(), tableName, phyTb,
+                            phyPartNames,
+                            srcTarGroup,
+                            Pair.of(sourceStorageId, targetStorageId), storageInstAndUserInfos, batchSize, parallelism,
+                            minUpdateBatch,
+                            waitLsn,
+                            tableMeta.isEncryption());
+
+                    storageInstAndUserInfos.computeIfAbsent(sourceStorageId,
+                        key -> PhysicalBackfillUtils.getUserPasswd(sourceStorageId));
+
+                    Pair<String, String> userAndPasswd = storageInstAndUserInfos.computeIfAbsent(targetStorageId,
+                        key -> PhysicalBackfillUtils.getUserPasswd(targetStorageId));
+
+                    for (Pair<String, Integer> hostIpAndPort : targetHostsIpAndPort) {
+                        ImportTableSpaceDdlTask importTableSpaceDdlTask =
+                            new ImportTableSpaceDdlTask(schemaName, tableName, tarDbAndGroup.getKey(), phyTb,
+                                hostIpAndPort,
+                                userAndPasswd);
+                        importTableSpaceTasks.add(importTableSpaceDdlTask);
+                    }
+                    List<DdlTask> tasks = new ArrayList<>(importTableSpaceTasks.size() + 2);
+                    tasks.add(cloneTableDataFileTask);
+                    tasks.add(physicalBackfillTask);
+                    tasks.addAll(importTableSpaceTasks);
+                    physicalTaskPipeLine.add(tasks);
+                }
+            }
+
+            moveDatabaseTasks = ChangeSetUtils.genChangeSetOnlineSchemaChangeTasks(
+                schemaName, tableName,
+                relatedTables,
+                finalStatus,
+                changeSetStartTask,
+                catchUpTasks,
+                null,
+                moveTableCheckTask,
+                moveTableCheckTwiceTask,
+                changeSetApplyFinishTask,
+                backfillTaskEdgeNodes,
+                executionContext);
+        } else {
+            moveDatabaseTasks = ChangeSetUtils.genChangeSetOnlineSchemaChangeTasks(
+                schemaName, tableName,
+                relatedTables,
+                finalStatus,
+                changeSetStartTask,
+                catchUpTasks,
+                moveTableBackFillTask,
+                moveTableCheckTask,
+                moveTableCheckTwiceTask,
+                changeSetApplyFinishTask,
+                backfillTaskEdgeNodes,
+                executionContext);
+        }
+
         final ExecutableDdlJob executableDdlJob = new ExecutableDdlJob();
-        List moveDatabaseTasks = ChangeSetUtils.genChangeSetOnlineSchemaChangeTasks(
-            schemaName, tableName,
-            relatedTables,
-            finalStatus,
-            changeSetStartTask,
-            catchUpTasks,
-            moveTableBackFillTask,
-            moveTableCheckTask,
-            moveTableCheckTwiceTask,
-            changeSetApplyFinishTask,
-            executionContext);
 
         taskList.addAll(moveDatabaseTasks);
         executableDdlJob.addSequentialTasks(taskList);
@@ -166,4 +307,12 @@ protected ExecutableDdlJob doCreate() {
 
         return executableDdlJob;
     }
+
+    public List<DdlTask> getBackfillTaskEdgeNodes() {
+        return backfillTaskEdgeNodes;
+    }
+
+    public List<List<DdlTask>> getPhysicalTaskPipeLine() {
+        return physicalTaskPipeLine;
+    }
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/MoveDatabaseJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/MoveDatabaseJobFactory.java
index 55d074d3c..d301c79b6 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/MoveDatabaseJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/MoveDatabaseJobFactory.java
@@ -21,12 +21,20 @@
 import com.alibaba.polardbx.common.properties.ConnectionParams;
 import com.alibaba.polardbx.common.utils.GeneralUtil;
 import com.alibaba.polardbx.common.utils.Pair;
+import com.alibaba.polardbx.executor.balancer.Balancer;
+import com.alibaba.polardbx.executor.balancer.stats.BalanceStats;
+import com.alibaba.polardbx.executor.balancer.stats.GroupStats;
 import com.alibaba.polardbx.executor.ddl.job.builder.MoveDatabaseBuilder;
 import com.alibaba.polardbx.executor.ddl.job.task.BaseValidateTask;
+import com.alibaba.polardbx.executor.ddl.job.task.CostEstimableDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.DdlBackfillCostRecordTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.ImportTableSpaceDdlNormalTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.InitNewStorageInstTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.MoveDatabaseAddMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.MoveDatabaseValidateTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.PauseCurrentJobTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.PhysicalBackfillTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.SyncLsnTask;
 import com.alibaba.polardbx.executor.ddl.job.task.changset.ChangeSetApplyExecutorInitTask;
 import com.alibaba.polardbx.executor.ddl.job.task.changset.ChangeSetApplyFinishTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
@@ -35,10 +43,12 @@
 import com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils;
 import com.alibaba.polardbx.executor.scaleout.ScaleOutUtils;
 import com.alibaba.polardbx.gms.topology.DbInfoManager;
+import com.alibaba.polardbx.gms.topology.DbTopologyManager;
 import com.alibaba.polardbx.gms.util.GroupInfoUtil;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
 import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager;
 import com.alibaba.polardbx.optimizer.config.table.TableMeta;
+import com.alibaba.polardbx.optimizer.context.DdlContext;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.PhyDdlTableOperation;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.MoveDatabaseItemPreparedData;
@@ -49,14 +59,15 @@
 import org.apache.commons.lang.StringUtils;
 
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Queue;
 import java.util.Set;
 import java.util.TreeMap;
 
-import static com.alibaba.polardbx.common.properties.ConnectionParams.CHANGE_SET_APPLY_OPTIMIZATION;
-
 /**
  * Created by luoyanxin.
  *
@@ -72,12 +83,16 @@ public class MoveDatabaseJobFactory extends DdlJobFactory {
     protected final Map<String, Map<String, List<List<String>>>> tablesTopologyMap;
     protected final Map<String, Map<String, Set<String>>> targetTablesTopology;
     protected final Map<String, Map<String, Set<String>>> sourceTablesTopology;
+    protected final Map<String, List<PhyDdlTableOperation>> discardTableSpacePhysicalPlansMap;
     protected final ExecutionContext executionContext;
     protected final ComplexTaskMetaManager.ComplexTaskType taskType;
+    final Map<String, String> sourceAndTarDnMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
+    final Map<String, Pair<String, String>> storageInstAndUserInfos = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
 
     public MoveDatabaseJobFactory(DDL ddl, MoveDatabasePreparedData preparedData,
                                   Map<String, MoveDatabaseItemPreparedData> tablesPrepareData,
                                   Map<String, List<PhyDdlTableOperation>> logicalTablesPhysicalPlansMap,
+                                  Map<String, List<PhyDdlTableOperation>> discardTableSpacePhysicalPlansMap,
                                   Map<String, Map<String, List<List<String>>>> tablesTopologyMap,
                                   Map<String, Map<String, Set<String>>> targetTablesTopology,
                                   Map<String, Map<String, Set<String>>> sourceTablesTopology,
@@ -87,6 +102,7 @@ public MoveDatabaseJobFactory(DDL ddl, MoveDatabasePreparedData preparedData,
         this.ddl = ddl;
         this.tablesPrepareData = tablesPrepareData;
         this.logicalTablesPhysicalPlansMap = logicalTablesPhysicalPlansMap;
+        this.discardTableSpacePhysicalPlansMap = discardTableSpacePhysicalPlansMap;
         this.tablesTopologyMap = tablesTopologyMap;
         this.targetTablesTopology = targetTablesTopology;
         this.sourceTablesTopology = sourceTablesTopology;
@@ -113,7 +129,20 @@ public void constructSubTasks(ExecutableDdlJob executableDdlJob,
         ChangeSetApplyFinishTask changeSetApplyFinishTask = new ChangeSetApplyFinishTask(preparedData.getSchemaName(),
             String.format("schema %s group %s start double write ", preparedData.getSchemaName(),
                 preparedData.getSourceTargetGroupMap()));
+        SyncLsnTask syncLsnTask = null;
+        boolean syncLsnTaskAdded = false;
         final boolean useChangeSet = ChangeSetUtils.isChangeSetProcedure(executionContext);
+        Map<String, String> tarGroupAndStorageIds = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
+        if (useChangeSet) {
+            for (Map.Entry<String, String> srcTarGroup : preparedData.getSourceTargetGroupMap().entrySet()) {
+                String targetStorageId =
+                    preparedData.getGroupAndStorageInstId().get(srcTarGroup.getKey()).getValue();
+                tarGroupAndStorageIds.put(srcTarGroup.getValue(), targetStorageId);
+            }
+        }
+        int parallelism = ScaleOutUtils.getScaleoutTaskParallelism(executionContext);
+        Queue<DdlTask> leavePipeLineQueue = new LinkedList<>();
+
         for (Map.Entry<String, Map<String, List<List<String>>>> entry : tablesTopologyMap.entrySet()) {
             String schemaName = tablesPrepareData.get(entry.getKey()).getSchemaName();
             String logicalTableName = tablesPrepareData.get(entry.getKey()).getTableName();
@@ -122,9 +151,18 @@ public void constructSubTasks(ExecutableDdlJob executableDdlJob,
             MoveDatabaseSubTaskJobFactory subTaskJobFactory;
             if (useChangeSet && ChangeSetUtils.supportUseChangeSet(taskType, tm)) {
                 subTaskJobFactory = new MoveDatabaseChangeSetJobFactory(ddl, tablesPrepareData.get(entry.getKey()),
-                    logicalTablesPhysicalPlansMap.get(entry.getKey()), tablesTopologyMap.get(entry.getKey()),
-                    targetTablesTopology.get(entry.getKey()), sourceTablesTopology.get(entry.getKey()),
-                    changeSetApplyExecutorInitTask, changeSetApplyFinishTask, executionContext);
+                    logicalTablesPhysicalPlansMap.get(entry.getKey()),
+                    discardTableSpacePhysicalPlansMap.get(entry.getKey()),
+                    sourceAndTarDnMap,
+                    storageInstAndUserInfos,
+                    tablesTopologyMap.get(entry.getKey()),
+                    targetTablesTopology.get(entry.getKey()),
+                    sourceTablesTopology.get(entry.getKey()),
+                    changeSetApplyExecutorInitTask,
+                    changeSetApplyFinishTask,
+                    tarGroupAndStorageIds,
+                    preparedData.isUsePhysicalBackfill(),
+                    executionContext);
             } else {
                 subTaskJobFactory = new MoveDatabaseSubTaskJobFactory(ddl, tablesPrepareData.get(entry.getKey()),
                     logicalTablesPhysicalPlansMap.get(entry.getKey()), tablesTopologyMap.get(entry.getKey()),
@@ -136,6 +174,61 @@ public void constructSubTasks(ExecutableDdlJob executableDdlJob,
             executableDdlJob.addTaskRelationship(tailTask, subTask.getHead());
             executableDdlJob.getExcludeResources().addAll(subTask.getExcludeResources());
             executableDdlJob.addTaskRelationship(subTask.getTail(), bringUpMoveDatabase.get(0));
+
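+            // For physical backfill, rewire the per-table backfill edge: sync the LSN once, then run the
+            // clone/transfer/import pipelines with bounded parallelism before rejoining the job tail.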
+            if (preparedData.isUsePhysicalBackfill()) {
+                if (!syncLsnTaskAdded) {
+                    Map<String, Set<String>> sourceTableTopology = sourceTablesTopology.get(entry.getKey());
+                    Map<String, String> targetGroupAndStorageIdMap = new HashMap<>();
+                    Map<String, String> sourceGroupAndStorageIdMap = new HashMap<>();
+                    for (String groupName : sourceTableTopology.keySet()) {
+                        sourceGroupAndStorageIdMap.put(groupName,
+                            DbTopologyManager.getStorageInstIdByGroupName(schemaName, groupName));
+                        targetGroupAndStorageIdMap.put(preparedData.getSourceTargetGroupMap().get(groupName),
+                            preparedData.getGroupAndStorageInstId().get(groupName).getValue());
+                    }
+                    syncLsnTask =
+                        new SyncLsnTask(schemaName, sourceGroupAndStorageIdMap, targetGroupAndStorageIdMap);
+                    executableDdlJob.addTask(syncLsnTask);
+                    syncLsnTaskAdded = true;
+                }
+                for (List<DdlTask> pipeLine : GeneralUtil.emptyIfNull(subTaskJobFactory.getPhysicalTaskPipeLine())) {
+                    DdlTask parentLeaveNode;
+                    if (leavePipeLineQueue.size() < parallelism) {
+                        parentLeaveNode = syncLsnTask;
+                    } else {
+                        parentLeaveNode = leavePipeLineQueue.poll();
+                    }
+                    executableDdlJob.removeTaskRelationship(subTaskJobFactory.getBackfillTaskEdgeNodes().get(0),
+                        subTaskJobFactory.getBackfillTaskEdgeNodes().get(1));
+                    executableDdlJob.addTaskRelationship(subTaskJobFactory.getBackfillTaskEdgeNodes().get(0),
+                        syncLsnTask);
+                    executableDdlJob.addTaskRelationship(parentLeaveNode,
+                        pipeLine.get(0));
+                    executableDdlJob.addTaskRelationship(pipeLine.get(0),
+                        pipeLine.get(1));
+                    PhysicalBackfillTask physicalBackfillTask = (PhysicalBackfillTask) pipeLine.get(1);
+                    Map<String, List<List<String>>> targetTables = new HashMap<>();
+                    String tarGroupKey = physicalBackfillTask.getSourceTargetGroup().getValue();
+                    String phyTableName = physicalBackfillTask.getPhysicalTableName();
+
+                    targetTables.computeIfAbsent(tarGroupKey, k -> new ArrayList<>())
+                        .add(Collections.singletonList(phyTableName));
+
+                    ImportTableSpaceDdlNormalTask importTableSpaceDdlNormalTask = new ImportTableSpaceDdlNormalTask(
+                        preparedData.getSchemaName(), entry.getKey(),
+                        targetTables);
+
+                    for (int i = 2; i < pipeLine.size(); i++) {
+                        executableDdlJob.addTaskRelationship(pipeLine.get(1),
+                            pipeLine.get(i));
+                        executableDdlJob.addTaskRelationship(pipeLine.get(i),
+                            importTableSpaceDdlNormalTask);
+                    }
+                    executableDdlJob.addTaskRelationship(importTableSpaceDdlNormalTask,
+                        subTaskJobFactory.getBackfillTaskEdgeNodes().get(1));
+                    leavePipeLineQueue.add(importTableSpaceDdlNormalTask);
+                }
+            }
         }
     }
 
@@ -156,12 +249,12 @@ protected ExecutableDdlJob doCreate() {
             for (String sourceGroup : entry.getValue()) {
                 if (!shareStorageMode) {
                     instGroupDbInfos.computeIfAbsent(entry.getKey(), o -> new ArrayList<>())
-                        .add(Pair.of(GroupInfoUtil.buildScaloutGroupName(sourceGroup),
+                        .add(Pair.of(GroupInfoUtil.buildScaleOutGroupName(sourceGroup),
                             GroupInfoUtil.buildPhysicalDbNameFromGroupName(sourceGroup)));
                 } else {
                     String targetPhyDb = GroupInfoUtil.buildScaleOutPhyDbName(schemaName, sourceGroup);
                     instGroupDbInfos.computeIfAbsent(entry.getKey(), o -> new ArrayList<>())
-                        .add(Pair.of(GroupInfoUtil.buildScaloutGroupName(sourceGroup), targetPhyDb));
+                        .add(Pair.of(GroupInfoUtil.buildScaleOutGroupName(sourceGroup), targetPhyDb));
                 }
             }
         }
@@ -174,21 +267,63 @@ protected ExecutableDdlJob doCreate() {
                 ComplexTaskMetaManager.ComplexTaskStatus.DOING_REORG.getValue(),
                 taskType.getValue(), 0);
 
+        DdlContext ddlContext = executionContext.getDdlContext();
+        DdlBackfillCostRecordTask costRecordTask = null;
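+        // Only top-level jobs record the estimated rows and disk size of the groups being moved,
+        // so the expected backfill cost is visible before the move starts.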
+        if (ddlContext != null && !ddlContext.isSubJob()) {
+            costRecordTask = new DdlBackfillCostRecordTask(schemaName);
+            final BalanceStats balanceStats = Balancer.collectBalanceStatsOfDatabase(schemaName);
+            List<GroupStats.GroupsOfStorage> groupsOfStorages = balanceStats.getGroups();
+            Long diskSize = 0L;
+            Long rows = 0L;
+            for (GroupStats.GroupsOfStorage groupsOfStorage : GeneralUtil.emptyIfNull(groupsOfStorages)) {
+                if (groupsOfStorage == null || groupsOfStorage.getGroupDataSizeMap() == null) {
+                    continue;
+                }
+                for (Map.Entry<String, Pair<Long, Long>> entry : groupsOfStorage.groupDataSizeMap.entrySet()) {
+                    if (preparedData.getSourceTargetGroupMap().containsKey(entry.getKey())) {
+                        rows += entry.getValue().getKey();
+                        diskSize += entry.getValue().getValue();
+                    }
+                }
+            }
+            costRecordTask.setCostInfo(
+                CostEstimableDdlTask.createCostInfo(rows, diskSize, (long) tablesPrepareData.size()));
+        }
+
         boolean skipValidator =
             executionContext.getParamManager().getBoolean(ConnectionParams.SKIP_MOVE_DATABASE_VALIDATOR);
         if (!skipValidator) {
-            executableDdlJob.addSequentialTasks(Lists.newArrayList(
-                /*the parent job of rebalance will acquire the Xlock of current schemaName before exec*/
-                moveDataBaseValidateTask,
-                initNewStorageInstTask,
-                addMetaTask
-            ));
+            List<DdlTask> initTasks = Lists.<DdlTask>newArrayList(
+                /*the parent job of rebalance will acquire the Xlock of current schemaName before exec*/
+                moveDataBaseValidateTask,
+                initNewStorageInstTask);
+            if (costRecordTask != null) {
+                initTasks.add(costRecordTask);
+            }
+            initTasks.add(addMetaTask);
+            executableDdlJob.addSequentialTasks(initTasks);
         } else {
-            executableDdlJob.addSequentialTasks(Lists.newArrayList(
-                /*the parent job of rebalance will acquire the Xlock of current schemaName before exec*/
-                initNewStorageInstTask,
-                addMetaTask
-            ));
+            List<DdlTask> initTasks = Lists.<DdlTask>newArrayList(
+                /*the parent job of rebalance will acquire the Xlock of current schemaName before exec*/
+                initNewStorageInstTask);
+            if (costRecordTask != null) {
+                initTasks.add(costRecordTask);
+            }
+            initTasks.add(addMetaTask);
+            executableDdlJob.addSequentialTasks(initTasks);
         }
 
         executableDdlJob.labelAsTail(addMetaTask);
@@ -239,8 +374,11 @@ public static ExecutableDdlJob create(@Deprecated DDL ddl,
             moveDatabaseBuilder.getTablesPreparedData();
         Map<String, List<PhyDdlTableOperation>> logicalTablesPhysicalPlansMap =
             moveDatabaseBuilder.getLogicalTablesPhysicalPlansMap();
+        Map<String, List<PhyDdlTableOperation>> discardTableSpacePhysicalPlansMap =
+            moveDatabaseBuilder.getDiscardTableSpacePhysicalPlansMap();
         return new MoveDatabaseJobFactory(ddl, preparedData, moveDatabaseItemPreparedDataMap,
-            logicalTablesPhysicalPlansMap, tablesTopologyMap, targetTablesTopology, sourceTablesTopology,
+            logicalTablesPhysicalPlansMap, discardTableSpacePhysicalPlansMap,
+            tablesTopologyMap, targetTablesTopology, sourceTablesTopology,
             ComplexTaskMetaManager.ComplexTaskType.MOVE_DATABASE,
             executionContext).create();
     }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/MoveDatabaseSubTaskJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/MoveDatabaseSubTaskJobFactory.java
index e5842e9e4..46f7e9abd 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/MoveDatabaseSubTaskJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/MoveDatabaseSubTaskJobFactory.java
@@ -102,7 +102,7 @@ protected ExecutableDdlJob doCreate() {
         taskList.add(addMetaTask);
         //2.2 create partitioned physical table
         PhysicalPlanData physicalPlanData =
-            DdlJobDataConverter.convertToPhysicalPlanData(tableTopology, phyDdlTableOperations);
+            DdlJobDataConverter.convertToPhysicalPlanData(tableTopology, phyDdlTableOperations, executionContext);
         DdlTask phyDdlTask =
             new CreateTablePhyDdlTask(schemaName, physicalPlanData.getLogicalTableName(), physicalPlanData);
         taskList.add(phyDdlTask);
@@ -164,4 +164,12 @@ DdlTask getPushDownForeignKeysTask(String schemaName, String tableName, boolean
             return new DropLogicalForeignKeyTask(schemaName, tableName, pushDownForeignKeys);
         }
     }
+
+    public List<DdlTask> getBackfillTaskEdgeNodes() {
+        return null;
+    }
+
+    public List<List<DdlTask>> getPhysicalTaskPipeLine() {
+        return null;
+    }
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/MoveDatabasesJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/MoveDatabasesJobFactory.java
index 0d69660eb..31cf7b803 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/MoveDatabasesJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/MoveDatabasesJobFactory.java
@@ -19,12 +19,12 @@
 import com.alibaba.polardbx.executor.ddl.job.task.shared.EmptyTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.executor.physicalbackfill.PhysicalBackfillUtils;
 import com.alibaba.polardbx.executor.scaleout.ScaleOutUtils;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.MoveDatabasePreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.MoveDatabasesPreparedData;
 import org.apache.calcite.rel.core.DDL;
-import org.apache.calcite.sql.SqlKind;
 
 import java.util.List;
 import java.util.Map;
@@ -63,11 +63,13 @@ protected ExecutableDdlJob doCreate() {
         executableDdlJob.labelAsHead(emptyTask);
         EmptyTask tailTask = new EmptyTask(defaultSchemaName);
         executableDdlJob.labelAsTail(tailTask);
-
+        boolean usePhysicalBackfill =
+            PhysicalBackfillUtils.isSupportForPhysicalBackfill(defaultSchemaName, executionContext);
         for (Map.Entry<String, Map<String, List<String>>> entry : preparedData.getLogicalDbStorageGroups()
             .entrySet()) {
             MoveDatabasePreparedData moveDatabasePreparedData =
                 new MoveDatabasePreparedData(entry.getKey(), entry.getValue(), preparedData.getSourceSql());
+            moveDatabasePreparedData.setUsePhysicalBackfill(usePhysicalBackfill);
             ExecutableDdlJob dbExecDdlJob =
                 MoveDatabaseJobFactory.create(ddl, moveDatabasePreparedData, executionContext);
             executableDdlJob.combineTasks(dbExecDdlJob);
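
Note that the hunk above evaluates PhysicalBackfillUtils.isSupportForPhysicalBackfill once and stamps the result onto every per-database MoveDatabasePreparedData, rather than re-checking inside the loop. A minimal standalone sketch of that hoisting, with illustrative types in place of the real prepared-data classes:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class HoistedCheckDemo {
    // Stand-in for PhysicalBackfillUtils.isSupportForPhysicalBackfill(...).
    static boolean supportsPhysicalBackfill() {
        return true;
    }

    public static void main(String[] args) {
        List<String> specs = new ArrayList<>();
        // Checked once, outside the loop ...
        boolean usePhysicalBackfill = supportsPhysicalBackfill();
        for (String db : Arrays.asList("db1", "db2", "db3")) {
            // ... then stamped on every per-database job spec.
            specs.add(db + ":usePhysicalBackfill=" + usePhysicalBackfill);
        }
        System.out.println(specs);
    }
}
```
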
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/PureCdcDdlMark4CreateTableJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/PureCdcDdlMark4CreateTableJobFactory.java
new file mode 100644
index 000000000..084bae520
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/PureCdcDdlMark4CreateTableJobFactory.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.factory;
+
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcCreateTableIfNotExistsMarkTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
+import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.gms.metadb.limit.LimitValidator;
+
+import java.util.Set;
+
+public class PureCdcDdlMark4CreateTableJobFactory extends DdlJobFactory {
+
+    private final String schemaName;
+    private final String tableName;
+
+    public PureCdcDdlMark4CreateTableJobFactory(String schemaName, String tableName) {
+        this.schemaName = schemaName;
+        this.tableName = tableName;
+    }
+
+    @Override
+    protected void validate() {
+        LimitValidator.validateTableNameLength(schemaName);
+        LimitValidator.validateTableNameLength(tableName);
+    }
+
+    @Override
+    protected ExecutableDdlJob doCreate() {
+        ExecutableDdlJob executableDdlJob = new ExecutableDdlJob();
+        CdcCreateTableIfNotExistsMarkTask task = new CdcCreateTableIfNotExistsMarkTask(schemaName, tableName);
+        executableDdlJob.addTask(task);
+        return executableDdlJob;
+    }
+
+    @Override
+    protected void excludeResources(Set<String> resources) {
+        resources.add(concatWithDot(schemaName, tableName));
+    }
+
+    @Override
+    protected void sharedResources(Set<String> resources) {
+
+    }
+
+}
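
For orientation, this new factory yields a single-task job: validate() checks name lengths, doCreate() wraps one CdcCreateTableIfNotExistsMarkTask, and excludeResources() locks the schema.table pair. Assuming DdlJobFactory.create() runs validate() before doCreate(), as the other factories in this patch rely on, usage would look like the fragment below (schema and table names are placeholders):

```java
// Fragment only; assumes the PolarDB-X classes above are on the classpath.
PureCdcDdlMark4CreateTableJobFactory factory =
    new PureCdcDdlMark4CreateTableJobFactory("mydb", "t_order");
ExecutableDdlJob job = factory.create(); // one CDC mark task, no physical DDL
```
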
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/PureCdcDdlMark4DropTableJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/PureCdcDdlMark4DropTableJobFactory.java
new file mode 100644
index 000000000..17b777bd8
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/PureCdcDdlMark4DropTableJobFactory.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.factory;
+
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcDropTableIfExistsMarkTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
+import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.gms.metadb.limit.LimitValidator;
+
+import java.util.Set;
+
+public class PureCdcDdlMark4DropTableJobFactory extends DdlJobFactory {
+
+    private final String schemaName;
+    private final String tableName;
+
+    public PureCdcDdlMark4DropTableJobFactory(String schemaName, String tableName) {
+        this.schemaName = schemaName;
+        this.tableName = tableName;
+    }
+
+    @Override
+    protected void validate() {
+        LimitValidator.validateTableNameLength(schemaName);
+        LimitValidator.validateTableNameLength(tableName);
+    }
+
+    @Override
+    protected ExecutableDdlJob doCreate() {
+        ExecutableDdlJob executableDdlJob = new ExecutableDdlJob();
+        CdcDropTableIfExistsMarkTask task = new CdcDropTableIfExistsMarkTask(schemaName, tableName);
+        executableDdlJob.addTask(task);
+        return executableDdlJob;
+    }
+
+    @Override
+    protected void excludeResources(Set<String> resources) {
+        resources.add(concatWithDot(schemaName, tableName));
+    }
+
+    @Override
+    protected void sharedResources(Set<String> resources) {
+
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RecycleOssTableJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RecycleOssTableJobFactory.java
index 1c7fef907..1b5c8962b 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RecycleOssTableJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RecycleOssTableJobFactory.java
@@ -18,8 +18,8 @@
 
 import com.alibaba.polardbx.common.utils.Pair;
 import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
-import com.alibaba.polardbx.executor.ddl.job.task.basic.RenamePartitionTablePhyDdlTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.RenameTableAddMetaTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.RenameTablePhyDdlTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.RenameTableSyncTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.RenameTableUpdateMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.RenameTableValidateTask;
@@ -48,6 +48,7 @@ public class RecycleOssTableJobFactory extends DdlJobFactory {
     private final String schemaName;
     private final String logicalTableName;
     private final String newLogicalTableName;
+    private final boolean needRenamePhyTable;
     private final ExecutionContext executionContext;
 
     private String sourceSchemaName;
@@ -59,6 +60,7 @@ public RecycleOssTableJobFactory(PhysicalPlanData physicalPlanData, ExecutionCon
         this.schemaName = physicalPlanData.getSchemaName();
         this.logicalTableName = physicalPlanData.getLogicalTableName();
         this.newLogicalTableName = physicalPlanData.getNewLogicalTableName();
+        this.needRenamePhyTable = physicalPlanData.isRenamePhyTable();
         this.executionContext = executionContext;
     }
 
@@ -95,13 +97,16 @@ protected ExecutableDdlJob doCreate() {
             tasks.add(tableSyncTask);
         });
         DdlTask addMetaTask = new RenameTableAddMetaTask(schemaName, logicalTableName, newLogicalTableName);
-        DdlTask phyDdlTask = new RenamePartitionTablePhyDdlTask(schemaName, physicalPlanData);
-        DdlTask updateMetaTask = new RenameTableUpdateMetaTask(schemaName, logicalTableName, newLogicalTableName);
+        DdlTask phyDdlTask = new RenameTablePhyDdlTask(schemaName, physicalPlanData);
+        DdlTask updateMetaTask =
+            new RenameTableUpdateMetaTask(schemaName, logicalTableName, newLogicalTableName, needRenamePhyTable);
         DdlTask syncTask = new RenameTableSyncTask(schemaName, logicalTableName, newLogicalTableName);
         TableSyncTask tableSyncTask = new TableSyncTask(schemaName, logicalTableName);
+        tasks.add(addMetaTask);
+        if (needRenamePhyTable) {
+            tasks.add(phyDdlTask);
+        }
         tasks.addAll(Lists.newArrayList(
-            addMetaTask,
-            phyDdlTask,
             updateMetaTask,
             syncTask,
             tableSyncTask
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RefreshDbTopologyFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RefreshDbTopologyFactory.java
index 7d95ea653..a4d1171ed 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RefreshDbTopologyFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RefreshDbTopologyFactory.java
@@ -79,7 +79,7 @@ protected ExecutableDdlJob doCreate() {
 
         DdlTask validateTask =
             new AlterTableGroupValidateTask(schemaName, refreshTopologyPreparedData.getTableGroupName(), tablesVersion,
-                true, null);
+                true, null, false);
         RefreshTopologyValidateTask refreshTopologyValidateTask =
             new RefreshTopologyValidateTask(schemaName, refreshTopologyPreparedData.getInstGroupDbInfo());
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RefreshDbTopologySubTaskJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RefreshDbTopologySubTaskJobFactory.java
index 391f9b6f0..ea9a193d5 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RefreshDbTopologySubTaskJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RefreshDbTopologySubTaskJobFactory.java
@@ -27,7 +27,6 @@
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
 import com.alibaba.polardbx.optimizer.tablegroup.AlterTableGroupSnapShotUtils;
 import org.apache.calcite.rel.core.DDL;
-import org.apache.calcite.sql.SqlAlterTableSetTableGroup;
 
 import java.util.List;
 import java.util.Map;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RefreshTopologyFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RefreshTopologyFactory.java
index fd63a6dcc..48139397f 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RefreshTopologyFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RefreshTopologyFactory.java
@@ -26,6 +26,7 @@
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.RefreshDbTopologyPreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.RefreshTopologyPreparedData;
+import com.alibaba.polardbx.optimizer.locality.StoragePoolManager;
 import org.apache.calcite.rel.core.DDL;
 
 import java.util.Map;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/ReimportTableJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/ReimportTableJobFactory.java
new file mode 100644
index 000000000..e14b30e82
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/ReimportTableJobFactory.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.factory;
+
+import com.alibaba.polardbx.common.ddl.foreignkey.ForeignKeyData;
+import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
+import com.alibaba.polardbx.executor.ddl.job.task.ReimportTableChangeMetaTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.data.CreateTablePreparedData;
+import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
+import com.google.common.collect.ImmutableList;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Created by zhuqiwei.
+ *
+ * @author zhuqiwei
+ */
+public class ReimportTableJobFactory extends CreatePartitionTableJobFactory {
+
+    public ReimportTableJobFactory(boolean autoPartition, boolean hasTimestampColumnDefault,
+                                   Map<String, String> specialDefaultValues,
+                                   Map<String, Long> specialDefaultValueFlags,
+                                   List<ForeignKeyData> addedForeignKeys,
+                                   PhysicalPlanData physicalPlanData, ExecutionContext executionContext,
+                                   CreateTablePreparedData preparedData, PartitionInfo partitionInfo) {
+        super(autoPartition, hasTimestampColumnDefault, specialDefaultValues, specialDefaultValueFlags,
+            addedForeignKeys,
+            physicalPlanData, executionContext, preparedData, partitionInfo, null);
+    }
+
+    @Override
+    protected ExecutableDdlJob doCreate() {
+        ReimportTableChangeMetaTask reimportTableChangeMetaTask =
+            new ReimportTableChangeMetaTask(schemaName, logicalTableName, physicalPlanData.getDefaultDbIndex(),
+                physicalPlanData.getDefaultPhyTableName(), physicalPlanData.getSequence(),
+                physicalPlanData.getTablesExtRecord(), physicalPlanData.isPartitioned(),
+                physicalPlanData.isIfNotExists(), physicalPlanData.getKind(), addedForeignKeys,
+                hasTimestampColumnDefault,
+                specialDefaultValues, specialDefaultValueFlags);
+
+        TableSyncTask tableSyncTask = new TableSyncTask(schemaName, logicalTableName);
+
+        ExecutableDdlJob executableDdlJob = new ExecutableDdlJob();
+        executableDdlJob.addSequentialTasks(ImmutableList.of(
+            reimportTableChangeMetaTask,
+            tableSyncTask
+        ));
+
+        return executableDdlJob;
+    }
+}
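
ReimportTableJobFactory chains its two tasks with addSequentialTasks, while CreatePartitionGsiJobFactory further down in this patch uses addTaskRelationship to fan a validate task out to several meta tasks and back in to a sync task. A standalone mini-DAG contrasting the two assembly styles; MiniJob is a toy, not ExecutableDdlJob, and only assumes that sequential chaining adds an edge between neighbours:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Toy DAG holder; String stands in for DdlTask.
class MiniJob {
    final Map<String, List<String>> edges = new LinkedHashMap<>();

    void addTaskRelationship(String from, String to) {
        edges.computeIfAbsent(from, k -> new ArrayList<>()).add(to);
    }

    void addSequentialTasks(List<String> tasks) {
        for (int i = 0; i + 1 < tasks.size(); i++) {
            addTaskRelationship(tasks.get(i), tasks.get(i + 1));
        }
    }
}

public class JobShapeDemo {
    public static void main(String[] args) {
        // Linear chain, as in ReimportTableJobFactory.doCreate().
        MiniJob linear = new MiniJob();
        linear.addSequentialTasks(Arrays.asList("changeMeta", "tableSync"));

        // Fan-out/fan-in, as in CreatePartitionGsiJobFactory below.
        MiniJob fanOut = new MiniJob();
        fanOut.addTaskRelationship("validate", "addMeta-tg1");
        fanOut.addTaskRelationship("validate", "addMeta-tg2");
        fanOut.addTaskRelationship("addMeta-tg1", "tableGroupsSync");
        fanOut.addTaskRelationship("addMeta-tg2", "tableGroupsSync");

        System.out.println(linear.edges);
        System.out.println(fanOut.edges);
    }
}
```
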
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RenameTableJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RenameTableJobFactory.java
index 5295d500e..cb75a53a7 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RenameTableJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RenameTableJobFactory.java
@@ -17,13 +17,11 @@
 package com.alibaba.polardbx.executor.ddl.job.factory;
 
 import com.alibaba.polardbx.common.Engine;
-import com.alibaba.polardbx.common.ddl.foreignkey.ForeignKeyData;
 import com.alibaba.polardbx.common.exception.TddlRuntimeException;
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
 import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
 import com.alibaba.polardbx.executor.ddl.job.factory.util.FactoryUtils;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.RenameGsiUpdateMetaTask;
-import com.alibaba.polardbx.executor.ddl.job.task.basic.RenamePartitionTablePhyDdlTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.RenameTableAddMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.RenameTablePhyDdlTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.RenameTableSyncTask;
@@ -31,6 +29,7 @@
 import com.alibaba.polardbx.executor.ddl.job.task.basic.RenameTableValidateTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask;
 import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcDdlMarkTask;
+import com.alibaba.polardbx.executor.ddl.job.task.columnar.RenameColumnarTablesMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.validator.GsiValidator;
 import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
@@ -38,12 +37,12 @@
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
 import com.alibaba.polardbx.gms.topology.DbInfoManager;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
+import com.alibaba.polardbx.optimizer.config.table.SchemaManager;
 import com.alibaba.polardbx.optimizer.config.table.TableMeta;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 
 import java.util.ArrayList;
 import java.util.List;
-import java.util.Map;
 import java.util.Set;
 
 public class RenameTableJobFactory extends DdlJobFactory {
@@ -52,14 +51,18 @@ public class RenameTableJobFactory extends DdlJobFactory {
     private final String schemaName;
     private final String logicalTableName;
     private final String newLogicalTableName;
+    private final boolean needRenamePhyTables;
     private final ExecutionContext executionContext;
+    protected final Long versionId;
 
-    public RenameTableJobFactory(PhysicalPlanData physicalPlanData, ExecutionContext executionContext) {
+    public RenameTableJobFactory(PhysicalPlanData physicalPlanData, ExecutionContext executionContext, Long versionId) {
         this.physicalPlanData = physicalPlanData;
         this.schemaName = physicalPlanData.getSchemaName();
         this.logicalTableName = physicalPlanData.getLogicalTableName();
         this.newLogicalTableName = physicalPlanData.getNewLogicalTableName();
+        this.needRenamePhyTables = physicalPlanData.isRenamePhyTable();
         this.executionContext = executionContext;
+        this.versionId = versionId;
     }
 
     @Override
@@ -72,15 +75,10 @@ protected ExecutableDdlJob doCreate() {
         boolean isGsi = TableValidator.checkTableIsGsi(schemaName, logicalTableName);
         DdlTask validateTask = new RenameTableValidateTask(schemaName, logicalTableName, newLogicalTableName);
         DdlTask addMetaTask = new RenameTableAddMetaTask(schemaName, logicalTableName, newLogicalTableName);
-        DdlTask cdcDdlMarkTask = new CdcDdlMarkTask(schemaName, physicalPlanData, false, false);
+        DdlTask cdcDdlMarkTask = new CdcDdlMarkTask(schemaName, physicalPlanData, false, false, versionId);
 
-        DdlTask phyDdlTask;
-        boolean isNewPartitionDb = DbInfoManager.getInstance().isNewPartitionDb(schemaName);
-        if (isNewPartitionDb) {
-            phyDdlTask = new RenamePartitionTablePhyDdlTask(schemaName, physicalPlanData);
-        } else {
-            phyDdlTask = new RenameTablePhyDdlTask(schemaName, physicalPlanData).onExceptionTryRecoveryThenRollback();
-        }
+        DdlTask phyDdlTask =
+            new RenameTablePhyDdlTask(schemaName, physicalPlanData).onExceptionTryRecoveryThenRollback();
         DdlTask updateMetaTask;
         DdlTask syncTask;
         if (isGsi) {
@@ -89,37 +87,42 @@ protected ExecutableDdlJob doCreate() {
             String primaryTableName = tableMeta.getGsiTableMetaBean().gsiMetaBean.tableName;
 
             updateMetaTask =
-                new RenameGsiUpdateMetaTask(schemaName, primaryTableName, logicalTableName, newLogicalTableName);
+                new RenameGsiUpdateMetaTask(schemaName, primaryTableName, logicalTableName, newLogicalTableName,
+                    needRenamePhyTables);
             syncTask = new TableSyncTask(schemaName, primaryTableName);
         } else {
-            updateMetaTask = new RenameTableUpdateMetaTask(schemaName, logicalTableName, newLogicalTableName);
+            updateMetaTask =
+                new RenameTableUpdateMetaTask(schemaName, logicalTableName, newLogicalTableName, needRenamePhyTables);
             syncTask = new RenameTableSyncTask(schemaName, logicalTableName, newLogicalTableName);
         }
 
+        SchemaManager schemaManager = executionContext.getSchemaManager(schemaName);
+        TableMeta tableMeta = schemaManager.getTable(logicalTableName);
+        boolean withColumnar =
+            tableMeta.getGsiTableMetaBean() != null && tableMeta.getGsiTableMetaBean().indexMap != null
+                && tableMeta.getGsiTableMetaBean().indexMap.values().stream().anyMatch(m -> m.columnarIndex);
+
         List<DdlTask> taskList = new ArrayList<>();
         taskList.add(validateTask);
         taskList.add(addMetaTask);
-        taskList.add(phyDdlTask);
-        Engine engine = OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(logicalTableName).getEngine();
-        if (!Engine.isFileStore(engine)) {
+        if (needRenamePhyTables) {
+            taskList.add(phyDdlTask);
+        }
+        Engine engine =
+            OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(logicalTableName).getEngine();
+        if (!Engine.isFileStore(engine) && !isGsi) {
+            if (withColumnar) {
+                DdlTask renameColumnarTask =
+                    new RenameColumnarTablesMetaTask(schemaName, logicalTableName, newLogicalTableName, versionId);
+                taskList.add(renameColumnarTask);
+            }
             taskList.add(cdcDdlMarkTask);
         }
         taskList.add(updateMetaTask);
         taskList.add(syncTask);
 
         // sync foreign key table meta
-        TableMeta tableMeta =
-            OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(logicalTableName);
-        Map<String, ForeignKeyData> referencedForeignKeys = tableMeta.getReferencedForeignKeys();
-        Map<String, ForeignKeyData> foreignKeys = tableMeta.getForeignKeys();
-        for (Map.Entry<String, ForeignKeyData> e : foreignKeys.entrySet()) {
-            taskList.add(new TableSyncTask(e.getValue().refSchema, e.getValue().refTableName));
-        }
-        for (Map.Entry<String, ForeignKeyData> e : referencedForeignKeys.entrySet()) {
-            String referencedSchemaName = e.getValue().schema;
-            String referencedTableName = e.getValue().tableName;
-            taskList.add(new TableSyncTask(referencedSchemaName, referencedTableName));
-        }
+        taskList.addAll(FactoryUtils.getFkTableSyncTasks(schemaName, logicalTableName));
 
         ExecutableDdlJob executableDdlJob = new ExecutableDdlJob();
         executableDdlJob.addSequentialTasks(taskList);
@@ -141,6 +144,9 @@ protected void excludeResources(Set<String> resources) {
                 throw new TddlRuntimeException(ErrorCode.ERR_TABLE_META_TOO_OLD, schemaName, logicalTableName);
             }
         }
+
+        // exclude foreign key tables
+        FactoryUtils.getFkTableExcludeResources(schemaName, logicalTableName, resources);
     }
 
     @Override
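
The inline foreign-key loops removed above now live in FactoryUtils.getFkTableSyncTasks, with a matching getFkTableExcludeResources used in excludeResources. Based purely on the deleted code, the sync helper presumably looks roughly like the reconstruction below; this is a sketch, not the actual FactoryUtils source:

```java
// Reconstruction from the removed inline code above; not the real helper.
public static List<DdlTask> getFkTableSyncTasks(String schemaName, String logicalTableName) {
    TableMeta tableMeta =
        OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(logicalTableName);
    List<DdlTask> tasks = new ArrayList<>();
    // Sync every table this table references ...
    for (ForeignKeyData fk : tableMeta.getForeignKeys().values()) {
        tasks.add(new TableSyncTask(fk.refSchema, fk.refTableName));
    }
    // ... and every table that references this table.
    for (ForeignKeyData fk : tableMeta.getReferencedForeignKeys().values()) {
        tasks.add(new TableSyncTask(fk.schema, fk.tableName));
    }
    return tasks;
}
```
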
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RenameTablesJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RenameTablesJobFactory.java
index abca08737..c0658f900 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RenameTablesJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/RenameTablesJobFactory.java
@@ -21,6 +21,7 @@
 import com.alibaba.polardbx.common.properties.ConnectionParams;
 import com.alibaba.polardbx.executor.ddl.job.factory.util.FactoryUtils;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.RenameTablesCdcSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.RenameTablesUpdateDataIdTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.RenameTablesUpdateMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.RenameTablesValidateTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.TableListDataIdSyncTask;
@@ -79,12 +80,18 @@ protected ExecutableDdlJob doCreate() {
                 enablePreemptiveMdl, initWait, interval, TimeUnit.MILLISECONDS,
                 oldNames, newNames, preparedData.getCollate(), preparedData.getCdcMetas(),
                 preparedData.getNewTableTopologies());
-        TableListDataIdSyncTask tableListDataIdSyncTask = new TableListDataIdSyncTask(schemaName);
+        RenameTablesUpdateDataIdTask dataIdTask = new RenameTablesUpdateDataIdTask(schemaName, oldNames, newNames);
+        TableListDataIdSyncTask tableListDataIdSyncTask =
+            new TableListDataIdSyncTask(schemaName, preparedData.getDistinctNames());
+        TableListDataIdSyncTask tableListDataIdSyncTask0 =
+            new TableListDataIdSyncTask(schemaName, preparedData.getDistinctNames());
 
         taskList.add(validateTask);
+        taskList.add(tableListDataIdSyncTask0);
         taskList.add(metaTask);
         // lock + cdc + sync + unlock
         taskList.add(cdcSyncTask);
+        taskList.add(dataIdTask);
         taskList.add(tableListDataIdSyncTask);
 
         ExecutableDdlJob executableDdlJob = new ExecutableDdlJob();
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/TruncateTableJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/TruncateTableJobFactory.java
index e9df140fc..eef0f63d2 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/TruncateTableJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/TruncateTableJobFactory.java
@@ -32,6 +32,8 @@
 import java.util.Map;
 import java.util.Set;
 
+import static com.alibaba.polardbx.common.cdc.ICdcManager.DEFAULT_DDL_VERSION_ID;
+
 public class TruncateTableJobFactory extends DdlJobFactory {
 
     private final PhysicalPlanData physicalPlanData;
@@ -62,7 +64,7 @@ protected ExecutableDdlJob doCreate() {
 
         DdlTask validateTask = new TruncateTableValidateTask(schemaName, logicalTableName, tableGroupConfig);
         DdlTask phyDdlTask = new TruncateTablePhyDdlTask(schemaName, physicalPlanData);
-        DdlTask cdcDdlMarkTask = new CdcDdlMarkTask(schemaName, physicalPlanData, false, false);
+        DdlTask cdcDdlMarkTask = new CdcDdlMarkTask(schemaName, physicalPlanData, false, false, DEFAULT_DDL_VERSION_ID);
 
         ExecutableDdlJob executableDdlJob = new ExecutableDdlJob();
         executableDdlJob.addSequentialTasks(Lists.newArrayList(
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/AlterGsiVisibilityJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/AlterGsiVisibilityJobFactory.java
index d1c2aae2b..49bee9846 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/AlterGsiVisibilityJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/AlterGsiVisibilityJobFactory.java
@@ -16,8 +16,11 @@
 
 package com.alibaba.polardbx.executor.ddl.job.factory.gsi;
 
+import com.alibaba.polardbx.common.properties.ConnectionParams;
 import com.alibaba.polardbx.executor.ddl.job.task.AlterGsiVisibilityValidateTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcAlterIndexVisibilityMarkTask;
+import com.alibaba.polardbx.executor.ddl.job.task.gsi.CciUpdateIndexStatusTask;
 import com.alibaba.polardbx.executor.ddl.job.task.gsi.GsiUpdateIndexStatusTask;
 import com.alibaba.polardbx.executor.ddl.job.task.gsi.GsiUpdateIndexVisibilityTask;
 import com.alibaba.polardbx.executor.ddl.job.task.gsi.ValidateTableVersionTask;
@@ -25,6 +28,7 @@
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.gms.metadb.table.ColumnarTableStatus;
 import com.alibaba.polardbx.gms.metadb.table.IndexStatus;
 import com.alibaba.polardbx.gms.metadb.table.IndexVisibility;
 import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig;
@@ -36,6 +40,9 @@
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
+import org.jetbrains.annotations.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.List;
 import java.util.Map;
@@ -48,6 +55,7 @@
  * @author zhuqiwei
  */
 public class AlterGsiVisibilityJobFactory extends DdlJobFactory {
+    private static final Logger LOGGER = LoggerFactory.getLogger(AlterGsiVisibilityJobFactory.class);
     protected final String schemaName;
     protected final String primaryTableName;
     protected final String indexTableName;
@@ -67,7 +75,7 @@ public AlterGsiVisibilityJobFactory(AlterTableWithGsiPreparedData alterTableWith
 
     @Override
     protected void validate() {
-        GsiValidator.validateGsi(schemaName, indexTableName);
+        GsiValidator.validateGsiOrCci(schemaName, indexTableName);
         GsiValidator.validateAllowDdlOnTable(schemaName, primaryTableName, executionContext);
     }
 
@@ -119,20 +127,66 @@ protected ExecutableDdlJob doCreate() {
                 IndexVisibility.INVISIBLE);
         }
 
-        DdlTask syncTask = new TableSyncTask(schemaName, primaryTableName);
+        final CciUpdateIndexStatusTask changeCciStatusTask = buildChangeCciStatusTask();
 
-        List<DdlTask> taskList = ImmutableList.of(
-            validateTask,
-            validateTableVersionTask,
-            changeGsiStatusTask,
-            syncTask
+        final CdcAlterIndexVisibilityMarkTask cdcAlterIndexVisibilityMarkTask = new CdcAlterIndexVisibilityMarkTask(
+            schemaName, primaryTableName
         );
+
+        DdlTask syncTask = new TableSyncTask(schemaName, primaryTableName);
+
+        List<DdlTask> taskList = (null != changeCciStatusTask) ?
+            ImmutableList.of(
+                validateTask,
+                validateTableVersionTask,
+                changeGsiStatusTask,
+                changeCciStatusTask,
+                cdcAlterIndexVisibilityMarkTask,
+                syncTask
+            ) :
+            ImmutableList.of(
+                validateTask,
+                validateTableVersionTask,
+                changeGsiStatusTask,
+                cdcAlterIndexVisibilityMarkTask,
+                syncTask
+            );
         ExecutableDdlJob executableDdlJob = new ExecutableDdlJob();
         executableDdlJob.addSequentialTasks(taskList);
 
         return executableDdlJob;
     }
 
+    @Nullable
+    private CciUpdateIndexStatusTask buildChangeCciStatusTask() {
+        CciUpdateIndexStatusTask changeCciStatusTask = null;
+        if (preparedData.isColumnar()
+            && executionContext.getParamManager().getBoolean(ConnectionParams.ALTER_CCI_STATUS)) {
+            final String beforeStatusStr =
+                executionContext.getParamManager().getString(ConnectionParams.ALTER_CCI_STATUS_BEFORE);
+            final String afterStatusStr =
+                executionContext.getParamManager().getString(ConnectionParams.ALTER_CCI_STATUS_AFTER);
+
+            try {
+                final ColumnarTableStatus beforeStatus = ColumnarTableStatus.valueOf(beforeStatusStr);
+                final ColumnarTableStatus afterStatus = ColumnarTableStatus.valueOf(afterStatusStr);
+
+                changeCciStatusTask = new CciUpdateIndexStatusTask(
+                    schemaName,
+                    primaryTableName,
+                    indexTableName,
+                    beforeStatus,
+                    afterStatus,
+                    beforeStatus.toIndexStatus(),
+                    afterStatus.toIndexStatus(),
+                    true);
+            } catch (Exception ignored) {
+                LOGGER.error("Unknown before({}) or after({}) status", beforeStatusStr, afterStatusStr);
+            }
+        }
+        return changeCciStatusTask;
+    }
+
     public static ExecutableDdlJob create(AlterTableWithGsiPreparedData alterTableWithGsiPreparedData,
                                           ExecutionContext executionContext) {
         return new AlterGsiVisibilityJobFactory(alterTableWithGsiPreparedData, executionContext).create();
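
buildChangeCciStatusTask only produces a task when ALTER_CCI_STATUS is enabled and both status strings parse as ColumnarTableStatus names; an unparsable value is logged and the task is silently skipped rather than failing the whole job. The standalone sketch below isolates that parse-or-skip pattern; the enum is a stand-in, not ColumnarTableStatus:

```java
// Stand-in enum; the real code parses ColumnarTableStatus.valueOf(...).
enum Status { CREATING, PUBLIC, ABSENT }

public class ParseOrSkipDemo {
    static Status parseOrNull(String name) {
        try {
            return Status.valueOf(name); // throws IllegalArgumentException on unknown names
        } catch (Exception ignored) {
            System.err.println("Unknown status " + name + ", skipping CCI status change");
            return null; // caller adds no CciUpdateIndexStatusTask in this case
        }
    }

    public static void main(String[] args) {
        System.out.println(parseOrNull("PUBLIC")); // PUBLIC
        System.out.println(parseOrNull("BOGUS"));  // null, with a log line
    }
}
```
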
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/AlterPartitionCountJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/AlterPartitionCountJobFactory.java
index 7501541d7..07cd85566 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/AlterPartitionCountJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/AlterPartitionCountJobFactory.java
@@ -16,20 +16,17 @@
 
 package com.alibaba.polardbx.executor.ddl.job.factory.gsi;
 
+import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility;
 import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
 import com.alibaba.polardbx.executor.ddl.job.factory.util.FactoryUtils;
-import com.alibaba.polardbx.executor.ddl.job.task.basic.TruncateTablePhyDdlTask;
 import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcRepartitionMarkTask;
 import com.alibaba.polardbx.executor.ddl.job.task.gsi.AlterPartitionCountCutOverTask;
 import com.alibaba.polardbx.executor.ddl.job.task.gsi.AlterPartitionCountSyncTask;
 import com.alibaba.polardbx.executor.ddl.job.task.gsi.AlterPartitionCountValidateTask;
-import com.alibaba.polardbx.executor.ddl.job.task.gsi.RepartitionCutOverTask;
-import com.alibaba.polardbx.executor.ddl.job.task.gsi.RepartitionSyncTask;
 import com.alibaba.polardbx.executor.ddl.job.task.gsi.ValidateTableVersionTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.TableGroupsSyncTask;
 import com.alibaba.polardbx.executor.ddl.job.validator.GsiValidator;
 import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator;
-import com.alibaba.polardbx.executor.ddl.job.validator.ddl.RepartitionValidator;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlExceptionAction;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
@@ -123,7 +120,11 @@ protected ExecutableDdlJob doCreate() {
             alterPartitionCountSyncTask = new AlterPartitionCountSyncTask(schemaName, primaryTableName, tableNameMap);
 
         // cdc
-        DdlTask cdcDdlMarkTask = new CdcRepartitionMarkTask(schemaName, primaryTableName, SqlKind.ALTER_TABLE);
+        if (executionContext.getDdlContext().isSubJob()) {
+            throw new RuntimeException("unexpected parent ddl job");
+        }
+        DdlTask cdcDdlMarkTask = new CdcRepartitionMarkTask(
+            schemaName, primaryTableName, SqlKind.ALTER_TABLE, CdcDdlMarkVisibility.Protected);
 
         // drop gsi
         List<ExecutableDdlJob> dropGsiJobs = new ArrayList<>();
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/CreateGsiCheckTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/CreateGsiCheckTask.java
new file mode 100644
index 000000000..e6a86450e
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/CreateGsiCheckTask.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.factory.gsi;
+
+import com.alibaba.fastjson.annotation.JSONCreator;
+import com.alibaba.polardbx.common.properties.ConnectionParams;
+import com.alibaba.polardbx.executor.ddl.job.task.BaseBackfillTask;
+import com.alibaba.polardbx.executor.ddl.job.task.gsi.CheckGsiTask;
+import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName;
+import com.alibaba.polardbx.executor.gsi.corrector.GsiChecker;
+import com.alibaba.polardbx.optimizer.OptimizerContext;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import lombok.Getter;
+import org.apache.calcite.sql.SqlSelect;
+
+import java.util.Map;
+
+@TaskName(name = "CreateGsiCheckTask")
+@Getter
+public class CreateGsiCheckTask extends BaseBackfillTask {
+    final private String logicalTableName;
+    final private String indexTableName;
+    public Map<String, String> virtualColumns;
+    public Map<String, String> backfillColumnMap;
+
+    @JSONCreator
+    public CreateGsiCheckTask(String schemaName, String logicalTableName, String indexTableName,
+                              Map<String, String> virtualColumns, Map<String, String> backfillColumnMap) {
+        super(schemaName);
+        this.logicalTableName = logicalTableName;
+        this.indexTableName = indexTableName;
+        this.virtualColumns = virtualColumns;
+        this.backfillColumnMap = backfillColumnMap;
+        onExceptionTryRecoveryThenRollback();
+    }
+
+    @Override
+    protected void executeImpl(ExecutionContext executionContext) {
+        final boolean skipCheck =
+            executionContext.getParamManager().getBoolean(ConnectionParams.SKIP_CHANGE_SET_CHECKER);
+
+        if (!skipCheck) {
+            String lockMode = SqlSelect.LockMode.UNDEF.toString();
+            GsiChecker.Params params = GsiChecker.Params.buildFromExecutionContext(executionContext);
+            boolean isPrimaryBroadCast =
+                OptimizerContext.getContext(schemaName).getRuleManager().isBroadCast(logicalTableName);
+            boolean isGsiBroadCast =
+                OptimizerContext.getContext(schemaName).getRuleManager().isBroadCast(indexTableName);
+            CheckGsiTask checkTask =
+                new CheckGsiTask(schemaName, logicalTableName, indexTableName, lockMode, lockMode, params, false, "",
+                    isPrimaryBroadCast, isGsiBroadCast, virtualColumns, backfillColumnMap);
+            checkTask.checkInBackfill(executionContext);
+        }
+    }
+}
\ No newline at end of file
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/CreateGsiJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/CreateGsiJobFactory.java
index 0e895336a..f502f0857 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/CreateGsiJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/CreateGsiJobFactory.java
@@ -24,6 +24,7 @@
 import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableAddTablesExtMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableAddTablesMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableShowTableMetaTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.SubJobTask;
 import com.alibaba.polardbx.executor.ddl.job.task.factory.GsiTaskFactory;
 import com.alibaba.polardbx.executor.ddl.job.task.gsi.CreateGsiPhyDdlTask;
 import com.alibaba.polardbx.executor.ddl.job.task.gsi.CreateGsiValidateTask;
@@ -33,15 +34,16 @@
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
 import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4CreateGsi;
+import com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils;
 import com.alibaba.polardbx.gms.metadb.table.IndexStatus;
 import com.alibaba.polardbx.gms.metadb.table.IndexVisibility;
 import com.alibaba.polardbx.gms.topology.DbInfoManager;
+import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager;
 import com.alibaba.polardbx.optimizer.config.table.TableMeta;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.CreateGlobalIndexPreparedData;
 import org.apache.calcite.rel.core.DDL;
 import org.apache.calcite.sql.SqlIndexColumnName;
-import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang3.StringUtils;
 
 import java.util.ArrayList;
@@ -51,6 +53,7 @@
 import java.util.TreeMap;
 import java.util.stream.Collectors;
 
+import static com.alibaba.polardbx.executor.gsi.GsiUtils.columnAst2nameStr;
 import static com.alibaba.polardbx.gms.metadb.table.IndexStatus.DELETE_ONLY;
 import static com.alibaba.polardbx.gms.metadb.table.IndexStatus.WRITE_ONLY;
 
@@ -79,6 +82,9 @@ public class CreateGsiJobFactory extends DdlJobFactory {
     protected final Map<String, Long> specialDefaultValueFlags;
     protected ExecutionContext executionContext;
     public boolean needOnlineSchemaChange = true;
+    public boolean onlineModifyColumn = false;
+    public boolean useChangeSet = false;
+    public boolean mirrorCopy = false;
     public boolean stayAtBackFill = false;
     public boolean buildBroadCast = false;
 
@@ -88,9 +94,27 @@ public class CreateGsiJobFactory extends DdlJobFactory {
     protected List<String> referencedTables;
     protected List<ForeignKeyData> addedForeignKeys;
 
-    // for alter table modify sharding key (repartition check)
+    /**
+     * for alter table modify sharding key (repartition check)
+     */
     public Map<String, String> virtualColumnMap = null;
 
+    /**
+     * for online modify column (change column name), column map, oldName ----> newName
+     */
+    public Map<String, String> backfillColumnMap = null;
+
+    public List<String> modifyStringColumns = null;
+
+    public List<String> addNewColumns = null;
+
+    /**
+     * for online modify column, oldIndexName ----> newIndexName
+     */
+    public String oldIndexName;
+
+    public SubJobTask rerunTask;
+
     private boolean visible;
 
     private boolean repartition;
@@ -182,25 +206,53 @@ protected ExecutableDdlJob doCreate() {
         final boolean stayAtDeleteOnly = StringUtils.equalsIgnoreCase(DELETE_ONLY.name(), finalStatus);
         final boolean stayAtWriteOnly = StringUtils.equalsIgnoreCase(WRITE_ONLY.name(), finalStatus);
 
+        Map<String, String> columnMapping = backfillColumnMap == null ? null :
+            backfillColumnMap.entrySet().stream().collect(Collectors.toMap(Map.Entry::getValue, Map.Entry::getKey));
+
         List<String> columns = columnAst2nameStr(this.columns);
         List<String> coverings = columnAst2nameStr(this.coverings);
         List<DdlTask> bringUpGsi = null;
-        if (needOnlineSchemaChange) {
+        if (useChangeSet) {
+            // online modify column
+            bringUpGsi = GsiTaskFactory.addGlobalIndexTasksChangeSet(
+                schemaName,
+                primaryTableName,
+                oldIndexName,
+                indexTableName,
+                stayAtDeleteOnly,
+                stayAtWriteOnly,
+                stayAtBackFill,
+                virtualColumnMap,
+                backfillColumnMap,
+                modifyStringColumns,
+                onlineModifyColumn,
+                mirrorCopy,
+                physicalPlanData,
+                null
+            );
+        } else if (needOnlineSchemaChange) {
+            // add index, using online schema change
             TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(primaryTableName);
             bringUpGsi = GsiTaskFactory.addGlobalIndexTasks(
                 schemaName,
                 primaryTableName,
+                oldIndexName,
                 indexTableName,
                 stayAtDeleteOnly,
                 stayAtWriteOnly,
                 stayAtBackFill,
                 null,
+                null,
+                modifyStringColumns,
                 physicalPlanData,
                 tableMeta,
                 repartition,
+                onlineModifyColumn,
+                mirrorCopy,
                 executionContext.getOriginSql()
             );
         } else {
+            // create table with gsi, without online schema change
             bringUpGsi = GsiTaskFactory.createGlobalIndexTasks(
                 schemaName,
                 primaryTableName,
@@ -229,7 +281,9 @@ protected ExecutableDdlJob doCreate() {
                 addedForeignKeys,
                 hasTimestampColumnDefault,
                 specialDefaultValues,
-                specialDefaultValueFlags);
+                specialDefaultValueFlags,
+                columnMapping,
+                addNewColumns);
         CreateTableShowTableMetaTask showTableMetaTask = new CreateTableShowTableMetaTask(schemaName, indexTableName);
         GsiInsertIndexMetaTask addIndexMetaTask =
             new GsiInsertIndexMetaTask(
@@ -244,7 +298,9 @@ protected ExecutableDdlJob doCreate() {
                 IndexStatus.CREATING,
                 clusteredIndex,
                 visible ? IndexVisibility.VISIBLE : IndexVisibility.INVISIBLE,
-                needOnlineSchemaChange
+                needOnlineSchemaChange,
+                columnMapping,
+                addNewColumns
             );
 
         List<DdlTask> taskList = new ArrayList<>();
@@ -299,15 +355,6 @@ protected void excludeResources(Set<String> resources) {
     protected void sharedResources(Set<String> resources) {
     }
 
-    protected List<String> columnAst2nameStr(List<SqlIndexColumnName> columnDefList) {
-        if (CollectionUtils.isEmpty(columnDefList)) {
-            return new ArrayList<>();
-        }
-        return columnDefList.stream()
-            .map(e -> e.getColumnNameStr())
-            .collect(Collectors.toList());
-    }
-
     /**
      * for create table with gsi
      * needOnlineSchemaChange = false, will skip Online Schema Change and BackFill
@@ -337,4 +384,32 @@ public static ExecutableDdlJob create(@Deprecated DDL ddl,
     public void setVirtualColumnMap(Map<String, String> virtualColumnMap) {
         this.virtualColumnMap = virtualColumnMap;
     }
+
+    public void setBackfillColumnMap(Map<String, String> backfillColumnMap) {
+        this.backfillColumnMap = backfillColumnMap;
+    }
+
+    public void setOldIndexName(String oldIndexName) {
+        this.oldIndexName = oldIndexName;
+    }
+
+    public void setOnlineModifyColumn(boolean onlineModifyColumn) {
+        this.onlineModifyColumn = onlineModifyColumn;
+    }
+
+    public void setUseChangeSet(boolean useChangeSet) {
+        this.useChangeSet = useChangeSet;
+    }
+
+    public void setMirrorCopy(boolean mirrorCopy) {
+        this.mirrorCopy = mirrorCopy;
+    }
+
+    public void setModifyStringColumns(List<String> modifyStringColumns) {
+        this.modifyStringColumns = modifyStringColumns;
+    }
+
+    public void setAddNewColumns(List<String> addNewColumns) {
+        this.addNewColumns = addNewColumns;
+    }
 }
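
A detail worth calling out in the hunk above: backfillColumnMap maps oldName to newName, while the meta tasks receive the inverse (newName to oldName). The standalone demo below reproduces that stream inversion; note that Collectors.toMap throws on duplicate keys, so the rename map is implicitly assumed to be one-to-one:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;

public class ColumnMappingDemo {
    public static void main(String[] args) {
        // oldName -> newName, as in CreateGsiJobFactory.backfillColumnMap.
        Map<String, String> backfillColumnMap = new HashMap<>();
        backfillColumnMap.put("c_old", "c_new");

        // Inverted to newName -> oldName before being handed to the meta tasks.
        Map<String, String> columnMapping = backfillColumnMap.entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getValue, Map.Entry::getKey));

        System.out.println(columnMapping); // {c_new=c_old}
    }
}
```
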
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/CreatePartitionGsiJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/CreatePartitionGsiJobFactory.java
index c86ea4509..f79ffb459 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/CreatePartitionGsiJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/CreatePartitionGsiJobFactory.java
@@ -25,6 +25,8 @@
 import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableAddTablesMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableAddTablesPartitionInfoMetaTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableGroupAddMetaTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableGroupValidateTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableShowTableMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.SubJobTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask;
@@ -33,6 +35,7 @@
 import com.alibaba.polardbx.executor.ddl.job.task.gsi.CreateGsiPreValidateTask;
 import com.alibaba.polardbx.executor.ddl.job.task.gsi.CreateGsiValidateTask;
 import com.alibaba.polardbx.executor.ddl.job.task.gsi.GsiInsertIndexMetaTask;
+import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.TableGroupsSyncTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
 import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4CreatePartitionGsi;
@@ -40,6 +43,7 @@
 import com.alibaba.polardbx.gms.metadb.table.IndexVisibility;
 import com.alibaba.polardbx.gms.partition.TablePartRecordInfoContext;
 import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig;
+import com.alibaba.polardbx.gms.tablegroup.TableGroupDetailConfig;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
 import com.alibaba.polardbx.optimizer.config.table.TableMeta;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
@@ -55,9 +59,12 @@
 import java.util.Objects;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.stream.Collectors;
 
+import static com.alibaba.polardbx.executor.gsi.GsiUtils.columnAst2nameStr;
 import static com.alibaba.polardbx.gms.metadb.table.IndexStatus.DELETE_ONLY;
 import static com.alibaba.polardbx.gms.metadb.table.IndexStatus.WRITE_ONLY;
+import static org.apache.calcite.sql.SqlIdentifier.surroundWithBacktick;
 
 /**
  * 1. create [unique] global index
@@ -71,7 +78,7 @@ public class CreatePartitionGsiJobFactory extends CreateGsiJobFactory {
 
     List<Long> tableGroupIds = new ArrayList<>();
 
-    final boolean indexAlignWithPrimaryTableGroup;
+    final String tableGroupAlignWithTargetTable;
     final CreateGlobalIndexPreparedData globalIndexPreparedData;
 
     public CreatePartitionGsiJobFactory(CreateGlobalIndexPreparedData globalIndexPreparedData,
@@ -99,12 +106,16 @@ public CreatePartitionGsiJobFactory(CreateGlobalIndexPreparedData globalIndexPre
             globalIndexPreparedData.getAddedForeignKeys(),
             executionContext
         );
-        this.indexAlignWithPrimaryTableGroup = globalIndexPreparedData.isIndexAlignWithPrimaryTableGroup();
+        this.tableGroupAlignWithTargetTable = globalIndexPreparedData.getTableGroupAlignWithTargetTable();
         this.globalIndexPreparedData = globalIndexPreparedData;
     }
 
     @Override
     protected void excludeResources(Set<String> resources) {
+        if (!globalIndexPreparedData.isWithImplicitTableGroup() && isNeedToGetCreateTableGroupLock(true)) {
+            resources.add(concatWithDot(schemaName, ConnectionProperties.ACQUIRE_CREATE_TABLE_GROUP_LOCK));
+            executionContext.getExtraCmds().put(ConnectionParams.ACQUIRE_CREATE_TABLE_GROUP_LOCK.getName(), false);
+        }
         if (isNeedToGetCreateTableGroupLock(true)) {
             resources.add(concatWithDot(schemaName, ConnectionProperties.ACQUIRE_CREATE_TABLE_GROUP_LOCK));
             executionContext.getExtraCmds().put(ConnectionParams.ACQUIRE_CREATE_TABLE_GROUP_LOCK.getName(), false);
@@ -113,8 +124,8 @@ protected void excludeResources(Set<String> resources) {
             resources.add(concatWithDot(schemaName, primaryTableName));
             resources.add(concatWithDot(schemaName, indexTableName));
 
-            TableGroupConfig tgConfig = physicalPlanData.getTableGroupConfig();
-            for (TablePartRecordInfoContext entry : tgConfig.getTables()) {
+            TableGroupDetailConfig tgConfig = physicalPlanData.getTableGroupConfig();
+            for (TablePartRecordInfoContext entry : tgConfig.getTablesPartRecordInfoContext()) {
                 Long tableGroupId = entry.getLogTbRec().getGroupId();
                 if (tableGroupId != null && tableGroupId != -1) {
                     tableGroupIds.add(tableGroupId);
@@ -133,7 +144,37 @@ protected void excludeResources(Set resources) {
     @Override
     protected ExecutableDdlJob doCreate() {
 
-        if (isNeedToGetCreateTableGroupLock(false)) {
+        List<String> tableGroups = new ArrayList<>();
+        if (needCreateImplicitTableGroup(tableGroups)) {
+            rerunTask = generateCreateTableJob();
+            DdlTask subJobTask = rerunTask;
+            List<DdlTask> taskList = new ArrayList<>();
+            ExecutableDdlJob job = new ExecutableDdlJob();
+            CreateTableGroupValidateTask createTableGroupValidateTask =
+                new CreateTableGroupValidateTask(globalIndexPreparedData.getSchemaName(),
+                    tableGroups);
+            taskList.add(createTableGroupValidateTask);
+            List<CreateTableGroupAddMetaTask> createTableGroupAddMetaTasks = new ArrayList<>();
+            for (int i = 0; i < tableGroups.size(); i++) {
+                String tableGroupName = tableGroups.get(i);
+                CreateTableGroupAddMetaTask createTableGroupAddMetaTask = new CreateTableGroupAddMetaTask(
+                    globalIndexPreparedData.getSchemaName(), tableGroupName, null,
+                    null, globalIndexPreparedData.isSingle(), true);
+                createTableGroupAddMetaTasks.add(createTableGroupAddMetaTask);
+            }
+
+            TableGroupsSyncTask tableGroupsSyncTask =
+                new TableGroupsSyncTask(globalIndexPreparedData.getSchemaName(), tableGroups);
+            taskList.add(tableGroupsSyncTask);
+            taskList.add(subJobTask);
+            job.addSequentialTasks(taskList);
+            for (int i = 0; i < createTableGroupAddMetaTasks.size(); i++) {
+                job.addTaskRelationship(createTableGroupValidateTask, createTableGroupAddMetaTasks.get(i));
+                job.addTaskRelationship(createTableGroupAddMetaTasks.get(i), tableGroupsSyncTask);
+            }
+            globalIndexPreparedData.setNeedToGetTableGroupLock(true);
+            return job;
+        } else if (isNeedToGetCreateTableGroupLock(false)) {
             DdlTask ddl = generateCreateTableJob();
             ExecutableDdlJob job = new ExecutableDdlJob();
             job.addSequentialTasks(Lists.newArrayList(ddl));
@@ -150,26 +191,52 @@ protected ExecutableDdlJob doCreate() {
             List<String> columns = columnAst2nameStr(this.columns);
             List<String> coverings = columnAst2nameStr(this.coverings);
 
+            Map<String, String> columnMapping = backfillColumnMap == null ? null :
+                backfillColumnMap.entrySet().stream().collect(Collectors.toMap(Map.Entry::getValue, Map.Entry::getKey));
+
             final String finalStatus =
                 executionContext.getParamManager().getString(ConnectionParams.GSI_FINAL_STATUS_DEBUG);
             final boolean stayAtDeleteOnly = StringUtils.equalsIgnoreCase(DELETE_ONLY.name(), finalStatus);
             final boolean stayAtWriteOnly = StringUtils.equalsIgnoreCase(WRITE_ONLY.name(), finalStatus);
 
             List<DdlTask> bringUpGsi = null;
-            if (needOnlineSchemaChange) {
+            if (useChangeSet) {
+                // online modify column
+                bringUpGsi = GsiTaskFactory.addGlobalIndexTasksChangeSet(
+                    schemaName,
+                    primaryTableName,
+                    oldIndexName,
+                    indexTableName,
+                    stayAtDeleteOnly,
+                    stayAtWriteOnly,
+                    stayAtBackFill,
+                    virtualColumnMap,
+                    backfillColumnMap,
+                    modifyStringColumns,
+                    onlineModifyColumn,
+                    mirrorCopy,
+                    physicalPlanData,
+                    globalIndexPreparedData.getIndexPartitionInfo()
+                );
+            } else if (needOnlineSchemaChange) {
                 TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(primaryTableName);
                 boolean repartition = globalIndexPreparedData.isRepartition();
                 bringUpGsi = GsiTaskFactory.addGlobalIndexTasks(
                     schemaName,
                     primaryTableName,
+                    oldIndexName,
                     indexTableName,
                     stayAtDeleteOnly,
                     stayAtWriteOnly,
                     stayAtBackFill,
                     virtualColumnMap,
+                    null,
+                    modifyStringColumns,
                     physicalPlanData,
                     tableMeta,
                     repartition,
+                    onlineModifyColumn,
+                    mirrorCopy,
                     executionContext.getOriginSql()
                 );
             } else {
@@ -179,11 +246,13 @@ protected ExecutableDdlJob doCreate() {
                     indexTableName
                 );
             }
+            boolean autoCreateTg =
+                executionContext.getParamManager().getBoolean(ConnectionParams.ALLOW_AUTO_CREATE_TABLEGROUP);
             CreateTableAddTablesPartitionInfoMetaTask createTableAddTablesPartitionInfoMetaTask =
                 new CreateTableAddTablesPartitionInfoMetaTask(schemaName, indexTableName,
                     physicalPlanData.isTemporary(),
-                    physicalPlanData.getTableGroupConfig(),  null, indexAlignWithPrimaryTableGroup, primaryTableName,
-                    null);
+                    physicalPlanData.getTableGroupConfig(), null, tableGroupAlignWithTargetTable, primaryTableName,
+                    null, false, globalIndexPreparedData.isWithImplicitTableGroup(), autoCreateTg);
             CreateTableAddTablesMetaTask addTablesMetaTask =
                 new CreateTableAddTablesMetaTask(
                     schemaName,
@@ -198,7 +267,9 @@ protected ExecutableDdlJob doCreate() {
                     addedForeignKeys,
                     hasTimestampColumnDefault,
                     specialDefaultValues,
-                    specialDefaultValueFlags
+                    specialDefaultValueFlags,
+                    columnMapping,
+                    addNewColumns
                 );
             CreateTableShowTableMetaTask showTableMetaTask =
                 new CreateTableShowTableMetaTask(schemaName, indexTableName);
@@ -216,7 +287,9 @@ protected ExecutableDdlJob doCreate() {
                     IndexStatus.CREATING,
                     clusteredIndex,
                     globalIndexPreparedData.isVisible() ? IndexVisibility.VISIBLE : IndexVisibility.INVISIBLE,
-                    needOnlineSchemaChange
+                    needOnlineSchemaChange,
+                    columnMapping,
+                    addNewColumns
                 );
             addIndexMetaTask = (GsiInsertIndexMetaTask) addIndexMetaTask.onExceptionTryRecoveryThenRollback();
 
@@ -281,11 +354,7 @@ private SubJobTask generateCreateTableJob() {
     }
 
     private String genSubJobSql() {
-        List<String> params = Lists.newArrayList(
-            ConnectionParams.ACQUIRE_CREATE_TABLE_GROUP_LOCK.getName() + "=false"
-        );
-        String hint = String.format("/*+TDDL:CMD_EXTRA(%s)*/", StringUtils.join(params, ","));
-        return String.format(globalIndexPreparedData.getIndexTablePreparedData().getSourceSql());
+        return globalIndexPreparedData.getIndexTablePreparedData().getSourceSql();
     }
 
     private SubJobTask generateDropLocalIndexJob() {
@@ -300,7 +369,8 @@ private String genDropLocalIndexSubJobSql() {
             ConnectionParams.DDL_ON_GSI.getName() + "=true"
         );
         String hint = String.format("/*+TDDL:CMD_EXTRA(%s)*/", StringUtils.join(params, ","));
-        String ddl = "alter table " + indexTableName + " drop index " + TddlConstants.UGSI_PK_UNIQUE_INDEX_NAME;
+        String ddl = "alter table " + surroundWithBacktick(indexTableName) + " drop index "
+            + TddlConstants.UGSI_PK_UNIQUE_INDEX_NAME;
         return hint + ddl;
     }
 
@@ -358,7 +428,7 @@ public static ExecutableDdlJob create(@Deprecated DDL ddl,
                                           CreateGlobalIndexPreparedData globalIndexPreparedData,
                                           ExecutionContext executionContext) {
         DdlPhyPlanBuilder builder =
-            CreateGlobalIndexBuilder.create(ddl, globalIndexPreparedData, executionContext).build();
+            CreateGlobalIndexBuilder.create(ddl, globalIndexPreparedData, null, executionContext).build();
         PhysicalPlanData physicalPlanData = builder.genPhysicalPlanData();
 
         return CreateGsiJobFactory.create(globalIndexPreparedData, physicalPlanData, executionContext).create();
@@ -367,7 +437,8 @@ public static ExecutableDdlJob create(@Deprecated DDL ddl,
     public static ExecutableDdlJob create4CreateTableWithGsi(@Deprecated DDL ddl,
                                                              CreateGlobalIndexPreparedData globalIndexPreparedData,
                                                              ExecutionContext ec) {
-        CreateGlobalIndexBuilder builder = new CreatePartitionGlobalIndexBuilder(ddl, globalIndexPreparedData, ec);
+        CreateGlobalIndexBuilder builder =
+            new CreatePartitionGlobalIndexBuilder(ddl, globalIndexPreparedData, null, false, ec);
         builder.build();
 
         boolean autoPartition = globalIndexPreparedData.getIndexTablePreparedData().isAutoPartition();
@@ -381,4 +452,15 @@ public static ExecutableDdlJob create4CreateTableWithGsi(@Deprecated DDL ddl,
         gsiJobFactory.needOnlineSchemaChange = false;
         return gsiJobFactory.create();
     }
+
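+    /**
+     * Collects the table group names whose related-table-group flag is set, i.e. those
+     * that must be created implicitly; returns true if any such table group exists.
+     */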
+    protected boolean needCreateImplicitTableGroup(List<String> tableGroups) {
+        boolean ret = false;
+        for (Map.Entry<String, Boolean> entry : this.globalIndexPreparedData.getRelatedTableGroupInfo().entrySet()) {
+            if (entry.getValue()) {
+                tableGroups.add(entry.getKey());
+                ret = true;
+            }
+        }
+        return ret;
+    }
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/DropGsiJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/DropGsiJobFactory.java
index c2c2e0441..9e814f9d8 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/DropGsiJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/DropGsiJobFactory.java
@@ -126,8 +126,8 @@ protected ExecutableDdlJob doCreate() {
             TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(primaryTableName);
             if (!tableMeta.isAutoPartition()) {
                 CdcGsiDdlMarkTask cdcGsiDdlMarkTask =
-                    new CdcGsiDdlMarkTask(schemaName, physicalPlanData,
-                        primaryTableName, executionContext.getOriginSql());
+                    new CdcGsiDdlMarkTask(schemaName, physicalPlanData, primaryTableName,
+                        executionContext.getOriginSql());
                 taskList.add(cdcGsiDdlMarkTask);
             }
         }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/DropPartitionGsiJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/DropPartitionGsiJobFactory.java
index efa1323b1..3077e8818 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/DropPartitionGsiJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/DropPartitionGsiJobFactory.java
@@ -141,9 +141,8 @@ protected ExecutableDdlJob doCreate() {
             //mark gsi task
             TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(primaryTableName);
             if (!tableMeta.isAutoPartition()) {
-                CdcGsiDdlMarkTask cdcDdlMarkTask =
-                    new CdcGsiDdlMarkTask(schemaName, physicalPlanData,
-                        primaryTableName, executionContext.getOriginSql());
+                CdcGsiDdlMarkTask cdcDdlMarkTask = new CdcGsiDdlMarkTask(schemaName, physicalPlanData,
+                    primaryTableName, executionContext.getOriginSql());
                 taskList.add(cdcDdlMarkTask);
             }
         }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/ModifyPartitionKeyJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/ModifyPartitionKeyJobFactory.java
deleted file mode 100644
index 81e0c2a3b..000000000
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/ModifyPartitionKeyJobFactory.java
+++ /dev/null
@@ -1,329 +0,0 @@
-/*
- * Copyright [2013-2021], Alibaba Group Holding Limited
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.alibaba.polardbx.executor.ddl.job.factory.gsi;
-
-import com.alibaba.polardbx.common.properties.ConnectionParams;
-import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
-import com.alibaba.polardbx.executor.ddl.job.factory.util.FactoryUtils;
-import com.alibaba.polardbx.executor.ddl.job.task.basic.AlterColumnDefaultTask;
-import com.alibaba.polardbx.executor.ddl.job.task.basic.AlterTablePhyDdlTask;
-import com.alibaba.polardbx.executor.ddl.job.task.basic.SubJobTask;
-import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask;
-import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcModifyPartitionKeyMarkTask;
-import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcRepartitionMarkTask;
-import com.alibaba.polardbx.executor.ddl.job.task.gsi.ModifyPartitionKeyCutOverTask;
-import com.alibaba.polardbx.executor.ddl.job.task.gsi.ModifyPartitionKeySyncTask;
-import com.alibaba.polardbx.executor.ddl.job.task.gsi.ModifyPartitionKeyValidateTask;
-import com.alibaba.polardbx.executor.ddl.job.task.shared.EmptyTask;
-import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.TableGroupsSyncTask;
-import com.alibaba.polardbx.executor.ddl.job.validator.GsiValidator;
-import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator;
-import com.alibaba.polardbx.executor.ddl.newengine.job.DdlExceptionAction;
-import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
-import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
-import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
-import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4DropPartitionGsi;
-import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig;
-import com.alibaba.polardbx.gms.tablegroup.TableGroupRecord;
-import com.alibaba.polardbx.gms.util.TableGroupNameUtil;
-import com.alibaba.polardbx.optimizer.OptimizerContext;
-import com.alibaba.polardbx.optimizer.config.table.TableMeta;
-import com.alibaba.polardbx.optimizer.context.ExecutionContext;
-import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.CreateGlobalIndexPreparedData;
-import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.DropGlobalIndexPreparedData;
-import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
-import org.apache.calcite.sql.SqlKind;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.collections.MapUtils;
-import org.apache.commons.lang3.StringUtils;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import static com.alibaba.polardbx.common.TddlConstants.IMPLICIT_COL_NAME;
-
-/**
- * @author wumu
- */
-public class ModifyPartitionKeyJobFactory extends DdlJobFactory {
-    private final String schemaName;
-    private final String primaryTableName;
-    private final Map<String, String> tableNameMap;
-    private final Map<CreateGlobalIndexPreparedData, PhysicalPlanData> globalIndexPrepareData;
-    private final ExecutionContext executionContext;
-    private List<String> alterDefaultColumns;
-
-    private boolean needDropImplicitKey;
-
-    private final Map<String, String> virtualColumnMap;
-    private final Map<String, String> columnNewDef;
-    private final PhysicalPlanData oldPhysicalPlanData;
-
-    public ModifyPartitionKeyJobFactory(String schemaName, String primaryTableName, Map<String, String> tableNameMap,
-                                        Map<CreateGlobalIndexPreparedData, PhysicalPlanData> globalIndexPrepareData,
-                                        Map<String, String> virtualColumnMap, Map<String, String> columnNewDef,
-                                        PhysicalPlanData oldPhysicalPlanData, ExecutionContext executionContext) {
-        this.schemaName = schemaName;
-        this.primaryTableName = primaryTableName;
-        this.tableNameMap = tableNameMap;
-        this.globalIndexPrepareData = globalIndexPrepareData;
-        this.executionContext = executionContext;
-        this.needDropImplicitKey = false;
-        this.alterDefaultColumns = null;
-        this.virtualColumnMap = virtualColumnMap;
-        this.columnNewDef = columnNewDef;
-        this.oldPhysicalPlanData = oldPhysicalPlanData;
-    }
-
-    @Override
-    protected void validate() {
-        TableValidator.validateTableExistence(schemaName, primaryTableName, executionContext);
-        GsiValidator.validateAllowDdlOnTable(schemaName, primaryTableName, executionContext);
-        GsiValidator.validateGsiSupport(schemaName, executionContext);
-
-        for (String indexTableName : tableNameMap.values()) {
-            GsiValidator.validateCreateOnGsi(schemaName, indexTableName, executionContext);
-        }
-    }
-
-    @Override
-    protected ExecutableDdlJob doCreate() {
-        ExecutableDdlJob ddlJob = new ExecutableDdlJob();
-
-        assert !globalIndexPrepareData.isEmpty();
-
-        // alter table partitions validate
-        // get old table groups
-        List<TableGroupConfig> tableGroupConfigs = new ArrayList<>();
-        List<TableGroupConfig> oldTableGroupConfigs =
-            FactoryUtils.getTableGroupConfigByTableName(schemaName, new ArrayList<>(tableNameMap.keySet()));
-        tableGroupConfigs.addAll(oldTableGroupConfigs);
-        // get new table groups
-        tableGroupConfigs.addAll(
-            globalIndexPrepareData.values().stream()
-                .map(PhysicalPlanData::getTableGroupConfig).collect(Collectors.toList())
-        );
-        DdlTask validateTask =
-            new ModifyPartitionKeyValidateTask(schemaName, primaryTableName, tableNameMap, tableGroupConfigs);
-
-        // for modify default column
-        DdlTask beginAlterColumnDefault = null;
-        DdlTask beginAlterColumnDefaultSyncTask = null;
-        if (CollectionUtils.isNotEmpty(alterDefaultColumns)) {
-            beginAlterColumnDefault =
-                new AlterColumnDefaultTask(schemaName, primaryTableName, alterDefaultColumns, true);
-            beginAlterColumnDefaultSyncTask = new TableSyncTask(schemaName, primaryTableName);
-        }
-
-        List<DdlTask> checkerTasks = genCheckerTasks();
-
-        // create gsi
-        List<ExecutableDdlJob> createGsiJobs = new ArrayList<>();
-        globalIndexPrepareData.forEach((createGlobalIndexPreparedData, physicalPlanData) -> {
-            if (physicalPlanData.getTableGroupConfig() != null) {
-                TableGroupRecord tableGroupRecord = physicalPlanData.getTableGroupConfig().getTableGroupRecord();
-                if (tableGroupRecord != null && (tableGroupRecord.id == null
-                    || tableGroupRecord.id == TableGroupRecord.INVALID_TABLE_GROUP_ID)
-                    && tableGroupRecord.getTg_type() == TableGroupRecord.TG_TYPE_DEFAULT_SINGLE_TBL_TG) {
-                    OptimizerContext oc =
-                        Objects.requireNonNull(OptimizerContext.getContext(schemaName), schemaName + " corrupted");
-                    TableGroupConfig tableGroupConfig = oc.getTableGroupInfoManager()
-                        .getTableGroupConfigByName(TableGroupNameUtil.SINGLE_DEFAULT_TG_NAME_TEMPLATE);
-                    if (tableGroupConfig != null) {
-                        tableGroupRecord.setTg_type(TableGroupRecord.TG_TYPE_NON_DEFAULT_SINGLE_TBL_TG);
-                    }
-                }
-            }
-
-            CreateGsiJobFactory createGsiJobFactory =
-                CreateGsiJobFactory.create(createGlobalIndexPreparedData, physicalPlanData, executionContext);
-            createGsiJobFactory.stayAtBackFill = true;
-            createGsiJobFactory.setVirtualColumnMap(virtualColumnMap);
-            createGsiJobs.add(createGsiJobFactory.create());
-        });
-
-        TableMeta tableMeta = executionContext.getSchemaManager().getTable(primaryTableName);
-        // cut over
-        ModifyPartitionKeyCutOverTask cutOverTask =
-            new ModifyPartitionKeyCutOverTask(schemaName, primaryTableName, tableNameMap, tableMeta.isAutoPartition(),
-                tableMeta.getPartitionInfo().isSingleTable(), tableMeta.getPartitionInfo().isBroadcastTable());
-        ModifyPartitionKeySyncTask
-            modifyPartitionKeySyncTask = new ModifyPartitionKeySyncTask(schemaName, primaryTableName, tableNameMap);
-
-        // cdc
-        DdlTask cdcDdlMarkTask = new CdcModifyPartitionKeyMarkTask(schemaName, primaryTableName, SqlKind.ALTER_TABLE);
-
-        // drop gsi
-        List<ExecutableDdlJob> dropGsiJobs = new ArrayList<>();
-
-        for (Map.Entry<String, String> entries : tableNameMap.entrySet()) {
-            String newIndexTableName = entries.getValue();
-            DropGlobalIndexPreparedData dropGlobalIndexPreparedData =
-                new DropGlobalIndexPreparedData(schemaName, primaryTableName, newIndexTableName, false);
-            dropGlobalIndexPreparedData.setRepartition(true);
-            dropGlobalIndexPreparedData.setRepartitionTableName(entries.getKey());
-            ExecutableDdlJob dropGsiJob =
-                DropGsiJobFactory.create(dropGlobalIndexPreparedData, executionContext, false, false);
-            // rollback is not supported after CutOver
-            dropGsiJob.setExceptionActionForAllTasks(DdlExceptionAction.TRY_RECOVERY_THEN_PAUSE);
-            dropGsiJobs.add(dropGsiJob);
-        }
-
-        // table groups sync task
-        TableGroupsSyncTask tableGroupsSyncTask = new TableGroupsSyncTask(schemaName,
-            oldTableGroupConfigs.stream()
-                .map(e -> e.getTableGroupRecord().getTg_name())
-                .collect(Collectors.toList())
-        );
-
-        List<DdlTask> ddlTasks = new ArrayList<>();
-        ddlTasks.add(validateTask);
-        if (CollectionUtils.isNotEmpty(alterDefaultColumns)) {
-            ddlTasks.add(beginAlterColumnDefault);
-            ddlTasks.add(beginAlterColumnDefaultSyncTask);
-        }
-
-        if (CollectionUtils.isNotEmpty(checkerTasks)) {
-            ddlTasks.addAll(checkerTasks);
-        }
-        ddlJob.addSequentialTasks(ddlTasks);
-        createGsiJobs.forEach(ddlJob::appendJob2);
-
-        final boolean skipCutOver = StringUtils.equalsIgnoreCase(
-            executionContext.getParamManager().getString(ConnectionParams.REPARTITION_SKIP_CUTOVER), "true");
-        if (!skipCutOver) {
-            ddlJob.appendTask(cutOverTask);
-            ddlJob.addTaskRelationship(cutOverTask, modifyPartitionKeySyncTask);
-            ddlJob.addTaskRelationship(modifyPartitionKeySyncTask, cdcDdlMarkTask);
-        }
-
-        final boolean skipCleanUp = StringUtils.equalsIgnoreCase(
-            executionContext.getParamManager().getString(ConnectionParams.REPARTITION_SKIP_CLEANUP), "true");
-        if (!skipCleanUp) {
-            dropGsiJobs.forEach(ddlJob::appendJob2);
-
-            ddlJob.addTaskRelationship(
-                ((ExecutableDdlJob4DropPartitionGsi) dropGsiJobs.get(dropGsiJobs.size() - 1)).getFinalSyncTask(),
-                tableGroupsSyncTask);
-
-            if (needDropImplicitKey) {
-                SubJobTask dropImplicitKeySubJobTask =
-                    new SubJobTask(schemaName,
-                        String.format("alter table %s drop column %s", primaryTableName, IMPLICIT_COL_NAME),
-                        null);
-                dropImplicitKeySubJobTask.setParentAcquireResource(true);
-
-                ddlJob.addTaskRelationship(tableGroupsSyncTask, dropImplicitKeySubJobTask);
-            }
-        } else {
-            if (!skipCutOver) {
-                ddlJob.addTaskRelationship(cdcDdlMarkTask, tableGroupsSyncTask);
-            } else {
-                ddlJob.appendTask(tableGroupsSyncTask);
-            }
-        }
-
-        ddlJob.labelAsHead(validateTask);
-        return ddlJob;
-    }
-
-    @Override
-    protected void excludeResources(Set<String> resources) {
-        resources.add(concatWithDot(schemaName, primaryTableName));
-        for (String indexTableName : tableNameMap.values()) {
-            resources.add(concatWithDot(schemaName, indexTableName));
-        }
-
-        // lock table group of primary table
-        OptimizerContext oc =
-            Objects.requireNonNull(OptimizerContext.getContext(schemaName), schemaName + " corrupted");
-
-        PartitionInfo partitionInfo = oc.getPartitionInfoManager().getPartitionInfo(primaryTableName);
-        if (partitionInfo != null && partitionInfo.getTableGroupId() != -1) {
-            TableGroupConfig tableGroupConfig =
-                oc.getTableGroupInfoManager().getTableGroupConfigById(partitionInfo.getTableGroupId());
-            String tgName = tableGroupConfig.getTableGroupRecord().getTg_name();
-            resources.add(concatWithDot(schemaName, tgName));
-        }
-    }
-
-    private List<DdlTask> genCheckerTasks() {
-        List<DdlTask> result = new ArrayList<>();
-
-        if (MapUtils.isEmpty(virtualColumnMap) || MapUtils.isEmpty(columnNewDef)) {
-            return null;
-        }
-
-        String tableNameWithBacktick = String.format("`%s`", primaryTableName);
-        virtualColumnMap.forEach((colName, virColName) -> {
-            String addSqlFormatter =
-                String.format("ALTER TABLE %%s ADD COLUMN `%s` %s GENERATED ALWAYS AS (ALTER_TYPE(`%s`)) VIRTUAL",
-                    virColName, columnNewDef.get(colName), colName);
-            String dropSqlFormatter = String.format("ALTER TABLE %%s DROP COLUMN `%s`", virColName);
-            String addSql = String.format(addSqlFormatter, tableNameWithBacktick);
-            String dropSql = String.format(dropSqlFormatter, tableNameWithBacktick);
-            String addSqlTemplate = String.format(addSqlFormatter, "?");
-            String dropSqlTemplate = String.format(dropSqlFormatter, "?");
-
-            result.add(
-                genAlterTablePhyTask(addSql, dropSql, addSqlTemplate, dropSqlTemplate, primaryTableName, "INPLACE"));
-        });
-
-        return result;
-    }
-
-    private DdlTask genAlterTablePhyTask(String sql, String reverseSql, String sqlTemplate, String reverseSqlTemplate,
-                                         String tableName, String algorithm) {
-        sql = sql + ", ALGORITHM=" + algorithm;
-        if (!StringUtils.isEmpty(reverseSql)) {
-            reverseSql = reverseSql + ", ALGORITHM=" + algorithm;
-        }
-
-        sqlTemplate = sqlTemplate + ", ALGORITHM=" + algorithm;
-        if (!StringUtils.isEmpty(reverseSqlTemplate)) {
-            reverseSqlTemplate = reverseSqlTemplate + ", ALGORITHM=" + algorithm;
-        }
-
-        PhysicalPlanData newPhysicalPlanData = oldPhysicalPlanData.clone();
-        newPhysicalPlanData.setSqlTemplate(sqlTemplate);
-        AlterTablePhyDdlTask task;
-        task = new AlterTablePhyDdlTask(schemaName, tableName, newPhysicalPlanData);
-        task.setSourceSql(sql);
-        if (!StringUtils.isEmpty(reverseSql)) {
-            task.setRollbackSql(reverseSql);
-            task.setRollbackSqlTemplate(reverseSqlTemplate);
-        }
-        return task;
-    }
-
-    @Override
-    protected void sharedResources(Set<String> resources) {
-
-    }
-
-    public void setNeedDropImplicitKey(boolean needDropImplicitKey) {
-        this.needDropImplicitKey = needDropImplicitKey;
-    }
-
-    public void setAlterDefaultColumns(List<String> alterDefaultColumns) {
-        this.alterDefaultColumns = alterDefaultColumns;
-    }
-}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/RebuildTableJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/RebuildTableJobFactory.java
new file mode 100644
index 000000000..35e1cc56f
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/RebuildTableJobFactory.java
@@ -0,0 +1,455 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.factory.gsi;
+
+import com.alibaba.polardbx.common.properties.ConnectionParams;
+import com.alibaba.polardbx.common.utils.Pair;
+import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
+import com.alibaba.polardbx.executor.ddl.job.factory.util.FactoryUtils;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.AlterColumnDefaultTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.AlterTablePhyDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.ModifyPartitionKeyRemoveTableStatisticTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.RebuildTableChangeMetaTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.SubJobTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcModifyPartitionKeyMarkTask;
+import com.alibaba.polardbx.executor.ddl.job.task.gsi.ModifyPartitionKeySyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.gsi.RebuildTableCutOverTask;
+import com.alibaba.polardbx.executor.ddl.job.task.gsi.RebuildTableValidateTask;
+import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.TableGroupsSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.validator.GsiValidator;
+import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlExceptionAction;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4CreatePartitionGsi;
+import com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils;
+import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig;
+import com.alibaba.polardbx.gms.tablegroup.TableGroupRecord;
+import com.alibaba.polardbx.gms.util.TableGroupNameUtil;
+import com.alibaba.polardbx.optimizer.OptimizerContext;
+import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager;
+import com.alibaba.polardbx.optimizer.config.table.TableMeta;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.data.RebuildTablePrepareData;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.CreateGlobalIndexPreparedData;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.DropGlobalIndexPreparedData;
+import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
+import com.alibaba.polardbx.optimizer.rule.TddlRuleManager;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.collections.MapUtils;
+import org.apache.commons.lang3.StringUtils;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
+
+import static com.alibaba.polardbx.common.TddlConstants.IMPLICIT_COL_NAME;
+import static org.apache.calcite.sql.SqlIdentifier.surroundWithBacktick;
+
+/**
+ * @author wumu
+ */
+public class RebuildTableJobFactory extends DdlJobFactory {
+    private final String schemaName;
+    private final String primaryTableName;
+    private final Map<String, String> tableNameMap;
+    private final Map<String, String> tableNameMapReverse;
+    private final Map<CreateGlobalIndexPreparedData, PhysicalPlanData> globalIndexPrepareData;
+    private final ExecutionContext executionContext;
+    private List<String> alterDefaultColumns;
+    private List<String> changedColumns;
+
+    private boolean needDropImplicitKey;
+
+    private final Map<String, String> virtualColumnMap;
+    private final Map<String, String> columnNewDef;
+    private final Map<String, String> backfillColumnMap;
+    private final PhysicalPlanData oldPhysicalPlanData;
+
+    private final Map<String, Boolean> needRehash;
+    private final List<String> modifyStringColumns;
+    private final List<String> addNewColumns;
+
+    public RebuildTableJobFactory(String schemaName, String primaryTableName,
+                                  Map<CreateGlobalIndexPreparedData, PhysicalPlanData> globalIndexPrepareData,
+                                  RebuildTablePrepareData rebuildTablePrepareData,
+                                  PhysicalPlanData oldPhysicalPlanData,
+                                  ExecutionContext executionContext) {
+        this.schemaName = schemaName;
+        this.primaryTableName = primaryTableName;
+        this.globalIndexPrepareData = globalIndexPrepareData;
+        this.executionContext = executionContext;
+        this.needDropImplicitKey = false;
+        this.alterDefaultColumns = null;
+        this.tableNameMap = rebuildTablePrepareData.getTableNameMap();
+        this.tableNameMapReverse = rebuildTablePrepareData.getTableNameMapReverse();
+        this.virtualColumnMap = rebuildTablePrepareData.getVirtualColumnMap();
+        this.columnNewDef = rebuildTablePrepareData.getColumnNewDef();
+        this.backfillColumnMap = rebuildTablePrepareData.getBackfillColumnMap();
+        this.needRehash = rebuildTablePrepareData.getNeedReHash();
+        this.modifyStringColumns = rebuildTablePrepareData.getModifyStringColumns();
+        this.addNewColumns = rebuildTablePrepareData.getAddNewColumns();
+        this.oldPhysicalPlanData = oldPhysicalPlanData;
+    }
+
+    @Override
+    protected void validate() {
+        TableValidator.validateTableExistence(schemaName, primaryTableName, executionContext);
+        GsiValidator.validateAllowDdlOnTable(schemaName, primaryTableName, executionContext);
+        GsiValidator.validateGsiSupport(schemaName, executionContext);
+
+        for (String indexTableName : tableNameMap.values()) {
+            GsiValidator.validateCreateOnGsi(schemaName, indexTableName, executionContext);
+        }
+    }
+
+    @Override
+    protected ExecutableDdlJob doCreate() {
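+        // flags controlling change-set based copy and backfill push-down for online modify column (OMC)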
+        final boolean useChangeSet =
+            ChangeSetUtils.isChangeSetProcedure(executionContext) && executionContext.getParamManager()
+                .getBoolean(ConnectionParams.ENABLE_CHANGESET_FOR_OMC);
+        final boolean enableBackFillPushDown =
+            executionContext.getParamManager().getBoolean(ConnectionParams.ENABLE_BACKFILL_OPT_FOR_OMC);
+
+        ExecutableDdlJob ddlJob = new ExecutableDdlJob();
+
+        assert !globalIndexPrepareData.isEmpty();
+
+        // alter table partitions validate
+        // get old table groups
+        List<TableGroupConfig> tableGroupConfigs = new ArrayList<>();
+        List<TableGroupConfig> oldTableGroupConfigs =
+            FactoryUtils.getTableGroupConfigByTableName(schemaName, new ArrayList<>(tableNameMap.keySet()));
+        tableGroupConfigs.addAll(oldTableGroupConfigs);
+        // get new table groups
+        tableGroupConfigs.addAll(
+            globalIndexPrepareData.values().stream()
+                .map(PhysicalPlanData::getTableGroupConfig).collect(Collectors.toList())
+        );
+        DdlTask validateTask =
+            new RebuildTableValidateTask(schemaName, primaryTableName, tableNameMap, tableGroupConfigs);
+
+        // mark the start of the table rebuild
+        RebuildTableChangeMetaTask rebuildTableChangeMetaTask =
+            new RebuildTableChangeMetaTask(schemaName, primaryTableName);
+        TableSyncTask rebuildSyncTask = new TableSyncTask(schemaName, primaryTableName);
+
+        // for modify default column
+        DdlTask beginAlterColumnDefault = null;
+        DdlTask beginAlterColumnDefaultSyncTask = null;
+        if (CollectionUtils.isNotEmpty(alterDefaultColumns)) {
+            beginAlterColumnDefault =
+                new AlterColumnDefaultTask(schemaName, primaryTableName, alterDefaultColumns, true);
+            beginAlterColumnDefaultSyncTask = new TableSyncTask(schemaName, primaryTableName);
+        }
+
+        List<DdlTask> checkerTasks = genCheckerTasks();
+
+        // create gsi
+        List<ExecutableDdlJob> createGsiJobs = new ArrayList<>();
+        AtomicBoolean hasSubJob = new AtomicBoolean(false);
+        List<Pair<CreateGlobalIndexPreparedData, PhysicalPlanData>> listGlobalIndexPrepareData = new ArrayList<>();
+        globalIndexPrepareData.forEach((createGlobalIndexPreparedData, physicalPlanData) -> {
+            listGlobalIndexPrepareData.add(new Pair<>(createGlobalIndexPreparedData, physicalPlanData));
+        });
+
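+        // create the GSIs whose tableGroupAlignWithTargetTable is empty first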
+        Collections.sort(listGlobalIndexPrepareData,
+            new Comparator<Pair<CreateGlobalIndexPreparedData, PhysicalPlanData>>() {
+                @Override
+                public int compare(Pair<CreateGlobalIndexPreparedData, PhysicalPlanData> o1,
+                                   Pair<CreateGlobalIndexPreparedData, PhysicalPlanData> o2) {
+                    CreateGlobalIndexPreparedData data1 = o1.getKey();
+                    CreateGlobalIndexPreparedData data2 = o2.getKey();
+
+                    // check whether tableGroupAlignWithTargetTable is empty for each side
+                    boolean isData1TableGroupAlignWithTargetTableEmpty =
+                        StringUtils.isEmpty(data1.getTableGroupAlignWithTargetTable());
+                    boolean isData2TableGroupAlignWithTargetTableEmpty =
+                        StringUtils.isEmpty(data2.getTableGroupAlignWithTargetTable());
+
+                    if (isData1TableGroupAlignWithTargetTableEmpty && !isData2TableGroupAlignWithTargetTableEmpty) {
+                        return -1; // data1's tableGroupAlignWithTargetTable is empty, so data1 sorts first
+                    } else if (!isData1TableGroupAlignWithTargetTableEmpty
+                        && isData2TableGroupAlignWithTargetTableEmpty) {
+                        return 1; // data2's tableGroupAlignWithTargetTable is empty, so data1 sorts after it
+                    } else {
+                        return 0; // both empty or both non-empty: treat as equal
+                    }
+                }
+            });
+        for (int i = 0; i < listGlobalIndexPrepareData.size(); i++) {
+            Pair<CreateGlobalIndexPreparedData, PhysicalPlanData> pair = listGlobalIndexPrepareData.get(i);
+            PhysicalPlanData physicalPlanData = pair.getValue();
+            CreateGlobalIndexPreparedData createGlobalIndexPreparedData = pair.getKey();
+            if (!hasSubJob.get()) {
+                if (physicalPlanData.getTableGroupConfig() != null) {
+                    TableGroupRecord tableGroupRecord = physicalPlanData.getTableGroupConfig().getTableGroupRecord();
+                    if (tableGroupRecord != null && (tableGroupRecord.id == null
+                        || tableGroupRecord.id == TableGroupRecord.INVALID_TABLE_GROUP_ID)
+                        && tableGroupRecord.getTg_type() == TableGroupRecord.TG_TYPE_DEFAULT_SINGLE_TBL_TG) {
+                        OptimizerContext oc =
+                            Objects.requireNonNull(OptimizerContext.getContext(schemaName), schemaName + " corrupted");
+                        TableGroupConfig tableGroupConfig = oc.getTableGroupInfoManager()
+                            .getTableGroupConfigByName(TableGroupNameUtil.SINGLE_DEFAULT_TG_NAME_TEMPLATE);
+                        if (tableGroupConfig != null) {
+                            tableGroupRecord.setTg_type(TableGroupRecord.TG_TYPE_NON_DEFAULT_SINGLE_TBL_TG);
+                        }
+                    }
+                }
+
+                CreateGsiJobFactory createGsiJobFactory =
+                    CreateGsiJobFactory.create(createGlobalIndexPreparedData, physicalPlanData, executionContext);
+                createGsiJobFactory.stayAtBackFill = true;
+                createGsiJobFactory.setVirtualColumnMap(virtualColumnMap);
+                createGsiJobFactory.setBackfillColumnMap(backfillColumnMap);
+                String oldIndexName = tableNameMapReverse.get(createGlobalIndexPreparedData.getIndexTableName());
+                createGsiJobFactory.setOldIndexName(oldIndexName);
+
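+                // a mirror copy is possible only when the index data does not need re-hashing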
+                boolean mirrorCopy = !needRehash.get(createGlobalIndexPreparedData.getIndexTableName());
+                if (enableBackFillPushDown) {
+                    createGsiJobFactory.setMirrorCopy(mirrorCopy);
+                }
+                TableMeta gsiTableMeta = executionContext.getSchemaManager(schemaName).getTable(oldIndexName);
+                if (useChangeSet && ChangeSetUtils.supportUseChangeSet(
+                    ComplexTaskMetaManager.ComplexTaskType.ONLINE_MODIFY_COLUMN, gsiTableMeta)) {
+                    createGsiJobFactory.setUseChangeSet(mirrorCopy);
+                }
+
+                createGsiJobFactory.setOnlineModifyColumn(true);
+                createGsiJobFactory.setModifyStringColumns(modifyStringColumns);
+                createGsiJobFactory.setAddNewColumns(addNewColumns);
+                ExecutableDdlJob gsiJob = createGsiJobFactory.create();
+                SubJobTask subJobTask = createGsiJobFactory.rerunTask;
+                if (createGlobalIndexPreparedData.isNeedToGetTableGroupLock() && !hasSubJob.get()) {
+                    if (StringUtils.isEmpty(subJobTask.getDdlStmt())) {
+                        continue;
+                    } else {
+                        createGsiJobs.add(gsiJob);
+                        hasSubJob.set(true);
+                        break;
+                    }
+                } else {
+                    createGsiJobs.add(gsiJob);
+                }
+            }
+        }
+
+        if (hasSubJob.get()) {
+            createGsiJobs.forEach(ddlJob::appendJob2);
+            return ddlJob;
+        }
+        TableMeta tableMeta = executionContext.getSchemaManager().getTable(primaryTableName);
+        TddlRuleManager tddlRuleManager = executionContext.getSchemaManager().getTddlRuleManager();
+        // cut over
+        RebuildTableCutOverTask cutOverTask =
+            new RebuildTableCutOverTask(schemaName, primaryTableName, tableNameMap,
+                tableMeta.isAutoPartition(),
+                tddlRuleManager.isTableInSingleDb(primaryTableName),
+                tddlRuleManager.isBroadCast(primaryTableName)
+            );
+        ModifyPartitionKeySyncTask
+            modifyPartitionKeySyncTask = new ModifyPartitionKeySyncTask(schemaName, primaryTableName, tableNameMap);
+
+        ModifyPartitionKeyRemoveTableStatisticTask removeTableStatisticTask =
+            new ModifyPartitionKeyRemoveTableStatisticTask(schemaName, primaryTableName, changedColumns);
+
+        // cdc
+        DdlTask cdcDdlMarkTask =
+            new CdcModifyPartitionKeyMarkTask(schemaName, primaryTableName, tableNameMap.get(primaryTableName),
+                SqlKind.ALTER_TABLE, tableNameMap);
+
+        // drop gsi
+        List<ExecutableDdlJob> dropGsiJobs = new ArrayList<>();
+
+        for (Map.Entry<String, String> entries : tableNameMap.entrySet()) {
+            String newIndexTableName = entries.getValue();
+            DropGlobalIndexPreparedData dropGlobalIndexPreparedData =
+                new DropGlobalIndexPreparedData(schemaName, primaryTableName, newIndexTableName, false);
+            dropGlobalIndexPreparedData.setRepartition(true);
+            dropGlobalIndexPreparedData.setRepartitionTableName(entries.getKey());
+            ExecutableDdlJob dropGsiJob =
+                DropGsiJobFactory.create(dropGlobalIndexPreparedData, executionContext, false, false);
+            // rollback is not supported after CutOver
+            dropGsiJob.setExceptionActionForAllTasks(DdlExceptionAction.TRY_RECOVERY_THEN_PAUSE);
+            dropGsiJobs.add(dropGsiJob);
+        }
+
+        // table groups sync task
+        TableGroupsSyncTask tableGroupsSyncTask = new TableGroupsSyncTask(schemaName,
+            oldTableGroupConfigs.stream()
+                .map(e -> e.getTableGroupRecord().getTg_name())
+                .collect(Collectors.toList())
+        );
+
+        List<DdlTask> ddlTasks = new ArrayList<>();
+        ddlTasks.add(validateTask);
+        if (CollectionUtils.isNotEmpty(alterDefaultColumns)) {
+            ddlTasks.add(beginAlterColumnDefault);
+            ddlTasks.add(beginAlterColumnDefaultSyncTask);
+        }
+        ddlTasks.add(rebuildTableChangeMetaTask);
+        ddlTasks.add(rebuildSyncTask);
+
+        if (CollectionUtils.isNotEmpty(checkerTasks)) {
+            ddlTasks.addAll(checkerTasks);
+        }
+        ddlJob.addSequentialTasks(ddlTasks);
+        for (ExecutableDdlJob exeDdljob : createGsiJobs) {
+            if (exeDdljob instanceof ExecutableDdlJob4CreatePartitionGsi) {
+                TableGroupConfig tgConfig =
+                    ((ExecutableDdlJob4CreatePartitionGsi) (exeDdljob)).getCreateGsiValidateTask()
+                        .getTableGroupConfig();
+                tgConfig.setPartitionGroupRecords(null);
+                // do not validate the table group again; ModifyPartitionKeyValidateTask will do it
+            }
+        }
+        createGsiJobs.forEach(ddlJob::appendJob2);
+
+        final boolean skipCutOver = StringUtils.equalsIgnoreCase(
+            executionContext.getParamManager().getString(ConnectionParams.REPARTITION_SKIP_CUTOVER), "true");
+        if (!skipCutOver) {
+            ddlJob.appendTask(cdcDdlMarkTask);
+            ddlJob.addTaskRelationship(cdcDdlMarkTask, cutOverTask);
+            ddlJob.addTaskRelationship(cutOverTask, modifyPartitionKeySyncTask);
+            ddlJob.addTaskRelationship(modifyPartitionKeySyncTask, removeTableStatisticTask);
+        }
+
+        final boolean skipCleanUp = StringUtils.equalsIgnoreCase(
+            executionContext.getParamManager().getString(ConnectionParams.REPARTITION_SKIP_CLEANUP), "true");
+        if (!skipCleanUp) {
+            dropGsiJobs.forEach(ddlJob::appendJob2);
+
+            ddlJob.addTaskRelationship(
+                (dropGsiJobs.get(dropGsiJobs.size() - 1)).getTail(),
+                tableGroupsSyncTask);
+
+            if (needDropImplicitKey) {
+                SubJobTask dropImplicitKeySubJobTask =
+                    new SubJobTask(schemaName,
+                        String.format("alter table %s drop column %s", surroundWithBacktick(primaryTableName),
+                            IMPLICIT_COL_NAME), null);
+                dropImplicitKeySubJobTask.setParentAcquireResource(true);
+
+                ddlJob.addTaskRelationship(tableGroupsSyncTask, dropImplicitKeySubJobTask);
+            }
+        } else {
+            if (!skipCutOver) {
+                ddlJob.addTaskRelationship(removeTableStatisticTask, tableGroupsSyncTask);
+            } else {
+                ddlJob.appendTask(tableGroupsSyncTask);
+            }
+        }
+
+        ddlJob.labelAsHead(validateTask);
+        return ddlJob;
+    }
+
+    @Override
+    protected void excludeResources(Set<String> resources) {
+        resources.add(concatWithDot(schemaName, primaryTableName));
+        for (String indexTableName : tableNameMap.values()) {
+            resources.add(concatWithDot(schemaName, indexTableName));
+        }
+
+        // lock table group of primary table
+        OptimizerContext oc =
+            Objects.requireNonNull(OptimizerContext.getContext(schemaName), schemaName + " corrupted");
+
+        PartitionInfo partitionInfo = oc.getPartitionInfoManager().getPartitionInfo(primaryTableName);
+        if (partitionInfo != null && partitionInfo.getTableGroupId() != -1) {
+            TableGroupConfig tableGroupConfig =
+                oc.getTableGroupInfoManager().getTableGroupConfigById(partitionInfo.getTableGroupId());
+            String tgName = tableGroupConfig.getTableGroupRecord().getTg_name();
+            resources.add(concatWithDot(schemaName, tgName));
+        }
+    }
+
+    private List<DdlTask> genCheckerTasks() {
+        List<DdlTask> result = new ArrayList<>();
+
+        if (MapUtils.isEmpty(virtualColumnMap) || MapUtils.isEmpty(columnNewDef)) {
+            return null;
+        }
+
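+        // for each converted column, add a VIRTUAL generated column computed via ALTER_TYPE to pre-check the type change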
+        String tableNameWithBacktick = surroundWithBacktick(primaryTableName);
+        virtualColumnMap.forEach((colName, virColName) -> {
+            String addSqlFormatter =
+                String.format("ALTER TABLE %%s ADD COLUMN `%s` %s GENERATED ALWAYS AS (ALTER_TYPE(`%s`)) VIRTUAL",
+                    virColName, columnNewDef.get(colName), colName);
+            String dropSqlFormatter = String.format("ALTER TABLE %%s DROP COLUMN `%s`", virColName);
+            String addSql = String.format(addSqlFormatter, tableNameWithBacktick);
+            String dropSql = String.format(dropSqlFormatter, tableNameWithBacktick);
+            String addSqlTemplate = String.format(addSqlFormatter, "?");
+            String dropSqlTemplate = String.format(dropSqlFormatter, "?");
+
+            result.add(
+                genAlterTablePhyTask(addSql, dropSql, addSqlTemplate, dropSqlTemplate, primaryTableName, "INPLACE"));
+        });
+
+        return result;
+    }
+
+    private DdlTask genAlterTablePhyTask(String sql, String reverseSql, String sqlTemplate, String reverseSqlTemplate,
+                                         String tableName, String algorithm) {
+        sql = sql + ", ALGORITHM=" + algorithm;
+        if (!StringUtils.isEmpty(reverseSql)) {
+            reverseSql = reverseSql + ", ALGORITHM=" + algorithm;
+        }
+
+        sqlTemplate = sqlTemplate + ", ALGORITHM=" + algorithm;
+        if (!StringUtils.isEmpty(reverseSqlTemplate)) {
+            reverseSqlTemplate = reverseSqlTemplate + ", ALGORITHM=" + algorithm;
+        }
+
+        PhysicalPlanData newPhysicalPlanData = oldPhysicalPlanData.clone();
+        newPhysicalPlanData.setSqlTemplate(sqlTemplate);
+        AlterTablePhyDdlTask task;
+        task = new AlterTablePhyDdlTask(schemaName, tableName, newPhysicalPlanData);
+        task.setSourceSql(sql);
+        if (!StringUtils.isEmpty(reverseSql)) {
+            task.setRollbackSql(reverseSql);
+            task.setRollbackSqlTemplate(reverseSqlTemplate);
+        }
+        return task;
+    }
+
+    @Override
+    protected void sharedResources(Set<String> resources) {
+
+    }
+
+    public void setNeedDropImplicitKey(boolean needDropImplicitKey) {
+        this.needDropImplicitKey = needDropImplicitKey;
+    }
+
+    public void setAlterDefaultColumns(List<String> alterDefaultColumns) {
+        this.alterDefaultColumns = alterDefaultColumns;
+    }
+
+    public void setChangedColumns(List<String> changedColumns) {
+        this.changedColumns = changedColumns;
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/RemovePartitioningJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/RemovePartitioningJobFactory.java
index 31e6d72f6..912813c09 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/RemovePartitioningJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/RemovePartitioningJobFactory.java
@@ -16,6 +16,7 @@
 
 package com.alibaba.polardbx.executor.ddl.job.factory.gsi;
 
+import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility;
 import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
 import com.alibaba.polardbx.executor.ddl.job.factory.util.FactoryUtils;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.SubJobTask;
@@ -32,14 +33,11 @@
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
-import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4AlterTable;
 import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4CreatePartitionGsi;
 import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4DropPartitionGsi;
 import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
-import com.alibaba.polardbx.optimizer.core.function.calc.scalar.operator.Sub;
-import com.alibaba.polardbx.optimizer.core.rel.ddl.data.RepartitionPrepareData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.CreateGlobalIndexPreparedData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.DropGlobalIndexPreparedData;
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
@@ -119,11 +117,15 @@ protected ExecutableDdlJob doCreate() {
         });
 
         RepartitionCutOverTask cutOverTask =
-            new RepartitionCutOverTask(schemaName, primaryTableName, indexTableName, false, false, true);
+            new RepartitionCutOverTask(schemaName, primaryTableName, indexTableName, false, false, true, false);
         RepartitionSyncTask repartitionSyncTask = new RepartitionSyncTask(schemaName, primaryTableName, indexTableName);
 
         // cdc
-        DdlTask cdcDdlMarkTask = new CdcRepartitionMarkTask(schemaName, primaryTableName, SqlKind.ALTER_TABLE);
+        if (executionContext.getDdlContext().isSubJob()) {
+            throw new RuntimeException("unexpected parent ddl job");
+        }
+        DdlTask cdcDdlMarkTask = new CdcRepartitionMarkTask(
+            schemaName, primaryTableName, SqlKind.ALTER_TABLE, CdcDdlMarkVisibility.Protected);
 
         // drop gsi
         DropGlobalIndexPreparedData dropGlobalIndexPreparedData =
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/RenameGsiJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/RenameGsiJobFactory.java
index a1d6e1110..6d1158e8f 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/RenameGsiJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/RenameGsiJobFactory.java
@@ -73,8 +73,6 @@ protected void validate() {
 
     @Override
     protected ExecutableDdlJob doCreate() {
-        executionContext.setPhyTableRenamed(false);
-
         TableMeta tableMeta =
             OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(gsiName);
         String primaryTableName = tableMeta.getGsiTableMetaBean().gsiMetaBean.tableName;
@@ -82,7 +80,7 @@ protected ExecutableDdlJob doCreate() {
         DdlTask validateTask = new RenameTableValidateTask(schemaName, gsiName, newGsiName);
         DdlTask addMetaTask = new RenameTableAddMetaTask(schemaName, gsiName, newGsiName);
 
-        DdlTask updateMetaTask = new RenameGsiUpdateMetaTask(schemaName, primaryTableName, gsiName, newGsiName);
+        DdlTask updateMetaTask = new RenameGsiUpdateMetaTask(schemaName, primaryTableName, gsiName, newGsiName, false);
         DdlTask syncTask = new TableSyncTask(schemaName, primaryTableName);
 
         List taskList = new ArrayList<>();
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/RepartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/RepartitionJobFactory.java
index 62ce6c1e7..c1802f84f 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/RepartitionJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/RepartitionJobFactory.java
@@ -16,8 +16,11 @@
 
 package com.alibaba.polardbx.executor.ddl.job.factory.gsi;
 
+import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility;
 import com.alibaba.polardbx.common.ddl.foreignkey.ForeignKeyData;
+import com.alibaba.polardbx.common.ddl.newengine.DdlType;
 import com.alibaba.polardbx.common.properties.ConnectionParams;
+import com.alibaba.polardbx.common.utils.GeneralUtil;
 import com.alibaba.polardbx.common.utils.Pair;
 import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
 import com.alibaba.polardbx.executor.ddl.job.factory.util.FactoryUtils;
@@ -48,6 +51,7 @@
 import com.alibaba.polardbx.gms.topology.DbInfoManager;
 import com.alibaba.polardbx.gms.util.TableGroupNameUtil;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
+import com.alibaba.polardbx.optimizer.context.DdlContext;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.RepartitionPrepareData;
 import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.CreateGlobalIndexPreparedData;
@@ -64,6 +68,12 @@
 import java.util.Objects;
 import java.util.Set;
 
+import static com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility.Private;
+import static com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility.Protected;
+import static com.alibaba.polardbx.common.ddl.newengine.DdlType.ALTER_TABLE;
+import static com.alibaba.polardbx.common.ddl.newengine.DdlType.ALTER_TABLEGROUP;
+import static com.alibaba.polardbx.common.ddl.newengine.DdlType.ALTER_TABLE_SET_TABLEGROUP;
+
 /**
  * @author guxu wumu
  */
@@ -86,12 +96,15 @@ public class RepartitionJobFactory extends DdlJobFactory {
     private final List<Pair<String, String>> addForeignKeySql;
     private final List<Pair<String, String>> dropForeignKeySql;
     private final Map<String, Set<String>> foreignKeyChildTable;
+    private final List<Pair<String, String>> addCciSql;
+    private final List<Pair<String, String>> dropCciSql;
 
     private final ExecutionContext executionContext;
 
     private final RelOptCluster cluster;
 
     private final Boolean modifyLocality;
+    private final Boolean repartitionGsi;
 
     public RepartitionJobFactory(CreateGlobalIndexPreparedData globalIndexPreparedData,
                                  RepartitionPrepareData repartitionPrepareData,
@@ -115,6 +128,9 @@ public RepartitionJobFactory(CreateGlobalIndexPreparedData globalIndexPreparedDa
         this.dropForeignKeySql = repartitionPrepareData.getDropForeignKeySql();
         this.foreignKeyChildTable = repartitionPrepareData.getForeignKeyChildTable();
         this.modifyLocality = repartitionPrepareData.getModifyLocality();
+        this.repartitionGsi = repartitionPrepareData.getRepartitionGsi();
+        this.addCciSql = repartitionPrepareData.getAddCciSql();
+        this.dropCciSql = repartitionPrepareData.getDropCciSql();
         this.physicalPlanData = physicalPlanData;
         this.executionContext = executionContext;
         this.cluster = cluster;
@@ -172,12 +188,31 @@ protected ExecutableDdlJob doCreate() {
             CreateGsiJobFactory.create(globalIndexPreparedData, physicalPlanData, executionContext);
         createGsiJobFactory.stayAtBackFill = true;
         ExecutableDdlJob createGsiJob = createGsiJobFactory.create();
+        if (globalIndexPreparedData.getRelatedTableGroupInfo().values().stream().anyMatch(o -> o.booleanValue())
+            || globalIndexPreparedData.isNeedToGetTableGroupLock()) {
+            return createGsiJob;
+        }
 
         RepartitionCutOverTask cutOverTask =
-            new RepartitionCutOverTask(schemaName, primaryTableName, indexTableName, isSingle, isBroadcast, false);
+            new RepartitionCutOverTask(schemaName, primaryTableName, indexTableName, isSingle, isBroadcast, false,
+                repartitionGsi != null && repartitionGsi);
         RepartitionSyncTask repartitionSyncTask = new RepartitionSyncTask(schemaName, primaryTableName, indexTableName);
 
-        DdlTask cdcDdlMarkTask = new CdcRepartitionMarkTask(schemaName, primaryTableName, SqlKind.ALTER_TABLE);
+        DdlTask cdcDdlMarkTask = null;
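+        // CDC mark visibility depends on how this repartition was invoked:
+        // Protected for a root/plain ALTER TABLE, Private for table group level sub-jobs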
+        if (executionContext.getDdlContext().isSubJob()) {
+            DdlContext rootDdlContext = getRootParentDdlContext(executionContext.getDdlContext());
+            DdlType rootDdlType = rootDdlContext.getDdlType();
+            if (ALTER_TABLE_SET_TABLEGROUP != rootDdlType && ALTER_TABLE != rootDdlType
+                && ALTER_TABLEGROUP != rootDdlType) {
+                throw new RuntimeException("unexpected parent ddl job " + rootDdlContext.getDdlType());
+            }
+
+            CdcDdlMarkVisibility visibility = rootDdlType == ALTER_TABLE ? Protected : Private;
+            cdcDdlMarkTask = new CdcRepartitionMarkTask(schemaName, primaryTableName, SqlKind.ALTER_TABLE, visibility);
+        } else {
+            cdcDdlMarkTask = new CdcRepartitionMarkTask(
+                schemaName, primaryTableName, SqlKind.ALTER_TABLE, Protected);
+        }
 
         DropGlobalIndexPreparedData dropGlobalIndexPreparedData =
             new DropGlobalIndexPreparedData(schemaName, primaryTableName, indexTableName, false);
@@ -252,6 +287,24 @@ protected ExecutableDdlJob doCreate() {
             return repartitionJob;
         }
 
+        if (GeneralUtil.isNotEmpty(dropCciSql)) {
+            List<SubJobTask> dropCciSubJobTasks = new ArrayList<>();
+            for (Pair<String, String> sql : dropCciSql) {
+                SubJobTask dropCciSubJobTask =
+                    new SubJobTask(schemaName, sql.getKey(), sql.getValue());
+                dropCciSubJobTask.setParentAcquireResource(true);
+                dropCciSubJobTasks.add(dropCciSubJobTask);
+            }
+
+            for (int i = 0; i < dropCciSubJobTasks.size(); i++) {
+                if (i == 0) {
+                    repartitionJob.addTaskRelationship(validateTask, dropCciSubJobTasks.get(i));
+                } else {
+                    repartitionJob.addTaskRelationship(dropCciSubJobTasks.get(i - 1), dropCciSubJobTasks.get(i));
+                }
+            }
+        }
+
         // 2. drop foreign keys on child table
         if (dropForeignKeySql != null && !dropForeignKeySql.isEmpty()) {
             // drop foreign key subJob
@@ -270,18 +323,14 @@ protected ExecutableDdlJob doCreate() {
                     repartitionJob.addTaskRelationship(dropFkSubJobTasks.get(i - 1), dropFkSubJobTasks.get(i));
                 }
             }
-
-            for (Map.Entry<String, Set<String>> entry : foreignKeyChildTable.entrySet()) {
-                for (String table : entry.getValue()) {
-                    TableSyncTask tableSyncTask = new TableSyncTask(entry.getKey(), table);
-                    repartitionJob.addTask(tableSyncTask);
-                }
-            }
         }
 
         // 3. create gsi
         repartitionJob.appendJob2(createGsiJob);
 
+        // drop cci
+//        dropColumnarClusterIndexJobs.forEach(repartitionJob::appendJob2);
+
         // 4. cut over
         final boolean skipCutOver = StringUtils.equalsIgnoreCase(
             executionContext.getParamManager().getString(ConnectionParams.REPARTITION_SKIP_CUTOVER), "true");
@@ -304,6 +353,27 @@ protected ExecutableDdlJob doCreate() {
         // 6. drop gsi tables
         dropGlobalIndexJobs.forEach(repartitionJob::appendJob2);
 
+        // create cci
+//        repartitionJob.appendJob2(createCciJob);
+
+        if (GeneralUtil.isNotEmpty(addCciSql)) {
+            List<SubJobTask> addCciSubJobTasks = new ArrayList<>();
+            for (Pair<String, String> sql : addCciSql) {
+                SubJobTask addCciSubJobTask =
+                    new SubJobTask(schemaName, sql.getKey(), sql.getValue());
+                addCciSubJobTask.setParentAcquireResource(true);
+                addCciSubJobTasks.add(addCciSubJobTask);
+            }
+
+            for (int i = 0; i < addCciSubJobTasks.size(); i++) {
+                if (i == 0) {
+                    repartitionJob.appendTask(addCciSubJobTasks.get(i));
+                } else {
+                    repartitionJob.addTaskRelationship(addCciSubJobTasks.get(i - 1), addCciSubJobTasks.get(i));
+                }
+            }
+        }
+
         // 7. drop/create fk on related table
         if (addForeignKeySql != null && !addForeignKeySql.isEmpty()) {
             // change fk meta
@@ -312,6 +382,12 @@ protected ExecutableDdlJob doCreate() {
 
             repartitionJob.appendTask(repartitionChangeFkMetaTask);
 
+            for (ForeignKeyData fk : modifyForeignKeys) {
+                repartitionJob.appendTask(new TableSyncTask(fk.refSchema, fk.refTableName));
+            }
+            TableSyncTask syncTask = new TableSyncTask(schemaName, primaryTableName);
+            repartitionJob.appendTask(syncTask);
+
             // add foreign key subJob
             List<SubJobTask> addFkSubJobTasks = new ArrayList<>();
             for (Pair<String, String> sql : addForeignKeySql) {
@@ -323,7 +399,7 @@ protected ExecutableDdlJob doCreate() {
 
             for (int i = 0; i < addFkSubJobTasks.size(); i++) {
                 if (i == 0) {
-                    repartitionJob.addTaskRelationship(repartitionChangeFkMetaTask, addFkSubJobTasks.get(i));
+                    repartitionJob.addTaskRelationship(syncTask, addFkSubJobTasks.get(i));
                 } else {
                     repartitionJob.addTaskRelationship(addFkSubJobTasks.get(i - 1), addFkSubJobTasks.get(i));
                 }
@@ -463,4 +539,11 @@ private DdlTask getDropGsiHeadTask(ExecutableDdlJob dropGsiJob) {
         return dropGsiLastTask;
     }
 
+    private DdlContext getRootParentDdlContext(DdlContext ddlContext) {
+        if (ddlContext.getParentDdlContext() != null) {
+            return getRootParentDdlContext(ddlContext.getParentDdlContext());
+        } else {
+            return ddlContext;
+        }
+    }
 }
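
The hunks above attach the new CCI sub-jobs to the repartition DAG with the same chaining idiom used for the foreign-key sub-jobs: the first sub-task hangs off a predecessor task, and every later sub-task hangs off the one before it, so the batch executes strictly in order. A minimal, self-contained sketch of that chaining pattern (class and task names here are illustrative only, not PolarDB-X APIs):

    import java.util.List;

    final class TaskChainSketch {
        // Record each dependency edge as "from -> to", standing in for addTaskRelationship.
        static void chainAfter(List<String> edges, String predecessor, List<String> tasks) {
            for (int i = 0; i < tasks.size(); i++) {
                if (i == 0) {
                    edges.add(predecessor + " -> " + tasks.get(0));      // head attaches to the predecessor
                } else {
                    edges.add(tasks.get(i - 1) + " -> " + tasks.get(i)); // the rest form a strict sequence
                }
            }
        }

        public static void main(String[] args) {
            java.util.List<String> edges = new java.util.ArrayList<>();
            chainAfter(edges, "validateTask", List.of("dropCciSubJob#1", "dropCciSubJob#2"));
            edges.forEach(System.out::println);
            // validateTask -> dropCciSubJob#1
            // dropCciSubJob#1 -> dropCciSubJob#2
        }
    }
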
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/TruncateTableWithGsiJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/TruncateTableWithGsiJobFactory.java
index 01d1de74c..9f31f1eb8 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/TruncateTableWithGsiJobFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/TruncateTableWithGsiJobFactory.java
@@ -19,7 +19,6 @@
 import com.alibaba.polardbx.common.ddl.foreignkey.ForeignKeyData;
 import com.alibaba.polardbx.executor.ddl.job.builder.gsi.CreatePartitionTableWithGsiBuilder;
 import com.alibaba.polardbx.executor.ddl.job.builder.gsi.CreateTableWithGsiBuilder;
-import com.alibaba.polardbx.executor.ddl.job.builder.gsi.DropGlobalIndexBuilder;
 import com.alibaba.polardbx.executor.ddl.job.converter.DdlJobDataConverter;
 import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
 import com.alibaba.polardbx.executor.ddl.job.factory.CreatePartitionTableJobFactory;
@@ -224,7 +223,8 @@ protected ExecutableDdlJob generateCreateTmpTableJob() {
         Map specialDefaultValueFlags =
             createTablePreparedData.getPrimaryTablePreparedData().getSpecialDefaultValueFlags();
         PhysicalPlanData physicalPlanData = DdlJobDataConverter
-            .convertToPhysicalPlanData(primaryTableTopology, primaryTablePhysicalPlans, false, isAutoPartition);
+            .convertToPhysicalPlanData(primaryTableTopology, primaryTablePhysicalPlans, false, isAutoPartition,
+                executionContext);
 
         // Create Primary Table
         ExecutableDdlJob4CreateTable createTableJob = (ExecutableDdlJob4CreateTable) new CreateTableJobFactory(
@@ -234,7 +234,9 @@ protected ExecutableDdlJob generateCreateTmpTableJob() {
             specialDefaultValueFlags,
             addedForeignKeys,
             physicalPlanData,
-            executionContext).create();
+            preparedData.getDdlVersionId(),
+            executionContext,
+            null).create();
 
         DdlTask thenCreateGsiTask = createTableJob.getCreateTableShowTableMetaTask();
         DdlTask lastTableSyncTask = createTableJob.getTableSyncTask();
@@ -314,13 +316,14 @@ protected ExecutableDdlJob generateCreateTmpPartitionTableJob() {
         Map specialDefaultValueFlags =
             createTablePreparedData.getPrimaryTablePreparedData().getSpecialDefaultValueFlags();
         PhysicalPlanData physicalPlanData = DdlJobDataConverter
-            .convertToPhysicalPlanData(primaryTableTopology, primaryTablePhysicalPlans, false, isAutoPartition);
+            .convertToPhysicalPlanData(primaryTableTopology, primaryTablePhysicalPlans, false, isAutoPartition,
+                executionContext);
 
         // Create Primary Table
         ExecutableDdlJob4CreatePartitionTable createTableJob = (ExecutableDdlJob4CreatePartitionTable)
             new CreatePartitionTableJobFactory(isAutoPartition, hasTimestampColumnDefault, specialDefaultValues,
                 specialDefaultValueFlags, addedForeignKeys, physicalPlanData, executionContext,
-                createTablePreparedData.getPrimaryTablePreparedData(), null).create();
+                createTablePreparedData.getPrimaryTablePreparedData(), null, null).create();
 
         result.addSequentialTasks(Lists.newArrayList(
             createTableJob.getCreatePartitionTableValidateTask(),
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/columnar/CreateColumnarIndexJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/columnar/CreateColumnarIndexJobFactory.java
new file mode 100644
index 000000000..2559221fc
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/columnar/CreateColumnarIndexJobFactory.java
@@ -0,0 +1,437 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.factory.gsi.columnar;
+
+import com.alibaba.polardbx.common.Engine;
+import com.alibaba.polardbx.common.ddl.newengine.DdlState;
+import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException;
+import com.alibaba.polardbx.common.properties.ConnectionParams;
+import com.alibaba.polardbx.executor.ddl.job.builder.gsi.CreateGlobalIndexBuilder;
+import com.alibaba.polardbx.executor.ddl.job.builder.gsi.CreatePartitionGlobalIndexBuilder;
+import com.alibaba.polardbx.executor.ddl.job.converter.DdlJobDataConverter;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableShowTableMetaTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcCreateColumnarIndexTask;
+import com.alibaba.polardbx.executor.ddl.job.task.columnar.*;
+import com.alibaba.polardbx.executor.ddl.job.task.gsi.CciUpdateIndexStatusTask;
+import com.alibaba.polardbx.executor.ddl.job.validator.GsiValidator;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlExceptionAction;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4CreateColumnarIndex;
+import com.alibaba.polardbx.gms.locality.LocalityDesc;
+import com.alibaba.polardbx.gms.metadb.table.ColumnarTableStatus;
+import com.alibaba.polardbx.gms.metadb.table.IndexStatus;
+import com.alibaba.polardbx.gms.tablegroup.TableGroupDetailConfig;
+import com.alibaba.polardbx.gms.topology.DbInfoManager;
+import com.alibaba.polardbx.optimizer.config.table.ColumnMeta;
+import com.alibaba.polardbx.optimizer.config.table.TableMeta;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.data.CreateTablePreparedData;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.CreateGlobalIndexPreparedData;
+import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
+import com.alibaba.polardbx.optimizer.partition.PartitionInfoBuilder;
+import com.alibaba.polardbx.optimizer.partition.common.PartitionStrategy;
+import com.alibaba.polardbx.optimizer.partition.common.PartitionTableType;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import org.apache.calcite.rel.core.DDL;
+import org.apache.calcite.sql.SqlCreateIndex;
+import org.apache.calcite.sql.SqlIdentifier;
+import org.apache.calcite.sql.SqlIndexColumnName;
+import org.apache.calcite.sql.SqlNode;
+import org.apache.calcite.sql.SqlPartitionBy;
+import org.apache.commons.collections.CollectionUtils;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.isVersionIdInitialized;
+import static com.alibaba.polardbx.executor.gsi.GsiUtils.columnAst2nameStr;
+
+/**
+ * create clustered columnar index
+ */
+public class CreateColumnarIndexJobFactory extends DdlJobFactory {
+
+    private static final PartitionStrategy DEFAULT_PARTITION_STRATEGY = PartitionStrategy.KEY;
+
+    protected final String schemaName;
+    protected final String primaryTableName;
+    protected final String columnarIndexTableName;
+    protected final boolean clusteredIndex;
+    protected final List<String> primaryKeys;
+    protected final List<String> coverings;
+    protected final String indexComment;
+    protected final String indexType;
+    protected final CreateTablePreparedData preparedData;
+    protected final Engine engine;
+    protected List<String> partitionKeys = null;
+    protected PartitionStrategy partitionStrategy = null;
+    protected List<String> sortKeys;
+    protected final SqlCreateIndex sqlCreateIndex;
+    protected ExecutionContext executionContext;
+    protected PartitionInfo partitionInfo = null;
+
+    /**
+     * FOR TEST USE ONLY!
+     */
+    protected final Set<String> skipDdlTasks;
+    /**
+     * For CREATE TABLE with CCI, the {@link TableSyncTask} of the primary table
+     * must be executed after the {@link CreateTableShowTableMetaTask} has finished.
+     */
+    protected final boolean createTableWithCci;
+    protected final Long versionId;
+
+    public CreateColumnarIndexJobFactory(CreateGlobalIndexPreparedData globalIndexPreparedData,
+                                         ExecutionContext executionContext) {
+        this(
+            globalIndexPreparedData.getSchemaName(),
+            globalIndexPreparedData.getPrimaryTableName(),
+            globalIndexPreparedData.getIndexTableName(),
+            globalIndexPreparedData.isClusteredIndex(),
+            globalIndexPreparedData.getIndexTablePreparedData(),
+            globalIndexPreparedData.getColumns(),
+            globalIndexPreparedData.getCoverings(),
+            globalIndexPreparedData.getComment(),
+            globalIndexPreparedData.getIndexType(),
+            globalIndexPreparedData.getOrBuildOriginCreateIndex(),
+            globalIndexPreparedData.getEngineName(),
+            globalIndexPreparedData.isCreateTableWithIndex(),
+            globalIndexPreparedData.getIndexPartitionInfo(),
+            globalIndexPreparedData.getDdlVersionId(),
+            executionContext
+        );
+    }
+
+    protected CreateColumnarIndexJobFactory(String schemaName,
+                                            String primaryTableName,
+                                            String indexTableName,
+                                            boolean clusteredIndex,
+                                            CreateTablePreparedData preparedData,
+                                            List<SqlIndexColumnName> sortKeys,
+                                            List<SqlIndexColumnName> covering,
+                                            String indexComment,
+                                            String indexType,
+                                            SqlCreateIndex sqlCreateIndex,
+                                            SqlNode engineName,
+                                            boolean createTableWithCci,
+                                            PartitionInfo indexPartitionInfo,
+                                            Long versionId,
+                                            ExecutionContext executionContext) {
+        this.schemaName = schemaName;
+        this.primaryTableName = primaryTableName;
+        this.columnarIndexTableName = indexTableName;
+        this.clusteredIndex = clusteredIndex;
+        this.preparedData = preparedData;
+        this.sortKeys = columnAst2nameStr(sortKeys);
+        this.coverings = columnAst2nameStr(covering);
+        this.indexComment = indexComment == null ? "" : indexComment;
+        this.indexType = indexType;
+        this.sqlCreateIndex = sqlCreateIndex;
+        this.executionContext = executionContext;
+        // CreateTablePreparedData#tableMeta is the metadata of the primary table.
+        // Getting the primary table metadata from the schema manager is not always possible:
+        // for cases like CREATE TABLE with CCI, the metadata may not be published yet at this step.
+        this.primaryKeys =
+            preparedData.getTableMeta().getPrimaryKey().stream().map(ColumnMeta::getName).collect(Collectors.toList());
+        this.skipDdlTasks = executionContext.skipDdlTasks();
+        this.createTableWithCci = createTableWithCci;
+        this.partitionInfo = indexPartitionInfo;
+
+        if (engineName == null) {
+            this.engine = Engine.DEFAULT_COLUMNAR_ENGINE;
+        } else {
+            this.engine = Engine.of(engineName.toString());
+        }
+        Preconditions.checkNotNull(versionId);
+        Preconditions.checkArgument(isVersionIdInitialized(versionId), "Ddl versionId is not initialized");
+        this.versionId = versionId;
+        if (null != executionContext.getDdlContext()) {
+            executionContext.getDdlContext().setPausedPolicy(DdlState.PAUSED);
+        }
+    }
+
+    public static CreateColumnarIndexJobFactory create(CreateGlobalIndexPreparedData preparedData,
+                                                       ExecutionContext ec) {
+        final String schema = preparedData.getSchemaName();
+        if (!DbInfoManager.getInstance().isNewPartitionDb(schema)) {
+            throw new UnsupportedOperationException("clustered columnar index is not supported in DRDS mode");
+        }
+        return new CreateColumnarIndexJobFactory(preparedData, ec);
+    }
+
+    @Override
+    protected void validate() {
+        GsiValidator.validateGsiSupport(schemaName, executionContext);
+        GsiValidator.validateCreateOnGsi(schemaName, columnarIndexTableName, executionContext);
+        validateEngine();
+        validateColumnarTableKeys();
+        validateDictColumns();
+    }
+
+    private void validateDictColumns() {
+        TableMeta tableMeta = this.preparedData.getTableMeta();
+        List<SqlIndexColumnName> dictColumns = sqlCreateIndex.getDictColumns();
+        if (dictColumns != null) {
+            for (SqlIndexColumnName dictCol : dictColumns) {
+                String columnNameStr = dictCol.getColumnNameStr();
+                ColumnMeta column = tableMeta.getColumnIgnoreCase(columnNameStr);
+                if (column == null) {
+                    throw new TddlNestableRuntimeException("unknown dictionary column name: " + columnNameStr);
+                }
+            }
+        }
+    }
+
+    private void validateEngine() {
+        if (!Engine.supportColumnar(engine)) {
+            throw new UnsupportedOperationException("Engine: " + engine + " is not supported in columnar index");
+        }
+    }
+
+    private void validateColumnarTableKeys() {
+        Preconditions.checkArgument(!primaryKeys.isEmpty(),
+            "Columnar index requires a primary key");
+        Preconditions.checkArgument(clusteredIndex,
+            "Do not support columnar index which is not clustered");
+        TableMeta tableMeta = this.preparedData.getTableMeta();
+
+        buildColumnarPartitionKey();
+        buildColumnarSortKey();
+        buildPartitionInfo(tableMeta);
+    }
+
+    private void buildPartitionInfo(TableMeta tableMeta) {
+        ColumnMeta pkMeta = tableMeta.getColumn(primaryKeys.get(0));
+        List<ColumnMeta> pkColMetas = Collections.singletonList(pkMeta);
+        List<ColumnMeta> allColMetas = tableMeta.getAllColumns();
+
+        String tableGroupName = preparedData.getTableGroupName() == null ? null :
+            ((SqlIdentifier) preparedData.getTableGroupName()).getLastName();
+        String joinGroupName = preparedData.getJoinGroupName() == null ? null :
+            ((SqlIdentifier) preparedData.getJoinGroupName()).getLastName();
+        partitionInfo = PartitionInfoBuilder.buildPartitionInfoByPartDefAst(
+            preparedData.getSchemaName(), columnarIndexTableName,
+            tableGroupName, preparedData.isWithImplicitTableGroup(), joinGroupName,
+            (SqlPartitionBy) preparedData.getPartitioning(), preparedData.getPartBoundExprInfo(),
+            pkColMetas, allColMetas, PartitionTableType.COLUMNAR_TABLE,
+            executionContext, new LocalityDesc());
+    }
+
+    private void buildCoverings(TableMeta tableMeta) {
+        if (!clusteredIndex) {
+            throw new UnsupportedOperationException("Do not support columnar index which is not clustered");
+        }
+    }
+
+    /**
+     * Build the partition key: use the explicit PARTITION BY columns when present,
+     * otherwise fall back to the first primary key column with the default partition strategy.
+     */
+    private void buildColumnarPartitionKey() {
+        if (preparedData.getPartitioning() instanceof SqlPartitionBy) {
+            SqlPartitionBy partitionBy = (SqlPartitionBy) preparedData.getPartitioning();
+            List<SqlNode> partitionColumns = partitionBy.getColumns();
+            this.partitionKeys = partitionColumns.stream().map(SqlNode::toString).collect(Collectors.toList());
+            this.partitionStrategy = PartitionInfoBuilder.getPartitionStrategy(partitionBy);
+        } else {
+            partitionKeys = Collections.singletonList(primaryKeys.get(0));
+            partitionStrategy = DEFAULT_PARTITION_STRATEGY;
+        }
+    }
+
+    private void buildColumnarSortKey() {
+        if (CollectionUtils.isEmpty(sortKeys)) {
+            sortKeys = ImmutableList.copyOf(primaryKeys);
+        }
+    }
+
+    @Override
+    protected ExecutableDdlJob doCreate() {
+        final TableGroupDetailConfig tableGroupConfig = DdlJobDataConverter.buildTableGroupConfig(partitionInfo, false);
+
+        final List<DdlTask> taskList = new ArrayList<>();
+        // 1. validate
+        final CreateColumnarIndexValidateTask validateTask = new CreateColumnarIndexValidateTask(
+            schemaName,
+            primaryTableName,
+            columnarIndexTableName);
+        taskList.add(validateTask);
+
+        // 2. create columnar table
+        // 2.1 insert tablePartition meta for columnar table
+        final AddColumnarTablesPartitionInfoMetaTask addColumnarTablesPartitionInfoMetaTask =
+            new AddColumnarTablesPartitionInfoMetaTask(
+                schemaName,
+                columnarIndexTableName,
+                tableGroupConfig,
+                primaryTableName);
+        taskList.add(addColumnarTablesPartitionInfoMetaTask);
+
+        // 2.2 insert tables meta for columnar table
+        final AddColumnarTablesMetaTask addColumnarTablesMetaTask = new AddColumnarTablesMetaTask(
+            schemaName,
+            primaryTableName,
+            columnarIndexTableName,
+            versionId, engine);
+        taskList.add(addColumnarTablesMetaTask);
+
+        // Change tables.status
+        final CreateTableShowTableMetaTask showTableMetaTask = new CreateTableShowTableMetaTask(
+            schemaName,
+            columnarIndexTableName);
+        taskList.add(showTableMetaTask);
+
+        // 2.3 insert indexes meta for primary table
+        final InsertColumnarIndexMetaTask insertColumnarIndexMetaTask = new InsertColumnarIndexMetaTask(
+            schemaName,
+            primaryTableName,
+            columnarIndexTableName,
+            sortKeys,
+            coverings,
+            false,
+            indexComment,
+            indexType,
+            IndexStatus.CREATING,
+            clusteredIndex);
+        taskList.add(insertColumnarIndexMetaTask);
+
+        // 2.4 CDC mark create columnar table
+        CdcCreateColumnarIndexTask cdcCreateColumnarIndexTask = null;
+        CreateMockColumnarIndexTask createMockColumnarIndexTask = null;
+        if (executionContext.getParamManager().getBoolean(ConnectionParams.MOCK_COLUMNAR_INDEX)) {
+            //create mock columnar index
+            createMockColumnarIndexTask =
+                new CreateMockColumnarIndexTask(schemaName, columnarIndexTableName, versionId);
+            createMockColumnarIndexTask.setMciFormat(
+                executionContext.getParamManager().getString(ConnectionParams.MCI_FORMAT));
+            taskList.add(createMockColumnarIndexTask);
+        } else if (!createTableWithCci) {
+            // 2.4 CDC mark create columnar table
+            cdcCreateColumnarIndexTask = new CdcCreateColumnarIndexTask(
+                schemaName,
+                primaryTableName,
+                columnarIndexTableName,
+                sqlCreateIndex.getOriginIndexName().getLastName(),
+                sqlCreateIndex.getColumnarOptions(),
+                sqlCreateIndex.toString(true),
+                versionId);
+            taskList.add(cdcCreateColumnarIndexTask);
+
+            // 2.5 table sync
+            DdlTask tableSyncTask = new TableSyncTask(schemaName, columnarIndexTableName);
+            taskList.add(tableSyncTask);
+        }
+
+        // 3.1.1 wait columnar table creation
+        final WaitColumnarTableCreationTask waitColumnarTableCreationTask = new WaitColumnarTableCreationTask(
+            schemaName,
+            primaryTableName,
+            columnarIndexTableName,
+            skipDdlTasks.contains(WaitColumnarTableCreationTask.class.getSimpleName()));
+        taskList.add(waitColumnarTableCreationTask);
+
+        // 3.1.2 check consistency
+        final CciUpdateIndexStatusTask changeCreatingToChecking =
+            (CciUpdateIndexStatusTask) new CciUpdateIndexStatusTask(
+                schemaName,
+                primaryTableName,
+                columnarIndexTableName,
+                ColumnarTableStatus.CREATING,
+                ColumnarTableStatus.PUBLIC,
+                IndexStatus.CREATING,
+                IndexStatus.WRITE_REORG,
+                false
+            ).onExceptionTryRecoveryThenRollback();
+        taskList.add(changeCreatingToChecking);
+
+        DdlTask checkingTableSyncTask = new TableSyncTask(schemaName, primaryTableName);
+        taskList.add(checkingTableSyncTask);
+
+        final CreateCheckCciTask createCheckCciTask = new CreateCheckCciTask(
+            schemaName,
+            primaryTableName,
+            columnarIndexTableName,
+            skipDdlTasks.contains(CreateCheckCciTask.class.getSimpleName())
+        );
+        createCheckCciTask.setExceptionAction(DdlExceptionAction.PAUSE);
+        taskList.add(createCheckCciTask);
+
+        // 3.2 change cci status to PUBLIC
+        final CciUpdateIndexStatusTask updateCciStatusTask = (CciUpdateIndexStatusTask) new CciUpdateIndexStatusTask(
+            schemaName,
+            primaryTableName,
+            columnarIndexTableName,
+            ColumnarTableStatus.PUBLIC,
+            ColumnarTableStatus.PUBLIC,
+            IndexStatus.WRITE_REORG,
+            IndexStatus.PUBLIC,
+            false
+        ).onExceptionTryRecoveryThenRollback();
+        taskList.add(updateCciStatusTask);
+
+        // 3.3 final table sync
+        DdlTask finalTableSyncTask = new TableSyncTask(schemaName, primaryTableName);
+        taskList.add(finalTableSyncTask);
+
+        final ExecutableDdlJob4CreateColumnarIndex result = new ExecutableDdlJob4CreateColumnarIndex();
+        result.addSequentialTasks(taskList);
+
+        result.setCreateColumnarIndexValidateTask(validateTask);
+        result.setAddColumnarTablesPartitionInfoMetaTask(addColumnarTablesPartitionInfoMetaTask);
+        result.setCdcCreateColumnarIndexTask(cdcCreateColumnarIndexTask);
+        result.setCreateMockColumnarIndexTask(createMockColumnarIndexTask);
+        result.setCreateTableShowTableMetaTask(showTableMetaTask);
+        result.setInsertColumnarIndexMetaTask(insertColumnarIndexMetaTask);
+        result.setWaitColumnarTableCreationTask(waitColumnarTableCreationTask);
+        result.setChangeCreatingToChecking(changeCreatingToChecking);
+        result.setCreateCheckCciTask(createCheckCciTask);
+        result.setCciUpdateIndexStatusTask(updateCciStatusTask);
+
+        result.setLastTask(taskList.get(taskList.size() - 1));
+        return result;
+    }
+
+    @Override
+    protected void excludeResources(Set<String> resources) {
+        resources.add(concatWithDot(schemaName, primaryTableName));
+        resources.add(concatWithDot(schemaName, columnarIndexTableName));
+    }
+
+    @Override
+    protected void sharedResources(Set<String> resources) {
+    }
+
+    public static ExecutableDdlJob create4CreateCci(@Deprecated DDL ddl,
+                                                    CreateGlobalIndexPreparedData preparedData,
+                                                    ExecutionContext ec) {
+        Preconditions.checkArgument(preparedData.isClusteredIndex(), "Columnar index can only be clustered index");
+
+        final CreateGlobalIndexBuilder builder =
+            new CreatePartitionGlobalIndexBuilder(ddl, preparedData, null, false, ec);
+        builder.build();
+
+        final CreateColumnarIndexJobFactory gsiJobFactory = CreateColumnarIndexJobFactory.create(preparedData, ec);
+        return gsiJobFactory.create();
+    }
+}
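
Read top to bottom, the job above moves the columnar index through CREATING -> WRITE_REORG (once the columnar table is ready, via changeCreatingToChecking) -> PUBLIC (after CreateCheckCciTask succeeds, via updateCciStatusTask). A minimal sketch of that lifecycle as a plain enum, for orientation only (not the PolarDB-X IndexStatus type):

    enum CciIndexStatusSketch {
        CREATING, WRITE_REORG, PUBLIC;

        // Advance one step along the lifecycle encoded by the task list above.
        CciIndexStatusSketch next() {
            switch (this) {
                case CREATING:    return WRITE_REORG; // set by changeCreatingToChecking
                case WRITE_REORG: return PUBLIC;      // set by updateCciStatusTask
                default: throw new IllegalStateException("PUBLIC is terminal");
            }
        }
    }
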
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/columnar/DropColumnarIndexJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/columnar/DropColumnarIndexJobFactory.java
new file mode 100644
index 000000000..5db685680
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/gsi/columnar/DropColumnarIndexJobFactory.java
@@ -0,0 +1,231 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.factory.gsi.columnar;
+
+import com.alibaba.polardbx.common.properties.ConnectionParams;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask;
+import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcDropColumnarIndexTask;
+import com.alibaba.polardbx.executor.ddl.job.task.columnar.CciSchemaEvolutionTask;
+import com.alibaba.polardbx.executor.ddl.job.task.columnar.DropColumnarTableRemoveMetaTask;
+import com.alibaba.polardbx.executor.ddl.job.task.columnar.DropMockColumnarIndexTask;
+import com.alibaba.polardbx.executor.ddl.job.task.factory.GsiTaskFactory;
+import com.alibaba.polardbx.executor.ddl.job.task.gsi.DropColumnarTableHideTableMetaTask;
+import com.alibaba.polardbx.executor.ddl.job.task.gsi.GsiDropCleanUpTask;
+import com.alibaba.polardbx.executor.ddl.job.task.gsi.ValidateGsiExistenceTask;
+import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.TableGroupSyncTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
+import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob;
+import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4DropColumnarIndex;
+import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig;
+import com.alibaba.polardbx.gms.tablegroup.TableGroupRecord;
+import com.alibaba.polardbx.optimizer.OptimizerContext;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.DropGlobalIndexPreparedData;
+import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
+
+import javax.annotation.Nullable;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+
+/**
+ * 1. drop index xxx on yyy
+ * 2. alter table yyy drop index xxx
+ * 

+ * for drop table with [unique] gsi, see class: DropTableWithGlobalIndexJob + * + * @author guxu + */ +public class DropColumnarIndexJobFactory extends DdlJobFactory { + + protected final String schemaName; + protected final String primaryTableName; + protected final String indexTableName; + protected final String originalIndexName; + protected final ExecutionContext executionContext; + protected final Long versionId; + + public static final String HIDE_TABLE_TASK = "HIDE_TABLE_TASK"; + + private boolean skipSchemaChange = false; + + public DropColumnarIndexJobFactory(String schemaName, + String primaryTableName, + String indexTableName, + String originalIndexName, + Long versionId, + boolean skipSchemaChange, + ExecutionContext executionContext) { + this.schemaName = schemaName; + this.primaryTableName = primaryTableName; + this.indexTableName = indexTableName; + this.originalIndexName = originalIndexName; + this.skipSchemaChange = skipSchemaChange; + this.versionId = versionId; + this.executionContext = executionContext; + } + + @Override + protected void excludeResources(Set resources) { + resources.add(concatWithDot(schemaName, primaryTableName)); + resources.add(concatWithDot(schemaName, indexTableName)); + + final TableGroupConfig indexTgConfig = getIndexTableGroupConfig(); + if (null != indexTgConfig) { + resources.add(concatWithDot(schemaName, indexTgConfig.getTableGroupRecord().getTg_name())); + } + } + + @Override + protected void sharedResources(Set resources) { + + } + + @Override + protected void validate() { + + } + + @Override + protected ExecutableDdlJob doCreate() { + final TableGroupConfig indexTgConfig = getIndexTableGroupConfig(); + final List tableGroupIds = new ArrayList<>(); + if (null != indexTgConfig) { + tableGroupIds.add(indexTgConfig.getTableGroupRecord().getId()); + } + + List taskList = new ArrayList<>(); + // 1. validate + ValidateGsiExistenceTask validateTask = + new ValidateGsiExistenceTask(schemaName, primaryTableName, indexTableName, tableGroupIds, indexTgConfig); + taskList.add(validateTask); + + // 2. 
GSI status: public -> absent + if (!skipSchemaChange) { + List bringDownTasks = + GsiTaskFactory.dropColumnarIndexTasks(schemaName, primaryTableName, indexTableName); + taskList.addAll(bringDownTasks); + } + + // 3.1 table status: public -> absent + DropColumnarTableHideTableMetaTask dropColumnarTableHideTableMetaTask = + new DropColumnarTableHideTableMetaTask(schemaName, primaryTableName, indexTableName); + taskList.add(dropColumnarTableHideTableMetaTask); + + // 3.2 remove table meta for columnar index + CciSchemaEvolutionTask cciSchemaEvolutionTask = + CciSchemaEvolutionTask.dropCci(schemaName, primaryTableName, indexTableName, versionId); + taskList.add(cciSchemaEvolutionTask); + + // 3.3 drop columnar table + CdcDropColumnarIndexTask cdcDropColumnarTableTask = null; + DropMockColumnarIndexTask dropMockColumnarIndexTask = null; + if (executionContext.getParamManager().getBoolean(ConnectionParams.MOCK_COLUMNAR_INDEX)) { + dropMockColumnarIndexTask = new DropMockColumnarIndexTask(schemaName, primaryTableName, indexTableName); + taskList.add(dropMockColumnarIndexTask); + } else { + cdcDropColumnarTableTask = + new CdcDropColumnarIndexTask(schemaName, primaryTableName, originalIndexName, versionId); + taskList.add(cdcDropColumnarTableTask); + } + + // 3.4 remove indexes meta for primary table + GsiDropCleanUpTask gsiDropCleanUpTask = new GsiDropCleanUpTask(schemaName, primaryTableName, indexTableName); + taskList.add(gsiDropCleanUpTask); + TableSyncTask tableSyncTaskAfterCleanUpGsiIndexesMeta = new TableSyncTask(schemaName, primaryTableName); + taskList.add(tableSyncTaskAfterCleanUpGsiIndexesMeta); + + // 4.1 remove table meta for columnar index + DropColumnarTableRemoveMetaTask dropColumnarTableRemoveMetaTask = + new DropColumnarTableRemoveMetaTask(schemaName, primaryTableName, indexTableName); + taskList.add(dropColumnarTableRemoveMetaTask); + + // 4.2 clear table group cache if necessary + if (null != indexTgConfig) { + final DdlTask syncTableGroup = new TableGroupSyncTask( + schemaName, + indexTgConfig.getTableGroupRecord().getTg_name()); + + taskList.add(syncTableGroup); + } + + // 5. 
sync after drop columnar index table + TableSyncTask finalSyncTask = new TableSyncTask(schemaName, primaryTableName); + taskList.add(finalSyncTask); + + final ExecutableDdlJob4DropColumnarIndex executableDdlJob = new ExecutableDdlJob4DropColumnarIndex(); + executableDdlJob.addSequentialTasks(taskList); + //todo delete me + // USED IN + // com.alibaba.polardbx.executor.handler.ddl.LogicalDropIndexHandler.buildDropColumnarIndexJob + // com.alibaba.polardbx.executor.handler.ddl.LogicalAlterTableHandler.buildDropCciJob + executableDdlJob.labelAsHead(validateTask); + executableDdlJob.labelAsTail(finalSyncTask); + executableDdlJob.labelTask(HIDE_TABLE_TASK, dropColumnarTableHideTableMetaTask); + + executableDdlJob.setValidateTask(validateTask); + executableDdlJob.setDropColumnarTableHideTableMetaTask(dropColumnarTableHideTableMetaTask); + executableDdlJob.setGsiDropCleanUpTask(gsiDropCleanUpTask); + executableDdlJob.setTableSyncTaskAfterCleanUpGsiIndexesMeta(tableSyncTaskAfterCleanUpGsiIndexesMeta); + executableDdlJob.setCdcDropColumnarIndexTask(cdcDropColumnarTableTask); + executableDdlJob.setDropMockColumnarIndexTask(dropMockColumnarIndexTask); + executableDdlJob.setDropColumnarTableRemoveMetaTask(dropColumnarTableRemoveMetaTask); + executableDdlJob.setCciSchemaEvolutionTask(cciSchemaEvolutionTask); + executableDdlJob.setFinalSyncTask(finalSyncTask); + + return executableDdlJob; + } + + @Nullable + private TableGroupConfig getIndexTableGroupConfig() { + final OptimizerContext oc = + Objects.requireNonNull(OptimizerContext.getContext(schemaName), schemaName + " corrupted"); + + final PartitionInfo partitionInfo = oc + .getPartitionInfoManager() + .getPartitionInfo(indexTableName); + + final Long tableGroupId = Optional.ofNullable(partitionInfo) + .map(PartitionInfo::getTableGroupId) + .orElse(TableGroupRecord.INVALID_TABLE_GROUP_ID); + + TableGroupConfig result = null; + if (tableGroupId != TableGroupRecord.INVALID_TABLE_GROUP_ID) { + result = oc.getTableGroupInfoManager().getTableGroupConfigById(tableGroupId); + } + + return result; + } + + public static ExecutableDdlJob create(DropGlobalIndexPreparedData preparedData, + ExecutionContext executionContext, + boolean skipSchemaChange, + boolean validate) { + return new DropColumnarIndexJobFactory( + preparedData.getSchemaName(), + preparedData.getPrimaryTableName(), + preparedData.getIndexTableName(), + preparedData.getOriginalIndexName(), + preparedData.getDdlVersionId(), + skipSchemaChange, + executionContext + ).create(validate); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/localpartition/ExpireLocalPartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/localpartition/ExpireLocalPartitionJobFactory.java index 502bd2f35..1779f045a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/localpartition/ExpireLocalPartitionJobFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/localpartition/ExpireLocalPartitionJobFactory.java @@ -41,6 +41,7 @@ import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.FileValidationTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.OSSTaskUtils; import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.UpdateFileCommitTsTask; +import com.alibaba.polardbx.executor.ddl.job.task.gsi.ValidateTableVersionTask; import com.alibaba.polardbx.executor.ddl.job.task.localpartition.LocalPartitionPhyDdlTask; import 
com.alibaba.polardbx.executor.ddl.job.task.localpartition.LocalPartitionValidateTask; import com.alibaba.polardbx.executor.ddl.job.task.shared.EmptyTask; @@ -54,12 +55,12 @@ import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.gms.util.LockUtil; import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.archive.CheckOSSArchiveUtil; import com.alibaba.polardbx.optimizer.config.table.GsiMetaManager; import com.alibaba.polardbx.optimizer.config.table.SchemaManager; import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.ddl.data.ReorganizeLocalPartitionPreparedData; -import com.alibaba.polardbx.optimizer.archive.CheckOSSArchiveUtil; import com.alibaba.polardbx.optimizer.partition.common.LocalPartitionDefinitionInfo; import com.alibaba.polardbx.repo.mysql.checktable.LocalPartitionDescription; import com.alibaba.polardbx.repo.mysql.checktable.TableDescription; @@ -72,6 +73,7 @@ import org.apache.commons.collections.CollectionUtils; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -184,9 +186,15 @@ protected ExecutableDdlJob doCreate() { Map publishedGsi = primaryTableMeta.getGsiPublished(); ExecutableDdlJob executableDdlJob = new ExecutableDdlJob(); List taskList = new ArrayList<>(); + Map versionMap = new HashMap<>(); + versionMap.put(primaryTableName, primaryTableMeta.getVersion()); + ValidateTableVersionTask validateTableVersionTask = new ValidateTableVersionTask(schemaName, versionMap); + executableDdlJob.addTask(validateTableVersionTask); + LocalPartitionValidateTask localPartitionValidateTask = new LocalPartitionValidateTask(schemaName, primaryTableName); executableDdlJob.addTask(localPartitionValidateTask); + executableDdlJob.addTaskRelationship(validateTableVersionTask, localPartitionValidateTask); DdlTask headTask = localPartitionValidateTask; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/localpartition/RemoveLocalPartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/localpartition/RemoveLocalPartitionJobFactory.java index a512b2879..a3458230a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/localpartition/RemoveLocalPartitionJobFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/localpartition/RemoveLocalPartitionJobFactory.java @@ -24,7 +24,9 @@ import com.alibaba.polardbx.executor.ddl.job.builder.DirectPhysicalSqlPlanBuilder; import com.alibaba.polardbx.executor.ddl.job.task.basic.RemoveLocalPartitionTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask; +import com.alibaba.polardbx.executor.ddl.job.task.gsi.ValidateTableVersionTask; import com.alibaba.polardbx.executor.ddl.job.task.localpartition.LocalPartitionPhyDdlTask; +import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; @@ -35,10 +37,12 @@ import com.alibaba.polardbx.optimizer.core.rel.ddl.data.ReorganizeLocalPartitionPreparedData; import org.apache.calcite.rel.core.DDL; import org.apache.calcite.sql.SqlIdentifier; +import 
org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlPhyDdlWrapper; import org.apache.calcite.sql.parser.SqlParserPos; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -65,14 +69,14 @@ public RemoveLocalPartitionJobFactory(String schemaName, @Override protected void validate() { - + TableValidator.validateTableWithCCI(schemaName, primaryTableName, executionContext, SqlKind.LOCAL_PARTITION); } @Override protected ExecutableDdlJob doCreate() { final TableMeta primaryTableMeta = OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(primaryTableName); - if(primaryTableMeta.getLocalPartitionDefinitionInfo() == null){ + if (primaryTableMeta.getLocalPartitionDefinitionInfo() == null) { throw new TddlNestableRuntimeException(String.format( "table %s.%s is not a local partition table", schemaName, primaryTableName)); } @@ -87,8 +91,12 @@ protected ExecutableDdlJob doCreate() { ExecutableDdlJob executableDdlJob = new ExecutableDdlJob(); List taskList = new ArrayList<>(); + Map versionMap = new HashMap<>(); + versionMap.put(primaryTableName, primaryTableMeta.getVersion()); + ValidateTableVersionTask validateTableVersionTask = new ValidateTableVersionTask(schemaName, versionMap); + taskList.add(validateTableVersionTask); taskList.add(genPhyDdlTask(schemaName, primaryTableName, phySql)); - if(publishedGsi != null){ + if (publishedGsi != null) { publishedGsi.forEach((gsiName, gsiIndexMetaBean) -> { taskList.add(genPhyDdlTask(schemaName, gsiName, phySql)); }); @@ -99,8 +107,9 @@ protected ExecutableDdlJob doCreate() { return executableDdlJob; } - private LocalPartitionPhyDdlTask genPhyDdlTask(String schemaName, String tableName, String phySql){ - ddl.sqlNode = SqlPhyDdlWrapper.createForAllocateLocalPartition(new SqlIdentifier(tableName, SqlParserPos.ZERO), phySql); + private LocalPartitionPhyDdlTask genPhyDdlTask(String schemaName, String tableName, String phySql) { + ddl.sqlNode = + SqlPhyDdlWrapper.createForAllocateLocalPartition(new SqlIdentifier(tableName, SqlParserPos.ZERO), phySql); DirectPhysicalSqlPlanBuilder builder = new DirectPhysicalSqlPlanBuilder( ddl, new ReorganizeLocalPartitionPreparedData(schemaName, tableName), executionContext ); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/localpartition/ReorganizeLocalPartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/localpartition/ReorganizeLocalPartitionJobFactory.java index 975124a1c..4a7ec1e9b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/localpartition/ReorganizeLocalPartitionJobFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/localpartition/ReorganizeLocalPartitionJobFactory.java @@ -28,6 +28,7 @@ import com.alibaba.polardbx.druid.sql.ast.statement.SQLExprTableSource; import com.alibaba.polardbx.executor.common.ExecutorContext; import com.alibaba.polardbx.executor.ddl.job.builder.DirectPhysicalSqlPlanBuilder; +import com.alibaba.polardbx.executor.ddl.job.task.gsi.ValidateTableVersionTask; import com.alibaba.polardbx.executor.ddl.job.task.localpartition.LocalPartitionPhyDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.localpartition.LocalPartitionValidateTask; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory; @@ -51,6 +52,7 @@ import org.apache.calcite.sql.parser.SqlParserPos; import java.util.ArrayList; +import 
java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -134,12 +136,17 @@ protected ExecutableDdlJob doCreate() { alterTableStatement.setDbType(DbType.mysql); final String phySql = alterTableStatement.toString(); + Map versionMap = new HashMap<>(); + versionMap.put(primaryTableName, primaryTableMeta.getVersion()); + ValidateTableVersionTask validateTableVersionTask = new ValidateTableVersionTask(schemaName, versionMap); + LocalPartitionValidateTask localPartitionValidateTask = new LocalPartitionValidateTask(schemaName, primaryTableName); Map publishedGsi = primaryTableMeta.getGsiPublished(); ExecutableDdlJob executableDdlJob = new ExecutableDdlJob(); List taskList = new ArrayList<>(); + taskList.add(validateTableVersionTask); taskList.add(localPartitionValidateTask); taskList.add(genPhyDdlTask(schemaName, primaryTableName, phySql)); if (publishedGsi != null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/localpartition/RepartitionLocalPartitionJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/localpartition/RepartitionLocalPartitionJobFactory.java index de6c172e0..f41a9597b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/localpartition/RepartitionLocalPartitionJobFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/localpartition/RepartitionLocalPartitionJobFactory.java @@ -29,6 +29,7 @@ import com.alibaba.polardbx.executor.ddl.job.task.basic.AddLocalPartitionTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.RemoveLocalPartitionTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask; +import com.alibaba.polardbx.executor.ddl.job.task.gsi.ValidateTableVersionTask; import com.alibaba.polardbx.executor.ddl.job.task.localpartition.LocalPartitionPhyDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.localpartition.LocalPartitionValidateTask; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory; @@ -55,6 +56,7 @@ import org.apache.commons.lang3.StringUtils; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -100,11 +102,15 @@ protected ExecutableDdlJob doCreate() { final TableMeta primaryTableMeta = OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(primaryTableName); + Map versionMap = new HashMap<>(); + versionMap.put(primaryTableName, primaryTableMeta.getVersion()); + checkLocalPartitionColumnInUk(primaryTableMeta); List gsiList = GlobalIndexMeta.getIndex(primaryTableName, schemaName, executionContext); if (CollectionUtils.isNotEmpty(gsiList)) { for (TableMeta gsiMeta : gsiList) { checkLocalPartitionColumnInUk(gsiMeta); + versionMap.put(gsiMeta.getTableName(), gsiMeta.getVersion()); } } @@ -155,6 +161,10 @@ protected ExecutableDdlJob doCreate() { ExecutableDdlJob executableDdlJob = new ExecutableDdlJob(); List taskList = new ArrayList<>(); + + ValidateTableVersionTask validateTableVersionTask = new ValidateTableVersionTask(schemaName, versionMap); + taskList.add(validateTableVersionTask); + if (primaryTableMeta.getLocalPartitionDefinitionInfo() != null) { LocalPartitionValidateTask localPartitionValidateTask = new LocalPartitionValidateTask(schemaName, primaryTableName); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/AlterTableAsOfTimeStampJobFactory.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/AlterTableAsOfTimeStampJobFactory.java index e50780131..bc496de78 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/AlterTableAsOfTimeStampJobFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/AlterTableAsOfTimeStampJobFactory.java @@ -32,9 +32,9 @@ public class AlterTableAsOfTimeStampJobFactory extends DdlJobFactory { private AlterTablePreparedData alterTablePreparedData; public AlterTableAsOfTimeStampJobFactory(String schemaName, - String logicalTableName, - AlterTablePreparedData alterTablePreparedData, - ExecutionContext executionContext) { + String logicalTableName, + AlterTablePreparedData alterTablePreparedData, + ExecutionContext executionContext) { this.schemaName = schemaName; this.logicalTableName = logicalTableName; this.executionContext = executionContext; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/AlterTableDropOssFileJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/AlterTableDropOssFileJobFactory.java index 7842c4da6..4d1300ece 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/AlterTableDropOssFileJobFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/AlterTableDropOssFileJobFactory.java @@ -64,7 +64,8 @@ protected ExecutableDdlJob doCreate() { List taskList = new ArrayList<>(); // change file meta task - final ITimestampOracle timestampOracle = executionContext.getTransaction().getTransactionManagerUtil().getTimestampOracle(); + final ITimestampOracle timestampOracle = + executionContext.getTransaction().getTransactionManagerUtil().getTimestampOracle(); if (null == timestampOracle) { throw new UnsupportedOperationException("Do not support timestamp oracle"); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/AlterTableEngineJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/AlterTableEngineJobFactory.java index e834becb6..6b84ca1e1 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/AlterTableEngineJobFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/AlterTableEngineJobFactory.java @@ -17,9 +17,9 @@ package com.alibaba.polardbx.executor.ddl.job.factory.oss; import com.alibaba.polardbx.common.Engine; +import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.ChangeTableEngineTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.MoveDataToInnodbTask; -import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/AlterTablePurgeBeforeTimeStampJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/AlterTablePurgeBeforeTimeStampJobFactory.java index 4a9f32aec..0011756dd 100644 --- 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/AlterTablePurgeBeforeTimeStampJobFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/AlterTablePurgeBeforeTimeStampJobFactory.java @@ -32,9 +32,9 @@ public class AlterTablePurgeBeforeTimeStampJobFactory extends DdlJobFactory { private AlterTablePreparedData alterTablePreparedData; public AlterTablePurgeBeforeTimeStampJobFactory(String schemaName, - String logicalTableName, - AlterTablePreparedData alterTablePreparedData, - ExecutionContext executionContext) { + String logicalTableName, + AlterTablePreparedData alterTablePreparedData, + ExecutionContext executionContext) { this.schemaName = schemaName; this.logicalTableName = logicalTableName; this.executionContext = executionContext; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/CreatePartitionOssTableJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/CreatePartitionOssTableJobFactory.java index c5f54e485..3d0355e9e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/CreatePartitionOssTableJobFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/CreatePartitionOssTableJobFactory.java @@ -19,6 +19,8 @@ import com.alibaba.polardbx.common.ArchiveMode; import com.alibaba.polardbx.common.Engine; import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.TStringUtil; @@ -26,11 +28,11 @@ import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData; import com.alibaba.polardbx.executor.ddl.job.factory.CreateTableJobFactory; import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateArchiveTableEventLogTask; +import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateEntitySecurityAttrTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.CreatePartitionTableValidateTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableAddTablesPartitionInfoMetaTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTablePhyDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableShowTableMetaTask; -import com.alibaba.polardbx.executor.ddl.job.task.basic.InsertIntoTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.BindingArchiveTableMetaTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.CreateOssTableAddTablesMetaTask; @@ -42,6 +44,8 @@ import com.alibaba.polardbx.executor.ddl.job.task.shared.EmptyTask; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; +import com.alibaba.polardbx.gms.engine.ColdDataStatus; +import com.alibaba.polardbx.gms.engine.FileSystemUtils; import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; import com.alibaba.polardbx.gms.partition.TableLocalPartitionRecord; @@ -51,6 +55,7 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import 
com.alibaba.polardbx.optimizer.core.rel.ddl.data.CreateTablePreparedData; import com.alibaba.polardbx.optimizer.utils.RelUtils; +import org.apache.calcite.sql.SqlIndexColumnName; import org.eclipse.jetty.util.StringUtil; import java.sql.Connection; @@ -63,27 +68,33 @@ import static com.alibaba.polardbx.executor.ddl.newengine.meta.DdlJobManager.ID_GENERATOR; public class CreatePartitionOssTableJobFactory extends CreateTableJobFactory { - public static final String CREATE_TABLE_ADD_TABLES_META_TASK = "CREATE_TABLE_ADD_TABLES_META_TASK"; - - private CreateTablePreparedData preparedData; - private Engine tableEngine; - private ArchiveMode archiveMode; + private final CreateTablePreparedData preparedData; + private final Engine tableEngine; + private final ArchiveMode archiveMode; + private final List dictColumns; public CreatePartitionOssTableJobFactory(boolean autoPartition, boolean hasTimestampColumnDefault, Map specialDefaultValues, Map specialDefaultValueFlags, PhysicalPlanData physicalPlanData, ExecutionContext executionContext, CreateTablePreparedData preparedData, Engine tableEngine, - ArchiveMode archiveMode) { + ArchiveMode archiveMode, List dictColumns) { super(autoPartition, hasTimestampColumnDefault, specialDefaultValues, specialDefaultValueFlags, null, - physicalPlanData, executionContext); + physicalPlanData, preparedData.getDdlVersionId(), executionContext, null); this.preparedData = preparedData; this.tableEngine = tableEngine; this.archiveMode = archiveMode; + this.dictColumns = dictColumns; } @Override protected void validate() { + if (FileSystemUtils.getColdDataStatus().getStatus() == ColdDataStatus.OFF.getStatus()) { + throw new TddlRuntimeException( + ErrorCode.ERR_ARCHIVE_NOT_ENABLED, + "Data archiving feature is not enabled. Please enable this feature on the console." + ); + } if (archiveMode == ArchiveMode.TTL && preparedData.getLoadTableSchema() != null && preparedData.getLoadTableName() != null) { @@ -97,7 +108,8 @@ protected void validate() { // not a local partition table if (record == null) { - throw GeneralUtil.nestedException( + throw new TddlRuntimeException( + ErrorCode.ERR_INVALID_DDL_PARAMS, MessageFormat.format("{0}.{1} is not a local partition table.", ttlTableSchema, ttlTableName)); } @@ -105,13 +117,14 @@ protected void validate() { String oldArchiveTableSchema = record.getArchiveTableSchema(); String oldArchiveTableName = record.getArchiveTableName(); - // already has archive table but don't allow replace it. + // the table already has an archive table, and replacing it is not allowed by default. if (oldArchiveTableSchema != null || oldArchiveTableName != null) { boolean allowReplace = executionContext.getParamManager().getBoolean(ConnectionParams.ALLOW_REPLACE_ARCHIVE_TABLE); if (!allowReplace) { - throw GeneralUtil.nestedException( + throw new TddlRuntimeException( + ErrorCode.ERR_ARCHIVE_TABLE_EXISTS, MessageFormat.format( "The table {0}.{1} already has archive table {2}.{3}, please use connection param: ALLOW_REPLACE_ARCHIVE_TABLE=true to allow replace archive table.", ttlTableSchema, ttlTableName, oldArchiveTableSchema, oldArchiveTableName)); @@ -121,6 +134,11 @@ protected void validate() { throw new TddlNestableRuntimeException(t); } } + if (selectSql != null) { + throw new TddlRuntimeException( + ErrorCode.ERR_CREATE_SELECT_WITH_OSS, "Create table select for archive table is not supported."
+ ); + } } @Override @@ -170,10 +188,12 @@ protected ExecutableDdlJob doCreate() { false); taskList.add(validateTask); + boolean autoCreateTg = + executionContext.getParamManager().getBoolean(ConnectionParams.ALLOW_AUTO_CREATE_TABLEGROUP); // table partition info CreateTableAddTablesPartitionInfoMetaTask addPartitionInfoTask = new CreateTableAddTablesPartitionInfoMetaTask(schemaName, logicalTableName, physicalPlanData.isTemporary(), - physicalPlanData.getTableGroupConfig(), null, false, null, null); + physicalPlanData.getTableGroupConfig(), null, null, null, null, true, false, autoCreateTg); taskList.add(addPartitionInfoTask); // mysql physical ddl task @@ -214,7 +234,7 @@ protected ExecutableDdlJob doCreate() { CreateOssTableGenerateDataMppTask createOssTableGenerateDataMppTask = new CreateOssTableGenerateDataMppTask(schemaName, logicalTableName, physicalPlanData, preparedData.getLoadTableSchema(), preparedData.getLoadTableName(), tableEngine, archiveMode, - totalNum, serialNum); + dictColumns, totalNum, serialNum); createOssTableGenerateDataMppTask.setTaskId(ID_GENERATOR.nextId()); taskIdList.add(createOssTableGenerateDataMppTask.getTaskId()); @@ -227,7 +247,8 @@ } else { CreateOssTableGenerateDataTask createOssTableGenerateDataTask = new CreateOssTableGenerateDataTask(schemaName, logicalTableName, physicalPlanData, - preparedData.getLoadTableSchema(), preparedData.getLoadTableName(), tableEngine, archiveMode); + preparedData.getLoadTableSchema(), preparedData.getLoadTableName(), tableEngine, archiveMode, + dictColumns); createOssTableGenerateDataTask.setTaskId(ID_GENERATOR.nextId()); taskIdList.add(createOssTableGenerateDataTask.getTaskId()); @@ -266,6 +287,11 @@ protected ExecutableDdlJob doCreate() { preparedData.getLoadTableName(), archiveMode, tableEngine); taskList.add(createArchiveTableEventLogTask); + CreateEntitySecurityAttrTask cesaTask = createCESATask(); + if (cesaTask != null) { + taskList.add(cesaTask); + } + // sync source table TableSyncTask tableSyncTask = new TableSyncTask(schemaName, logicalTableName); taskList.add(tableSyncTask); @@ -286,7 +312,7 @@ protected ExecutableDdlJob doCreate() { executableDdlJob.addSequentialTasksAfter(tailTask, taskList); if (selectSql != null) { throw new TddlNestableRuntimeException( - String.format("Don't support create table select in oss.")); + "Create table select is not supported in OSS."); } return executableDdlJob; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/MoveOSSDataJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/MoveOSSDataJobFactory.java index 3e4c3bc9e..d3520e2f3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/MoveOSSDataJobFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/oss/MoveOSSDataJobFactory.java @@ -18,9 +18,9 @@ import com.alibaba.polardbx.common.Engine; import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.MoveDataToFileStoreTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.MoveDataToInnodbTask; -import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.UpdateFileCommitTsTask; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory; import
com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask; @@ -81,7 +81,8 @@ protected ExecutableDdlJob doCreate() { moveDataToFileStoreTask.setTaskId(ID_GENERATOR.nextId()); List taskIdList = new ArrayList<>(); taskIdList.add(moveDataToFileStoreTask.getTaskId()); - UpdateFileCommitTsTask updateFileCommitTsTask = new UpdateFileCommitTsTask(targetEngine.name(), schemaName, logicalTableName, taskIdList); + UpdateFileCommitTsTask updateFileCommitTsTask = + new UpdateFileCommitTsTask(targetEngine.name(), schemaName, logicalTableName, taskIdList); taskList.add(moveDataToFileStoreTask); taskList.add(updateFileCommitTsTask); } else if (Engine.isFileStore(sourceEngine) && targetEngine == Engine.INNODB) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/storagepool/AlterStoragePoolAddNodeJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/storagepool/AlterStoragePoolAddNodeJobFactory.java index 2a213f774..e3e31073f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/storagepool/AlterStoragePoolAddNodeJobFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/storagepool/AlterStoragePoolAddNodeJobFactory.java @@ -18,7 +18,6 @@ import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; -import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.executor.ddl.job.task.storagepool.AppendStorageInfoTask; import com.alibaba.polardbx.executor.ddl.job.task.storagepool.StorageInstValidateTask; import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.BackgroupRebalanceTask; @@ -26,17 +25,19 @@ import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; +import com.alibaba.polardbx.gms.topology.DbTopologyManager; +import com.alibaba.polardbx.gms.topology.StorageInfoRecord; import com.alibaba.polardbx.gms.util.InstIdUtil; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterStoragePoolPrepareData; -import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupSplitPartitionByHotValuePreparedData; +import com.alibaba.polardbx.optimizer.locality.StoragePoolInfo; import com.alibaba.polardbx.optimizer.locality.StoragePoolManager; import com.google.common.collect.Lists; import org.apache.commons.lang.StringUtils; import java.util.List; +import java.util.Map; import java.util.Set; -import java.util.stream.Collectors; public class AlterStoragePoolAddNodeJobFactory extends DdlJobFactory { private AlterStoragePoolPrepareData prepareData; @@ -52,9 +53,9 @@ protected void validate() { List dnIds = prepareData.getDnIds(); String instId = InstIdUtil.getMasterInstId(); if (prepareData.getValidateStorageInstIdle()) { - StoragePoolValidator.validateStoragePool(instId, dnIds); + StoragePoolValidator.validateStoragePool(instId, dnIds, false, true, true); } else { - StoragePoolValidator.validateStoragePool(instId, dnIds, true, false); + StoragePoolValidator.validateStoragePool(instId, dnIds, false, true, false); } } @@ -64,33 +65,50 @@ protected ExecutableDdlJob doCreate() { String instId = InstIdUtil.getMasterInstId(); //validate again. 
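// A minimal sketch of the re-validation implemented below, using only names visible in this hunk: (1) every DN id to append must already exist in the master instance's topology (storageInfoMap), else ERR_DDL_JOB_INVALID; (2) the target storage pool must exist in StoragePoolManager, else ERR_INVALID_DDL_PARAMS; (3) if the pool already contains all requested DN ids, the job degenerates to the validate task alone; (4) otherwise the job appends the storage info and schedules a background "SCHEDULE REBALANCE TENANT <pool> POLICY='data_balance'".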
StoragePoolManager storagePoolManager = StoragePoolManager.getInstance(); - List originalStoragePoolName = prepareData.getDnIds().stream(). - filter(o -> storagePoolManager.storagePoolMap.containsKey(o)). - map(o -> storagePoolManager.storagePoolMap.get(o)).collect( - Collectors.toList()); - if (originalStoragePoolName.size() != prepareData.getDnIds().size()) { + Map storageInfoMap = + DbTopologyManager.getStorageInfoMap(InstIdUtil.getInstId()); + if (!storageInfoMap.keySet().containsAll(prepareData.getDnIds())) { throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_INVALID, "The storage insts appended contains illegal storage inst id!" + StringUtils.join(prepareData.getDnIds(), ",")); } - String undeletableDnId = - storagePoolManager.getStoragePoolInfo(prepareData.getStoragePoolName()).getUndeletableDnId(); + + StoragePoolInfo storagePoolInfo = storagePoolManager.getStoragePoolInfo(prepareData.getStoragePoolName()); + if (storagePoolInfo == null) { + throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS, + "The storage pool doesn't exist: " + prepareData.getStoragePoolName()); + } + String undeletableDnId = storagePoolInfo.getUndeletableDnId(); StorageInstValidateTask storageInstValidateTask = new StorageInstValidateTask(prepareData.getSchemaName(), instId, - prepareData.getDnIds(), true, prepareData.getValidateStorageInstIdle()); + prepareData.getDnIds(), false, true, prepareData.getValidateStorageInstIdle()); + // If all the requested nodes are already in the pool, the append is a no-op: only validate. + if (storagePoolInfo.getDnLists().containsAll(prepareData.getDnIds())) { + ddlJob.addSequentialTasks(Lists.newArrayList( + storageInstValidateTask)); + return ddlJob; + } AppendStorageInfoTask appendStorageInfoTask = - new AppendStorageInfoTask(prepareData.getSchemaName(), instId, originalStoragePoolName, + new AppendStorageInfoTask(prepareData.getSchemaName(), instId, storageInfoMap, prepareData.getDnIds(), undeletableDnId, prepareData.getStoragePoolName()); String rebalanceSql = "SCHEDULE REBALANCE TENANT " + prepareData.getStoragePoolName() + " POLICY='data_balance'"; DdlTask rebalanceStoragePoolTask = new BackgroupRebalanceTask("polardbx", rebalanceSql); ddlJob.addSequentialTasks(Lists.newArrayList( storageInstValidateTask, appendStorageInfoTask, rebalanceStoragePoolTask )); // ddlJob.appendTask(syncTask); return ddlJob; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/storagepool/AlterStoragePoolDrainNodeJobFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/storagepool/AlterStoragePoolDrainNodeJobFactory.java index bbc32b3d2..7de24055a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/storagepool/AlterStoragePoolDrainNodeJobFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/storagepool/AlterStoragePoolDrainNodeJobFactory.java @@ -29,6 +29,7 @@ import com.alibaba.polardbx.gms.util.InstIdUtil; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterStoragePoolPrepareData; +import
com.alibaba.polardbx.optimizer.locality.StoragePoolInfo; import com.alibaba.polardbx.optimizer.locality.StoragePoolManager; import com.google.common.collect.Lists; import org.apache.commons.lang.StringUtils; @@ -50,26 +51,41 @@ protected void validate() { List dnIds = prepareData.getDnIds(); String instId = InstIdUtil.getMasterInstId(); //validate dnId not occupied. - StoragePoolValidator.validateStoragePoolReady(instId, dnIds); + if (!prepareData.getStoragePoolName().equalsIgnoreCase(StoragePoolManager.RECYCLE_STORAGE_POOL_NAME)) { + StoragePoolValidator.validateStoragePoolReady(instId, dnIds); + } } @Override protected ExecutableDdlJob doCreate() { - ExecutableDdlJob ddlJob = new ExecutableDdlJob(); String instId = InstIdUtil.getMasterInstId(); //validate again. StoragePoolManager storagePoolManager = StoragePoolManager.getInstance(); - List originalDnList = - storagePoolManager.getStoragePoolInfo(prepareData.getStoragePoolName()).getDnLists(); + StoragePoolInfo storagePoolInfo = storagePoolManager.getStoragePoolInfo(prepareData.getStoragePoolName()); + if (storagePoolInfo == null) { + throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS, + "The storage pool doesn't exist: " + prepareData.getStoragePoolName()); + } + List originalDnList = storagePoolInfo.getDnLists(); if (!originalDnList.containsAll(prepareData.getDnIds())) { String errMsg = String.format("storage pool %s doesn't contains all of storage inst %s", prepareData.getStoragePoolName(), prepareData.getDnIds()); throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS, errMsg); } + if (prepareData.getDnIds().contains(storagePoolInfo.getUndeletableDnId())) { + String errMsg = String.format("The storage inst '%s' is the undeletable DN of storage pool '%s'", + storagePoolInfo.getUndeletableDnId(), + prepareData.getStoragePoolName()); + throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS, errMsg); + } StorageInstValidateTask storageInstValidateTask = new StorageInstValidateTask(prepareData.getSchemaName(), instId, prepareData.getDnIds(), false, false); + if (prepareData.getStoragePoolName().equalsIgnoreCase(StoragePoolManager.RECYCLE_STORAGE_POOL_NAME)) { + storageInstValidateTask = new StorageInstValidateTask(prepareData.getSchemaName(), instId, + prepareData.getDnIds(), false, false, false); + } StoragePoolValidateTask storagePoolValidateTask = new StoragePoolValidateTask(prepareData.getSchemaName(), instId, prepareData.getStoragePoolName(), @@ -81,14 +97,26 @@ protected ExecutableDdlJob doCreate() { String.format("SCHEDULE REBALANCE TENANT %s DRAIN_NODE='%s'", prepareData.getStoragePoolName(), StringUtils.join(prepareData.getDnIds(), ",")); DdlTask rebalanceStoragePoolTask = new BackgroupRebalanceTask("polardbx", rebalanceSql); - ddlJob.addSequentialTasks(Lists.newArrayList( - //TODO: - // validateTableVersionTask, - storageInstValidateTask, - storagePoolValidateTask, - drainStorageInfoTask, - rebalanceStoragePoolTask - )); + ExecutableDdlJob ddlJob = new ExecutableDdlJob(); + ddlJob.addSequentialTasks(Lists.newArrayList( + //TODO: + // validateTableVersionTask, + storageInstValidateTask, + storagePoolValidateTask, + drainStorageInfoTask, + rebalanceStoragePoolTask + )); // ddlJob.appendTask(syncTask); return ddlJob; } diff --git
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/util/FactoryUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/util/FactoryUtils.java index d8790501a..1fea6447b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/util/FactoryUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/factory/util/FactoryUtils.java @@ -20,9 +20,11 @@ import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData; import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask; +import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask; import com.alibaba.polardbx.gms.partition.TablePartRecordInfoContext; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; +import com.alibaba.polardbx.gms.tablegroup.TableGroupDetailConfig; import com.alibaba.polardbx.gms.tablegroup.TableGroupRecord; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.TableMeta; @@ -32,6 +34,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; public class FactoryUtils { @@ -57,8 +60,8 @@ public static Pair checkDefaultTableGroup(String schemaName, isSigleTable = partitionInfo.isGsiSingleOrSingleTable(); isBroadCastTable = partitionInfo.isGsiBroadcastOrBroadcast(); - TableGroupConfig tgConfig = physicalPlanData.getTableGroupConfig(); - for (TablePartRecordInfoContext entry : tgConfig.getTables()) { + TableGroupDetailConfig tgConfig = physicalPlanData.getTableGroupConfig(); + for (TablePartRecordInfoContext entry : tgConfig.getTablesPartRecordInfoContext()) { Long tableGroupId = entry.getLogTbRec().getGroupId(); if (tableGroupId != null && tableGroupId != -1) { OptimizerContext oc = @@ -134,4 +137,18 @@ public static List getFkTableSyncTasks(String schemaName, String logica } return taskList; } + + public static void getFkTableExcludeResources(String schemaName, String logicalTableName, + Set resources) { + TableMeta tableMeta = + OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(logicalTableName); + Map foreignKeys = tableMeta.getForeignKeys(); + for (Map.Entry e : foreignKeys.entrySet()) { + resources.add(DdlJobFactory.concatWithDot(e.getValue().refSchema, e.getValue().refTableName)); + } + Map refForeignKeys = tableMeta.getReferencedForeignKeys(); + for (Map.Entry e : refForeignKeys.entrySet()) { + resources.add(DdlJobFactory.concatWithDot(e.getValue().schema, e.getValue().tableName)); + } + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/meta/CommonMetaChanger.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/meta/CommonMetaChanger.java index 80bd61efa..f1f7ae1ac 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/meta/CommonMetaChanger.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/meta/CommonMetaChanger.java @@ -16,13 +16,14 @@ package com.alibaba.polardbx.executor.ddl.job.meta; +import com.alibaba.polardbx.common.Engine; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.TStringUtil; import com.alibaba.polardbx.common.utils.logger.Logger; import 
com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlHelper; -import com.alibaba.polardbx.executor.sync.ClearOSSFileSystemSyncAction; +import com.alibaba.polardbx.executor.sync.ClearFileSystemCacheSyncAction; import com.alibaba.polardbx.executor.sync.DeleteOssFileSyncAction; import com.alibaba.polardbx.executor.sync.InvalidateBufferPoolSyncAction; import com.alibaba.polardbx.executor.sync.RemoveColumnStatisticSyncAction; @@ -33,8 +34,10 @@ import com.alibaba.polardbx.gms.listener.ConfigManager; import com.alibaba.polardbx.gms.listener.impl.MetaDbConfigManager; import com.alibaba.polardbx.gms.metadb.table.TableInfoManager.PhyInfoSchemaContext; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.group.jdbc.TGroupDataSource; import com.alibaba.polardbx.optimizer.config.schema.DefaultDbSchema; +import org.jetbrains.annotations.Nullable; import com.alibaba.polardbx.optimizer.planmanager.PlanManager; import java.util.List; @@ -135,22 +138,23 @@ public static void alterTableColumnFinalOperationsOnSuccess(String schemaName, S // } private static void removeTableStatistic(String schemaName, String logicalTableName) { - SyncManagerHelper.sync(new RemoveTableStatisticSyncAction(schemaName, logicalTableName), schemaName); + SyncManagerHelper.sync(new RemoveTableStatisticSyncAction(schemaName, logicalTableName), schemaName, + SyncScope.ALL); } private static void invalidateAlterTableColumnStatistic(String schemaName, String logicalTableName, List columnList) { SyncManagerHelper.sync(new RemoveColumnStatisticSyncAction(schemaName, logicalTableName, columnList), - schemaName); + schemaName, SyncScope.ALL); } private static void renameStatistic(String schemaName, String logicalTableName, String newLogicalTableName) { SyncManagerHelper.sync(new RenameStatisticSyncAction(schemaName, logicalTableName, newLogicalTableName), - schemaName); + schemaName, SyncScope.ALL); } public static void invalidateBufferPool() { - SyncManagerHelper.sync(new InvalidateBufferPoolSyncAction(), DefaultDbSchema.NAME); + SyncManagerHelper.sync(new InvalidateBufferPoolSyncAction(), DefaultDbSchema.NAME, SyncScope.ALL); } public static void invalidateBufferPool(String schemaName) { @@ -158,14 +162,15 @@ public static void invalidateBufferPool(String schemaName) { } private static void invalidateBufferPool(String schemaName, String logicalTableName) { - SyncManagerHelper.sync(new InvalidateBufferPoolSyncAction(schemaName, logicalTableName), schemaName); + SyncManagerHelper.sync(new InvalidateBufferPoolSyncAction(schemaName, logicalTableName), schemaName, + SyncScope.ALL); } - public static void clearOSSFileSystemCache() { - SyncManagerHelper.sync(new ClearOSSFileSystemSyncAction(), DefaultDbSchema.NAME); + public static void clearFileSystemCache(@Nullable Engine engine, boolean all) { + SyncManagerHelper.sync(new ClearFileSystemCacheSyncAction(engine, all), DefaultDbSchema.NAME, SyncScope.ALL); } public static void clearOSSFileSystemCache(List paths, String schema) { - SyncManagerHelper.sync(new DeleteOssFileSyncAction(paths), schema); + SyncManagerHelper.sync(new DeleteOssFileSyncAction(paths), schema, SyncScope.ALL); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/meta/GsiMetaChanger.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/meta/GsiMetaChanger.java index bef67426f..fce389001 100644 --- 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/meta/GsiMetaChanger.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/meta/GsiMetaChanger.java @@ -50,6 +50,17 @@ public static void changeTableToGsi(Connection metaDbConnection, .changeTablesExtType(metaDbConnection, schemaName, indexName, GsiMetaManager.TableType.GSI.getValue()); } + public static void changeTableToColumnar(Connection metaDbConnection, + String schemaName, + String indexName) { + + ExecutorContext + .getContext(schemaName) + .getGsiManager() + .getGsiMetaManager() + .changeTablesExtType(metaDbConnection, schemaName, indexName, GsiMetaManager.TableType.COLUMNAR.getValue()); + } + public static void updateIndexStatus(Connection metaDbConnection, String schemaName, String primaryTableName, diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/meta/SequenceMetaChanger.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/meta/SequenceMetaChanger.java index 5275bfb0a..fb590a5ba 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/meta/SequenceMetaChanger.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/meta/SequenceMetaChanger.java @@ -50,6 +50,50 @@ public class SequenceMetaChanger { + public static void createSequenceWithoutCheckExists(String schemaName, String logicalTableName, + SequenceBean sequence, TablesExtRecord tablesExtRecord, + boolean isPartitioned, + ExecutionContext executionContext) { + if (sequence == null || !sequence.isNew()) { + return; + } + + boolean isNewPartitionTable = DbInfoManager.getInstance().isNewPartitionDb(schemaName); + + // Check if it needs default sequence type. + if (sequence.getType() == Type.NA) { + if (!isNewPartitionTable) { + TableRule tableRule; + if (tablesExtRecord != null) { + tableRule = DdlJobDataConverter.buildTableRule(tablesExtRecord); + } else { + tableRule = OptimizerContext.getContext(schemaName).getRuleManager().getTableRule(logicalTableName); + } + if (tableRule != null && (isPartitioned || tableRule.isBroadcast())) { + sequence.setType(AutoIncrementType.GROUP); + } else { + return; + } + } else { + sequence.setType(AutoIncrementType.NEW); + } + } + + String seqName = AUTO_SEQ_PREFIX + logicalTableName; + + // Use START WITH 1 by default when there is no table option + // AUTO_INCREMENT = xx specified. 
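+ // For example, a hypothetical table `orders` would get the implicit sequence named + // AUTO_SEQ_PREFIX + table name (presumably AUTO_SEQ_orders), starting from DEFAULT_START_WITH + // (i.e. 1) unless the CREATE TABLE carried an explicit AUTO_INCREMENT=xx table option.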
+ if (sequence.getStart() == null) { + sequence.setStart(DEFAULT_START_WITH); + } + + sequence.setKind(SqlKind.CREATE_SEQUENCE); + + sequence.setName(seqName); + + SequenceValidator.validate(sequence, executionContext, false); + } + public static boolean createSequenceIfExists(String schemaName, String logicalTableName, SequenceBean sequence, TablesExtRecord tablesExtRecord, boolean isPartitioned, boolean ifNotExists, SqlKind sqlKind, @@ -112,7 +156,7 @@ public static boolean createSequenceIfExists(String schemaName, String logicalTa sequence.setName(seqName); - SequenceValidator.validate(sequence, executionContext); + SequenceValidator.validate(sequence, executionContext, true); return true; } @@ -235,7 +279,7 @@ protected static SequenceBean alterSequenceIfExists(String schemaName, String lo sequence.setKind(SqlKind.ALTER_SEQUENCE); - SequenceValidator.validate(sequence, executionContext); + SequenceValidator.validate(sequence, executionContext, true); return sequence; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/meta/TableMetaChanger.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/meta/TableMetaChanger.java index abb49d617..673eb07dd 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/meta/TableMetaChanger.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/meta/TableMetaChanger.java @@ -19,6 +19,7 @@ import com.alibaba.polardbx.common.Engine; import com.alibaba.polardbx.common.ddl.foreignkey.ForeignKeyData; import com.alibaba.polardbx.common.ddl.newengine.DdlConstants; +import com.alibaba.polardbx.common.ddl.newengine.DdlType; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionParams; @@ -31,29 +32,34 @@ import com.alibaba.polardbx.common.utils.timezone.InternalTimeZone; import com.alibaba.polardbx.common.utils.timezone.TimeZoneUtils; import com.alibaba.polardbx.common.utils.timezone.TimestampUtils; +import com.alibaba.polardbx.executor.common.ExecutorContext; import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; -import com.alibaba.polardbx.executor.ddl.sync.ClearPlanCacheSyncAction; import com.alibaba.polardbx.executor.gms.TableRuleManager; import com.alibaba.polardbx.executor.gms.util.SequenceUtil; -import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.executor.gsi.GsiUtils; import com.alibaba.polardbx.gms.listener.ConfigManager; import com.alibaba.polardbx.gms.listener.impl.MetaDbConfigManager; import com.alibaba.polardbx.gms.listener.impl.MetaDbDataIdBuilder; +import com.alibaba.polardbx.gms.metadb.foreign.ForeignColsRecord; import com.alibaba.polardbx.gms.metadb.foreign.ForeignRecord; import com.alibaba.polardbx.gms.metadb.seq.SequenceBaseRecord; import com.alibaba.polardbx.gms.metadb.table.ColumnMetasRecord; import com.alibaba.polardbx.gms.metadb.table.ColumnStatus; +import com.alibaba.polardbx.gms.metadb.table.ColumnarTableMappingRecord; import com.alibaba.polardbx.gms.metadb.table.ColumnsInfoSchemaRecord; import com.alibaba.polardbx.gms.metadb.table.ColumnsRecord; import com.alibaba.polardbx.gms.metadb.table.FilesRecord; +import com.alibaba.polardbx.gms.metadb.table.IndexStatus; +import com.alibaba.polardbx.gms.metadb.table.IndexesInfoSchemaRecord; import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; import 
com.alibaba.polardbx.gms.metadb.table.TablesExtRecord; import com.alibaba.polardbx.gms.partition.TableLocalPartitionRecord; import com.alibaba.polardbx.gms.scheduler.ScheduledJobsRecord; -import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; +import com.alibaba.polardbx.gms.tablegroup.TableGroupDetailConfig; import com.alibaba.polardbx.gms.util.MetaDbUtil; import com.alibaba.polardbx.group.jdbc.TGroupDirectConnection; import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.config.table.GsiMetaManager; import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.partition.PartitionInfoManager; @@ -65,6 +71,7 @@ import com.google.common.collect.ImmutableList; import org.apache.calcite.sql.SequenceBean; import org.apache.calcite.sql.SqlKind; +import org.jetbrains.annotations.NotNull; import java.sql.Connection; import java.sql.ResultSet; @@ -73,6 +80,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -110,7 +118,9 @@ public static void addTableMeta(Connection metaDbConn, PhyInfoSchemaContext phyI boolean hasTimestampColumnDefault, ExecutionContext executionContext, Map specialDefaultValues, Map specialDefaultValueFlags, - List addedForeignKeys) { + List addedForeignKeys, + Map columnMapping, + List addNewColumns) { String schemaName = phyInfoSchemaContext.tableSchema; String tableName = phyInfoSchemaContext.tableName; @@ -120,20 +130,24 @@ public static void addTableMeta(Connection metaDbConn, PhyInfoSchemaContext phyI long newSeqCacheSize = executionContext.getParamManager().getLong(ConnectionParams.NEW_SEQ_CACHE_SIZE); newSeqCacheSize = newSeqCacheSize < 1 ? 
0 : newSeqCacheSize; tableInfoManager.addTable(phyInfoSchemaContext, newSeqCacheSize, - SequenceUtil.buildFailPointInjector(executionContext), addedForeignKeys); + SequenceUtil.buildFailPointInjector(executionContext), addedForeignKeys, columnMapping, addNewColumns); + + //check referenced foreign key table and update fk index + updateForeignKeyRefIndex(metaDbConn, schemaName, tableName); //add foreign key table meta if (GeneralUtil.isNotEmpty(addedForeignKeys)) { try { for (ForeignKeyData addedForeignKey : addedForeignKeys) { - TableInfoManager.updateTableVersion(addedForeignKey.refSchema, addedForeignKey.refTableName, + TableInfoManager.updateTableVersionWithoutDataId(addedForeignKey.refSchema, + addedForeignKey.refTableName, metaDbConn); Map> fkTables = ForeignKeyUtils.getAllForeignKeyRelatedTables(addedForeignKey.refSchema, addedForeignKey.refTableName); for (Map.Entry> entry : fkTables.entrySet()) { for (String table : entry.getValue()) { - TableInfoManager.updateTableVersion(entry.getKey(), table, metaDbConn); + TableInfoManager.updateTableVersionWithoutDataId(entry.getKey(), table, metaDbConn); } } } @@ -154,6 +168,171 @@ public static void addTableMeta(Connection metaDbConn, PhyInfoSchemaContext phyI } } + public static void updateForeignKeyRefIndex(Connection metaDbConn, String schemaName, String tableName) { + TableInfoManager tableInfoManager = new TableInfoManager(); + tableInfoManager.setConnection(metaDbConn); + List foreignRecords = tableInfoManager.queryReferencedTable(schemaName, tableName); + for (ForeignRecord record : foreignRecords) { + List fkColRecords = + tableInfoManager.queryForeignKeysCols(record.schemaName, record.tableName, record.indexName); + List refColumns = fkColRecords.stream().map(c -> c.refColName).collect(Collectors.toList()); + List indexesRecords = + tableInfoManager.queryForeignKeyRefIndexes(schemaName, tableName, refColumns); + if (GeneralUtil.isEmpty(indexesRecords)) { + throw new TddlRuntimeException(ErrorCode.ERR_ADD_FK_CONSTRAINT, "Foreign key columns do not exist"); + } + String refIndexName = indexesRecords.get(0).indexName; + tableInfoManager.updateForeignKeyRefIndex(record.schemaName, record.tableName, record.indexName, + refIndexName); + } + } + + public static void updateForeignKeyRefIndexNull(Connection metaDbConn, String schemaName, String tableName) { + TableInfoManager tableInfoManager = new TableInfoManager(); + tableInfoManager.setConnection(metaDbConn); + List foreignRecords = tableInfoManager.queryReferencedTable(schemaName, tableName); + for (ForeignRecord record : foreignRecords) { + tableInfoManager.updateForeignKeyRefIndex(record.schemaName, record.tableName, record.indexName, ""); + } + } + + public static List addColumnarTableMeta(Connection metaDbConn, String schemaName, + String primaryTableName, String columnarTableName, + Engine engine) { + TableInfoManager tableInfoManager = new TableInfoManager(); + tableInfoManager.setConnection(metaDbConn); + return tableInfoManager.addColumnarTable(schemaName, primaryTableName, columnarTableName, engine); + } + + public static void changeColumnarTableMeta(Connection metaDbConnection, + String schemaName, + String primaryTableName, + List addedColumns, + List droppedColumns, + List updateColumns, + List> changeColumns, + long versionId, + long ddlJobId) { + + TableInfoManager tableInfoManager = new TableInfoManager(); + tableInfoManager.setConnection(metaDbConnection); + + Set> indexes = tableInfoManager.queryCci(schemaName, primaryTableName); + if (GeneralUtil.isEmpty(indexes)) { + 
return; + } + + // ADD COLUMN + if (GeneralUtil.isNotEmpty(addedColumns)) { + tableInfoManager.alterColumnarTableColumns(schemaName, primaryTableName, indexes, versionId, ddlJobId, + DdlType.ALTER_TABLE_ADD_COLUMN, new ArrayList<>()); + } + + // DROP COLUMN + if (GeneralUtil.isNotEmpty(droppedColumns)) { + tableInfoManager.dropColumnarTableColumns(schemaName, primaryTableName, indexes, droppedColumns, + versionId, ddlJobId); + } + + // MODIFY COLUMN + if (GeneralUtil.isNotEmpty(updateColumns)) { + tableInfoManager.alterColumnarTableColumns(schemaName, primaryTableName, indexes, versionId, ddlJobId, + DdlType.ALTER_TABLE_MODIFY_COLUMN, new ArrayList<>()); + } + + // CHANGE COLUMN + if (GeneralUtil.isNotEmpty(changeColumns)) { + tableInfoManager.alterColumnarTableColumns(schemaName, primaryTableName, indexes, versionId, ddlJobId, + DdlType.ALTER_TABLE_CHANGE_COLUMN, changeColumns); + } + } + + public static ColumnarTableMappingRecord addCreateCciSchemaEvolutionMeta(Connection metaDbConn, + String schemaName, + String primaryTableName, + String columnarTableName, + Map options, + long versionId, + long ddlJobId) { + TableInfoManager tableInfoManager = new TableInfoManager(); + tableInfoManager.setConnection(metaDbConn); + // Add columnar table mapping and columnar table evolution + return tableInfoManager.addCreateCciSchemaEvolutionMeta(schemaName, + primaryTableName, + columnarTableName, + options, + versionId, + ddlJobId); + } + + public static void addRollbackCreateCciSchemaEvolutionMeta(Connection metaDbConnection, + @NotNull ColumnarTableMappingRecord tableMappingRecord, + Long rollbackVersionId, + Long ddlJobId) { + final TableInfoManager tableInfoManager = new TableInfoManager(); + tableInfoManager.setConnection(metaDbConnection); + // Add columnar table evolution and update columnar table mapping status + tableInfoManager.addDropCciSchemaEvolutionMeta(tableMappingRecord, + rollbackVersionId, + ddlJobId); + } + + public static void addDropCciSchemaEvolutionMeta(Connection metaDbConnection, + String schemaName, + String primaryTableName, + String columnarTableName, + Long versionId, + Long ddlJobId) { + final TableInfoManager tableInfoManager = new TableInfoManager(); + tableInfoManager.setConnection(metaDbConnection); + // Add columnar table evolution and update columnar table mapping status + tableInfoManager.addDropCciSchemaEvolutionMeta(schemaName, + primaryTableName, + columnarTableName, + versionId, + ddlJobId); + } + + public static void removeColumnarTableMeta(Connection metaDbConnection, String schemaName, + String columnarTableName) { + TableInfoManager tableInfoManager = new TableInfoManager(); + tableInfoManager.setConnection(metaDbConnection); + + // TODO(mocheng) make sure no snapshot and streaming task is running on columnar + tableInfoManager.removeColumnarTable(schemaName, columnarTableName); + } + + public static void renameColumnarTableMeta(Connection metaDbConn, String schemaName, String primaryTableName, + String newPrimaryTableName, long versionId, long ddlJobId) { + TableInfoManager tableInfoManager = new TableInfoManager(); + tableInfoManager.setConnection(metaDbConn); + tableInfoManager.renameColumnarTable(schemaName, primaryTableName, newPrimaryTableName, versionId, ddlJobId); + } + + public static void notifyCreateColumnarIndex(Connection metaDbConn, String schemaName, + String primaryTableName) { + String columnarTableListDataId = MetaDbDataIdBuilder.getColumnarTableListDataId(schemaName); + String columnarTableDataId = 
MetaDbDataIdBuilder.getColumnarDataId(schemaName, primaryTableName); + // register new columnar table data id. + CONFIG_MANAGER.register(columnarTableDataId, metaDbConn); + // update columnar table list data id + CONFIG_MANAGER.notify(columnarTableListDataId, metaDbConn); + } + + /** + * TODO: check remaining columnar indexes on the target table + */ + public static void notifyDropColumnarIndex(Connection metaDbConn, String schemaName, + String primaryTableName) { + String columnarTableListDataId = MetaDbDataIdBuilder.getColumnarTableListDataId(schemaName); + String columnarTableDataId = MetaDbDataIdBuilder.getColumnarDataId(schemaName, primaryTableName); + // unregister columnar table listener + CONFIG_MANAGER.unregister(columnarTableDataId, metaDbConn); + CONFIG_MANAGER.unbindListener(columnarTableDataId); + // update columnar table list data id + CONFIG_MANAGER.notify(columnarTableListDataId, metaDbConn); + } + public static void addOssTableMeta(Connection metaDbConn, PhyInfoSchemaContext phyInfoSchemaContext, Engine tableEngine, ExecutionContext executionContext) { TableInfoManager tableInfoManager = new TableInfoManager(); @@ -245,6 +424,14 @@ public static void validOssColumnMeta(Connection metaDbConn, Long taskId, String tableInfoManager.validOSSColumnsMeta(taskId, tableSchema, tableName); } + public static void updateArchiveTable(Connection metaDbConnection, + String schemaName, String tableName, + String archiveTableSchema, String archiveTableName) { + TableInfoManager tableInfoManager = new TableInfoManager(); + tableInfoManager.setConnection(metaDbConnection); + tableInfoManager.updateArchiveTable(schemaName, tableName, archiveTableSchema, archiveTableName); + } + public static PhyInfoSchemaContext buildPhyInfoSchemaContext(String schemaName, String logicalTableName, String dbIndex, String phyTableName, SequenceBean sequenceBean, @@ -268,6 +455,31 @@ public static PhyInfoSchemaContext buildPhyInfoSchemaContext(String schemaName, return phyInfoSchemaContext; } + public static PhyInfoSchemaContext buildPhyInfoSchemaContextAndCreateSequence(String schemaName, + String logicalTableName, + String dbIndex, String phyTableName, + SequenceBean sequenceBean, + TablesExtRecord tablesExtRecord, + boolean isPartitioned, + boolean ifNotExists, SqlKind sqlKind, + long ts, + ExecutionContext executionContext) { + PhyInfoSchemaContext phyInfoSchemaContext = + CommonMetaChanger.getPhyInfoSchemaContext(schemaName, logicalTableName, dbIndex, phyTableName); + + SequenceMetaChanger.createSequenceWithoutCheckExists(schemaName, logicalTableName, sequenceBean, + tablesExtRecord, isPartitioned, executionContext); + + if (sequenceBean != null) { + SequenceBaseRecord sequenceRecord = SequenceUtil.convert(sequenceBean, schemaName, executionContext); + phyInfoSchemaContext.sequenceRecord = sequenceRecord; + } + + phyInfoSchemaContext.ts = ts; + + return phyInfoSchemaContext; + } + public static void triggerSchemaChange(Connection metaDbConn, String schemaName, String tableName, SequenceBaseRecord sequenceRecord, TableInfoManager tableInfoManager) { String tableListDataId = MetaDbDataIdBuilder.getTableListDataId(schemaName); @@ -286,15 +498,12 @@ public static void triggerSchemaChange(Connection metaDbConn, String schemaName, public static void afterNewTableMeta(String schemaName, String logicalTableName) { String tableListDataId = MetaDbDataIdBuilder.getTableListDataId(schemaName); String tableDataId = MetaDbDataIdBuilder.getTableDataId(schemaName, logicalTableName); - 
CommonMetaChanger.sync(tableListDataId); - try (Connection metaDbConn = MetaDbUtil.getConnection()) { CONFIG_MANAGER.notify(tableDataId, metaDbConn); } catch (SQLException e) { throw new TddlRuntimeException(ErrorCode.ERR_GMS_GET_CONNECTION, e, e.getMessage()); } - CommonMetaChanger.sync(tableDataId); } @@ -329,6 +538,9 @@ public static void removeTableMeta(Connection metaDbConnection, String schemaNam // Remove all table meta. tableInfoManager.removeTable(schemaName, logicalTableName, finalSequenceRecord, withTablesExtOrPartition); + //check referenced foreign key table and update fk index + updateForeignKeyRefIndexNull(metaDbConnection, schemaName, logicalTableName); + // Change foreign logical if contains child table TableMeta tableMeta = OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTableWithNull(logicalTableName); @@ -337,7 +549,8 @@ public static void removeTableMeta(Connection metaDbConnection, String schemaNam TableMetaChanger.addLogicalForeignKeyMeta(metaDbConnection, schemaName, logicalTableName, new ArrayList<>(tableMeta.getReferencedForeignKeys().values())); for (Map.Entry entry : tableMeta.getReferencedForeignKeys().entrySet()) { - TableInfoManager.updateTableVersion(entry.getValue().schema, entry.getValue().tableName, + TableInfoManager.updateTableVersionWithoutDataId(entry.getValue().schema, + entry.getValue().tableName, metaDbConnection); } } catch (SQLException ex) { @@ -352,7 +565,7 @@ public static void removeTableMeta(Connection metaDbConnection, String schemaNam logicalTableName); for (Map.Entry> entry : fkTables.entrySet()) { for (String table : entry.getValue()) { - TableInfoManager.updateTableVersion(entry.getKey(), table, metaDbConnection); + TableInfoManager.updateTableVersionWithoutDataId(entry.getKey(), table, metaDbConnection); } } } catch (SQLException ex) { @@ -373,6 +586,33 @@ public static void removeTableMeta(Connection metaDbConnection, String schemaNam } } + public static void removeTableMetaWithoutNotify(Connection metaDbConnection, String schemaName, + String logicalTableName, + boolean withTablesExtOrPartition, + ExecutionContext executionContext) { + // Remove sequence meta if exists. + SequenceBaseRecord sequenceRecord = null; + SequenceBean sequenceBean = SequenceMetaChanger.dropSequenceIfExists(schemaName, logicalTableName); + if (sequenceBean != null) { + sequenceRecord = SequenceUtil.convert(sequenceBean, schemaName, executionContext); + } + final SequenceBaseRecord finalSequenceRecord = sequenceRecord; + + TableInfoManager tableInfoManager = new TableInfoManager(); + tableInfoManager.setConnection(metaDbConnection); + + // Remove all table meta. 
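+ // Unlike removeTableMeta above, this WithoutNotify variant only touches metadb state and never + // calls CONFIG_MANAGER, so no data-id change is broadcast here; callers are presumably expected + // to trigger the sync themselves (e.g. via syncTableDataId further below) once the job commits.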
+ tableInfoManager.removeTable(schemaName, logicalTableName, finalSequenceRecord, withTablesExtOrPartition); + + if (withTablesExtOrPartition) { + tableInfoManager.removeTableExt(schemaName, logicalTableName); + } + + if (sequenceRecord != null) { + SequenceManagerProxy.getInstance().invalidate(schemaName, sequenceBean.getName()); + } + } + public static void afterRemovingTableMeta(String schemaName, String logicalTableName) { String tableListDataId = MetaDbDataIdBuilder.getTableListDataId(schemaName); CommonMetaChanger.sync(tableListDataId); @@ -405,19 +645,28 @@ public static void syncTableListDataId(String schemaName) { CommonMetaChanger.sync(tableListDataId); } + public static void syncTableDataId(String schemaName, String logicalTableName) { + String tableDataId = MetaDbDataIdBuilder.getTableDataId(schemaName, logicalTableName); + + CommonMetaChanger.sync(tableDataId); + } + public static void renameTableMeta(Connection metaDbConn, String schemaName, String logicalTableName, - String newLogicalTableName, ExecutionContext executionContext) { - renameTableMeta(metaDbConn, schemaName, logicalTableName, newLogicalTableName, executionContext, true); + String newLogicalTableName, boolean needRenamePhyTables, + ExecutionContext executionContext) { + renameTableMeta(metaDbConn, schemaName, logicalTableName, newLogicalTableName, executionContext, true, + needRenamePhyTables); } public static void renameTableMeta(Connection metaDbConn, String schemaName, String logicalTableName, String newLogicalTableName, ExecutionContext executionContext, - boolean notifyTableListDataId) { + boolean notifyTableListDataId, boolean needRenamePhyTables) { String tableListDataId = MetaDbDataIdBuilder.getTableListDataId(schemaName); String tableDataId = MetaDbDataIdBuilder.getTableDataId(schemaName, logicalTableName); String newTableDataId = MetaDbDataIdBuilder.getTableDataId(schemaName, newLogicalTableName); String newTbNamePattern = - buildNewTbNamePattern(executionContext, schemaName, logicalTableName, newLogicalTableName); + buildNewTbNamePattern(executionContext, schemaName, logicalTableName, newLogicalTableName, + needRenamePhyTables); boolean isGsi = TableValidator.checkTableIsGsi(schemaName, logicalTableName); // Rename sequence if exists. @@ -437,7 +686,7 @@ public static void renameTableMeta(Connection metaDbConn, String schemaName, Str } // Replace with new table name - if (!executionContext.needToRenamePhyTables()) { + if (!needRenamePhyTables) { newTbNamePattern = null; } tableInfoManager @@ -446,29 +695,30 @@ public static void renameTableMeta(Connection metaDbConn, String schemaName, Str // update foreign key and table meta renameForeignKeyTable(metaDbConn, schemaName, logicalTableName, newLogicalTableName, tableInfoManager); - // Unregister the old table data id. - CONFIG_MANAGER.unregister(tableDataId, metaDbConn); + if (notifyTableListDataId) { + // Unregister the old table data id. + CONFIG_MANAGER.unregister(tableDataId, metaDbConn); - // Register new table data id. - CONFIG_MANAGER.register(newTableDataId, metaDbConn); + // Register new table data id. 
+ CONFIG_MANAGER.register(newTableDataId, metaDbConn); - if (notifyTableListDataId) { CONFIG_MANAGER.notify(tableListDataId, metaDbConn); } } public static void renamePartitionTableMeta(Connection metaDbConn, String schemaName, String logicalTableName, - String newLogicalTableName, ExecutionContext executionContext) { - renamePartitionTableMeta(metaDbConn, schemaName, logicalTableName, newLogicalTableName, executionContext, true); + String newLogicalTableName, boolean needRenamePhyTables, + ExecutionContext executionContext) { + renamePartitionTableMeta(metaDbConn, schemaName, logicalTableName, newLogicalTableName, executionContext, true, + needRenamePhyTables); } public static void renamePartitionTableMeta(Connection metaDbConn, String schemaName, String logicalTableName, String newLogicalTableName, ExecutionContext executionContext, - boolean notifyTableListDataId) { + boolean notifyTableListDataId, boolean needRenamePhyTables) { String tableListDataId = MetaDbDataIdBuilder.getTableListDataId(schemaName); String tableDataId = MetaDbDataIdBuilder.getTableDataId(schemaName, logicalTableName); String newTableDataId = MetaDbDataIdBuilder.getTableDataId(schemaName, newLogicalTableName); - boolean renamePhyTable = executionContext.needToRenamePhyTables(); boolean isGsi = TableValidator.checkTableIsGsi(schemaName, logicalTableName); // Rename sequence if exists. @@ -490,7 +740,7 @@ public static void renamePartitionTableMeta(Connection metaDbConn, String schema } // Replace with new physical table name - if (renamePhyTable) { + if (needRenamePhyTables) { tableInfoManager.renamePartitionTablePhyTable(schemaName, logicalTableName, newLogicalTableName); } @@ -505,15 +755,28 @@ public static void renamePartitionTableMeta(Connection metaDbConn, String schema // update foreign key indexesAccessor and table meta renameForeignKeyTable(metaDbConn, schemaName, logicalTableName, newLogicalTableName, tableInfoManager); + if (notifyTableListDataId) { + // Unregister the old table data id. + CONFIG_MANAGER.unregister(tableDataId, metaDbConn); + + // Register new table data id. + CONFIG_MANAGER.register(newTableDataId, metaDbConn); + + CONFIG_MANAGER.notify(tableListDataId, metaDbConn); + } + } + + public static void renameTableDataId(Connection metaDbConn, String schemaName, String logicalTableName, + String newLogicalTableName) { + String tableListDataId = MetaDbDataIdBuilder.getTableListDataId(schemaName); + String tableDataId = MetaDbDataIdBuilder.getTableDataId(schemaName, logicalTableName); + String newTableDataId = MetaDbDataIdBuilder.getTableDataId(schemaName, newLogicalTableName); + // Unregister the old table data id. CONFIG_MANAGER.unregister(tableDataId, metaDbConn); // Register new table data id. 
CONFIG_MANAGER.register(newTableDataId, metaDbConn); - - if (notifyTableListDataId) { - CONFIG_MANAGER.notify(tableListDataId, metaDbConn); - } } private static void renameForeignKeyTable(Connection metaDbConn, String schemaName, String logicalTableName, @@ -537,7 +800,8 @@ private static void renameForeignKeyTable(Connection metaDbConn, String schemaNa // referenced table -> table String referencedSchemaName = e.getValue().schema; String referencedTableName = e.getValue().tableName; - TableInfoManager.updateTableVersion(referencedSchemaName, referencedTableName, metaDbConn); + TableInfoManager.updateTableVersionWithoutDataId(referencedSchemaName, referencedTableName, + metaDbConn); } } @@ -562,7 +826,7 @@ private static void renameForeignKeyTable(Connection metaDbConn, String schemaNa ForeignKeyUtils.getAllForeignKeyRelatedTables(schemaName, logicalTableName); for (Map.Entry> entry : fkTables.entrySet()) { for (String table : entry.getValue()) { - TableInfoManager.updateTableVersion(entry.getKey(), table, metaDbConn); + TableInfoManager.updateTableVersionWithoutDataId(entry.getKey(), table, metaDbConn); } } } @@ -582,6 +846,15 @@ public static void hideTableMeta(Connection metaDbConnection, String schemaName, tableInfoManager.hideColumns(schemaName, logicalTableName, columnNames); tableInfoManager.hideIndexesColumns(schemaName, logicalTableName, columnNames); tableInfoManager.hideIndexes(schemaName, logicalTableName, indexNames); + + // hide columnar related columns and indexes + Set> columnarIndexes = tableInfoManager.queryCci(schemaName, logicalTableName); + if (GeneralUtil.isNotEmpty(columnarIndexes)) { + for (Pair columnarIndex : columnarIndexes) { + tableInfoManager.hideColumns(schemaName, columnarIndex.getValue(), columnNames); + tableInfoManager.hideIndexesColumns(schemaName, columnarIndex.getValue(), columnNames); + } + } } public static void showTableMeta(Connection metaDbConnection, String schemaName, String logicalTableName, @@ -591,6 +864,15 @@ public static void showTableMeta(Connection metaDbConnection, String schemaName, tableInfoManager.showColumns(schemaName, logicalTableName, columnNames); tableInfoManager.showIndexesColumns(schemaName, logicalTableName, columnNames); tableInfoManager.showIndexes(schemaName, logicalTableName, indexNames); + + // show columnar related columns and indexes + Set> columnarIndexes = tableInfoManager.queryCci(schemaName, logicalTableName); + if (GeneralUtil.isNotEmpty(columnarIndexes)) { + for (Pair columnarIndex : columnarIndexes) { + tableInfoManager.showColumns(schemaName, columnarIndex.getValue(), columnNames); + tableInfoManager.showIndexesColumns(schemaName, columnarIndex.getValue(), columnNames); + } + } } public static void changeTableMeta(Connection metaDbConnection, String schemaName, String logicalTableName, @@ -630,106 +912,35 @@ public static void changeTableMeta(Connection metaDbConnectio Map> columnJdbcExtInfo = tableInfoManager.fetchColumnJdbcExtInfo( phyInfoSchemaContext.phyTableSchema, phyInfoSchemaContext.phyTableName, phyInfoSchemaContext.dataSource); - // Remove dropped column meta if exist. + // NOTICE: FOR COLUMN OPERATIONS, THE ONLY CORRECT ORDER IS DROP => CHANGE => ADD/MODIFY + // throughout this meta changer. + // We first query the meta from information_schema.columns on the DN by the new column names, + // then delete the meta by the old column names, + // and finally insert/update the meta queried before. + + // A. Remove dropped column meta if present.
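+ // A hypothetical statement illustrating why the order matters (not taken from this patch): + // ALTER TABLE t DROP COLUMN b, CHANGE COLUMN a b BIGINT, ADD COLUMN a INT; + // dropping first frees the name `b` for the CHANGE, and the ADD re-creates `a` last, + // so the meta tables never hold two live columns with the same name.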
if (GeneralUtil.isNotEmpty(droppedColumns)) { + // TODO(yijin): for columns firstly dropped and then added, we should keep its index here. tableInfoManager.removeColumns(schemaName, logicalTableName, droppedColumns); } - // Update existing column meta if exist and column name may be changed as well. + // B. Update existing column meta if exist and column name may be changed as well. if (GeneralUtil.isNotEmpty(changedColumns)) { tableInfoManager.changeColumns(phyInfoSchemaContext, columnJdbcExtInfo, changedColumns); - // Reset binary default value flag - for (Pair changeColumn : changedColumns) { - String columnName = changeColumn.getKey(); - tableInfoManager.resetColumnBinaryDefaultFlag(schemaName, logicalTableName, columnName); - } - - // update foreign key column and table meta - Map reverseChangeColumns = new HashMap<>(); - for (Pair entry : changedColumns) { - reverseChangeColumns.put(entry.getValue(), entry.getKey()); - } - - try { - TableMeta tableMeta = - OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTableWithNull(logicalTableName); - if (tableMeta != null) { - Map referencedForeignKeys = tableMeta.getReferencedForeignKeys(); - if (!referencedForeignKeys.isEmpty()) { - for (Map.Entry e : referencedForeignKeys.entrySet()) { - boolean sync = false; - for (int i = 0; i < e.getValue().columns.size(); ++i) { - String oldColumn = e.getValue().refColumns.get(i); - if (reverseChangeColumns.containsKey(oldColumn)) { - String columnName = reverseChangeColumns.get(oldColumn); - String indexName = e.getValue().indexName; - tableInfoManager - .updateReferencedForeignKeyColumn(e.getValue().schema, - e.getValue().tableName, indexName, - columnName, oldColumn); - sync = true; - } - } - - // referenced table -> table - if (sync) { - String referencedSchemaName = e.getValue().schema; - String referredTableName = e.getValue().tableName; - TableInfoManager.updateTableVersion(referencedSchemaName, referredTableName, - metaDbConnection); - } - } - } - - Map foreignKeys = tableMeta.getForeignKeys(); - if (!foreignKeys.isEmpty()) { - for (Map.Entry e : foreignKeys.entrySet()) { - boolean sync = false; - for (int i = 0; i < e.getValue().columns.size(); ++i) { - String oldColumn = e.getValue().columns.get(i); - if (reverseChangeColumns.containsKey(oldColumn)) { - String columnName = reverseChangeColumns.get(oldColumn); - String indexName = e.getValue().indexName; - tableInfoManager - .updateForeignKeyColumn(schemaName, logicalTableName, indexName, columnName, - oldColumn); - sync = true; - } - } - // referencing table -> table - if (sync) { - Map> fkTables = - ForeignKeyUtils.getAllForeignKeyRelatedTables(e.getValue().refSchema, - e.getValue().refTableName); - for (Map.Entry> entry : fkTables.entrySet()) { - for (String table : entry.getValue()) { - TableInfoManager.updateTableVersion(entry.getKey(), table, metaDbConnection); - } - } - } - } - } - } - } catch (SQLException ex) { - throw new RuntimeException(ex); - } + changeForeignKeyRefIndex(metaDbConnection, tableInfoManager, schemaName, logicalTableName, changedColumns); } - // Add new column meta if exist. + // C. Add new column meta if exist. if (GeneralUtil.isNotEmpty(addedColumns)) { tableInfoManager.addColumns(phyInfoSchemaContext, columnJdbcExtInfo, addedColumns); } - // Update existing column meta if exist. + // D. Update existing column meta if exist. 
-        // Update existing column meta if exist.
+        // D. Update existing column meta if any exist.
         if (GeneralUtil.isNotEmpty(updatedColumns)) {
             tableInfoManager.updateColumns(phyInfoSchemaContext, columnJdbcExtInfo, updatedColumns);
-
-            // Clear binary default value flag
-            for (String columnName : updatedColumns) {
-                tableInfoManager.resetColumnBinaryDefaultFlag(schemaName, logicalTableName, columnName);
-            }
         }
 
-        // Refresh related column order.
+        // Finally, refresh the related column order according to the AFTER relationships.
         refreshColumnOrder(schemaName, logicalTableName, columnAfterAnother, requireLogicalColumnOrder,
             tableInfoManager);
 
@@ -760,6 +971,9 @@ public static void changeTableMeta(Connection metaDbConnection, String schemaNam
         // Remove existing index meta.
         if (GeneralUtil.isNotEmpty(droppedIndexes)) {
             tableInfoManager.removeIndexes(schemaName, logicalTableName, droppedIndexes);
+
+            // check referenced foreign key tables and update the fk index
+            updateForeignKeyRefIndex(metaDbConnection, schemaName, logicalTableName);
         }
 
         // Rename existing index meta.
@@ -774,6 +988,9 @@ public static void changeTableMeta(Connection metaDbConnection, String schemaNam
         } else {
             tableInfoManager.renameIndexes(schemaName, logicalTableName, renamedIndexes);
         }
+
+        // check referenced foreign key tables and update the fk index
+        updateForeignKeyRefIndex(metaDbConnection, schemaName, logicalTableName);
     }
 
     // Add new index meta if existed.
@@ -820,11 +1037,173 @@ public static void changeTableMeta(Connection metaDbConnection, String schemaNam
         tableInfoManager.showTable(schemaName, logicalTableName, sequenceRecord,
             !(onlineModifyColumnIndexTask || isAddLogicalGeneratedColumn));
+
+        // Change columnar table meta in the same transaction
+        Set> indexes = tableInfoManager.queryCci(schemaName, logicalTableName);
+        if (GeneralUtil.isNotEmpty(indexes)) {
+            // propagate column and index changes to the columnar indexes
+            changeCciRelatedMeta(metaDbConnection, tableInfoManager, schemaName, logicalTableName, phyInfoSchemaContext,
+                addedColumns, droppedColumns, changedColumns);
+        }
+    }
+
+    public static void changeCciRelatedMeta(Connection metaDbConnection,
+                                            TableInfoManager tableInfoManager,
+                                            String schemaName, String logicalTableName,
+                                            PhyInfoSchemaContext context,
+                                            List addedColumns,
+                                            List droppedColumns,
+                                            List> changeColumns) {
+        Set> columnarIndexes = tableInfoManager.queryCci(schemaName, logicalTableName);
+        List indexNames = columnarIndexes.stream().map(Pair::getValue).collect(Collectors.toList());
+
+        Map> columnsJdbcExtInfo = tableInfoManager.fetchColumnJdbcExtInfo(
+            context.phyTableSchema, context.phyTableName, context.dataSource);
+
+        // CHANGE INDEXES TABLE
+        // ADD COLUMN
+        if (GeneralUtil.isNotEmpty(addedColumns)) {
+            Map isNullable = tableInfoManager.getIsNullable(context, columnsJdbcExtInfo, addedColumns);
+
+            for (String indexName : indexNames) {
+                final GsiMetaManager.GsiIndexMetaBean gsiIndexMetaBean =
+                    ExecutorContext
+                        .getContext(schemaName)
+                        .getGsiManager()
+                        .getGsiMetaManager()
+                        .getIndexMeta(schemaName, logicalTableName, indexName, IndexStatus.ALL);
+
+                final int seqInIndex =
+                    gsiIndexMetaBean.indexColumns.size() + gsiIndexMetaBean.coveringColumns.size() + 1;
+
+                final List indexRecords =
+                    GsiUtils.buildIndexMetaByAddColumns(
+                        addedColumns,
+                        schemaName,
+                        logicalTableName,
+                        indexName,
+                        seqInIndex,
+                        IndexStatus.PUBLIC,
+                        isNullable
+                    );
+                GsiMetaChanger.addIndexColumnMeta(metaDbConnection, schemaName, logicalTableName, indexRecords);
+            }
+        }
+
+        // DROP COLUMN
+        if (GeneralUtil.isNotEmpty(droppedColumns)) {
+            for (String indexName : indexNames) {
+                for (String column : droppedColumns) {
+                    ExecutorContext
+                        .getContext(schemaName)
+                        .getGsiManager()
+                        .getGsiMetaManager()
+                        .removeColumnMeta(metaDbConnection, schemaName, logicalTableName, indexName, column);
+                }
+            }
+        }
+
+        // CHANGE COLUMN
+        if (GeneralUtil.isNotEmpty(changeColumns)) {
+            for (String indexName : indexNames) {
+                tableInfoManager.changeColumnarIndexColumnMeta(context, columnsJdbcExtInfo, changeColumns, indexName);
+            }
+        }
+
+        // CHANGE COLUMNS TABLE
+        tableInfoManager.changeColumnarIndexTableColumns(indexNames, schemaName, logicalTableName, changeColumns);
+    }
+
+    public static void changeForeignKeyRefIndex(Connection metaDbConnection, TableInfoManager tableInfoManager,
+                                                String schemaName, String tableName,
+                                                List> changedColumns) {
+        // Delete first and then insert; do not UPDATE in place, so that swapped renames like
+        // CHANGE COLUMN a b, CHANGE COLUMN b a cannot corrupt the records through in-place updates.
+        Map> oldFkColsRecords = new HashMap<>();
+        List newFkColsRecords = new ArrayList<>();
+        Map> syncTables = new HashMap<>();
+
+        for (Pair changeColumn : changedColumns) {
+            String oldName = changeColumn.getValue();
+            String newName = changeColumn.getKey();
+
+            // change parent table columns
+            List fks = tableInfoManager.queryReferencedForeignKeys(schemaName, tableName);
+            if (!fks.isEmpty()) {
+                for (ForeignRecord fk : fks) {
+                    List columns =
+                        tableInfoManager.queryForeignKeysCols(fk.schemaName, fk.tableName, fk.indexName);
+                    for (ForeignColsRecord column : columns) {
+                        if (column.refColName.equalsIgnoreCase(oldName)) {
+                            // old
+                            oldFkColsRecords.putIfAbsent(fk.indexName, new ArrayList<>());
+                            oldFkColsRecords.get(fk.indexName).add(column);
+                            // new
+                            ForeignColsRecord newColumn =
+                                new ForeignColsRecord(column.schemaName, column.tableName, column.indexName,
+                                    column.forColName,
+                                    newName, column.pos);
+                            newFkColsRecords.add(newColumn);
+
+                            syncTables.putIfAbsent(fk.schemaName, new HashSet<>());
+                            syncTables.get(fk.schemaName).add(fk.tableName);
+                        }
+                    }
+                }
+            }
+
+            // change child table columns
+            fks = tableInfoManager.queryForeignKeys(schemaName, tableName);
+            if (!fks.isEmpty()) {
+                for (ForeignRecord fk : fks) {
+                    List columns =
+                        tableInfoManager.queryForeignKeysCols(fk.schemaName, fk.tableName, fk.indexName);
+                    for (ForeignColsRecord column : columns) {
+                        if (column.forColName.equalsIgnoreCase(oldName)) {
+                            // old
+                            oldFkColsRecords.putIfAbsent(fk.indexName, new ArrayList<>());
+                            oldFkColsRecords.get(fk.indexName).add(column);
+                            // new
+                            ForeignColsRecord newColumn =
+                                new ForeignColsRecord(column.schemaName, column.tableName, column.indexName, newName,
                                    column.refColName, column.pos);
+                            newFkColsRecords.add(newColumn);
+
+                            syncTables.putIfAbsent(fk.refSchemaName, new HashSet<>());
+                            syncTables.get(fk.refSchemaName).add(fk.refTableName);
+                        }
+                    }
+                }
+            }
+        }
+
+        // delete old records
+        for (Map.Entry> entry : oldFkColsRecords.entrySet()) {
+            String schema = entry.getValue().get(0).schemaName;
+            String table = entry.getValue().get(0).tableName;
+            tableInfoManager.deleteForeignKeyCols(schema, table, entry.getKey(),
+                entry.getValue().stream().map(record -> record.forColName).collect(Collectors.toList()));
+        }
+
+        // insert new records
+        if (GeneralUtil.isNotEmpty(newFkColsRecords)) {
+            tableInfoManager.insertForeignKeyCols(newFkColsRecords);
+        }
+
+        try {
+            // sync
+            for (Map.Entry> entry : syncTables.entrySet()) {
+                for (String table : entry.getValue()) {
+                    TableInfoManager.updateTableVersionWithoutDataId(entry.getKey(), table, metaDbConnection);
+                }
+            }
+        } catch (SQLException ex) {
+            throw new RuntimeException(ex);
+        }
+    }
 
     public static void addForeignKeyMeta(Connection metaDbConnection, String schemaName, String logicalTableName,
                                          String dbIndex, String
phyTableName, - List addedForeignKeys) { + List addedForeignKeys, boolean withoutIndex) { TableInfoManager.PhyInfoSchemaContext phyInfoSchemaContext = CommonMetaChanger.getPhyInfoSchemaContext(schemaName, logicalTableName, dbIndex, phyTableName); @@ -837,17 +1216,17 @@ public static void addForeignKeyMeta(Connection metaDbConnection, String schemaN // create foreign key constraints symbol String symbol = ForeignKeyUtils.getForeignKeyConstraintName(schemaName, logicalTableName); - tableInfoManager.addForeignKeys(phyInfoSchemaContext, addedForeignKeys, symbol); + tableInfoManager.addForeignKeys(phyInfoSchemaContext, addedForeignKeys, symbol, withoutIndex); // update table meta ForeignKeyData data = addedForeignKeys.get(0); - TableInfoManager.updateTableVersion(data.refSchema, data.refTableName, metaDbConnection); + TableInfoManager.updateTableVersionWithoutDataId(data.refSchema, data.refTableName, metaDbConnection); Map> fkTables = ForeignKeyUtils.getAllForeignKeyRelatedTables(schemaName, logicalTableName); for (Map.Entry> entry : fkTables.entrySet()) { for (String table : entry.getValue()) { - TableInfoManager.updateTableVersion(entry.getKey(), table, metaDbConnection); + TableInfoManager.updateTableVersionWithoutDataId(entry.getKey(), table, metaDbConnection); } } } @@ -874,7 +1253,7 @@ public static void dropForeignKeyMeta(Connection metaDbConnection, String schema ForeignKeyUtils.getAllForeignKeyRelatedTables(schemaName, logicalTableName); for (Map.Entry> entry : fkTables.entrySet()) { for (String table : entry.getValue()) { - TableInfoManager.updateTableVersion(entry.getKey(), table, metaDbConnection); + TableInfoManager.updateTableVersionWithoutDataId(entry.getKey(), table, metaDbConnection); } } } @@ -895,13 +1274,13 @@ public static void addLogicalForeignKeyMeta(Connection metaDbConnection, String tableInfoManager.updateForeignKeyPushDown(fk.schema, fk.tableName, fk.indexName, pushDown); // update table meta - TableInfoManager.updateTableVersion(fk.refSchema, fk.refTableName, metaDbConnection); + TableInfoManager.updateTableVersionWithoutDataId(fk.refSchema, fk.refTableName, metaDbConnection); Map> fkTables = ForeignKeyUtils.getAllForeignKeyRelatedTables(schemaName, logicalTableName); for (Map.Entry> entry : fkTables.entrySet()) { for (String table : entry.getValue()) { - TableInfoManager.updateTableVersion(entry.getKey(), table, metaDbConnection); + TableInfoManager.updateTableVersionWithoutDataId(entry.getKey(), table, metaDbConnection); } } } @@ -923,13 +1302,13 @@ public static void dropLogicalForeignKeyMeta(Connection metaDbConnection, String tableInfoManager.updateForeignKeyPushDown(fk.schema, fk.tableName, fk.indexName, pushDown); // update table meta - TableInfoManager.updateTableVersion(fk.refSchema, fk.refTableName, metaDbConnection); + TableInfoManager.updateTableVersionWithoutDataId(fk.refSchema, fk.refTableName, metaDbConnection); Map> fkTables = ForeignKeyUtils.getAllForeignKeyRelatedTables(schemaName, logicalTableName); for (Map.Entry> entry : fkTables.entrySet()) { for (String table : entry.getValue()) { - TableInfoManager.updateTableVersion(entry.getKey(), table, metaDbConnection); + TableInfoManager.updateTableVersionWithoutDataId(entry.getKey(), table, metaDbConnection); } } } @@ -1255,11 +1634,15 @@ public static void removeIndexMeta(Connection metaDbConnection, String schemaNam TableInfoManager tableInfoManager = new TableInfoManager(); tableInfoManager.setConnection(metaDbConnection); tableInfoManager.removeIndex(schemaName, logicalTableName, indexName); + + // 
check referenced foreign key table and update fk index + updateForeignKeyRefIndex(metaDbConnection, schemaName, logicalTableName); } } public static String buildNewTbNamePattern(ExecutionContext executionContext, String schemaName, - String logicalTableName, String newLogicalTableName) { + String logicalTableName, String newLogicalTableName, + boolean needRenamePhyTables) { TddlRuleManager tddlRuleManager = OptimizerContext.getContext(schemaName).getRuleManager(); TableRule tableRule = tddlRuleManager.getTableRule(logicalTableName); @@ -1276,7 +1659,7 @@ public static String buildNewTbNamePattern(ExecutionContext executionContext, St String newTbNamePattern = tableRule.getTbNamePattern(); - if (executionContext.needToRenamePhyTables()) { + if (needRenamePhyTables) { newTbNamePattern = TStringUtil.replaceWithIgnoreCase(newTbNamePattern, logicalTableName, newLogicalTableName); } @@ -1300,7 +1683,7 @@ public static void setAutoPartitionFlag(Connection metaDbConnection, } public static void addPartitionInfoMeta(Connection metaDbConnection, - TableGroupConfig tableGroupConfig, + TableGroupDetailConfig tableGroupConfig, ExecutionContext executionContext, boolean isUpsert) { @@ -1485,23 +1868,4 @@ public static void endAlterColumnDefaultValue(Connection metaDbConn, String sche tableInfoManager.setConnection(metaDbConn); tableInfoManager.endUpdateColumnDefaultVal(schema, table, column); } - - private static void clearPlancache() { - Set activeSchemas = OptimizerContext.getActiveSchemaNames(); - for (String schema : activeSchemas) { - OptimizerContext optimizerContext = OptimizerContext.getContext(schema); - if (optimizerContext != null) { - // TODO: find more precise way - SyncManagerHelper.sync(new ClearPlanCacheSyncAction(schema), schema); - } - } - } - - public static void updateArchiveTable(Connection metaDbConnection, - String schemaName, String tableName, - String archiveTableSchema, String archiveTableName) { - TableInfoManager tableInfoManager = new TableInfoManager(); - tableInfoManager.setConnection(metaDbConnection); - tableInfoManager.updateArchiveTable(schemaName, tableName, archiveTableSchema, archiveTableName); - } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/meta/misc/RepartitionMetaChanger.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/meta/misc/RepartitionMetaChanger.java index 2e491f41d..3ba670001 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/meta/misc/RepartitionMetaChanger.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/meta/misc/RepartitionMetaChanger.java @@ -30,7 +30,6 @@ import org.apache.commons.lang3.StringUtils; import java.sql.Connection; -import java.sql.SQLException; import java.util.List; import java.util.Map; import java.util.UUID; @@ -311,6 +310,14 @@ public static void cutOverIndexes(Connection metaDbConn, throw new TddlNestableRuntimeException(msgContent); } + // update index status public --- write only + tableInfoManager.updateIndexesStatus(schemaName, sourceIndexName, targetTableIndex.get(0).indexStatus); + tableInfoManager.updateIndexesStatus(schemaName, targetIndexName, sourceTableIndex.get(0).indexStatus); + + // update flag + tableInfoManager.updateIndexesFlag(schemaName, sourceIndexName, targetTableIndex.get(0).flag); + tableInfoManager.updateIndexesFlag(schemaName, targetIndexName, sourceTableIndex.get(0).flag); + // cut over tableInfoManager.alterPartitionCountCutOver(schemaName, sourceIndexName, random); 
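The new `updateIndexesStatus`/`updateIndexesFlag` calls above exchange status and flag between the source and target index records (per the comment, PUBLIC versus write-only), so both values come from the previously queried record lists before either row is written. A minimal sketch of that read-before-write swap (the record type and values are hypothetical stand-ins):

```java
// Illustration only: swapping status/flag between a source and a target record.
class IndexRecord {
    int status;
    long flag;
}

public class CutOverSwapDemo {
    public static void main(String[] args) {
        IndexRecord source = new IndexRecord();
        source.status = 1; // stand-in for a PUBLIC-like status
        source.flag = 0L;
        IndexRecord target = new IndexRecord();
        target.status = 2; // stand-in for a WRITE_ONLY-like status
        target.flag = 4L;

        // Read both sides first ...
        int sourceStatus = source.status;
        long sourceFlag = source.flag;

        // ... then write. Overwriting source from target before capturing
        // source's original values would lose them.
        source.status = target.status;
        source.flag = target.flag;
        target.status = sourceStatus;
        target.flag = sourceFlag;

        System.out.printf("source=%d/%d target=%d/%d%n",
            source.status, source.flag, target.status, target.flag); // source=2/4 target=1/0
    }
}
```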
tableInfoManager.alterPartitionCountCutOver(schemaName, targetIndexName, sourceIndexName); @@ -351,6 +358,17 @@ public static void cutOverColumns(Connection metaDbConn, throw new TddlNestableRuntimeException(msgContent); } + for (ColumnsRecord columnsRecord : targetTableColumns) { + if (columnsRecord.columnMappingName != null) { + tableInfoManager.updateColumnMappingName(schemaName, targetTableName, columnsRecord.columnName, + null); + if (!columnsRecord.columnMappingName.isEmpty()) { + tableInfoManager.updateColumnMappingName(schemaName, sourceTableName, + columnsRecord.columnMappingName, columnsRecord.columnName); + } + } + } + // cut over tableInfoManager.alterModifyColumnCutOver(schemaName, sourceTableName, random); tableInfoManager.alterModifyColumnCutOver(schemaName, targetTableName, sourceTableName); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/AlterGsiVisibilityValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/AlterGsiVisibilityValidateTask.java index 90d4d6d7d..4e71e40fc 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/AlterGsiVisibilityValidateTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/AlterGsiVisibilityValidateTask.java @@ -18,9 +18,6 @@ import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.ddl.job.validator.GsiValidator; -import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; -import com.alibaba.polardbx.gms.metadb.table.IndexStatus; -import com.alibaba.polardbx.gms.metadb.table.IndexVisibility; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -46,7 +43,7 @@ public AlterGsiVisibilityValidateTask(String schemaName, String primaryTableName @Override public void executeImpl(ExecutionContext executionContext) { - GsiValidator.validateGsi(schemaName, indexTableName); + GsiValidator.validateGsiOrCci(schemaName, indexTableName); GsiValidator.validateAllowDdlOnTable(schemaName, primaryTableName, executionContext); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/BaseGmsTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/BaseGmsTask.java index f712659f4..dff43c2a1 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/BaseGmsTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/BaseGmsTask.java @@ -17,10 +17,14 @@ package com.alibaba.polardbx.executor.ddl.job.task; import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.ddl.job.meta.CommonMetaChanger; +import com.alibaba.polardbx.gms.listener.impl.MetaDbDataIdBuilder; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.sync.TableMetaChangePreemptiveSyncAction; import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import org.apache.commons.lang3.StringUtils; import java.sql.Connection; import java.util.concurrent.TimeUnit; @@ -56,9 +60,11 @@ protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionC protected void onRollbackSuccess(ExecutionContext executionContext) { //this sync invocation may be deleted in the future //CommonMetaChanger.sync(MetaDbDataIdBuilder.getTableDataId(schemaName, 
logicalTableName)); - SyncManagerHelper.sync( - new TableMetaChangePreemptiveSyncAction(schemaName, logicalTableName, 1500L, 1500L, - TimeUnit.MICROSECONDS)); + if (!StringUtils.isEmpty(logicalTableName)) { + SyncManagerHelper.sync( + new TableMetaChangePreemptiveSyncAction(schemaName, logicalTableName, 1500L, 1500L, + TimeUnit.MICROSECONDS), SyncScope.ALL); + } } /** diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/BasePhyDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/BasePhyDdlTask.java index 4d1577f7e..e335f7acc 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/BasePhyDdlTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/BasePhyDdlTask.java @@ -31,6 +31,7 @@ import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask; import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlEngineAccessorDelegate; +import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlJobManager; import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlJobManagerUtils; import com.alibaba.polardbx.executor.ddl.newengine.utils.TaskHelper; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; @@ -92,6 +93,11 @@ public void rollbackImpl(ExecutionContext executionContext) { executePhyDdl(rollbackPhysicalPlans, executionContext); } + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + + } + @Override protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionContext executionContext) { DdlEngineAccessor ddlEngineAccessor = new DdlEngineAccessor(); @@ -172,7 +178,7 @@ protected void verifyResult(PhyDdlTableOperation ddl, List exceptions int inputCount = phyDdlExecutionRecord.getNumPhyObjectsTotal(); - if (isRollBackRunning(ddlContext.getState()) || !executionContext.needToRenamePhyTables()) { + if (isRollBackRunning(ddlContext.getState())) { inputCount = 0; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/CostEstimableDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/CostEstimableDdlTask.java index a4e1b3978..d1e9f4d79 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/CostEstimableDdlTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/CostEstimableDdlTask.java @@ -33,34 +33,40 @@ public interface CostEstimableDdlTask { class CostInfo { public final long rows; public final long dataSize; + public final long tableCount; @JSONCreator - private CostInfo(long rows, long dataSize) { + private CostInfo(long rows, long dataSize, long tableCount) { this.rows = rows; this.dataSize = dataSize; + this.tableCount = tableCount; } public static CostInfo combine(CostInfo c1, CostInfo c2) { long rows = 0L; long dataSize = 0L; + long tableCount = 0L; if (c1 != null) { rows += c1.rows; dataSize += c1.dataSize; + tableCount += c1.tableCount; } if (c2 != null) { rows += c2.rows; dataSize += c2.dataSize; + tableCount += c2.tableCount; } - return new CostInfo(rows, dataSize); + return new CostInfo(rows, dataSize, tableCount); } } - static CostInfo createCostInfo(Long rows, Long dataSize) { - return new CostInfo(rows != null ? rows : 0L, dataSize != null ? 
dataSize : 0L); + static CostInfo createCostInfo(Long rows, Long dataSize, Long tableCount) { + return new CostInfo(rows != null ? rows : 0L, dataSize != null ? dataSize : 0L, + tableCount != null ? tableCount : 0L); } static CostInfo aggregate(List costInfoList) { - CostInfo result = new CostInfo(0, 0); + CostInfo result = new CostInfo(0, 0, 0); if (costInfoList == null || costInfoList.size() == 0) { return result; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/ReimportTableChangeMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/ReimportTableChangeMetaTask.java new file mode 100644 index 000000000..6d1e13a2a --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/ReimportTableChangeMetaTask.java @@ -0,0 +1,110 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.ddl.foreignkey.ForeignKeyData; +import com.alibaba.polardbx.executor.ddl.job.meta.TableMetaChanger; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.metadb.seq.SequenceBaseRecord; +import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; +import com.alibaba.polardbx.gms.metadb.table.TablesExtRecord; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import org.apache.calcite.sql.SequenceBean; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; +import java.util.List; +import java.util.Map; + +import static com.alibaba.polardbx.common.constants.SequenceAttribute.AUTO_SEQ_PREFIX; + +/** + * Created by zhuqiwei. 
+ * + * @author zhuqiwei + */ +@Getter +@TaskName(name = "ReimportTableChangeMetaTask") +public class ReimportTableChangeMetaTask extends BaseDdlTask { + private String dbIndex; + private String phyTableName; + private String logicalTableName; + private SequenceBean sequenceBean; + private TablesExtRecord tablesExtRecord; + private boolean partitioned; + private boolean ifNotExists; + private SqlKind sqlKind; + private boolean hasTimestampColumnDefault; + private List addedForeignKeys; + private Map specialDefaultValues; + private Map specialDefaultValueFlags; + + @JSONCreator + public ReimportTableChangeMetaTask(String schemaName, String logicalTableName, String dbIndex, String phyTableName, + SequenceBean sequenceBean, TablesExtRecord tablesExtRecord, + boolean partitioned, boolean ifNotExists, SqlKind sqlKind, + List addedForeignKeys, + boolean hasTimestampColumnDefault, + Map specialDefaultValues, + Map specialDefaultValueFlags) { + super(schemaName); + this.dbIndex = dbIndex; + this.phyTableName = phyTableName; + this.sequenceBean = sequenceBean; + this.tablesExtRecord = tablesExtRecord; + this.partitioned = partitioned; + this.ifNotExists = ifNotExists; + this.sqlKind = sqlKind; + this.addedForeignKeys = addedForeignKeys; + this.hasTimestampColumnDefault = hasTimestampColumnDefault; + this.specialDefaultValues = specialDefaultValues; + this.specialDefaultValueFlags = specialDefaultValueFlags; + this.logicalTableName = logicalTableName; + onExceptionTryRecoveryThenRollback(); + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + //query old meta's table version + TableInfoManager tableInfoManager = new TableInfoManager(); + tableInfoManager.setConnection(metaDbConnection); + long oldVersion = tableInfoManager.getVersionForUpdate(schemaName, logicalTableName); + + TableMetaChanger.removeTableMetaWithoutNotify(metaDbConnection, schemaName, logicalTableName, false, + executionContext); + TableInfoManager.PhyInfoSchemaContext phyInfoSchemaContext = + TableMetaChanger.buildPhyInfoSchemaContextAndCreateSequence(schemaName, + logicalTableName, dbIndex, phyTableName, sequenceBean, tablesExtRecord, partitioned, ifNotExists, + sqlKind, + 0L, executionContext); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + TableMetaChanger.addTableMeta(metaDbConnection, phyInfoSchemaContext, hasTimestampColumnDefault, + executionContext, specialDefaultValues, specialDefaultValueFlags, addedForeignKeys, null, null); + + SequenceBaseRecord sequenceRecord = + tableInfoManager.fetchSequence(schemaName, AUTO_SEQ_PREFIX + logicalTableName); + tableInfoManager.showTable(schemaName, logicalTableName, sequenceRecord); + + //handle table version + tableInfoManager.updateTableVersion(schemaName, logicalTableName, oldVersion + 1); + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/backfill/AlterTableGroupBackFillTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/backfill/AlterTableGroupBackFillTask.java index 33c31dc4a..e5f72e2a9 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/backfill/AlterTableGroupBackFillTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/backfill/AlterTableGroupBackFillTask.java @@ -23,10 +23,13 @@ import com.alibaba.polardbx.executor.ddl.job.task.RemoteExecutableDdlTask; import 
com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.gsi.GsiBackfillManager; +import com.alibaba.polardbx.executor.physicalbackfill.PhysicalBackfillUtils; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.AlterTableGroupBackfill; +import com.alibaba.polardbx.optimizer.core.rel.PhysicalBackfill; import lombok.Getter; +import org.apache.calcite.rel.RelNode; import java.util.Map; import java.util.Set; @@ -41,6 +44,7 @@ public class AlterTableGroupBackFillTask extends BaseBackfillTask implements Rem boolean broadcast; boolean movePartitions; boolean useChangeSet; + boolean usePhysicalBackfill; @JSONCreator public AlterTableGroupBackFillTask(String schemaName, @@ -49,7 +53,8 @@ public AlterTableGroupBackFillTask(String schemaName, Map> targetPhyTables, boolean broadcast, boolean movePartitions, - boolean useChangeSet) { + boolean useChangeSet, + boolean usePhysicalBackfill) { super(schemaName); this.logicalTableName = logicalTableName; this.sourcePhyTables = sourcePhyTables; @@ -57,31 +62,51 @@ public AlterTableGroupBackFillTask(String schemaName, this.broadcast = broadcast; this.movePartitions = movePartitions; this.useChangeSet = useChangeSet; + this.usePhysicalBackfill = usePhysicalBackfill; if (useChangeSet) { // onExceptionTryRollback, such as dn ha - onExceptionTryRollback(); + onExceptionTryRecoveryThenRollback(); } } + @Override + protected void beforeTransaction(ExecutionContext executionContext) { + if (usePhysicalBackfill) { + updateTaskStateInNewTxn(DdlTaskState.DIRTY); + } + executeImpl(executionContext); + } + @Override protected void executeImpl(ExecutionContext executionContext) { updateTaskStateInNewTxn(DdlTaskState.DIRTY); executionContext = executionContext.copy(); executionContext.setBackfillId(getTaskId()); executionContext.setSchemaName(schemaName); - AlterTableGroupBackfill backFillPlan = - AlterTableGroupBackfill - .createAlterTableGroupBackfill(schemaName, logicalTableName, executionContext, sourcePhyTables, - targetPhyTables, broadcast, movePartitions, useChangeSet); FailPoint.injectRandomExceptionFromHint(executionContext); FailPoint.injectRandomSuspendFromHint(executionContext); - ExecutorHelper.execute(backFillPlan, executionContext); + if (usePhysicalBackfill && !broadcast) { + final RelNode executablePhyBackfillPlan = + PhysicalBackfill.createPhysicalBackfill(schemaName, logicalTableName, executionContext, sourcePhyTables, + targetPhyTables, broadcast, null); + ExecutorHelper.execute(executablePhyBackfillPlan, executionContext); + } else { + final RelNode executableLogicalBackfillPlan = AlterTableGroupBackfill + .createAlterTableGroupBackfill(schemaName, logicalTableName, executionContext, sourcePhyTables, + targetPhyTables, broadcast, movePartitions, useChangeSet); + ExecutorHelper.execute(executableLogicalBackfillPlan, executionContext); + } } @Override protected void rollbackImpl(ExecutionContext executionContext) { - GsiBackfillManager gsiBackfillManager = new GsiBackfillManager(schemaName); - gsiBackfillManager.deleteByBackfillId(getTaskId()); + if (usePhysicalBackfill) { + //cleanup idb file + PhysicalBackfillUtils.rollbackCopyIbd(getTaskId(), schemaName, logicalTableName, 0, executionContext); + } else { + GsiBackfillManager gsiBackfillManager = new GsiBackfillManager(schemaName); + gsiBackfillManager.deleteByBackfillId(getTaskId()); + } } public static String getTaskName() { diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/backfill/LogicalTableBackFillTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/backfill/LogicalTableBackFillTask.java index 2ceb2cc3a..c94b0568e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/backfill/LogicalTableBackFillTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/backfill/LogicalTableBackFillTask.java @@ -26,6 +26,7 @@ import com.alibaba.polardbx.optimizer.core.rel.GsiBackfill; import lombok.Getter; +import java.util.List; import java.util.Map; @TaskName(name = "LogicalTableBackFillTask") @@ -35,16 +36,31 @@ public class LogicalTableBackFillTask extends BaseBackfillTask implements Remote public String sourceTableName; public String targetTableName; public Map virtualColumns; + public Map backfillColumnMap; + public List modifyStringColumns; + public boolean useChangeSet; + public boolean modifyColumn; + public boolean mirrorCopy; @JSONCreator public LogicalTableBackFillTask(String schemaName, String sourceTableName, String targetTableName, - Map virtualColumns) { + Map virtualColumns, + Map backfillColumnMap, + List modifyStringColumns, + boolean useChangeSet, + boolean mirrorCopy, + boolean modifyColumn) { super(schemaName); this.sourceTableName = sourceTableName; this.targetTableName = targetTableName; this.virtualColumns = virtualColumns; + this.backfillColumnMap = backfillColumnMap; + this.modifyStringColumns = modifyStringColumns; + this.useChangeSet = useChangeSet; + this.modifyColumn = modifyColumn; + this.mirrorCopy = mirrorCopy; onExceptionTryRecoveryThenRollback(); } @@ -54,6 +70,11 @@ protected void executeImpl(ExecutionContext executionContext) { executionContext.setBackfillId(getTaskId()); GsiBackfill backFillPlan = GsiBackfill.createGsiBackfill(schemaName, sourceTableName, targetTableName, executionContext); + backFillPlan.setUseChangeSet(useChangeSet); + backFillPlan.setModifyColumn(modifyColumn); + backFillPlan.setMirrorCopy(mirrorCopy); + backFillPlan.setModifyStringColumns(modifyStringColumns); + backFillPlan.setBackfillColumnMap(backfillColumnMap); backFillPlan.setVirtualColumnMap(virtualColumns); FailPoint.injectRandomExceptionFromHint(executionContext); FailPoint.injectRandomSuspendFromHint(executionContext); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/backfill/MoveTableBackFillTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/backfill/MoveTableBackFillTask.java index 9ee0d88b1..2241ea21a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/backfill/MoveTableBackFillTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/backfill/MoveTableBackFillTask.java @@ -22,11 +22,14 @@ import com.alibaba.polardbx.executor.ddl.job.task.BaseBackfillTask; import com.alibaba.polardbx.executor.ddl.job.task.RemoteExecutableDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.physicalbackfill.PhysicalBackfillUtils; import com.alibaba.polardbx.executor.gsi.GsiBackfillManager; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.rel.PhysicalBackfill; import com.alibaba.polardbx.optimizer.core.rel.MoveTableBackfill; import lombok.Getter; +import 
org.apache.calcite.rel.RelNode; import java.util.Map; import java.util.Set; @@ -44,7 +47,7 @@ public class MoveTableBackFillTask extends BaseBackfillTask implements RemoteExe Map> sourcePhyTables; Map> targetPhyTables; Map sourceTargetGroup; - + boolean broadcast; boolean useChangeSet; @JSONCreator @@ -53,13 +56,24 @@ public MoveTableBackFillTask(String schemaName, Map> sourcePhyTables, Map> targetPhyTables, Map sourceTargetGroup, + boolean broadcast, boolean useChangeSet) { super(schemaName); this.logicalTableName = logicalTableName; this.sourcePhyTables = sourcePhyTables; this.targetPhyTables = targetPhyTables; this.sourceTargetGroup = sourceTargetGroup; + this.broadcast = broadcast; this.useChangeSet = useChangeSet; + if (useChangeSet) { + // onExceptionTryRollback, such as dn ha + onExceptionTryRecoveryThenRollback(); + } + } + + @Override + protected void beforeTransaction(ExecutionContext executionContext) { + executeImpl(executionContext); } @Override @@ -68,12 +82,13 @@ protected void executeImpl(ExecutionContext executionContext) { executionContext = executionContext.copy(); executionContext.setBackfillId(getTaskId()); executionContext.setSchemaName(schemaName); - MoveTableBackfill backFillPlan = + + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + final MoveTableBackfill backFillPlan = MoveTableBackfill .createMoveTableBackfill(schemaName, logicalTableName, executionContext, sourcePhyTables, targetPhyTables, sourceTargetGroup, useChangeSet); - FailPoint.injectRandomExceptionFromHint(executionContext); - FailPoint.injectRandomSuspendFromHint(executionContext); ExecutorHelper.execute(backFillPlan, executionContext); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AddLocalPartitionTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AddLocalPartitionTask.java index c90dbcde0..f71dc27ad 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AddLocalPartitionTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AddLocalPartitionTask.java @@ -18,11 +18,11 @@ import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.scheduler.SchedulePolicy; -import com.alibaba.polardbx.gms.scheduler.ScheduledJobExecutorType; import com.alibaba.polardbx.executor.ddl.job.meta.TableMetaChanger; import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.scheduler.ScheduledJobsManager; +import com.alibaba.polardbx.gms.scheduler.ScheduledJobExecutorType; import com.alibaba.polardbx.gms.scheduler.ScheduledJobsRecord; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.partition.common.LocalPartitionDefinitionInfo; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterForeignKeyTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterForeignKeyTask.java index 1119827ec..1c8f0ebe5 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterForeignKeyTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterForeignKeyTask.java @@ -44,6 +44,7 @@ public class AlterForeignKeyTask extends BaseGmsTask { private String 
phyTableName; private List addedForeignKeys; private List droppedForeignKeys; + private boolean withoutIndex; @JSONCreator public AlterForeignKeyTask(String schemaName, @@ -51,12 +52,14 @@ public AlterForeignKeyTask(String schemaName, String dbIndex, String phyTableName, List addedForeignKeys, - List droppedForeignKeys) { + List droppedForeignKeys, + boolean withoutIndex) { super(schemaName, logicalTableName); this.dbIndex = dbIndex; this.phyTableName = phyTableName; this.addedForeignKeys = addedForeignKeys; this.droppedForeignKeys = droppedForeignKeys; + this.withoutIndex = withoutIndex; } @Override @@ -64,7 +67,7 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi FailPoint.injectRandomExceptionFromHint(executionContext); FailPoint.injectRandomSuspendFromHint(executionContext); TableMetaChanger.addForeignKeyMeta(metaDbConnection, schemaName, logicalTableName, dbIndex, phyTableName, - addedForeignKeys); + addedForeignKeys, withoutIndex); TableMetaChanger.dropForeignKeyMeta(metaDbConnection, schemaName, logicalTableName, dbIndex, phyTableName, droppedForeignKeys); } @@ -86,7 +89,7 @@ protected void rollbackImpl(Connection metaDbConnection, ExecutionContext execut tableMeta.getForeignKeys().get(schemaName + "/" + logicalTableName + "/" + indexName)); } TableMetaChanger.addForeignKeyMeta(metaDbConnection, schemaName, logicalTableName, dbIndex, phyTableName, - rollbackAddedFks); + rollbackAddedFks, withoutIndex); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTableAddLogicalForeignKeyValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTableAddLogicalForeignKeyValidateTask.java new file mode 100644 index 000000000..c588e5b8c --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTableAddLogicalForeignKeyValidateTask.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.ddl.foreignkey.ForeignKeyData; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.executor.ddl.job.task.BaseValidateTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.ddl.job.validator.GsiValidator; +import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; +import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +import java.util.Set; +import java.util.TreeSet; +import java.util.stream.Collectors; + +@Getter +@TaskName(name = "AlterTableAddLogicalForeignKeyValidateTask") +public class AlterTableAddLogicalForeignKeyValidateTask extends BaseValidateTask { + private String tableName; + private Long tableVersion; + private ForeignKeyData fk; + + private transient TableMeta tableMeta; + + @JSONCreator + public AlterTableAddLogicalForeignKeyValidateTask(String schemaName, String tableName, ForeignKeyData fk, + Long tableVersion) { + super(schemaName); + this.tableName = tableName; + this.fk = fk; + this.tableVersion = tableVersion; + } + + @Override + public void executeImpl(ExecutionContext executionContext) { + TableValidator.validateTableExistence(schemaName, tableName, executionContext); + GsiValidator.validateAllowDdlOnTable(schemaName, tableName, executionContext); + + this.tableMeta = OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(tableName); + + if (this.tableMeta == null) { + throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_TABLE, schemaName, tableName); + } + + if (tableMeta.getVersion() < tableVersion.longValue()) { + throw new TddlRuntimeException(ErrorCode.ERR_TABLE_META_TOO_OLD, schemaName, tableName); + } + + Set constraints = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); + constraints.addAll( + tableMeta.getForeignKeys().values().stream().map(c -> c.constraint).collect(Collectors.toList())); + + checkFkConstraintsExists(constraints, fk.constraint); + } + + private void checkFkConstraintsExists(Set constraints, String constraintName) { + if (constraints.contains(constraintName)) { + throw new TddlRuntimeException(ErrorCode.ERR_DUPLICATE_NAME_FK_CONSTRAINT, constraintName); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTableChangeMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTableChangeMetaTask.java index af14eb30e..1f60ad3ad 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTableChangeMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTableChangeMetaTask.java @@ -71,6 +71,8 @@ public class AlterTableChangeMetaTask extends BaseGmsTask { private boolean onlineModifyColumnIndexTask; + private final long versionId; + public AlterTableChangeMetaTask(String schemaName, String logicalTableName, String dbIndex, @@ -95,7 +97,8 @@ public AlterTableChangeMetaTask(String schemaName, String tableComment, String tableRowFormat, SequenceBean sequenceBean, - boolean onlineModifyColumnIndexTask) { + boolean onlineModifyColumnIndexTask, + long versionId) { 
super(schemaName, logicalTableName); this.dbIndex = dbIndex; this.phyTableName = phyTableName; @@ -120,6 +123,7 @@ public AlterTableChangeMetaTask(String schemaName, this.tableRowFormat = tableRowFormat; this.sequenceBean = sequenceBean; this.onlineModifyColumnIndexTask = onlineModifyColumnIndexTask; + this.versionId = versionId; } @Override @@ -136,6 +140,12 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi renamedIndexes, primaryKeyDropped, addedPrimaryKeyColumns, columnAfterAnother, requireLogicalColumnOrder, tableComment, tableRowFormat, sequenceBean, onlineModifyColumnIndexTask, changeFileStore, executionContext); + + // Change columnar table meta in same transaction + // columnar_table_mapping, columnar_table_evolution, columnar_column_evolution + TableMetaChanger.changeColumnarTableMeta(metaDbConnection, schemaName, logicalTableName, addedColumns, + droppedColumns, updatedColumns, changedColumns, versionId, jobId); + List alterColumnList = new ArrayList<>(); if (updatedColumns != null) { alterColumnList.addAll(updatedColumns); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTableHideMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTableHideMetaTask.java index 54053dd6e..e94c36c7a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTableHideMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTableHideMetaTask.java @@ -23,6 +23,7 @@ import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.sync.TableMetaChangeSyncAction; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -55,7 +56,7 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi protected void rollbackImpl(Connection metaDbConnection, ExecutionContext executionContext) { TableMetaChanger.showTableMeta(metaDbConnection, schemaName, logicalTableName, columnNames, indexNames); // Refresh table meta to make hidden columns visible after rollback. 
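The sync calls below now pass an explicit `SyncScope.ALL`, which appears to broadcast the table-meta invalidation to all nodes rather than a narrower default scope; the pattern is broadcast first, then refresh the local cache. A toy sketch of a scoped broadcast, using hypothetical stand-ins rather than the real `SyncManagerHelper` API:

```java
import java.util.ArrayList;
import java.util.List;

// Illustration only: hypothetical stand-ins for a scoped cluster sync.
public class ScopedSyncDemo {
    enum SyncScope { MASTER_ONLY, ALL }

    static class Node {
        final String name;
        final boolean master;
        Node(String name, boolean master) { this.name = name; this.master = master; }
    }

    static final List<Node> NODES = new ArrayList<>(List.of(
        new Node("cn-master", true), new Node("cn-replica", false)));

    // Broadcast a table-meta invalidation to every node the scope covers.
    static void sync(String schema, String table, SyncScope scope) {
        for (Node node : NODES) {
            if (scope == SyncScope.MASTER_ONLY && !node.master) {
                continue; // a narrower scope would skip replica nodes
            }
            System.out.println(node.name + ": invalidate " + schema + "." + table);
        }
    }

    public static void main(String[] args) {
        // A full-scope sync makes hidden/shown columns visible again on every node.
        sync("db1", "t1", SyncScope.ALL);
    }
}
```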
- SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName)); + SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName), SyncScope.ALL); executionContext.refreshTableMeta(); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTableInsertColumnsMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTableInsertColumnsMetaTask.java index 376ffb431..c651971f6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTableInsertColumnsMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTableInsertColumnsMetaTask.java @@ -22,6 +22,7 @@ import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.sync.TableMetaChangeSyncAction; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; import org.apache.commons.collections.CollectionUtils; @@ -66,7 +67,7 @@ public void rollbackImpl(Connection metaDbConnection, ExecutionContext execution null, addedColumns); //sync have to be successful to continue - SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName)); + SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName), SyncScope.ALL); executionContext.refreshTableMeta(); LOGGER.info(String.format("Rollback Insert GSI columns meta. schema:%s, table:%s, index:%s", diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTablePhyDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTablePhyDdlTask.java index be3af84d2..7b496bc96 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTablePhyDdlTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTablePhyDdlTask.java @@ -63,6 +63,12 @@ public class AlterTablePhyDdlTask extends BasePhyDdlTask { private String rollbackSqlTemplate; + private Long shadowTableId; + + private String shadowTableName; + + private Boolean shadowTableAltered; + public void setSourceSql(String sourceSql) { this.sourceSql = sourceSql; } @@ -86,9 +92,13 @@ public void executeImpl(ExecutionContext executionContext) { if (!executionContext.getParamManager().getBoolean(PHYSICAL_DDL_TASK_RETRY)) { onExceptionTryRollback(); } + String origSql = StringUtils.isNotEmpty(sourceSql) ? sourceSql : executionContext.getDdlContext().getDdlStmt(); try { super.executeImpl(executionContext); + if (!AlterTableRollbacker.checkIfRollbackable(origSql)) { + updateSupportedCommands(true, false, null); + } } catch (PhysicalDdlException e) { int successCount = e.getSuccessCount(); if (successCount == 0) { @@ -96,7 +106,7 @@ public void executeImpl(ExecutionContext executionContext) { } else { // Some physical DDLs failed && they do not support rollback, // so we forbid CANCEL DDL command here. 
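`checkIfRollbackable` is now evaluated against the original statement (`sourceSql` when set, falling back to the DDL context's statement), both after success and after partial failure, so CANCEL support is decided consistently from the logical SQL. A toy classifier in the same spirit (not the real `AlterTableRollbacker` logic, whose rules are richer):

```java
import java.util.Locale;

// Illustration only: a toy stand-in for deciding whether an ALTER can be rolled back.
public class RollbackableDemo {
    // ADD COLUMN can be undone by DROP COLUMN; DROP COLUMN cannot be undone,
    // because the column data on the physical shards is already gone.
    static boolean checkIfRollbackable(String alterSql) {
        String sql = alterSql.toUpperCase(Locale.ROOT);
        return !sql.contains("DROP COLUMN");
    }

    public static void main(String[] args) {
        System.out.println(checkIfRollbackable("ALTER TABLE t ADD COLUMN c1 int")); // true
        System.out.println(checkIfRollbackable("ALTER TABLE t DROP COLUMN c1"));    // false
    }
}
```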
- if (!AlterTableRollbacker.checkIfRollbackable(executionContext.getDdlContext().getDdlStmt())) { + if (!AlterTableRollbacker.checkIfRollbackable(origSql)) { updateSupportedCommands(true, false, null); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTableValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTableValidateTask.java index fa0171a5b..b6f7079e4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTableValidateTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AlterTableValidateTask.java @@ -18,24 +18,41 @@ import com.alibaba.fastjson.annotation.JSONCreator; import com.alibaba.polardbx.common.TddlConstants; +import com.alibaba.polardbx.common.ddl.Attribute; import com.alibaba.polardbx.common.ddl.foreignkey.ForeignKeyData; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.common.utils.TStringUtil; +import com.alibaba.polardbx.common.utils.version.InstanceVersion; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableItem; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableRenameColumn; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableStatement; import com.alibaba.polardbx.executor.ddl.job.task.BaseValidateTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.ddl.job.validator.GsiValidator; import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; +import com.alibaba.polardbx.gms.metadb.table.ColumnsRecord; import com.alibaba.polardbx.gms.metadb.table.IndexStatus; +import com.alibaba.polardbx.gms.lbac.LBACSecurityManager; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; import com.alibaba.polardbx.optimizer.config.table.GeneratedColumnUtil; import com.alibaba.polardbx.optimizer.config.table.GsiMetaManager; +import com.alibaba.polardbx.optimizer.config.table.IndexMeta; +import com.alibaba.polardbx.optimizer.config.table.SchemaManager; import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTablePreparedData; import com.alibaba.polardbx.optimizer.parse.FastsqlParser; +import com.alibaba.polardbx.optimizer.parse.FastsqlUtils; import com.alibaba.polardbx.optimizer.partition.PartitionInfo; import com.alibaba.polardbx.optimizer.sql.sql2rel.TddlSqlToRelConverter; +import com.alibaba.polardbx.optimizer.utils.MetaUtils; import com.alibaba.polardbx.rule.TableRule; import lombok.Getter; import org.apache.calcite.sql.SqlAddColumn; @@ -50,6 +67,7 @@ import org.apache.calcite.sql.SqlAlterTable; import org.apache.calcite.sql.SqlAlterTableDropIndex; import org.apache.calcite.sql.SqlAlterTableRenameIndex; +import org.apache.calcite.sql.SqlBinaryStringLiteral; import org.apache.calcite.sql.SqlCall; import org.apache.calcite.sql.SqlChangeColumn; import org.apache.calcite.sql.SqlColumnDeclaration; @@ -57,9 +75,20 
@@ import org.apache.calcite.sql.SqlDataTypeSpec; import org.apache.calcite.sql.SqlDropColumn; import org.apache.calcite.sql.SqlDropForeignKey; +import org.apache.calcite.sql.SqlDropPrimaryKey; +import org.apache.calcite.sql.SqlIdentifier; import org.apache.calcite.sql.SqlIndexColumnName; +import org.apache.calcite.sql.SqlIndexDefinition; +import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlModifyColumn; - +import org.apache.commons.collections.CollectionUtils; + +import java.sql.Types; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeMap; @@ -83,17 +112,30 @@ public class AlterTableValidateTask extends BaseValidateTask { private transient TableMeta tableMeta; + private Boolean pushDownMultipleStatement; + private String tableName; private Long tableVersion; private TableGroupConfig tableGroupConfig; @JSONCreator + public AlterTableValidateTask(String schemaName, String tableName, String stmt, Long tableVersion, + Boolean pushDownMultipleStatement, TableGroupConfig tableGroupConfig) { + super(schemaName); + this.tableName = tableName; + this.stmt = stmt; + this.tableVersion = tableVersion; + this.pushDownMultipleStatement = pushDownMultipleStatement; + this.tableGroupConfig = TableGroupConfig.copyWithoutTables(tableGroupConfig); + } + public AlterTableValidateTask(String schemaName, String tableName, String stmt, Long tableVersion, TableGroupConfig tableGroupConfig) { super(schemaName); this.tableName = tableName; this.stmt = stmt; this.tableVersion = tableVersion; + this.pushDownMultipleStatement = false; this.tableGroupConfig = TableGroupConfig.copyWithoutTables(tableGroupConfig); } @@ -112,8 +154,7 @@ public void executeImpl(ExecutionContext executionContext) { throw new TddlRuntimeException(ErrorCode.ERR_TABLE_META_TOO_OLD, schemaName, tableName); } if (OptimizerContext.getContext(schemaName).getRuleManager() != null) { - this.tableRule = - OptimizerContext.getContext(schemaName).getRuleManager().getTddlRule().getTable(tableName); + this.tableRule = OptimizerContext.getContext(schemaName).getRuleManager().getTddlRule().getTable(tableName); } else { this.tableRule = null; } @@ -125,12 +166,9 @@ public void executeImpl(ExecutionContext executionContext) { this.partitionInfo = null; } - SqlAlterTable sqlAlterTable = (SqlAlterTable) new FastsqlParser() - .parse(stmt, executionContext) - .get(0); + SqlAlterTable sqlAlterTable = (SqlAlterTable) new FastsqlParser().parse(stmt, executionContext).get(0); - final boolean checkForeignKey = - executionContext.foreignKeyChecks(); + final boolean checkForeignKey = executionContext.foreignKeyChecks(); Map referencedColumns = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); for (ForeignKeyData data : tableMeta.getReferencedForeignKeys().values()) { for (String refColumn : data.refColumns) { @@ -161,20 +199,25 @@ public void executeImpl(ExecutionContext executionContext) { Set indexes = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); Set indexesBeforeDdl = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); - indexes - .addAll(tableMeta.getAllIndexes().stream().map(i -> i.getPhysicalIndexName()).collect(Collectors.toList())); - indexesBeforeDdl - .addAll(tableMeta.getAllIndexes().stream().map(i -> i.getPhysicalIndexName()).collect(Collectors.toList())); + indexes.addAll( + tableMeta.getAllIndexes().stream().map(i -> i.getPhysicalIndexName()).collect(Collectors.toList())); + 
indexesBeforeDdl.addAll( + tableMeta.getAllIndexes().stream().map(i -> i.getPhysicalIndexName()).collect(Collectors.toList())); GsiMetaManager.GsiMetaBean gsiMetaBean = OptimizerContext.getContext(schemaName).getLatestSchemaManager().getGsi(tableName, IndexStatus.ALL); boolean existsPrimary = tableMeta.getPrimaryIndex() != null; + if (sqlAlterTable.getAlters().size() > 1 && pushDownMultipleStatement) { + validateMultipleStatement(tableMeta, sqlAlterTable); + return; + } for (SqlAlterSpecification alterItem : sqlAlterTable.getAlters()) { switch (alterItem.getKind()) { case ADD_COLUMN: checkColumnNotExists(columns, ((SqlAddColumn) alterItem).getColName().getLastName()); + checkWithCci(executionContext, alterItem.getKind()); if (((SqlAddColumn) alterItem).getAfterColumn() != null) { checkColumnExists(columns, ((SqlAddColumn) alterItem).getAfterColumn().getLastName()); } @@ -187,6 +230,7 @@ public void executeImpl(ExecutionContext executionContext) { String columnName = ((SqlDropColumn) alterItem).getColName().getLastName(); checkColumnExists(columnsBeforeDdl, columnName); checkModifyShardingKey(columnName); + checkWithCci(executionContext, alterItem.getKind()); if (checkForeignKey) { checkFkDropColumn(referencedColumns, referencingColumns, columnName); } @@ -200,6 +244,7 @@ public void executeImpl(ExecutionContext executionContext) { case MODIFY_COLUMN: checkColumnExists(columnsBeforeDdl, ((SqlModifyColumn) alterItem).getColName().getLastName()); + checkWithCci(executionContext, alterItem.getKind()); if (((SqlModifyColumn) alterItem).getAfterColumn() != null) { checkColumnExists(columns, ((SqlModifyColumn) alterItem).getAfterColumn().getLastName()); } @@ -211,10 +256,13 @@ public void executeImpl(ExecutionContext executionContext) { case ALTER_COLUMN_DEFAULT_VAL: checkColumnExists(columnsBeforeDdl, ((SqlAlterColumnDefaultVal) alterItem).getColumnName().getLastName()); + checkWithCci(executionContext, alterItem.getKind()); break; case CHANGE_COLUMN: checkColumnExists(columnsBeforeDdl, ((SqlChangeColumn) alterItem).getOldName().getLastName()); + checkWithCci(executionContext, alterItem.getKind()); + checkLBAC(((SqlChangeColumn) alterItem).getOldName().getLastName()); columns.remove(((SqlChangeColumn) alterItem).getOldName().getLastName()); checkColumnNotExists(columns, ((SqlChangeColumn) alterItem).getNewName().getLastName()); if (((SqlChangeColumn) alterItem).getAfterColumn() != null) { @@ -274,6 +322,13 @@ public void executeImpl(ExecutionContext executionContext) { break; case ALTER_RENAME_INDEX: + final SqlAlterTableRenameIndex renameIndex = (SqlAlterTableRenameIndex) alterItem; + if (null != renameIndex.getOriginIndexName() && tableMeta.withCci( + renameIndex.getOriginIndexName().getLastName())) { + throw new TddlRuntimeException(ErrorCode.ERR_OPTIMIZER, "Do not support rename cci " + + renameIndex.getOriginIndexName().getLastName()); + } + checkIndexExists(indexes, ((SqlAlterTableRenameIndex) alterItem).getIndexName().getLastName()); indexes.remove(((SqlAlterTableRenameIndex) alterItem).getIndexName().getLastName()); checkIndexNotExists(indexes, ((SqlAlterTableRenameIndex) alterItem).getNewIndexName().getLastName()); @@ -323,24 +378,330 @@ public void executeImpl(ExecutionContext executionContext) { } } - if (columns.size() == 1 && columns - .contains(TddlConstants.IMPLICIT_COL_NAME)) { // no columns without implicit primary key + if (columns.size() == 1 && columns.contains( + TddlConstants.IMPLICIT_COL_NAME)) { // no columns without implicit primary key throw new 
TddlRuntimeException(ErrorCode.ERR_DROP_ALL_COLUMNS); } if (tableGroupConfig != null) { TableValidator.validateTableGroupChange(schemaName, tableGroupConfig); } + + checkRenameColumn(); + } + + private void checkRenameColumn() { + SQLAlterTableStatement alterTableStatement = (SQLAlterTableStatement) FastsqlUtils.parseSql(stmt).get(0); + for (SQLAlterTableItem item : alterTableStatement.getItems()) { + if (item instanceof SQLAlterTableRenameColumn) { + checkLBAC(((SQLAlterTableRenameColumn) item).getColumn().getSimpleName()); + } + } + } + + private void checkLBAC(String columnName) { + if (LBACSecurityManager.getInstance().getColumnLabel(schemaName, tableName, columnName) != null) { + throw new TddlRuntimeException(ErrorCode.ERR_LBAC, "the column has a security label"); + } + } + + // refer: sql_table.cc#mysql_prepare_alter_table + private void validateMultipleStatement(TableMeta tableMeta, SqlAlterTable sqlAlterTable) { + boolean existsPrimary = tableMeta.getPrimaryIndex() != null; + List alterItems = new LinkedList<>(sqlAlterTable.getAlters()); + // oldColumnMetas: all columns of the original table + // oldIndexes: all indexes of the original table + // newIndexes: all indexes of the new table + // newColumns: all columns of the new table + List oldColumnMetas = tableMeta.getAllColumns(); + List oldIndexes = tableMeta.getSecondaryIndexes(); + List newIndexes = new ArrayList<>(oldIndexes.size() + alterItems.size()); + List newIndexeDefs = new ArrayList<>(newIndexes.size()); + LinkedList newColumns = new LinkedList<>(); + List> afterColumns = new ArrayList<>(); + Iterator iter; + + for (ColumnMeta field : oldColumnMetas) { + String fieldName = field.getName(); + List alterItemsForThisField = findAlterItems(alterItems, fieldName); + // If there is no add, drop or other alter action for this column, keep it in newColumns unchanged + if (alterItemsForThisField.isEmpty()) { + newColumns.add(fieldName); + } else { + for (SqlAlterSpecification alterItem : alterItemsForThisField) { + switch (alterItem.getKind()) { + case DROP_COLUMN: + if (existsPrimary) { + if (tableMeta.getPrimaryIndex().getKeyColumn(fieldName) != null) { + // Not supported in Multiple Statement DDL. 
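+ // Dropping a column that belongs to the primary key would change the primary key itself, so it is rejected here.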
+ throw new TddlRuntimeException(ErrorCode.ERR_DROP_PRIMARY_KEY); + } + } + alterItems.remove(alterItem); + break; + case MODIFY_COLUMN: + if (((SqlModifyColumn) alterItem).getAfterColumn() != null) { + afterColumns.add( + Pair.of(fieldName, ((SqlModifyColumn) alterItem).getAfterColumn().getLastName())); + } + newColumns.add(fieldName); + break; + case ALTER_COLUMN_DEFAULT_VAL: + newColumns.add(fieldName); + alterItems.remove(alterItem); + break; + case CHANGE_COLUMN: + String newFieldName = ((SqlChangeColumn) alterItem).getNewName().getLastName(); + if (((SqlChangeColumn) alterItem).getAfterColumn() != null) { + afterColumns.add( + Pair.of(newFieldName, ((SqlChangeColumn) alterItem).getAfterColumn().getLastName())); + } + newColumns.add(newFieldName); + break; + } + } + } + } + + iter = alterItems.iterator(); + while (iter.hasNext()) { + SqlAlterSpecification alterItem = iter.next(); + switch (alterItem.getKind()) { + case MODIFY_COLUMN: + if (((SqlModifyColumn) alterItem).getAfterColumn() != null) { + String afterName = ((SqlModifyColumn) alterItem).getAfterColumn().getLastName(); + if (!newColumns.contains(afterName)) { + throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_COLUMN, afterName, tableName); + } + } + iter.remove(); + break; + case CHANGE_COLUMN: + if (((SqlChangeColumn) alterItem).getAfterColumn() != null) { + String afterName = ((SqlChangeColumn) alterItem).getAfterColumn().getLastName(); + if (!newColumns.contains(afterName)) { + throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_COLUMN, afterName, tableName); + } + } + iter.remove(); + break; + case ADD_COLUMN: + // Put the newly added columns at the end; + // their positions are not adjusted here. + if (((SqlAddColumn) alterItem).getAfterColumn() != null) { + String afterName = ((SqlAddColumn) alterItem).getAfterColumn().getLastName(); + if (!newColumns.contains(afterName)) { + throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_COLUMN, afterName, tableName); + } + afterColumns.add(Pair.of(((SqlAddColumn) alterItem).getColName().getLastName(), + afterName)); + } + newColumns.add(((SqlAddColumn) alterItem).getColName().getLastName()); + iter.remove(); + break; + } + } + // Reorder columns according to the AFTER relationships collected from the ADD and CHANGE items + for (Pair afterColumn : afterColumns) { + String columnName = afterColumn.getKey(); + String afterColumnName = afterColumn.getValue(); + int index = newColumns.indexOf(columnName); + if (index == -1) { + throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_COLUMN, columnName, tableName); + } + newColumns.remove(index); + index = newColumns.indexOf(afterColumnName); + if (index == -1) { + throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_COLUMN, afterColumnName, tableName); + } + newColumns.add(index + 1, columnName); + } + + // Any column alter item still left at this point references an unknown column + String columnName; + iter = alterItems.iterator(); + while (iter.hasNext()) { + SqlAlterSpecification alterItem = iter.next(); + switch (alterItem.getKind()) { + case DROP_COLUMN: + columnName = ((SqlDropColumn) alterItem).getColName().getLastName(); + throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_COLUMN, columnName, tableName); + case MODIFY_COLUMN: + columnName = ((SqlModifyColumn) alterItem).getColName().getLastName(); + throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_COLUMN, columnName, tableName); + case ALTER_COLUMN_DEFAULT_VAL: + columnName = ((SqlAlterColumnDefaultVal) alterItem).getColumnName().getLastName(); + throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_COLUMN, columnName, tableName); + case CHANGE_COLUMN: + columnName = ((SqlChangeColumn) 
alterItem).getOldName().getLastName(); + throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_COLUMN, columnName, tableName); + } + } + validateNewColumns(newColumns); + for (IndexMeta indexMeta : oldIndexes) { + boolean drop = false; + iter = alterItems.iterator(); + while (iter.hasNext()) { + SqlAlterSpecification alterItem = iter.next(); + switch (alterItem.getKind()) { + case DROP_INDEX: + if (indexMeta.getPhysicalIndexName() + .equalsIgnoreCase(((SqlAlterTableDropIndex) alterItem).getIndexName().getLastName())) { + drop = true; + iter.remove(); + } + break; + case ALTER_RENAME_INDEX: + if (indexMeta.getPhysicalIndexName() + .equalsIgnoreCase(((SqlAlterTableRenameIndex) alterItem).getOriginIndexName().getLastName())) { + drop = true; + newIndexes.add(((SqlAlterTableRenameIndex) alterItem).getNewIndexName().getLastName()); + iter.remove(); + } + break; + } + } + if (!drop) { + newIndexes.add(indexMeta.getPhysicalIndexName()); + } + } + + iter = alterItems.iterator(); + while (iter.hasNext()) { + SqlAlterSpecification alterItem = iter.next(); + switch (alterItem.getKind()) { + case DROP_INDEX: + throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_KEY, + ((SqlAlterTableDropIndex) alterItem).getIndexName().getLastName()); + case ADD_INDEX: + if (((SqlAddIndex) alterItem).getIndexName() != null) { + newIndexes.add(((SqlAddIndex) alterItem).getIndexName().getLastName()); + } + newIndexeDefs.add(((SqlAddIndex) alterItem).getIndexDef()); + iter.remove(); + break; + case ADD_FULL_TEXT_INDEX: + if (((SqlAddFullTextIndex) alterItem).getIndexName() != null) { + newIndexes.add(((SqlAddFullTextIndex) alterItem).getIndexName().getLastName()); + } + newIndexeDefs.add(((SqlAddFullTextIndex) alterItem).getIndexDef()); + iter.remove(); + break; + case ADD_UNIQUE_INDEX: + if (((SqlAddUniqueIndex) alterItem).getIndexName() != null) { + newIndexes.add(((SqlAddUniqueIndex) alterItem).getIndexName().getLastName()); + } + newIndexeDefs.add(((SqlAddUniqueIndex) alterItem).getIndexDef()); + iter.remove(); + break; + case ADD_SPATIAL_INDEX: + if (((SqlAddSpatialIndex) alterItem).getIndexName() != null) { + newIndexes.add(((SqlAddSpatialIndex) alterItem).getIndexName().getLastName()); + } + newIndexeDefs.add(((SqlAddSpatialIndex) alterItem).getIndexDef()); + iter.remove(); + break; + case ALTER_RENAME_INDEX: + throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_KEY, + ((SqlAlterTableRenameIndex) alterItem).getIndexName().getLastName()); + case ADD_PRIMARY_KEY: + if (DbInfoManager.getInstance().isNewPartitionDb(schemaName)) { + throw new TddlRuntimeException(ErrorCode.ERR_ADD_PRIMARY_KEY); + } else { + Set newColumnSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); + newColumnSet.addAll(newColumns); + for (SqlIndexColumnName column : ((SqlAddPrimaryKey) alterItem).getColumns()) { + checkColumnExists(newColumnSet, column.getColumnName().getLastName()); + } + checkPrimaryKeyNotExists(existsPrimary); + existsPrimary = true; + } + break; + case DROP_PRIMARY_KEY: + if (DbInfoManager.getInstance().isNewPartitionDb(schemaName)) { + throw new TddlRuntimeException(ErrorCode.ERR_DROP_PRIMARY_KEY); + } else { + checkPrimaryKeExists(existsPrimary); + existsPrimary = false; + } + break; + } + } + validateNewIndexes(newIndexes); + } + + private void validateNewColumns(List newColumns) { + if (newColumns.size() == 1 && newColumns.contains(TddlConstants.IMPLICIT_COL_NAME) + || newColumns.isEmpty()) { // no columns without implicit primary key + throw new TddlRuntimeException(ErrorCode.ERR_DROP_ALL_COLUMNS); + } + Set 
newColumnSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); + for (String name : newColumns) { + if (newColumnSet.contains(name)) { + throw new TddlRuntimeException(ErrorCode.ERR_DUPLICATE_COLUMN, name); + } + newColumnSet.add(name); + } + } + + private void validateNewIndexes(List newIndexes) { + Set newIndexSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); + for (String name : newIndexes) { + if (newIndexSet.contains(name)) { + throw new TddlRuntimeException(ErrorCode.ERR_DUPLICATE_KEY, name); + } + newIndexSet.add(name); + } + } + + private void validateColumns(List newColumns) { + Set names = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); + for (String name : newColumns) { + checkColumnNotExists(names, name); + names.add(name); + } + } + + private List findAlterItems(List alterItems, String name) { + List matched = new ArrayList<>(); + String columnName; + for (SqlAlterSpecification alterItem : alterItems) { + switch (alterItem.getKind()) { + case DROP_COLUMN: + columnName = ((SqlDropColumn) alterItem).getColName().getLastName(); + if (name.equalsIgnoreCase(columnName)) { + matched.add(alterItem); + } + continue; + case MODIFY_COLUMN: + columnName = ((SqlModifyColumn) alterItem).getColName().getLastName(); + if (name.equalsIgnoreCase(columnName)) { + matched.add(alterItem); + } + continue; + case ALTER_COLUMN_DEFAULT_VAL: + columnName = ((SqlAlterColumnDefaultVal) alterItem).getColumnName().getLastName(); + if (name.equalsIgnoreCase(columnName)) { + matched.add(alterItem); + } + continue; + case CHANGE_COLUMN: + columnName = ((SqlChangeColumn) alterItem).getOldName().getLastName(); + if (name.equalsIgnoreCase(columnName)) { + matched.add(alterItem); + } + continue; + } + } + return matched; } private void checkDropLocalIndex(String indexName, GsiMetaManager.GsiMetaBean gsiMetaBean) { // 默认主键拆分的表,不允许删除默认生成的 local index - if (tableMeta.isAutoPartition() && !gsiMetaBean.isGsi(indexName) - && tableMeta.getGsiTableMetaBean() != null) { + if (tableMeta.isAutoPartition() && !gsiMetaBean.isGsi(indexName) && tableMeta.getGsiTableMetaBean() != null) { String logicalGsiName = unwrapLocalIndexName(indexName); final String wrapped = tableMeta.getGsiTableMetaBean().indexMap.keySet().stream() - .filter(idx -> TddlSqlToRelConverter.unwrapGsiName(idx).equalsIgnoreCase(logicalGsiName)) - .findFirst().orElse(null); + .filter(idx -> TddlSqlToRelConverter.unwrapGsiName(idx).equalsIgnoreCase(logicalGsiName)).findFirst() + .orElse(null); if (wrapped != null) { throw new TddlRuntimeException(ErrorCode.ERR_AUTO_PARTITION_TABLE, @@ -441,9 +802,8 @@ private void checkColumnType(SqlModifyColumn alterItem, String referencedColumnName = columns.get(columnName).columns.get(columnIndex); if (referenced) { throw new TddlRuntimeException(ErrorCode.ERR_CHANGE_COLUMN_FK_CONSTRAINT, referencingColumnName, - schemaName, - tableName, referencedColumnName, columns.get(columnName).schema, columns.get(columnName).tableName, - columns.get(columnName).constraint); + schemaName, tableName, referencedColumnName, columns.get(columnName).schema, + columns.get(columnName).tableName, columns.get(columnName).constraint); } else { throw new TddlRuntimeException(ErrorCode.ERR_CHANGE_COLUMN_FK_CONSTRAINT, columns.get(columnName).refSchema, columns.get(columnName).refTableName, referencingColumnName, @@ -453,9 +813,9 @@ private void checkColumnType(SqlModifyColumn alterItem, } private void checkNotNull(SqlModifyColumn alterItem, Map columns) { - boolean onSetNull = columns.get(alterItem.getColName().getSimple()).onUpdate.equals( - 
ForeignKeyData.ReferenceOptionType.SET_NULL) || - columns.get(alterItem.getColName().getSimple()).onDelete.equals( + boolean onSetNull = + columns.get(alterItem.getColName().getSimple()).onUpdate.equals(ForeignKeyData.ReferenceOptionType.SET_NULL) + || columns.get(alterItem.getColName().getSimple()).onDelete.equals( ForeignKeyData.ReferenceOptionType.SET_NULL); boolean isNotNull = alterItem.getColDef().getNotNull() == SqlColumnDeclaration.ColumnNull.NOTNULL; boolean isPrimary = alterItem.getColDef().getSpecialIndex() != null && alterItem.getColDef().getSpecialIndex() @@ -468,10 +828,9 @@ private void checkNotNull(SqlModifyColumn alterItem, Map private void checkNotNull(SqlChangeColumn alterItem, Map columns) { boolean onSetNull = columns.get(alterItem.getOldName().getSimple()) != null && ( - columns.get(alterItem.getOldName().getSimple()).onUpdate.equals( - ForeignKeyData.ReferenceOptionType.SET_NULL) || - columns.get(alterItem.getOldName().getSimple()).onDelete.equals( - ForeignKeyData.ReferenceOptionType.SET_NULL)); + columns.get(alterItem.getOldName().getSimple()).onUpdate.equals(ForeignKeyData.ReferenceOptionType.SET_NULL) + || columns.get(alterItem.getOldName().getSimple()).onDelete.equals( + ForeignKeyData.ReferenceOptionType.SET_NULL)); boolean isNotNull = alterItem.getColDef().getNotNull() == SqlColumnDeclaration.ColumnNull.NOTNULL; boolean isPrimary = alterItem.getColDef().getSpecialIndex() != null && alterItem.getColDef().getSpecialIndex() .equals(SqlColumnDeclaration.SpecialIndex.PRIMARY); @@ -493,9 +852,8 @@ private void checkColumnType(SqlChangeColumn alterItem, String referencedColumnName = columns.get(columnName).columns.get(columnIndex); if (referenced) { throw new TddlRuntimeException(ErrorCode.ERR_CHANGE_COLUMN_FK_CONSTRAINT, referencingColumnName, - schemaName, - tableName, referencedColumnName, columns.get(columnName).schema, columns.get(columnName).tableName, - columns.get(columnName).constraint); + schemaName, tableName, referencedColumnName, columns.get(columnName).schema, + columns.get(columnName).tableName, columns.get(columnName).constraint); } else { throw new TddlRuntimeException(ErrorCode.ERR_CHANGE_COLUMN_FK_CONSTRAINT, columns.get(columnName).refSchema, columns.get(columnName).refTableName, referencingColumnName, @@ -505,8 +863,7 @@ private void checkColumnType(SqlChangeColumn alterItem, } private void checkFkDropColumn(Map referencedColumns, - Map referencingColumns, - String columnName) { + Map referencingColumns, String columnName) { if (referencedColumns.containsKey(columnName) || referencingColumns.containsKey(columnName)) { ForeignKeyData data = null; if (referencedColumns.containsKey(columnName)) { @@ -562,6 +919,13 @@ private void checkAddGeneratedColumnOnFk(SqlAddColumn addColumn) { } } + private void checkWithCci(ExecutionContext executionContext, SqlKind sqlKind) { + boolean forbidDdlWithCci = executionContext.getParamManager().getBoolean(ConnectionParams.FORBID_DDL_WITH_CCI); + if (forbidDdlWithCci && tableMeta.withCci()) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_WITH_CCI, sqlKind.name()); + } + } + @Override protected String remark() { return "|logicalTableName: " + tableName; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AnalyzeTablePhyDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AnalyzeTablePhyDdlTask.java index 9a1600885..19cd6ed74 100644 --- 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AnalyzeTablePhyDdlTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AnalyzeTablePhyDdlTask.java @@ -19,9 +19,9 @@ import com.alibaba.druid.util.JdbcUtils; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.utils.LoggerUtil; import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.ddl.newengine.cross.CrossEngineValidator; @@ -45,28 +45,28 @@ import java.util.List; import static com.alibaba.polardbx.common.properties.ConnectionParams.ENABLE_HLL; -import static com.alibaba.polardbx.executor.gms.util.StatisticUtils.forceAnalyzeColumns; +import static com.alibaba.polardbx.common.properties.ConnectionParams.SKIP_PHYSICAL_ANALYZE; import static com.alibaba.polardbx.executor.gms.util.StatisticUtils.forceAnalyzeColumnsDdl; @Getter @TaskName(name = "AnalyzeTablePhyDdlTask") public class AnalyzeTablePhyDdlTask extends BaseDdlTask { - private static final Logger logger = LoggerFactory.getLogger("STATISTICS"); + private static final Logger logger = LoggerUtil.statisticsLogger; public final String ANALYZE_TABLE_SQL = "ANALYZE TABLE "; private List schemaNames; private List tableNames; private List useHll; - private List success; + private List msg; public AnalyzeTablePhyDdlTask(List schemaNames, List tableNames, - List useHll, List success) { + List useHll, List msg) { super(schemaNames.get(0)); this.schemaNames = schemaNames; this.tableNames = tableNames; this.useHll = useHll; - this.success = success; + this.msg = msg; onExceptionTryRollback(); } @@ -81,7 +81,7 @@ public void executeImpl(Connection metaDbConnection, ExecutionContext executionC } List retUseHll = new ArrayList<>(tableNames.size()); - List retSuccess = new ArrayList<>(tableNames.size()); + List retMsg = new ArrayList<>(tableNames.size()); List fullTableName = new ArrayList<>(tableNames.size()); long start = System.currentTimeMillis(); @@ -91,7 +91,6 @@ public void executeImpl(Connection metaDbConnection, ExecutionContext executionC fullTableName.add(schemaName + "." 
+ table); IDataSourceGetter mysqlDsGetter = new DatasourceMySQLImplement(schemaName); - doAnalyzeOneLogicalTable(schemaName, table, mysqlDsGetter, executionContext); retUseHll.add(executionContext.getParamManager().getBoolean(ENABLE_HLL) && SchemaMetaUtil @@ -102,14 +101,14 @@ "no table rule for logicalTableName = " + table + ", analyze this table as the single table!"); } - retSuccess.add(forceAnalyzeColumnsDdl(schemaName, table, executionContext)); + forceAnalyzeColumnsDdl(schemaName, table, retMsg, executionContext); // refresh plan cache - DdlUtils.invalidatePlan(schemaName, table, false); + DdlUtils.invalidatePlanCache(schemaName, table); } this.useHll = retUseHll; - this.success = retSuccess; + this.msg = retMsg; long end = System.currentTimeMillis(); ModuleLogInfo.getInstance() @@ -124,6 +123,15 @@ protected void doAnalyzeOneLogicalTable(String schemaName, String logicalTableName, IDataSourceGetter mysqlDsGetter, ExecutionContext executionContext) { + long startNanos = System.nanoTime(); + if (executionContext != null && executionContext.getParamManager().getBoolean(SKIP_PHYSICAL_ANALYZE)) { + ModuleLogInfo.getInstance().logRecord(Module.STATISTICS, LogPattern.NOT_ENABLED, + new String[] { + "analyze physical table [" + schemaName + "." + logicalTableName + "]", + "SKIP_PHYSICAL_ANALYZE=true"}, + LogLevel.NORMAL); + return; + } List> keys = StatisticManager.getInstance().buildStatisticKey(schemaName, logicalTableName, executionContext); for (Pair key : keys) { @@ -136,6 +144,9 @@ "The job '" + jobId + "' has been cancelled"); } } + long endNanos = System.nanoTime(); + logger.info(String.format("Analyze all phyTables of logical table %s.%s consumed %.2fs", + schemaName, logicalTableName, (endNanos - startNanos) / 1_000_000_000D)); } protected void doAnalyzeOnePhysicalTable(String group, String physicalTableName, IDataSourceGetter mysqlDsGetter) { @@ -149,10 +160,11 @@ PreparedStatement stmt = null; try { conn = ds.getConnection(); - stmt = conn.prepareStatement(ANALYZE_TABLE_SQL + physicalTableName); + String analyzeSql = ANALYZE_TABLE_SQL + physicalTableName; + stmt = conn.prepareStatement(analyzeSql); stmt.execute(); } catch (Exception e) { - logger.error("Analyze physical table " + physicalTableName + " ERROR"); + logger.error("Analyze physical table " + physicalTableName + " ERROR: " + e.getMessage()); } finally { JdbcUtils.close(stmt); JdbcUtils.close(conn); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AtomicTablesSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AtomicTablesSyncTask.java index 5cc6aca02..7f55e1a33 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AtomicTablesSyncTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/AtomicTablesSyncTask.java @@ -21,6 +21,7 @@ import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.sync.TablesMetaChangeCrossDBPreemptiveSyncAction; +import com.alibaba.polardbx.gms.sync.SyncScope; import 
com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -56,6 +57,7 @@ public void executeImpl(ExecutionContext executionContext) { SyncManagerHelper.sync( new TablesMetaChangeCrossDBPreemptiveSyncAction(schemaName, multiSchemas, logicalTables, initWait, interval, timeUnit), + SyncScope.ALL, true); } catch (Throwable t) { LOGGER.error(String.format( @@ -71,6 +73,7 @@ protected void onRollbackSuccess(ExecutionContext executionContext) { SyncManagerHelper.sync( new TablesMetaChangeCrossDBPreemptiveSyncAction(schemaName, multiSchemas, logicalTables, initWait, interval, timeUnit), + SyncScope.ALL, true); } catch (Throwable t) { LOGGER.error(String.format( diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ChangeColumnStatusTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ChangeColumnStatusTask.java index 8c6c0c072..4da0c954a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ChangeColumnStatusTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ChangeColumnStatusTask.java @@ -22,7 +22,6 @@ import com.alibaba.polardbx.gms.metadb.table.ColumnStatus; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; -import org.apache.commons.collections.CollectionUtils; import java.sql.Connection; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ChangeInstanceReadonlyStatusTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ChangeInstanceReadonlyStatusTask.java new file mode 100644 index 000000000..aca019dce --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ChangeInstanceReadonlyStatusTask.java @@ -0,0 +1,94 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.properties.ConnectionProperties; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.ExecUtils; +import com.alibaba.polardbx.gms.util.MetaDbUtil; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +import java.sql.Connection; +import java.sql.SQLException; +import java.text.MessageFormat; +import java.util.Properties; + +/** + * Created by zhuqiwei. 
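+ * Toggles the INSTANCE_READ_ONLY global property in metadb and waits until the new value has propagated to every CN node; rollback sets the property back to its previous value.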
+ * + * @author zhuqiwei + */ +@Getter +@TaskName(name = "ChangeInstanceReadonlyStatusTask") +public class ChangeInstanceReadonlyStatusTask extends BaseDdlTask { + + private final static Logger logger = LoggerFactory.getLogger(ChangeInstanceReadonlyStatusTask.class); + + protected boolean readonly; + + @JSONCreator + public ChangeInstanceReadonlyStatusTask(String schemaName, boolean readonly) { + super(schemaName); + this.readonly = readonly; + onExceptionTryRecoveryThenPause(); + } + + public void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { + try { + setGlobal(readonly); + } catch (SQLException e) { + logger.error(MessageFormat.format("set instance readonly {0} failed", readonly), e); + throw new TddlRuntimeException(ErrorCode.ERR_INSTANCE_READ_ONLY_OPTION_SET_FAILED, readonly + ""); + } + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + executeImpl(metaDbConnection, executionContext); + } + + @Override + protected void onExecutionSuccess(ExecutionContext executionContext) { + } + + @Override + protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + try { + setGlobal(!readonly); + } catch (SQLException e) { + logger.error(MessageFormat.format("rollback instance_read_only to {0} failed!", !readonly), e); + } + } + + private void setGlobal(boolean value) throws SQLException { + Properties properties = new Properties(); + properties.setProperty(ConnectionProperties.INSTANCE_READ_ONLY, String.valueOf(value)); + MetaDbUtil.setGlobal(properties); + // Wait until global value propagates to all CN + String error = ExecUtils.waitVarChange("instanceReadOnly", String.valueOf(value), 10); + if (null != error) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, "set global readonly failed, caused by " + error); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CleanRemovedDbGroupMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CleanRemovedDbGroupMetaTask.java index d63ad4bea..bf053b4e6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CleanRemovedDbGroupMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CleanRemovedDbGroupMetaTask.java @@ -58,7 +58,8 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi String dbName = schemaName; for (int i = 0; i < targetGroupNames.size(); i++) { String targetGrpName = targetGroupNames.get(i); - List groupDetails = groupDetailInfoAccessor.getGroupDetailInfoByDbNameAndGroup(dbName, targetGrpName); + List groupDetails = + groupDetailInfoAccessor.getGroupDetailInfoByDbNameAndGroup(dbName, targetGrpName); /** * One group may contain multi group details because of read-only inst of cn */ diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CloneTableDataFileTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CloneTableDataFileTask.java new file mode 100644 index 000000000..b5b410fd1 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CloneTableDataFileTask.java @@ -0,0 +1,361 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance 
with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.async.AsyncTask; +import com.alibaba.polardbx.common.ddl.newengine.DdlTaskState; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.ddl.newengine.cross.CrossEngineValidator; +import com.alibaba.polardbx.executor.physicalbackfill.PhysicalBackfillManager; +import com.alibaba.polardbx.executor.physicalbackfill.PhysicalBackfillReporter; +import com.alibaba.polardbx.executor.physicalbackfill.PhysicalBackfillUtils; +import com.alibaba.polardbx.gms.partition.PhysicalBackfillDetailInfoFieldJSON; +import com.alibaba.polardbx.gms.topology.DbGroupInfoRecord; +import com.alibaba.polardbx.optimizer.config.table.ScaleOutPlanUtil; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.rpc.client.XSession; +import com.alibaba.polardbx.rpc.pool.XConnection; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; +import com.mysql.cj.polarx.protobuf.PolarxPhysicalBackfill; +import lombok.Getter; +import org.apache.commons.lang3.StringUtils; + +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; +import java.util.concurrent.atomic.AtomicReference; + +import static com.alibaba.polardbx.common.TddlConstants.LONG_ENOUGH_TIMEOUT_FOR_DDL_ON_XPROTO_CONN; + +@Getter +@TaskName(name = "CloneTableDataFileTask") +public class CloneTableDataFileTask extends BaseDdlTask { + + final Pair srcDbAndGroup; + final Pair tarDbAndGroup; + final String logicalTableName; + final String phyTableName; + final List phyPartNames; + final Pair sourceHostIpAndPort; + final List> targetHostsIpAndPort; + final String sourceStorageInstId; + final Long batchSize; + final boolean encrypted; + //don't serialize this parameter + private transient Map> storageInstAndUserInfos = new ConcurrentHashMap<>(); + + @JSONCreator + public CloneTableDataFileTask(String schemaName, String logicalTableName, Pair srcDbAndGroup, + Pair tarDbAndGroup, String phyTableName, List phyPartNames, + String sourceStorageInstId, Pair sourceHostIpAndPort, + List> targetHostsIpAndPort, Long batchSize, Boolean encrypted) { + super(schemaName); + this.srcDbAndGroup = srcDbAndGroup; + this.tarDbAndGroup = tarDbAndGroup; + this.logicalTableName = logicalTableName; + this.phyTableName = phyTableName.toLowerCase(); + this.phyPartNames = phyPartNames; + this.sourceStorageInstId = sourceStorageInstId; + this.sourceHostIpAndPort = sourceHostIpAndPort; + this.targetHostsIpAndPort = targetHostsIpAndPort; + this.batchSize = 
batchSize; + this.encrypted = encrypted; + } + + @Override + protected void beforeTransaction(ExecutionContext executionContext) { + updateTaskStateInNewTxn(DdlTaskState.DIRTY); + executeImpl(executionContext); + } + + @Override + protected void beforeRollbackTransaction(ExecutionContext ec) { + rollbackImpl(ec); + } + + public void executeImpl(ExecutionContext ec) { + PhysicalBackfillManager backfillManager = new PhysicalBackfillManager(schemaName); + PhysicalBackfillReporter reporter = new PhysicalBackfillReporter(backfillManager); + + // On restart of this task, phyPartNames may already contain the empty placeholder, i.e. GeneralUtil.isEmpty(phyPartNames)==false + boolean hasNoPhyPart = + GeneralUtil.isEmpty(phyPartNames) || phyPartNames.size() == 1 && StringUtils.isEmpty(phyPartNames.get(0)); + if (hasNoPhyPart && GeneralUtil.isEmpty(phyPartNames)) { + phyPartNames.add(""); + } + Pair userInfo = storageInstAndUserInfos.computeIfAbsent(sourceStorageInstId, + key -> PhysicalBackfillUtils.getUserPasswd(sourceStorageInstId)); + + Map> srcFileAndDirs = + PhysicalBackfillUtils.getSourceTableInfo(userInfo, srcDbAndGroup.getKey(), phyTableName, phyPartNames, + hasNoPhyPart, sourceHostIpAndPort); + + DbGroupInfoRecord tarDbGroupInfoRecord = ScaleOutPlanUtil.getDbGroupInfoByGroupName(tarDbAndGroup.getValue()); + + initBackfillMeta(reporter, phyPartNames, srcFileAndDirs, tarDbGroupInfoRecord, ec); + cloneInnodbDataFile(hasNoPhyPart, srcFileAndDirs, userInfo, ec); + updateBackfillStatus(reporter, srcFileAndDirs, userInfo, ec); + } + + public void rollbackImpl(ExecutionContext ec) { + PhysicalBackfillUtils.rollbackCopyIbd(getTaskId(), schemaName, logicalTableName, 1, ec); + } + + @Override + public String remark() { + return "|clone data for table:" + phyTableName + " in db:" + srcDbAndGroup.getKey() + " host:" + + sourceHostIpAndPort; + } + + private void initBackfillMeta(PhysicalBackfillReporter reporter, List phyPartNames, + Map> srcFileAndDirs, + DbGroupInfoRecord tarDbGroupInfoRecord, ExecutionContext ec) { + // Use this taskId as the backfill id and pass it to the backfill task as input + PhysicalBackfillManager.BackfillBean initBean = + reporter.loadBackfillMeta(getTaskId(), schemaName, srcDbAndGroup.getKey(), phyTableName, + phyPartNames.get(0)); + if (!initBean.isEmpty()) { + for (String phyPart : phyPartNames) { + PhysicalBackfillManager.BackfillBean backfillBean = + reporter.loadBackfillMeta(getTaskId(), schemaName, srcDbAndGroup.getKey(), phyTableName, phyPart); + assert backfillBean.isInit(); + PhysicalBackfillManager.BackfillObjectBean bean = backfillBean.backfillObject; + try { + PhysicalBackfillDetailInfoFieldJSON detailInfoFieldJSON = bean.detailInfo; + PhysicalBackfillUtils.deleteInnodbDataFiles(schemaName, detailInfoFieldJSON.getSourceHostAndPort(), + bean.sourceDirName, bean.sourceGroupName, bean.physicalDb, true, ec); + } catch (Exception ex) { + //ignore + try { + SQLRecorderLogger.ddlLogger.info(ex.toString()); + } catch (Exception e) { + + } + } + + reporter.getBackfillManager().deleteById(bean.id); + } + } + + for (Map.Entry> entry : srcFileAndDirs.entrySet()) { + Pair partSrcFileAndDir = entry.getValue(); + String tmpDir = partSrcFileAndDir.getValue() + PhysicalBackfillUtils.TEMP_FILE_POSTFIX; + String tmpFile = partSrcFileAndDir.getKey(); + Pair partTempFileAndDir = Pair.of(tmpFile, tmpDir); + + String partTargetFile = partSrcFileAndDir.getKey().substring(srcDbAndGroup.getKey().length()); + partTargetFile = tarDbGroupInfoRecord.phyDbName.toLowerCase() + partTargetFile; + + String partTargetDir = partSrcFileAndDir.getValue() + 
.substring(PhysicalBackfillUtils.IDB_DIR_PREFIX.length() + srcDbAndGroup.getKey().length()); + partTargetDir = + PhysicalBackfillUtils.IDB_DIR_PREFIX + tarDbGroupInfoRecord.phyDbName.toLowerCase() + partTargetDir; + + Pair partTargetFileAndDir = new Pair<>(partTargetFile, partTargetDir); + + reporter.getBackfillManager() + .insertBackfillMeta(schemaName, logicalTableName, getTaskId(), srcDbAndGroup.getKey(), phyTableName, + entry.getKey(), srcDbAndGroup.getValue(), tarDbAndGroup.getValue(), partTempFileAndDir, + partTargetFileAndDir, 0, batchSize, 0, 0, sourceHostIpAndPort, targetHostsIpAndPort); + + } + } + + private void cloneInnodbDataFile(boolean hasNoPhyPart, Map> srcFileAndDirs, + Pair userInfo, ExecutionContext ec) { + + String msg = "begin to clone the files for table:" + phyTableName; + SQLRecorderLogger.ddlLogger.info(msg); + XConnection conn = null; + + boolean success = false; + int tryTime = 1; + StringBuilder copyFileInfo = null; + AtomicReference finished = new AtomicReference<>(false); + do { + try { + copyFileInfo = new StringBuilder(); + conn = (XConnection) (PhysicalBackfillUtils.getXConnectionForStorage(srcDbAndGroup.getKey(), + sourceHostIpAndPort.getKey(), sourceHostIpAndPort.getValue(), userInfo.getKey(), + userInfo.getValue(), -1)); + conn.setNetworkTimeoutNanos(LONG_ENOUGH_TIMEOUT_FOR_DDL_ON_XPROTO_CONN * 1000000L); + conn.execQuery(String.format(PhysicalBackfillUtils.FLUSH_TABLE_SQL_TEMPLATE, phyTableName)); + PolarxPhysicalBackfill.FileManageOperator.Builder builder = + PolarxPhysicalBackfill.FileManageOperator.newBuilder(); + + PolarxPhysicalBackfill.TableInfo.Builder tableInfoBuilder = + PolarxPhysicalBackfill.TableInfo.newBuilder(); + tableInfoBuilder.setTableSchema(srcDbAndGroup.getKey()); + tableInfoBuilder.setTableName(phyTableName); + tableInfoBuilder.setPartitioned(hasNoPhyPart); + int i = 0; + for (Map.Entry> entry : srcFileAndDirs.entrySet()) { + Pair srcFileAndDir = entry.getValue(); + + boolean handleCfgFile = false; + boolean handleIbdFile = false; + boolean needHandleCfpFile = encrypted; + do { + PhysicalBackfillUtils.checkInterrupted(ec, null); + PolarxPhysicalBackfill.FileInfo.Builder srcFileInfoBuilder = + PolarxPhysicalBackfill.FileInfo.newBuilder(); + String fileName = srcFileAndDir.getKey(); + String directory = srcFileAndDir.getValue(); + + if (!handleCfgFile) { + directory = + PhysicalBackfillUtils.convertToCfgFileName(directory, PhysicalBackfillUtils.CFG); + handleCfgFile = true; + } else if (needHandleCfpFile) { + needHandleCfpFile = false; + directory = + PhysicalBackfillUtils.convertToCfgFileName(directory, PhysicalBackfillUtils.CFP); + } else { + handleIbdFile = true; + } + + srcFileInfoBuilder.setFileName(fileName); + srcFileInfoBuilder.setDirectory(directory); + srcFileInfoBuilder.setPartitionName(entry.getKey()); + + PolarxPhysicalBackfill.FileInfo.Builder tmpFileInfoBuilder = + PolarxPhysicalBackfill.FileInfo.newBuilder(); + tmpFileInfoBuilder.setFileName(fileName); + tmpFileInfoBuilder.setDirectory(directory + PhysicalBackfillUtils.TEMP_FILE_POSTFIX); + tmpFileInfoBuilder.setPartitionName(entry.getKey()); + + tableInfoBuilder.addFileInfo(srcFileInfoBuilder.build()); + tableInfoBuilder.addFileInfo(tmpFileInfoBuilder.build()); + if (i > 0) { + copyFileInfo.append(", "); + } + copyFileInfo.append(directory); + i++; + if (handleIbdFile) { + break; + } + } while (true); + } + builder.setTableInfo(tableInfoBuilder.build()); + + builder.setOperatorType(PolarxPhysicalBackfill.FileManageOperator.Type.COPY_IBD_TO_TEMP_DIR_IN_SRC); + + Thread 
parentThread = Thread.currentThread(); + XSession session = conn.getSession(); + finished.set(false); + FutureTask task = new FutureTask<>(() -> { + do { + if (finished.get()) { + break; + } + if (session == null) { + SQLRecorderLogger.ddlLogger.info("exeCloneFile session was terminated, sessionId"); + break; + } + if (parentThread.isInterrupted() || CrossEngineValidator.isJobInterrupted(ec)) { + SQLRecorderLogger.ddlLogger.info( + String.format("exeCloneFile session was cancel, sessionId:%d", session.getSessionId())); + session.cancel(); + break; + } + try { + Thread.sleep(100); + } catch (Exception e) { + //ignore + } + } while (true); + }, null); + Future futureTask = + ec.getExecutorService().submit(ec.getSchemaName(), ec.getTraceId(), AsyncTask.build(task)); + + conn.exeCloneFile(builder); + + finished.set(true); + try { + futureTask.get(); + } catch (Exception ex) { + try { + futureTask.cancel(true); + } catch (Throwable ignore) { + } + } + msg = String.format("already clone the files[%s] for table %s", copyFileInfo, phyTableName); + SQLRecorderLogger.ddlLogger.info(msg); + success = true; + } catch (Exception ex) { + msg = String.format("fail to clone those files:%s, [ip:%s,port:%s,db:%s]", copyFileInfo.toString(), + sourceHostIpAndPort.getKey(), sourceHostIpAndPort.getValue().toString(), srcDbAndGroup.getKey()); + if (ex != null && ex.toString() != null) { + msg += " " + ex.toString(); + } + SQLRecorderLogger.ddlLogger.info(msg); + if (tryTime > PhysicalBackfillUtils.MAX_RETRY) { + throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, ex); + } + PhysicalBackfillUtils.checkInterrupted(ec, null); + tryTime++; + } finally { + try { + finished.set(true); + if (conn != null && !conn.isClosed()) { + try { + conn.execQuery(PhysicalBackfillUtils.UNLOCK_TABLE); + } catch (SQLException e) { + msg = "fail to clone those files:" + copyFileInfo.toString() + " " + e.toString(); + SQLRecorderLogger.ddlLogger.info(msg); + throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, e); + } + } + } catch (SQLException ex) { + msg = "fail to clone those files:" + copyFileInfo.toString() + " " + ex.toString(); + SQLRecorderLogger.ddlLogger.info(msg); + throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, ex); + } + } + } while (!success); + } + + private void updateBackfillStatus(PhysicalBackfillReporter reporter, + Map> srcFileAndDirs, Pair userInfo, + ExecutionContext ec) { + for (Map.Entry> entry : srcFileAndDirs.entrySet()) { + + List> offsetAndSize = new ArrayList<>(); + PhysicalBackfillUtils.getTempIbdFileInfo(userInfo, sourceHostIpAndPort, srcDbAndGroup, phyTableName, + entry.getKey(), entry.getValue(), batchSize, false, offsetAndSize); + + PhysicalBackfillManager.BackfillBean backfillBean = + reporter.loadBackfillMeta(getTaskId(), schemaName, srcDbAndGroup.getKey(), phyTableName, + entry.getKey()); + assert backfillBean.isInit(); + + reporter.getBackfillManager() + .updateStatusAndTotalBatch(backfillBean.backfillObject.id, offsetAndSize.size()); + } + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ConvertAllSequenceValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ConvertAllSequenceValidateTask.java new file mode 100644 index 000000000..ce2aaa73c --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ConvertAllSequenceValidateTask.java @@ -0,0 +1,88 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.executor.ddl.job.task.BaseValidateTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.gms.metadb.table.SchemataAccessor; +import com.alibaba.polardbx.gms.metadb.table.SchemataRecord; +import com.alibaba.polardbx.gms.topology.SystemDbHelper; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * Created by zhuqiwei. + * + * @author zhuqiwei + */ +@Getter +@TaskName(name = "ConvertAllSequenceValidateTask") +public class ConvertAllSequenceValidateTask extends BaseValidateTask { + private List allSchemaNamesTobeConvert; + private boolean onlySingleSchema; + + @JSONCreator + public ConvertAllSequenceValidateTask(List allSchemaNamesTobeConvert, boolean onlySingleSchema) { + super(allSchemaNamesTobeConvert.get(0)); + this.allSchemaNamesTobeConvert = allSchemaNamesTobeConvert; + this.onlySingleSchema = onlySingleSchema; + } + + @Override + public void executeImpl(ExecutionContext executionContext) { + final Set userSchemata = new HashSet<>(); + List schemataRecords = SchemataAccessor.getAllSchemata(); + schemataRecords.stream() + .filter(s -> !SystemDbHelper.isDBBuildIn(s.schemaName)) + .forEach(s -> userSchemata.add(s.schemaName.toLowerCase())); + + if (!onlySingleSchema) { + Set schemasBefore = allSchemaNamesTobeConvert.stream().map( + x -> x.toLowerCase() + ).collect(Collectors.toSet()); + + Set schemasNow = userSchemata; + for (String before : schemasBefore) { + if (!schemasNow.contains(before)) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + String.format("not found schema [%s], please retry", before)); + } + } + for (String nowSc : schemasNow) { + if (!schemasBefore.contains(nowSc)) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + String.format("newly built schema [%s] found, please retry", nowSc)); + } + } + } else { + String schema = allSchemaNamesTobeConvert.get(0); + if (!userSchemata.contains(schema.toLowerCase())) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + String.format("schema [%s] not found, please retry", schema)); + } + } + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ConvertSequenceInSchemasTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ConvertSequenceInSchemasTask.java new file mode 100644 index 000000000..c49157aa7 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ConvertSequenceInSchemasTask.java @@ -0,0 +1,90 @@ +/* + * Copyright [2013-2021], Alibaba Group 
Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; +import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.sync.ClearSeqCacheSyncAction; +import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.metadb.seq.SequencesAccessor; +import com.alibaba.polardbx.gms.sync.SyncScope; +import com.alibaba.polardbx.gms.util.SeqTypeUtil; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +import java.sql.Connection; +import java.util.List; + +import static com.alibaba.polardbx.common.constants.SequenceAttribute.Type; + +/** + * Created by zhuqiwei. + * + * @author zhuqiwei + */ + +@Getter +@TaskName(name = "ConvertSequenceInSchemasTask") +public class ConvertSequenceInSchemasTask extends BaseGmsTask { + final List schemaNames; + Type fromType; + Type toType; + + @JSONCreator + public ConvertSequenceInSchemasTask(List schemaNames, Type fromType, Type toType) { + // just fill in a placeholder db name + super("polardbx", "none"); + this.schemaNames = schemaNames; + this.fromType = fromType; + this.toType = toType; + } + + @Override + public void executeImpl(Connection metaDbConn, ExecutionContext executionContext) { + for (String schema : schemaNames) { + convert(schema, fromType, toType, metaDbConn); + } + } + + @Override + public void onExecutionSuccess(ExecutionContext executionContext) { + boolean newSeqNotInvolved = fromType != Type.NEW && toType != Type.NEW; + for (String schema : schemaNames) { + if (SeqTypeUtil.isNewSeqSupported(schema) || newSeqNotInvolved) { + try { + SyncManagerHelper.sync(new ClearSeqCacheSyncAction(schema, null, true, false), SyncScope.ALL); + } catch (Exception e) { + throw new TddlNestableRuntimeException(e); + } + } + + } + } + + private void convert(String schemaName, Type fromType, Type toType, Connection metaDbConn) { + boolean newSeqNotInvolved = fromType != Type.NEW && toType != Type.NEW; + if (SeqTypeUtil.isNewSeqSupported(schemaName) || newSeqNotInvolved) { + try { + SequencesAccessor.change(schemaName, fromType, toType, metaDbConn); + } catch (Exception e) { + throw new TddlNestableRuntimeException(e); + } + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateDatabaseTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateDatabaseTask.java index d4c435069..987e16cd1 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateDatabaseTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateDatabaseTask.java @@ -20,12 +20,8 @@ import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; import 
com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlHelper; -import com.alibaba.polardbx.gms.topology.SystemDbHelper; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; -import org.apache.calcite.rel.ddl.CreateDatabase; - -import java.sql.Connection; /** * Created by zhuqiwei. diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateEntitySecurityAttrTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateEntitySecurityAttrTask.java new file mode 100644 index 000000000..a61962ff3 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateEntitySecurityAttrTask.java @@ -0,0 +1,98 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; +import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.gms.listener.impl.MetaDbConfigManager; +import com.alibaba.polardbx.gms.listener.impl.MetaDbDataIdBuilder; +import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; +import com.alibaba.polardbx.gms.lbac.LBACSecurityEntity; +import com.alibaba.polardbx.gms.lbac.accessor.LBACEntityAccessor; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.List; + +/** + * @author pangzhaoxing + */ + +@TaskName(name = "CreateEntitySecurityAttrTask") +public class CreateEntitySecurityAttrTask extends BaseGmsTask { + + List esaList; + + @JSONCreator + public CreateEntitySecurityAttrTask(String schemaName, String logicalTableName, List esaList) { + super(schemaName, logicalTableName); + this.esaList = esaList; + } + + @Override + protected void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { + + LBACEntityAccessor esaAccessor = new LBACEntityAccessor(); + esaAccessor.setConnection(metaDbConnection); + for (LBACSecurityEntity esa : esaList) { + esaAccessor.replace(esa); + } + } + + @Override + protected void onExecutionSuccess(ExecutionContext executionContext) { + super.onExecutionSuccess(executionContext); + try (Connection conn = MetaDbDataSource.getInstance().getConnection()) { + MetaDbConfigManager.getInstance().notify(MetaDbDataIdBuilder.getLBACSecurityDataId(), + conn); + // wait for all cn to load metadb + MetaDbConfigManager.getInstance().sync(MetaDbDataIdBuilder.getLBACSecurityDataId()); + } catch (SQLException e) { + throw new TddlNestableRuntimeException(e); + } + } + + @Override + protected void rollbackImpl(Connection metaDbConnection, ExecutionContext executionContext) { + LBACEntityAccessor esaAccessor 
= new LBACEntityAccessor(); + esaAccessor.setConnection(metaDbConnection); + for (LBACSecurityEntity esa : esaList) { + esaAccessor.deleteByKeyAndType(esa); + } + } + + @Override + protected void onRollbackSuccess(ExecutionContext executionContext) { + super.onRollbackSuccess(executionContext); + try (Connection conn = MetaDbDataSource.getInstance().getConnection()) { + MetaDbConfigManager.getInstance().notify(MetaDbDataIdBuilder.getLBACSecurityDataId(), + conn); + // wait for all cn to load metadb + MetaDbConfigManager.getInstance().sync(MetaDbDataIdBuilder.getLBACSecurityDataId()); + } catch (SQLException e) { + throw new TddlNestableRuntimeException(e); + } + } + + public List getEsaList() { + return esaList; + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateIndexPhyDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateIndexPhyDdlTask.java index 3233a7a95..d4c5a5688 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateIndexPhyDdlTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateIndexPhyDdlTask.java @@ -17,7 +17,6 @@ package com.alibaba.polardbx.executor.ddl.job.task.basic; import com.alibaba.fastjson.annotation.JSONCreator; -import com.google.common.collect.Lists; import com.alibaba.polardbx.common.ddl.newengine.DdlType; import com.alibaba.polardbx.executor.ddl.job.builder.DdlPhyPlanBuilder; import com.alibaba.polardbx.executor.ddl.job.builder.DropLocalIndexBuilder; @@ -30,6 +29,7 @@ import com.alibaba.polardbx.optimizer.core.planner.SqlConverter; import com.alibaba.polardbx.optimizer.core.rel.ReplaceTableNameWithQuestionMarkVisitor; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalDropIndex; +import com.google.common.collect.Lists; import org.apache.calcite.plan.RelOptCluster; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.ddl.DropIndex; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateJoinGroupTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateJoinGroupTask.java new file mode 100644 index 000000000..c0e502041 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateJoinGroupTask.java @@ -0,0 +1,64 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.gms.tablegroup.JoinGroupInfoAccessor; +import com.alibaba.polardbx.gms.tablegroup.JoinGroupInfoRecord; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; +import lombok.Getter; + +import java.sql.Connection; + +@Getter +@TaskName(name = "CreateJoinGroupTask") +public class CreateJoinGroupTask extends BaseDdlTask { + + private final static Logger LOG = SQLRecorderLogger.ddlLogger; + private final String joinGroupName; + private final String locality; + private final boolean isIfNotExists; + + @JSONCreator + public CreateJoinGroupTask(String schemaName, String joinGroupName, String locality, boolean isIfNotExists) { + super(schemaName); + this.joinGroupName = joinGroupName; + this.locality = locality; + this.isIfNotExists = isIfNotExists; + onExceptionTryRollback(); + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + JoinGroupInfoAccessor joinGroupInfoAccessor = new JoinGroupInfoAccessor(); + joinGroupInfoAccessor.setConnection(metaDbConnection); + JoinGroupInfoRecord record = new JoinGroupInfoRecord(); + record.tableSchema = schemaName; + record.joinGroupName = joinGroupName; + record.locality = locality; + joinGroupInfoAccessor.addJoinGroup(record, isIfNotExists); + } + + @Override + protected String remark() { + return "|CreateJoinGroupTask: " + joinGroupName + "," + locality + "," + isIfNotExists; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableAddTablesExtMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableAddTablesExtMetaTask.java index b0f90e0f3..48eb876e5 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableAddTablesExtMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableAddTablesExtMetaTask.java @@ -24,6 +24,7 @@ import com.alibaba.polardbx.executor.sync.TableMetaChangeSyncAction; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.gms.metadb.table.TablesExtRecord; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -68,7 +69,7 @@ public void rollbackImpl(Connection metaDbConnection, ExecutionContext execution TableMetaChanger.removeTableExt(metaDbConnection, schemaName, logicalTableName); FailPoint.injectRandomExceptionFromHint(executionContext); FailPoint.injectRandomSuspendFromHint(executionContext); - SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName)); + SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName), SyncScope.ALL); } private boolean isCreateTableSupported(ExecutionContext executionContext) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableAddTablesMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableAddTablesMetaTask.java index 0b3c17a7f..e7f4889dc 100644 ---
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableAddTablesMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableAddTablesMetaTask.java @@ -48,6 +48,8 @@ public class CreateTableAddTablesMetaTask extends BaseGmsTask { private List addedForeignKeys; private Map specialDefaultValues; private Map specialDefaultValueFlags; + private Map columnMapping; + private List addNewColumns; @JSONCreator public CreateTableAddTablesMetaTask(String schemaName, String logicalTableName, String dbIndex, String phyTableName, @@ -56,7 +58,9 @@ public CreateTableAddTablesMetaTask(String schemaName, String logicalTableName, List addedForeignKeys, boolean hasTimestampColumnDefault, Map specialDefaultValues, - Map specialDefaultValueFlags) { + Map specialDefaultValueFlags, + Map columnMapping, + List addNewColumns) { super(schemaName, logicalTableName); this.dbIndex = dbIndex; this.phyTableName = phyTableName; @@ -69,6 +73,8 @@ public CreateTableAddTablesMetaTask(String schemaName, String logicalTableName, this.hasTimestampColumnDefault = hasTimestampColumnDefault; this.specialDefaultValues = specialDefaultValues; this.specialDefaultValueFlags = specialDefaultValueFlags; + this.columnMapping = columnMapping; + this.addNewColumns = addNewColumns; onExceptionTryRecoveryThenRollback(); } @@ -80,7 +86,8 @@ public void executeImpl(Connection metaDbConnection, ExecutionContext executionC FailPoint.injectRandomExceptionFromHint(executionContext); FailPoint.injectRandomSuspendFromHint(executionContext); TableMetaChanger.addTableMeta(metaDbConnection, phyInfoSchemaContext, hasTimestampColumnDefault, - executionContext, specialDefaultValues, specialDefaultValueFlags, addedForeignKeys); + executionContext, specialDefaultValues, specialDefaultValueFlags, addedForeignKeys, columnMapping, + addNewColumns); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableAddTablesPartitionInfoMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableAddTablesPartitionInfoMetaTask.java index 4a2b5fd60..f4b50ccc2 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableAddTablesPartitionInfoMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableAddTablesPartitionInfoMetaTask.java @@ -20,6 +20,7 @@ import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.ddl.ImplicitTableGroupUtil; import com.alibaba.polardbx.executor.ddl.job.meta.TableMetaChanger; import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; @@ -34,6 +35,7 @@ import com.alibaba.polardbx.gms.tablegroup.JoinGroupTableDetailAccessor; import com.alibaba.polardbx.gms.tablegroup.JoinGroupTableDetailRecord; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; +import com.alibaba.polardbx.gms.tablegroup.TableGroupDetailConfig; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.partition.common.LocalPartitionDefinitionInfo; @@ -50,31 +52,41 @@ public class CreateTableAddTablesPartitionInfoMetaTask extends BaseGmsTask { private 
boolean temporary; - private TableGroupConfig tableGroupConfig; + private TableGroupDetailConfig tableGroupConfig; private LocalPartitionDefinitionInfo localPartitionDefinitionInfo; - private boolean indexAlignWithPrimaryTableGroup; + private String tableGroupAlignWithTargetTable; private String primaryTable; private String locality; //specified in create table statement private String joinGroup; + private Boolean oss; + private Boolean withTableGroupImplicit; + private Boolean autoCreateTg; @JSONCreator public CreateTableAddTablesPartitionInfoMetaTask(String schemaName, String logicalTableName, boolean temporary, - TableGroupConfig tableGroupConfig, + TableGroupDetailConfig tableGroupConfig, LocalPartitionDefinitionInfo localPartitionDefinitionInfo, - boolean indexAlignWithPrimaryTableGroup, + String tableGroupAlignWithTargetTable, String primaryTable, - String joinGroup) { + String joinGroup, + boolean oss, + boolean withTableGroupImplicit, + boolean autoCreateTg) { super(schemaName, logicalTableName); this.temporary = temporary; this.tableGroupConfig = tableGroupConfig; this.localPartitionDefinitionInfo = localPartitionDefinitionInfo; - this.indexAlignWithPrimaryTableGroup = indexAlignWithPrimaryTableGroup; + this.tableGroupAlignWithTargetTable = tableGroupAlignWithTargetTable; this.primaryTable = primaryTable; this.joinGroup = joinGroup; + this.oss = oss; + this.autoCreateTg = autoCreateTg; + this.withTableGroupImplicit = withTableGroupImplicit; + ImplicitTableGroupUtil.checkAutoCreateTableGroup(tableGroupConfig, oss, withTableGroupImplicit, autoCreateTg); onExceptionTryRecoveryThenRollback(); } @@ -83,14 +95,18 @@ public void executeImpl(Connection metaDbConnection, ExecutionContext executionC if (!isCreateTableSupported(executionContext)) { return; } - if (indexAlignWithPrimaryTableGroup) { - assert primaryTable != null; + TablePartitionConfig tablePartitionConfig = null; + if (StringUtils.isNotEmpty(tableGroupAlignWithTargetTable)) { + tablePartitionConfig = + getTablePartitionConfig(tableGroupAlignWithTargetTable, metaDbConnection); + } + if (StringUtils.isNotEmpty(tableGroupAlignWithTargetTable) && tablePartitionConfig != null) { tableGroupConfig.setTableGroupRecord(null); if (tableGroupConfig.getAllTables().size() != 1) { throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT, "unexpected table count"); } - TablePartRecordInfoContext tablePartRecordInfoContext = tableGroupConfig.getAllTables().get(0); - TablePartitionConfig tablePartitionConfig = getTablePartitionConfig(primaryTable, metaDbConnection); + TablePartRecordInfoContext tablePartRecordInfoContext = + tableGroupConfig.getTablesPartRecordInfoContext().get(0); List tablePartitionSpecConfigs = tablePartitionConfig.getPartitionSpecConfigs(); if (tablePartitionSpecConfigs.size() != tablePartRecordInfoContext.getPartitionRecList().size()) { throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT, "unexpected partition count"); @@ -107,27 +123,53 @@ public void executeImpl(Connection metaDbConnection, ExecutionContext executionC "can't found the right partition"); } tablePartitionRecord.setGroupId(tablePartitionSpecConfig.get().getSpecConfigInfo().getGroupId()); + if (GeneralUtil.isNotEmpty(tablePartRecordInfoContext.getSubPartitionRecMap())) { + List subTablePartitionRecords = + tablePartRecordInfoContext.getSubPartitionRecMap().get(tablePartitionRecord.getPartName()); + List subTablePartitionConfigs = + tablePartitionSpecConfig.get().getSubPartitionSpecConfigs(); + if 
(GeneralUtil.isEmpty(subTablePartitionRecords) || GeneralUtil.isEmpty( + subTablePartitionConfigs)) { + throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT, + "can't find the right subpartition"); + } + for (TablePartitionRecord subTablePartitionRecord : subTablePartitionRecords) { + Optional subTablePartitionSpecConfig = + subTablePartitionConfigs.stream() + .filter(o -> o.getSpecConfigInfo().partName.equalsIgnoreCase( + subTablePartitionRecord.partName)) + .findFirst(); + if (!subTablePartitionSpecConfig.isPresent()) { + throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT, + "can't find the right partition"); + } + subTablePartitionRecord.setGroupId(subTablePartitionSpecConfig.get().getSpecConfigInfo() + .getGroupId()); + } + } } - } - if (primaryTable == null) { + } else if (primaryTable == null) { JoinGroupInfoAccessor joinGroupInfoAccessor = new JoinGroupInfoAccessor(); JoinGroupTableDetailAccessor joinGroupTableDetailAccessor = new JoinGroupTableDetailAccessor(); joinGroupTableDetailAccessor.setConnection(metaDbConnection); joinGroupInfoAccessor.setConnection(metaDbConnection); if (tableGroupConfig.getTableGroupRecord() == null) { - TablePartRecordInfoContext tablePartRecordInfoContext = tableGroupConfig.getTables().get(0); + TablePartRecordInfoContext tablePartRecordInfoContext = + tableGroupConfig.getTablesPartRecordInfoContext() + .get(0); Long groupId = tablePartRecordInfoContext.getLogTbRec().groupId; - boolean isEmptyGroup = tableGroupConfig.getTables().size() == 1 && (groupId == null || groupId == -1); + boolean isEmptyGroup = + tableGroupConfig.getAllTables().size() == 1 && (groupId == null || groupId == -1); if (!isEmptyGroup) { TableGroupConfig tgConfig = OptimizerContext.getContext(schemaName).getTableGroupInfoManager() .getTableGroupConfigById(groupId); - if (GeneralUtil.isNotEmpty(tgConfig.getTables())) { + if (GeneralUtil.isNotEmpty(tgConfig.getAllTables())) { JoinGroupTableDetailRecord joinGroupTableDetailRecord = joinGroupTableDetailAccessor.getJoinGroupDetailBySchemaTableName(schemaName, - tgConfig.getTables().get(0).getTableName()); + tgConfig.getAllTables().get(0)); if (joinGroupTableDetailRecord != null) { joinGroupTableDetailAccessor.insertJoingroupTableDetail(schemaName, joinGroupTableDetailRecord.joinGroupId, @@ -173,11 +215,11 @@ private boolean isCreateTableSupported(ExecutionContext executionContext) { return !(temporary || executionContext.isUseHint()); } - public TableGroupConfig getTableGroupConfig() { + public TableGroupDetailConfig getTableGroupConfig() { return tableGroupConfig; } - public void setTableGroupConfig(TableGroupConfig tableGroupConfig) { + public void setTableGroupConfig(TableGroupDetailConfig tableGroupConfig) { this.tableGroupConfig = tableGroupConfig; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableGroupAddMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableGroupAddMetaTask.java index a1b6dd498..1075f7175 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableGroupAddMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableGroupAddMetaTask.java @@ -33,21 +33,27 @@ public class CreateTableGroupAddMetaTask extends BaseGmsTask { private String tableGroupName; private String locality; private String partitionDefinition; + private boolean single; + private boolean withImplicit; @JSONCreator
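// single: register the group as a non-default single-table group (its partition definition is stored as "SINGLE"); withImplicit: the group is created implicitly together with a table, so manual_create is persisted as 0 (see executeImpl below)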
public CreateTableGroupAddMetaTask(String schemaName, String tableGroupName, String locality, - String partitionDefinition) { + String partitionDefinition, + boolean single, + boolean withImplicit) { super(schemaName, ""); this.tableGroupName = tableGroupName; this.locality = locality; this.partitionDefinition = partitionDefinition; + this.single = single; + this.withImplicit = withImplicit; } @Override protected void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { - updateSupportedCommands(true, false, metaDbConnection); + updateSupportedCommands(true, withImplicit, metaDbConnection); TableGroupAccessor tableGroupAccessor = new TableGroupAccessor(); tableGroupAccessor.setConnection(metaDbConnection); TableGroupRecord tableGroupRecord = new TableGroupRecord(); @@ -56,8 +62,13 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi tableGroupRecord.locality = locality; tableGroupRecord.setInited(0); tableGroupRecord.meta_version = 1L; - tableGroupRecord.manual_create = 1; - tableGroupRecord.partition_definition = partitionDefinition; + tableGroupRecord.manual_create = withImplicit ? 0 : 1; + if (single) { + tableGroupRecord.tg_type = TableGroupRecord.TG_TYPE_NON_DEFAULT_SINGLE_TBL_TG; + tableGroupRecord.partition_definition = "SINGLE"; + } else { + tableGroupRecord.partition_definition = partitionDefinition; + } tableGroupAccessor.addNewTableGroup(tableGroupRecord); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableGroupValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableGroupValidateTask.java index f3fa0ad3d..98fd573f4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableGroupValidateTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableGroupValidateTask.java @@ -23,27 +23,31 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; +import java.util.List; + @Getter @TaskName(name = "CreateTableGroupValidateTask") public class CreateTableGroupValidateTask extends BaseValidateTask { - private String tableGroupName; + private List<String> tableGroupNames; @JSONCreator public CreateTableGroupValidateTask(String schemaName, - String tableGroupName) { + List<String> tableGroupNames) { super(schemaName); - this.tableGroupName = tableGroupName; + this.tableGroupNames = tableGroupNames; } @Override public void executeImpl(ExecutionContext executionContext) { - TableValidator.validateTableGroupNoExists(schemaName, tableGroupName); + for (String tableGroupName : tableGroupNames) { + TableValidator.validateTableGroupNoExists(schemaName, tableGroupName); + } } @Override protected String remark() { - return "|tableGroupName: " + tableGroupName; + return "|tableGroupNames: " + tableGroupNames; } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTablePhyDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTablePhyDdlTask.java index e6858d54f..6f84f654e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTablePhyDdlTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTablePhyDdlTask.java @@ -17,32 +17,15 @@ package com.alibaba.polardbx.executor.ddl.job.task.basic; import com.alibaba.fastjson.annotation.JSONCreator;
-import com.google.common.collect.Lists; -import com.alibaba.polardbx.common.ddl.newengine.DdlType; import com.alibaba.polardbx.executor.ddl.job.builder.DdlPhyPlanBuilder; -import com.alibaba.polardbx.executor.ddl.job.builder.DropPartitionTableBuilder; import com.alibaba.polardbx.executor.ddl.job.builder.DropPhyTableBuilder; -import com.alibaba.polardbx.executor.ddl.job.builder.DropTableBuilder; -import com.alibaba.polardbx.executor.ddl.job.converter.DdlJobDataConverter; import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData; import com.alibaba.polardbx.executor.ddl.job.task.BasePhyDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; -import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; -import com.alibaba.polardbx.gms.topology.DbInfoManager; -import com.alibaba.polardbx.optimizer.context.DdlContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.core.planner.SqlConverter; import com.alibaba.polardbx.optimizer.core.rel.PhyDdlTableOperation; -import com.alibaba.polardbx.optimizer.core.rel.ReplaceTableNameWithQuestionMarkVisitor; -import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalDropTable; import lombok.Getter; -import org.apache.calcite.plan.RelOptCluster; import org.apache.calcite.rel.RelNode; -import org.apache.calcite.rel.ddl.DropTable; -import org.apache.calcite.sql.SqlDdlNodes; -import org.apache.calcite.sql.SqlDropTable; -import org.apache.calcite.sql.SqlIdentifier; -import org.apache.calcite.sql.parser.SqlParserPos; import java.util.List; @@ -66,6 +49,10 @@ protected List genRollbackPhysicalPlans(ExecutionContext executionConte .createBuilder(schemaName, logicalTableName, true, this.physicalPlanData.getTableTopology(), executionContext).build(); List physicalPlans = dropPhyTableBuilder.getPhysicalPlans(); + // delete redundant params in foreign key + physicalPlans.forEach(physicalPlan -> { + physicalPlan.getParam().entrySet().removeIf(entry -> !entry.getKey().equals(1)); + }); return convertToRelNodes(physicalPlans); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableShowTableMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableShowTableMetaTask.java index c0c94641e..7e214373e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableShowTableMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableShowTableMetaTask.java @@ -16,7 +16,7 @@ package com.alibaba.polardbx.executor.ddl.job.task.basic; -import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.executor.ddl.job.meta.TableMetaChanger; import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; @@ -26,9 +26,9 @@ import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.gms.metadb.seq.SequenceBaseRecord; import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; -import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.planmanager.PlanManager; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; import lombok.Getter; import java.sql.Connection; @@ -39,6 +39,8 @@ 
@TaskName(name = "CreateTableShowTableMetaTask") public class CreateTableShowTableMetaTask extends BaseGmsTask { + protected static final Logger LOGGER = SQLRecorderLogger.ddlLogger; + public CreateTableShowTableMetaTask(String schemaName, String logicalTableName) { super(schemaName, logicalTableName); onExceptionTryRecoveryThenPause(); @@ -51,13 +53,12 @@ public void executeImpl(Connection metaDbConnection, ExecutionContext executionC SequenceBaseRecord sequenceRecord = tableInfoManager.fetchSequence(schemaName, AUTO_SEQ_PREFIX + logicalTableName); - TableMetaChanger.triggerSchemaChange(metaDbConnection, schemaName, logicalTableName, sequenceRecord, tableInfoManager); FailPoint.injectRandomExceptionFromHint(executionContext); FailPoint.injectRandomSuspendFromHint(executionContext); - SyncManagerHelper.syncWithDefaultDB(new BaselinePlanValidCheckSyncAction()); + SyncManagerHelper.syncWithDefaultDB(new BaselinePlanValidCheckSyncAction(), SyncScope.ALL); } @Override @@ -68,7 +69,7 @@ protected void onExecutionSuccess(ExecutionContext executionContext) { @Override public void rollbackImpl(Connection metaDbConnection, ExecutionContext executionContext) { TableMetaChanger.hideTableMeta(metaDbConnection, schemaName, logicalTableName); - SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName)); + SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName), SyncScope.ALL); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableValidateTask.java index 26d58e1c3..5c3fb09bd 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableValidateTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateTableValidateTask.java @@ -23,6 +23,7 @@ import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; import com.alibaba.polardbx.gms.metadb.table.TablesExtRecord; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.rel.ddl.data.LikeTableInfo; import com.alibaba.polardbx.rule.TableRule; import lombok.Getter; @@ -32,19 +33,26 @@ public class CreateTableValidateTask extends BaseValidateTask { private String logicalTableName; private TablesExtRecord tablesExtRecord; + private LikeTableInfo likeTableInfo; @JSONCreator public CreateTableValidateTask(String schemaName, String logicalTableName, - TablesExtRecord tablesExtRecord) { + TablesExtRecord tablesExtRecord, + LikeTableInfo likeTableInfo) { super(schemaName); this.logicalTableName = logicalTableName; this.tablesExtRecord = tablesExtRecord; + this.likeTableInfo = likeTableInfo; } @Override public void executeImpl(ExecutionContext executionContext) { TableValidator.validateTableNonExistence(schemaName, logicalTableName, executionContext); + if (likeTableInfo != null) { + TableValidator.validateTableExistence(likeTableInfo.getSchemaName(), likeTableInfo.getTableName(), + executionContext); + } TableRule newTableRule = DdlJobDataConverter.buildTableRule(tablesExtRecord); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateViewAddMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateViewAddMetaTask.java new file mode 100644 index 000000000..e79b6e7b6 --- /dev/null +++ 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateViewAddMetaTask.java @@ -0,0 +1,96 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.handler.ddl.LogicalCreateViewHandler; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.view.ViewManager; +import lombok.Getter; + +import java.sql.Connection; +import java.util.List; + +@Getter +@TaskName(name = "CreateViewAddMetaTask") +public class CreateViewAddMetaTask extends BaseDdlTask { + + protected String viewName; + protected Boolean isReplace; + protected List columnList; + protected String viewDefinition; + protected String planString; + protected String planType; + + @JSONCreator + public CreateViewAddMetaTask(String schemaName, + String viewName, + boolean isReplace, + List columnList, + String viewDefinition, + String planString, + String planType) { + super(schemaName); + this.viewName = viewName; + this.isReplace = isReplace; + this.columnList = columnList; + this.viewDefinition = viewDefinition; + this.planString = planString; + this.planType = planType; + onExceptionTryRecoveryThenRollback(); + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + ViewManager viewManager = OptimizerContext.getContext(schemaName).getViewManager(); + + boolean success; + if (isReplace) { + success = viewManager + .replace(viewName, columnList, viewDefinition, executionContext.getConnection().getUser(), planString, + planType); + if (!success) { + throw new TddlRuntimeException(ErrorCode.ERR_VIEW, "can't replace view " + viewName); + } + } else { + if (viewManager.select(viewName) != null) { + throw new TddlRuntimeException(ErrorCode.ERR_VIEW, "table '" + viewName + "' already exists "); + } + success = viewManager + .insert(viewName, columnList, viewDefinition, executionContext.getConnection().getUser(), planString, + planType); + if (!success) { + throw new TddlRuntimeException(ErrorCode.ERR_VIEW, "can't add view " + viewName); + } + } + } + + @Override + protected String remark() { + return "|viewDefinition: " + viewDefinition; + } + +} diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateViewSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateViewSyncTask.java new file mode 100644 index 000000000..3a21f74fd --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/CreateViewSyncTask.java @@ -0,0 +1,51 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.sync.CreateViewSyncAction; +import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +@TaskName(name = "CreateViewSyncTask") +@Getter +public class CreateViewSyncTask extends BaseDdlTask { + final private String schemaName; + final private String viewName; + + @JSONCreator + public CreateViewSyncTask(String schemaName, String viewName) { + super(schemaName); + this.schemaName = schemaName; + this.viewName = viewName; + } + + @Override + protected void onExecutionSuccess(ExecutionContext executionContext) { + try { + SyncManagerHelper.sync(new CreateViewSyncAction(schemaName, viewName), schemaName, SyncScope.ALL); + } catch (Throwable ignore) { + LOGGER.error( + "error occurs while executing CreateViewSyncTask" + ); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DdlBackfillCostRecordTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DdlBackfillCostRecordTask.java new file mode 100644 index 000000000..d6f7261e1 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DdlBackfillCostRecordTask.java @@ -0,0 +1,62 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.CostEstimableDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.ddl.newengine.job.DdlExceptionAction; +import lombok.Getter; +import lombok.Setter; + +@TaskName(name = "DdlBackfillCostRecordTask") +@Getter +@Setter +public final class DdlBackfillCostRecordTask extends BaseDdlTask implements CostEstimableDdlTask { + + @JSONCreator + public DdlBackfillCostRecordTask(String schemaName) { + super(schemaName); + setExceptionAction(DdlExceptionAction.ROLLBACK); + } + + @Override + public String remark() { + String costInfoStr = ""; + if (costInfo != null) { + costInfoStr = String.format("|estimated rows:%s, estimated size:%s", costInfo.rows, costInfo.dataSize); + } + return costInfoStr; + } + + public static String getTaskName() { + return "DdlBackfillCostRecordTask"; + } + + private transient volatile CostInfo costInfo; + + @Override + public void setCostInfo(CostInfo costInfo) { + this.costInfo = costInfo; + } + + @Override + public CostInfo getCostInfo() { + return costInfo; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DiscardTableSpaceDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DiscardTableSpaceDdlTask.java new file mode 100644 index 000000000..79b9a3abe --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DiscardTableSpaceDdlTask.java @@ -0,0 +1,42 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData; +import com.alibaba.polardbx.executor.ddl.job.task.BasePhyDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +@Getter +@TaskName(name = "DiscardTableSpaceDdlTask") +public class DiscardTableSpaceDdlTask extends BasePhyDdlTask { + + private String logicalTableName; + + @JSONCreator + public DiscardTableSpaceDdlTask(String schemaName, String logicalTableName, PhysicalPlanData physicalPlanData) { + super(schemaName, physicalPlanData); + this.logicalTableName = logicalTableName; + onExceptionTryRecoveryThenRollback(); + } + + public void executeImpl(ExecutionContext executionContext) { + super.executeImpl(executionContext); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DrainNodeOfTableGroupValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DrainNodeOfTableGroupValidateTask.java index 174618a02..fd698d4eb 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DrainNodeOfTableGroupValidateTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DrainNodeOfTableGroupValidateTask.java @@ -17,14 +17,10 @@ package com.alibaba.polardbx.executor.ddl.job.task.basic; import com.alibaba.fastjson.annotation.JSONCreator; -import com.alibaba.polardbx.common.exception.TddlRuntimeException; -import com.alibaba.polardbx.common.exception.code.ErrorCode; -import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.executor.ddl.job.task.BaseValidateTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; -import com.alibaba.polardbx.gms.tablegroup.PartitionGroupRecord; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; import com.alibaba.polardbx.gms.tablegroup.TableGroupRecord; import com.alibaba.polardbx.gms.tablegroup.TableGroupUtils; @@ -32,11 +28,7 @@ import com.alibaba.polardbx.statistics.SQLRecorderLogger; import lombok.Getter; -import java.util.HashSet; import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; import java.util.stream.Collectors; @Getter @@ -46,8 +38,10 @@ public class DrainNodeOfTableGroupValidateTask extends BaseValidateTask { private final static Logger LOG = SQLRecorderLogger.ddlLogger; private List tableGroupConfigs; private String tableGroupName; + @JSONCreator - public DrainNodeOfTableGroupValidateTask(String schemaName, List tableGroupConfigs, String tableGroupName) { + public DrainNodeOfTableGroupValidateTask(String schemaName, List tableGroupConfigs, + String tableGroupName) { super(schemaName); this.tableGroupConfigs = tableGroupConfigs; this.tableGroupName = tableGroupName; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DrainNodeValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DrainNodeValidateTask.java index fd3ea6119..7a36561c1 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DrainNodeValidateTask.java +++ 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DrainNodeValidateTask.java @@ -21,10 +21,8 @@ import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.executor.balancer.stats.StatsUtils; import com.alibaba.polardbx.executor.ddl.job.task.BaseValidateTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; -import com.alibaba.polardbx.executor.ddl.job.validator.GsiValidator; import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; import com.alibaba.polardbx.gms.tablegroup.PartitionGroupRecord; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; @@ -32,10 +30,8 @@ import com.alibaba.polardbx.gms.tablegroup.TableGroupUtils; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.statistics.SQLRecorderLogger; -import com.google.common.base.Joiner; import lombok.Getter; -import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -62,8 +58,14 @@ public void executeImpl(ExecutionContext executionContext) { Map curTableGroupMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); Map saveTableGroupMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); - curTableGroupConfigs.stream().forEach(o -> curTableGroupMap.put(o.getTableGroupRecord().tg_name, o)); - tableGroupConfigs.stream().forEach(o -> saveTableGroupMap.put(o.getTableGroupRecord().tg_name, o)); + curTableGroupConfigs + .stream() + .filter(tg -> !tg.isColumnarTableGroup()) + .forEach(o -> curTableGroupMap.put(o.getTableGroupRecord().tg_name, o)); + tableGroupConfigs + .stream() + .filter(tg -> !tg.isColumnarTableGroup()) + .forEach(o -> saveTableGroupMap.put(o.getTableGroupRecord().tg_name, o)); if (curTableGroupMap.size() == saveTableGroupMap.size()) { for (Map.Entry entry : saveTableGroupMap.entrySet()) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropDbGroupHideMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropDbGroupHideMetaTask.java index 69d2a042d..80111b60b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropDbGroupHideMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropDbGroupHideMetaTask.java @@ -31,9 +31,7 @@ import com.alibaba.polardbx.gms.topology.DbGroupInfoAccessor; import com.alibaba.polardbx.gms.topology.DbGroupInfoManager; import com.alibaba.polardbx.gms.topology.DbGroupInfoRecord; -import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.tablegroup.TableGroupInfoManager; import com.alibaba.polardbx.statistics.SQLRecorderLogger; import lombok.Getter; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropEntitySecurityAttrTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropEntitySecurityAttrTask.java new file mode 100644 index 000000000..8a7238c5e --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropEntitySecurityAttrTask.java @@ -0,0 +1,99 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except 
in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; +import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.gms.listener.impl.MetaDbConfigManager; +import com.alibaba.polardbx.gms.listener.impl.MetaDbDataIdBuilder; +import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; +import com.alibaba.polardbx.gms.lbac.LBACSecurityEntity; +import com.alibaba.polardbx.gms.lbac.accessor.LBACEntityAccessor; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.List; + +/** + * @author pangzhaoxing + */ +@TaskName(name = "DropEntitySecurityAttrTask") +public class DropEntitySecurityAttrTask extends BaseGmsTask { + + List<LBACSecurityEntity> esaList; + + @JSONCreator + public DropEntitySecurityAttrTask(String schemaName, String logicalTableName, List<LBACSecurityEntity> esaList) { + super(schemaName, logicalTableName); + this.esaList = esaList; + } + + @Override + protected void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { + LBACEntityAccessor esaAccessor = new LBACEntityAccessor(); + esaAccessor.setConnection(metaDbConnection); + for (LBACSecurityEntity esa : esaList) { + esaAccessor.deleteByKeyAndType(esa); + } + } + + @Override + protected void onExecutionSuccess(ExecutionContext executionContext) { + super.onExecutionSuccess(executionContext); + try (Connection conn = MetaDbDataSource.getInstance().getConnection()) { + MetaDbConfigManager.getInstance().notify(MetaDbDataIdBuilder.getLBACSecurityDataId(), + conn); + // wait for all CNs to load metadb + MetaDbConfigManager.getInstance().sync(MetaDbDataIdBuilder.getLBACSecurityDataId()); + } catch (SQLException e) { + throw new TddlNestableRuntimeException(e); + } + } + + @Override + protected void onRollbackSuccess(ExecutionContext executionContext) { + super.onRollbackSuccess(executionContext); + try (Connection conn = MetaDbDataSource.getInstance().getConnection()) { + MetaDbConfigManager.getInstance().notify(MetaDbDataIdBuilder.getLBACSecurityDataId(), + conn); + // wait for all CNs to load metadb + MetaDbConfigManager.getInstance().sync(MetaDbDataIdBuilder.getLBACSecurityDataId()); + } catch (SQLException e) { + throw new TddlNestableRuntimeException(e); + } + } + + @Override + protected void rollbackImpl(Connection metaDbConnection, ExecutionContext executionContext) { + LBACEntityAccessor esaAccessor = new LBACEntityAccessor(); + esaAccessor.setConnection(metaDbConnection); + for (LBACSecurityEntity esa : esaList) { + esaAccessor.replace(esa); + } + } + + public List<LBACSecurityEntity> getEsaList() { + return esaList; + } + + public void setEsaList(List<LBACSecurityEntity> esaList) { + this.esaList = esaList; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropIndexHideMetaTask.java
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropIndexHideMetaTask.java index ddbf2f959..a0c1e123c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropIndexHideMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropIndexHideMetaTask.java @@ -23,6 +23,7 @@ import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.sync.TableMetaChangeSyncAction; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -51,7 +52,8 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi protected void rollbackImpl(Connection metaDbConnection, ExecutionContext executionContext) { TableMetaChanger.showIndexMeta(metaDbConnection, schemaName, logicalTableName, indexName); // Refresh table meta to make hidden columns visible after rollback. - SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName)); + SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName), + SyncScope.ALL); executionContext.refreshTableMeta(); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTableGroupValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTableGroupValidateTask.java deleted file mode 100644 index a80c219a1..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTableGroupValidateTask.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.ddl.job.task.basic; - -import com.alibaba.fastjson.annotation.JSONCreator; -import com.alibaba.polardbx.executor.ddl.job.task.BaseValidateTask; -import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; -import com.alibaba.polardbx.executor.ddl.job.validator.TableGroupValidator; -import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import lombok.Getter; - -@Getter -@TaskName(name = "DropTableGroupValidateTask") -public class DropTableGroupValidateTask extends BaseValidateTask { - - private String tableGroupName; - - @JSONCreator - public DropTableGroupValidateTask(String schemaName, - String tableGroupName) { - super(schemaName); - this.tableGroupName = tableGroupName; - } - - @Override - public void executeImpl(ExecutionContext executionContext) { - TableGroupValidator.validateTableGroupIsEmpty(schemaName, tableGroupName); - } - - @Override - protected String remark() { - return "|tableGroupName: " + tableGroupName; - } - -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTableHideTableMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTableHideTableMetaTask.java index 1adbda5cb..62b5a71c4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTableHideTableMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTableHideTableMetaTask.java @@ -23,6 +23,7 @@ import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.sync.TableMetaChangeSyncAction; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -49,7 +50,7 @@ public void rollbackImpl(Connection metaDbConnection, ExecutionContext execution FailPoint.injectRandomExceptionFromHint(executionContext); FailPoint.injectRandomSuspendFromHint(executionContext); //sync have to be successful to continue - SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName)); + SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName), SyncScope.ALL); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTablePhyDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTablePhyDdlTask.java index b90389777..b52cac400 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTablePhyDdlTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTablePhyDdlTask.java @@ -34,16 +34,7 @@ public DropTablePhyDdlTask(String schemaName, PhysicalPlanData physicalPlanData) @Override public void executeImpl(ExecutionContext executionContext) { updateSupportedCommands(true, false, null); - try { - super.executeImpl(executionContext); - } catch (PhysicalDdlException e) { - int successCount = e.getSuccessCount(); - if (successCount == 0) { - updateSupportedCommands(true, true, null); - enableRollback(this); - } - throw new PhysicalDdlException(e.getTotalCount(), e.getSuccessCount(), e.getFailCount(), - e.getErrMsg(), e.getSimpleErrMsg()); - } + // the table meta has already been removed, so rollback is definitely not supported + super.executeImpl(executionContext); } } diff --git
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTableRemoveMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTableRemoveMetaTask.java index 045f00f4d..63d53633a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTableRemoveMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTableRemoveMetaTask.java @@ -22,9 +22,6 @@ import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; -import com.alibaba.polardbx.optimizer.OptimizerContext; -import com.alibaba.polardbx.optimizer.config.table.SchemaManager; -import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -41,6 +38,8 @@ public DropTableRemoveMetaTask(String schemaName, String logicalTableName) { @Override public void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + TableMetaChanger.removeTableMeta(metaDbConnection, schemaName, logicalTableName, true, executionContext); FailPoint.injectRandomExceptionFromHint(executionContext); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTableSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTableSyncTask.java index 327455e12..427d3582d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTableSyncTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTableSyncTask.java @@ -20,6 +20,7 @@ import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.sync.DropTableSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -37,7 +38,7 @@ public DropTableSyncTask(String schemaName, String logicalTableName) { @Override protected void executeImpl(ExecutionContext executionContext) { - SyncManagerHelper.sync(new DropTableSyncAction(schemaName, logicalTableName), true); + SyncManagerHelper.sync(new DropTableSyncAction(schemaName, logicalTableName), SyncScope.ALL, true); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTruncateTmpPrimaryTablePhyDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTruncateTmpPrimaryTablePhyDdlTask.java index 97ae8fad5..cb09de8a4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTruncateTmpPrimaryTablePhyDdlTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropTruncateTmpPrimaryTablePhyDdlTask.java @@ -55,7 +55,8 @@ public void executeImpl(ExecutionContext executionContext) { Map>> tableTopology = builder.getTableTopology(); List physicalPlans = builder.getPhysicalPlans(); //generate a "drop table" physical plan - PhysicalPlanData physicalPlanData = DdlJobDataConverter.convertToPhysicalPlanData(tableTopology, physicalPlans); + PhysicalPlanData physicalPlanData = + 
DdlJobDataConverter.convertToPhysicalPlanData(tableTopology, physicalPlans, executionContext); this.physicalPlanData = physicalPlanData; super.executeImpl(executionContext); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropViewRemoveMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropViewRemoveMetaTask.java new file mode 100644 index 000000000..d4e5e18ff --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropViewRemoveMetaTask.java @@ -0,0 +1,61 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.view.PolarDbXSystemTableView; +import com.alibaba.polardbx.optimizer.view.SystemTableView; +import com.alibaba.polardbx.optimizer.view.ViewManager; +import lombok.Getter; + +import java.sql.Connection; +import java.util.List; + +@Getter +@TaskName(name = "DropViewRemoveMetaTask") +public class DropViewRemoveMetaTask extends BaseDdlTask { + + protected String viewName; + + @JSONCreator + public DropViewRemoveMetaTask(String schemaName, + String viewName) { + super(schemaName); + this.viewName = viewName; + onExceptionTryRecoveryThenRollback(); + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + boolean success = OptimizerContext.getContext(schemaName).getViewManager().delete(viewName); + + if (!success) { + throw new TddlRuntimeException(ErrorCode.ERR_VIEW, + "drop view " + viewName + " fail for " + PolarDbXSystemTableView.TABLE_NAME); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropViewSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropViewSyncTask.java new file mode 100644 index 000000000..fda7394d8 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/DropViewSyncTask.java @@ -0,0 +1,53 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + package com.alibaba.polardbx.executor.ddl.job.task.basic; + + import com.alibaba.fastjson.annotation.JSONCreator; + import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; + import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; + import com.alibaba.polardbx.executor.sync.DropViewSyncAction; + import com.alibaba.polardbx.executor.sync.SyncManagerHelper; + import com.alibaba.polardbx.gms.sync.SyncScope; + import com.alibaba.polardbx.optimizer.context.ExecutionContext; + import lombok.Getter; + + import java.util.Collections; + + @TaskName(name = "DropViewSyncTask") + @Getter + public class DropViewSyncTask extends BaseDdlTask { + final private String schemaName; + final private String viewName; + + @JSONCreator + public DropViewSyncTask(String schemaName, String viewName) { + super(schemaName); + this.schemaName = schemaName; + this.viewName = viewName; + } + + @Override + protected void onExecutionSuccess(ExecutionContext executionContext) { + try { + SyncManagerHelper.sync(new DropViewSyncAction(schemaName, Collections.singletonList(viewName)), schemaName, SyncScope.ALL); + } catch (Throwable ignore) { + LOGGER.error( + "error occurs while executing DropViewSyncTask" + ); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/EmptyTableGroupValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/EmptyTableGroupValidateTask.java new file mode 100644 index 000000000..e7da83398 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/EmptyTableGroupValidateTask.java @@ -0,0 +1,49 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
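Editor's aside: DropViewSyncTask above, like the other *SyncTask changes in this patch, pushes a sync action through SyncManagerHelper with an explicit SyncScope, and treats a failed broadcast as non-fatal (logged, never rethrown). A minimal, self-contained sketch of that shape; every type below is a simplified stand-in, not the real PolarDB-X API:

```java
import java.util.List;

interface SyncAction {
    void apply(String nodeId); // what each node runs locally
}

enum SyncScopeSketch { ALL, MASTER_ONLY }

final class SyncHelperSketch {
    // In the real helper the scope presumably selects which nodes receive
    // the RPC; this sketch just broadcasts to everything it is given.
    static void sync(SyncAction action, SyncScopeSketch scope, List<String> nodes) {
        for (String node : nodes) {
            action.apply(node); // in reality an RPC to each CN node
        }
    }
}

public class DropViewSyncSketch {
    public static void main(String[] args) {
        SyncAction evictViewMeta = node -> System.out.println("evict view meta on " + node);
        try {
            SyncHelperSketch.sync(evictViewMeta, SyncScopeSketch.ALL, List.of("cn-0", "cn-1"));
        } catch (Throwable ignore) {
            // best effort: a failed cache eviction must not fail the DDL job,
            // mirroring DropViewSyncTask.onExecutionSuccess
            System.err.println("error occurs while executing drop-view sync");
        }
    }
}
```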
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.executor.ddl.job.task.BaseValidateTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.ddl.job.validator.TableGroupValidator; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +@Getter +@TaskName(name = "EmptyTableGroupValidateTask") +public class EmptyTableGroupValidateTask extends BaseValidateTask { + + private String tableGroupName; + + @JSONCreator + public EmptyTableGroupValidateTask(String schemaName, + String tableGroupName) { + super(schemaName); + this.tableGroupName = tableGroupName; + } + + @Override + public void executeImpl(ExecutionContext executionContext) { + TableGroupValidator.validateTableGroupIsEmpty(schemaName, tableGroupName); + } + + @Override + protected String remark() { + return "|tableGroupName: " + tableGroupName; + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/GlobalAcquireMdlLockInDbSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/GlobalAcquireMdlLockInDbSyncTask.java index ca04c04fd..7a970a3c7 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/GlobalAcquireMdlLockInDbSyncTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/GlobalAcquireMdlLockInDbSyncTask.java @@ -22,10 +22,10 @@ import com.alibaba.polardbx.executor.sync.GlobalAcquireMdlLockInDbSyncAction; import com.alibaba.polardbx.executor.sync.GlobalReleaseMdlLockInDbSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; -import java.util.List; import java.util.Set; /** @@ -47,7 +47,7 @@ public GlobalAcquireMdlLockInDbSyncTask(String srcSchemaName, Set schema @Override public void executeImpl(ExecutionContext executionContext) { try { - SyncManagerHelper.sync(new GlobalAcquireMdlLockInDbSyncAction(schemaNames)); + SyncManagerHelper.sync(new GlobalAcquireMdlLockInDbSyncAction(schemaNames), SyncScope.ALL); } catch (Throwable t) { LOGGER.error(String.format( "error occurs while lock tables meta, schemaNames:%s", schemaNames)); @@ -58,7 +58,7 @@ public void executeImpl(ExecutionContext executionContext) { @Override protected void beforeRollbackTransaction(ExecutionContext executionContext) { try { - SyncManagerHelper.sync(new GlobalReleaseMdlLockInDbSyncAction(schemaNames)); + SyncManagerHelper.sync(new GlobalReleaseMdlLockInDbSyncAction(schemaNames), SyncScope.ALL); } catch (Throwable t) { LOGGER.error(String.format( "error occurs while unlock tables meta, schemaName:%s", schemaNames)); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/GlobalReleaseMdlLockInDbSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/GlobalReleaseMdlLockInDbSyncTask.java index d6cc8aaa7..fcf839c55 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/GlobalReleaseMdlLockInDbSyncTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/GlobalReleaseMdlLockInDbSyncTask.java @@ -21,6 +21,7 @@ import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import 
com.alibaba.polardbx.executor.sync.GlobalReleaseMdlLockInDbSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -46,7 +47,7 @@ public GlobalReleaseMdlLockInDbSyncTask(String srcSchema, Set schemaName @Override public void executeImpl(ExecutionContext executionContext) { try { - SyncManagerHelper.sync(new GlobalReleaseMdlLockInDbSyncAction(schemaNames)); + SyncManagerHelper.sync(new GlobalReleaseMdlLockInDbSyncAction(schemaNames), SyncScope.ALL); } catch (Throwable t) { LOGGER.error(String.format( "error occurs while sync table meta, schemaName:%s", schemaNames)); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ImportTableSpaceDdlNormalTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ImportTableSpaceDdlNormalTask.java new file mode 100644 index 000000000..e8f81a2d8 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ImportTableSpaceDdlNormalTask.java @@ -0,0 +1,92 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
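Editor's aside: the Global{Acquire,Release}MdlLockInDbSyncTask pair above follows an acquire/compensate shape: the forward step broadcasts an acquire to all nodes, and beforeRollbackTransaction broadcasts the matching release so a failed job cannot leave schemas locked. A toy, self-contained illustration of the pairing (stand-in methods, not the real sync actions):

```java
import java.util.Set;

public class MdlLockPairingSketch {
    static void acquireMdl(Set<String> schemas) {
        // forward step: broadcast "acquire MDL lock in db" to every node
        System.out.println("acquire MDL in " + schemas);
    }

    static void releaseMdl(Set<String> schemas) {
        // compensation: the matching release broadcast, so a rolled-back
        // DDL job never leaves the databases locked
        System.out.println("release MDL in " + schemas);
    }

    public static void main(String[] args) {
        Set<String> schemas = Set.of("db1", "db2");
        acquireMdl(schemas);
        try {
            throw new IllegalStateException("simulated DDL failure");
        } catch (Exception e) {
            releaseMdl(schemas); // what beforeRollbackTransaction does
        }
    }
}
```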
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.executor.ddl.job.builder.AlterTableImportTableSpaceBuilder; +import com.alibaba.polardbx.executor.ddl.job.builder.DdlPhyPlanBuilder; +import com.alibaba.polardbx.executor.ddl.job.task.BasePhyDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +@Getter +@TaskName(name = "ImportTableSpaceDdlNormalTask") +public class ImportTableSpaceDdlNormalTask extends BasePhyDdlTask { + + private final String tableName; + private final Map>> tableTopology; + + @JSONCreator + public ImportTableSpaceDdlNormalTask(String schemaName, + String tableName, + Map>> tableTopology) { + super(schemaName, null); + this.tableName = tableName; + this.tableTopology = tableTopology; + } + + @Override + public void executeImpl(ExecutionContext executionContext) { + boolean executeInLeader = executionContext.getParamManager() + .getBoolean(ConnectionParams.PHYSICAL_BACKFILL_IMPORT_TABLESPACE_BY_LEADER); + if (executeInLeader) { + DdlPhyPlanBuilder builder = + AlterTableImportTableSpaceBuilder.createBuilder( + schemaName, tableName, true, tableTopology, executionContext).build(); + + this.physicalPlanData = builder.genPhysicalPlanData(); + + super.executeImpl(executionContext); + } + } + + @Override + public String remark() { + StringBuilder sb = new StringBuilder(); + sb.append("("); + for (Map.Entry>> entry : tableTopology.entrySet()) { + sb.append(entry.getKey()); + sb.append(".("); + sb.append(String.join(",", entry.getValue().get(0))); + sb.append(") "); + } + sb.append(")"); + return "|alter table " + sb + " import tablespace"; + } + + public List explainInfo() { + StringBuilder sb = new StringBuilder(); + sb.append("("); + for (Map.Entry>> entry : tableTopology.entrySet()) { + sb.append(entry.getKey()); + sb.append(".("); + sb.append(String.join(",", entry.getValue().get(0))); + sb.append(") "); + } + sb.append(")"); + List command = new ArrayList<>(1); + command.add(sb.toString()); + return command; + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ImportTableSpaceDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ImportTableSpaceDdlTask.java new file mode 100644 index 000000000..ed89eaf9a --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ImportTableSpaceDdlTask.java @@ -0,0 +1,147 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
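Editor's aside: in ImportTableSpaceDdlNormalTask above, remark() and explainInfo() build the same `(phyDb.(t1,t2) )` rendering of the topology twice. If a refactor were wanted, a shared helper could look like the following sketch (hypothetical class, mirroring the loop in the patch, including the detail that only entry.getValue().get(0), the first physical-table group, is printed):

```java
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class TopologyRemarkSketch {
    static String render(Map<String, List<List<String>>> tableTopology) {
        StringBuilder sb = new StringBuilder("(");
        for (Map.Entry<String, List<List<String>>> entry : tableTopology.entrySet()) {
            sb.append(entry.getKey())                              // physical db name
              .append(".(")
              .append(String.join(",", entry.getValue().get(0)))   // first phy-table group only
              .append(") ");
        }
        return sb.append(")").toString();
    }

    public static void main(String[] args) {
        Map<String, List<List<String>>> topo = new LinkedHashMap<>();
        topo.put("db1_p00000", List.of(List.of("t1_00", "t1_01")));
        System.out.println(render(topo)); // prints: (db1_p00000.(t1_00,t1_01) )
    }
}
```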
+ */ + + package com.alibaba.polardbx.executor.ddl.job.task.basic; + + import com.alibaba.fastjson.annotation.JSONCreator; + import com.alibaba.polardbx.common.ddl.newengine.DdlTaskState; + import com.alibaba.polardbx.common.exception.TddlRuntimeException; + import com.alibaba.polardbx.common.exception.code.ErrorCode; + import com.alibaba.polardbx.common.utils.Pair; + import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; + import com.alibaba.polardbx.executor.ddl.job.task.BasePhyDdlTask; + import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; + import com.alibaba.polardbx.executor.physicalbackfill.PhysicalBackfillUtils; + import com.alibaba.polardbx.optimizer.context.ExecutionContext; + import com.alibaba.polardbx.rpc.pool.XConnection; + import com.alibaba.polardbx.statistics.SQLRecorderLogger; + import lombok.Getter; + import org.apache.commons.lang3.StringUtils; + + import java.sql.SQLException; + import java.util.ArrayList; + import java.util.HashMap; + import java.util.List; + import java.util.regex.Matcher; + import java.util.regex.Pattern; + + import static com.alibaba.polardbx.common.TddlConstants.LONG_ENOUGH_TIMEOUT_FOR_DDL_ON_XPROTO_CONN; + + @Getter + @TaskName(name = "ImportTableSpaceDdlTask") + public class ImportTableSpaceDdlTask extends BaseDdlTask { + + private String logicalTableName; + private String phyDbName; + private String phyTableName; + private Pair targetHost; // leader, follower or learner + private Pair userAndPasswd; + + @JSONCreator + public ImportTableSpaceDdlTask(String schemaName, String logicalTableName, String phyDbName, String phyTableName, + Pair targetHost, Pair userAndPasswd) { + super(schemaName); + this.logicalTableName = logicalTableName; + this.phyDbName = phyDbName.toLowerCase(); + this.phyTableName = phyTableName.toLowerCase(); + this.targetHost = targetHost; + this.userAndPasswd = userAndPasswd; + onExceptionTryRecoveryThenRollback(); + } + + @Override + protected void beforeTransaction(ExecutionContext executionContext) { + updateTaskStateInNewTxn(DdlTaskState.DIRTY); + executeImpl(executionContext); + } + + public void executeImpl(ExecutionContext executionContext) { + ///!!!!!!DANGER!!!!
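```java
// Editor's note on the DANGER marker above, restating what the code below does:
// sql_log_bin is switched off through XConnection.setSessionVariables(...)
// instead of sending "SET SESSION sql_log_bin=0" as raw SQL past the CN.
// Two consequences:
//  1. conn.setLastException(..., true) poisons this pooled connection, so it
//     is discarded rather than returned to the pool with binlogging disabled;
//  2. the finally block clears and re-applies the variables map, restoring
//     the session state before the connection object is released.
```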
+ // can't change variables via sql bypass CN + // String disableBinlog = "SET SESSION sql_log_bin=0"; + HashMap variables = new HashMap<>(); + //disable sql_log_bin + variables.put(PhysicalBackfillUtils.SQL_LOG_BIN, "OFF"); + String importTableSpace = "alter table " + phyTableName + " import tablespace"; + try ( + XConnection conn = (XConnection) (PhysicalBackfillUtils.getXConnectionForStorage(phyDbName, + targetHost.getKey(), targetHost.getValue(), userAndPasswd.getKey(), userAndPasswd.getValue(), -1))) { + try { + conn.setLastException(new Exception("discard connection due to change SQL_LOG_BIN in this session"), + true); + conn.setNetworkTimeoutNanos(LONG_ENOUGH_TIMEOUT_FOR_DDL_ON_XPROTO_CONN * 1000000L); + //disable sql_log_bin + conn.setSessionVariables(variables); + SQLRecorderLogger.ddlLogger.info( + String.format("begin to execute import tablespace command %s, in host: %s, db:%s", importTableSpace, + targetHost, phyDbName)); + conn.execQuery(importTableSpace); + SQLRecorderLogger.ddlLogger.info( + String.format("finish execute import tablespace command %s, in host: %s, db:%s", importTableSpace, + targetHost, phyDbName)); + } finally { + variables.clear(); + //reset + conn.setSessionVariables(variables); + } + + } catch (Exception ex) { + try { + if (tableSpaceExistError(ex.toString())) { + //pass + } else { + throw ex; + } + } catch (SQLException e) { + throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, e, "import tablespace error"); + } + } + } + + private boolean tableSpaceExistError(String errMsg) { + if (StringUtils.isEmpty(errMsg)) { + return false; + } + String pattern = "tablespace.*exists"; + Pattern regex = Pattern.compile(pattern); + Matcher matcher = regex.matcher(errMsg.toLowerCase()); + if (matcher.find()) { + return true; + } else { + return false; + } + } + + public void rollbackImpl(ExecutionContext executionContext) { + } + + @Override + protected void beforeRollbackTransaction(ExecutionContext ec) { + rollbackImpl(ec); + } + + @Override + public String remark() { + return "|alter table " + phyTableName + " import tablespace, phyDb:" + phyDbName + " host:" + targetHost; + } + + public List explainInfo() { + String importTableSpace = "alter table " + phyTableName + " import tablespace"; + List command = new ArrayList<>(1); + command.add(importTableSpace); + return command; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/InitNewStorageInstTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/InitNewStorageInstTask.java index df061d6f4..8766af3f0 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/InitNewStorageInstTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/InitNewStorageInstTask.java @@ -17,6 +17,7 @@ package com.alibaba.polardbx.executor.ddl.job.task.basic; import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.executor.ddl.job.meta.CommonMetaChanger; import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; @@ -25,7 +26,10 @@ import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.gms.listener.impl.MetaDbDataIdBuilder; import com.alibaba.polardbx.gms.topology.DbGroupInfoRecord; +import com.alibaba.polardbx.gms.topology.DbInfoAccessor; +import com.alibaba.polardbx.gms.util.MetaDbUtil; import
com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; import lombok.Getter; import java.sql.Connection; @@ -64,18 +68,33 @@ public void executeImpl(Connection metaDbConnection, ExecutionContext executionC protected void rollbackImpl(Connection metaDbConnection, ExecutionContext executionContext) { Long socketTimeout = executionContext.getParamManager().getLong(ConnectionParams.SOCKET_TIMEOUT); long socketTimeoutVal = socketTimeout != null ? socketTimeout : -1; - for (Map.Entry>> entry : instGroupDbInfos.entrySet()) { - for (Pair pair : entry.getValue()) { - //update the type to removable - ScaleOutUtils.updateGroupType(schemaName, Arrays.asList(pair.getKey()), - DbGroupInfoRecord.GROUP_TYPE_ADDED, DbGroupInfoRecord.GROUP_TYPE_REMOVING, - metaDbConnection); - ScaleOutUtils.doRemoveNewGroupFromDb(schemaName, pair.getKey(), pair.getValue(), - entry.getKey(), socketTimeoutVal, LOGGER, metaDbConnection); - FailPoint.injectRandomExceptionFromHint(executionContext); - FailPoint.injectRandomSuspendFromHint(executionContext); + + try (Connection conn = MetaDbUtil.getConnection()) { + conn.setAutoCommit(false); + DbInfoAccessor accessor = new DbInfoAccessor(); + accessor.setConnection(metaDbConnection); + + //avoid rollback parallelly + accessor.getDbInfoByDbNameForUpdate(schemaName); + for (Map.Entry>> entry : instGroupDbInfos.entrySet()) { + for (Pair pair : entry.getValue()) { + //update the type to removable + ScaleOutUtils.updateGroupType(schemaName, Arrays.asList(pair.getKey()), + DbGroupInfoRecord.GROUP_TYPE_ADDED, DbGroupInfoRecord.GROUP_TYPE_REMOVING, + conn); + ScaleOutUtils.doRemoveNewGroupFromDb(schemaName, pair.getKey(), pair.getValue(), + entry.getKey(), socketTimeoutVal, LOGGER, conn); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + } } + conn.commit(); + } catch (Exception ex) { + SQLRecorderLogger.ddlLogger.error("InitNewStorageInstTask rollback failed due to:" + ex.getMessage()); + throw GeneralUtil.nestedException(ex); } + + CommonMetaChanger.sync(MetaDbDataIdBuilder.getDbTopologyDataId(schemaName)); } @Override @@ -95,6 +114,5 @@ protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionC @Override protected void onRollbackSuccess(ExecutionContext executionContext) { - CommonMetaChanger.sync(MetaDbDataIdBuilder.getDbTopologyDataId(schemaName)); } } \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/LogicalHandleSequenceTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/LogicalHandleSequenceTask.java new file mode 100644 index 000000000..2dc7b71e0 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/LogicalHandleSequenceTask.java @@ -0,0 +1,118 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
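Editor's aside: the InitNewStorageInstTask change above serializes concurrent rollbacks by taking a row lock (getDbInfoByDbNameForUpdate) before removing the added groups, and only commits once all of them are gone. The generic JDBC idiom, as a self-contained sketch with an illustrative db_info table rather than the real GMS schema:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class SerializedRollbackSketch {
    static void rollbackOneSchema(String url, String schemaName) throws SQLException {
        try (Connection conn = DriverManager.getConnection(url)) {
            conn.setAutoCommit(false);
            try (PreparedStatement lock = conn.prepareStatement(
                "SELECT db_name FROM db_info WHERE db_name = ? FOR UPDATE")) {
                lock.setString(1, schemaName);
                lock.executeQuery(); // blocks until any peer rollback commits
                // ... remove the added groups here, on this same connection,
                // so the cleanup work and the row lock share one transaction ...
                conn.commit();       // releases the lock only after cleanup is durable
            } catch (SQLException e) {
                conn.rollback();     // also releases the row lock
                throw e;
            }
        }
    }
}
```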
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.constants.SequenceAttribute; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.properties.DynamicConfig; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.common.utils.TStringUtil; +import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.gms.util.SequenceUtil; +import com.alibaba.polardbx.gms.metadb.seq.SequenceBaseRecord; +import com.alibaba.polardbx.gms.metadb.seq.SequencesAccessor; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.sequence.SequenceManagerProxy; +import com.alibaba.polardbx.sequence.exception.SequenceException; +import lombok.Getter; +import org.apache.calcite.sql.SequenceBean; + +import java.sql.Connection; + +import static com.alibaba.polardbx.common.constants.SequenceAttribute.AUTO_SEQ_PREFIX; + +/** + * Created by zhuqiwei. + * + * @author zhuqiwei + */ +@Getter +@TaskName(name = "LogicalHandleSequenceTask") +public class LogicalHandleSequenceTask extends BaseGmsTask { + private SequenceBean sequenceBean; + + @JSONCreator + public LogicalHandleSequenceTask(String schemaName, String logicalTableName, SequenceBean sequenceBean) { + super(schemaName, logicalTableName); + this.sequenceBean = sequenceBean; + onExceptionTryRecoveryThenRollback(); + } + + @Override + public void executeImpl(Connection metaDbConn, ExecutionContext executionContext) { + SequencesAccessor sequencesAccessor = new SequencesAccessor(); + sequencesAccessor.setConnection(metaDbConn); + + final String seqSchema = sequenceBean.getSchemaName(); + final String seqName = sequenceBean.getName(); + + SequenceBaseRecord record = SequenceUtil.convert(sequenceBean, null, executionContext); + + long newSeqCacheSize = executionContext.getParamManager().getLong(ConnectionParams.NEW_SEQ_CACHE_SIZE); + newSeqCacheSize = newSeqCacheSize < 1 ? 
0 : newSeqCacheSize; + + switch (sequenceBean.getKind()) { + case CREATE_SEQUENCE: + sequencesAccessor.insert(record, newSeqCacheSize, + SequenceUtil.buildFailPointInjector(executionContext)); + break; + case ALTER_SEQUENCE: + boolean alterWithoutTypeChange = true; + + if (sequenceBean.getToType() != null && sequenceBean.getToType() != SequenceAttribute.Type.NA) { + Pair recordPair = + SequenceUtil.change(sequenceBean, null, executionContext); + if (recordPair != null) { + sequencesAccessor.change(recordPair, newSeqCacheSize, + SequenceUtil.buildFailPointInjector(executionContext)); + alterWithoutTypeChange = false; + } + } + + if (alterWithoutTypeChange) { + SequenceAttribute.Type + existingType = SequenceManagerProxy.getInstance().checkIfExists(seqSchema, seqName); + /** + * When the ALTER statement does not change the sequence type, a group sequence only allows START WITH to be modified, and a time-based sequence allows no modification at all. + * */ + if (existingType == SequenceAttribute.Type.GROUP && sequenceBean.getStart() != null + || existingType != SequenceAttribute.Type.GROUP && existingType != SequenceAttribute.Type.TIME) { + sequencesAccessor.update(record, newSeqCacheSize); + } + } + + break; + case DROP_SEQUENCE: + if (!DynamicConfig.getInstance().isSupportDropAutoSeq() + && TStringUtil.startsWithIgnoreCase(seqName, AUTO_SEQ_PREFIX)) { + throw new SequenceException( + "A sequence associated with a table is not allowed to be dropped separately"); + } + + sequencesAccessor.delete(record); + + break; + case RENAME_SEQUENCE: + sequencesAccessor.rename(record); + break; + default: + throw new SequenceException("Unexpected operation: " + sequenceBean.getKind()); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/LogicalInsertTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/LogicalInsertTask.java index 3b8bc3905..8283998c2 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/LogicalInsertTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/LogicalInsertTask.java @@ -103,7 +103,8 @@ protected void executeImpl(ExecutionContext executionContext) { if (currentParameter != null) { // a non-null value means a PreparedStatement is being executed: newInsert has already been parameterized and must run with the attached values parameters.setParams(currentParameter); - params = currentParameter.values().stream().map(ParameterContext::getValue).collect(Collectors.toList()); + params = + currentParameter.values().stream().map(ParameterContext::getValue).collect(Collectors.toList()); } executionContext.setParams(parameters); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/LogicalSequenceValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/LogicalSequenceValidateTask.java new file mode 100644 index 000000000..b01f45835 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/LogicalSequenceValidateTask.java @@ -0,0 +1,48 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.executor.ddl.job.task.BaseValidateTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.ddl.job.validator.SequenceValidator; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import org.apache.calcite.sql.SequenceBean; + +/** + * Created by zhuqiwei. + * + * @author zhuqiwei + */ + +@Getter +@TaskName(name = "LogicalSequenceValidateTask") +public class LogicalSequenceValidateTask extends BaseValidateTask { + private SequenceBean sequenceBean; + + @JSONCreator + public LogicalSequenceValidateTask(String schemaName, SequenceBean sequenceBean) { + super(schemaName); + this.sequenceBean = sequenceBean; + } + + @Override + public void executeImpl(ExecutionContext executionContext) { + SequenceValidator.validate(sequenceBean, executionContext, true); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ModifyPartitionKeyRemoveTableStatisticTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ModifyPartitionKeyRemoveTableStatisticTask.java new file mode 100644 index 000000000..fdfd54c96 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ModifyPartitionKeyRemoveTableStatisticTask.java @@ -0,0 +1,54 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
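Editor's aside: the DROP_SEQUENCE branch of LogicalHandleSequenceTask above refuses to drop an AUTO_SEQ_-prefixed sequence (the implicit sequence behind an AUTO_INCREMENT table) unless DynamicConfig allows it. The guard in isolation, as a runnable sketch with a plain RuntimeException standing in for SequenceException:

```java
public class DropSequenceGuardSketch {
    static final String AUTO_SEQ_PREFIX = "AUTO_SEQ_";

    static void checkDroppable(String seqName, boolean supportDropAutoSeq) {
        // case-insensitive startsWith, like TStringUtil.startsWithIgnoreCase
        if (!supportDropAutoSeq
            && seqName.regionMatches(true, 0, AUTO_SEQ_PREFIX, 0, AUTO_SEQ_PREFIX.length())) {
            throw new RuntimeException(
                "A sequence associated with a table is not allowed to be dropped separately");
        }
    }

    public static void main(String[] args) {
        checkDroppable("my_seq", false); // passes silently
        try {
            checkDroppable("auto_seq_orders", false);
        } catch (RuntimeException e) {
            System.out.println(e.getMessage()); // the prefix match ignores case
        }
    }
}
```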
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.ddl.job.meta.CommonMetaChanger; +import com.alibaba.polardbx.executor.ddl.job.task.BaseSyncTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +import java.util.List; + +/** + * @author wumu + */ +@Getter +@TaskName(name = "ModifyPartitionKeyRemoveTableStatisticTask") +public class ModifyPartitionKeyRemoveTableStatisticTask extends BaseSyncTask { + private final String tableName; + private final List columnList; + + public ModifyPartitionKeyRemoveTableStatisticTask(String schemaName, String tableName, List columnList) { + super(schemaName); + this.tableName = tableName; + this.columnList = columnList; + } + + @Override + protected void executeImpl(ExecutionContext executionContext) { + if (GeneralUtil.isNotEmpty(columnList)) { + CommonMetaChanger.alterTableColumnFinalOperationsOnSuccess(schemaName, tableName, columnList); + } + } + + @Override + protected String remark() { + return "|tableName: " + tableName; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/MoveDatabaseAddMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/MoveDatabaseAddMetaTask.java index bf3761224..014151c4f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/MoveDatabaseAddMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/MoveDatabaseAddMetaTask.java @@ -24,10 +24,10 @@ import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; -import com.alibaba.polardbx.executor.sync.TableMetaChangePreemptiveSyncAction; import com.alibaba.polardbx.executor.sync.TablesMetaChangePreemptiveSyncAction; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.tablegroup.ComplexTaskOutlineRecord; import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager; import com.alibaba.polardbx.optimizer.context.ExecutionContext; @@ -110,7 +110,8 @@ protected void onRollbackSuccess(ExecutionContext executionContext) { //for creating status SyncManagerHelper.sync( new TablesMetaChangePreemptiveSyncAction(schemaName, objectNames, 1500L, 1500L, - TimeUnit.MICROSECONDS)); + TimeUnit.MICROSECONDS), + SyncScope.ALL); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/MoveDatabaseReleaseXLockTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/MoveDatabaseReleaseXLockTask.java index f4f6d315f..cd7bb31cf 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/MoveDatabaseReleaseXLockTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/MoveDatabaseReleaseXLockTask.java @@ -35,6 +35,7 @@ public class MoveDatabaseReleaseXLockTask extends BaseSyncTask { protected String targetSchemaName; + public MoveDatabaseReleaseXLockTask(String schema, String targetSchemaName) { super(schema); this.targetSchemaName = targetSchemaName; @@ -51,7 +52,8 @@ protected void 
executeImpl(ExecutionContext executionContext) { } } catch (Exception e) { LOGGER.error(String.format( - "error occurs while MoveDatabaseReleaseXLock, schemaName:%s, schemaXLockToRelease:%s", schemaName, targetSchemaName)); + "error occurs while MoveDatabaseReleaseXLock, schemaName:%s, schemaXLockToRelease:%s", schemaName, + targetSchemaName)); throw GeneralUtil.nestedException(e); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/MoveDatabaseSwitchDataSourcesTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/MoveDatabaseSwitchDataSourcesTask.java index 7aa1b963d..dbad1217e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/MoveDatabaseSwitchDataSourcesTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/MoveDatabaseSwitchDataSourcesTask.java @@ -60,7 +60,7 @@ public void executeImpl(Connection metaDbConnection, ExecutionContext executionC .switchGroupStorageInfos(schemaName, entry.getKey(), entry.getValue().getKey(), targetGroup, entry.getValue().getValue(), metaDbConnection); - ScaleOutUtils.updateGroupType(schemaName, GroupInfoUtil.buildScaloutGroupName(entry.getKey()), + ScaleOutUtils.updateGroupType(schemaName, GroupInfoUtil.buildScaleOutGroupName(entry.getKey()), DbGroupInfoRecord.GROUP_TYPE_SCALEOUT_FINISHED, metaDbConnection); } updateSupportedCommands(true, false, metaDbConnection); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/MoveDatabaseValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/MoveDatabaseValidateTask.java index 0c4ba3f6f..18d831393 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/MoveDatabaseValidateTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/MoveDatabaseValidateTask.java @@ -49,7 +49,7 @@ public class MoveDatabaseValidateTask extends BaseValidateTask { */ Map primaryTableVersions; - public MoveDatabaseValidateTask(String schemaName, String targetSchema, Map primaryTableVersions ) { + public MoveDatabaseValidateTask(String schemaName, String targetSchema, Map primaryTableVersions) { super(schemaName); this.primaryTableVersions = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); this.primaryTableVersions.putAll(primaryTableVersions); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/OptimizeTablePhyDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/OptimizeTablePhyDdlTask.java index ef5c0661a..2804b7b1f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/OptimizeTablePhyDdlTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/OptimizeTablePhyDdlTask.java @@ -39,24 +39,25 @@ public OptimizeTablePhyDdlTask(String schemaName, PhysicalPlanData physicalPlanD onExceptionTryRollback(); } - public List partition(int physicalTableCount){ + public List partition(int physicalTableCount) { List>>> topos = this.physicalPlanData.partitionTableTopology(physicalTableCount); - List>> params = this.physicalPlanData.partitionParamsList(physicalTableCount); + List>> params = + this.physicalPlanData.partitionParamsList(physicalTableCount); List result = new ArrayList<>(); - for(int i=0;i>> topoMap = topos.get(i); p.setTableTopology(topoMap); List> 
paramList = new ArrayList<>(); - for(Map.Entry>> entry: topoMap.entrySet()){ + for (Map.Entry>> entry : topoMap.entrySet()) { int size = entry.getValue().size(); - for(int j=0;j parameterContextMap = new HashMap<>(); - parameterContextMap.put(1, new ParameterContext(ParameterMethod.setTableName, new Object[]{ - 1, entry.getValue().get(j).get(0) + parameterContextMap.put(1, new ParameterContext(ParameterMethod.setTableName, new Object[] { + 1, entry.getValue().get(j).get(0) })); paramList.add(parameterContextMap); } @@ -66,22 +67,23 @@ public List partition(int physicalTableCount){ result.add(new OptimizeTablePhyDdlTask(this.schemaName, p)); } - if(FailPoint.isAssertEnable()){ + if (FailPoint.isAssertEnable()) { result.forEach(OptimizeTablePhyDdlTask::validatePartitionPlan); } return result; } - private void validatePartitionPlan(){ - if(FailPoint.isAssertEnable()){ + private void validatePartitionPlan() { + if (FailPoint.isAssertEnable()) { int index = 0; for (Map.Entry>> topology : this.physicalPlanData.getTableTopology().entrySet()) { for (List phyTableNames : topology.getValue()) { - final String phyTableName = (String) this.physicalPlanData.getParamsList().get(index++).get(1).getValue(); - if(!StringUtils.equalsIgnoreCase( - phyTableNames.get(0).replace("`",""), - phyTableName.replace("`",""))){ + final String phyTableName = + (String) this.physicalPlanData.getParamsList().get(index++).get(1).getValue(); + if (!StringUtils.equalsIgnoreCase( + phyTableNames.get(0).replace("`", ""), + phyTableName.replace("`", ""))) { throw new RuntimeException("generate optimize table plan error"); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/PhysicalBackfillTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/PhysicalBackfillTask.java new file mode 100644 index 000000000..8694f436f --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/PhysicalBackfillTask.java @@ -0,0 +1,752 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
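Editor's aside: validatePartitionPlan in OptimizeTablePhyDdlTask above cross-checks each physical plan parameter against the topology entry it should target, comparing names with backticks stripped and case ignored. That comparison on its own, as a tiny self-contained sketch:

```java
public class PhyTableNameCheckSketch {
    static boolean sameTable(String fromTopology, String fromParams) {
        // mirror the task's check: strip quoting backticks, ignore case
        return fromTopology.replace("`", "")
            .equalsIgnoreCase(fromParams.replace("`", ""));
    }

    public static void main(String[] args) {
        System.out.println(sameTable("`t1_00`", "T1_00")); // true
        System.out.println(sameTable("`t1_00`", "t1_01")); // false: the task
        // would raise "generate optimize table plan error" for this mismatch
    }
}
```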
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.polardbx.common.ddl.newengine.DdlTaskState; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.executor.backfill.BatchConsumer; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.ddl.newengine.cross.CrossEngineValidator; +import com.alibaba.polardbx.executor.ddl.workqueue.BackFillThreadPool; +import com.alibaba.polardbx.executor.ddl.workqueue.PriorityFIFOTask; +import com.alibaba.polardbx.executor.physicalbackfill.PhysicalBackfillManager; +import com.alibaba.polardbx.executor.physicalbackfill.PhysicalBackfillUtils; +import com.alibaba.polardbx.executor.physicalbackfill.physicalBackfillLoader; +import com.alibaba.polardbx.gms.partition.PhysicalBackfillDetailInfoFieldJSON; +import com.alibaba.polardbx.gms.topology.DbGroupInfoRecord; +import com.alibaba.polardbx.gms.topology.DbInfoManager; +import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.config.table.ScaleOutPlanUtil; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.rpc.pool.XConnection; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; +import com.google.common.collect.ImmutableList; +import com.mysql.cj.polarx.protobuf.PolarxPhysicalBackfill; +import lombok.Getter; +import org.apache.commons.lang3.StringUtils; + +import java.sql.SQLException; +import java.text.DecimalFormat; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.BitSet; +import java.util.Calendar; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +/** + * Created by luoyanxin. 
+ * + * @author luoyanxin + */ +@Getter +@TaskName(name = "PhysicalBackfillTask") +public class PhysicalBackfillTask extends BaseDdlTask { + + private final String schemaName; + private final String logicalTableName; + private final Long backfillId;// use the taskId of CloneTableDataFileTask + private final long batchSize; + + private final long parallelism; + private final long minUpdateBatch; + private final String physicalTableName; + private final List phyPartitionNames; + private final Pair sourceTargetGroup; + private final Pair sourceTargetDnId; + private final boolean newPartitionDb; + private final Map> storageInstAndUserInfos; + private final boolean waitLsn; + private final boolean encrypted; + + //don't serialize those parameters + private transient long lastUpdateTime = 0L; + private transient Object lock = new Object(); + private transient volatile long curSpeedLimit; + private transient PhysicalBackfillManager backfillManager; + + //todo broadcast table: 1-to-N (new DNs) and N-to-M, where M = N * k and k is the replica count + //the type must not be REFRESH_TOPOLOGY; it needs to be move table + public PhysicalBackfillTask(String schemaName, + Long backfillId, + String logicalTableName, + String physicalTableName, + List phyPartitionNames, + Pair sourceTargetGroup, + Pair sourceTargetDnId, + Map> storageInstAndUserInfos, + long batchSize, + long parallelism, + long minUpdateBatch, + boolean waitLsn, + boolean encrypted) { + super(schemaName); + this.schemaName = schemaName; + this.backfillId = backfillId; + this.logicalTableName = logicalTableName; + this.physicalTableName = physicalTableName.toLowerCase(); + this.phyPartitionNames = phyPartitionNames; + this.sourceTargetGroup = sourceTargetGroup; + this.sourceTargetDnId = sourceTargetDnId; + this.storageInstAndUserInfos = storageInstAndUserInfos; + this.batchSize = batchSize; + this.parallelism = Math.max(parallelism, 1); + this.minUpdateBatch = minUpdateBatch; + this.waitLsn = waitLsn; + this.encrypted = encrypted; + + this.curSpeedLimit = OptimizerContext.getContext(schemaName).getParamManager() + .getLong(ConnectionParams.PHYSICAL_BACKFILL_SPEED_LIMIT); + this.newPartitionDb = DbInfoManager.getInstance().isNewPartitionDb(schemaName); + if (!newPartitionDb && sourceTargetGroup == null) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + "missing source-target group mapping entry"); + } + backfillManager = new PhysicalBackfillManager(schemaName); + } + + @Override + protected void beforeTransaction(ExecutionContext executionContext) { + updateTaskStateInNewTxn(DdlTaskState.DIRTY); + executeImpl(executionContext); + } + + public void executeImpl(ExecutionContext ec) { + physicalBackfillLoader loader = new physicalBackfillLoader(schemaName, logicalTableName); + + doExtract(ec, new BatchConsumer() { + @Override + public void consume(Pair targetDbAndGroup, + Pair targetFileAndDir, + List> targetHosts, + Pair userInfo, + PolarxPhysicalBackfill.TransferFileDataOperator transferFileData) { + loader.applyBatch(targetDbAndGroup, targetFileAndDir, targetHosts, userInfo, transferFileData, ec); + } + }); + } + + protected void rollbackImpl(ExecutionContext ec) { + //drop the physical table before removing the ibd file; + // otherwise, once the tablespace has been imported, the .frm can't be removed via drop table + // and the same table can't be created in the next round + String dropPhyTable = "drop table if exists " + physicalTableName; + + ///!!!!!!DANGER!!!!
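```java
// Editor's note on the ordering below: the physical table is dropped on every
// target host *before* PhysicalBackfillUtils.rollbackCopyIbd removes the
// copied files. As the comments above explain, once a tablespace has been
// imported, deleting the .ibd first would leave a .frm that DROP TABLE can no
// longer clean up, blocking re-creation of the same physical table on the
// next attempt. beforeRollbackTransaction additionally waits for the LSN on
// the target storage nodes (when waitLsn is set) before any of this runs.
```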
+ // can't change variables via sql bypass CN + // String disableBinlog = "SET SESSION sql_log_bin=0"; + HashMap variables = new HashMap<>(); + boolean ignore = false; + try { + DbGroupInfoRecord srcDbGroupInfoRecord = + ScaleOutPlanUtil.getDbGroupInfoByGroupName(sourceTargetGroup.getKey()); + DbGroupInfoRecord tarDbGroupInfoRecord = + ScaleOutPlanUtil.getDbGroupInfoByGroupName(sourceTargetGroup.getValue()); + + PhysicalBackfillManager.BackfillBean physicalBackfillRecord = + backfillManager.loadBackfillMeta(backfillId, schemaName, srcDbGroupInfoRecord.phyDbName.toLowerCase(), + physicalTableName, + GeneralUtil.isEmpty(phyPartitionNames) ? "" : phyPartitionNames.get(0)); + + PhysicalBackfillDetailInfoFieldJSON detailInfoFieldJSON = physicalBackfillRecord.backfillObject.detailInfo; + final String targetStorageId = sourceTargetDnId.getValue(); + Pair userAndPasswd = storageInstAndUserInfos.get(targetStorageId); + boolean healthyCheck = + ec.getParamManager().getBoolean(ConnectionParams.PHYSICAL_BACKFILL_STORAGE_HEALTHY_CHECK); + for (Pair targetHost : detailInfoFieldJSON.getTargetHostAndPorts()) { + ignore = false; + try ( + XConnection conn = (XConnection) (PhysicalBackfillUtils.getXConnectionForStorage( + tarDbGroupInfoRecord.phyDbName.toLowerCase(), + targetHost.getKey(), targetHost.getValue(), userAndPasswd.getKey(), userAndPasswd.getValue(), + -1))) { + try { + //disable sql_log_bin + conn.setLastException( + new Exception("discard connection due to change SQL_LOG_BIN in this session"), true); + variables.put(PhysicalBackfillUtils.SQL_LOG_BIN, "OFF"); + conn.setSessionVariables(variables); + SQLRecorderLogger.ddlLogger.info( + String.format( + "revert: begin to drop physical table before remove the ibd file %s, in host: %s, db:%s", + dropPhyTable, + targetHost, tarDbGroupInfoRecord.phyDbName.toLowerCase())); + conn.execQuery(dropPhyTable); + SQLRecorderLogger.ddlLogger.info( + String.format( + "revert: finish drop physical table before remove the ibd file %s, in host: %s, db:%s", + dropPhyTable, + targetHost, tarDbGroupInfoRecord.phyDbName.toLowerCase())); + } finally { + variables.clear(); + //reset + conn.setSessionVariables(variables); + } + } catch (Exception ex) { + if (ex != null && ex.toString() != null && ex.toString().indexOf("connect fail") != -1) { + List> hostsIpAndPort = + PhysicalBackfillUtils.getMySQLServerNodeIpAndPorts(targetStorageId, healthyCheck); + Optional> targetHostOpt = + hostsIpAndPort.stream().filter(o -> o.getKey().equalsIgnoreCase(targetHost.getKey()) + && o.getValue().intValue() == targetHost.getValue().intValue()).findFirst(); + if (!targetHostOpt.isPresent()) { + //maybe backup in other host + ignore = true; + } + } + throw ex; + } + } + } catch (Exception ex) { + SQLRecorderLogger.ddlLogger.info( + "drop physical table error:" + (ex == null ? "" : ex.toString()) + " ignore=" + ignore); + if (!ignore) { + throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, ex, "drop physical table error"); + } + } + + PhysicalBackfillUtils.rollbackCopyIbd(backfillId, schemaName, logicalTableName, 2, ec); + } + + @Override + protected void beforeRollbackTransaction(ExecutionContext ec) { + if (waitLsn) { + SQLRecorderLogger.ddlLogger.info("begin wait lsn when rollback PhysicalBackfillTask"); + Map targetGroupAndStorageIdMap = new HashMap<>(); + targetGroupAndStorageIdMap.put(sourceTargetGroup.getValue(), sourceTargetDnId.getValue()); + PhysicalBackfillUtils.waitLsn(schemaName, targetGroupAndStorageIdMap, true, ec); + SQLRecorderLogger.ddlLogger.info("finish wait lsn when rollback PhysicalBackfillTask"); + } + rollbackImpl(ec); + } + + @Override + public String remark() { + return "|physical backfill for table:" + physicalTableName + " from group:" + sourceTargetGroup.getKey() + + " to " + sourceTargetGroup.getValue(); + } + + public void doExtract(ExecutionContext ec, BatchConsumer batchConsumer) { + PhysicalBackfillUtils.checkInterrupted(ec, null); + + DbGroupInfoRecord srcDbGroupInfoRecord = ScaleOutPlanUtil.getDbGroupInfoByGroupName(sourceTargetGroup.getKey()); + DbGroupInfoRecord tarDbGroupInfoRecord = + ScaleOutPlanUtil.getDbGroupInfoByGroupName(sourceTargetGroup.getValue()); + + assert srcDbGroupInfoRecord != null; + assert tarDbGroupInfoRecord != null; + + Pair srcDbAndGroup = + Pair.of(srcDbGroupInfoRecord.phyDbName.toLowerCase(), srcDbGroupInfoRecord.groupName); + Pair targetDbAndGroup = + Pair.of(tarDbGroupInfoRecord.phyDbName.toLowerCase(), tarDbGroupInfoRecord.groupName); + // in case this task is restarted and GeneralUtil.isEmpty(phyPartNames)==false + boolean hasNoPhyPart = + GeneralUtil.isEmpty(phyPartitionNames) || phyPartitionNames.size() == 1 && StringUtils.isEmpty( + phyPartitionNames.get(0)); + if (hasNoPhyPart && GeneralUtil.isEmpty(phyPartitionNames)) { + phyPartitionNames.add(""); + } + for (String phyPartName : phyPartitionNames) { + foreachPhysicalFile(srcDbAndGroup, targetDbAndGroup, phyPartName, batchConsumer, ec); + } + + } + + public void foreachPhysicalFile(final Pair srcDbAndGroup, + final Pair targetDbAndGroup, + final String phyPartName, + final BatchConsumer consumer, + final ExecutionContext ec) { + + //1 copy to target dn + //2 delete temp ibd file + + String msg = + "begin to backfill the ibd file for table[" + srcDbAndGroup.getKey() + ":" + physicalTableName + "]"; + SQLRecorderLogger.ddlLogger.info(msg); + + Pair srcUserInfo = storageInstAndUserInfos.get(sourceTargetDnId.getKey()); + List> offsetAndSize = new ArrayList<>(); + + final Pair targetFileAndDir; + + PhysicalBackfillManager.BackfillBean initBean = + backfillManager.loadBackfillMeta(backfillId, schemaName, srcDbAndGroup.getKey(), physicalTableName, + phyPartName); + Pair srcFileAndDir = null; + Pair sourceHost = null; + final Pair tempFileAndDir; + if (initBean.isEmpty() || initBean.isInit()) { + throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, + "the status of BackfillBean is empty or init"); + } + + if (initBean.isSuccess()) { + return; + } + sourceHost = initBean.backfillObject.detailInfo.getSourceHostAndPort(); + srcFileAndDir = Pair.of(initBean.backfillObject.sourceFileName, initBean.backfillObject.sourceDirName); + tempFileAndDir = srcFileAndDir; + targetFileAndDir = + Pair.of(initBean.backfillObject.targetFileName, initBean.backfillObject.targetDirName); + //update the offsetAndSize +
PhysicalBackfillUtils.getTempIbdFileInfo(srcUserInfo, sourceHost, srcDbAndGroup, physicalTableName, + phyPartName, srcFileAndDir, batchSize, + true, offsetAndSize); + + BitSet bitSet; + long[] bitSetPosMark = null; + + assert !initBean.isInit(); + + PhysicalBackfillDetailInfoFieldJSON detailInfo = initBean.backfillObject.detailInfo; + + if (detailInfo != null) { + bitSetPosMark = detailInfo.getBitSet(); + } else { + detailInfo = new PhysicalBackfillDetailInfoFieldJSON(); + } + + List futures = new ArrayList<>(16); + AtomicReference excep = new AtomicReference<>(null); + final AtomicInteger successBatch = new AtomicInteger(0); + final List> targetHost = detailInfo.getTargetHostAndPorts(); + final Pair sourceHostIpAndPort = detailInfo.getSourceHostAndPort(); + final AtomicReference interrupted = new AtomicReference<>(false); + + // copy the .cfg/.cfp file before .ibd file + + copyCfgFile(srcFileAndDir, srcDbAndGroup, sourceHostIpAndPort, + targetFileAndDir, targetDbAndGroup, targetHost, consumer, !initBean.isEmpty(), ec); + + if (bitSetPosMark == null || bitSetPosMark.length == 0) { + bitSet = new BitSet(offsetAndSize.size()); + } else { + bitSet = BitSet.valueOf(bitSetPosMark); + } + + long fileSize = 0l; + if (offsetAndSize.size() > 0) { + Pair lastBatch = offsetAndSize.get(offsetAndSize.size() - 1); + fileSize = lastBatch.getKey() + lastBatch.getValue(); + } + fallocateIbdFile(ec, targetFileAndDir, targetDbAndGroup, targetHost, physicalTableName, "", fileSize); + + // Use a bounded blocking queue to control the parallelism. + BlockingQueue blockingQueue = new ArrayBlockingQueue<>((int) parallelism); + + AtomicInteger startPos = new AtomicInteger(0); + + for (int i = 0; i < parallelism; i++) { + FutureTask task = new FutureTask<>(() -> { + try { + doWork(srcDbAndGroup, targetDbAndGroup, tempFileAndDir, + targetFileAndDir, offsetAndSize, startPos, bitSet, batchSize, successBatch, + minUpdateBatch, phyPartName, + sourceHostIpAndPort, targetHost, consumer, ec, interrupted, excep); + } finally { + // Poll in finally to prevent dead lock on putting blockingQueue. + blockingQueue.poll(); + } + return null; + }); + futures.add(task); + BackFillThreadPool.getInstance() + .executeWithContext(task, PriorityFIFOTask.TaskPriority.GSI_BACKFILL_TASK); + if (PhysicalBackfillUtils.miniBatchForeachThread * (i + 1) >= offsetAndSize.size()) { + break; + } + } + + if (excep.get() != null) { + // Interrupt all. 
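```java
// Editor's note: parallelism here is throttled by the bounded
// ArrayBlockingQueue sized to `parallelism`; each worker releases its slot by
// polling in a finally block (per the "prevent dead lock" comment above).
// This branch and the future.get() loop below are the fail-fast path: the
// first worker that records an exception gets every outstanding FutureTask
// cancelled with interrupt, and the shared interrupted flag stops the rest.
```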
+ futures.forEach(f -> { + try { + f.cancel(true); + } catch (Throwable ignore) { + } + }); + } + + for (Future future : futures) { + try { + future.get(); + } catch (Exception e) { + futures.forEach(f -> { + try { + f.cancel(true); + } catch (Throwable ignore) { + } + }); + if (null == excep.get()) { + excep.set(e); + } + // set interrupt + interrupted.set(true); + } + } + PhysicalBackfillManager.BackfillBean bfb = + backfillManager.loadBackfillMeta(backfillId, schemaName, srcDbAndGroup.getKey(), physicalTableName, + phyPartName); + PhysicalBackfillManager.BackfillObjectRecord bor = new PhysicalBackfillManager.BackfillObjectRecord(); + bor.setJobId(bfb.backfillObject.jobId); + bor.setSuccessBatchCount(bfb.backfillObject.successBatchCount + successBatch.get()); + bor.setExtra(bfb.backfillObject.extra); + bor.setPhysicalDb(bfb.backfillObject.physicalDb); + bor.setPhysicalTable(bfb.backfillObject.physicalTable); + bor.setPhysicalPartition(bfb.backfillObject.physicalPartition); + bor.setEndTime(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(Calendar.getInstance().getTime())); + + if (excep.get() != null) { + detailInfo.setMsg(excep.get().toString()); + detailInfo.setBitSet(bitSet.toLongArray()); + bor.setDetailInfo(PhysicalBackfillDetailInfoFieldJSON.toJson(detailInfo)); + bor.setStatus((int) PhysicalBackfillManager.BackfillStatus.FAILED.getValue()); + + backfillManager.updateBackfillObject(ImmutableList.of(bor)); + throw GeneralUtil.nestedException(excep.get()); + } + bfb.backfillObject.detailInfo.setBitSet(null); + bfb.backfillObject.detailInfo.setMsg(""); + bor.setStatus((int) PhysicalBackfillManager.BackfillStatus.SUCCESS.getValue()); + bor.setDetailInfo(PhysicalBackfillDetailInfoFieldJSON.toJson(bfb.backfillObject.detailInfo)); + bor.setSuccessBatchCount(offsetAndSize.size()); + + Pair ipPortPair = bfb.backfillObject.detailInfo.getSourceHostAndPort(); + + PhysicalBackfillUtils.deleteInnodbDataFiles(schemaName, ipPortPair, + tempFileAndDir.getValue(), srcDbAndGroup.getValue(), srcDbAndGroup.getKey(), false, ec); + + // After all physical table finished + backfillManager.updateBackfillObject(ImmutableList.of(bor)); + + msg = "already backfill the idb file for table[" + srcDbAndGroup.getKey() + ":" + physicalTableName + "]" + + phyPartName; + SQLRecorderLogger.ddlLogger.info(msg); + } + + private void doWork(final Pair srcDbAndGroup, + final Pair targetDbAndGroup, + final Pair srcFileAndDir, + final Pair targetFileAndDir, + final List> totalOffsetAndSize, + final AtomicInteger startPos, + final BitSet bitSet, + long batchSize, + final AtomicInteger successBatch, + final long minUpdateBatch, + final String phyPartName, + final Pair sourceHost, + final List> targetHost, + final BatchConsumer consumer, + final ExecutionContext ec, + final AtomicReference interrupted, + final AtomicReference excep) { + + do { + int pos = startPos.getAndAdd(PhysicalBackfillUtils.miniBatchForeachThread); + for (int i = pos; i < pos + PhysicalBackfillUtils.miniBatchForeachThread; i++) { + if (i >= totalOffsetAndSize.size()) { + return; + } + Pair offsetAndSize = totalOffsetAndSize.get(i); + + int index = (int) (offsetAndSize.getKey() / batchSize); + if (!bitSet.get(index)) { + if (CrossEngineValidator.isJobInterrupted(ec) || Thread.currentThread().isInterrupted() + || interrupted.get()) { + long jobId = ec.getDdlJobId(); + excep.set(new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, + "The job '" + jobId + "' has been cancelled")); + interrupted.set(true); + return; + } + + 
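```java
// Editor's note: resumability hinges on the BitSet checkpoint. A batch is
// identified by offset / batchSize; a set bit means that batch already
// reached the target, so a restarted task skips it via the bitSet.get(index)
// test above. Bits are flipped under `lock` after each successful consume,
// and every minUpdateBatch successes the BitSet is serialized into
// PhysicalBackfillDetailInfoFieldJSON and persisted to the metadb, which is
// what foreachPhysicalFile reloads through detailInfo.getBitSet() on restart.
```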
PolarxPhysicalBackfill.TransferFileDataOperator transferFileData = null; + + Pair srcUserInfo = storageInstAndUserInfos.get(sourceTargetDnId.getKey()); + Pair tarUserInfo = storageInstAndUserInfos.get(sourceTargetDnId.getValue()); + boolean success = false; + int tryTime = 1; + DecimalFormat df = new DecimalFormat("#.0"); + + Long speedLimit = OptimizerContext.getContext(schemaName).getParamManager() + .getLong(ConnectionParams.PHYSICAL_BACKFILL_SPEED_LIMIT); + if (speedLimit.longValue() != PhysicalBackfillUtils.getRateLimiter().getCurSpeedLimiter()) { + this.curSpeedLimit = speedLimit; + if (speedLimit > 0) { + double curSpeed = PhysicalBackfillUtils.getRateLimiter().getRate() / 1024; + PhysicalBackfillUtils.getRateLimiter().setRate(speedLimit.longValue()); + String msg = + "change the maximum speed limit from " + df.format(curSpeed) + "KB/s to " + + df.format(PhysicalBackfillUtils.getRateLimiter().getRate() / 1024) + + "KB/s"; + SQLRecorderLogger.ddlLogger.info(msg); + } + } + do { + // Check DDL is ongoing. + PhysicalBackfillUtils.checkInterrupted(ec, interrupted); + if (this.curSpeedLimit > 0) { + PhysicalBackfillUtils.getRateLimiter().acquire(offsetAndSize.getValue().intValue()); + } + try ( + XConnection conn = (XConnection) (PhysicalBackfillUtils.getXConnectionForStorage( + srcDbAndGroup.getKey(), + sourceHost.getKey(), sourceHost.getValue(), srcUserInfo.getKey(), + srcUserInfo.getValue(), + -1))) { + PolarxPhysicalBackfill.TransferFileDataOperator.Builder builder = + PolarxPhysicalBackfill.TransferFileDataOperator.newBuilder(); + + builder.setOperatorType( + PolarxPhysicalBackfill.TransferFileDataOperator.Type.GET_DATA_FROM_SRC_IBD); + PolarxPhysicalBackfill.FileInfo.Builder fileInfoBuilder = + PolarxPhysicalBackfill.FileInfo.newBuilder(); + fileInfoBuilder.setFileName(srcFileAndDir.getKey()); + fileInfoBuilder.setTempFile(false); + fileInfoBuilder.setDirectory(srcFileAndDir.getValue()); + fileInfoBuilder.setPartitionName(""); + builder.setFileInfo(fileInfoBuilder.build()); + builder.setBufferLen(offsetAndSize.getValue()); + builder.setOffset(offsetAndSize.getKey()); + transferFileData = conn.execReadBufferFromFile(builder); + success = true; + } catch (Exception ex) { + if (tryTime >= PhysicalBackfillUtils.MAX_RETRY) { + throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, ex); + } + tryTime++; + } + } while (!success); + consumer.consume(targetDbAndGroup, targetFileAndDir, targetHost, tarUserInfo, + transferFileData); + synchronized (lock) { + if (lastUpdateTime == 0) { + lastUpdateTime = System.currentTimeMillis(); + } + bitSet.set((int) (transferFileData.getOffset() / batchSize)); + int curSuccessBatch = successBatch.incrementAndGet(); + if (curSuccessBatch >= minUpdateBatch) { + long curTime = System.currentTimeMillis(); + //update to metadb + PhysicalBackfillDetailInfoFieldJSON detailInfo = new PhysicalBackfillDetailInfoFieldJSON(); + detailInfo.setBitSet(bitSet.toLongArray()); + detailInfo.setMsg(""); + + PhysicalBackfillManager.BackfillBean bfb = + backfillManager.loadBackfillMeta(backfillId, schemaName, srcDbAndGroup.getKey(), + physicalTableName, phyPartName); + + PhysicalBackfillManager.BackfillObjectRecord bor = + new PhysicalBackfillManager.BackfillObjectRecord(); + + detailInfo.setSourceHostAndPort(bfb.backfillObject.detailInfo.sourceHostAndPort); + detailInfo.setTargetHostAndPorts(bfb.backfillObject.detailInfo.targetHostAndPorts); + + bor.setJobId(bfb.backfillObject.jobId); + bor.setSuccessBatchCount(bfb.backfillObject.successBatchCount + 
successBatch.get());
+                                bor.setExtra(bfb.backfillObject.extra);
+                                bor.setPhysicalDb(bfb.backfillObject.physicalDb);
+                                bor.setPhysicalTable(bfb.backfillObject.physicalTable);
+                                bor.setPhysicalPartition(bfb.backfillObject.physicalPartition);
+                                bor.setEndTime(
+                                    new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(Calendar.getInstance().getTime()));
+                                bor.setStatus((int) PhysicalBackfillManager.BackfillStatus.RUNNING.getValue());
+                                bor.setDetailInfo(PhysicalBackfillDetailInfoFieldJSON.toJson(detailInfo));
+
+                                backfillManager.updateBackfillObject(ImmutableList.of(bor));
+
+                                successBatch.set(0);
+
+                                double speed =
+                                    (curSuccessBatch * batchSize) * 1000.0 / Math.max(1, curTime - lastUpdateTime) / 1024;
+
+                                // TODO: calculate the speed over the last 1000 batches / time window
+                                String msg =
+                                    "already wrote " + curSuccessBatch + " batches successfully for "
+                                        + srcFileAndDir.getValue()
+                                        + " speed:" + df.format(speed) + "KB/s, the maximum speed limit:"
+                                        + df.format(PhysicalBackfillUtils.getRateLimiter().getRate() / 1024) + "KB/s";
+                                SQLRecorderLogger.ddlLogger.info(msg);
+                                lastUpdateTime = System.currentTimeMillis();
+                            }
+                        }
+                    }
+                }
+            }
+        } while (true);
+    }
+
+    private void fallocateIbdFile(final ExecutionContext ec, final Pair<String, String> targetFileAndDir,
+                                  final Pair<String, String> tarDbAndGroup,
+                                  final List<Pair<String, Integer>> targetHosts, String physicalTableName,
+                                  String phyPartitionName, long fileSize) {
+        String msg = "begin to fallocate the ibd file:" + targetFileAndDir.getValue();
+        SQLRecorderLogger.ddlLogger.info(msg);
+
+        Pair<String, String> userInfo = storageInstAndUserInfos.get(sourceTargetDnId.getValue());
+
+        for (Pair<String, Integer> targetHost : targetHosts) {
+            boolean success = false;
+            int tryTime = 1;
+            do {
+                PhysicalBackfillUtils.checkInterrupted(ec, null);
+                try (XConnection conn = (XConnection) (PhysicalBackfillUtils.getXConnectionForStorage(
+                    tarDbAndGroup.getKey(),
+                    targetHost.getKey(), targetHost.getValue(), userInfo.getKey(), userInfo.getValue(), -1))) {
+                    PolarxPhysicalBackfill.FileManageOperator.Builder builder =
+                        PolarxPhysicalBackfill.FileManageOperator.newBuilder();
+
+                    builder.setOperatorType(PolarxPhysicalBackfill.FileManageOperator.Type.FALLOCATE_IBD);
+                    PolarxPhysicalBackfill.TableInfo.Builder tableInfoBuilder =
+                        PolarxPhysicalBackfill.TableInfo.newBuilder();
+                    tableInfoBuilder.setTableSchema(tarDbAndGroup.getKey());
+                    tableInfoBuilder.setTableName(physicalTableName);
+                    tableInfoBuilder.setPartitioned(false);
+
+                    PolarxPhysicalBackfill.FileInfo.Builder fileInfoBuilder =
+                        PolarxPhysicalBackfill.FileInfo.newBuilder();
+                    fileInfoBuilder.setTempFile(false);
+                    fileInfoBuilder.setFileName(targetFileAndDir.getKey());
+                    fileInfoBuilder.setPartitionName(phyPartitionName);
+                    fileInfoBuilder.setDirectory(targetFileAndDir.getValue());
+                    fileInfoBuilder.setDataSize(fileSize);
+
+                    tableInfoBuilder.addFileInfo(fileInfoBuilder.build());
+                    builder.setTableInfo(tableInfoBuilder.build());
+
+                    conn.execFallocateIbdFile(builder);
+                    success = true;
+                } catch (Exception ex) {
+                    SQLRecorderLogger.ddlLogger.info(ex.toString());
+                    if (tryTime > PhysicalBackfillUtils.MAX_RETRY) {
+                        throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, ex);
+                    }
+                    PhysicalBackfillUtils.checkInterrupted(ec, null);
+                    tryTime++;
+                }
+            } while (!success);
+        }
+        msg = "already fallocated the ibd file:" + targetFileAndDir.getValue();
+        SQLRecorderLogger.ddlLogger.info(msg);
+    }
+
+    private void copyCfgFile(final Pair<String, String> srcDataFileAndDir,
+                             final Pair<String, String> srcDbAndGroup,
+                             final Pair<String, Integer> sourceHostIpAndPort,
+                             final Pair<String, String> targetDataFileAndDir, final Pair<String, String> tarDbAndGroup,
+                             final List<Pair<String, Integer>> targetHosts, BatchConsumer consumer,
+                             boolean isInit,
+                             ExecutionContext ec) {
+
+        // delete the target cfg/cfp files before copying, because there is no backfill meta for them
+        String srcCfgDir;
+        String srcCfpDir;
+        List<Pair<String, String>> srcTargetFilePair = new ArrayList<>();
+        if (!isInit) {
+            srcCfgDir = PhysicalBackfillUtils.convertToCfgFileName(
+                srcDataFileAndDir.getValue() + PhysicalBackfillUtils.TEMP_FILE_POSTFIX, PhysicalBackfillUtils.CFG);
+            srcCfpDir = PhysicalBackfillUtils.convertToCfgFileName(
+                srcDataFileAndDir.getValue() + PhysicalBackfillUtils.TEMP_FILE_POSTFIX, PhysicalBackfillUtils.CFP);
+        } else {
+            srcCfgDir =
+                PhysicalBackfillUtils.convertToCfgFileName(srcDataFileAndDir.getValue(), PhysicalBackfillUtils.CFG);
+            srcCfpDir =
+                PhysicalBackfillUtils.convertToCfgFileName(srcDataFileAndDir.getValue(), PhysicalBackfillUtils.CFP);
+        }
+
+        String tarCfgDir =
+            PhysicalBackfillUtils.convertToCfgFileName(targetDataFileAndDir.getValue(), PhysicalBackfillUtils.CFG);
+        String tarCfpDir =
+            PhysicalBackfillUtils.convertToCfgFileName(targetDataFileAndDir.getValue(), PhysicalBackfillUtils.CFP);
+        srcTargetFilePair.add(Pair.of(srcCfgDir, tarCfgDir));
+        if (encrypted) {
+            srcTargetFilePair.add(Pair.of(srcCfpDir, tarCfpDir));
+        }
+
+        for (Pair<String, Integer> pair : GeneralUtil.emptyIfNull(
+            targetHosts)) {
+            PhysicalBackfillUtils.deleteInnodbDataFile(schemaName, tarDbAndGroup.getValue(), tarDbAndGroup.getKey(),
+                pair.getKey(), pair.getValue(), targetDataFileAndDir.getValue(), true, ec);
+        }
+        PolarxPhysicalBackfill.TransferFileDataOperator transferFileData = null;
+
+        Pair<String, String> srcUserInfo = storageInstAndUserInfos.get(sourceTargetDnId.getKey());
+        Pair<String, String> tarUserInfo = storageInstAndUserInfos.get(sourceTargetDnId.getValue());
+
+        for (Pair<String, String> srcTarDir : srcTargetFilePair) {
+            long offset = 0L;
+            do {
+                boolean success = false;
+                int tryTime = 0;
+                do {
+                    try (XConnection conn = (XConnection) (PhysicalBackfillUtils.getXConnectionForStorage(
+                        srcDbAndGroup.getKey(),
+                        sourceHostIpAndPort.getKey(), sourceHostIpAndPort.getValue(), srcUserInfo.getKey(),
+                        srcUserInfo.getValue(), -1))) {
+
+                        PolarxPhysicalBackfill.TransferFileDataOperator.Builder builder =
+                            PolarxPhysicalBackfill.TransferFileDataOperator.newBuilder();
+
+                        builder.setOperatorType(
+                            PolarxPhysicalBackfill.TransferFileDataOperator.Type.GET_DATA_FROM_SRC_IBD);
+                        PolarxPhysicalBackfill.FileInfo.Builder fileInfoBuilder =
+                            PolarxPhysicalBackfill.FileInfo.newBuilder();
+                        fileInfoBuilder.setFileName(srcDataFileAndDir.getKey());
+                        fileInfoBuilder.setTempFile(false);
+                        fileInfoBuilder.setDirectory(srcTarDir.getKey());
+                        fileInfoBuilder.setPartitionName("");
+                        builder.setFileInfo(fileInfoBuilder.build());
+                        builder.setBufferLen(batchSize);
+                        builder.setOffset(offset);
+                        transferFileData = conn.execReadBufferFromFile(builder);
+                        success = true;
+                    } catch (Exception ex) {
+                        if (tryTime >= PhysicalBackfillUtils.MAX_RETRY) {
+                            throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, ex);
+                        }
+                        tryTime++;
+                    }
+                } while (!success);
+                consumer.consume(tarDbAndGroup, Pair.of(srcDataFileAndDir.getKey(), srcTarDir.getValue()), targetHosts,
+                    tarUserInfo,
+                    transferFileData);
+                if (transferFileData.getBufferLen() < batchSize) {
+                    break;
+                }
+                offset += transferFileData.getBufferLen();
+            } while (true);
+        }
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RebuildTableChangeMetaTask.java
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RebuildTableChangeMetaTask.java new file mode 100644 index 000000000..177a417e3 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RebuildTableChangeMetaTask.java @@ -0,0 +1,65 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.polardbx.common.eventlogger.EventLogger; +import com.alibaba.polardbx.common.eventlogger.EventType; +import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +import java.sql.Connection; + +/** + * @author wumu + */ +@Getter +@TaskName(name = "RebuildTableChangeMetaTask") +public class RebuildTableChangeMetaTask extends BaseGmsTask { + + public RebuildTableChangeMetaTask(String schemaName, String logicalTableName) { + super(schemaName, logicalTableName); + } + + @Override + protected void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { + EventLogger.log(EventType.DDL_INFO, "Online modify column start"); + + TableInfoManager tableInfoManager = new TableInfoManager(); + tableInfoManager.setConnection(metaDbConnection); + + tableInfoManager.updateRebuildingTableFlag(schemaName, logicalTableName, false); + } + + @Override + protected void rollbackImpl(Connection metaDbConnection, ExecutionContext executionContext) { + EventLogger.log(EventType.DDL_WARN, "Online modify column rollback"); + + TableInfoManager tableInfoManager = new TableInfoManager(); + tableInfoManager.setConnection(metaDbConnection); + + tableInfoManager.updateRebuildingTableFlag(schemaName, logicalTableName, true); + } + + @Override + public String remark() { + String sb = "set rebuilding table flag on table " + this.getLogicalTableName(); + return "|" + sb; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameGsiUpdateMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameGsiUpdateMetaTask.java index 81a39b247..82c1b3851 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameGsiUpdateMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameGsiUpdateMetaTask.java @@ -22,6 +22,7 @@ import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.sync.TableMetaChangeSyncAction; import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -35,14 +36,14 
@@ public class RenameGsiUpdateMetaTask extends RenameTableUpdateMetaTask { final private String primaryTableName; public RenameGsiUpdateMetaTask(String schemaName, String primaryTableName, - String logicalTableName, String newLogicalTableName) { - super(schemaName, logicalTableName, newLogicalTableName); + String logicalTableName, String newLogicalTableName, + boolean needRenamePhyTables) { + super(schemaName, logicalTableName, newLogicalTableName, needRenamePhyTables); this.primaryTableName = primaryTableName; } @Override protected void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { - executionContext.setPhyTableRenamed(false); super.executeImpl(metaDbConnection, executionContext); try { @@ -54,15 +55,15 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi @Override protected void rollbackImpl(Connection metaDbConnection, ExecutionContext executionContext) { - executionContext.setPhyTableRenamed(false); boolean isNewPartitionDb = DbInfoManager.getInstance().isNewPartitionDb(schemaName); if (isNewPartitionDb) { TableMetaChanger .renamePartitionTableMeta(metaDbConnection, schemaName, newLogicalTableName, logicalTableName, - executionContext); + needRenamePhyTables, executionContext); } else { TableMetaChanger - .renameTableMeta(metaDbConnection, schemaName, newLogicalTableName, logicalTableName, executionContext); + .renameTableMeta(metaDbConnection, schemaName, newLogicalTableName, logicalTableName, + needRenamePhyTables, executionContext); } try { @@ -72,7 +73,7 @@ protected void rollbackImpl(Connection metaDbConnection, ExecutionContext execut } //sync have to be successful to continue - SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName)); + SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName), SyncScope.ALL); executionContext.refreshTableMeta(); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenamePartitionTablePhyDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenamePartitionTablePhyDdlTask.java deleted file mode 100644 index f440334ad..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenamePartitionTablePhyDdlTask.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-package com.alibaba.polardbx.executor.ddl.job.task.basic;
-
-import com.alibaba.fastjson.annotation.JSONCreator;
-import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
-import com.alibaba.polardbx.executor.ddl.job.task.BasePhyDdlTask;
-import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName;
-import com.alibaba.polardbx.executor.utils.failpoint.FailPoint;
-import com.alibaba.polardbx.optimizer.OptimizerContext;
-import com.alibaba.polardbx.optimizer.context.ExecutionContext;
-import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
-
-@TaskName(name = "RenamePartitionTablePhyDdlTask")
-public class RenamePartitionTablePhyDdlTask extends BasePhyDdlTask {
-
-    @JSONCreator
-    public RenamePartitionTablePhyDdlTask(String schemaName, PhysicalPlanData physicalPlanData) {
-        super(schemaName, physicalPlanData);
-    }
-
-    @Override
-    public void executeImpl(ExecutionContext executionContext) {
-        checkTableNamePatternForRename(schemaName, physicalPlanData.getLogicalTableName(), executionContext);
-        FailPoint.injectRandomExceptionFromHint(executionContext);
-        FailPoint.injectRandomSuspendFromHint(executionContext);
-        super.executeImpl(executionContext);
-    }
-
-    private void checkTableNamePatternForRename(String schemaName, String logicalTableName,
-                                                ExecutionContext executionContext) {
-
-        PartitionInfo partitionInfo =
-            OptimizerContext.getContext(schemaName).getPartitionInfoManager().getPartitionInfo(logicalTableName);
-        FailPoint.injectRandomExceptionFromHint(executionContext);
-        FailPoint.injectRandomSuspendFromHint(executionContext);
-        if (partitionInfo.isRandomTableNamePatternEnabled()) {
-            executionContext.setPhyTableRenamed(false);
-        }
-    }
-
-}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTableAddMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTableAddMetaTask.java
index fe30599d7..92d6233d2 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTableAddMetaTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTableAddMetaTask.java
@@ -17,7 +17,6 @@
 package com.alibaba.polardbx.executor.ddl.job.task.basic;
 
 import com.alibaba.fastjson.annotation.JSONCreator;
-import com.alibaba.polardbx.executor.ddl.job.meta.CommonMetaChanger;
 import com.alibaba.polardbx.executor.ddl.job.meta.TableMetaChanger;
 import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask;
 import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName;
@@ -42,14 +41,9 @@ public RenameTableAddMetaTask(String schemaName, String logicalTableName, String
 
     @Override
     protected void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) {
-        if (!executionContext.needToRenamePhyTables()) {
-            return;
-        }
-
         TableMetaChanger.addNewTableName(metaDbConnection, schemaName, logicalTableName, newLogicalTableName);
         FailPoint.injectRandomExceptionFromHint(executionContext);
         FailPoint.injectRandomSuspendFromHint(executionContext);
-        // This task seems to exist only for debugging, or for when a physical DDL fails; it is not particularly useful.
     }
 
     @Override
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTablePhyDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTablePhyDdlTask.java
index aa3de8416..62a48d05c 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTablePhyDdlTask.java
+++
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTablePhyDdlTask.java @@ -41,7 +41,6 @@ public RenameTablePhyDdlTask(String schemaName, PhysicalPlanData physicalPlanDat @Override public void executeImpl(ExecutionContext executionContext) { - checkTableNamePatternForRename(schemaName, physicalPlanData.getLogicalTableName(), executionContext); super.executeImpl(executionContext); } @@ -49,40 +48,4 @@ public void executeImpl(ExecutionContext executionContext) { protected List genRollbackPhysicalPlans(ExecutionContext executionContext) { return getPhysicalPlans(executionContext); } - - private void checkTableNamePatternForRename(String schemaName, String logicalTableName, - ExecutionContext executionContext) { - boolean hasRandomSuffixInTableNamePattern = true; - - try { - TableRule tableRule = - OptimizerContext.getContext(schemaName).getRuleManager().getTableRule(logicalTableName); - FailPoint.injectRandomExceptionFromHint(executionContext); - FailPoint.injectRandomSuspendFromHint(executionContext); - if (tableRule != null && executionContext.isRandomPhyTableEnabled()) { - String tableNamePattern = tableRule.getTbNamePattern(); - if (TStringUtil.isEmpty(tableNamePattern) - || tableNamePattern.length() <= RANDOM_SUFFIX_LENGTH_OF_PHYSICAL_TABLE_NAME) { - // Must be single or broadcast table. - hasRandomSuffixInTableNamePattern = false; - } else if (TStringUtil.startsWithIgnoreCase(tableNamePattern, logicalTableName)) { - // Not renamed yet. - String randomSuffix = tableRule.extractRandomSuffix(); - hasRandomSuffixInTableNamePattern = TStringUtil.isNotEmpty(randomSuffix); - } else { - // The table may have been renamed when logical table name - // is supported, so that the table name pattern's prefix is - // not the logical table name, so it should be safe to - // contain random string. 
- hasRandomSuffixInTableNamePattern = true; - } - } - } catch (Throwable ignored) { - } - - if (executionContext.isRandomPhyTableEnabled() && hasRandomSuffixInTableNamePattern) { - executionContext.setPhyTableRenamed(false); - } - } - } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTableSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTableSyncTask.java index 3ef54b0fa..4e99bae0d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTableSyncTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTableSyncTask.java @@ -21,6 +21,7 @@ import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.sync.RenameTableSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -40,7 +41,8 @@ public RenameTableSyncTask(String schemaName, String logicalTableName, String ne @Override protected void executeImpl(ExecutionContext executionContext) { - SyncManagerHelper.sync(new RenameTableSyncAction(schemaName, logicalTableName, newLogicalTableName), true); + SyncManagerHelper.sync(new RenameTableSyncAction(schemaName, logicalTableName, newLogicalTableName), + SyncScope.ALL, true); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTableUpdateMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTableUpdateMetaTask.java index c932b06a4..c2db2fd53 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTableUpdateMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTableUpdateMetaTask.java @@ -35,11 +35,14 @@ public class RenameTableUpdateMetaTask extends BaseGmsTask { protected String newLogicalTableName; + protected boolean needRenamePhyTables; @JSONCreator - public RenameTableUpdateMetaTask(String schemaName, String logicalTableName, String newLogicalTableName) { + public RenameTableUpdateMetaTask(String schemaName, String logicalTableName, String newLogicalTableName, + boolean needRenamePhyTables) { super(schemaName, logicalTableName); this.newLogicalTableName = newLogicalTableName; + this.needRenamePhyTables = needRenamePhyTables; } @Override @@ -50,10 +53,11 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi if (isNewPartitionDb) { TableMetaChanger .renamePartitionTableMeta(metaDbConnection, schemaName, logicalTableName, newLogicalTableName, - executionContext); + needRenamePhyTables, executionContext); } else { TableMetaChanger - .renameTableMeta(metaDbConnection, schemaName, logicalTableName, newLogicalTableName, executionContext); + .renameTableMeta(metaDbConnection, schemaName, logicalTableName, newLogicalTableName, + needRenamePhyTables, executionContext); } CommonMetaChanger.renameFinalOperationsOnSuccess(schemaName, logicalTableName, newLogicalTableName); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTableValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTableValidateTask.java index a97256893..4c74f8ff8 100644 --- 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTableValidateTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTableValidateTask.java @@ -23,6 +23,7 @@ import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; +import org.apache.calcite.sql.SqlKind; @Getter @TaskName(name = "RenameTableValidateTask") @@ -42,6 +43,7 @@ public RenameTableValidateTask(String schemaName, String logicalTableName, Strin public void executeImpl(ExecutionContext executionContext) { TableValidator.validateTableExistence(schemaName, logicalTableName, executionContext); TableValidator.validateTableNonExistence(schemaName, newLogicalTableName, executionContext); + TableValidator.validateTableWithCCI(schemaName, logicalTableName, executionContext, SqlKind.RENAME_TABLE); GsiValidator.validateAllowRenameOnTable(schemaName, logicalTableName, executionContext); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTablesCdcSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTablesCdcSyncTask.java index 24a4fedfe..c61192fb4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTablesCdcSyncTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTablesCdcSyncTask.java @@ -16,8 +16,8 @@ package com.alibaba.polardbx.executor.ddl.job.task.basic; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; import com.alibaba.polardbx.common.cdc.CdcManagerHelper; -import com.alibaba.polardbx.common.cdc.DdlVisibility; import com.alibaba.polardbx.common.cdc.ICdcManager; import com.alibaba.polardbx.common.cdc.TablesExtInfo; import com.alibaba.polardbx.common.utils.GeneralUtil; @@ -27,9 +27,9 @@ import com.alibaba.polardbx.executor.sync.LockTablesSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.sync.TablesMetaChangePreemptiveSyncAction; -import com.alibaba.polardbx.executor.sync.TablesMetaChangeSyncAction; import com.alibaba.polardbx.executor.sync.UnlockTableSyncAction; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.DdlContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -102,6 +102,7 @@ public void executeImpl(ExecutionContext executionContext) { timeUnit ), schemaName, + SyncScope.ALL, true ); @@ -135,7 +136,7 @@ public void executeImpl(ExecutionContext executionContext) { CdcManagerHelper.getInstance() .notifyDdlNew(schemaName, tableName, "RENAME_TABLE", ddlSql, ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), - DdlVisibility.Public, params, true, newTableTopologies.get(i), + CdcDdlMarkVisibility.Public, params, true, newTableTopologies.get(i), new Pair<>(collates.get(i), cdcMetas.get(i))); } @@ -144,6 +145,7 @@ public void executeImpl(ExecutionContext executionContext) { SyncManagerHelper.sync( new TablesMetaChangePreemptiveSyncAction(schemaName, tableNames, initWait, interval, timeUnit, executionContext.getConnId(), false), + SyncScope.ALL, true); } catch (Throwable t) { LOGGER.error(String.format( @@ -168,6 +170,7 @@ public void handleError(ExecutionContext executionContext) { executionContext.getTraceId() ), schemaName, + 
SyncScope.ALL,
            true
        );
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTablesUpdateDataIdTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTablesUpdateDataIdTask.java
new file mode 100644
index 000000000..389ab540d
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTablesUpdateDataIdTask.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.task.basic;
+
+import com.alibaba.fastjson.annotation.JSONCreator;
+import com.alibaba.polardbx.executor.ddl.job.meta.CommonMetaChanger;
+import com.alibaba.polardbx.executor.ddl.job.meta.TableMetaChanger;
+import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask;
+import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName;
+import com.alibaba.polardbx.executor.utils.failpoint.FailPoint;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import lombok.Getter;
+
+import java.sql.Connection;
+import java.util.List;
+
+/**
+ * @author wumu
+ */
+@Getter
+@TaskName(name = "RenameTablesUpdateDataIdTask")
+public class RenameTablesUpdateDataIdTask extends BaseGmsTask {
+
+    private List<String> oldTableNames;
+    private List<String> newTableNames;
+
+    @JSONCreator
+    public RenameTablesUpdateDataIdTask(String schemaName, List<String> oldTableNames, List<String> newTableNames) {
+        super(schemaName, null);
+        this.oldTableNames = oldTableNames;
+        this.newTableNames = newTableNames;
+        onExceptionTryRecoveryThenRollback();
+    }
+
+    @Override
+    protected void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) {
+        FailPoint.injectRandomExceptionFromHint(executionContext);
+        FailPoint.injectRandomSuspendFromHint(executionContext);
+
+        for (int i = 0; i < oldTableNames.size(); ++i) {
+            String tableName = oldTableNames.get(i);
+            String newTableName = newTableNames.get(i);
+
+            TableMetaChanger.renameTableDataId(metaDbConnection, schemaName, tableName, newTableName);
+            CommonMetaChanger.renameFinalOperationsOnSuccess(schemaName, tableName, newTableName);
+        }
+    }
+
+    @Override
+    protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) {
+        executeImpl(metaDbConnection, executionContext);
+    }
+
+    /**
+     * Only bump the version; do not sync.
+     */
+    @Override
+    protected void onExecutionSuccess(ExecutionContext executionContext) {
+    }
+
+    @Override
+    protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionContext executionContext) {
+        rollbackImpl(metaDbConnection, executionContext);
+    }
+
+    /**
+     * Only bump the version; do not sync.
+     */
+    @Override
+    protected void onRollbackSuccess(ExecutionContext executionContext) {
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTablesUpdateMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTablesUpdateMetaTask.java
index c18ddc543..3a94ae1c2 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTablesUpdateMetaTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTablesUpdateMetaTask.java
@@ -27,11 +27,13 @@
 import com.alibaba.polardbx.gms.topology.DbInfoManager;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import lombok.Getter;
-import org.apache.calcite.schema.Table;
 
 import java.sql.Connection;
 import java.util.List;
 
+/**
+ * @author wumu
+ */
 @Getter
 @TaskName(name = "RenameTablesUpdateMetaTask")
 public class RenameTablesUpdateMetaTask extends BaseGmsTask {
@@ -60,10 +62,11 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi
         if (isNewPartitionDb) {
             TableMetaChanger
                 .renamePartitionTableMeta(metaDbConnection, schemaName, tableName, newTableName,
-                    executionContext, false);
+                    executionContext, false, false);
         } else {
             TableMetaChanger
-                .renameTableMeta(metaDbConnection, schemaName, tableName, newTableName, executionContext, false);
+                .renameTableMeta(metaDbConnection, schemaName, tableName, newTableName, executionContext, false,
+                    false);
         }
         CommonMetaChanger.renameFinalOperationsOnSuccess(schemaName, tableName, newTableName);
     }
@@ -82,10 +85,10 @@ protected void rollbackImpl(Connection metaDbConnection, ExecutionContext execut
         if (isNewPartitionDb) {
             TableMetaChanger
                 .renamePartitionTableMeta(metaDbConnection, schemaName, tableName, newTableName,
-                    executionContext);
+                    false, executionContext);
         } else {
             TableMetaChanger
-                .renameTableMeta(metaDbConnection, schemaName, tableName, newTableName, executionContext);
+                .renameTableMeta(metaDbConnection, schemaName, tableName, newTableName, true, executionContext);
         }
         CommonMetaChanger.renameFinalOperationsOnSuccess(schemaName, tableName, newTableName);
     }
@@ -95,6 +98,9 @@ protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) {
         executeImpl(metaDbConnection, executionContext);
         updateTablesVersion(metaDbConnection);
+        if (!DbInfoManager.getInstance().isNewPartitionDb(schemaName)) {
+            updateTablesExtVersion(metaDbConnection);
+        }
     }
 
     /**
@@ -108,6 +114,9 @@ protected void onExecutionSuccess(ExecutionContext executionContext) {
     protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionContext executionContext) {
         rollbackImpl(metaDbConnection, executionContext);
         updateTablesVersion(metaDbConnection);
+        if (!DbInfoManager.getInstance().isNewPartitionDb(schemaName)) {
+            updateTablesExtVersion(metaDbConnection);
+        }
     }
 
     /**
@@ -141,4 +150,27 @@ private void updateTablesVersion(Connection metaDbConnection) {
             }
         }
     }
+
+    private void updateTablesExtVersion(Connection metaDbConnection) {
+        if (GeneralUtil.isNotEmpty(newTableNames)) {
+            int i = 0;
+            long maxVersion = 1;
+            try {
+                for (String tableName : newTableNames) {
+                    long version = TableInfoManager.getTableExtVersion4Rename(schemaName, tableName, metaDbConnection);
+                    maxVersion = Math.max(maxVersion, version);
+                }
+                for (String tableName : newTableNames) {
+                    TableInfoManager.updateTableExtVersion4Rename(schemaName, tableName, maxVersion + 1,
+                        metaDbConnection);
+                    i++;
+                }
+            } catch (Throwable t) {
+                LOGGER.error(String.format(
+                    "error occurred while updating tables_ext version, schemaName:%s, tableName:%s", schemaName,
+
newTableNames.get(i))); + throw GeneralUtil.nestedException(t); + } + } + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTablesValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTablesValidateTask.java index 01996f958..9f9379744 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTablesValidateTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/RenameTablesValidateTask.java @@ -24,6 +24,7 @@ import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; +import org.apache.calcite.sql.SqlKind; import java.util.List; import java.util.Set; @@ -45,8 +46,6 @@ public RenameTablesValidateTask(String schemaName, List oldTableNames, L @Override public void executeImpl(ExecutionContext executionContext) { - executionContext.setPhyTableRenamed(false); - Set allTableNames = TableValidator.getAllTableNames(schemaName); Set allTableNamesTmp = new TreeSet<>(String::compareToIgnoreCase); allTableNamesTmp.addAll(allTableNames); @@ -58,6 +57,7 @@ public void executeImpl(ExecutionContext executionContext) { TableValidator.validateTableName(tableName); TableValidator.validateTableName(newTableName); TableValidator.validateTableNameLength(newTableName); + TableValidator.validateTableWithCCI(schemaName, tableName, executionContext, SqlKind.RENAME_TABLE); TableValidator.validateRenamesTableNotContainsFk(schemaName, tableName, executionContext); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/SequenceClearPlanCacheSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/SequenceClearPlanCacheSyncTask.java new file mode 100644 index 000000000..d5e748ebe --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/SequenceClearPlanCacheSyncTask.java @@ -0,0 +1,58 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.utils.TStringUtil; +import com.alibaba.polardbx.executor.ddl.job.task.BaseSyncTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.ddl.sync.ClearPlanCacheSyncAction; +import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import org.apache.calcite.sql.SqlKind; + +import static com.alibaba.polardbx.common.constants.SequenceAttribute.AUTO_SEQ_PREFIX; + +/** + * Created by zhuqiwei. 
+ * + * @author zhuqiwei + */ +@TaskName(name = "SequenceClearPlanCacheSyncTask") +@Getter +public class SequenceClearPlanCacheSyncTask extends BaseSyncTask { + final protected String seqName; + final protected SqlKind sqlKind; + + @JSONCreator + public SequenceClearPlanCacheSyncTask(String schemaName, + String seqName, SqlKind sqlKind) { + super(schemaName); + this.seqName = seqName; + this.sqlKind = sqlKind; + } + + @Override + protected void executeImpl(ExecutionContext executionContext) { + if (TStringUtil.startsWithIgnoreCase(seqName, AUTO_SEQ_PREFIX) && + sqlKind == SqlKind.CREATE_SEQUENCE) { + SyncManagerHelper.sync(new ClearPlanCacheSyncAction(schemaName), schemaName, SyncScope.ALL); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/SequenceSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/SequenceSyncTask.java new file mode 100644 index 000000000..5c290e0ec --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/SequenceSyncTask.java @@ -0,0 +1,62 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.logger.LoggerInit; +import com.alibaba.polardbx.executor.ddl.job.task.BaseSyncTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.sync.SequenceSyncAction; +import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import org.apache.calcite.sql.SqlKind; + +/** + * Created by zhuqiwei. 
+ * + * @author zhuqiwei + */ + +@TaskName(name = "SequenceSyncTask") +@Getter +public class SequenceSyncTask extends BaseSyncTask { + final private String seqName; + final private SqlKind sqlKind; + + @JSONCreator + public SequenceSyncTask(String schemaName, + String seqName, + SqlKind sqlKind) { + super(schemaName); + this.seqName = seqName; + this.sqlKind = sqlKind; + } + + @Override + protected void executeImpl(ExecutionContext executionContext) { + SyncManagerHelper.sync(new SequenceSyncAction(schemaName, seqName), schemaName, SyncScope.CURRENT_ONLY); + } + + @Override + protected void onExecutionSuccess(ExecutionContext executionContext) { + LoggerInit.TDDL_SEQUENCE_LOG.info(String.format("Sequence operation %s for %s was successful in %s", + sqlKind, seqName, schemaName)); + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ShowTableMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ShowTableMetaTask.java index 668358c12..b7677ac54 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ShowTableMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ShowTableMetaTask.java @@ -25,6 +25,7 @@ import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.gms.metadb.seq.SequenceBaseRecord; import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -69,7 +70,7 @@ public void rollbackImpl(Connection metaDbConnection, ExecutionContext execution FailPoint.injectRandomExceptionFromHint(executionContext); FailPoint.injectRandomSuspendFromHint(executionContext); //sync have to be successful to continue - SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName)); + SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName), SyncScope.ALL); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/SubJobTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/SubJobTask.java index 8fe12952d..f96f43243 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/SubJobTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/SubJobTask.java @@ -33,7 +33,6 @@ import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlEngineSchedulerManager; import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlJobManager; import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlHelper; -import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlJobManagerUtils; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.executor.utils.failpoint.FailPointKey; import com.alibaba.polardbx.gms.metadb.misc.DdlEngineRecord; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/SyncLsnTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/SyncLsnTask.java new file mode 100644 index 000000000..c31423d9d --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/SyncLsnTask.java @@ -0,0 +1,59 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); 
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.task.basic;
+
+import com.alibaba.fastjson.annotation.JSONCreator;
+import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask;
+import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName;
+import com.alibaba.polardbx.executor.physicalbackfill.PhysicalBackfillUtils;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import lombok.Getter;
+
+import java.sql.Connection;
+import java.util.Map;
+
+/**
+ *
+ */
+@Getter
+@TaskName(name = "SyncLsnTask")
+public class SyncLsnTask extends BaseGmsTask {
+
+    final Map<String, String> targetGroupAndStorageIdMap;
+    final Map<String, String> sourceGroupAndStorageIdMap;
+
+    @JSONCreator
+    public SyncLsnTask(String schemaName, Map<String, String> sourceGroupAndStorageIdMap,
+                       Map<String, String> targetGroupAndStorageIdMap) {
+        super(schemaName, "");
+        this.sourceGroupAndStorageIdMap = sourceGroupAndStorageIdMap;
+        this.targetGroupAndStorageIdMap = targetGroupAndStorageIdMap;
+    }
+
+    @Override
+    public void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) {
+        // make sure the table schema on the follower nodes is the same as on the source leader node
+        PhysicalBackfillUtils.waitLsn(schemaName, sourceGroupAndStorageIdMap, false, executionContext);
+        // make sure CREATE TABLE / DISCARD TABLESPACE has been executed on the follower/learner nodes
+        PhysicalBackfillUtils.waitLsn(schemaName, targetGroupAndStorageIdMap, false, executionContext);
+    }
+
+    protected void onRollbackSuccess(ExecutionContext executionContext) {
+
+    }
+}
+
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/TableListDataIdSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/TableListDataIdSyncTask.java
index 422b9483e..d16ff5ee1 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/TableListDataIdSyncTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/TableListDataIdSyncTask.java
@@ -16,10 +16,10 @@ package com.alibaba.polardbx.executor.ddl.job.task.basic;
 
+import com.alibaba.fastjson.annotation.JSONCreator;
 import com.alibaba.polardbx.executor.ddl.job.meta.TableMetaChanger;
 import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask;
 import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName;
-import com.alibaba.polardbx.gms.listener.impl.MetaDbDataIdBuilder;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import lombok.Getter;
 
@@ -32,12 +32,18 @@
 @Getter
 @TaskName(name = "TableListDataIdSyncTask")
 public class TableListDataIdSyncTask extends BaseDdlTask {
+    private List<String> tableNames;
 
-    public TableListDataIdSyncTask(String schemaName) {
+    @JSONCreator
+    public TableListDataIdSyncTask(String schemaName, List<String> tableNames) {
         super(schemaName);
+        this.tableNames = tableNames;
     }
 
     protected void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) {
+        for (String tableName : tableNames) {
+            TableMetaChanger.syncTableDataId(schemaName, tableName);
+        }
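+        // each renamed table's own data id is synced above; the table-list data id is
+        // then bumped once below so that listeners reload the table list as well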
TableMetaChanger.notifyTableListDataId(metaDbConnection, schemaName); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/TableSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/TableSyncTask.java index 7cfa47626..98a85f079 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/TableSyncTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/TableSyncTask.java @@ -25,6 +25,7 @@ import com.alibaba.polardbx.executor.sync.TableMetaChangePreemptiveSyncAction; import com.alibaba.polardbx.executor.sync.TableMetaChangeSyncAction; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -77,10 +78,12 @@ public void executeImpl(ExecutionContext executionContext) { .orElse(executionContext.getParamManager().getLong(ConnectionParams.PREEMPTIVE_MDL_INTERVAL)); TimeUnit timeUnit = Optional.ofNullable(this.timeUnit).orElse(TimeUnit.MILLISECONDS); if (!preemptive || !enablePreemptiveMdl) { - SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, tableName), throwExceptions); + SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, tableName), SyncScope.ALL, + throwExceptions); } else { SyncManagerHelper.sync( new TableMetaChangePreemptiveSyncAction(schemaName, tableName, initWait, interval, timeUnit), + SyncScope.ALL, throwExceptions); } FailPoint.injectSuspendFromHint("FP_TABLE_SYNC_TASK_SUSPEND", executionContext); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/TablesSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/TablesSyncTask.java index d821ea3cd..ae7b9d6bd 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/TablesSyncTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/TablesSyncTask.java @@ -22,6 +22,7 @@ import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.sync.TablesMetaChangePreemptiveSyncAction; import com.alibaba.polardbx.executor.sync.TablesMetaChangeSyncAction; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.google.common.base.Joiner; import lombok.Getter; @@ -67,10 +68,11 @@ public TablesSyncTask(String schemaName, public void executeImpl(ExecutionContext executionContext) { try { if (!preemptive) { - SyncManagerHelper.sync(new TablesMetaChangeSyncAction(schemaName, tableNames), true); + SyncManagerHelper.sync(new TablesMetaChangeSyncAction(schemaName, tableNames), SyncScope.ALL, true); } else { SyncManagerHelper.sync( new TablesMetaChangePreemptiveSyncAction(schemaName, tableNames, initWait, interval, timeUnit), + SyncScope.ALL, true); } } catch (Throwable t) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/TruncateTableValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/TruncateTableValidateTask.java index f3e015931..026d6505b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/TruncateTableValidateTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/TruncateTableValidateTask.java @@ 
-23,6 +23,7 @@ import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import org.apache.calcite.sql.SqlKind; import lombok.Getter; @Getter @@ -43,6 +44,7 @@ public TruncateTableValidateTask(String schemaName, String logicalTableName, Tab public void executeImpl(ExecutionContext executionContext) { TableValidator.validateTableExistence(schemaName, logicalTableName, executionContext); TableValidator.validateTableNotReferenceFk(schemaName, logicalTableName, executionContext); + TableValidator.validateTableWithCCI(schemaName, logicalTableName, executionContext, SqlKind.TRUNCATE_TABLE); GsiValidator.validateAllowTruncateOnTable(schemaName, logicalTableName, executionContext); if (tableGroupConfig != null) { TableValidator.validateTableGroupChange(schemaName, tableGroupConfig); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/UpdateTablesVersionTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/UpdateTablesVersionTask.java index 3bf4a5c29..80e8511a4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/UpdateTablesVersionTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/UpdateTablesVersionTask.java @@ -19,6 +19,7 @@ import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.google.common.base.Joiner; @@ -27,6 +28,8 @@ import java.sql.Connection; import java.util.List; +import static com.alibaba.polardbx.executor.utils.failpoint.FailPointKey.FP_UPDATE_TABLES_VERSION_ERROR; + @Getter @TaskName(name = "UpdateTablesVersionTask") public class UpdateTablesVersionTask extends BaseGmsTask { @@ -46,6 +49,8 @@ protected String remark() { @Override protected void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { + FailPoint.injectExceptionFromHintWithKeyEnable(FP_UPDATE_TABLES_VERSION_ERROR, executionContext); + updateTablesVersion(metaDbConnection); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ValidateCreateViewTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ValidateCreateViewTask.java new file mode 100644 index 000000000..afa65e3c7 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ValidateCreateViewTask.java @@ -0,0 +1,75 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.task.basic;
+
+import com.alibaba.fastjson.annotation.JSONCreator;
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.executor.ddl.job.task.BaseValidateTask;
+import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName;
+import com.alibaba.polardbx.executor.handler.ddl.LogicalCreateViewHandler;
+import com.alibaba.polardbx.optimizer.OptimizerContext;
+import com.alibaba.polardbx.optimizer.config.table.TableMeta;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.view.ViewManager;
+import lombok.Getter;
+
+@TaskName(name = "ValidateViewTask")
+@Getter
+public class ValidateCreateViewTask extends BaseValidateTask {
+
+    final private String viewName;
+    final private Boolean isReplace;
+
+    @JSONCreator
+    public ValidateCreateViewTask(String schemaName, String viewName, boolean isReplace) {
+        super(schemaName);
+        this.viewName = viewName;
+        this.isReplace = isReplace;
+    }
+
+    @Override
+    protected void executeImpl(ExecutionContext executionContext) {
+        ViewManager viewManager = OptimizerContext.getContext(schemaName).getViewManager();
+
+        if (viewManager.count(schemaName) > LogicalCreateViewHandler.MAX_VIEW_NUMBER) {
+            throw new TddlRuntimeException(ErrorCode.ERR_VIEW,
+                "The number of views can be at most " + LogicalCreateViewHandler.MAX_VIEW_NUMBER);
+        }
+        // check that the view name does not collide with an existing table
+        TableMeta tableMeta;
+        try {
+            tableMeta = OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(viewName);
+        } catch (Throwable throwable) {
+            // no table with this name exists
+            tableMeta = null;
+        }
+
+        if (tableMeta != null) {
+            if (isReplace) {
+                throw new TddlRuntimeException(ErrorCode.ERR_VIEW, "'" + viewName + "' is not a VIEW");
+            } else {
+                throw new TddlRuntimeException(ErrorCode.ERR_VIEW, "table '" + viewName + "' already exists");
+            }
+        }
+    }
+
+    @Override
+    protected String remark() {
+        return "|schema: " + schemaName + " viewName: " + viewName;
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ValidateDropViewTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ValidateDropViewTask.java
new file mode 100644
index 000000000..5cc1074d6
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/ValidateDropViewTask.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.basic; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.executor.ddl.job.task.BaseValidateTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.view.SystemTableView; +import lombok.Getter; + +@TaskName(name = "ValidateDropViewTask") +@Getter +public class ValidateDropViewTask extends BaseValidateTask { + + final private String viewName; + protected Boolean ifExists; + + @JSONCreator + public ValidateDropViewTask(String schemaName, String viewName, boolean ifExists) { + super(schemaName); + this.viewName = viewName; + this.ifExists = ifExists; + } + + @Override + protected void executeImpl(ExecutionContext executionContext) { + if (!ifExists) { + SystemTableView.Row row = OptimizerContext.getContext(schemaName).getViewManager().select(viewName); + if (row == null) { + throw new TddlRuntimeException(ErrorCode.ERR_VIEW, "Unknown view " + viewName); + } + } + } + + @Override + protected String remark() { + return "|schema: " + schemaName + " viewName: " + viewName + " ifExists: " + ifExists; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/ArchiveOSSTableDataMppTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/ArchiveOSSTableDataMppTask.java index df28a5268..369796f53 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/ArchiveOSSTableDataMppTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/ArchiveOSSTableDataMppTask.java @@ -86,7 +86,7 @@ protected Integer invoke() { List columnMetas = columnMetaAccessor.queryUncommitted(getTaskId(), schemaName, logicalTableName); deleteUncommitted(files, columnMetas); - filesAccessor.deleteUncommited(getTaskId(), schemaName, logicalTableName); + filesAccessor.deleteUncommitted(getTaskId(), schemaName, logicalTableName); columnMetaAccessor.deleteUncommitted(getTaskId(), schemaName, logicalTableName); return 0; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/ArchiveOSSTableDataTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/ArchiveOSSTableDataTask.java index 199bf15e4..c36d8c641 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/ArchiveOSSTableDataTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/ArchiveOSSTableDataTask.java @@ -136,7 +136,7 @@ protected Integer invoke() { protected void deleteUncommitted(List files, List columnMetas) { // delete remote oss files and local tmp files for (FilesRecord record : files) { - FileSystemUtils.deleteIfExistsFile(record.getFileName(), this.targetTableEngine); + FileSystemUtils.deleteIfExistsFile(record.getFileName(), this.targetTableEngine, false); File tmpFile = new File(record.getLocalPath()); if (tmpFile.exists()) { if (!tmpFile.delete()) { @@ -150,7 +150,7 @@ protected void deleteUncommitted(List files, List columnMetas = columnMetaAccessor.queryUncommitted(getTaskId(), schemaName, logicalTableName); deleteUncommitted(files, 
-        filesAccessor.deleteUncommited(getTaskId(), schemaName, logicalTableName);
+        filesAccessor.deleteUncommitted(getTaskId(), schemaName, logicalTableName);
         columnMetaAccessor.deleteUncommitted(getTaskId(), schemaName, logicalTableName);
         return 0;
     }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/CloseFileStorageTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/CloseFileStorageTask.java
index dd27ee99b..e656a97a9 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/CloseFileStorageTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/CloseFileStorageTask.java
@@ -39,11 +39,13 @@ public class CloseFileStorageTask extends BaseDdlTask {
 
     private String engine;
+    private boolean onlyCloseColdData;
 
     @JSONCreator
-    public CloseFileStorageTask(String engine) {
+    public CloseFileStorageTask(String engine, boolean onlyCloseColdData) {
         super(DefaultDbSchema.NAME);
         this.engine = engine;
+        this.onlyCloseColdData = onlyCloseColdData;
     }
 
     @Override
@@ -55,23 +57,21 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi
         updateSupportedCommands(true, false, metaDbConnection);
         Engine fileEngine = Engine.of(engine);
         CommonMetaChanger.invalidateBufferPool();
-        long stamp = FileSystemManager.readLockWithTimeOut(fileEngine);
-        try {
+
+        if (!onlyCloseColdData) {
             FileStorageInfoAccessor fileStorageInfoAccessor = new FileStorageInfoAccessor();
             fileStorageInfoAccessor.setConnection(metaDbConnection);
             fileStorageInfoAccessor.delete(fileEngine);
+        }
 
-            FileStorageMetaStore fileStorageMetaStore = new FileStorageMetaStore(fileEngine);
-            fileStorageMetaStore.setConnection(metaDbConnection);
-            fileStorageMetaStore.deleteAll();
+        FileStorageMetaStore fileStorageMetaStore = new FileStorageMetaStore(fileEngine);
+        fileStorageMetaStore.setConnection(metaDbConnection);
+        fileStorageMetaStore.deleteAll();
 
+        if (!onlyCloseColdData) {
             ConfigListenerAccessor configListenerAccessor = new ConfigListenerAccessor();
             configListenerAccessor.setConnection(metaDbConnection);
             configListenerAccessor.updateOpVersion(MetaDbDataIdBuilder.getFileStorageInfoDataId());
-        } catch (Throwable e) {
-            throw GeneralUtil.nestedException(e);
-        } finally {
-            FileSystemManager.unlockRead(fileEngine, stamp);
         }
 
         FailPoint.injectRandomExceptionFromHint(executionContext);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/CreateFileStorageTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/CreateFileStorageTask.java
index 13f38ad1e..dce217320 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/CreateFileStorageTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/CreateFileStorageTask.java
@@ -40,6 +40,7 @@
 import java.io.IOException;
 import java.sql.Connection;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ExecutionException;
@@ -49,17 +50,24 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
+import static com.alibaba.polardbx.common.oss.filesystem.Constants.ABS_URI_SUFFIX_PATTERN;
+import static com.alibaba.polardbx.common.oss.filesystem.Constants.AZURE_WASBS_SCHEME;
+import static com.alibaba.polardbx.common.oss.filesystem.Constants.AZURE_WASB_SCHEME;
+
 @Getter
 @TaskName(name = "CreateFileStorageTask")
 public class CreateFileStorageTask extends BaseGmsTask {
     private String engineName;
     private Map<FileStorageInfoKey, String> items;
+    private Map<FileStorageInfoKey.AzureConnectionStringKey, String> azureItems;
 
     @JSONCreator
-    public CreateFileStorageTask(String schemaName, String engineName, Map<FileStorageInfoKey, String> items) {
+    public CreateFileStorageTask(String schemaName, String engineName, Map<FileStorageInfoKey, String> items,
+                                 Map<FileStorageInfoKey.AzureConnectionStringKey, String> azureItems) {
         super(schemaName, null);
         this.engineName = engineName;
         this.items = items;
+        this.azureItems = azureItems;
         onExceptionTryRollback();
     }
@@ -76,10 +84,10 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi
         record1.priority = 1;
         record1.regionId = "";
         record1.availableZoneId = "";
-        record1.cachePolicy = CachePolicy.META_AND_DATA_CACHE.getValue();
+        record1.cachePolicy = CachePolicy.DATA_CACHE.getValue();
         record1.deletePolicy = DeletePolicy.MASTER_ONLY.getValue();
         record1.status = 1;
-        String uri = items.get(FileStorageInfoKey.FILE_URI).trim();
+        String uri = items.getOrDefault(FileStorageInfoKey.FILE_URI, "").trim();
         if (!uri.endsWith("/")) {
             uri = uri + "/";
         }
@@ -90,6 +98,23 @@
             record1.internalVpcEndpoint = items.get(FileStorageInfoKey.ENDPOINT);
             record1.accessKeyId = items.get(FileStorageInfoKey.ACCESS_KEY_ID);
             record1.accessKeySecret = PasswdUtil.encrypt(items.get(FileStorageInfoKey.ACCESS_KEY_SECRET));
+        } else if (Engine.S3.name().equalsIgnoreCase(record1.engine)) {
+            record1.accessKeyId = items.get(FileStorageInfoKey.ACCESS_KEY_ID);
+            record1.accessKeySecret = PasswdUtil.encrypt(items.get(FileStorageInfoKey.ACCESS_KEY_SECRET));
+        } else if (Engine.ABS.name().equalsIgnoreCase(record1.engine)) {
+            record1.accessKeyId = azureItems.get(FileStorageInfoKey.AzureConnectionStringKey.AccountName);
+            record1.accessKeySecret =
+                PasswdUtil.encrypt(azureItems.get(FileStorageInfoKey.AzureConnectionStringKey.AccountKey));
+            record1.externalEndpoint = azureItems.get(FileStorageInfoKey.AzureConnectionStringKey.EndpointSuffix);
+            record1.internalClassicEndpoint =
+                azureItems.get(FileStorageInfoKey.AzureConnectionStringKey.EndpointSuffix);
+            record1.internalVpcEndpoint = azureItems.get(FileStorageInfoKey.AzureConnectionStringKey.EndpointSuffix);
+            String endpointsProtocol =
+                azureItems.get(FileStorageInfoKey.AzureConnectionStringKey.DefaultEndpointsProtocol);
+            record1.fileUri =
+                ("https".equalsIgnoreCase(endpointsProtocol) ? AZURE_WASBS_SCHEME : AZURE_WASB_SCHEME) + "://"
+                    + items.get(FileStorageInfoKey.AZURE_CONTAINER_NAME)
+                    + "@" + String.format(ABS_URI_SUFFIX_PATTERN, record1.accessKeyId, record1.externalEndpoint) + "/";
         }
 
         if (fileStorageInfoAccessor.query(Engine.of(engineName)).size() != 0) {
@@ -105,7 +130,7 @@
         try (FileSystem master = FileSystemManager.buildFileSystem(record1)) {
             future = executor.submit(() -> {
                 try {
-                    master.exists(FileSystemUtils.buildPath(master, "1.orc"));
+                    master.exists(FileSystemUtils.buildPath(master, "1.orc", false));
                 } catch (Exception e) {
                     unexpectedErrors.add(e.getMessage());
                 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/CreateOssTableFormatTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/CreateOssTableFormatTask.java
index 8157d8d30..c88117423 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/CreateOssTableFormatTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/CreateOssTableFormatTask.java
@@ -21,6 +21,7 @@
 import com.alibaba.polardbx.common.ddl.newengine.DdlTaskState;
 import com.alibaba.polardbx.common.exception.TddlRuntimeException;
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.common.oss.OSSFileType;
 import com.alibaba.polardbx.common.oss.OSSMetaLifeCycle;
 import com.alibaba.polardbx.common.oss.access.OSSKey;
 import com.alibaba.polardbx.common.utils.GeneralUtil;
@@ -42,8 +43,6 @@
 import java.util.Map;
 import java.util.UUID;
 
-import static com.alibaba.polardbx.gms.metadb.table.FilesRecord.TABLE_FORMAT_TYPE;
-
 @Getter
 @TaskName(name = "CreateOssTableFormatTask")
 public class CreateOssTableFormatTask extends BaseGmsTask {
@@ -101,7 +100,7 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi
         }
 
         // 4. upload to oss
-        FileSystemUtils.writeFile(localFormatFile, ossKey.toString(), tableEngine);
+        FileSystemUtils.writeFile(localFormatFile, ossKey.toString(), tableEngine, false);
         // 5. delete tmp file
         localFormatFile.delete();
@@ -130,7 +129,7 @@ protected void rollbackImpl(Connection metaDbConnection, ExecutionContext execut
         List<FilesRecord> files = TableMetaChanger.lockOssFileMeta(metaDbConnection, getTaskId(), schemaName, logicalTableName);
         for (FilesRecord record : files) {
-            FileSystemUtils.deleteIfExistsFile(record.getFileName(), this.tableEngine);
+            FileSystemUtils.deleteIfExistsFile(record.getFileName(), this.tableEngine, false);
             File tmpFile = new File(record.getLocalPath());
             if (tmpFile.exists()) {
                 if (!tmpFile.delete()) {
@@ -147,7 +146,7 @@ private FilesRecord buildFilesRecord(File localFormatFile, OSSKey ossKey) {
         FilesRecord filesRecord = new FilesRecord();
         filesRecord.fileName = ossKey.toString();
-        filesRecord.fileType = TABLE_FORMAT_TYPE;
+        filesRecord.fileType = OSSFileType.TABLE_FORMAT.name();
         filesRecord.fileMeta = new byte[] {};
         filesRecord.tablespaceName = null;
         filesRecord.tableCatalog = "";
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/CreateOssTableGenerateDataMppTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/CreateOssTableGenerateDataMppTask.java
index c04b2784a..2be819ef9 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/CreateOssTableGenerateDataMppTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/CreateOssTableGenerateDataMppTask.java
@@ -47,8 +47,10 @@ public class CreateOssTableGenerateDataMppTask extends CreateOssTableGenerateDat
     public CreateOssTableGenerateDataMppTask(String schemaName, String logicalTableName,
                                              PhysicalPlanData physicalPlanData, String loadTableSchema,
                                              String loadTableName, Engine tableEngine,
-                                             ArchiveMode archiveMode, int totalTaskNumber, int serialNumber) {
-        super(schemaName, logicalTableName, physicalPlanData, loadTableSchema, loadTableName, tableEngine, archiveMode);
+                                             ArchiveMode archiveMode, List<String> dictColumns, int totalTaskNumber,
+                                             int serialNumber) {
+        super(schemaName, logicalTableName, physicalPlanData, loadTableSchema, loadTableName, tableEngine, archiveMode,
+            dictColumns);
         this.totalTaskNumber = totalTaskNumber;
         this.serialNumber = serialNumber;
         onExceptionTryRollback();
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/CreateOssTableGenerateDataTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/CreateOssTableGenerateDataTask.java
index 11789445a..9cbf01a83 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/CreateOssTableGenerateDataTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/CreateOssTableGenerateDataTask.java
@@ -50,12 +50,16 @@
 import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
 import com.alibaba.polardbx.optimizer.partition.pruning.PhysicalPartitionInfo;
 import lombok.Getter;
+import org.apache.calcite.sql.SqlIndexColumnName;
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.jetbrains.annotations.NotNull;
 
 import java.io.File;
 import java.sql.Connection;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -77,16 +81,19 @@ public class CreateOssTableGenerateDataTask extends BaseGmsTask {
     protected final ArchiveMode archiveMode;
+    protected final List<String> dictColumns;
+
     @JSONCreator
     public CreateOssTableGenerateDataTask(String schemaName, String logicalTableName,
                                           PhysicalPlanData physicalPlanData, String loadTableSchema,
                                           String loadTableName, Engine tableEngine,
-                                          ArchiveMode archiveMode) {
+                                          ArchiveMode archiveMode, List<String> dictColumns) {
         super(schemaName, logicalTableName);
         this.physicalPlanData = physicalPlanData;
         this.loadTableSchema = loadTableSchema;
         this.loadTableName = loadTableName;
         this.tableEngine = tableEngine;
         this.archiveMode = archiveMode;
+        this.dictColumns = dictColumns;
         onExceptionTryRollback();
     }
@@ -119,7 +126,7 @@ protected Integer invoke() {
     protected void rollbackImpl(Connection metaDbConnection, ExecutionContext executionContext) {
         List<FilesRecord> files = TableMetaChanger.lockOssFileMeta(metaDbConnection, getTaskId(), schemaName, logicalTableName);
         for (FilesRecord record : files) {
-            FileSystemUtils.deleteIfExistsFile(record.getFileName(), this.tableEngine);
+            FileSystemUtils.deleteIfExistsFile(record.getFileName(), this.tableEngine, false);
             File tmpFile = new File(record.getLocalPath());
             if (tmpFile.exists()) {
                 if (!tmpFile.delete()) {
@@ -138,7 +145,7 @@ protected void rollbackImpl(Connection metaDbConnection, ExecutionContext execut
         List<ColumnMetasRecord> columnMetas =
             TableMetaChanger.lockOssColumnMeta(metaDbConnection, getTaskId(), schemaName, logicalTableName);
         for (ColumnMetasRecord record : columnMetas) {
-            FileSystemUtils.deleteIfExistsFile(record.tableFileName, this.tableEngine);
+            FileSystemUtils.deleteIfExistsFile(record.tableFileName, this.tableEngine, false);
         }
 
         TableMetaChanger.deleteOssColumnMeta(metaDbConnection, getTaskId(), schemaName, logicalTableName);
@@ -184,7 +191,8 @@ protected Map invoke() {
         // build orc schema
         PolarDBXOrcSchema orcSchema =
-            OrcMetaUtils.buildPolarDBXOrcSchema(sourceTableMeta, Optional.of(columnToFieldIdMap), false);
+            OrcMetaUtils.buildPolarDBXOrcSchema(sourceTableMeta, Optional.of(columnToFieldIdMap), false,
+                dictColumns);
 
         // data config
         Configuration conf = OrcMetaUtils.getConfiguration(executionContext, orcSchema);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/DeleteFileStorageDirectoryTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/DeleteFileStorageDirectoryTask.java
index 3d60c214a..3f71d4136 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/DeleteFileStorageDirectoryTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/DeleteFileStorageDirectoryTask.java
@@ -54,16 +54,13 @@ protected void duringTransaction(Connection metaDbConnection, ExecutionContext e
     protected void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) {
         updateSupportedCommands(true, false, metaDbConnection);
         Engine fileEngine = Engine.of(engine);
-        long stamp = FileSystemManager.readLockWithTimeOut(fileEngine);
         try {
             FileSystemGroup fileSystemGroup = FileSystemManager.getFileSystemGroup(fileEngine);
-            if(fileSystemGroup.exists(directory)) {
-                fileSystemGroup.delete(directory, true);
+            if (fileSystemGroup.exists(directory, false)) {
+                fileSystemGroup.delete(directory, true, false);
             }
         } catch (Throwable e) {
             throw GeneralUtil.nestedException(e);
-        } finally {
-            FileSystemManager.unlockRead(fileEngine, stamp);
         }
 
         FailPoint.injectRandomExceptionFromHint(executionContext);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/DeleteOssFilesTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/DeleteOssFilesTask.java
index e1c26eddb..0d2a12945 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/DeleteOssFilesTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/DeleteOssFilesTask.java
@@ -55,21 +55,18 @@ protected void duringTransaction(Connection metaDbConnection, ExecutionContext e
     protected void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) {
         updateSupportedCommands(true, false, metaDbConnection);
         Engine fileEngine = Engine.of(engine);
-        long stamp = FileSystemManager.readLockWithTimeOut(fileEngine);
         try {
             FileStorageMetaStore fileStorageMetaStore = new FileStorageMetaStore(fileEngine);
             fileStorageMetaStore.setConnection(metaDbConnection);
             FileSystemGroup fileSystemGroup = FileSystemManager.getFileSystemGroup(fileEngine);
             for (String dataFilePath : dataFilePath) {
-                if (fileSystemGroup.exists(dataFilePath)) {
-                    fileSystemGroup.delete(dataFilePath, false);
+                if (fileSystemGroup.exists(dataFilePath, false)) {
+                    fileSystemGroup.delete(dataFilePath, false, false);
                 }
                 fileStorageMetaStore.deleteFile(dataFilePath);
             }
         } catch (Throwable e) {
             throw GeneralUtil.nestedException(e);
-        } finally {
-            FileSystemManager.unlockRead(fileEngine, stamp);
         }
 
         FailPoint.injectRandomExceptionFromHint(executionContext);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/DeleteRecycleBinTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/DeleteRecycleBinTask.java
index 23446e2ca..e5813127a 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/DeleteRecycleBinTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/DeleteRecycleBinTask.java
@@ -53,7 +53,8 @@ protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionC
     protected void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) {
         updateSupportedCommands(true, false, metaDbConnection);
         try (Statement statement = metaDbConnection.createStatement()) {
-            statement.executeUpdate(String.format("delete from %s where `schema_name` = '%s' and `name` = '%s'", GmsSystemTables.RECYCLE_BIN, schemaName, binName));
+            statement.executeUpdate(String.format("delete from %s where `schema_name` = '%s' and `name` = '%s'",
+                GmsSystemTables.RECYCLE_BIN, schemaName, binName));
         } catch (Throwable t) {
             throw new TddlRuntimeException(ErrorCode.ERR_RECYCLEBIN_EXECUTE, "delete from recycle bin error", t);
         }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/DropOssFilesTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/DropOssFilesTask.java
index 7737c6130..1480a8469 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/DropOssFilesTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/DropOssFilesTask.java
@@ -41,7 +41,6 @@
 /**
  * @author chenzilin
- * @date 2022/1/13 17:42
  */
 @Getter
 @TaskName(name = "DropOssFilesTask")
@@ -64,7 +63,6 @@ public DropOssFilesTask(String engine, String schemaName, String logicalTableNam
     protected void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) {
         updateSupportedCommands(true, false, metaDbConnection);
         Engine fileEngine = Engine.of(engine);
-        long stamp = FileSystemManager.readLockWithTimeOut(fileEngine);
         try (Connection connection = MetaDbUtil.getConnection()) {
             FileSystemGroup fileSystemGroup = FileSystemManager.getFileSystemGroup(fileEngine);
             TableInfoManager tableInfoManager = new TableInfoManager();
@@ -80,7 +78,8 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi
             fileStorageMetaStore.setConnection(connection);
 
             List<ColumnsRecord> columnsRecords = tableInfoManager.queryColumns(schemaName, logicalTableName);
-            List<FilesRecord> filesRecordList = tableInfoManager.queryFilesByLogicalSchemaTable(schemaName, logicalTableName);
+            List<FilesRecord> filesRecordList =
+                tableInfoManager.queryFilesByLogicalSchemaTable(schemaName, logicalTableName);
 
             for (FilesRecord filesRecord : filesRecordList) {
                 if (files == null) {
@@ -90,12 +89,13 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi
                 }
 
                 for (ColumnsRecord columnsRecord : columnsRecords) {
-                    List<ColumnMetasRecord> records = accessor.query(filesRecord.getFileName(), columnsRecord.columnName);
+                    List<ColumnMetasRecord> records =
+                        accessor.query(filesRecord.getFileName(), columnsRecord.columnName);
                     for (ColumnMetasRecord record : records) {
                         if (record.bloomFilterPath != null && !record.bloomFilterPath.isEmpty()) {
                             String dataFilePath = record.bloomFilterPath;
-                            if (fileSystemGroup.exists(dataFilePath)) {
-                                fileSystemGroup.delete(dataFilePath, false);
+                            if (fileSystemGroup.exists(dataFilePath, false)) {
+                                fileSystemGroup.delete(dataFilePath, false, false);
                             }
                             fileStorageMetaStore.deleteFile(dataFilePath);
                             accessor.delete(record.columnMetaId);
@@ -104,8 +104,8 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi
                 }
 
                 String dataFilePath = filesRecord.getFileName();
-                if (fileSystemGroup.exists(dataFilePath)) {
-                    fileSystemGroup.delete(dataFilePath, false);
+                if (fileSystemGroup.exists(dataFilePath, false)) {
+                    fileSystemGroup.delete(dataFilePath, false, false);
                 }
                 fileStorageMetaStore.deleteFile(dataFilePath);
                 filesAccessor.delete(filesRecord.fileId);
@@ -113,8 +113,6 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi
             }
         } catch (Throwable e) {
             throw GeneralUtil.nestedException(e);
-        } finally {
-            FileSystemManager.unlockRead(fileEngine, stamp);
         }
 
         FailPoint.injectRandomExceptionFromHint(executionContext);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/FileStorageBackupTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/FileStorageBackupTask.java
index e94568cc0..caf3a85d7 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/FileStorageBackupTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/FileStorageBackupTask.java
@@ -28,7 +28,6 @@
 import java.sql.Connection;
 
-
 @Getter
 @TaskName(name = "FileStorageBackupTask")
 public class FileStorageBackupTask extends BaseDdlTask {
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/MoveDataToFileStoreTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/MoveDataToFileStoreTask.java
index 1f98afaba..c7f7a1046 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/MoveDataToFileStoreTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/MoveDataToFileStoreTask.java
@@ -111,7 +111,7 @@ protected void rollbackImpl(Connection metaDbConnection, ExecutionContext execut
         List<FilesRecord> files = TableMetaChanger.lockOssFileMeta(metaDbConnection, getTaskId(), schemaName, logicalTableName);
         for (FilesRecord record : files) {
-            FileSystemUtils.deleteIfExistsFile(record.getFileName(), this.tableEngine);
+            FileSystemUtils.deleteIfExistsFile(record.getFileName(), this.tableEngine, false);
             File tmpFile = new File(record.getLocalPath());
             if (tmpFile.exists()) {
                 if (!tmpFile.delete()) {
@@ -126,7 +126,7 @@ protected void rollbackImpl(Connection metaDbConnection, ExecutionContext execut
         List<ColumnMetasRecord> columnMetas =
             TableMetaChanger.lockOssColumnMeta(metaDbConnection, getTaskId(), schemaName, logicalTableName);
         for (ColumnMetasRecord record : columnMetas) {
-            FileSystemUtils.deleteIfExistsFile(record.tableFileName, this.tableEngine);
+            FileSystemUtils.deleteIfExistsFile(record.tableFileName, this.tableEngine, false);
         }
 
         TableMetaChanger.deleteOssColumnMeta(metaDbConnection, getTaskId(), schemaName, logicalTableName);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/MoveDataToInnodbTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/MoveDataToInnodbTask.java
index 59c05e27c..85a3b20db 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/MoveDataToInnodbTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/MoveDataToInnodbTask.java
@@ -78,7 +78,8 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi
         try (Connection metaDbConn = MetaDbUtil.getConnection()) {
             try {
                 MetaDbUtil.beginTransaction(metaDbConn);
-                List<FilesRecord> files = TableMetaChanger.lockOssFileMeta(metaDbConn, getTaskId(), schemaName, logicalTableName);
+                List<FilesRecord> files =
+                    TableMetaChanger.lockOssFileMeta(metaDbConn, getTaskId(), schemaName, logicalTableName);
                 if (files != null && files.size() > 0) {
                     throw new TddlRuntimeException(ErrorCode.ERR_CANT_CONTINUE_DDL);
                 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/OSSTaskUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/OSSTaskUtils.java
index ac6100875..a7e5a1ee7 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/OSSTaskUtils.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/OSSTaskUtils.java
@@ -182,7 +182,7 @@ public static Optional chooseRemoteNode(Long taskId) {
         }
         List candidates = new ArrayList<>(remoteNodeList);
         candidates.add(null);
-        int mod = (int) (MurmurHashUtils.murmurHashWithZeroSeed(taskId) % candidates.size());
+        int mod = (int) (MurmurHashUtils.murmurHash128WithZeroSeed(taskId) % candidates.size());
         if (mod < 0) {
             mod = mod + candidates.size();
         }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/UnBindingArchiveTableMetaByArchiveTableTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/UnBindingArchiveTableMetaByArchiveTableTask.java
index f674e184d..eb33e2b6e 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/UnBindingArchiveTableMetaByArchiveTableTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/UnBindingArchiveTableMetaByArchiveTableTask.java
@@ -46,7 +46,7 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi
         tableInfoManager.setConnection(metaDbConnection);
 
         TableLocalPartitionRecord record =
-                tableInfoManager.getLocalPartitionRecordByArchiveTable(getSchemaName(), archiveTableName);
+            tableInfoManager.getLocalPartitionRecordByArchiveTable(getSchemaName(), archiveTableName);
 
         if (record == null) {
             return;
@@ -56,7 +56,7 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi
         originTableName = record.getTableName();
 
         tableInfoManager
-                .unBindingByArchiveTableName(getSchemaName(), archiveTableName);
+            .unBindingByArchiveTableName(getSchemaName(), archiveTableName);
     }
 
     @Override
@@ -66,7 +66,7 @@ public void rollbackImpl(Connection metaDbConnection, ExecutionContext execution
         tableInfoManager.setConnection(metaDbConnection);
 
         tableInfoManager
-                .updateArchiveTable(originSchemaName, originTableName, getSchemaName(), archiveTableName);
+            .updateArchiveTable(originSchemaName, originTableName, getSchemaName(), archiveTableName);
     }
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/UnBindingArchiveTableMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/UnBindingArchiveTableMetaTask.java
index c00d0f622..de5bbe18f 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/UnBindingArchiveTableMetaTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/UnBindingArchiveTableMetaTask.java
@@ -17,8 +17,6 @@
 package com.alibaba.polardbx.executor.ddl.job.task.basic.oss;
 
 import com.alibaba.fastjson.annotation.JSONCreator;
-import com.alibaba.polardbx.common.exception.TddlRuntimeException;
-import com.alibaba.polardbx.common.exception.code.ErrorCode;
 import com.alibaba.polardbx.common.utils.GeneralUtil;
 import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask;
 import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName;
@@ -26,14 +24,13 @@
 import com.alibaba.polardbx.executor.sync.TablesMetaChangePreemptiveSyncAction;
 import com.alibaba.polardbx.gms.metadb.table.TableInfoManager;
 import com.alibaba.polardbx.gms.partition.TableLocalPartitionRecord;
+import com.alibaba.polardbx.gms.sync.SyncScope;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import lombok.Getter;
-import org.apache.commons.collections.CollectionUtils;
 
 import java.sql.Connection;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
@@ -97,7 +94,7 @@ protected void onRollbackSuccess(ExecutionContext executionContext) {
         // sync to restore the status of table meta
         SyncManagerHelper.sync(
             new TablesMetaChangePreemptiveSyncAction(schemaName, tables, 1500L, 1500L,
-                TimeUnit.MICROSECONDS));
+                TimeUnit.MICROSECONDS), SyncScope.ALL);
     }
 
     protected void updateTableVersion(Connection metaDbConnection) {
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/UpdateFileRemoveTsTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/UpdateFileRemoveTsTask.java
index cfe5db0f6..8dba7ce8d 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/UpdateFileRemoveTsTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/oss/UpdateFileRemoveTsTask.java
@@ -41,7 +41,8 @@ public class UpdateFileRemoveTsTask extends BaseGmsTask {
     private Long ts;
 
     @JSONCreator
-    public UpdateFileRemoveTsTask(String engine, String schemaName, String logicalTableName, List<String> files, Long ts) {
+    public UpdateFileRemoveTsTask(String engine, String schemaName, String logicalTableName, List<String> files,
+                                  Long ts) {
         super(schemaName, logicalTableName);
         this.engine = engine;
         this.files = files;
@@ -59,7 +60,8 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi
         fileStorageMetaStore.setConnection(metaDbConnection);
 
         if (files != null && !files.isEmpty()) {
-            List<FilesRecord> filesRecords = tableInfoManager.queryFilesByLogicalSchemaTable(schemaName, logicalTableName);
+            List<FilesRecord> filesRecords =
+                tableInfoManager.queryFilesByLogicalSchemaTable(schemaName, logicalTableName);
             for (FilesRecord filesRecord : filesRecords) {
                 if (files.contains(filesRecord.getFileName())) {
                     if (ts == null) {
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/accessor/FunctionAccessor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/accessor/FunctionAccessor.java
index 37a07bf74..797d43867 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/accessor/FunctionAccessor.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/accessor/FunctionAccessor.java
@@ -46,7 +46,14 @@
 import java.util.Map;
 import java.util.Optional;
 
-import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.*;
+import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.DEF_ROUTINE_CATALOG;
+import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.FUNCTION;
+import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.MOCK_CHARACTER_SET_CLIENT;
+import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.MOCK_COLLATION_CONNECTION;
+import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.MOCK_DATABASE_COLLATION;
+import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.MYSQL;
+import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.NO_SQL;
+import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.SQL;
 
 public class FunctionAccessor extends AbstractAccessor {
     private static final Logger logger = LoggerFactory.getLogger(FunctionAccessor.class);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/accessor/PlParameterAccessor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/accessor/PlParameterAccessor.java
index ac9c95188..b1735b2fd 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/accessor/PlParameterAccessor.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/accessor/PlParameterAccessor.java
@@ -35,7 +35,12 @@
 import java.util.HashMap;
 import java.util.Map;
 
-import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.*;
+import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.DEF_ROUTINE_CATALOG;
+import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.FUNCTION;
+import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.MOCK_CHARACTER_SET_CLIENT;
+import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.MOCK_COLLATION_CONNECTION;
+import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.MYSQL;
+import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.PROCEDURE;
 
 /**
  * @author yuehan.wcf
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/accessor/ProcedureAccessor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/accessor/ProcedureAccessor.java
index 6dceabd0c..d08302553 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/accessor/ProcedureAccessor.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/accessor/ProcedureAccessor.java
@@ -34,8 +34,8 @@
 import com.alibaba.polardbx.gms.metadb.GmsSystemTables;
 import com.alibaba.polardbx.gms.metadb.accessor.AbstractAccessor;
 import com.alibaba.polardbx.gms.metadb.pl.procedure.CreateProcedureRecord;
-import com.alibaba.polardbx.gms.metadb.pl.procedure.ProcedureMetaRecord;
 import com.alibaba.polardbx.gms.metadb.pl.procedure.ProcedureDefinitionRecord;
+import com.alibaba.polardbx.gms.metadb.pl.procedure.ProcedureMetaRecord;
 import com.alibaba.polardbx.gms.metadb.pl.procedure.ProcedureStatusRecord;
 import com.alibaba.polardbx.gms.util.MetaDbUtil;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
@@ -46,7 +46,12 @@
 import java.util.Map;
 import java.util.Optional;
 
-import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.*;
+import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.DEF_ROUTINE_CATALOG;
+import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.MOCK_CHARACTER_SET_CLIENT;
+import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.MOCK_COLLATION_CONNECTION;
+import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.MOCK_DATABASE_COLLATION;
+import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.PROCEDURE;
+import static com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants.SQL;
 
 public class ProcedureAccessor extends AbstractAccessor {
     private static final Logger logger = LoggerFactory.getLogger(ProcedureAccessor.class);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/procedure/CreateProcedureSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/procedure/CreateProcedureSyncTask.java
index af4552945..77417a351 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/procedure/CreateProcedureSyncTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/procedure/CreateProcedureSyncTask.java
@@ -18,6 +18,7 @@
 import com.alibaba.fastjson.annotation.JSONCreator;
 import com.alibaba.polardbx.common.TddlConstants;
+import com.alibaba.polardbx.common.ddl.newengine.DdlTaskState;
 import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask;
 import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName;
 import com.alibaba.polardbx.executor.sync.CreateProcedureSyncAction;
@@ -40,15 +41,20 @@ public CreateProcedureSyncTask(String schemaName, String procedureSchema, String
String super(schemaName); this.procedureSchema = procedureSchema; this.procedureName = procedureName; - onExceptionTryRecoveryThenPause(); + onExceptionTryRecoveryThenRollback(); } @Override protected void beforeTransaction(ExecutionContext executionContext) { FailPoint.injectRandomExceptionFromHint(executionContext); FailPoint.injectRandomSuspendFromHint(executionContext); + + updateTaskStateInNewTxn(DdlTaskState.DIRTY); + + FailPoint.injectExceptionFromHint("FP_CREATE_PROCEDURE_ERROR", executionContext); + SyncManagerHelper.sync(new CreateProcedureSyncAction(procedureSchema, procedureName), - TddlConstants.INFORMATION_SCHEMA, SyncScope.ALL); + TddlConstants.INFORMATION_SCHEMA, SyncScope.NOT_COLUMNAR_SLAVE); } @Override @@ -56,7 +62,7 @@ protected void beforeRollbackTransaction(ExecutionContext executionContext) { FailPoint.injectRandomExceptionFromHint(executionContext); FailPoint.injectRandomSuspendFromHint(executionContext); SyncManagerHelper.sync(new DropProcedureSyncAction(procedureSchema, procedureName), - TddlConstants.INFORMATION_SCHEMA, SyncScope.ALL); + TddlConstants.INFORMATION_SCHEMA, SyncScope.NOT_COLUMNAR_SLAVE); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/procedure/DropProcedureDropMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/procedure/DropProcedureDropMetaTask.java index 634fb386b..37222925f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/procedure/DropProcedureDropMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/procedure/DropProcedureDropMetaTask.java @@ -43,6 +43,8 @@ public DropProcedureDropMetaTask(String schemaName, String logicalTableName, Str @Override protected void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); FailPoint.injectRandomSuspendFromHint(executionContext); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/procedure/DropProcedureSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/procedure/DropProcedureSyncTask.java index 1f167791d..c8700580c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/procedure/DropProcedureSyncTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/procedure/DropProcedureSyncTask.java @@ -43,7 +43,7 @@ public DropProcedureSyncTask(String schemaName, String procedureSchema, String p @Override protected void executeImpl(ExecutionContext executionContext) { SyncManagerHelper.sync(new DropProcedureSyncAction(procedureSchema, procedureName), - TddlConstants.INFORMATION_SCHEMA, SyncScope.ALL); + TddlConstants.INFORMATION_SCHEMA, SyncScope.NOT_COLUMNAR_SLAVE); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/AlterFunctionModifyMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/AlterFunctionModifyMetaTask.java index c5ee54a57..f853e7c62 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/AlterFunctionModifyMetaTask.java +++ 
@@ -19,7 +19,6 @@
 import com.alibaba.fastjson.annotation.JSONCreator;
 import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.accessor.FunctionAccessor;
-import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.accessor.ProcedureAccessor;
 import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName;
 import com.alibaba.polardbx.executor.utils.failpoint.FailPoint;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/CreateFunctionOnAllDnTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/CreateFunctionOnAllDnTask.java
index 2a38a09db..5f065379f 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/CreateFunctionOnAllDnTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/CreateFunctionOnAllDnTask.java
@@ -17,6 +17,7 @@
 package com.alibaba.polardbx.executor.ddl.job.task.basic.pl.udf;
 
 import com.alibaba.fastjson.annotation.JSONCreator;
+import com.alibaba.polardbx.common.ddl.newengine.DdlTaskState;
 import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask;
 import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName;
 import com.alibaba.polardbx.executor.utils.ExecUtils;
@@ -46,6 +47,8 @@ public CreateFunctionOnAllDnTask(String schemaName, String functionName, String
     @Override
     protected void beforeTransaction(ExecutionContext executionContext) {
+        updateTaskStateInNewTxn(DdlTaskState.DIRTY);
+
         Set<String> allDnId = ExecUtils.getAllDnStorageId();
         for (String dnId : allDnId) {
             try (Connection conn = DbTopologyManager.getConnectionForStorage(dnId);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/CreateFunctionSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/CreateFunctionSyncTask.java
index ad71fe318..de73e2e06 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/CreateFunctionSyncTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/CreateFunctionSyncTask.java
@@ -18,18 +18,16 @@
 import com.alibaba.fastjson.annotation.JSONCreator;
 import com.alibaba.polardbx.common.TddlConstants;
-import com.alibaba.polardbx.druid.sql.ast.statement.SQLBlockStatement;
-import com.alibaba.polardbx.druid.sql.ast.statement.SQLCreateFunctionStatement;
-import com.alibaba.polardbx.druid.sql.visitor.VisitorFeature;
+import com.alibaba.polardbx.common.ddl.newengine.DdlTaskState;
 import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask;
 import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName;
+import com.alibaba.polardbx.executor.pl.UdfUtils;
 import com.alibaba.polardbx.executor.sync.CreateStoredFunctionSyncAction;
 import com.alibaba.polardbx.executor.sync.DropStoredFunctionSyncAction;
 import com.alibaba.polardbx.executor.sync.SyncManagerHelper;
 import com.alibaba.polardbx.executor.utils.failpoint.FailPoint;
 import com.alibaba.polardbx.gms.sync.SyncScope;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
-import com.alibaba.polardbx.optimizer.parse.FastsqlUtils;
 import lombok.Getter;
 
 @Getter
@@ -47,7 +45,7 @@ public CreateFunctionSyncTask(String schemaName, String functionName, String cre
         this.functionName = functionName;
         this.createFunctionContent = createFunctionContent;
         this.canPush = canPush;
-        onExceptionTryRecoveryThenPause();
+        onExceptionTryRecoveryThenRollback();
     }
 
     @Override
@@ -55,18 +53,13 @@ protected void beforeTransaction(ExecutionContext executionContext) {
         FailPoint.injectRandomExceptionFromHint(executionContext);
         FailPoint.injectRandomSuspendFromHint(executionContext);
 
-        String tempCreateFunction = rewriteFuncContent(createFunctionContent);
+        updateTaskStateInNewTxn(DdlTaskState.DIRTY);
+
+        String tempCreateFunction = UdfUtils.removeFuncBody(createFunctionContent);
         SyncManagerHelper.sync(new CreateStoredFunctionSyncAction(functionName, tempCreateFunction, canPush),
             TddlConstants.INFORMATION_SCHEMA, SyncScope.ALL);
     }
 
-    private String rewriteFuncContent(String createFunctionContent) {
-        SQLCreateFunctionStatement
-            statement = (SQLCreateFunctionStatement) FastsqlUtils.parseSql(createFunctionContent).get(0);
-        statement.setBlock(new SQLBlockStatement());
-        return statement.toString(VisitorFeature.OutputPlOnlyDefinition);
-    }
-
     @Override
     protected void beforeRollbackTransaction(ExecutionContext executionContext) {
         SyncManagerHelper.sync(new DropStoredFunctionSyncAction(functionName),
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/DropFunctionDropMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/DropFunctionDropMetaTask.java
index 7d47a9364..30cbc7e3c 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/DropFunctionDropMetaTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/DropFunctionDropMetaTask.java
@@ -42,6 +42,8 @@ public DropFunctionDropMetaTask(String schemaName, String logicalTableName,
     @Override
     protected void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) {
+        updateSupportedCommands(true, false, metaDbConnection);
+
         FailPoint.injectRandomExceptionFromHint(executionContext);
         FailPoint.injectRandomSuspendFromHint(executionContext);
         FunctionAccessor accessor = new FunctionAccessor();
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/DropFunctionOnAllDnTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/DropFunctionOnAllDnTask.java
index 528b6a22c..fb78c4341 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/DropFunctionOnAllDnTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/DropFunctionOnAllDnTask.java
@@ -42,12 +42,8 @@ public DropFunctionOnAllDnTask(String schemaName, String functionName) {
     }
 
     @Override
-    protected void beforeTransaction(ExecutionContext executionContext) {
-        updateTaskStateInNewTxn(DdlTaskState.DIRTY);
-        executeImpl(executionContext);
-    }
-
-    public void executeImpl(ExecutionContext executionContext) {
+    protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) {
+        updateSupportedCommands(true, false, metaDbConnection);
         Set<String> allDnId = ExecUtils.getAllDnStorageId();
         for (String dnId : allDnId) {
             try (Connection conn = DbTopologyManager.getConnectionForStorage(dnId);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/DropJavaFunctionDropMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/DropJavaFunctionDropMetaTask.java
index b42c4747b..8e9834dc6 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/DropJavaFunctionDropMetaTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/basic/pl/udf/DropJavaFunctionDropMetaTask.java
@@ -41,6 +41,8 @@ public DropJavaFunctionDropMetaTask(String schemaName, String logicalTableName,
     @Override
     protected void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) {
+        updateSupportedCommands(true, false, metaDbConnection);
+
         FailPoint.injectRandomExceptionFromHint(executionContext);
         FailPoint.injectRandomSuspendFromHint(executionContext);
         JavaFunctionAccessor accessor = new JavaFunctionAccessor();
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterFunctionMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterFunctionMarkTask.java
new file mode 100644
index 000000000..c7e4f433b
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterFunctionMarkTask.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.task.cdc;
+
+import com.alibaba.fastjson.annotation.JSONCreator;
+import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility;
+import com.alibaba.polardbx.common.cdc.CdcManagerHelper;
+import com.alibaba.polardbx.common.cdc.DdlScope;
+import com.alibaba.polardbx.common.cdc.ICdcManager;
+import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName;
+import com.alibaba.polardbx.executor.utils.failpoint.FailPoint;
+import com.alibaba.polardbx.optimizer.context.DdlContext;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import lombok.Getter;
+import lombok.Setter;
+import org.apache.calcite.sql.SqlKind;
+
+import java.sql.Connection;
+import java.util.Map;
+
+import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter;
+
+/**
+ * description: marks an ALTER FUNCTION statement in the CDC DDL stream at instance scope
+ * author: ziyang.lb
+ * create: 2023-08-28 18:52
+ **/
+@TaskName(name = "CdcAlterFunctionMarkTask")
+@Getter
+@Setter
+public class CdcAlterFunctionMarkTask extends BaseDdlTask {
+
+    private final String functionName;
+
+    @JSONCreator
+    public CdcAlterFunctionMarkTask(String schemaName, String functionName) {
+        super(schemaName);
+        this.functionName = functionName;
+    }
+
+    @Override
+    protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) {
+        updateSupportedCommands(true, false, metaDbConnection);
+        FailPoint.injectRandomExceptionFromHint(executionContext);
+        FailPoint.injectRandomSuspendFromHint(executionContext);
+
+        DdlContext ddlContext = executionContext.getDdlContext();
+        Map<String, Object> param = buildExtendParameter(executionContext);
+        param.put(ICdcManager.CDC_DDL_SCOPE, DdlScope.Instance);
+
+        CdcManagerHelper.getInstance()
+            .notifyDdlNew(
+                schemaName,
+                functionName,
+                SqlKind.ALTER_FUNCTION.name(),
+                ddlContext.getDdlStmt(),
+                ddlContext.getDdlType(),
+                ddlContext.getJobId(),
+                getTaskId(),
+                CdcDdlMarkVisibility.Protected,
+                param);
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterIndexVisibilityMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterIndexVisibilityMarkTask.java
new file mode 100644
index 000000000..879befbde
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterIndexVisibilityMarkTask.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.task.cdc;
+
+import com.alibaba.fastjson.annotation.JSONCreator;
+import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility;
+import com.alibaba.polardbx.common.cdc.CdcManagerHelper;
+import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName;
+import com.alibaba.polardbx.executor.utils.failpoint.FailPoint;
+import com.alibaba.polardbx.optimizer.context.DdlContext;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import lombok.Getter;
+import lombok.Setter;
+import org.apache.calcite.sql.SqlKind;
+
+import java.sql.Connection;
+
+import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter;
+
+/**
+ * description: marks an ALTER INDEX visibility change in the CDC DDL stream
+ * author: ziyang.lb
+ * create: 2023-08-28 18:52
+ **/
+@TaskName(name = "CdcAlterIndexVisibilityMarkTask")
+@Getter
+@Setter
+public class CdcAlterIndexVisibilityMarkTask extends BaseDdlTask {
+
+    private final String tableName;
+
+    @JSONCreator
+    public CdcAlterIndexVisibilityMarkTask(String schemaName, String tableName) {
+        super(schemaName);
+        this.tableName = tableName;
+    }
+
+    @Override
+    protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) {
+        updateSupportedCommands(true, false, metaDbConnection);
+        FailPoint.injectRandomExceptionFromHint(executionContext);
+        FailPoint.injectRandomSuspendFromHint(executionContext);
+
+        DdlContext ddlContext = executionContext.getDdlContext();
+        CdcManagerHelper.getInstance()
+            .notifyDdlNew(
+                schemaName,
+                tableName,
+                SqlKind.ALTER_INDEX_VISIBILITY.name(),
+                ddlContext.getDdlStmt(),
+                ddlContext.getDdlType(),
+                ddlContext.getJobId(),
+                getTaskId(),
+                CdcDdlMarkVisibility.Protected,
+                buildExtendParameter(executionContext));
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterJoinGroupMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterJoinGroupMarkTask.java
new file mode 100644
index 000000000..d611ec026
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterJoinGroupMarkTask.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.task.cdc;
+
+import com.alibaba.fastjson.annotation.JSONCreator;
+import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility;
+import com.alibaba.polardbx.common.cdc.CdcManagerHelper;
+import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName;
+import com.alibaba.polardbx.executor.utils.failpoint.FailPoint;
+import com.alibaba.polardbx.optimizer.context.DdlContext;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import lombok.Getter;
+import lombok.Setter;
+import org.apache.calcite.sql.SqlKind;
+
+import java.sql.Connection;
+
+import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter;
+
+/**
+ * description: marks an ALTER JOINGROUP statement in the CDC DDL stream
+ * author: ziyang.lb
+ * create: 2023-08-28 18:52
+ **/
+@TaskName(name = "CdcAlterJoinGroupMarkTask")
+@Getter
+@Setter
+public class CdcAlterJoinGroupMarkTask extends BaseDdlTask {
+
+    private final String joinGroupName;
+
+    @JSONCreator
+    public CdcAlterJoinGroupMarkTask(String schemaName, String joinGroupName) {
+        super(schemaName);
+        this.joinGroupName = joinGroupName;
+    }
+
+    @Override
+    protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) {
+        updateSupportedCommands(true, false, metaDbConnection);
+        FailPoint.injectRandomExceptionFromHint(executionContext);
+        FailPoint.injectRandomSuspendFromHint(executionContext);
+
+        DdlContext ddlContext = executionContext.getDdlContext();
+        CdcManagerHelper.getInstance()
+            .notifyDdlNew(
+                schemaName,
+                joinGroupName,
+                SqlKind.ALTER_JOINGROUP.name(),
+                ddlContext.getDdlStmt(),
+                ddlContext.getDdlType(),
+                ddlContext.getJobId(),
+                getTaskId(),
+                CdcDdlMarkVisibility.Protected,
+                buildExtendParameter(executionContext));
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterProcedureMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterProcedureMarkTask.java
new file mode 100644
index 000000000..1afc72777
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterProcedureMarkTask.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.task.cdc;
+
+import com.alibaba.fastjson.annotation.JSONCreator;
+import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility;
+import com.alibaba.polardbx.common.cdc.CdcManagerHelper;
+import com.alibaba.polardbx.common.cdc.DdlScope;
+import com.alibaba.polardbx.common.cdc.ICdcManager;
+import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName;
+import com.alibaba.polardbx.executor.utils.failpoint.FailPoint;
+import com.alibaba.polardbx.optimizer.context.DdlContext;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import lombok.Getter;
+import lombok.Setter;
+import org.apache.calcite.sql.SqlKind;
+
+import java.sql.Connection;
+import java.util.Map;
+
+import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter;
+
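+/**
+ * Marks an ALTER PROCEDURE statement in the CDC DDL stream at schema scope so
+ * that downstream subscribers can replay the definition change.
+ */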
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.common.cdc.DdlScope; +import com.alibaba.polardbx.common.cdc.ICdcManager; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.context.DdlContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; +import java.util.Map; + +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + +@TaskName(name = "CdcAlterProcedureMarkTask") +@Getter +@Setter +public class CdcAlterProcedureMarkTask extends BaseDdlTask { + + private final String procedureName; + + @JSONCreator + public CdcAlterProcedureMarkTask(String schemaName, String procedureName) { + super(schemaName); + this.procedureName = procedureName; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + DdlContext ddlContext = executionContext.getDdlContext(); + Map param = buildExtendParameter(executionContext); + param.put(ICdcManager.CDC_DDL_SCOPE, DdlScope.Schema); + + CdcManagerHelper.getInstance() + .notifyDdlNew( + schemaName, + procedureName, + SqlKind.ALTER_PROCEDURE.name(), + ddlContext.getDdlStmt(), + ddlContext.getDdlType(), + ddlContext.getJobId(), + getTaskId(), + CdcDdlMarkVisibility.Protected, + param); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableColumnDdlMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableColumnDdlMarkTask.java index d41e8d8b4..4ad0e5e41 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableColumnDdlMarkTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableColumnDdlMarkTask.java @@ -17,8 +17,8 @@ package com.alibaba.polardbx.executor.ddl.job.task.cdc; import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; import com.alibaba.polardbx.common.cdc.CdcManagerHelper; -import com.alibaba.polardbx.common.cdc.DdlVisibility; import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData; import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; @@ -45,12 +45,15 @@ public class CdcAlterTableColumnDdlMarkTask extends BaseDdlTask { private final PhysicalPlanData physicalPlanData; private final boolean useOMC; + private final Long versionId; @JSONCreator - public CdcAlterTableColumnDdlMarkTask(String schemaName, PhysicalPlanData physicalPlanData, boolean useOMC) { + public CdcAlterTableColumnDdlMarkTask(String schemaName, PhysicalPlanData physicalPlanData, boolean useOMC, + long versionId) { super(schemaName); this.physicalPlanData = physicalPlanData; this.useOMC = useOMC; + this.versionId = versionId; } @Override 
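
The versionId added to CdcAlterTableColumnDdlMarkTask above feeds the prepareExtraCmdsKey and getDdlStmt helpers in the following hunk, which prefix the marked statement with a version-id hint. A hedged illustration of the effect (the exact hint text produced by CdcMarkUtil.buildVersionIdHint is an assumption here; only the hint-plus-DDL concatenation comes from getDdlStmt):

// Values are hypothetical; the hint rendering is assumed.
long versionId = 7001L;
String ddl = "ALTER TABLE t1 MODIFY COLUMN c1 BIGINT";
String marked = CdcMarkUtil.buildVersionIdHint(versionId) + ddl;
// e.g. "/*DDL_VERSION_ID=7001*/ALTER TABLE t1 MODIFY COLUMN c1 BIGINT"
// The hint lets the CDC consumer tie the mark to a specific schema version of
// the online-modify-column (OMC) change instead of matching raw SQL text.
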
@@ -58,6 +61,7 @@ protected void duringTransaction(Connection metaDbConnection, ExecutionContext e if (CBOUtil.isGsi(schemaName, physicalPlanData.getLogicalTableName())) { return; } + prepareExtraCmdsKey(executionContext); updateSupportedCommands(true, false, metaDbConnection); FailPoint.injectRandomExceptionFromHint(executionContext); FailPoint.injectRandomSuspendFromHint(executionContext); @@ -74,8 +78,25 @@ private void mark4AlterTable(ExecutionContext executionContext) { executionContext.getExtraCmds().put(USE_OMC, useOMC); CdcManagerHelper.getInstance() .notifyDdlNew(schemaName, physicalPlanData.getLogicalTableName(), physicalPlanData.getKind().name(), - ddlContext.getDdlStmt(), ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), - DdlVisibility.Public, + getDdlStmt(executionContext), ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), + CdcDdlMarkVisibility.Public, buildExtendParameter(executionContext)); } + + private void prepareExtraCmdsKey(ExecutionContext executionContext) { + if (CdcMarkUtil.isVersionIdInitialized(versionId)) { + CdcMarkUtil.useDdlVersionId(executionContext, versionId); + } + } + + private String getDdlStmt(ExecutionContext executionContext) { + return getDdlStmt(executionContext.getDdlContext().getDdlStmt()); + } + + private String getDdlStmt(String ddl) { + if (CdcMarkUtil.isVersionIdInitialized(versionId)) { + return CdcMarkUtil.buildVersionIdHint(versionId) + ddl; + } + return ddl; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableGroupAddTablesMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableGroupAddTablesMarkTask.java new file mode 100644 index 000000000..9c4ad43de --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableGroupAddTablesMarkTask.java @@ -0,0 +1,82 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.tablegroup.TableGroupUtils; +import com.alibaba.polardbx.optimizer.context.DdlContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; +import java.util.Map; + +import static com.alibaba.polardbx.common.cdc.ICdcManager.CDC_TABLE_GROUP_MANUAL_CREATE_FLAG; +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + +/** + * description: + * author: ziyang.lb + * create: 2023-08-28 18:52 + **/ +@TaskName(name = "CdcAlterTableGroupAddTablesMarkTask") +@Getter +@Setter +public class CdcAlterTableGroupAddTablesMarkTask extends BaseDdlTask { + + private final String tableGroupName; + + @JSONCreator + public CdcAlterTableGroupAddTablesMarkTask(String schemaName, String tableGroupName) { + super(schemaName); + this.tableGroupName = tableGroupName; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + Map parameter = buildExtendParameter(executionContext); + + boolean isManuallyCreatedTg = TableGroupUtils + .getTableGroupInfoByGroupName(schemaName, tableGroupName).isManuallyCreated(); + parameter.put(CDC_TABLE_GROUP_MANUAL_CREATE_FLAG, isManuallyCreatedTg); + + DdlContext ddlContext = executionContext.getDdlContext(); + CdcManagerHelper.getInstance() + .notifyDdlNew( + schemaName, + tableGroupName, + SqlKind.ALTER_TABLEGROUP_ADD_TABLE.name(), + ddlContext.getDdlStmt(), + ddlContext.getDdlType(), + ddlContext.getJobId(), + getTaskId(), + CdcDdlMarkVisibility.Protected, + parameter); + + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableGroupFinalMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableGroupFinalMarkTask.java new file mode 100644 index 000000000..dd6dda94c --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableGroupFinalMarkTask.java @@ -0,0 +1,80 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
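
CdcAlterTableGroupAddTablesMarkTask above, and the two table-group tasks that follow, attach CDC_TABLE_GROUP_MANUAL_CREATE_FLAG to every table-group mark. A plausible consumer-side reading, sketched under the assumption that the downstream decides replay behavior from the ext payload (the parseExtPayload helper and the skip logic are assumptions; only the producer-side put() is from this change):

// Hypothetical downstream check; CDC_TABLE_GROUP_MANUAL_CREATE_FLAG is the real
// key constant, everything else is illustrative.
Map<String, Object> ext = parseExtPayload(record); // hypothetical helper
boolean manuallyCreated =
    Boolean.TRUE.equals(ext.get(CDC_TABLE_GROUP_MANUAL_CREATE_FLAG));
if (!manuallyCreated) {
    // An implicitly created table group may not exist, or may be named
    // differently, on the replica, so ALTER TABLEGROUP cannot be replayed
    // verbatim and needs special handling.
}
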
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.tablegroup.TableGroupUtils; +import com.alibaba.polardbx.optimizer.context.DdlContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; +import java.util.Map; + +import static com.alibaba.polardbx.common.cdc.ICdcManager.CDC_TABLE_GROUP_MANUAL_CREATE_FLAG; +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + +/** + * description: + * author: ziyang.lb + * create: 2023-08-28 18:52 + **/ +@TaskName(name = "CdcAlterTableGroupFinalMarkTask") +@Getter +@Setter +public class CdcAlterTableGroupFinalMarkTask extends BaseDdlTask { + + private final String tableGroupName; + + @JSONCreator + public CdcAlterTableGroupFinalMarkTask(String schemaName, String tableGroupName) { + super(schemaName); + this.tableGroupName = tableGroupName; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + Map parameter = buildExtendParameter(executionContext); + + boolean isManuallyCreatedTg = TableGroupUtils + .getTableGroupInfoByGroupName(schemaName, tableGroupName).isManuallyCreated(); + parameter.put(CDC_TABLE_GROUP_MANUAL_CREATE_FLAG, isManuallyCreatedTg); + + DdlContext ddlContext = executionContext.getDdlContext(); + CdcManagerHelper.getInstance() + .notifyDdlNew( + schemaName, + tableGroupName, + SqlKind.ALTER_TABLEGROUP.name(), + ddlContext.getDdlStmt(), + ddlContext.getDdlType(), + ddlContext.getJobId(), + getTaskId(), + CdcDdlMarkVisibility.Protected, + parameter); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableGroupRenamePartitionMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableGroupRenamePartitionMarkTask.java new file mode 100644 index 000000000..f231e7a58 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableGroupRenamePartitionMarkTask.java @@ -0,0 +1,81 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.tablegroup.TableGroupUtils; +import com.alibaba.polardbx.optimizer.context.DdlContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; +import java.util.Map; + +import static com.alibaba.polardbx.common.cdc.ICdcManager.CDC_TABLE_GROUP_MANUAL_CREATE_FLAG; +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + +/** + * description: + * author: ziyang.lb + * create: 2023-08-28 18:52 + **/ +@TaskName(name = "CdcAlterTableGroupRenamePartitionMarkTask") +@Getter +@Setter +public class CdcAlterTableGroupRenamePartitionMarkTask extends BaseDdlTask { + + private final String tableGroupName; + + @JSONCreator + public CdcAlterTableGroupRenamePartitionMarkTask(String schemaName, String tableGroupName) { + super(schemaName); + this.tableGroupName = tableGroupName; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + Map parameter = buildExtendParameter(executionContext); + + boolean isManuallyCreatedTg = TableGroupUtils + .getTableGroupInfoByGroupName(schemaName, tableGroupName).isManuallyCreated(); + parameter.put(CDC_TABLE_GROUP_MANUAL_CREATE_FLAG, isManuallyCreatedTg); + + DdlContext ddlContext = executionContext.getDdlContext(); + CdcManagerHelper.getInstance() + .notifyDdlNew( + schemaName, + tableGroupName, + SqlKind.ALTER_TABLEGROUP.name(), + ddlContext.getDdlStmt(), + ddlContext.getDdlType(), + ddlContext.getJobId(), + getTaskId(), + CdcDdlMarkVisibility.Protected, + parameter); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableRenamePartitionMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableRenamePartitionMarkTask.java new file mode 100644 index 000000000..c41654a56 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableRenamePartitionMarkTask.java @@ -0,0 +1,103 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.druid.sql.SQLUtils; +import com.alibaba.polardbx.druid.sql.ast.SQLStatement; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableStatement; +import com.alibaba.polardbx.druid.sql.dialect.mysql.parser.MySqlStatementParser; +import com.alibaba.polardbx.druid.sql.parser.ByteString; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.context.DdlContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; +import lombok.extern.slf4j.Slf4j; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; +import java.util.List; +import java.util.Map; + +import static com.alibaba.polardbx.common.cdc.ICdcManager.CDC_IS_GSI; +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + +/** + * description: + * author: ziyang.lb + * create: 2023-08-28 18:52 + **/ +@TaskName(name = "CdcAlterTableRenamePartitionMarkTask") +@Getter +@Setter +@Slf4j +public class CdcAlterTableRenamePartitionMarkTask extends BaseDdlTask { + + private final String tableName; + private final boolean placeHolder; + + @JSONCreator + public CdcAlterTableRenamePartitionMarkTask(String schemaName, String tableName, boolean placeHolder) { + super(schemaName); + this.tableName = tableName; + this.placeHolder = placeHolder; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + if (placeHolder) { + return; + } + + DdlContext ddlContext = executionContext.getDdlContext(); + Map param = buildExtendParameter(executionContext); + + // alter index ... on table ... 
, mark with the primary table's name
+        String markTableName = tableName;
+        MySqlStatementParser parser = new MySqlStatementParser(ByteString.from(ddlContext.getDdlStmt()));
+        List parseResult = parser.parseStatementList();
+        if (!parseResult.isEmpty() && parseResult.get(0) instanceof SQLAlterTableStatement) {
+            SQLAlterTableStatement stmt = (SQLAlterTableStatement) parseResult.get(0);
+            if (stmt.getAlterIndexName() != null) {
+                markTableName = SQLUtils.normalize(stmt.getTableName());
+                param.put(CDC_IS_GSI, true);
+            }
+        }
+
+        CdcManagerHelper.getInstance()
+            .notifyDdlNew(
+                schemaName,
+                markTableName,
+                SqlKind.ALTER_TABLE.name(),
+                ddlContext.getDdlStmt(),
+                ddlContext.getDdlType(),
+                ddlContext.getJobId(),
+                getTaskId(),
+                CdcDdlMarkVisibility.Protected,
+                param);
+    }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableRewrittenDdlMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableRewrittenDdlMarkTask.java
index e79e4caf0..ac4ea8e30 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableRewrittenDdlMarkTask.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableRewrittenDdlMarkTask.java
@@ -17,8 +17,8 @@
 package com.alibaba.polardbx.executor.ddl.job.task.cdc;
 
 import com.alibaba.fastjson.annotation.JSONCreator;
+import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility;
 import com.alibaba.polardbx.common.cdc.CdcManagerHelper;
-import com.alibaba.polardbx.common.cdc.DdlVisibility;
 import com.alibaba.polardbx.common.ddl.newengine.DdlType;
 import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
 import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask;
@@ -35,7 +35,7 @@
 
 import static com.alibaba.polardbx.common.cdc.ICdcManager.FOREIGN_KEYS_DDL;
 import static com.alibaba.polardbx.common.cdc.ICdcManager.REFRESH_CREATE_SQL_4_PHY_TABLE;
-import static com.alibaba.polardbx.common.cdc.ICdcManager.USE_ORGINAL_DDL;
+import static com.alibaba.polardbx.common.cdc.ICdcManager.USE_ORIGINAL_DDL;
 import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter;
 
 @TaskName(name = "CdcAlterTableRewrittenDdlMarkTask")
@@ -76,7 +76,7 @@ protected void duringTransaction(Connection metaDbConnection, ExecutionContext e
         if (isAddColumns || isDropColumns) {
             executionContext.getExtraCmds().put(REFRESH_CREATE_SQL_4_PHY_TABLE, "true");
             if (CollectionUtils.isNotEmpty(alterTablePreparedData.getAddedIndexes())) {
-                executionContext.getExtraCmds().put(USE_ORGINAL_DDL, "true");
+                executionContext.getExtraCmds().put(USE_ORIGINAL_DDL, "true");
             }
         }
     }
@@ -86,7 +86,7 @@ protected void duringTransaction(Connection metaDbConnection, ExecutionContext e
 
         CdcManagerHelper.getInstance()
             .notifyDdlNew(schemaName, physicalPlanData.getLogicalTableName(), physicalPlanData.getKind().name(),
-                logicalSql, DdlType.ALTER_TABLE, ddlContext.getJobId(), getTaskId(), DdlVisibility.Public,
+                logicalSql, DdlType.ALTER_TABLE, ddlContext.getJobId(), getTaskId(), CdcDdlMarkVisibility.Public,
                 buildExtendParameter(executionContext));
     }
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableSetTableGroupMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableSetTableGroupMarkTask.java
new file mode 100644
index 000000000..d6fbcf420
--- /dev/null
+++ 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAlterTableSetTableGroupMarkTask.java @@ -0,0 +1,110 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.common.cdc.ICdcManager; +import com.alibaba.polardbx.druid.DbType; +import com.alibaba.polardbx.druid.sql.SQLUtils; +import com.alibaba.polardbx.druid.sql.ast.SQLStatement; +import com.alibaba.polardbx.druid.sql.ast.expr.SQLIdentifierExpr; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableStatement; +import com.alibaba.polardbx.druid.sql.dialect.mysql.parser.MySqlStatementParser; +import com.alibaba.polardbx.druid.sql.parser.ByteString; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.context.DdlContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; +import org.apache.calcite.sql.SqlKind; +import org.apache.commons.lang3.StringUtils; + +import java.sql.Connection; +import java.util.List; + +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; +import static com.alibaba.polardbx.optimizer.sql.sql2rel.TddlSqlToRelConverter.unwrapGsiName; + +/** + * description: + * author: ziyang.lb + * create: 2023-08-28 18:52 + **/ +@TaskName(name = "CdcAlterTableSetTableGroupMarkTask") +@Getter +@Setter +public class CdcAlterTableSetTableGroupMarkTask extends BaseDdlTask { + + private final String primaryTableName; + private final String gsiTableName; + private final boolean gsi; + + @JSONCreator + public CdcAlterTableSetTableGroupMarkTask(String schemaName, String primaryTableName, String gsiTableName, + boolean gsi) { + super(schemaName); + this.primaryTableName = primaryTableName; + this.gsiTableName = gsiTableName; + this.gsi = gsi; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + DdlContext ddlContext = executionContext.getDdlContext(); + String ddl = ddlContext.getDdlStmt(); + if (gsi) { + executionContext.getExtraCmds().put(ICdcManager.CDC_IS_GSI, true); + ddl = tryRewriteTableName(ddl); + } + + CdcManagerHelper.getInstance() + .notifyDdlNew( + schemaName, + primaryTableName, + SqlKind.ALTER_TABLE_SET_TABLEGROUP.name(), + ddl, + ddlContext.getDdlType(), + ddlContext.getJobId(), + getTaskId(), + CdcDdlMarkVisibility.Protected, + 
buildExtendParameter(executionContext)); + + } + + private String tryRewriteTableName(String ddl) { + MySqlStatementParser parser = new MySqlStatementParser(ByteString.from(ddl)); + List parseResult = parser.parseStatementList(); + if (!parseResult.isEmpty() && parseResult.get(0) instanceof SQLAlterTableStatement) { + SQLAlterTableStatement alterTableStatement = (SQLAlterTableStatement) parseResult.get(0); + String tableName = SQLUtils.normalize(alterTableStatement.getTableName()); + if (StringUtils.equalsIgnoreCase(gsiTableName, tableName)) { + String newTableName = primaryTableName + "." + unwrapGsiName(gsiTableName); + alterTableStatement.setName(new SQLIdentifierExpr(newTableName)); + return SQLUtils.toSQLString(alterTableStatement, DbType.mysql, new SQLUtils.FormatOption(true, false)); + } + } + return ddl; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAnalyzeTableMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAnalyzeTableMarkTask.java new file mode 100644 index 000000000..69e78963b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcAnalyzeTableMarkTask.java @@ -0,0 +1,72 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
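
tryRewriteTableName in CdcAlterTableSetTableGroupMarkTask above normalizes marks issued against a global secondary index. A worked example of the rewrite (the table and index names are hypothetical, and the exact suffix handling of unwrapGsiName is an assumption):

// Input DDL as issued against the GSI's internal, randomly suffixed name:
//   ALTER TABLE `g_i_seller_$a1b2` SET TABLEGROUP = tg1
// With primaryTableName = "orders" and gsiTableName = "g_i_seller_$a1b2",
// unwrapGsiName is assumed to strip the "_$a1b2" suffix, so the statement is
// re-rendered roughly as:
//   ALTER TABLE orders.g_i_seller SET TABLEGROUP = tg1
// and CDC_IS_GSI is set, letting the downstream resolve the index through its
// primary table rather than a physical suffix that differs per instance.
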
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.context.DdlContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; + +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + +/** + * description: + * author: ziyang.lb + * create: 2023-08-28 18:52 + **/ +@TaskName(name = "CdcAnalyzeTableMarkTask") +@Getter +@Setter +public class CdcAnalyzeTableMarkTask extends BaseDdlTask { + + private final String tableName; + + @JSONCreator + public CdcAnalyzeTableMarkTask(String schemaName, String tableName) { + super(schemaName); + this.tableName = tableName; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + DdlContext ddlContext = executionContext.getDdlContext(); + CdcManagerHelper.getInstance() + .notifyDdlNew( + schemaName, + tableName, + SqlKind.ANALYZE_TABLE.name(), + ddlContext.getDdlStmt(), + ddlContext.getDdlType(), + ddlContext.getJobId(), + getTaskId(), + CdcDdlMarkVisibility.Protected, + buildExtendParameter(executionContext)); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateColumnarIndexTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateColumnarIndexTask.java new file mode 100644 index 000000000..84118fa17 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateColumnarIndexTask.java @@ -0,0 +1,120 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.task.cdc;
+
+import com.alibaba.fastjson.annotation.JSONCreator;
+import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility;
+import com.alibaba.polardbx.common.cdc.CdcManagerHelper;
+import com.alibaba.polardbx.common.cdc.ICdcManager;
+import com.alibaba.polardbx.common.ddl.newengine.DdlType;
+import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.columnar.CciSchemaEvolutionTask;
+import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName;
+import com.alibaba.polardbx.optimizer.context.DdlContext;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import lombok.Getter;
+import org.apache.calcite.sql.SqlKind;
+
+import java.sql.Connection;
+import java.util.Map;
+
+import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter;
+
+@Getter
+@TaskName(name = "CdcCreateColumnarIndexTask")
+public class CdcCreateColumnarIndexTask extends BaseDdlTask {
+    private final String logicalTableName;
+    private final String createIndexSql;
+    private final String columnarIndexTableName;
+    private final String originIndexName;
+    private final Long versionId;
+    private final CciSchemaEvolutionTask cciSchemaEvolutionTask;
+
+    public CdcCreateColumnarIndexTask(String schemaName, String logicalTableName, String columnarIndexTableName,
+                                      String originIndexName, Map options,
+                                      String createIndexSql, Long versionId) {
+        this(schemaName,
+            logicalTableName,
+            columnarIndexTableName,
+            originIndexName,
+            createIndexSql,
+            versionId,
+            CciSchemaEvolutionTask.createCci(schemaName,
+                logicalTableName,
+                columnarIndexTableName,
+                options,
+                versionId));
+    }
+
+    @JSONCreator
+    private CdcCreateColumnarIndexTask(String schemaName, String logicalTableName, String columnarIndexTableName,
+                                       String originIndexName, String createIndexSql, Long versionId,
+                                       CciSchemaEvolutionTask cciSchemaEvolutionTask) {
+        super(schemaName);
+        this.logicalTableName = logicalTableName;
+        this.createIndexSql = createIndexSql;
+        this.columnarIndexTableName = columnarIndexTableName;
+        this.originIndexName = originIndexName;
+        this.versionId = versionId;
+        this.cciSchemaEvolutionTask = cciSchemaEvolutionTask;
+    }
+
+    @Override
+    protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) {
+        cciSchemaEvolutionTask.duringTransaction(jobId, metaDbConnection, executionContext);
+
+        DdlContext ddlContext = executionContext.getDdlContext();
+        String markSql = CdcMarkUtil.buildVersionIdHint(versionId) + createIndexSql;
+        CdcMarkUtil.useDdlVersionId(executionContext, versionId);
+        CdcMarkUtil.useOriginalDDL(executionContext);
+        final Map extParam = buildExtendParameter(executionContext, createIndexSql);
+        // Set TASK_MARK_SEQ=1; the rollback mark uses a different sequence, so that its CdcDdlMark,
+        // with the same jobId and taskId, will not be ignored in com.alibaba.polardbx.cdc.CdcManager.recordDdl
+        extParam.put(ICdcManager.TASK_MARK_SEQ, 1);
+
+        CdcManagerHelper.getInstance()
+            .notifyDdlNew(schemaName, logicalTableName, SqlKind.CREATE_INDEX.name(),
+                markSql, DdlType.CREATE_INDEX, ddlContext.getJobId(), getTaskId(),
+                CdcDdlMarkVisibility.Protected, extParam);
+    }
+
+    @Override
+    protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionContext executionContext) {
+        cciSchemaEvolutionTask.duringRollbackTransaction(jobId, metaDbConnection, executionContext);
+
+        // DDL_ID for ext will be added in front of sql in CdcMarkUtil.buildExtendParameter()
+        final String 
rollbackSql = "DROP INDEX `" + originIndexName + "` ON `" + logicalTableName + "`";
+        final String markSql = CdcMarkUtil.buildVersionIdHint(versionId) + rollbackSql;
+        CdcMarkUtil.useDdlVersionId(executionContext, versionId);
+        CdcMarkUtil.useOriginalDDL(executionContext);
+        final Map extParam = buildExtendParameter(executionContext, rollbackSql);
+        // Set TASK_MARK_SEQ=2, so that the CdcDdlMark of this rollback task will not be ignored
+        // in com.alibaba.polardbx.cdc.CdcManager.recordDdl
+        extParam.put(ICdcManager.TASK_MARK_SEQ, 2);
+
+        final DdlContext ddlContext = executionContext.getDdlContext();
+        CdcManagerHelper.getInstance()
+            .notifyDdlNew(schemaName, logicalTableName, SqlKind.DROP_INDEX.name(),
+                markSql, DdlType.DROP_INDEX, ddlContext.getJobId(), getTaskId(),
+                CdcDdlMarkVisibility.Protected, extParam);
+    }
+
+    @Override
+    protected String remark() {
+        return String.format("|ddlVersionId: %s |sql: %s", versionId, createIndexSql);
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateFunctionMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateFunctionMarkTask.java
new file mode 100644
index 000000000..a98e8e47c
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateFunctionMarkTask.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
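
CdcCreateColumnarIndexTask above emits two marks under one (jobId, taskId) pair: the forward CREATE INDEX with TASK_MARK_SEQ=1 and the compensating DROP INDEX with TASK_MARK_SEQ=2 on rollback. A sketch of the dedup rule this relies on (the exact key used by com.alibaba.polardbx.cdc.CdcManager.recordDdl is an assumption):

// Hypothetical dedup key; recordDdl is assumed to ignore a mark whose key it
// has already recorded.
//   forward mark:  (jobId=42, taskId=7, seq=1) -> CREATE ... INDEX ...
//   rollback mark: (jobId=42, taskId=7, seq=2) -> DROP INDEX ...
long jobId = 42L, taskId = 7L;
int taskMarkSeq = 2;
String dedupKey = jobId + ":" + taskId + ":" + taskMarkSeq;
// With a shared sequence the rollback mark would collide with the forward
// mark's key and be dropped, leaving the downstream with an index the
// upstream already rolled back.
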
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.common.cdc.DdlScope; +import com.alibaba.polardbx.common.cdc.ICdcManager; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.context.DdlContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; +import java.util.Map; + +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + +/** + * description: + * author: ziyang.lb + * create: 2023-08-28 18:52 + **/ +@TaskName(name = "CdcCreateFunctionMarkTask") +@Getter +@Setter +public class CdcCreateFunctionMarkTask extends BaseDdlTask { + + private final String functionName; + + @JSONCreator + public CdcCreateFunctionMarkTask(String schemaName, String functionName) { + super(schemaName); + this.functionName = functionName; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + DdlContext ddlContext = executionContext.getDdlContext(); + Map param = buildExtendParameter(executionContext); + param.put(ICdcManager.CDC_DDL_SCOPE, DdlScope.Instance); + + CdcManagerHelper.getInstance() + .notifyDdlNew( + schemaName, + functionName, + SqlKind.CREATE_FUNCTION.name(), + ddlContext.getDdlStmt(), + ddlContext.getDdlType(), + ddlContext.getJobId(), + getTaskId(), + CdcDdlMarkVisibility.Protected, + param); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateJavaFunctionMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateJavaFunctionMarkTask.java new file mode 100644 index 000000000..b9f90750d --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateJavaFunctionMarkTask.java @@ -0,0 +1,78 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
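
CdcCreateFunctionMarkTask above stamps its mark with DdlScope.Instance, while the procedure and view tasks in this change use DdlScope.Schema. The distinction, as far as these usages show (the replay semantics are inferred from usage, not documented here):

// Inferred scope semantics from the usages in this change:
//   DdlScope.Instance -> functions (including java functions): one namespace
//                        shared by the whole instance.
//   DdlScope.Schema   -> procedures and views: the object belongs to the
//                        schema named in the mark.
// The scope rides along in the ext map:
param.put(ICdcManager.CDC_DDL_SCOPE, DdlScope.Instance);
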
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.common.cdc.DdlScope; +import com.alibaba.polardbx.common.cdc.ICdcManager; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.context.DdlContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; +import java.util.Map; + +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + +/** + * description: + * author: ziyang.lb + * create: 2023-08-28 18:52 + **/ +@TaskName(name = "CdcCreateJavaFunctionMarkTask") +@Getter +@Setter +public class CdcCreateJavaFunctionMarkTask extends BaseDdlTask { + + private final String functionName; + + @JSONCreator + public CdcCreateJavaFunctionMarkTask(String schemaName, String functionName) { + super(schemaName); + this.functionName = functionName; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + DdlContext ddlContext = executionContext.getDdlContext(); + Map param = buildExtendParameter(executionContext); + param.put(ICdcManager.CDC_DDL_SCOPE, DdlScope.Instance); + + CdcManagerHelper.getInstance() + .notifyDdlNew( + schemaName, + functionName, + SqlKind.CREATE_JAVA_FUNCTION.name(), + ddlContext.getDdlStmt(), + ddlContext.getDdlType(), + ddlContext.getJobId(), + getTaskId(), + CdcDdlMarkVisibility.Protected, + param); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateJoinGroupMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateJoinGroupMarkTask.java new file mode 100644 index 000000000..3efb82890 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateJoinGroupMarkTask.java @@ -0,0 +1,74 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.context.DdlContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; +import java.util.Map; + +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + +/** + * description: + * author: ziyang.lb + * create: 2023-08-28 18:52 + **/ +@TaskName(name = "CdcCreateJoinGroupMarkTask") +@Getter +@Setter +public class CdcCreateJoinGroupMarkTask extends BaseDdlTask { + + private final String joinGroupName; + + @JSONCreator + public CdcCreateJoinGroupMarkTask(String schemaName, String joinGroupName) { + super(schemaName); + this.joinGroupName = joinGroupName; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + DdlContext ddlContext = executionContext.getDdlContext(); + Map param = buildExtendParameter(executionContext); + CdcManagerHelper.getInstance() + .notifyDdlNew( + schemaName, + joinGroupName, + SqlKind.CREATE_JOINGROUP.name(), + ddlContext.getDdlStmt(), + ddlContext.getDdlType(), + ddlContext.getJobId(), + getTaskId(), + CdcDdlMarkVisibility.Protected, + param); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateProcedureMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateProcedureMarkTask.java new file mode 100644 index 000000000..cf2d45283 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateProcedureMarkTask.java @@ -0,0 +1,78 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.common.cdc.DdlScope; +import com.alibaba.polardbx.common.cdc.ICdcManager; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.context.DdlContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; +import java.util.Map; + +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + +/** + * description: + * author: ziyang.lb + * create: 2023-08-28 18:52 + **/ +@TaskName(name = "CdcCreateProcedureMarkTask") +@Getter +@Setter +public class CdcCreateProcedureMarkTask extends BaseDdlTask { + + private final String procedureName; + + @JSONCreator + public CdcCreateProcedureMarkTask(String schemaName, String procedureName) { + super(schemaName); + this.procedureName = procedureName; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + DdlContext ddlContext = executionContext.getDdlContext(); + Map param = buildExtendParameter(executionContext); + param.put(ICdcManager.CDC_DDL_SCOPE, DdlScope.Schema); + + CdcManagerHelper.getInstance() + .notifyDdlNew( + schemaName, + procedureName, + SqlKind.CREATE_PROCEDURE.name(), + ddlContext.getDdlStmt(), + ddlContext.getDdlType(), + ddlContext.getJobId(), + getTaskId(), + CdcDdlMarkVisibility.Protected, + param); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateTableGroupMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateTableGroupMarkTask.java new file mode 100644 index 000000000..1fe705570 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateTableGroupMarkTask.java @@ -0,0 +1,72 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.context.DdlContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; + +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + +/** + * description: + * author: ziyang.lb + * create: 2023-08-28 18:52 + **/ +@TaskName(name = "CdcCreateTableGroupMarkTask") +@Getter +@Setter +public class CdcCreateTableGroupMarkTask extends BaseDdlTask { + + private final String tableGroupName; + + @JSONCreator + public CdcCreateTableGroupMarkTask(String schemaName, String tableGroupName) { + super(schemaName); + this.tableGroupName = tableGroupName; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + DdlContext ddlContext = executionContext.getDdlContext(); + CdcManagerHelper.getInstance() + .notifyDdlNew( + schemaName, + tableGroupName, + SqlKind.CREATE_TABLEGROUP.name(), + ddlContext.getDdlStmt(), + ddlContext.getDdlType(), + ddlContext.getJobId(), + getTaskId(), + CdcDdlMarkVisibility.Protected, + buildExtendParameter(executionContext)); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateTableIfNotExistsMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateTableIfNotExistsMarkTask.java new file mode 100644 index 000000000..ade1e1b26 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateTableIfNotExistsMarkTask.java @@ -0,0 +1,76 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.task.cdc;
+
+import com.alibaba.fastjson.annotation.JSONCreator;
+import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility;
+import com.alibaba.polardbx.common.cdc.CdcManagerHelper;
+import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName;
+import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator;
+import com.alibaba.polardbx.executor.utils.failpoint.FailPoint;
+import com.alibaba.polardbx.optimizer.context.DdlContext;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import lombok.Getter;
+import lombok.Setter;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.calcite.sql.SqlKind;
+
+import java.sql.Connection;
+
+import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter;
+
+@TaskName(name = "CdcCreateTableIfNotExistsMarkTask")
+@Getter
+@Setter
+@Slf4j
+public class CdcCreateTableIfNotExistsMarkTask extends BaseDdlTask {
+
+    private String tableName;
+
+    @JSONCreator
+    public CdcCreateTableIfNotExistsMarkTask(String schemaName, String tableName) {
+        super(schemaName);
+        this.tableName = tableName;
+    }
+
+    @Override
+    protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) {
+        updateSupportedCommands(true, false, metaDbConnection);
+        FailPoint.injectRandomExceptionFromHint(executionContext);
+        FailPoint.injectRandomSuspendFromHint(executionContext);
+
+        DdlContext ddlContext = executionContext.getDdlContext();
+        boolean tableExists = TableValidator.checkIfTableExists(schemaName, tableName);
+
+        // If the table still exists, the result matches what the prepare phase saw, so mark normally.
+        // If the table no longer exists, the prepare-phase judgement is stale and we must not mark,
+        // otherwise consistency breaks. Example: table t1 exists; thread 1 runs DROP while thread 2
+        // runs CREATE TABLE t1 IF NOT EXISTS. Thread 2 sees the table and would mark directly, but
+        // the drop commits first, so the downstream replays drop -> create: t1 no longer exists
+        // upstream yet still exists downstream.
+        if (tableExists) {
+            // Backward compatibility must be considered here.
+            // Historically the CDC downstream decides whether to apply a marked sql by whether job_id
+            // is empty: apply when non-empty, skip when empty. So job_id must stay empty here.
+            // Otherwise, when only the CN is upgraded and CDC is not, an old CDC version cannot tell
+            // a real CREATE TABLE from a pure mark, which causes problems.
+            CdcManagerHelper.getInstance().notifyDdlNew(schemaName, tableName, SqlKind.CREATE_TABLE.name(),
+                ddlContext.getDdlStmt(), ddlContext.getDdlType(), null, getTaskId(),
+                CdcDdlMarkVisibility.Public, buildExtendParameter(executionContext));
+        } else {
+            log.warn("table {} has been dropped, cdc ddl mark for creating table with if not exists is ignored, sql {}",
+                tableName, ddlContext.getDdlStmt());
+        }
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateViewMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateViewMarkTask.java
new file mode 100644
index 000000000..c785fa7bc
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcCreateViewMarkTask.java
@@ -0,0 +1,99 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
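
The existence re-check in CdcCreateTableIfNotExistsMarkTask above guards against a concrete interleaving; laid out as a timeline (the sessions and table t1 are hypothetical):

// session A                          session B
// ---------                          ---------
// DROP TABLE t1 starts               CREATE TABLE IF NOT EXISTS t1 starts
//                                    prepare phase: t1 exists -> plans to mark
// drop commits, DROP mark emitted
//                                    without the re-check: CREATE mark emitted
//
// Downstream replay order becomes DROP -> CREATE, so t1 survives on the
// replica while it is gone upstream. Re-checking inside the marking
// transaction suppresses the stale CREATE mark (the log.warn branch).
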
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.task.cdc;
+
+import com.alibaba.fastjson.annotation.JSONCreator;
+import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility;
+import com.alibaba.polardbx.common.cdc.CdcManagerHelper;
+import com.alibaba.polardbx.common.cdc.DdlScope;
+import com.alibaba.polardbx.common.cdc.ICdcManager;
+import com.alibaba.polardbx.common.ddl.newengine.DdlType;
+import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName;
+import com.alibaba.polardbx.executor.utils.failpoint.FailPoint;
+import com.alibaba.polardbx.optimizer.context.DdlContext;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import lombok.Getter;
+import lombok.Setter;
+import org.apache.calcite.sql.SqlKind;
+
+import java.sql.Connection;
+import java.util.Map;
+
+import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter;
+
+/**
+ * description:
+ * author: ziyang.lb
+ * create: 2023-08-28 18:52
+ **/
+@TaskName(name = "CdcCreateViewMarkTask")
+@Getter
+@Setter
+public class CdcCreateViewMarkTask extends BaseDdlTask {
+    private final String viewName;
+    private final Boolean isAlter;
+
+    @JSONCreator
+    public CdcCreateViewMarkTask(String schemaName, String viewName, Boolean isAlter) {
+        super(schemaName);
+        this.viewName = viewName;
+        this.isAlter = isAlter;
+    }
+
+    @Override
+    protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) {
+        updateSupportedCommands(true, false, metaDbConnection);
+        FailPoint.injectRandomExceptionFromHint(executionContext);
+        FailPoint.injectRandomSuspendFromHint(executionContext);
+
+        DdlContext ddlContext = executionContext.getDdlContext();
+        Map param = buildExtendParameter(executionContext);
+        param.put(ICdcManager.CDC_DDL_SCOPE, DdlScope.Schema);
+
+        if (isAlter) {
+            CdcManagerHelper.getInstance()
+                .notifyDdlNew(
+                    schemaName,
+                    viewName,
+                    SqlKind.ALTER_VIEW.name(),
+                    ddlContext.getDdlStmt(),
+                    DdlType.ALTER_VIEW,
+                    ddlContext.getJobId(),
+                    getTaskId(),
+                    CdcDdlMarkVisibility.Protected,
+                    param);
+        } else {
+            CdcManagerHelper.getInstance()
+                .notifyDdlNew(
+                    schemaName,
+                    viewName,
+                    SqlKind.CREATE_VIEW.name(),
+                    ddlContext.getDdlStmt(),
+                    DdlType.CREATE_VIEW,
+                    ddlContext.getJobId(),
+                    getTaskId(),
+                    CdcDdlMarkVisibility.Protected,
+                    param);
+        }
+    }
+
+    @Override
+    protected String remark() {
+        return "|SqlKind: " + (isAlter ? 
SqlKind.ALTER_VIEW.name() : SqlKind.CREATE_VIEW.name()); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDdlMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDdlMarkTask.java index 9280fa1eb..6f2b5d041 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDdlMarkTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDdlMarkTask.java @@ -17,8 +17,8 @@ package com.alibaba.polardbx.executor.ddl.job.task.cdc; import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; import com.alibaba.polardbx.common.cdc.CdcManagerHelper; -import com.alibaba.polardbx.common.cdc.DdlVisibility; import com.alibaba.polardbx.common.cdc.ICdcManager; import com.alibaba.polardbx.common.ddl.newengine.DdlType; import com.alibaba.polardbx.common.exception.TddlRuntimeException; @@ -31,6 +31,7 @@ import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData; import com.alibaba.polardbx.executor.ddl.job.meta.TableMetaChanger; import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.columnar.CciSchemaEvolutionTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.gms.topology.DbInfoManager; @@ -41,8 +42,10 @@ import lombok.Getter; import lombok.Setter; import org.apache.calcite.sql.SqlKind; +import org.apache.commons.lang.StringUtils; import java.sql.Connection; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -51,7 +54,9 @@ import static com.alibaba.polardbx.common.cdc.ICdcManager.REFRESH_CREATE_SQL_4_PHY_TABLE; import static com.alibaba.polardbx.common.properties.ConnectionParams.SIM_CDC_FAILED; +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcDropTableIfExistsMarkTask.checkTableName; import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.isUseFkOriginalDDL; import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcSqlUtils.SQL_PARSE_FEATURES; /** @@ -63,16 +68,33 @@ public class CdcDdlMarkTask extends BaseDdlTask { private final PhysicalPlanData physicalPlanData; - private boolean useOrginalDDl; + private final Long versionId; + + private boolean useOriginalDDl; private boolean foreignKeys; + /** + * For statements like CREATE TABLE with CCI, + * the original ddl statement will be normalized + * (reassigned a unique index name, assigned a partitioning part if not already specified) + * in {@link org.apache.calcite.sql.validate.SqlValidatorImpl#gsiNormalizeNewPartition} + * and {@link com.alibaba.polardbx.optimizer.sql.sql2rel.TddlSqlToRelConverter#checkAndRewriteGsiName}. + *
+ * We have to save the normalized ddl statement in the ext part of the cdc mark, + * so that the downstream can replay the statement exactly as the upstream does. + *
+ * PS: PARTITIONS is assigned in {@link com.alibaba.polardbx.optimizer.partition.PartitionInfoBuilder#buildCompletePartByDefByAstParams} + */ + private String normalizedOriginalDdl; + private final List<CciSchemaEvolutionTask> schemaEvolutionRecordInitializer = new ArrayList<>(); @JSONCreator - public CdcDdlMarkTask(String schemaName, PhysicalPlanData physicalPlanData, Boolean useOrginalDDl, - Boolean foreignKeys) { + public CdcDdlMarkTask(String schemaName, PhysicalPlanData physicalPlanData, Boolean useOriginalDdl, + Boolean foreignKeys, Long versionId) { super(schemaName); this.physicalPlanData = physicalPlanData; - this.useOrginalDDl = useOrginalDDl != null && useOrginalDDl; + this.useOriginalDDl = useOriginalDdl != null && useOriginalDdl; this.foreignKeys = foreignKeys != null && foreignKeys; + this.versionId = versionId; } @Override @@ -94,17 +116,17 @@ protected void duringTransaction(Connection metaDbConnection, ExecutionContext e if (physicalPlanData.getKind() == SqlKind.CREATE_TABLE) { if (executionContext.getDdlContext() != null && executionContext.getDdlContext().getDdlType() == DdlType.CREATE_TABLE) { - useOrginalDDl = true; + useOriginalDDl = true; prepareExtraCmdsKey(executionContext); } - mark4CreateTable(executionContext); + mark4CreateTable(metaDbConnection, executionContext); } else if (physicalPlanData.getKind() == SqlKind.DROP_TABLE) { mark4DropTable(executionContext); } else if (physicalPlanData.getKind() == SqlKind.RENAME_TABLE) { if (DbInfoManager.getInstance().isNewPartitionDb(schemaName)) { - mark4RenamePartitionModeTable(executionContext); + mark4RenamePartitionModeTable(executionContext, physicalPlanData.isRenamePhyTable()); } else { - mark4RenameTable(executionContext); + mark4RenameTable(executionContext, physicalPlanData.isRenamePhyTable()); } } else if (physicalPlanData.getKind() == SqlKind.ALTER_TABLE) { mark4AlterTable(executionContext); @@ -124,25 +146,33 @@ protected void duringTransaction(Connection metaDbConnection, ExecutionContext e } private void prepareExtraCmdsKey(ExecutionContext executionContext) { - if (useOrginalDDl) { - executionContext.getExtraCmds().put(ICdcManager.USE_ORGINAL_DDL, "true"); + if (useOriginalDDl) { + executionContext.getExtraCmds().put(ICdcManager.USE_ORIGINAL_DDL, "true"); } if (foreignKeys) { executionContext.getExtraCmds().put(ICdcManager.FOREIGN_KEYS_DDL, "true"); } + if (CdcMarkUtil.isVersionIdInitialized(versionId)) { + CdcMarkUtil.useDdlVersionId(executionContext, versionId); + } } - private void mark4CreateTable(ExecutionContext executionContext) { + private void mark4CreateTable(Connection metaDbConnection, ExecutionContext executionContext) { + //Materialized views are not marked here; they are marked in LogicalDropViewHandler DdlContext ddlContext = executionContext.getDdlContext(); if (isCreateMaterializedView(ddlContext.getDdlStmt())) { - //Materialized views are not marked return; } + + for (CciSchemaEvolutionTask initializer : schemaEvolutionRecordInitializer) { + initializer.duringTransaction(jobId, metaDbConnection, executionContext); + } + CdcManagerHelper.getInstance() .notifyDdlNew(schemaName, physicalPlanData.getLogicalTableName(), physicalPlanData.getKind().name(), - physicalPlanData.getCreateTablePhysicalSql(), ddlContext.getDdlType(), ddlContext.getJobId(), - getTaskId(), - DdlVisibility.Public, buildExtendParameter(executionContext)); + physicalPlanData.getCreateTablePhysicalSql(), ddlContext.getDdlType(), + ddlContext.getJobId(), getTaskId(), CdcDdlMarkVisibility.Public, + buildExtendParameterWithNormalizedDdl(executionContext)); } private void mark4DropTable(ExecutionContext
executionContext) { @@ -152,19 +182,20 @@ private void mark4DropTable(ExecutionContext executionContext) { //Materialized views are not marked return; } + checkTableName(getDdlStmt(executionContext)); CdcManagerHelper.getInstance() .notifyDdlNew(schemaName, physicalPlanData.getLogicalTableName(), physicalPlanData.getKind().name(), - ddlContext.getDdlStmt(), ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), - DdlVisibility.Public, + getDdlStmt(executionContext), ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), + CdcDdlMarkVisibility.Public, buildExtendParameter(executionContext)); } - private void mark4RenameTable(ExecutionContext executionContext) { + private void mark4RenameTable(ExecutionContext executionContext, boolean renamePhyTable) { // If the physical table names changed as well, the new tablePattern must be passed to cdcManager as an extra parameter // If the physical table names changed, all physical tables have already been renamed at this point (any dml the user submits against this logical table now fails), so the cdc mark must happen before the metadata change // If the physical table names did not change, the tablePattern stays the same; rename is a lightweight operation and the mark can go either before or after the metadata change String newTbNamePattern = TableMetaChanger.buildNewTbNamePattern(executionContext, schemaName, - physicalPlanData.getLogicalTableName(), physicalPlanData.getNewLogicalTableName()); + physicalPlanData.getLogicalTableName(), physicalPlanData.getNewLogicalTableName(), renamePhyTable); Map<String, Object> params = buildExtendParameter(executionContext); params.put(ICdcManager.TABLE_NEW_NAME, physicalPlanData.getNewLogicalTableName()); params.put(ICdcManager.TABLE_NEW_PATTERN, newTbNamePattern); @@ -172,16 +203,16 @@ private void mark4RenameTable(ExecutionContext executionContext) { DdlContext ddlContext = executionContext.getDdlContext(); CdcManagerHelper.getInstance() .notifyDdlNew(schemaName, physicalPlanData.getLogicalTableName(), physicalPlanData.getKind().name(), - ddlContext.getDdlStmt(), ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), - DdlVisibility.Public, params); + getDdlStmt(executionContext), ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), + CdcDdlMarkVisibility.Public, params); } - private void mark4RenamePartitionModeTable(ExecutionContext executionContext) { + private void mark4RenamePartitionModeTable(ExecutionContext executionContext, boolean renamePhyTable) { //Partitioned tables have no tablePattern and the physical table names do not change, so they are marked separately from non-partitioned tables Map<String, Object> params = buildExtendParameter(executionContext); params.put(ICdcManager.TABLE_NEW_NAME, physicalPlanData.getNewLogicalTableName()); - if (executionContext.isPhyTableRenamed()) { + if (renamePhyTable) { Map<String, Set<String>> newTopology = new HashMap<>(); Map<String, List<List<String>>> topology = physicalPlanData.getTableTopology(); topology.forEach((k, v) -> @@ -192,14 +223,14 @@ private void mark4RenamePartitionModeTable(ExecutionContext executionContext) { DdlContext ddlContext = executionContext.getDdlContext(); CdcManagerHelper.getInstance() .notifyDdlNew(schemaName, physicalPlanData.getLogicalTableName(), physicalPlanData.getKind().name(), - ddlContext.getDdlStmt(), ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), - DdlVisibility.Public, params, true, newTopology); + getDdlStmt(executionContext), ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), + CdcDdlMarkVisibility.Public, params, true, newTopology); } else { DdlContext ddlContext = executionContext.getDdlContext(); CdcManagerHelper.getInstance() .notifyDdlNew(schemaName, physicalPlanData.getLogicalTableName(), physicalPlanData.getKind().name(), - ddlContext.getDdlStmt(), ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), - DdlVisibility.Public, + getDdlStmt(executionContext), ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), + CdcDdlMarkVisibility.Public,
params); } } @@ -220,8 +251,8 @@ private void mark4AlterTable(ExecutionContext executionContext) { CdcManagerHelper.getInstance() .notifyDdlNew(schemaName, physicalPlanData.getLogicalTableName(), physicalPlanData.getKind().name(), - ddlContext.getDdlStmt(), ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), - DdlVisibility.Public, + getDdlStmt(executionContext), ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), + CdcDdlMarkVisibility.Public, buildExtendParameter(executionContext)); } @@ -229,8 +260,8 @@ private void mark4CreateIndex(ExecutionContext executionContext) { DdlContext ddlContext = executionContext.getDdlContext(); CdcManagerHelper.getInstance() .notifyDdlNew(schemaName, physicalPlanData.getLogicalTableName(), physicalPlanData.getKind().name(), - ddlContext.getDdlStmt(), ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), - DdlVisibility.Public, + getDdlStmt(executionContext), ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), + CdcDdlMarkVisibility.Public, buildExtendParameter(executionContext)); } @@ -238,8 +269,8 @@ private void mark4DropIndex(ExecutionContext executionContext) { DdlContext ddlContext = executionContext.getDdlContext(); CdcManagerHelper.getInstance() .notifyDdlNew(schemaName, physicalPlanData.getLogicalTableName(), physicalPlanData.getKind().name(), - ddlContext.getDdlStmt(), ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), - DdlVisibility.Public, + getDdlStmt(executionContext), ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), + CdcDdlMarkVisibility.Public, buildExtendParameter(executionContext)); } @@ -247,8 +278,8 @@ private void mark4TruncateTable(ExecutionContext executionContext) { DdlContext ddlContext = executionContext.getDdlContext(); CdcManagerHelper.getInstance() .notifyDdlNew(schemaName, physicalPlanData.getLogicalTableName(), physicalPlanData.getKind().name(), - ddlContext.getDdlStmt(), ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), - DdlVisibility.Public, + getDdlStmt(executionContext), ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), + CdcDdlMarkVisibility.Public, buildExtendParameter(executionContext)); } @@ -256,8 +287,8 @@ private void mark4TruncatePartition(ExecutionContext executionContext) { DdlContext ddlContext = executionContext.getDdlContext(); CdcManagerHelper.getInstance() .notifyDdlNew(schemaName, physicalPlanData.getLogicalTableName(), physicalPlanData.getKind().name(), - ddlContext.getDdlStmt(), ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), - DdlVisibility.Private, + getDdlStmt(executionContext), ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), + CdcDdlMarkVisibility.Protected, buildExtendParameter(executionContext)); } @@ -271,4 +302,42 @@ private boolean isDropMaterializedView(String sql) { return !list.isEmpty() && list.get(0) instanceof SQLDropMaterializedViewStatement; } + private String getDdlStmt(ExecutionContext executionContext) { + // For create table and alter table, the statement may have been rewritten in LogicalCommonDdlHandler + String cdcRewriteDdlStmt = executionContext.getDdlContext().getCdcRewriteDdlStmt(); + String ddl = StringUtils.isNotBlank(cdcRewriteDdlStmt) ? cdcRewriteDdlStmt : + executionContext.getDdlContext().getDdlStmt(); + return getDdlStmt(ddl); + } + + private String getDdlStmt(String ddl) { + if (CdcMarkUtil.isVersionIdInitialized(versionId)) { + return CdcMarkUtil.buildVersionIdHint(versionId) + ddl; + } + return ddl; + }
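For orientation, `getDdlStmt` above only prefixes the marked statement with the hint produced by `CdcMarkUtil.buildVersionIdHint`, which this patch defines as `String.format("/*%s=%s*/", DDL_ID, versionId)`. Below is a runnable sketch of the resulting string, assuming the `ICdcManager.DDL_ID` constant resolves to the literal `"DDL_ID"` (the constant lives in polardbx-common and is not shown in this diff):

```java
// Sketch of the version-id hint that getDdlStmt prepends. The DDL_ID literal
// below is an assumption; the real value comes from ICdcManager.DDL_ID.
public class VersionIdHintDemo {
    static final String DDL_ID = "DDL_ID"; // assumed value of ICdcManager.DDL_ID

    static String buildVersionIdHint(Long versionId) {
        // Mirrors CdcMarkUtil.buildVersionIdHint from this patch.
        return String.format("/*%s=%s*/", DDL_ID, versionId);
    }

    public static void main(String[] args) {
        String ddl = "ALTER TABLE t1 ADD COLUMN c2 BIGINT";
        // The statement the cdc mark ultimately carries:
        System.out.println(buildVersionIdHint(42L) + ddl);
        // -> /*DDL_ID=42*/ALTER TABLE t1 ADD COLUMN c2 BIGINT
    }
}
```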
+ + public void setUseOriginalDDl(boolean useOriginalDDl) { + this.useOriginalDDl = useOriginalDDl; + } + + public void setNormalizedOriginalDdl(String normalizedOriginalDdl) { + this.normalizedOriginalDdl = normalizedOriginalDdl; + } + + private Map<String, Object> buildExtendParameterWithNormalizedDdl(ExecutionContext executionContext) { + if (null != normalizedOriginalDdl || isUseFkOriginalDDL(executionContext)) { + return buildExtendParameter(executionContext, normalizedOriginalDdl); + } + return buildExtendParameter(executionContext); + } + + public void addSchemaEvolutionInitializers(List<CciSchemaEvolutionTask> initializers) { + schemaEvolutionRecordInitializer.addAll(initializers); + } + + @Override + protected String remark() { + return String.format("|ddlVersionId: %s", versionId); + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropColumnarIndexTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropColumnarIndexTask.java new file mode 100644 index 000000000..374a93a4c --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropColumnarIndexTask.java @@ -0,0 +1,65 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.common.ddl.newengine.DdlType; +import com.alibaba.polardbx.druid.sql.ast.expr.SQLIdentifierExpr; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLDropIndexStatement; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.optimizer.context.DdlContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; + +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + +@Getter +@TaskName(name = "CdcDropColumnarIndexTask") +public class CdcDropColumnarIndexTask extends BaseDdlTask { + private final String logicalTableName; + private final String indexName; + private final Long versionId; + + public CdcDropColumnarIndexTask(String schemaName, String logicalTableName, String indexName, Long versionId) { + super(schemaName); + this.logicalTableName = logicalTableName; + this.indexName = indexName; + this.versionId = versionId; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + SQLDropIndexStatement stmt = new SQLDropIndexStatement(); + stmt.setTableName(new SQLIdentifierExpr(logicalTableName)); + stmt.setIndexName(new SQLIdentifierExpr(indexName)); + + String markSql = CdcMarkUtil.buildVersionIdHint(versionId) + stmt; + CdcMarkUtil.useDdlVersionId(executionContext, versionId); + CdcMarkUtil.useOriginalDDL(executionContext); + + DdlContext ddlContext = executionContext.getDdlContext(); + CdcManagerHelper.getInstance() + .notifyDdlNew(schemaName, logicalTableName, SqlKind.DROP_INDEX.name(), + markSql, DdlType.DROP_INDEX, ddlContext.getJobId(), getTaskId(), + CdcDdlMarkVisibility.Protected, buildExtendParameter(executionContext)); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropFunctionMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropFunctionMarkTask.java new file mode 100644 index 000000000..9b81953ed --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropFunctionMarkTask.java @@ -0,0 +1,78 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.common.cdc.DdlScope; +import com.alibaba.polardbx.common.cdc.ICdcManager; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.context.DdlContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; +import java.util.Map; + +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + +/** + * description: + * author: ziyang.lb + * create: 2023-08-28 18:52 + **/ +@TaskName(name = "CdcDropFunctionMarkTask") +@Getter +@Setter +public class CdcDropFunctionMarkTask extends BaseDdlTask { + + private final String functionName; + + @JSONCreator + public CdcDropFunctionMarkTask(String schemaName, String functionName) { + super(schemaName); + this.functionName = functionName; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + DdlContext ddlContext = executionContext.getDdlContext(); + Map<String, Object> param = buildExtendParameter(executionContext); + param.put(ICdcManager.CDC_DDL_SCOPE, DdlScope.Instance); + + CdcManagerHelper.getInstance() + .notifyDdlNew( + schemaName, + functionName, + SqlKind.DROP_FUNCTION.name(), + ddlContext.getDdlStmt(), + ddlContext.getDdlType(), + ddlContext.getJobId(), + getTaskId(), + CdcDdlMarkVisibility.Protected, + param); + } +}
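CdcDropFunctionMarkTask above and the CdcDropJavaFunctionMarkTask, CdcDropJoinGroupMarkTask, CdcDropProcedureMarkTask and CdcDropTableGroupMarkTask files that follow all repeat one pattern, differing only in the SqlKind they report and the DdlScope they attach (Instance for instance-wide objects such as functions, Schema for schema-local ones such as procedures). A condensed sketch of the shared skeleton; `markDroppedObject` is a hypothetical helper, not code from this patch:

```java
import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility;
import com.alibaba.polardbx.common.cdc.CdcManagerHelper;
import com.alibaba.polardbx.common.cdc.DdlScope;
import com.alibaba.polardbx.common.cdc.ICdcManager;
import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil;
import com.alibaba.polardbx.optimizer.context.DdlContext;
import com.alibaba.polardbx.optimizer.context.ExecutionContext;
import org.apache.calcite.sql.SqlKind;

import java.util.Map;

// Hypothetical condensation of the repeated drop-*-mark pattern: attach the
// scope, then emit a single Protected-visibility mark through CdcManagerHelper.
final class CdcDropObjectMarkSketch {
    static void markDroppedObject(ExecutionContext executionContext, String schemaName,
                                  String objectName, SqlKind sqlKind, DdlScope scope, Long taskId) {
        DdlContext ddlContext = executionContext.getDdlContext();
        Map<String, Object> param = CdcMarkUtil.buildExtendParameter(executionContext);
        param.put(ICdcManager.CDC_DDL_SCOPE, scope);
        CdcManagerHelper.getInstance().notifyDdlNew(schemaName, objectName, sqlKind.name(),
            ddlContext.getDdlStmt(), ddlContext.getDdlType(), ddlContext.getJobId(), taskId,
            CdcDdlMarkVisibility.Protected, param);
    }
}
```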
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropJavaFunctionMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropJavaFunctionMarkTask.java new file mode 100644 index 000000000..7d2d56612 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropJavaFunctionMarkTask.java @@ -0,0 +1,78 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.common.cdc.DdlScope; +import com.alibaba.polardbx.common.cdc.ICdcManager; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.context.DdlContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; +import java.util.Map; + +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + +/** + * description: + * author: ziyang.lb + * create: 2023-08-28 18:52 + **/ +@TaskName(name = "CdcDropJavaFunctionMarkTask") +@Getter +@Setter +public class CdcDropJavaFunctionMarkTask extends BaseDdlTask { + + private final String functionName; + + @JSONCreator + public CdcDropJavaFunctionMarkTask(String schemaName, String functionName) { + super(schemaName); + this.functionName = functionName; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + DdlContext ddlContext = executionContext.getDdlContext(); + Map<String, Object> param = buildExtendParameter(executionContext); + param.put(ICdcManager.CDC_DDL_SCOPE, DdlScope.Instance); + + CdcManagerHelper.getInstance() + .notifyDdlNew( + schemaName, + functionName, + SqlKind.DROP_JAVA_FUNCTION.name(), + ddlContext.getDdlStmt(), + ddlContext.getDdlType(), + ddlContext.getJobId(), + getTaskId(), + CdcDdlMarkVisibility.Protected, + param); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropJoinGroupMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropJoinGroupMarkTask.java new file mode 100644 index 000000000..a342d607e --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropJoinGroupMarkTask.java @@ -0,0 +1,72 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.context.DdlContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; + +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + +/** + * description: + * author: ziyang.lb + * create: 2023-08-28 18:52 + **/ +@TaskName(name = "CdcDropJoinGroupMarkTask") +@Getter +@Setter +public class CdcDropJoinGroupMarkTask extends BaseDdlTask { + + private final String joinGroupName; + + @JSONCreator + public CdcDropJoinGroupMarkTask(String schemaName, String joinGroupName) { + super(schemaName); + this.joinGroupName = joinGroupName; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + DdlContext ddlContext = executionContext.getDdlContext(); + CdcManagerHelper.getInstance() + .notifyDdlNew( + schemaName, + joinGroupName, + SqlKind.DROP_JOINGROUP.name(), + ddlContext.getDdlStmt(), + ddlContext.getDdlType(), + ddlContext.getJobId(), + getTaskId(), + CdcDdlMarkVisibility.Protected, + buildExtendParameter(executionContext)); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropProcedureMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropProcedureMarkTask.java new file mode 100644 index 000000000..a51af809b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropProcedureMarkTask.java @@ -0,0 +1,78 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.common.cdc.DdlScope; +import com.alibaba.polardbx.common.cdc.ICdcManager; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.context.DdlContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; +import java.util.Map; + +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + +/** + * description: + * author: ziyang.lb + * create: 2023-08-28 18:52 + **/ +@TaskName(name = "CdcDropProcedureMarkTask") +@Getter +@Setter +public class CdcDropProcedureMarkTask extends BaseDdlTask { + + private final String procedureName; + + @JSONCreator + public CdcDropProcedureMarkTask(String schemaName, String procedureName) { + super(schemaName); + this.procedureName = procedureName; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + DdlContext ddlContext = executionContext.getDdlContext(); + Map<String, Object> param = buildExtendParameter(executionContext); + param.put(ICdcManager.CDC_DDL_SCOPE, DdlScope.Schema); + + CdcManagerHelper.getInstance() + .notifyDdlNew( + schemaName, + procedureName, + SqlKind.DROP_PROCEDURE.name(), + ddlContext.getDdlStmt(), + ddlContext.getDdlType(), + ddlContext.getJobId(), + getTaskId(), + CdcDdlMarkVisibility.Protected, + param); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropTableGroupMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropTableGroupMarkTask.java new file mode 100644 index 000000000..dff14a966 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropTableGroupMarkTask.java @@ -0,0 +1,72 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.context.DdlContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; + +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + +/** + * description: + * author: ziyang.lb + * create: 2023-08-28 18:52 + **/ +@TaskName(name = "CdcDropTableGroupMarkTask") +@Getter +@Setter +public class CdcDropTableGroupMarkTask extends BaseDdlTask { + + private final String tableGroupName; + + @JSONCreator + public CdcDropTableGroupMarkTask(String schemaName, String tableGroupName) { + super(schemaName); + this.tableGroupName = tableGroupName; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + DdlContext ddlContext = executionContext.getDdlContext(); + CdcManagerHelper.getInstance() + .notifyDdlNew( + schemaName, + tableGroupName, + SqlKind.DROP_TABLEGROUP.name(), + ddlContext.getDdlStmt(), + ddlContext.getDdlType(), + ddlContext.getJobId(), + getTaskId(), + CdcDdlMarkVisibility.Protected, + buildExtendParameter(executionContext)); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropTableIfExistsMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropTableIfExistsMarkTask.java new file mode 100644 index 000000000..364988789 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropTableIfExistsMarkTask.java @@ -0,0 +1,102 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.druid.DbType; +import com.alibaba.polardbx.druid.sql.ast.SQLExpr; +import com.alibaba.polardbx.druid.sql.ast.SQLStatement; +import com.alibaba.polardbx.druid.sql.ast.expr.SQLPropertyExpr; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLDropTableStatement; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLExprTableSource; +import com.alibaba.polardbx.druid.sql.parser.SQLParserUtils; +import com.alibaba.polardbx.druid.sql.parser.SQLStatementParser; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.context.DdlContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; +import lombok.extern.slf4j.Slf4j; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; +import java.util.List; + +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcSqlUtils.SQL_PARSE_FEATURES; + +@TaskName(name = "CdcDropTableIfExistsMarkTask") +@Getter +@Setter +@Slf4j +public class CdcDropTableIfExistsMarkTask extends BaseDdlTask { + + private String tableName; + + @JSONCreator + public CdcDropTableIfExistsMarkTask(String schemaName, String tableName) { + super(schemaName); + this.tableName = tableName; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + DdlContext ddlContext = executionContext.getDdlContext(); + boolean tableExists = TableValidator.checkIfTableExists(schemaName, tableName); + + //If the table does not exist, this matches what the prepare phase observed, so mark normally. + //If the table exists, this differs from what the prepare phase observed and we must not mark, otherwise linearizability problems arise, e.g.: + //table t1 does not exist; thread 1 runs drop while thread 2 runs create t1; thread 1 sees the table absent and prepares to mark, but thread 2 succeeds first, and only then does thread 1 finish marking, + //so the order replayed downstream becomes create -> drop, and t1 ends up present upstream but absent downstream + if (!tableExists) { + checkTableName(ddlContext.getDdlStmt()); + // Backward compatibility has to be considered here. + // Historically the cdc downstream decides whether to apply a marked sql by whether job_id is empty: apply when it is non-empty, skip when it is empty. + // So job_id must be kept empty here for compatibility. Otherwise, when only the CN is upgraded and CDC is not, an old CDC version cannot tell a real DDL from a pure mark, which causes problems. + CdcManagerHelper.getInstance().notifyDdlNew(schemaName, tableName, SqlKind.DROP_TABLE.name(), + ddlContext.getDdlStmt(), ddlContext.getDdlType(), null, getTaskId(), + CdcDdlMarkVisibility.Public, buildExtendParameter(executionContext)); + } else { + log.warn("table {} is currently present, cdc ddl mark for drop table with if exists is ignored, sql {}", + tableName, ddlContext.getDdlStmt()); + } + } + + public static void checkTableName(String sql) { + SQLStatementParser parser = SQLParserUtils.createSQLStatementParser(sql, DbType.mysql, SQL_PARSE_FEATURES); + List<SQLStatement> statementList = parser.parseStatementList(); + SQLDropTableStatement dropTableStatement = (SQLDropTableStatement) statementList.get(0); + for (SQLExprTableSource tableSource : dropTableStatement.getTableSources()) { + SQLExpr sqlName = tableSource.getExpr(); + if (sqlName instanceof SQLPropertyExpr) { + SQLExpr owner = ((SQLPropertyExpr) sqlName).getOwner(); + if (owner instanceof SQLPropertyExpr) { + throw new RuntimeException("duplicate schema name in drop table sql."); + } + } + } + } +}
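`checkTableName` above parses the statement and rejects a table reference whose qualifier is itself qualified (the owner being another SQLPropertyExpr means something like `db1.db2.t1`). A small usage sketch; the SQL strings are illustrative only, not from this patch:

```java
import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcDropTableIfExistsMarkTask;

// Illustrates what checkTableName accepts and rejects, using the static
// method defined in the task class above.
public class CheckTableNameDemo {
    public static void main(String[] args) {
        // Accepted: unqualified and singly-qualified table names.
        CdcDropTableIfExistsMarkTask.checkTableName("drop table if exists t1");
        CdcDropTableIfExistsMarkTask.checkTableName("drop table if exists db1.t1");
        try {
            // Rejected: the owner of db1.db2.t1 is itself a property
            // expression, i.e. the schema name is effectively duplicated.
            CdcDropTableIfExistsMarkTask.checkTableName("drop table if exists db1.db2.t1");
        } catch (RuntimeException e) {
            System.out.println(e.getMessage()); // duplicate schema name in drop table sql.
        }
    }
}
```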
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropViewMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropViewMarkTask.java new file mode 100644 index 000000000..81826d914 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcDropViewMarkTask.java @@ -0,0 +1,80 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.common.cdc.DdlScope; +import com.alibaba.polardbx.common.cdc.ICdcManager; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.context.DdlContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; +import java.util.Map; + +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + +/** + * description: + * author: ziyang.lb + * create: 2023-08-28 18:52 + **/ +@TaskName(name = "CdcDropViewMarkTask") +@Getter +@Setter +public class CdcDropViewMarkTask extends BaseDdlTask { + + private final String viewName; + + @JSONCreator + public CdcDropViewMarkTask(String schemaName, String viewName) { + super(schemaName); + this.viewName = viewName; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + DdlContext ddlContext = executionContext.getDdlContext(); + Map<String, Object> param = buildExtendParameter(executionContext); + param.put(ICdcManager.CDC_DDL_SCOPE, DdlScope.Schema); + + // Views are not supported for a native mysql downstream, because compatibility issues are difficult to resolve, like this one: + // CREATE VIEW v1 AS SELECT c1, sum(c2) OVER (PARTITION BY c1 ) AS sum1 FROM t1 + CdcManagerHelper.getInstance() + .notifyDdlNew( + schemaName, + viewName, + SqlKind.DROP_VIEW.name(), + ddlContext.getDdlStmt(), + ddlContext.getDdlType(), + ddlContext.getJobId(), + getTaskId(), + CdcDdlMarkVisibility.Protected, + param); + } +} diff --git
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcGsiDdlMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcGsiDdlMarkTask.java index 30b81dfc4..275eb428c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcGsiDdlMarkTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcGsiDdlMarkTask.java @@ -17,8 +17,8 @@ package com.alibaba.polardbx.executor.ddl.job.task.cdc; import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; import com.alibaba.polardbx.common.cdc.CdcManagerHelper; -import com.alibaba.polardbx.common.cdc.DdlVisibility; import com.alibaba.polardbx.common.cdc.ICdcManager; import com.alibaba.polardbx.common.ddl.newengine.DdlType; import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData; @@ -29,6 +29,7 @@ import lombok.Getter; import lombok.Setter; import org.apache.calcite.sql.SqlKind; +import org.apache.commons.lang3.StringUtils; import java.sql.Connection; @@ -61,7 +62,7 @@ protected void duringTransaction(Connection metaDbConnection, ExecutionContext e injectGSI(executionContext); FailPoint.injectRandomExceptionFromHint(executionContext); FailPoint.injectRandomSuspendFromHint(executionContext); - useOrginalDDL(executionContext); + CdcMarkUtil.useOriginalDDL(executionContext); DdlType ddlType; SqlKind sqlKind; if (physicalPlanData.getKind() == SqlKind.CREATE_TABLE) { @@ -78,16 +79,15 @@ protected void duringTransaction(Connection metaDbConnection, ExecutionContext e } String originalDdl = this.originalDdl != null ? this.originalDdl : executionContext.getOriginSql(); + if (StringUtils.isNotBlank(executionContext.getDdlContext().getCdcRewriteDdlStmt())) { + originalDdl = executionContext.getDdlContext().getCdcRewriteDdlStmt(); + } CdcManagerHelper.getInstance() .notifyDdlNew(schemaName, primaryTable, sqlKind.name(), originalDdl, ddlType, executionContext.getDdlContext().getJobId(), getTaskId(), - DdlVisibility.Public, buildExtendParameter(executionContext)); - } - - private void useOrginalDDL(ExecutionContext executionContext) { - executionContext.getExtraCmds().put(ICdcManager.USE_ORGINAL_DDL, "true"); + CdcDdlMarkVisibility.Public, buildExtendParameter(executionContext)); } private void injectGSI(ExecutionContext executionContext) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcInsertOverwriteTasks.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcInsertOverwriteTasks.java index ed34f3f98..79fd48988 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcInsertOverwriteTasks.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcInsertOverwriteTasks.java @@ -17,8 +17,8 @@ package com.alibaba.polardbx.executor.ddl.job.task.cdc; import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; import com.alibaba.polardbx.common.cdc.CdcManagerHelper; -import com.alibaba.polardbx.common.cdc.DdlVisibility; import com.alibaba.polardbx.common.cdc.ICdcManager; import com.alibaba.polardbx.common.ddl.newengine.DdlType; import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; @@ -65,7 +65,7 @@ protected void duringTransaction(Connection metaDbConnection, ExecutionContext e String sql = "drop table if exists " + 
surroundWithBacktick(logicTableName); CdcManagerHelper.getInstance().notifyDdlNew(schemaName, logicTableName, SqlKind.DROP_TABLE.name(), - sql, DdlType.DROP_TABLE, getJobId(), getTaskId(), DdlVisibility.Public, + sql, DdlType.DROP_TABLE, getJobId(), getTaskId(), CdcDdlMarkVisibility.Public, buildExtendParameter(executionContext)); } } @@ -103,7 +103,7 @@ protected void duringTransaction(Connection metaDbConnection, ExecutionContext e CdcManagerHelper.getInstance() .notifyDdlNew(schemaName, renameFrom, SqlKind.RENAME_TABLE.name(), sql, DdlType.RENAME_TABLE, - getJobId(), getTaskId(), DdlVisibility.Public, params); + getJobId(), getTaskId(), CdcDdlMarkVisibility.Public, params); } else { Map<String, Object> params = buildExtendParameter(executionContext); params.put(ICdcManager.TABLE_NEW_NAME, renameTo); @@ -114,7 +114,7 @@ protected void duringTransaction(Connection metaDbConnection, ExecutionContext e CdcManagerHelper.getInstance() .notifyDdlNew(schemaName, renameFrom, SqlKind.RENAME_TABLE.name(), sql, DdlType.RENAME_TABLE, - getJobId(), getTaskId(), DdlVisibility.Public, params, true, topology); + getJobId(), getTaskId(), CdcDdlMarkVisibility.Public, params, true, topology); } } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcLogicalSequenceMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcLogicalSequenceMarkTask.java new file mode 100644 index 000000000..b1daf3c5a --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcLogicalSequenceMarkTask.java @@ -0,0 +1,79 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.common.ddl.newengine.DdlType; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; + +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + +/** + * Created by zhuqiwei.
+ * + * @author zhuqiwei + */ +@TaskName(name = "CdcLogicalSequenceMarkTask") +@Getter +@Setter +public class CdcLogicalSequenceMarkTask extends BaseDdlTask { + private SqlKind sqlKind; + private String sequenceName; + private String ddlSql; + + @JSONCreator + public CdcLogicalSequenceMarkTask(String schemaName, String sequenceName, String ddlSql, SqlKind sqlKind) { + super(schemaName); + this.sqlKind = sqlKind; + this.sequenceName = sequenceName; + this.ddlSql = ddlSql; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + mark4LogicalSequence(executionContext, sqlKind, schemaName, sequenceName, ddlSql); + } + + private void mark4LogicalSequence(ExecutionContext executionContext, SqlKind sqlKind, String schemaName, + String sequenceName, String ddlSql) { + CdcManagerHelper.getInstance() + .notifyDdlNew( + schemaName, + sequenceName, + sqlKind.name(), + ddlSql, + DdlType.UNSUPPORTED, + null, + null, + CdcDdlMarkVisibility.Protected, + buildExtendParameter(executionContext)); + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcMarkUtil.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcMarkUtil.java index d65319958..f5fd6a96e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcMarkUtil.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcMarkUtil.java @@ -18,25 +18,45 @@ import com.alibaba.polardbx.common.cdc.ICdcManager; import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.properties.DynamicConfig; +import com.alibaba.polardbx.executor.mpp.metadata.NotNull; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import org.apache.commons.lang3.StringUtils; import java.util.Map; +import java.util.Objects; +import java.util.Optional; import static com.alibaba.polardbx.common.cdc.ICdcManager.CDC_MARK_SQL_MODE; +import static com.alibaba.polardbx.common.cdc.ICdcManager.DDL_ID; +import static com.alibaba.polardbx.common.cdc.ICdcManager.DEFAULT_DDL_VERSION_ID; +import static com.alibaba.polardbx.common.cdc.ICdcManager.POLARDBX_SERVER_ID; /** * created by ziyang.lb **/ public class CdcMarkUtil { - public static Map<String, Object> buildExtendParameter(ExecutionContext executionContext) { + public static Map<String, Object> buildExtendParameter(@NotNull ExecutionContext executionContext) { + String originalDdl = Optional.ofNullable(executionContext.getDdlContext()) + .map(c -> { + if (StringUtils.isNotBlank(c.getCdcRewriteDdlStmt())) { + return c.getCdcRewriteDdlStmt(); + } else { + return c.getDdlStmt(); + } + }).orElse(""); + return buildExtendParameter(executionContext, originalDdl); + }
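The no-arg overload above, together with the overload below and the helpers added at the bottom of this file (`useOriginalDDL`, `useDdlVersionId`, `buildVersionIdHint`, `isVersionIdInitialized`), is driven by callers roughly as in the following sketch. It assumes a live ExecutionContext `ec` obtained from the DDL engine and an initialized (non-default) version id; it illustrates the intended flow and is not code from this patch:

```java
import com.alibaba.polardbx.common.cdc.ICdcManager;
import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil;
import com.alibaba.polardbx.optimizer.context.ExecutionContext;

import java.util.Map;

final class CdcMarkUtilUsageSketch {
    // ec must come from the surrounding DDL engine with a populated DdlContext.
    static void markWithVersionId(ExecutionContext ec, Long versionId) {
        CdcMarkUtil.useOriginalDDL(ec);             // sets USE_ORIGINAL_DDL = "true"
        CdcMarkUtil.useDdlVersionId(ec, versionId); // stashes USE_DDL_VERSION_ID in extraCmds

        Map<String, Object> param = CdcMarkUtil.buildExtendParameter(ec);
        // buildExtendParameter consumes and removes USE_DDL_VERSION_ID, so the
        // hint cannot leak into the next ddl statement. For an initialized
        // version id, param now carries DDL_ID plus a CDC_ORIGINAL_DDL value
        // prefixed with the /*...*/ version-id hint.
        assert param.containsKey(ICdcManager.DDL_ID);
    }
}
```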
+ + public static Map<String, Object> buildExtendParameter(ExecutionContext executionContext, String originalDdl) { Map<String, Object> parameter = executionContext.getExtraCmds(); final Map<String, Object> extraVariables = executionContext.getExtraServerVariables(); final Map<String, Object> serverVariables = executionContext.getServerVariables(); - if (null != extraVariables && extraVariables.containsKey("polardbx_server_id")) { - Object serverId = extraVariables.get("polardbx_server_id"); - parameter.put("polardbx_server_id", serverId); + if (null != extraVariables && extraVariables.containsKey(POLARDBX_SERVER_ID)) { + Object serverId = extraVariables.get(POLARDBX_SERVER_ID); + parameter.put(POLARDBX_SERVER_ID, serverId); } if (executionContext.getDdlContext() != null && executionContext.getDdlContext().getSqlMode() != null) { parameter.put(CDC_MARK_SQL_MODE, executionContext.getDdlContext().getSqlMode()); @@ -57,28 +77,45 @@ public static Map<String, Object> buildExtendParameter(ExecutionContext executio } parameter.put(ICdcManager.CDC_ORIGINAL_DDL, ""); - if (isUseOriginalDDL(executionContext)) { - parameter.put(ICdcManager.CDC_ORIGINAL_DDL, executionContext.getDdlContext().getDdlStmt()); - } else if (isUseFkOriginalDDL(executionContext)) { + if (isUseFkOriginalDDL(executionContext)) { parameter.put(ICdcManager.CDC_ORIGINAL_DDL, executionContext.getDdlContext().getForeignKeyOriginalSql()); + } else if (isUseOriginalDDL(executionContext)) { + // Have to remove the USE_DDL_VERSION_ID flag from the ExecutionContext, in case it corrupts following ddl statements + final Long ddlVersionId = getAndRemoveDdlVersionId(executionContext); + if (isVersionIdInitialized(ddlVersionId)) { + originalDdl = buildVersionIdHint(ddlVersionId) + originalDdl; + parameter.put(DDL_ID, ddlVersionId); + } + + parameter.put(ICdcManager.CDC_ORIGINAL_DDL, originalDdl); } return parameter; } + private static Long getAndRemoveDdlVersionId(ExecutionContext executionContext) { + if (executionContext.getExtraCmds().containsKey(ICdcManager.USE_DDL_VERSION_ID)) { + final String ddlVersionId = executionContext + .getExtraCmds() + .remove(ICdcManager.USE_DDL_VERSION_ID) + .toString(); + return DynamicConfig.parseValue(ddlVersionId, Long.class, DEFAULT_DDL_VERSION_ID); + } + return DEFAULT_DDL_VERSION_ID; + } + private static boolean isUseOriginalDDL(ExecutionContext executionContext) { Map<String, Object> parameter = executionContext.getExtraCmds(); - String useOriginalDDL = (String) parameter.get(ICdcManager.USE_ORGINAL_DDL); + String useOriginalDDL = (String) parameter.get(ICdcManager.USE_ORIGINAL_DDL); if (executionContext.getDdlContext() == null || - StringUtils.isEmpty( - executionContext.getDdlContext().getDdlStmt())) { + StringUtils.isEmpty(executionContext.getDdlContext().getDdlStmt())) { return false; } return StringUtils.equalsIgnoreCase("true", useOriginalDDL); } - private static boolean isUseFkOriginalDDL(ExecutionContext executionContext) { + public static boolean isUseFkOriginalDDL(ExecutionContext executionContext) { Map<String, Object> parameter = executionContext.getExtraCmds(); String foreignKeysDdl = (String) parameter.get(ICdcManager.FOREIGN_KEYS_DDL); @@ -90,4 +127,22 @@ private static boolean isUseFkOriginalDDL(ExecutionContext executionContext) { return StringUtils.equalsIgnoreCase("true", foreignKeysDdl); } + + public static String buildVersionIdHint(Long versionId) { + return String.format("/*%s=%s*/", DDL_ID, versionId); + } + + public static void useOriginalDDL(ExecutionContext executionContext) { + executionContext.getExtraCmds().put(ICdcManager.USE_ORIGINAL_DDL, "true"); + } + + public static void useDdlVersionId(ExecutionContext executionContext, Long versionId) { + if (null != versionId) { + executionContext.getExtraCmds().put(ICdcManager.USE_DDL_VERSION_ID, versionId); + } + } + + public static boolean isVersionIdInitialized(Long versionId) { + return !Objects.equals(versionId, DEFAULT_DDL_VERSION_ID); + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcMergeTableGroupMarkTask.java
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcMergeTableGroupMarkTask.java new file mode 100644 index 000000000..613b6bea8 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcMergeTableGroupMarkTask.java @@ -0,0 +1,72 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.cdc; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.context.DdlContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; +import org.apache.calcite.sql.SqlKind; + +import java.sql.Connection; + +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + +/** + * description: + * author: ziyang.lb + * create: 2023-08-28 18:52 + **/ +@TaskName(name = "CdcMergeTableGroupMarkTask") +@Getter +@Setter +public class CdcMergeTableGroupMarkTask extends BaseDdlTask { + + private final String tableGroupName; + + @JSONCreator + public CdcMergeTableGroupMarkTask(String schemaName, String tableGroupName) { + super(schemaName); + this.tableGroupName = tableGroupName; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + DdlContext ddlContext = executionContext.getDdlContext(); + CdcManagerHelper.getInstance() + .notifyDdlNew( + schemaName, + tableGroupName, + SqlKind.MERGE_TABLEGROUP.name(), + ddlContext.getDdlStmt(), + ddlContext.getDdlType(), + ddlContext.getJobId(), + getTaskId(), + CdcDdlMarkVisibility.Protected, + buildExtendParameter(executionContext)); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcModifyPartitionKeyMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcModifyPartitionKeyMarkTask.java index 320af8d47..337736d34 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcModifyPartitionKeyMarkTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcModifyPartitionKeyMarkTask.java @@ -17,12 +17,13 @@ package com.alibaba.polardbx.executor.ddl.job.task.cdc; import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; import com.alibaba.polardbx.common.cdc.CdcManagerHelper; -import 
com.alibaba.polardbx.common.cdc.DdlVisibility; import com.alibaba.polardbx.common.cdc.ICdcManager; import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.DdlContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -42,13 +43,18 @@ @Setter public class CdcModifyPartitionKeyMarkTask extends BaseDdlTask { private String logicalTableName; + private String indexName; private SqlKind sqlKind; + private Map exchangeNamesMapping; @JSONCreator - public CdcModifyPartitionKeyMarkTask(String schemaName, String logicalTableName, SqlKind sqlKind) { + public CdcModifyPartitionKeyMarkTask(String schemaName, String logicalTableName, String indexName, + SqlKind sqlKind, Map exchangeNamesMapping) { super(schemaName); this.logicalTableName = logicalTableName; + this.indexName = indexName; this.sqlKind = sqlKind; + this.exchangeNamesMapping = exchangeNamesMapping; } @Override @@ -58,16 +64,20 @@ protected void duringTransaction(Connection metaDbConnection, ExecutionContext e } private void mark4RepartitionTable(ExecutionContext executionContext) { - // The physical-table switch between the primary table and the target table has already completed; the target table exists as a GSI, and distributed transactions keep both sides strongly consistent - // The mark must be written before the job ends and before the GSI is cleaned up + // The physical-table switch between the primary table and the target table has not happened yet; the target table exists as a GSI, and distributed transactions keep both sides strongly consistent + // Write the mark before the switch to guarantee eventual consistency DdlContext ddlContext = executionContext.getDdlContext(); Map param = buildExtendParameter(executionContext); param.put(ICdcManager.ALTER_TRIGGER_TOPOLOGY_CHANGE_FLAG, ""); param.put(ICdcManager.REFRESH_CREATE_SQL_4_PHY_TABLE, "true"); + param.put(ICdcManager.EXCHANGE_NAMES_MAPPING, exchangeNamesMapping); FailPoint.injectRandomExceptionFromHint(executionContext); FailPoint.injectRandomSuspendFromHint(executionContext); + + TableMeta indexTableMeta = executionContext.getSchemaManager().getTable(indexName); CdcManagerHelper.getInstance() .notifyDdlNew(schemaName, logicalTableName, sqlKind.name(), ddlContext.getDdlStmt(), - ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), DdlVisibility.Public, param); + ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), CdcDdlMarkVisibility.Public, param, + true, indexTableMeta.getLatestTopology()); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcMoveDatabaseDdlMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcMoveDatabaseDdlMarkTask.java index 4d4af9f8b..7a5830254 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcMoveDatabaseDdlMarkTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcMoveDatabaseDdlMarkTask.java @@ -17,8 +17,8 @@ package com.alibaba.polardbx.executor.ddl.job.task.cdc; import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; import com.alibaba.polardbx.common.cdc.CdcManagerHelper; -import com.alibaba.polardbx.common.cdc.DdlVisibility; import com.alibaba.polardbx.common.ddl.newengine.DdlType; import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; @@ -63,7 +63,7 @@ private void mark4MoveDatabase(ExecutionContext executionContext) { FailPoint.injectRandomSuspendFromHint(executionContext); CdcManagerHelper.getInstance()
.notifyDdlNew(schemaName, "", sqlKind.name(), ddlStmt, DdlType.MOVE_DATABASE, - ddlContext.getJobId(), getTaskId(), DdlVisibility.Private, + ddlContext.getJobId(), getTaskId(), CdcDdlMarkVisibility.Private, buildExtendParameter(executionContext), false, null); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcRepartitionMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcRepartitionMarkTask.java index 94ea40ba4..bcc093136 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcRepartitionMarkTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcRepartitionMarkTask.java @@ -17,17 +17,20 @@ package com.alibaba.polardbx.executor.ddl.job.task.cdc; import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; import com.alibaba.polardbx.common.cdc.CdcManagerHelper; -import com.alibaba.polardbx.common.cdc.DdlVisibility; import com.alibaba.polardbx.common.cdc.ICdcManager; +import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.optimizer.context.DdlContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.planner.rule.util.CBOUtil; import lombok.Getter; import lombok.Setter; import org.apache.calcite.sql.SqlKind; +import org.apache.commons.lang.StringUtils; import java.sql.Connection; import java.util.Map; @@ -43,12 +46,15 @@ public class CdcRepartitionMarkTask extends BaseDdlTask { private String logicalTableName; private SqlKind sqlKind; + private CdcDdlMarkVisibility CdcDdlMarkVisibility; @JSONCreator - public CdcRepartitionMarkTask(String schemaName, String logicalTableName, SqlKind sqlKind) { + public CdcRepartitionMarkTask(String schemaName, String logicalTableName, SqlKind sqlKind, + CdcDdlMarkVisibility CdcDdlMarkVisibility) { super(schemaName); this.logicalTableName = logicalTableName; this.sqlKind = sqlKind; + this.CdcDdlMarkVisibility = CdcDdlMarkVisibility; } @Override @@ -58,6 +64,16 @@ protected void duringTransaction(Connection metaDbConnection, ExecutionContext e } private void mark4RepartitionTable(ExecutionContext executionContext) { + if (CBOUtil.isGsi(schemaName, logicalTableName)) { + return; + } + + final String skipCutover = + executionContext.getParamManager().getString(ConnectionParams.REPARTITION_SKIP_CUTOVER); + if (StringUtils.equalsIgnoreCase(skipCutover, Boolean.TRUE.toString())) { + return; + } + // The physical-table switch between the primary table and the target table has already completed; the target table exists as a GSI, and distributed transactions keep both sides strongly consistent // The mark must be written before the job ends and before the GSI is cleaned up DdlContext ddlContext = executionContext.getDdlContext(); @@ -67,6 +83,6 @@ private void mark4RepartitionTable(ExecutionContext executionContext) { FailPoint.injectRandomExceptionFromHint(executionContext); FailPoint.injectRandomSuspendFromHint(executionContext); CdcManagerHelper.getInstance() .notifyDdlNew(schemaName, logicalTableName, sqlKind.name(), ddlContext.getDdlStmt(), - ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), DdlVisibility.Private, param); + ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), CdcDdlMarkVisibility, param); } }
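The CdcSqlUtils diff just below adds KeepComments and EnableFillKeyName to the shared parser feature set; KeepComments is what allows a leading /*DDL_ID=...*/ hint to survive when a marked DDL is re-parsed. A hedged sketch of parsing with such a feature set, mirroring the SQLUtils.parseStatements(...) call that CdcTableGroupDdlMarkTask makes later in this patch (the DDL text is invented, and the imports assume the bundled Druid fork keeps the upstream package layout):

```java
import com.alibaba.polardbx.druid.DbType;
import com.alibaba.polardbx.druid.sql.SQLUtils;
import com.alibaba.polardbx.druid.sql.ast.SQLStatement;
import com.alibaba.polardbx.druid.sql.parser.SQLParserFeature;

import java.util.List;

public class KeepCommentsSketch {
    public static void main(String[] args) {
        // Reduced feature set for illustration; real callers pass CdcSqlUtils.SQL_PARSE_FEATURES.
        SQLParserFeature[] features = {
            SQLParserFeature.TDDLHint,
            SQLParserFeature.KeepComments
        };
        String markedDdl = "/*DDL_ID=12345*/ALTER TABLE t1 ADD COLUMN c1 INT";
        List<SQLStatement> stmts = SQLUtils.parseStatements(markedDdl, DbType.mysql, features);
        // With KeepComments enabled the parser retains the leading hint on the statement,
        // so regenerating the SQL should not silently drop the version-id marker.
        System.out.println(stmts.get(0));
    }
}
```

diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcSqlUtils.java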
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcSqlUtils.java index 9f5f2347d..3f9e03c07 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcSqlUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcSqlUtils.java @@ -24,8 +24,16 @@ public class CdcSqlUtils { public final static SQLParserFeature[] SQL_PARSE_FEATURES = { SQLParserFeature.EnableSQLBinaryOpExprGroup, - SQLParserFeature.UseInsertColumnsCache, SQLParserFeature.OptimizedForParameterized, - SQLParserFeature.TDDLHint, SQLParserFeature.EnableCurrentUserExpr, SQLParserFeature.DRDSAsyncDDL, - SQLParserFeature.DRDSBaseline, SQLParserFeature.DrdsMisc, SQLParserFeature.DrdsGSI, SQLParserFeature.DrdsCCL + SQLParserFeature.UseInsertColumnsCache, + SQLParserFeature.OptimizedForParameterized, + SQLParserFeature.TDDLHint, + SQLParserFeature.EnableCurrentUserExpr, + SQLParserFeature.DRDSAsyncDDL, + SQLParserFeature.DRDSBaseline, + SQLParserFeature.DrdsMisc, + SQLParserFeature.DrdsGSI, + SQLParserFeature.DrdsCCL, + SQLParserFeature.KeepComments, + SQLParserFeature.EnableFillKeyName }; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcTableGroupDdlMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcTableGroupDdlMarkTask.java index eed005dd4..acd00dd14 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcTableGroupDdlMarkTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcTableGroupDdlMarkTask.java @@ -17,9 +17,16 @@ package com.alibaba.polardbx.executor.ddl.job.task.cdc; import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; import com.alibaba.polardbx.common.cdc.CdcManagerHelper; -import com.alibaba.polardbx.common.cdc.DdlVisibility; import com.alibaba.polardbx.common.ddl.newengine.DdlType; +import com.alibaba.polardbx.druid.DbType; +import com.alibaba.polardbx.druid.sql.SQLUtils; +import com.alibaba.polardbx.druid.sql.ast.SQLStatement; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableStatement; +import com.alibaba.polardbx.druid.sql.dialect.mysql.parser.MySqlStatementParser; +import com.alibaba.polardbx.druid.sql.parser.ByteString; +import com.alibaba.polardbx.druid.sql.parser.SQLParserUtils; import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.gms.util.TableGroupNameUtil; @@ -32,10 +39,13 @@ import org.apache.calcite.sql.SqlKind; import java.sql.Connection; +import java.util.List; import java.util.Map; import java.util.Set; +import static com.alibaba.polardbx.common.cdc.ICdcManager.CDC_IS_GSI; import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcSqlUtils.SQL_PARSE_FEATURES; /** * created by ziyang.lb @@ -47,20 +57,24 @@ public class CdcTableGroupDdlMarkTask extends BaseDdlTask { private String tableGroup; - private String logicalTableName; + private String tableName; private SqlKind sqlKind; private Map> targetTableTopology; private String ddlStmt; + private CdcDdlMarkVisibility cdcDdlMarkVisibility; @JSONCreator - public CdcTableGroupDdlMarkTask(String tableGroup, String schemaName, String logicalTableName, SqlKind sqlKind, - Map> targetTableTopology, String 
ddlStmt) { + public CdcTableGroupDdlMarkTask(String tableGroup, String schemaName, String tableName, SqlKind sqlKind, + Map> targetTableTopology, String ddlStmt, + CdcDdlMarkVisibility cdcDdlMarkVisibility) { super(schemaName); this.tableGroup = tableGroup; - this.logicalTableName = logicalTableName; + this.tableName = tableName; this.sqlKind = sqlKind; this.targetTableTopology = targetTableTopology; + this.ddlStmt = ddlStmt; + this.cdcDdlMarkVisibility = cdcDdlMarkVisibility; } @Override @@ -70,16 +84,31 @@ protected void duringTransaction(Connection metaDbConnection, ExecutionContext e } private void mark4TableGroupChange(ExecutionContext executionContext) { - if (TableGroupNameUtil.isOssTg(tableGroup) || CBOUtil.isGsi(schemaName, logicalTableName)) { + Map param = buildExtendParameter(executionContext); + + boolean isAlterIndex = false; + String markTableName = tableName; + // alter index ... on table ...: use the primary table's name for the CDC mark + List parseResult = SQLUtils.parseStatements(ddlStmt, DbType.mysql, SQL_PARSE_FEATURES); + if (!parseResult.isEmpty() && parseResult.get(0) instanceof SQLAlterTableStatement) { + SQLAlterTableStatement stmt = (SQLAlterTableStatement) parseResult.get(0); + if (stmt.getAlterIndexName() != null) { + isAlterIndex = true; + markTableName = SQLUtils.normalize(stmt.getTableName()); + param.put(CDC_IS_GSI, true); + } + } + + if (!isAlterIndex && + (TableGroupNameUtil.isOssTg(tableGroup) || (CBOUtil.isGsi(schemaName, markTableName)))) { return; } - log.info("new topology for table {} is {}", logicalTableName, targetTableTopology); + log.info("new topology for table {} is {}, isAlterIndex {}", markTableName, targetTableTopology, isAlterIndex); DdlContext ddlContext = executionContext.getDdlContext(); - CdcManagerHelper.getInstance() - .notifyDdlNew(schemaName, logicalTableName, sqlKind.name(), ddlStmt, DdlType.ALTER_TABLEGROUP, - ddlContext.getJobId(), getTaskId(), DdlVisibility.Private, - buildExtendParameter(executionContext), true, targetTableTopology); + CdcManagerHelper.getInstance().notifyDdlNew(schemaName, markTableName, sqlKind.name(), ddlStmt, + DdlType.ALTER_TABLEGROUP, ddlContext.getJobId(), getTaskId(), cdcDdlMarkVisibility, param, true, + targetTableTopology); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcTruncateTableWithGsiMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcTruncateTableWithGsiMarkTask.java index fdb93dcad..c4a8236d0 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcTruncateTableWithGsiMarkTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcTruncateTableWithGsiMarkTask.java @@ -17,8 +17,8 @@ package com.alibaba.polardbx.executor.ddl.job.task.cdc; import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; import com.alibaba.polardbx.common.cdc.CdcManagerHelper; -import com.alibaba.polardbx.common.cdc.DdlVisibility; import com.alibaba.polardbx.common.cdc.ICdcManager; import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; @@ -74,13 +74,13 @@ private void mark4TruncateTableWithGsi(ExecutionContext executionContext) { params.put(ICdcManager.TABLE_NEW_PATTERN, tmpTbNamePattern); CdcManagerHelper.getInstance() .notifyDdlNew(schemaName, logicalTableName, SqlKind.TRUNCATE_TABLE.name(), truncateSql, - ddlContext.getDdlType(),
ddlContext.getJobId(), getTaskId(), DdlVisibility.Public, params, + ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), CdcDdlMarkVisibility.Public, params, true, Maps.newHashMap()); } else { Map> tmpTableTopology = TruncateUtil.getTmpTableTopology(schemaName, tmpTableName); CdcManagerHelper.getInstance() .notifyDdlNew(schemaName, logicalTableName, SqlKind.TRUNCATE_TABLE.name(), truncateSql, - ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), DdlVisibility.Public, + ddlContext.getDdlType(), ddlContext.getJobId(), getTaskId(), CdcDdlMarkVisibility.Public, buildExtendParameter(executionContext), true, tmpTableTopology); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcTruncateWithRecycleMarkTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcTruncateWithRecycleMarkTask.java index 08f1e5f63..b633c2448 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcTruncateWithRecycleMarkTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/cdc/CdcTruncateWithRecycleMarkTask.java @@ -17,8 +17,8 @@ package com.alibaba.polardbx.executor.ddl.job.task.cdc; import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; import com.alibaba.polardbx.common.cdc.CdcManagerHelper; -import com.alibaba.polardbx.common.cdc.DdlVisibility; import com.alibaba.polardbx.common.cdc.ICdcManager; import com.alibaba.polardbx.common.ddl.newengine.DdlType; import com.alibaba.polardbx.executor.ddl.job.meta.TableMetaChanger; @@ -35,6 +35,7 @@ import java.util.Map; import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; +import static com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalRenameTable.checkTableNamePatternForRename; /** * Created by ziyang.lb @@ -67,9 +68,8 @@ private void mark4RenameTable(ExecutionContext executionContext) { // If the physical table names changed as well, the new tablePattern must be passed to cdcManager as an extra parameter // If the physical table names changed, all physical tables have already been renamed at this point (any DML the user submits against this logical table will fail now), so the CDC mark must come before the metadata change // If the physical table names did not change, the tablePattern stays the same; rename is a lightweight operation, so the mark can go either before or after the metadata change - executionContext.setPhyTableRenamed(false); String newTbNamePattern = TableMetaChanger.buildNewTbNamePattern(executionContext, schemaName, - sourceTableName, targetTableName); + sourceTableName, targetTableName, !checkTableNamePatternForRename(schemaName, sourceTableName)); Map params = buildExtendParameter(executionContext); params.put(ICdcManager.TABLE_NEW_NAME, targetTableName); params.put(ICdcManager.TABLE_NEW_PATTERN, newTbNamePattern); @@ -79,6 +79,6 @@ private void mark4RenameTable(ExecutionContext executionContext) { DdlContext ddlContext = executionContext.getDdlContext(); CdcManagerHelper.getInstance() .notifyDdlNew(schemaName, sourceTableName, SqlKind.RENAME_TABLE.name(), renameSql, - DdlType.RENAME_TABLE, ddlContext.getJobId(), getTaskId(), DdlVisibility.Public, params); + DdlType.RENAME_TABLE, ddlContext.getJobId(), getTaskId(), CdcDdlMarkVisibility.Public, params); } }
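In the AlterTableGroupMovePartitionsCheckTask and MoveTableCheckTask refactors that follow, the hand-rolled DdlEngineAccessorDelegate blocks are replaced by two ChangeSetUtils.doChangeSetSchemaChange calls, stepping WRITE_REORG to DELETE_ONLY to DOING_CHECKER (and reversing the same two steps on rollback) instead of jumping a single state, presumably so nodes only ever observe adjacent states during the preemptive sync. A minimal sketch of that call sequence; ChangeSetUtils internals are not part of this patch, so the helper below merely stands in for the metadb update plus table-meta sync the real method performs:

```java
// Illustrative only: models the two-step status walk visible in executeImpl below.
enum ComplexTaskStatus { WRITE_REORG, DELETE_ONLY, DOING_CHECKER }

public class ChangeSetTransitionSketch {
    // Stand-in for ChangeSetUtils.doChangeSetSchemaChange(schema, table, relatedTables, task, from, to).
    static void doChangeSetSchemaChange(String table, ComplexTaskStatus from, ComplexTaskStatus to) {
        System.out.printf("%s: %s -> %s%n", table, from, to);
    }

    public static void main(String[] args) {
        // Forward path when double write is being stopped:
        doChangeSetSchemaChange("t1", ComplexTaskStatus.WRITE_REORG, ComplexTaskStatus.DELETE_ONLY);
        doChangeSetSchemaChange("t1", ComplexTaskStatus.DELETE_ONLY, ComplexTaskStatus.DOING_CHECKER);
        // rollbackImpl reverses the same two steps: DOING_CHECKER -> DELETE_ONLY -> WRITE_REORG.
    }
}
```

diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/changset/AlterTableGroupMovePartitionsCheckTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/changset/AlterTableGroupMovePartitionsCheckTask.java index 273271053..708da0f79 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/changset/AlterTableGroupMovePartitionsCheckTask.java +++ 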
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/changset/AlterTableGroupMovePartitionsCheckTask.java @@ -17,7 +17,6 @@ package com.alibaba.polardbx.executor.ddl.job.task.changset; import com.alibaba.fastjson.annotation.JSONCreator; -import com.alibaba.polardbx.common.ddl.newengine.DdlTaskState; import com.alibaba.polardbx.common.eventlogger.EventLogger; import com.alibaba.polardbx.common.eventlogger.EventType; import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; @@ -29,19 +28,17 @@ import com.alibaba.polardbx.executor.corrector.Reporter; import com.alibaba.polardbx.executor.ddl.job.task.BaseBackfillTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; -import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask; import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlEngineAccessorDelegate; -import com.alibaba.polardbx.executor.ddl.newengine.utils.TaskHelper; +import com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils; import com.alibaba.polardbx.executor.fastchecker.FastChecker; import com.alibaba.polardbx.executor.gsi.CheckerManager; import com.alibaba.polardbx.executor.partitionmanagement.corrector.AlterTableGroupChecker; import com.alibaba.polardbx.executor.partitionmanagement.corrector.AlterTableGroupReporter; import com.alibaba.polardbx.executor.partitionmanagement.fastchecker.AlterTableGroupFastChecker; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; -import com.alibaba.polardbx.executor.sync.TableMetaChangePreemptiveSyncAction; import com.alibaba.polardbx.executor.sync.TablesMetaChangePreemptiveSyncAction; -import com.alibaba.polardbx.gms.metadb.misc.DdlEngineTaskRecord; import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.statistics.SQLRecorderLogger; @@ -83,50 +80,19 @@ protected void executeImpl(ExecutionContext executionContext) { if (executionContext.getParamManager().getBoolean(ConnectionParams.SKIP_CHANGE_SET_CHECKER) || !executionContext.getParamManager().getBoolean(ConnectionParams.TABLEGROUP_REORG_CHECK_AFTER_BACKFILL)) { if (stopDoubleWrite) { - DdlTask currentTask = this; - DdlEngineAccessorDelegate delegate = new DdlEngineAccessorDelegate() { - @Override - protected Integer invoke() { - ComplexTaskMetaManager - .updateSubTasksStatusByJobIdAndObjName(getJobId(), - schemaName, - logicalTableName, - ComplexTaskMetaManager.ComplexTaskStatus.WRITE_REORG, - ComplexTaskMetaManager.ComplexTaskStatus.DOING_CHECKER, - getConnection()); - try { - for (String tbName : relatedTables) { - TableInfoManager.updateTableVersionWithoutDataId(schemaName, tbName, getConnection()); - } - } catch (Exception e) { - throw GeneralUtil.nestedException(e); - } - currentTask.setState(DdlTaskState.DIRTY); - DdlEngineTaskRecord taskRecord = TaskHelper.toDdlEngineTaskRecord(currentTask); - return engineTaskAccessor.updateTask(taskRecord); - } - }; - delegate.execute(); + ChangeSetUtils.doChangeSetSchemaChange( + schemaName, logicalTableName, + relatedTables, this, + ComplexTaskMetaManager.ComplexTaskStatus.WRITE_REORG, + ComplexTaskMetaManager.ComplexTaskStatus.DELETE_ONLY + ); - LOGGER.info( - String.format( - "Update table status[ schema:%s, table:%s, before state:%s, after state:%s]", - schemaName, - logicalTableName, - ComplexTaskMetaManager.ComplexTaskStatus.WRITE_REORG.name(), - 
ComplexTaskMetaManager.ComplexTaskStatus.DOING_CHECKER.name())); - - try { - SyncManagerHelper.sync( - new TablesMetaChangePreemptiveSyncAction(schemaName, relatedTables, 1500L, 1500L, - TimeUnit.SECONDS), - true); - } catch (Throwable t) { - LOGGER.error(String.format( - "error occurs while sync table meta, schemaName:%s, tableName:%s", schemaName, - logicalTableName)); - throw GeneralUtil.nestedException(t); - } + ChangeSetUtils.doChangeSetSchemaChange( + schemaName, logicalTableName, + relatedTables, this, + ComplexTaskMetaManager.ComplexTaskStatus.DELETE_ONLY, + ComplexTaskMetaManager.ComplexTaskStatus.DOING_CHECKER + ); } return; } @@ -148,19 +114,24 @@ protected void onRollbackSuccess(ExecutionContext executionContext) { // sync to restore the status of table meta SyncManagerHelper.sync( new TablesMetaChangePreemptiveSyncAction(schemaName, relatedTables, 1500L, 1500L, - TimeUnit.MICROSECONDS)); + TimeUnit.MICROSECONDS), SyncScope.ALL); } } @Override protected void rollbackImpl(ExecutionContext executionContext) { if (stopDoubleWrite) { - DdlEngineAccessorDelegate delegate = new DdlEngineAccessorDelegate() { + new DdlEngineAccessorDelegate() { @Override protected Integer invoke() { ComplexTaskMetaManager .updateSubTasksStatusByJobIdAndObjName(getJobId(), schemaName, logicalTableName, ComplexTaskMetaManager.ComplexTaskStatus.DOING_CHECKER, + ComplexTaskMetaManager.ComplexTaskStatus.DELETE_ONLY, + getConnection()); + ComplexTaskMetaManager + .updateSubTasksStatusByJobIdAndObjName(getJobId(), schemaName, logicalTableName, + ComplexTaskMetaManager.ComplexTaskStatus.DELETE_ONLY, ComplexTaskMetaManager.ComplexTaskStatus.WRITE_REORG, getConnection()); try { @@ -172,16 +143,15 @@ protected Integer invoke() { } return null; } - }; - delegate.execute(); + }.execute(); LOGGER.info(String .format( "Rollback table status[ schema:%s, table:%s, before state:%s, after state:%s]", schemaName, logicalTableName, - ComplexTaskMetaManager.ComplexTaskStatus.WRITE_REORG.name(), - ComplexTaskMetaManager.ComplexTaskStatus.DOING_CHECKER.name())); + ComplexTaskMetaManager.ComplexTaskStatus.DOING_CHECKER.name(), + ComplexTaskMetaManager.ComplexTaskStatus.WRITE_REORG.name())); } } @@ -223,16 +193,14 @@ protected boolean fastCheckWithCatchEx(ExecutionContext executionContext) { boolean fastCheck(ExecutionContext executionContext) { long startTime = System.currentTimeMillis(); - SQLRecorderLogger.ddlLogger.warn(MessageFormat.format( + SQLRecorderLogger.ddlLogger.info(MessageFormat.format( "FastChecker for alter tablegroup, schema [{0}] logical table [{1}] start", schemaName, logicalTableName)); - final int fastCheckerParallelism = - executionContext.getParamManager().getInt(ConnectionParams.TABLEGROUP_REORG_FASTCHECKER_PARALLELISM); FastChecker fastChecker = AlterTableGroupFastChecker .create(schemaName, logicalTableName, sourcePhyTableNames, targetPhyTableNames, - fastCheckerParallelism, executionContext); + executionContext); boolean fastCheckResult = false; try { @@ -242,7 +210,7 @@ boolean fastCheck(ExecutionContext executionContext) { throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, e, "alter tablegroup fastchecker failed to check"); } finally { - SQLRecorderLogger.ddlLogger.warn(MessageFormat.format( + SQLRecorderLogger.ddlLogger.info(MessageFormat.format( "FastChecker for alter tablegroup, schema [{0}] logical src table [{1}] finish, time use [{2}], check result [{3}]", schemaName, logicalTableName, (System.currentTimeMillis() - startTime) / 1000.0, @@ -250,6 +218,8 @@ boolean 
fastCheck(ExecutionContext executionContext) { ); if (!fastCheckResult) { EventLogger.log(EventType.DDL_WARN, "FastChecker failed"); + } else { + EventLogger.log(EventType.DDL_INFO, "FastChecker succeed"); } } @@ -267,6 +237,7 @@ private void checkInCN(ExecutionContext executionContext) { executionContext.getParamManager().getLong(ConnectionParams.TABLEGROUP_REORG_CHECK_PARALLELISM); final long earlyFailNumber = executionContext.getParamManager().getLong(ConnectionParams.TABLEGROUP_REORG_EARLY_FAIL_NUMBER); + final boolean useBinary = executionContext.getParamManager().getBoolean(ConnectionParams.BACKFILL_USING_BINARY); Checker checker = AlterTableGroupChecker.create(schemaName, logicalTableName, @@ -275,6 +246,7 @@ private void checkInCN(ExecutionContext executionContext) { speedMin, speedLimit, parallelism, + useBinary, SqlSelect.LockMode.UNDEF, SqlSelect.LockMode.UNDEF, executionContext, diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/changset/ChangeSetCatchUpTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/changset/ChangeSetCatchUpTask.java index 24c1c5638..04aef50f4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/changset/ChangeSetCatchUpTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/changset/ChangeSetCatchUpTask.java @@ -77,6 +77,7 @@ public void executeImpl(ExecutionContext executionContext) { FailPoint.injectRandomExceptionFromHint(executionContext); FailPoint.injectRandomSuspendFromHint(executionContext); + FailPoint.injectSuspendFromHint("FP_CATCHUP_TASK_SUSPEND", executionContext); changeSetManager.logicalTableChangeSetCatchUp( logicalTableName, diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/changset/MoveTableCheckTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/changset/MoveTableCheckTask.java index c918cabfb..64a7babde 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/changset/MoveTableCheckTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/changset/MoveTableCheckTask.java @@ -17,7 +17,6 @@ package com.alibaba.polardbx.executor.ddl.job.task.changset; import com.alibaba.fastjson.annotation.JSONCreator; -import com.alibaba.polardbx.common.ddl.newengine.DdlTaskState; import com.alibaba.polardbx.common.eventlogger.EventLogger; import com.alibaba.polardbx.common.eventlogger.EventType; import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; @@ -29,9 +28,8 @@ import com.alibaba.polardbx.executor.corrector.Reporter; import com.alibaba.polardbx.executor.ddl.job.task.BaseBackfillTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; -import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask; import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlEngineAccessorDelegate; -import com.alibaba.polardbx.executor.ddl.newengine.utils.TaskHelper; +import com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils; import com.alibaba.polardbx.executor.fastchecker.FastChecker; import com.alibaba.polardbx.executor.gsi.CheckerManager; import com.alibaba.polardbx.executor.scaleout.corrector.MoveTableChecker; @@ -39,14 +37,13 @@ import com.alibaba.polardbx.executor.scaleout.fastchecker.MoveTableFastChecker; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.sync.TablesMetaChangePreemptiveSyncAction; 
-import com.alibaba.polardbx.gms.metadb.misc.DdlEngineTaskRecord; import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.statistics.SQLRecorderLogger; import lombok.Getter; import org.apache.calcite.sql.SqlSelect; -import org.apache.commons.lang3.StringUtils; import java.text.MessageFormat; import java.util.List; @@ -61,7 +58,7 @@ public class MoveTableCheckTask extends BaseBackfillTask { final private Map sourceTargetGroup; final private Map> sourcePhyTableNames; final private Map> targetPhyTableNames; - final private Boolean stopDoubleWrite; + final private Boolean optimizeDoubleWrite; final private List relatedTables; @JSONCreator @@ -69,14 +66,14 @@ public MoveTableCheckTask(String schemaName, String logicalTableName, Map sourceTargetGroup, Map> sourcePhyTableNames, Map> targetPhyTableNames, - Boolean stopDoubleWrite, List relatedTables + Boolean optimizeDoubleWrite, List relatedTables ) { super(schemaName); this.logicalTableName = logicalTableName; this.sourceTargetGroup = sourceTargetGroup; this.sourcePhyTableNames = sourcePhyTableNames; this.targetPhyTableNames = targetPhyTableNames; - this.stopDoubleWrite = stopDoubleWrite; + this.optimizeDoubleWrite = optimizeDoubleWrite; this.relatedTables = relatedTables; onExceptionTryRollback(); } @@ -86,51 +83,20 @@ protected void executeImpl(ExecutionContext executionContext) { // for debug, skip checker if (executionContext.getParamManager().getBoolean(ConnectionParams.SKIP_CHANGE_SET_CHECKER) || !executionContext.getParamManager().getBoolean(ConnectionParams.SCALEOUT_CHECK_AFTER_BACKFILL)) { - if (stopDoubleWrite) { - DdlTask currentTask = this; - DdlEngineAccessorDelegate delegate = new DdlEngineAccessorDelegate() { - @Override - protected Integer invoke() { - ComplexTaskMetaManager - .updateSubTasksStatusByJobIdAndObjName(getJobId(), - schemaName, - logicalTableName, - ComplexTaskMetaManager.ComplexTaskStatus.WRITE_REORG, - ComplexTaskMetaManager.ComplexTaskStatus.DOING_CHECKER, - getConnection()); - try { - for (String tbName : relatedTables) { - TableInfoManager.updateTableVersionWithoutDataId(schemaName, tbName, getConnection()); - } - } catch (Exception e) { - throw GeneralUtil.nestedException(e); - } - currentTask.setState(DdlTaskState.DIRTY); - DdlEngineTaskRecord taskRecord = TaskHelper.toDdlEngineTaskRecord(currentTask); - return engineTaskAccessor.updateTask(taskRecord); - } - }; - delegate.execute(); - - LOGGER.info( - String.format( - "Update table status[ schema:%s, table:%s, before state:%s, after state:%s]", - schemaName, - logicalTableName, - ComplexTaskMetaManager.ComplexTaskStatus.WRITE_REORG.name(), - ComplexTaskMetaManager.ComplexTaskStatus.DOING_CHECKER.name())); + if (optimizeDoubleWrite) { + ChangeSetUtils.doChangeSetSchemaChange( + schemaName, logicalTableName, + relatedTables, this, + ComplexTaskMetaManager.ComplexTaskStatus.WRITE_REORG, + ComplexTaskMetaManager.ComplexTaskStatus.DELETE_ONLY + ); - try { - SyncManagerHelper.sync( - new TablesMetaChangePreemptiveSyncAction(schemaName, relatedTables, 1500L, 1500L, - TimeUnit.SECONDS), - true); - } catch (Throwable t) { - LOGGER.error(String.format( - "error occurs while sync table meta, schemaName:%s, tableName:%s", schemaName, - logicalTableName)); - throw GeneralUtil.nestedException(t); - } + ChangeSetUtils.doChangeSetSchemaChange( + 
schemaName, logicalTableName, + relatedTables, this, + ComplexTaskMetaManager.ComplexTaskStatus.DELETE_ONLY, + ComplexTaskMetaManager.ComplexTaskStatus.DOING_CHECKER + ); } return; } @@ -138,7 +104,7 @@ protected Integer invoke() { final boolean useFastChecker = FastChecker.isSupported(schemaName) && executionContext.getParamManager().getBoolean(ConnectionParams.SCALEOUT_BACKFILL_USE_FASTCHECKER); - if (stopDoubleWrite) { + if (optimizeDoubleWrite) { checkWithStopDoubleWrite(executionContext, useFastChecker); } else { checkWithDoubleCheck(executionContext, useFastChecker); @@ -147,37 +113,48 @@ protected Integer invoke() { @Override protected void onRollbackSuccess(ExecutionContext executionContext) { - if (stopDoubleWrite) { + if (optimizeDoubleWrite) { // sync to restore the status of table meta SyncManagerHelper.sync( new TablesMetaChangePreemptiveSyncAction(schemaName, relatedTables, 1500L, 1500L, - TimeUnit.MICROSECONDS)); + TimeUnit.MICROSECONDS), SyncScope.ALL); } } @Override protected void rollbackImpl(ExecutionContext executionContext) { - if (stopDoubleWrite) { - DdlEngineAccessorDelegate delegate = new DdlEngineAccessorDelegate() { + if (optimizeDoubleWrite) { + new DdlEngineAccessorDelegate() { @Override protected Integer invoke() { ComplexTaskMetaManager .updateSubTasksStatusByJobIdAndObjName(getJobId(), schemaName, logicalTableName, ComplexTaskMetaManager.ComplexTaskStatus.DOING_CHECKER, + ComplexTaskMetaManager.ComplexTaskStatus.DELETE_ONLY, + getConnection()); + ComplexTaskMetaManager + .updateSubTasksStatusByJobIdAndObjName(getJobId(), schemaName, logicalTableName, + ComplexTaskMetaManager.ComplexTaskStatus.DELETE_ONLY, ComplexTaskMetaManager.ComplexTaskStatus.WRITE_REORG, getConnection()); + try { + for (String tbName : relatedTables) { + TableInfoManager.updateTableVersionWithoutDataId(schemaName, tbName, getConnection()); + } + } catch (Exception e) { + throw GeneralUtil.nestedException(e); + } return null; } - }; - delegate.execute(); + }.execute(); LOGGER.info(String .format( "Rollback table status[ schema:%s, table:%s, before state:%s, after state:%s]", schemaName, logicalTableName, - ComplexTaskMetaManager.ComplexTaskStatus.WRITE_REORG.name(), - ComplexTaskMetaManager.ComplexTaskStatus.DOING_CHECKER.name())); + ComplexTaskMetaManager.ComplexTaskStatus.DOING_CHECKER.name(), + ComplexTaskMetaManager.ComplexTaskStatus.WRITE_REORG.name())); } } @@ -227,6 +204,7 @@ void checkInCN(ExecutionContext executionContext) { executionContext.getParamManager().getLong(ConnectionParams.SCALEOUT_CHECK_PARALLELISM); final long earlyFailNumber = executionContext.getParamManager().getLong(ConnectionParams.SCALEOUT_EARLY_FAIL_NUMBER); + final boolean useBinary = executionContext.getParamManager().getBoolean(ConnectionParams.BACKFILL_USING_BINARY); Checker checker = MoveTableChecker.create(schemaName, logicalTableName, @@ -235,6 +213,7 @@ void checkInCN(ExecutionContext executionContext) { speedMin, speedLimit, parallelism, + useBinary, SqlSelect.LockMode.UNDEF, SqlSelect.LockMode.UNDEF, executionContext, @@ -276,25 +255,24 @@ boolean fastCheck(ExecutionContext executionContext) { String schemaName = getSchemaName(); String logicalTable = logicalTableName; - SQLRecorderLogger.ddlLogger.warn(MessageFormat.format( + SQLRecorderLogger.ddlLogger.info(MessageFormat.format( "FastChecker for move table, schema [{0}] logical src table [{1}] logic dst table [{2}] start", schemaName, logicalTable, logicalTable)); - final int fastCheckerParallelism = - 
executionContext.getParamManager().getInt(ConnectionParams.SCALEOUT_FASTCHECKER_PARALLELISM); FastChecker fastChecker = MoveTableFastChecker - .create(schemaName, logicalTable, sourceTargetGroup, - sourcePhyTableNames, targetPhyTableNames, fastCheckerParallelism, executionContext); + .create(schemaName, logicalTable, + sourcePhyTableNames, targetPhyTableNames, executionContext); boolean fastCheckResult = false; try { - fastCheckResult = fastChecker.checkWithChangeSet(executionContext, stopDoubleWrite, this, relatedTables); + fastCheckResult = + fastChecker.checkWithChangeSet(executionContext, optimizeDoubleWrite, this, relatedTables); } catch (TddlNestableRuntimeException e) { //other exception, we simply throw out throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, e, "alter tablegroup fastchecker failed to check"); } finally { - SQLRecorderLogger.ddlLogger.warn(MessageFormat.format( + SQLRecorderLogger.ddlLogger.info(MessageFormat.format( "FastChecker for alter tablegroup, schema [{0}] logical src table [{1}] finish, time use [{2}], check result [{3}]", schemaName, logicalTableName, (System.currentTimeMillis() - startTime) / 1000.0, @@ -302,6 +280,8 @@ boolean fastCheck(ExecutionContext executionContext) { ); if (!fastCheckResult) { EventLogger.log(EventType.DDL_WARN, "FastChecker failed"); + } else { + EventLogger.log(EventType.DDL_INFO, "FastChecker succeed"); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/AddColumnarTablesMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/AddColumnarTablesMetaTask.java new file mode 100644 index 000000000..add98c94f --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/AddColumnarTablesMetaTask.java @@ -0,0 +1,76 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.columnar; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.Engine; +import com.alibaba.polardbx.executor.ddl.job.meta.TableMetaChanger; +import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.DdlUtils; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.metadb.table.ColumnsRecord; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +import java.sql.Connection; +import java.util.List; + +@Getter +@TaskName(name = "AddColumnarTablesMetaTask") +public class AddColumnarTablesMetaTask extends BaseGmsTask { + + private final String columnarTableName; + private final Engine engine; + private final Long versionId; + + @JSONCreator + public AddColumnarTablesMetaTask(String schemaName, String logicalTableName, String columnarTableName, + Long versionId, Engine engine) { + super(schemaName, logicalTableName); + this.columnarTableName = columnarTableName; + this.versionId = versionId; + this.engine = engine; + onExceptionTryRecoveryThenRollback(); + } + + @Override + public void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + TableMetaChanger.addColumnarTableMeta( + metaDbConnection, + schemaName, + logicalTableName, + columnarTableName, + engine); + } + + @Override + public void rollbackImpl(Connection metaDbConnection, ExecutionContext executionContext) { + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + TableMetaChanger.removeColumnarTableMeta(metaDbConnection, + schemaName, + columnarTableName); + } + + @Override + protected void onRollbackSuccess(ExecutionContext executionContext) { + TableMetaChanger.afterRemovingTableMeta(schemaName, logicalTableName); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/AddColumnarTablesPartitionInfoMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/AddColumnarTablesPartitionInfoMetaTask.java new file mode 100644 index 000000000..b1ceb851b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/AddColumnarTablesPartitionInfoMetaTask.java @@ -0,0 +1,84 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.columnar; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.executor.ddl.job.meta.TableMetaChanger; +import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.partition.TablePartitionAccessor; +import com.alibaba.polardbx.gms.partition.TablePartitionConfig; +import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; +import com.alibaba.polardbx.gms.tablegroup.TableGroupDetailConfig; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +import java.sql.Connection; + +@Getter +@TaskName(name = "AddColumnarTablesPartitionInfoMetaTask") +public class AddColumnarTablesPartitionInfoMetaTask extends BaseGmsTask { + + private final TableGroupDetailConfig tableGroupConfig; + private final String primaryTable; + + @JSONCreator + public AddColumnarTablesPartitionInfoMetaTask(String schemaName, + String logicalTableName, + TableGroupDetailConfig tableGroupConfig, + String primaryTable) { + super(schemaName, logicalTableName); + this.tableGroupConfig = tableGroupConfig; + this.primaryTable = primaryTable; + onExceptionTryRecoveryThenRollback(); + } + + @Override + public void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { + if (!isCreateTableSupported(executionContext)) { + return; + } + TableMetaChanger.addPartitionInfoMeta(metaDbConnection, tableGroupConfig, executionContext, false); + + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + } + + @Override + public void rollbackImpl(Connection metaDbConnection, ExecutionContext executionContext) { + if (!isCreateTableSupported(executionContext)) { + return; + } + + TableMetaChanger.removePartitionInfoMeta(metaDbConnection, schemaName, logicalTableName); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + } + + private boolean isCreateTableSupported(ExecutionContext executionContext) { + return !(executionContext.isUseHint()); + } + + private TablePartitionConfig getTablePartitionConfig(String primaryTable, Connection metaDbConnection) { + TablePartitionAccessor tablePartitionAccessor = new TablePartitionAccessor(); + tablePartitionAccessor.setConnection(metaDbConnection); + TablePartitionConfig + tablePartitionConfig = tablePartitionAccessor.getTablePartitionConfig(schemaName, primaryTable, false); + return tablePartitionConfig; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CciSchemaEvolutionTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CciSchemaEvolutionTask.java new file mode 100644 index 000000000..436e741b2 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CciSchemaEvolutionTask.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.columnar; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.executor.ddl.job.meta.TableMetaChanger; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.DdlUtils; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.metadb.table.ColumnarTableMappingRecord; +import com.alibaba.polardbx.gms.metadb.table.ColumnarTableStatus; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +import java.sql.Connection; +import java.util.Map; + +@Getter +@TaskName(name = "CciSchemaEvolutionTask") +public class CciSchemaEvolutionTask extends BaseDdlTask { + + private final String primaryTableName; + private final String columnarTableName; + private final Long versionId; + private final ColumnarTableStatus afterStatus; + private final Map options; + + private ColumnarTableMappingRecord tableMappingRecord; + + @JSONCreator + private CciSchemaEvolutionTask(String schemaName, String primaryTableName, String columnarTableName, Long versionId, + ColumnarTableStatus afterStatus, ColumnarTableMappingRecord tableMappingRecord, + Map options) { + super(schemaName); + this.primaryTableName = primaryTableName; + this.columnarTableName = columnarTableName; + this.versionId = versionId; + this.afterStatus = afterStatus; + this.options = options; + this.tableMappingRecord = tableMappingRecord; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + switch (afterStatus) { + case CREATING: + case CHECKING: + this.tableMappingRecord = TableMetaChanger.addCreateCciSchemaEvolutionMeta(metaDbConnection, + schemaName, + primaryTableName, + columnarTableName, + options, + versionId, + jobId); + break; + case DROP: + updateSupportedCommands(true, false, metaDbConnection); + TableMetaChanger.addDropCciSchemaEvolutionMeta(metaDbConnection, + schemaName, + primaryTableName, + columnarTableName, + versionId, + jobId); + break; + default: + LOGGER.warn(String.format("CciSchemaEvolutionTask does nothing for afterStatus: %s", afterStatus)); + } + + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + } + + @Override + protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + switch (afterStatus) { + case CREATING: + case CHECKING: + // Need new version id for rollback, cause version id is the primary key of columnar_table_evolution table + final long rollbackVersionId = DdlUtils.generateVersionId(executionContext); + TableMetaChanger.addRollbackCreateCciSchemaEvolutionMeta(metaDbConnection, + tableMappingRecord, + rollbackVersionId, + jobId); + break; + default: + } + + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + } + + public void duringTransaction(Long ddlJobId, Connection 
metaDbConnection, ExecutionContext executionContext) { + setJobId(ddlJobId); + duringTransaction(metaDbConnection, executionContext); + } + + public void duringRollbackTransaction(Long ddlJobId, Connection metaDbConnection, + ExecutionContext executionContext) { + setJobId(ddlJobId); + duringRollbackTransaction(metaDbConnection, executionContext); + } + + public static CciSchemaEvolutionTask dropCci(String schemaName, String logicalTableName, String columnarTableName, + Long versionId) { + return new CciSchemaEvolutionTask(schemaName, logicalTableName, columnarTableName, versionId, + ColumnarTableStatus.DROP, null, null); + } + + public static CciSchemaEvolutionTask createCci(String schemaName, String logicalTableName, String columnarTableName, + Map options, Long versionId) { + return new CciSchemaEvolutionTask(schemaName, logicalTableName, columnarTableName, versionId, + ColumnarTableStatus.CREATING, null, options); + } + + @Override + protected String remark() { + return String.format("|ddlVersionId: %s |afterStatus: %s", versionId, afterStatus); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CheckCciBaseTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CheckCciBaseTask.java new file mode 100644 index 000000000..5438d02dd --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CheckCciBaseTask.java @@ -0,0 +1,83 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.columnar; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.gsi.CheckerManager; +import lombok.Getter; +import org.apache.calcite.sql.SqlCheckColumnarIndex; + +import java.text.SimpleDateFormat; +import java.util.Calendar; +import java.util.Optional; + +/** + * @author yaozhili + */ +@Getter +public abstract class CheckCciBaseTask extends BaseDdlTask { + /** + * Blank if {@link #indexName} not exists in {@link #schemaName} + */ + protected final String tableName; + protected final String indexName; + protected final SqlCheckColumnarIndex.CheckCciExtraCmd extraCmd; + + @JSONCreator + public CheckCciBaseTask(String schemaName, String tableName, String indexName, + SqlCheckColumnarIndex.CheckCciExtraCmd extraCmd) { + super(schemaName); + this.tableName = tableName; + this.indexName = indexName; + this.extraCmd = extraCmd; + } + + protected CheckerManager.CheckerReport createReportRecord(CheckCciMetaTask.ReportErrorType errorType, + CheckerManager.CheckerReportStatus status, + String detail) { + return createReportRecord(errorType, status, "--", detail, "Reporter."); + } + + protected CheckerManager.CheckerReport createReportRecord(CheckCciMetaTask.ReportErrorType errorType, + CheckerManager.CheckerReportStatus status, + String primaryKey, + String detail, + String extra) { + return new CheckerManager.CheckerReport(-1, + this.jobId, + this.schemaName, + this.tableName, + this.schemaName, + this.indexName, + "", + "", + Optional + .ofNullable(errorType) + .filter(e -> e != CheckCciMetaTask.ReportErrorType.UNKNOWN) + .map(CheckCciMetaTask.ReportErrorType::name) + .orElse("--"), + new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(Calendar.getInstance().getTime()), + status.getValue(), + primaryKey, + detail, + extra, + null + ); + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CheckCciMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CheckCciMetaTask.java new file mode 100644 index 000000000..b96bc4457 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CheckCciMetaTask.java @@ -0,0 +1,1897 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.columnar; + +import com.alibaba.fastjson.JSONObject; +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.cdc.CdcDdlRecord; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.common.cdc.entity.DDLExtInfo; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.common.utils.TStringUtil; +import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.gsi.CheckerManager; +import com.alibaba.polardbx.executor.gsi.CheckerManager.CheckerReport; +import com.alibaba.polardbx.executor.gsi.CheckerManager.CheckerReportStatus; +import com.alibaba.polardbx.gms.metadb.table.ColumnarColumnEvolutionRecord; +import com.alibaba.polardbx.gms.metadb.table.ColumnarTableEvolutionRecord; +import com.alibaba.polardbx.gms.metadb.table.ColumnarTableMappingRecord; +import com.alibaba.polardbx.gms.metadb.table.ColumnarTableStatus; +import com.alibaba.polardbx.gms.metadb.table.ColumnsRecord; +import com.alibaba.polardbx.gms.metadb.table.IndexStatus; +import com.alibaba.polardbx.gms.metadb.table.IndexesRecord; +import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; +import com.alibaba.polardbx.gms.metadb.table.TablesRecord; +import com.alibaba.polardbx.gms.partition.TablePartitionRecord; +import com.alibaba.polardbx.gms.tablegroup.PartitionGroupRecord; +import com.alibaba.polardbx.gms.tablegroup.TableGroupRecord; +import com.alibaba.polardbx.gms.util.TableGroupNameUtil; +import com.alibaba.polardbx.optimizer.config.table.GsiMetaManager; +import com.alibaba.polardbx.optimizer.config.table.SchemaManager; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.rel.ddl.data.CheckCciPrepareData; +import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.CreateGlobalIndexPreparedData; +import com.alibaba.polardbx.optimizer.parse.FastsqlParser; +import com.alibaba.polardbx.optimizer.partition.common.PartitionTableType; +import com.cronutils.utils.Preconditions; +import lombok.Getter; +import lombok.RequiredArgsConstructor; +import org.apache.calcite.linq4j.Ord; +import org.apache.calcite.sql.SqlCheckColumnarIndex.CheckCciExtraCmd; +import org.apache.calcite.sql.SqlCreateIndex; +import org.apache.calcite.sql.SqlCreateTable; +import org.apache.calcite.sql.SqlIdentifier; +import org.apache.calcite.sql.SqlIndexColumnName; +import org.apache.calcite.sql.SqlIndexDefinition; +import org.apache.calcite.sql.SqlKind; +import org.apache.calcite.sql.SqlNode; +import org.apache.commons.lang3.tuple.Triple; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +import java.sql.Connection; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +@Getter +@TaskName(name = "CheckCciMetaTask") +public class CheckCciMetaTask extends CheckCciBaseTask { + @JSONCreator + public 
CheckCciMetaTask(String schemaName,
+                            String tableName,
+                            String indexName,
+                            CheckCciExtraCmd extraCmd) {
+        super(schemaName, tableName, indexName, extraCmd);
+    }
+
+    public static CheckCciMetaTask create(CheckCciPrepareData prepareData) {
+        return new CheckCciMetaTask(
+            prepareData.getSchemaName(),
+            prepareData.getTableName(),
+            prepareData.getIndexName(),
+            prepareData.getExtraCmd()
+        );
+    }
+
+    @Override
+    protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) {
+        final Blackboard blackboard = new Blackboard(metaDbConnection, executionContext);
+
+        final List<CheckerReport> reports = new ArrayList<>(checkAndReport(blackboard));
+
+        // Add finish record
+        reports.add(
+            createReportRecord(
+                ReportErrorType.SUMMARY,
+                CheckerReportStatus.FINISH,
+                "metadata of columnar index checked"));
+
+        // Add reports to metadb.checker_reports
+        CheckerManager.insertReports(metaDbConnection, reports);
+    }
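+
+    // Every problem found by the checks below becomes one row in metadb.checker_reports through
+    // createReportRecord(...). Sketch of a resulting row (hypothetical values; column layout follows
+    // the CheckerReport constructor call in CheckCciBaseTask):
+    //   job_id=1024, table_schema=db1, table_name=t1, index_name=cci_t1,
+    //   error_type=TABLE_NOT_EXISTS, status=FOUND,
+    //   detail="No primary table named 't1' in database db1"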
cached index status %s", cciBean.indexStatus))); + } else { + return true; + } + + return false; + }) + .findFirst(); + + if (!cciBeanOptional.isPresent()) { + reports.add( + createReportRecord( + ReportErrorType.TABLE_NOT_EXISTS, + CheckerReportStatus.FOUND, + String.format("No public cci named '%s' in table meta cache", indexName))); + return reports; + } + + return reports; + } + + /** + * Check table group / table partition config + */ + @NotNull + private List checkCciPartitioning(Blackboard blackboard) { + final List reports = new ArrayList<>(); + + // Check table partition record exists and type is PartitionTableType.COLUMNAR_TABLE + final List partitionRecords = blackboard.queryTablePartition(schemaName, indexName); + + if (GeneralUtil.isEmpty(partitionRecords)) { + reports.add( + createReportRecord( + ReportErrorType.MISSING_COLUMNAR_TABLE_PARTITION_META, + CheckerReportStatus.FOUND, + String.format( + "Missing table partition record for index %s.%s.%s", + schemaName, tableName, indexName))); + } + + final List rootRecord = new ArrayList<>(); + final List leafRecord = new ArrayList<>(); + + partitionRecords.forEach(pr -> { + if (pr.partLevel == 0) { + rootRecord.add(pr); + } else if (pr.partLevel > 0 && pr.nextLevel < 0) { + leafRecord.add(pr); + if (!TStringUtil.equals(pr.partEngine, TablePartitionRecord.PARTITION_ENGINE_COLUMNAR)) { + reports.add( + createReportRecord( + ReportErrorType.UNEXPECTED_COLUMNAR_COLUMNAR_TABLE_PARTITION_DEFINITION, + CheckerReportStatus.FOUND, + String.format( + "Unexpected partEngine value %s of table partition record %s of %s.%s.%s", + pr.partEngine, pr.id, schemaName, tableName, indexName))); + } + } + + if (pr.tblType != PartitionTableType.COLUMNAR_TABLE.getTableTypeIntValue()) { + reports.add( + createReportRecord( + ReportErrorType.UNEXPECTED_COLUMNAR_COLUMNAR_TABLE_PARTITION_DEFINITION, + CheckerReportStatus.FOUND, + String.format( + "Unexpected tblType value %s of table partition record %s of %s.%s.%s", + pr.tblType, pr.id, schemaName, tableName, indexName))); + } + }); + + Long tgId = -1L; + if (rootRecord.size() != 1) { + reports.add( + createReportRecord( + ReportErrorType.MISSING_COLUMNAR_TABLE_LOGICAL_PARTITION_META, + CheckerReportStatus.FOUND, + String.format( + "Missing logical partition record for index %s.%s.%s", + schemaName, tableName, indexName))); + } else { + // Check table group exists and type is TableGroupRecord.TG_TYPE_COLUMNAR_TBL_TG + tgId = rootRecord.get(0).getGroupId(); + final List tgRecords = blackboard.queryTableGroup(tgId); + if (tgRecords.size() != 1) { + reports.add( + createReportRecord( + ReportErrorType.MISSING_COLUMNAR_TABLE_GROUP_META, + CheckerReportStatus.FOUND, + String.format( + "Missing table group record for index %s.%s.%s", + schemaName, tableName, indexName))); + } else { + final TableGroupRecord actualTg = tgRecords.get(0); + final int expectedTgType = TableGroupRecord.TG_TYPE_COLUMNAR_TBL_TG; + final String expectedTgName = TableGroupNameUtil.autoBuildTableGroupName(tgId, expectedTgType); + if (actualTg.tg_type != expectedTgType || !TStringUtil.equals(actualTg.tg_name, expectedTgName)) { + reports.add( + createReportRecord( + ReportErrorType.UNEXPECTED_COLUMNAR_COLUMNAR_TABLE_PARTITION_DEFINITION, + CheckerReportStatus.FOUND, + String.format( + "Unexpected tblType value %s / tgName value %s table partition record %s of %s.%s.%s", + actualTg.tg_type, actualTg.tg_name, actualTg, schemaName, tableName, indexName))); + } + } + } + + if (leafRecord.isEmpty()) { + reports.add( + createReportRecord( + 
+        Long tgId = -1L;
+        if (rootRecord.size() != 1) {
+            reports.add(
+                createReportRecord(
+                    ReportErrorType.MISSING_COLUMNAR_TABLE_LOGICAL_PARTITION_META,
+                    CheckerReportStatus.FOUND,
+                    String.format(
+                        "Missing logical partition record for index %s.%s.%s",
+                        schemaName, tableName, indexName)));
+        } else {
+            // Check that the table group exists and its type is TableGroupRecord.TG_TYPE_COLUMNAR_TBL_TG
+            tgId = rootRecord.get(0).getGroupId();
+            final List<TableGroupRecord> tgRecords = blackboard.queryTableGroup(tgId);
+            if (tgRecords.size() != 1) {
+                reports.add(
+                    createReportRecord(
+                        ReportErrorType.MISSING_COLUMNAR_TABLE_GROUP_META,
+                        CheckerReportStatus.FOUND,
+                        String.format(
+                            "Missing table group record for index %s.%s.%s",
+                            schemaName, tableName, indexName)));
+            } else {
+                final TableGroupRecord actualTg = tgRecords.get(0);
+                final int expectedTgType = TableGroupRecord.TG_TYPE_COLUMNAR_TBL_TG;
+                final String expectedTgName = TableGroupNameUtil.autoBuildTableGroupName(tgId, expectedTgType);
+                if (actualTg.tg_type != expectedTgType || !TStringUtil.equals(actualTg.tg_name, expectedTgName)) {
+                    reports.add(
+                        createReportRecord(
+                            ReportErrorType.UNEXPECTED_COLUMNAR_COLUMNAR_TABLE_PARTITION_DEFINITION,
+                            CheckerReportStatus.FOUND,
+                            String.format(
+                                "Unexpected tgType value %s / tgName value %s of table group record %s of %s.%s.%s",
+                                actualTg.tg_type, actualTg.tg_name, actualTg, schemaName, tableName, indexName)));
+                }
+            }
+        }
+
+        if (leafRecord.isEmpty()) {
+            reports.add(
+                createReportRecord(
+                    ReportErrorType.MISSING_COLUMNAR_TABLE_PHYSICAL_PARTITION_META,
+                    CheckerReportStatus.FOUND,
+                    String.format(
+                        "Missing physical partition record for index %s.%s.%s",
+                        schemaName, tableName, indexName)));
+        } else if (tgId >= 0L) {
+            // Check that partition group config matches table partition config
+            final List<PartitionGroupRecord> partitionGroupRecords = blackboard.queryPartitionGroupByTgId(tgId);
+            reports.addAll(CheckerBuilder
+                .keyListChecker(leafRecord, partitionGroupRecords)
+                .withActualKeyGenerator(a -> a.groupId)
+                .withExpectedKeyGenerator(e -> e.id)
+                .withOrphanReporter(msgs -> createReportRecord(
+                    ReportErrorType.ORPHAN_COLUMNAR_TABLE_PHYSICAL_PARTITION_META,
+                    CheckerReportStatus.FOUND,
+                    String.format(
+                        "Orphan physical partition record with partition group of %s for index %s.%s.%s",
+                        String.join(",", msgs), schemaName, tableName, indexName)))
+                .withMissingReporter(msgs -> createReportRecord(
+                    ReportErrorType.MISSING_COLUMNAR_TABLE_PHYSICAL_PARTITION_META,
+                    CheckerReportStatus.FOUND,
+                    String.format(
+                        "Missing physical partition record of partition group %s for index %s.%s.%s",
+                        String.join(",", msgs), schemaName, tableName, indexName)))
+                .build()
+                .check()
+                .report());
+        }
+
+        return reports;
+    }
+
+    @NotNull
+    private List<CheckerReport> checkCdcDdlMark(Blackboard blackboard) {
+        final List<CheckerReport> reports = new ArrayList<>();
+
+        final List<ColumnarTableMappingRecord> columnarTableMappingRecords =
+            blackboard.queryColumnarTableMapping(schemaName, tableName, indexName);
+
+        // Table mapping has been checked in #checkCciTableAndColumnEvolution
+        final ColumnarTableMappingRecord columnarTableMapping = columnarTableMappingRecords.get(0);
+        final long indexTableId = columnarTableMapping.tableId;
+        final long latestVersionId = columnarTableMapping.latestVersionId;
+
+        final ColumnarTableEvolutionRecord columnarTableEvolutionRecord =
+            blackboard.queryColumnarTableEvolution(indexTableId, latestVersionId).get(0);
+        final long ddlJobId = columnarTableEvolutionRecord.ddlJobId;
+
+        final List<CdcDdlRecord> cdcDdlRecords = CdcManagerHelper.getInstance().queryDdlByJobId(ddlJobId);
+
+        // For CREATE TABLE with CCI, there will be ONLY ONE ddl mark record (for the CREATE TABLE statement).
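+        // Illustration of what the filter below keeps (values are hypothetical):
+        //   kept:    sqlKind = "CREATE_INDEX", ddlSql containing "CREATE CLUSTERED COLUMNAR INDEX ..."
+        //   kept:    sqlKind = "CREATE_TABLE", ext field containing "clustered columnar"
+        //   dropped: sqlKind = "CREATE_INDEX" for a plain GSI, since neither columnar marker appears
+        //   dropped: any other sqlKind, e.g. "ALTER_TABLE"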
+        final List<CdcDdlRecord> filteredRecords = new ArrayList<>();
+        cdcDdlRecords
+            .stream()
+            .filter(cdr -> TStringUtil.equalsIgnoreCase(cdr.getSqlKind(), SqlKind.CREATE_INDEX.name())
+                || TStringUtil.equalsIgnoreCase(cdr.getSqlKind(), SqlKind.CREATE_TABLE.name()))
+            .filter(cdr -> TStringUtil.containsIgnoreCase(cdr.getDdlSql(), "create clustered columnar")
+                || TStringUtil.containsIgnoreCase(cdr.getExt(), "clustered columnar"))
+            .forEach(filteredRecords::add);
+
+        if (GeneralUtil.isEmpty(filteredRecords)) {
+            reports.add(
+                createReportRecord(
+                    ReportErrorType.MISSING_CDC_MARK_CREATE_INDEX,
+                    CheckerReportStatus.FOUND,
+                    String.format(
+                        "Missing CDC mark for index %s.%s.%s",
+                        schemaName, tableName, indexName)));
+        } else if (filteredRecords.size() > 1) {
+            reports.add(
+                createReportRecord(
+                    ReportErrorType.DUPLICATED_CDC_MARK_CREATE_INDEX,
+                    CheckerReportStatus.FOUND,
+                    String.format(
+                        "Duplicated CDC mark for index %s.%s.%s: %s",
+                        schemaName, tableName, indexName, filteredRecords.size())));
+        }
+
+        if (!reports.isEmpty()) {
+            return reports;
+        }
+
+        // Get DDLExtInfo
+        final CdcDdlRecord cdcDdlRecord = filteredRecords.get(0);
+        final DDLExtInfo ddlExtInfo = JSONObject.parseObject(cdcDdlRecord.ext, DDLExtInfo.class);
+        if (null == ddlExtInfo.getDdlId() || ddlExtInfo.getDdlId() <= 0) {
+            reports.add(
+                createReportRecord(
+                    ReportErrorType.WRONG_CDC_MARK_DDL_ID,
+                    CheckerReportStatus.FOUND,
+                    String.format(
+                        "Wrong cdc mark ddl_id: %s", ddlExtInfo.getDdlId())));
+        }
+
+        // Check CREATE INDEX / CREATE TABLE statement
+        final String ddlSql = ddlExtInfo.getOriginalDdl();
+        reports.addAll(checkCreateCciSql(ddlSql, latestVersionId, blackboard));
+
+        return reports;
+    }
+
+    @NotNull
+    private List<CheckerReport> checkCreateCciSql(String ddlSql, Long versionId, Blackboard blackboard) {
+        final List<CheckerReport> reports = new ArrayList<>();
+
+        final String hint = CdcMarkUtil.buildVersionIdHint(versionId);
+        final boolean withHint = TStringUtil.contains(ddlSql, hint);
+        if (!withHint) {
+            reports.add(
+                createReportRecord(
+                    ReportErrorType.WRONG_CDC_MARK_STATEMENT,
+                    CheckerReportStatus.FOUND,
+                    String.format("Expected hint '%s' but got %s",
+                        hint,
+                        Optional
+                            .ofNullable(ddlSql)
+                            .orElse("NULL"))));
+        }
+
+        final ExecutionContext parserEc = blackboard.ec.copy();
+        parserEc.setSchemaName(schemaName);
+        final SqlNode sqlNode = new FastsqlParser().parse(ddlSql, parserEc).get(0);
+        final boolean isCreateIndex = sqlNode instanceof SqlCreateIndex;
+        final boolean isCreateTable = sqlNode instanceof SqlCreateTable;
+        if (!(isCreateTable || isCreateIndex)) {
+            reports.add(
+                createReportRecord(
+                    ReportErrorType.WRONG_CDC_MARK_STATEMENT,
+                    CheckerReportStatus.FOUND,
+                    String.format("Expected CREATE INDEX or CREATE TABLE but got %s",
+                        Optional
+                            .ofNullable(sqlNode)
+                            .map(sn -> sn.getKind().name())
+                            .orElse("NULL"))));
+            return reports;
+        }
+
+        if (isCreateIndex) {
+            reports.addAll(checkCreateIndex(ddlSql, blackboard, (SqlCreateIndex) sqlNode));
+        }
+
+        if (isCreateTable) {
+            reports.addAll(checkCreateTable(ddlSql, blackboard, (SqlCreateTable) sqlNode));
+        }
+
+        return reports;
+    }
+
+    private List<CheckerReport> checkCreateTable(String ddlSql,
+                                                 Blackboard blackboard,
+                                                 final SqlCreateTable createTableWithCci) {
+        final List<CheckerReport> reports = new ArrayList<>();
+
+        final List<org.apache.calcite.util.Pair<SqlIdentifier, SqlIndexDefinition>> columnarKeys =
+            createTableWithCci.getColumnarKeys();
+
+        // The actual index name may carry a generated suffix, so match by prefix
+        final List<SqlIndexDefinition> createCciList = Optional.ofNullable(columnarKeys)
+            .map(m -> m
+                .stream()
+                .filter(p -> TStringUtil.startsWithIgnoreCase(this.indexName, p.left.getLastName()))
+                .map(org.apache.calcite.util.Pair::getValue)
.collect(Collectors.toList())) + .orElse(null); + + final SqlIdentifier tableName = (SqlIdentifier) createTableWithCci.getName(); + if (GeneralUtil.isEmpty(createCciList) + || !TStringUtil.equalsIgnoreCase(this.tableName, tableName.getLastName())) { + reports.add( + createReportRecord( + ReportErrorType.WRONG_CDC_MARK_STATEMENT, + CheckerReportStatus.FOUND, + String.format( + "Wrong table or index name found in statement: %s", + ddlSql))); + return reports; + } + + if (createCciList.size() > 1) { + reports.add( + createReportRecord( + ReportErrorType.WRONG_CDC_MARK_STATEMENT, + CheckerReportStatus.FOUND, + String.format( + "Multi cci definition found in statement: %s", + ddlSql))); + return reports; + } + + final SqlCreateIndex createCci = CreateGlobalIndexPreparedData.indexDefinition2CreateIndex( + createCciList.get(0), + null, + null, + null, + null); + + return checkCreateCciDef(ddlSql, blackboard, createCci); + } + + private List checkCreateIndex(String ddlSql, Blackboard blackboard, final SqlCreateIndex createCci) { + final List reports = new ArrayList<>(); + + final SqlIdentifier indexName = createCci.getIndexName(); + final SqlIdentifier tableName = createCci.getOriginTableName(); + if (!TStringUtil.startsWithIgnoreCase(this.indexName, indexName.getLastName()) + || !TStringUtil.equalsIgnoreCase(this.tableName, tableName.getLastName())) { + reports.add( + createReportRecord( + ReportErrorType.WRONG_CDC_MARK_STATEMENT, + CheckerReportStatus.FOUND, + String.format( + "Wrong table or index name found in statement: %s", + ddlSql))); + } + + reports.addAll(checkCreateCciDef(ddlSql, blackboard, createCci)); + return reports; + } + + private List checkCreateCciDef(String ddlSql, Blackboard blackboard, SqlCreateIndex createCci) { + final List reports = new ArrayList<>(); + + if (!createCci.createCci() + || !createCci.createClusteredIndex() + || !createCci.createGsi()) { + reports.add( + createReportRecord( + ReportErrorType.WRONG_CDC_MARK_STATEMENT, + CheckerReportStatus.FOUND, + String.format( + "Wrong index type found in statement: %s", + ddlSql))); + } + + // Using user-input sql, it's possible that no partitioning clause exists +// if (createCci.getPartitioning() == null +// && createCci.getDbPartitionBy() == null) { +// reports.add( +// createReportRecord( +// ReportErrorType.WRONG_CDC_MARK_STATEMENT, +// CheckerReportStatus.FOUND, +// String.format( +// "Missing partitioning part in statement: %s", +// ddlSql))); +// } + + final List indexColumns = createCci.getColumns(); + final List covering = createCci.getCovering(); + + if (GeneralUtil.isEmpty(indexColumns)) { + reports.add( + createReportRecord( + ReportErrorType.WRONG_CDC_MARK_STATEMENT, + CheckerReportStatus.FOUND, + String.format( + "Missing sort key in statement: %s", + ddlSql))); + return reports; + } + + final Set indexColumnSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); + final Set coveringColumnSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); + final List actualColumnNames = new ArrayList<>(indexColumns); + indexColumns.forEach(ic -> { + indexColumnSet.add(ic.getColumnNameStr()); + actualColumnNames.add(ic); + }); + if (null != covering) { + covering.forEach(ic -> { + coveringColumnSet.add(ic.getColumnNameStr()); + actualColumnNames.add(ic); + }); + } + + final List expectedIndexRecords = blackboard.queryIndexes( + this.schemaName, + this.tableName, + this.indexName); + final List filteredExpectedIndexRecords = expectedIndexRecords + .stream() + .filter(ir -> !TStringUtil.equalsIgnoreCase(ir.comment, "COVERING") + 
|| coveringColumnSet.contains(ir.columnName)) + .collect(Collectors.toList()); + + final List indexColumnReports = CheckerBuilder + .stringKeyListChecker(actualColumnNames, filteredExpectedIndexRecords, true) + .withActualKeyGenerator(SqlIndexColumnName::getColumnNameStr) + .withExpectedKeyGenerator(e -> e.columnName) + .withDefValidator((a, e) -> { + final String cn = a.getColumnNameStr(); + final boolean validateIndexColumn = indexColumnSet.contains(cn) + && TStringUtil.equalsIgnoreCase(e.comment, "INDEX") + && e.isColumnar() + && e.isClustered(); + final boolean validateCoveringColumn = coveringColumnSet.contains(cn) + && TStringUtil.equalsIgnoreCase(e.comment, "COVERING"); + return validateIndexColumn || validateCoveringColumn; + }) + .withOrphanReporter(msgs -> createReportRecord( + ReportErrorType.WRONG_CDC_MARK_STATEMENT, + CheckerReportStatus.FOUND, + String.format( + "Orphan column %s found in statement: %s", + String.join(",", msgs), + ddlSql))) + .withMissingReporter(msgs -> createReportRecord( + ReportErrorType.WRONG_CDC_MARK_STATEMENT, + CheckerReportStatus.FOUND, + String.format( + "Missing column %s in statement: %s", + String.join(",", msgs), + ddlSql))) + .withInvalidateDefReporter(msgs -> createReportRecord( + ReportErrorType.WRONG_CDC_MARK_STATEMENT, + CheckerReportStatus.FOUND, + String.format( + "Unmatched definition of column %s found in statement: %s", + String.join(",", msgs), + ddlSql))) + .build() + .check() + .report(); + + reports.addAll(indexColumnReports); + + return reports; + } + + /** + * Check cci table and column evolution meta exists + */ + @NotNull + private List checkCciTableAndColumnEvolution(Blackboard blackboard) { + final List reports = new ArrayList<>(); + + final List columnarTableMappingRecords = + blackboard.queryColumnarTableMapping(schemaName, tableName, indexName); + // Get table_id and check columnar table mapping record + if (columnarTableMappingRecords.isEmpty()) { + reports.add( + createReportRecord( + ReportErrorType.MISSING_TABLE_MAPPING_META, + CheckerReportStatus.FOUND, + String.format( + "Missing table mapping meta of index %s.%s.%s", + schemaName, tableName, indexName))); + } else if (columnarTableMappingRecords.size() > 1) { + reports.add( + createReportRecord( + ReportErrorType.DUPLICATED_TABLE_MAPPING_META, + CheckerReportStatus.FOUND, + String.format( + "Duplicated table mapping meta for index %s.%s.%s: %s", + schemaName, + tableName, + indexName, + columnarTableMappingRecords + .stream() + .map(ctmr -> String.valueOf(ctmr.tableId)) + .collect(Collectors.joining())))); + } + + if (!reports.isEmpty()) { + return reports; + } + + // Check table mapping status + final ColumnarTableMappingRecord columnarTableMapping = columnarTableMappingRecords.get(0); + final long indexTableId = columnarTableMapping.tableId; + final long latestVersionId = columnarTableMapping.latestVersionId; + if (ColumnarTableStatus.from(columnarTableMapping.status) != ColumnarTableStatus.PUBLIC) { + reports.add( + createReportRecord( + ReportErrorType.UNEXPECTED_COLUMNAR_TABLE_MAPPING_META, + CheckerReportStatus.FOUND, + String.format( + "Unexpected status %s of index %s.%s.%s", + columnarTableMapping.status, schemaName, tableName, indexName))); + } + + final List tableEvolutionRecords = + blackboard.queryColumnarTableEvolution(indexTableId, latestVersionId); + // Table evolution meta of column store + if (tableEvolutionRecords.isEmpty()) { + reports.add( + createReportRecord( + ReportErrorType.MISSING_COLUMNAR_TABLE_EVOLUTION_META, + 
CheckerReportStatus.FOUND, + String.format( + "Missing columnar table evolution meta of index %s.%s.%s", + schemaName, tableName, indexName))); + return reports; + } + + // Check columnar column evolution records + final List actualColumnarColumnEvoRecords = + blackboard.queryColumnarColumnEvolution(tableEvolutionRecords.get(0).columns); + if (actualColumnarColumnEvoRecords.isEmpty()) { + reports.add( + createReportRecord( + ReportErrorType.MISSING_COLUMNAR_COLUMN_EVOLUTION_META, + CheckerReportStatus.FOUND, + String.format( + "Missing columnar column evolution meta of index %s.%s.%s", + schemaName, tableName, indexName))); + return reports; + } + + // Column meta of row store (expected) + final List expectedColumnRecords = blackboard.queryColumns(schemaName, indexName); + // Column meta of column store (actual) + // map> + final Map>> colEvoMap = new HashMap<>(); + // map + final Map actualColumnNameFieldIdMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); + actualColumnarColumnEvoRecords.forEach(ccer -> { + actualColumnNameFieldIdMap.put(ccer.columnsRecord.columnName, ccer.fieldId); + colEvoMap + .computeIfAbsent(ccer.fieldId, k -> new ArrayList<>()) + .add(Pair.of(ccer.id, ccer.columnsRecord)); + }); + // map + final Map> actualColLatestMap = new HashMap<>(); + colEvoMap.forEach((fieldId, columnarRecordPair) -> + columnarRecordPair + .stream() + .max(Comparator.comparing(Pair::getKey)) + .ifPresent(cr -> actualColLatestMap.put(fieldId, cr))); + // map + final ImmutableConcatMap> actualColumnRecordMap = + new ImmutableConcatMap<>(actualColumnNameFieldIdMap, actualColLatestMap); + + // Check columnar column evolution records (reverseOrder) + reports.addAll(checkColumnEvolutionRecords(expectedColumnRecords, actualColumnRecordMap)); + + // Check columnar table evolution records + reports.addAll(checkTableEvolutionRecord(tableEvolutionRecords.get(0), actualColLatestMap)); + + return reports; + } + + @NotNull + private List checkColumnEvolutionRecords(List indexColumns, + Map> expectedMap) { + return CheckerBuilder + .stringKeyListChecker(indexColumns, expectedMap, true) + .withReverseOrderCheck() + .withActualKeyGenerator(a -> a.columnName) + .withExpectedKeyGenerator(e -> e.getValue().columnName) + .withDefValidator((a, e) -> equalsColumnRecord(a, e.getValue())) + .withOrdValidator((a, e) -> a.ordinalPosition == e.getValue().ordinalPosition) + .withOrphanReporter(msgs -> createReportRecord( + ReportErrorType.ORPHAN_COLUMNAR_COLUMN_EVOLUTION_META, + CheckerReportStatus.FOUND, + String.format( + "Orphan column evolution meta found for column: %s", + String.join(",", msgs)))) + .withMissingReporter(msgs -> createReportRecord( + ReportErrorType.MISSING_COLUMNAR_COLUMN_EVOLUTION_META, + CheckerReportStatus.FOUND, + String.format( + "Missing column evolution meta for column: %s", + String.join(",", msgs)))) + .withInvalidateDefReporter(msgs -> createReportRecord( + ReportErrorType.UNMATCHED_COLUMNAR_COLUMN_EVOLUTION_DEFINITION, + CheckerReportStatus.FOUND, + String.format( + "Unmatched column evolution definition found for column: %s", + String.join(",", msgs)))) + .withInvalidateOrdReporter(msgs -> createReportRecord( + ReportErrorType.UNMATCHED_COLUMNAR_COLUMN_EVOLUTION_ORDER, + CheckerReportStatus.FOUND, + String.format( + "Unmatched column evolution order found for column: %s", + String.join(",", msgs)))) + .build() + .check() + .report(); + } + + @NotNull + private List checkTableEvolutionRecord(ColumnarTableEvolutionRecord tableEvolutionRecord, + Map> colLatestMap) { + return 
CheckerBuilder + .listChecker(tableEvolutionRecord.columns, colLatestMap) + .withActualKeyGenerator(Ord::getValue) + .withExpectedKeyGenerator(Pair::getKey) + .withOrdValidator((a, e) -> a.getKey() + 1 == e.getValue().ordinalPosition) + .withMissingMsgFromExpectedGenerator( + e -> String.format("%s[%s]", e.getValue().columnName, e.getValue().ordinalPosition)) + .withInvalidateOrdMsgGenerator((a, e) -> String.format("%s[%s](%s -> %s)", + e.getValue().columnName, + a.getValue(), + e.getValue().ordinalPosition, + a.getKey() + 1)) + .withOrphanReporter(msgs -> createReportRecord( + ReportErrorType.ORPHAN_COLUMNAR_TABLE_EVOLUTION_FIELD_ID, + CheckerReportStatus.FOUND, + String.format( + "Orphan table evolution field id found for column: %s", + String.join(",", msgs)))) + .withMissingReporter(msgs -> createReportRecord( + ReportErrorType.MISSING_COLUMNAR_TABLE_EVOLUTION_FIELD_ID, + CheckerReportStatus.FOUND, + String.format( + "Missing table evolution field id for column: %s", + String.join(",", msgs)))) + .withInvalidateOrdReporter(msgs -> createReportRecord( + ReportErrorType.UNMATCHED_COLUMNAR_TABLE_EVOLUTION_ORDER, + CheckerReportStatus.FOUND, + String.format( + "Unmatched table evolution column order found for column: %s", + String.join(",", msgs)))) + .build() + .check() + .report(); + } + + /** + * Check cci exists + * & Check cci belongs to primary table + * & Check cci table type + * & Check cci index information + */ + @NotNull + private List checkCciTableAndIndex(Blackboard blackboard) { + final List reports = new ArrayList<>(); + + // Table or index not exists + if (TStringUtil.isBlank(tableName)) { + reports.add( + createReportRecord( + ReportErrorType.TABLE_NOT_EXISTS, + CheckerReportStatus.FOUND, + String.format("No columnar index named '%s' in database %s", indexName, schemaName))); + return reports; + } + + final TablesRecord primaryTable = blackboard.queryTable(schemaName, tableName); + final TablesRecord indexTable = blackboard.queryTable(schemaName, indexName); + + if (null != primaryTable && null != indexTable) { + final List indexesRecords = blackboard.queryIndexes(schemaName, tableName, indexName); + if (null == indexesRecords) { + reports.add( + createReportRecord( + ReportErrorType.MISSING_INDEX_META, + CheckerReportStatus.FOUND, + String.format( + "Missing table-index relationship between table %s and index %s", + tableName, + indexName))); + } else { + final List primaryColumns = blackboard.queryColumns(schemaName, tableName); + + final List indexRecordReports = CheckerBuilder + .stringKeyListChecker(indexesRecords, primaryColumns, true) + .withActualKeyGenerator(a -> a.columnName) + .withExpectedKeyGenerator(e -> e.columnName) + .withDefValidator((a, e) -> validateIndexRecord(e, a)) + .withDistValidator() + .withOrphanReporter(msgs -> createReportRecord( + ReportErrorType.ORPHAN_INDEX_META, + CheckerReportStatus.FOUND, + String.format( + "Orphan index column meta found for column: %s", + String.join(",", msgs)))) + .withDuplicatedReporter(msgs -> createReportRecord( + ReportErrorType.DUPLICATED_INDEX_META, + CheckerReportStatus.FOUND, + String.format( + "Duplicated index column meta found for column: %s", + String.join(",", msgs)))) + .withInvalidateDefReporter(msgs -> createReportRecord( + ReportErrorType.INVALID_INDEX_META, + CheckerReportStatus.FOUND, + String.format( + "Invalid index column meta found for column: %s", + String.join(",", msgs)))) + .withMissingReporter(msgs -> createReportRecord( + ReportErrorType.MISSING_INDEX_META, + CheckerReportStatus.FOUND, + 
String.format( + "Missing index column meta for column: %s", + String.join(",", msgs)))) + .build() + .check() + .report(); + + reports.addAll(indexRecordReports); + } + } else if (null == primaryTable) { + // Primary table not exists + reports.add( + createReportRecord( + ReportErrorType.TABLE_NOT_EXISTS, + CheckerReportStatus.FOUND, + String.format("Primary table %s not exists in database %s", tableName, schemaName))); + } else { + // Index table not exists + reports.add( + createReportRecord( + ReportErrorType.TABLE_NOT_EXISTS, + CheckerReportStatus.FOUND, + String.format("Index table %s not exists in database %s", indexName, schemaName))); + } + + if (null != indexTable) { + if (!TStringUtil.equals(indexTable.tableType, "COLUMNAR TABLE")) { + reports.add( + createReportRecord( + ReportErrorType.WRONG_TABLE_TYPE, + CheckerReportStatus.FOUND, + String.format("Unexpected type '%s' of index table %s", indexTable.tableType, indexName))); + } + } + + return reports; + } + + /** + * Check primary and cci column identical (by default, all columnar index is clustered index) + */ + @NotNull + private List checkCciColumns(Blackboard blackboard) { + final List primaryColumns = blackboard.queryColumns(schemaName, tableName); + final List indexColumns = blackboard.queryColumns(schemaName, indexName); + + // 1. Check index column not in primary table + // 2. Check index column definition equals to primary table + // 3. Check index column order equals to primary table + // 4. Check primary column not in index table + return CheckerBuilder + .stringKeyListChecker(indexColumns, primaryColumns, true) + .withActualKeyGenerator(a -> a.columnName) + .withExpectedKeyGenerator(e -> e.columnName) + .withDefValidator((a, e) -> equalsColumnRecord(e, a)) + .withOrdValidator((a, e) -> a.ordinalPosition == e.ordinalPosition) + .withOrphanReporter(msgs -> createReportRecord( + ReportErrorType.ORPHAN_COLUMN, + CheckerReportStatus.FOUND, + String.format( + "Orphan columns found for column: %s", + String.join(",", msgs)))) + .withInvalidateDefReporter(msgs -> createReportRecord( + ReportErrorType.UNMATCHED_COLUMN_DEFINITION, + CheckerReportStatus.FOUND, + String.format( + "Unmatched column definition found for column: %s", + String.join(",", msgs)))) + .withInvalidateOrdReporter(msgs -> createReportRecord( + ReportErrorType.UNMATCHED_COLUMN_ORDER, + CheckerReportStatus.FOUND, + String.format( + "Unmatched column order found for column: %s", + String.join(",", msgs)))) + .withMissingReporter(msg -> createReportRecord( + ReportErrorType.MISSING_COLUMN, + CheckerReportStatus.FOUND, + String.format( + "Missing columns for column: %s", + String.join(",", msg)))) + .build() + .check() + .report(); + } + + private boolean equalsColumnRecord(ColumnsRecord left, ColumnsRecord right) { + if (null == left || null == right) { + return false; + } + + return TStringUtil.equals(left.columnName, right.columnName) + && TStringUtil.equals(left.columnType, right.columnType) + && TStringUtil.equals(left.isNullable, right.isNullable) + && TStringUtil.equals(left.columnDefault, right.columnDefault) + && left.numericPrecision == right.numericPrecision + && left.numericScale == right.numericScale + && TStringUtil.equals(left.characterSetName, right.characterSetName) + && TStringUtil.equals(left.collationName, right.collationName) + && TStringUtil.equals(left.extra, right.extra); + } + + private boolean validateIndexRecord(ColumnsRecord primaryColumnDef, IndexesRecord indexColumnDef) { + if (null == primaryColumnDef || null == indexColumnDef) 
{ + return false; + } + + /* + * For information_schema.columns: The value is YES if NULL values can be stored in the column, NO if not. + * For information_schema.statistics: Contains YES if the column may contain NULL values and '' if not. + */ + final boolean equalNullable = TStringUtil.equals(primaryColumnDef.isNullable, indexColumnDef.nullable) || ( + TStringUtil.equals(primaryColumnDef.isNullable, "NO") && TStringUtil.isBlank(indexColumnDef.nullable)); + + /* + * 1. check column name + * 2. check nullable + * 3. columnar index cannot be unique + * 4. for sort key (index_column_type == 0), + * flag should be 3 (IndexesRecord.FLAG_CLUSTERED | IndexesRecord.FLAG_COLUMNAR) + * 5. for covering column (index_column_type == 1) + * flag should be 0 + */ + return TStringUtil.equals(primaryColumnDef.columnName, indexColumnDef.columnName) + && TStringUtil.equals(primaryColumnDef.tableSchema, indexColumnDef.tableSchema) + && TStringUtil.equals(primaryColumnDef.tableSchema, indexColumnDef.indexSchema) + && TStringUtil.equals(primaryColumnDef.tableName, indexColumnDef.tableName) + && equalNullable + && indexColumnDef.nonUnique == 1 + && (indexColumnDef.indexColumnType == 1 || indexColumnDef.flag == 3); + } + + private static class Blackboard { + public final TableInfoManager tableInfoManager; + public final ExecutionContext ec; + + private final Map, TablesRecord> queryTableCache = new HashMap<>(); + private final Map, List> queryColumnCache = new HashMap<>(); + private final Map, List> + queryColumnarTableMappingCache = new HashMap<>(); + private final Map, List> queryIndexesCache = new HashMap<>(); + + private final Map, List> queryColumnarTableEvolutionCache = + new HashMap<>(); + private final Map, List> + queryColumnarColumnEvolutionCache = new HashMap<>(); + private final Map, List> queryTablePartitionCache = new HashMap<>(); + private final Map> queryTableGroupCache = new HashMap<>(); + private final Map> queryPartitionGroupCache = new HashMap<>(); + + private Blackboard(Connection metaDbConnection, ExecutionContext ec) { + final TableInfoManager tableInfoManager = new TableInfoManager(); + tableInfoManager.setConnection(metaDbConnection); + + this.tableInfoManager = tableInfoManager; + this.ec = ec; + } + + public TablesRecord queryTable(String schemaName, String tableName) { + return queryTableCache.computeIfAbsent( + Pair.of(schemaName, tableName), + k -> tableInfoManager.queryTable(k.getKey(), k.getValue(), false)); + } + + public List queryColumns(String schemaName, String tableName) { + return queryColumnCache.computeIfAbsent( + Pair.of(schemaName, tableName), + k -> tableInfoManager.queryColumns(k.getKey(), k.getValue())); + } + + public List queryIndexes(String schemaName, String tableName, String indexName) { + return queryIndexesCache.computeIfAbsent( + Triple.of(schemaName, tableName, indexName), + k -> tableInfoManager.queryIndexes(k.getLeft(), k.getMiddle(), k.getRight())); + } + + public List queryColumnarTableMapping(String schemaName, + String tableName, + String indexName) { + return queryColumnarTableMappingCache.computeIfAbsent( + Triple.of(schemaName, tableName, indexName), + k -> tableInfoManager.queryColumnarTableMapping(k.getLeft(), k.getMiddle(), k.getRight())); + } + + public List queryColumnarColumnEvolution(List fieldIdList) { + return queryColumnarColumnEvolutionCache.computeIfAbsent( + fieldIdList, + tableInfoManager::queryColumnarColumnEvolution); + } + + public List queryColumnarTableEvolution(long indexTableId, long versionId) { + return 
queryColumnarTableEvolutionCache.computeIfAbsent( + Pair.of(indexTableId, versionId), + p -> tableInfoManager.queryColumnarTableEvolution(p.getKey(), p.getValue())); + } + + public List queryTablePartition(@NotNull String schema, @NotNull String table) { + return queryTablePartitionCache.computeIfAbsent( + Pair.of(schema, table), + k -> tableInfoManager.queryTablePartitions(k.getKey(), k.getValue(), false)); + } + + public List queryTableGroup(@NotNull Long id) { + return queryTableGroupCache.computeIfAbsent( + id, + tableInfoManager::queryTableGroupById); + } + + public List queryPartitionGroupByTgId(@NotNull Long tgId) { + return queryPartitionGroupCache.computeIfAbsent( + tgId, + tableInfoManager::queryPartitionGroupByTgId); + } + } + + public enum ReportErrorType { + UNKNOWN, + SUMMARY, + MISSING_COLUMN, + UNMATCHED_COLUMN_ORDER, + UNMATCHED_COLUMN_DEFINITION, + ORPHAN_COLUMN, + SCHEMA_NOT_EXISTS, + TABLE_NOT_EXISTS, + UNEXPECTED_CACHED_INDEX_STATUS, + UNEXPECTED_CACHED_INDEX_TYPE, + MISSING_INDEX_META, + ORPHAN_INDEX_META, + INVALID_INDEX_META, + DUPLICATED_INDEX_META, + WRONG_TABLE_TYPE, + MISSING_TABLE_MAPPING_META, + DUPLICATED_TABLE_MAPPING_META, + MISSING_COLUMNAR_TABLE_EVOLUTION_META, + UNMATCHED_COLUMNAR_TABLE_EVOLUTION_ORDER, + MISSING_COLUMNAR_COLUMN_EVOLUTION_META, + ORPHAN_COLUMNAR_COLUMN_EVOLUTION_META, + UNMATCHED_COLUMNAR_COLUMN_EVOLUTION_DEFINITION, + UNMATCHED_COLUMNAR_COLUMN_EVOLUTION_ORDER, + MISSING_COLUMNAR_TABLE_EVOLUTION_FIELD_ID, + ORPHAN_COLUMNAR_TABLE_EVOLUTION_FIELD_ID, + MISSING_CDC_MARK_CREATE_INDEX, + DUPLICATED_CDC_MARK_CREATE_INDEX, + WRONG_CDC_MARK_DDL_ID, + WRONG_CDC_MARK_STATEMENT, + UNEXPECTED_COLUMNAR_TABLE_MAPPING_META, + MISSING_COLUMNAR_TABLE_PARTITION_META, + MISSING_COLUMNAR_TABLE_LOGICAL_PARTITION_META, + MISSING_COLUMNAR_TABLE_PHYSICAL_PARTITION_META, + ORPHAN_COLUMNAR_TABLE_PHYSICAL_PARTITION_META, + UNEXPECTED_COLUMNAR_COLUMNAR_TABLE_PARTITION_DEFINITION, + MISSING_COLUMNAR_TABLE_GROUP_META, + UNEXPECTED_COLUMNAR_COLUMNAR_TABLE_GROUP_DEFINITION, + ; + + public static ReportErrorType of(String value) { + if (null == value) { + return UNKNOWN; + } + try { + return ReportErrorType.valueOf(value.toUpperCase()); + } catch (IllegalArgumentException ignored) { + return UNKNOWN; + } + } + } + + private interface ElementReporter { + String orphanMsgFromActual(T actual); + + String orphanMsgFromExpected(R expected); + + String invalidateDefMsg(T actual, R expected); + + String invalidateOrdMsg(T actual, R expected); + + String duplicatedMsg(T actual); + + String missingMsgFromExpected(R expected); + + String missingMsgFromActual(T actual); + + CheckerReport reportOrphan(List msgs); + + CheckerReport reportInvalidateDef(List msgs); + + CheckerReport reportInvalidateOrd(List msgs); + + default CheckerReport reportDuplicated(List msgs) { + throw new UnsupportedOperationException(); + } + + CheckerReport reportMissing(List msgs); + } + + private interface ElementValidator { + boolean isInvalidateDef(@NotNull T actual, @NotNull R expected); + + default boolean isInvalidateOrd(@NotNull T actual, @NotNull R expected) { + return false; + } + + default boolean isDuplicated(@NotNull T actual) { + return false; + } + + @Nullable + R findInExpected(@NotNull T actual); + + @Nullable + T findInActual(@NotNull R expected); + + /** + * @return false if actualNotInExpected() ? reportOrphan : reportMissing + *
+ * true if actualNotInExpected() ? reportMissing : reportOrphan + */ + default boolean reverseOrderCheck() { + return false; + } + } + + private interface ElementChecker extends ElementValidator, ElementReporter { + @NotNull + Iterable getActualElements(); + + @NotNull + Iterable getExpectedElements(); + + @NotNull + ElementChecker check(); + + @NotNull + List report(); + } + + private static abstract class AbstractElementChecker + implements ElementChecker { + protected final Function, CheckerReport> orphanReporter; + protected final Function, CheckerReport> invalidateDefReporter; + protected final Function, CheckerReport> invalidateOrdReporter; + protected final Function, CheckerReport> missingReporter; + protected final Function, CheckerReport> duplicatedReporter; + + protected final Function orphanMsgFromActualGenerator; + protected final Function orphanMsgFromExpectedGenerator; + protected final Function missingMsgFromExpectedGenerator; + protected final Function missingMsgFromActualGenerator; + protected final BiFunction invalidateDefMsgGenerator; + protected final BiFunction invalidateOrdMsgGenerator; + + private final List orphan = new ArrayList<>(); + private final List invalidateDef = new ArrayList<>(); + private final List invalidateOrd = new ArrayList<>(); + private final List missing = new ArrayList<>(); + private final List duplicated = new ArrayList<>(); + + protected AbstractElementChecker(Function, CheckerReport> orphanReporter, + Function, CheckerReport> invalidateDefReporter, + Function, CheckerReport> invalidateOrdReporter, + Function, CheckerReport> missingReporter, + Function, CheckerReport> duplicatedReporter, + Function orphanMsgFromActualGenerator, + Function orphanMsgFromExpectedGenerator, + Function missingMsgFromExpectedGenerator, + Function missingMsgFromActualGenerator, + BiFunction invalidateDefMsgGenerator, + BiFunction invalidateOrdMsgGenerator) { + this.orphanReporter = orphanReporter; + this.invalidateDefReporter = invalidateDefReporter; + this.invalidateOrdReporter = invalidateOrdReporter; + this.missingReporter = missingReporter; + this.duplicatedReporter = duplicatedReporter; + this.orphanMsgFromActualGenerator = orphanMsgFromActualGenerator; + this.orphanMsgFromExpectedGenerator = orphanMsgFromExpectedGenerator; + this.missingMsgFromExpectedGenerator = missingMsgFromExpectedGenerator; + this.missingMsgFromActualGenerator = missingMsgFromActualGenerator; + this.invalidateDefMsgGenerator = invalidateDefMsgGenerator; + this.invalidateOrdMsgGenerator = invalidateOrdMsgGenerator; + } + + @Override + @NotNull + public ElementChecker check() { + // Natural order check + for (T actual : getActualElements()) { + if (isDuplicated(actual)) { + duplicated.add(duplicatedMsg(actual)); + } else { + final R expected = findInExpected(actual); + if (null == expected) { + if (reverseOrderCheck()) { + missing.add(missingMsgFromActual(actual)); + } else { + orphan.add(orphanMsgFromActual(actual)); + } + } else if (isInvalidateDef(actual, expected)) { + invalidateDef.add(invalidateDefMsg(actual, expected)); + } else if (isInvalidateOrd(actual, expected)) { + invalidateOrd.add(invalidateOrdMsg(actual, expected)); + } + } + } + + // Reverse order check + for (R expected : getExpectedElements()) { + final T actual = findInActual(expected); + if (null == actual) { + if (reverseOrderCheck()) { + orphan.add(orphanMsgFromExpected(expected)); + } else { + missing.add(missingMsgFromExpected(expected)); + } + } + } + return this; + } + + @Override + @NotNull + public List report() 
{ + final List reports = new ArrayList<>(); + + if (!orphan.isEmpty()) { + reports.add(reportOrphan(orphan)); + } + + if (!invalidateDef.isEmpty()) { + reports.add(reportInvalidateDef(invalidateDef)); + } + + if (!invalidateOrd.isEmpty()) { + reports.add(reportInvalidateOrd(invalidateOrd)); + } + + if (!duplicated.isEmpty()) { + reports.add(reportDuplicated(duplicated)); + } + + if (!missing.isEmpty()) { + reports.add(reportMissing(missing)); + } + + return reports; + } + + private String unwrap(@NotNull K key) { + if (key instanceof String) { + return (String) key; + } else { + return key.toString(); + } + } + + @NotNull + public abstract K getKeyFromActual(T actual); + + @NotNull + public abstract K getKeyFromExpected(R expected); + + @Override + public String orphanMsgFromActual(T actual) { + if (null != this.orphanMsgFromActualGenerator) { + return this.orphanMsgFromActualGenerator.apply(actual); + } + return unwrap(getKeyFromActual(actual)); + } + + @Override + public String orphanMsgFromExpected(R expected) { + if (null != this.orphanMsgFromExpectedGenerator) { + return this.orphanMsgFromExpectedGenerator.apply(expected); + } + return unwrap(getKeyFromExpected(expected)); + } + + @Override + public String invalidateDefMsg(T actual, R expected) { + if (null != this.invalidateDefMsgGenerator) { + return this.invalidateDefMsgGenerator.apply(actual, expected); + } + return unwrap(getKeyFromActual(actual)); + } + + @Override + public String invalidateOrdMsg(T actual, R expected) { + if (null != this.invalidateOrdMsgGenerator) { + return this.invalidateOrdMsgGenerator.apply(actual, expected); + } + return unwrap(getKeyFromActual(actual)); + } + + @Override + public String duplicatedMsg(T actual) { + return unwrap(getKeyFromActual(actual)); + } + + @Override + public String missingMsgFromExpected(R expected) { + if (null != this.missingMsgFromExpectedGenerator) { + return this.missingMsgFromExpectedGenerator.apply(expected); + } + return unwrap(getKeyFromExpected(expected)); + } + + @Override + public String missingMsgFromActual(T actual) { + if (null != this.missingMsgFromActualGenerator) { + return this.missingMsgFromActualGenerator.apply(actual); + } + return unwrap(getKeyFromActual(actual)); + } + + @Override + public CheckerReport reportOrphan(List msgs) { + return orphanReporter.apply(msgs); + } + + @Override + public CheckerReport reportInvalidateDef(List msgs) { + return invalidateDefReporter.apply(msgs); + } + + @Override + public CheckerReport reportInvalidateOrd(List msgs) { + return invalidateOrdReporter.apply(msgs); + } + + @Override + public CheckerReport reportDuplicated(List msgs) { + return duplicatedReporter.apply(msgs); + } + + @Override + public CheckerReport reportMissing(List msgs) { + return missingReporter.apply(msgs); + } + + } + + private static class ListElementChecker extends AbstractElementChecker { + protected final Map actualMap; + protected final Map expectedMap; + protected final Function actualKeyGenerator; + protected final Function expectedKeyGenerator; + protected final BiFunction actualFinder; + protected final BiFunction expectedFinder; + protected final BiFunction defValidator; + protected final BiFunction ordValidator; + protected final Function distValidator; + protected final boolean reverseOrderCheck; + + protected ListElementChecker(Map actualMap, + Map expectedMap, + Function actualKeyGenerator, + Function expectedKeyGenerator, + BiFunction actualFinder, + BiFunction expectedFinder, + BiFunction defValidator, + BiFunction ordValidator, + 
Function distValidator, + Function, CheckerReport> orphanReporter, + Function, CheckerReport> invalidateDefReporter, + Function, CheckerReport> invalidateOrdReporter, + Function, CheckerReport> missingReporter, + Function, CheckerReport> duplicatedReporter, + Function orphanMsgFromActualGenerator, + Function orphanMsgFromExpectedGenerator, + Function missingMsgFromExpectedGenerator, + Function missingMsgFromActualGenerator, + BiFunction invalidateDefMsgGenerator, + BiFunction invalidateOrdMsgGenerator, + boolean reverseOrderCheck) { + super(orphanReporter, + invalidateDefReporter, + invalidateOrdReporter, + missingReporter, + duplicatedReporter, + orphanMsgFromActualGenerator, + orphanMsgFromExpectedGenerator, + missingMsgFromExpectedGenerator, + missingMsgFromActualGenerator, + invalidateDefMsgGenerator, + invalidateOrdMsgGenerator); + this.actualMap = actualMap; + this.expectedMap = expectedMap; + this.actualKeyGenerator = actualKeyGenerator; + this.expectedKeyGenerator = expectedKeyGenerator; + this.actualFinder = actualFinder; + this.expectedFinder = expectedFinder; + this.defValidator = defValidator; + this.ordValidator = ordValidator; + this.distValidator = distValidator; + this.reverseOrderCheck = reverseOrderCheck; + } + + @Override + @NotNull + public K getKeyFromActual(T actual) { + return actualKeyGenerator.apply(actual); + } + + @Override + @NotNull + public K getKeyFromExpected(R expected) { + return expectedKeyGenerator.apply(expected); + } + + @Override + @Nullable + public R findInExpected(@NotNull T actual) { + if (null == expectedFinder) { + return expectedMap.get(getKeyFromActual(actual)); + } + return expectedFinder.apply(getKeyFromActual(actual), actual); + } + + @Override + @Nullable + public T findInActual(@NotNull R expected) { + if (null == actualFinder) { + return actualMap.get(getKeyFromExpected(expected)); + } + return actualFinder.apply(getKeyFromExpected(expected), expected); + } + + @Override + public boolean isInvalidateDef(@NotNull T actual, @NotNull R expected) { + if (null == defValidator) { + return false; + } + return !defValidator.apply(actual, expected); + } + + @Override + public boolean isInvalidateOrd(@NotNull T actual, @NotNull R expected) { + if (null == ordValidator) { + return false; + } + return !ordValidator.apply(actual, expected); + } + + @Override + public boolean isDuplicated(@NotNull T actual) { + if (null == distValidator) { + return false; + } + return !distValidator.apply(actual); + } + + @Override + public boolean reverseOrderCheck() { + return reverseOrderCheck; + } + + @Override + public @NotNull Iterable getActualElements() { + return actualMap.values(); + } + + @Override + public @NotNull Iterable getExpectedElements() { + return expectedMap.values(); + } + } + + @RequiredArgsConstructor + private static final class CheckerBuilder { + private final Iterable actualList; + private final Supplier> actualMapSupplier; + private final Supplier> expectedMapSupplier; + private final Supplier> setSupplier; + private Function actualKeyGenerator; + private Function expectedKeyGenerator; + private BiFunction actualFinder; + private BiFunction expectedFinder; + private BiFunction defValidator; + private BiFunction ordValidator; + private Function distValidator; + private Function, CheckerReport> orphanReporter; + private Function, CheckerReport> invalidateDefReporter; + private Function, CheckerReport> invalidateOrdReporter; + private Function, CheckerReport> missingReporter; + private Function, CheckerReport> duplicatedReporter; + + private 
Function orphanMsgFromActualGenerator; + private Function orphanMsgFromExpectedGenerator; + private Function missingMsgFromExpectedGenerator; + private Function missingMsgFromActualGenerator; + private BiFunction invalidateDefMsgGenerator; + private BiFunction invalidateOrdMsgGenerator; + + private Iterable expectedList; + private Map actualMap; + private Map expectedMap; + + private boolean distCheck = false; + private boolean reverseOrderCheck = false; + + public static CheckerBuilder stringKeyListChecker(@NotNull Iterable actualList, + @NotNull Iterable expectedList, + boolean caseInsensitive) { + final CheckerBuilder builder = new CheckerBuilder<>( + actualList, + () -> caseInsensitive ? new TreeMap<>(String.CASE_INSENSITIVE_ORDER) : new HashMap<>(), + () -> caseInsensitive ? new TreeMap<>(String.CASE_INSENSITIVE_ORDER) : new HashMap<>(), + () -> caseInsensitive ? new TreeSet<>(String.CASE_INSENSITIVE_ORDER) : new HashSet<>()); + return builder.withExpectedList(expectedList); + } + + public static CheckerBuilder stringKeyListChecker(@NotNull Iterable actualList, + @NotNull Map expectedMap, + boolean caseInsensitive) { + final CheckerBuilder builder = new CheckerBuilder<>( + actualList, + () -> caseInsensitive ? new TreeMap<>(String.CASE_INSENSITIVE_ORDER) : new HashMap<>(), + () -> caseInsensitive ? new TreeMap<>(String.CASE_INSENSITIVE_ORDER) : new HashMap<>(), + () -> caseInsensitive ? new TreeSet<>(String.CASE_INSENSITIVE_ORDER) : new HashSet<>()); + return builder.withExpectedMap(expectedMap); + } + + public static CheckerBuilder keyListChecker(@NotNull Iterable actualList, + @NotNull Map expectedMap) { + final CheckerBuilder builder = new CheckerBuilder<>( + actualList, + HashMap::new, + HashMap::new, + HashSet::new); + return builder.withExpectedMap(expectedMap); + } + + public static CheckerBuilder keyListChecker(@NotNull Iterable actualList, + @NotNull Iterable expectedList) { + final CheckerBuilder builder = new CheckerBuilder<>( + actualList, + HashMap::new, + HashMap::new, + HashSet::new); + return builder.withExpectedList(expectedList); + } + + public static CheckerBuilder, R> listChecker(@NotNull Iterable actualList, + @NotNull Map expectedMap) { + final CheckerBuilder, R> builder = new CheckerBuilder<>( + Ord.zip(actualList), + HashMap::new, + HashMap::new, + HashSet::new); + + return builder.withExpectedMap(expectedMap); + } + + public static CheckerBuilder, Ord> listChecker(@NotNull Iterable actualList, + @NotNull Iterable expectedList) { + final CheckerBuilder, Ord> builder = new CheckerBuilder<>( + Ord.zip(actualList), + HashMap::new, + HashMap::new, + HashSet::new); + + return builder + .withExpectedList(Ord.zip(expectedList)) + .withActualKeyGenerator(Ord::getKey) + .withExpectedKeyGenerator(Ord::getKey); + } + + public ElementChecker build() { + Preconditions.checkNotNull(actualList); + Preconditions.checkNotNull(actualKeyGenerator); + Preconditions.checkNotNull(actualMapSupplier); + Preconditions.checkNotNull(expectedKeyGenerator); + + if (GeneralUtil.isEmpty(expectedMap)) { + Preconditions.checkNotNull(expectedList); + Preconditions.checkNotNull(expectedMapSupplier); + this.expectedMap = expectedMapSupplier.get(); + this.expectedList.forEach(e -> expectedMap.put(expectedKeyGenerator.apply(e), e)); + } + + this.actualMap = actualMapSupplier.get(); + this.actualList.forEach(a -> actualMap.put(actualKeyGenerator.apply(a), a)); + + if (this.distCheck) { + Preconditions.checkNotNull(setSupplier); + final Set actualSet = setSupplier.get(); + this.distValidator = (a) -> 
actualSet.add(actualKeyGenerator.apply(a)); + } + + return new ListElementChecker<>( + actualMap, + expectedMap, + actualKeyGenerator, + expectedKeyGenerator, + actualFinder, + expectedFinder, + defValidator, + ordValidator, + distValidator, + orphanReporter, + invalidateDefReporter, + invalidateOrdReporter, + missingReporter, + duplicatedReporter, + orphanMsgFromActualGenerator, + orphanMsgFromExpectedGenerator, + missingMsgFromExpectedGenerator, + missingMsgFromActualGenerator, + invalidateDefMsgGenerator, + invalidateOrdMsgGenerator, + reverseOrderCheck); + } + + public CheckerBuilder withActualKeyGenerator(@NotNull Function actualKeyGenerator) { + this.actualKeyGenerator = actualKeyGenerator; + return this; + } + + public CheckerBuilder withExpectedKeyGenerator(@NotNull Function expectedKeyGenerator) { + this.expectedKeyGenerator = expectedKeyGenerator; + return this; + } + + public CheckerBuilder withActualFinder(BiFunction actualFinder) { + this.actualFinder = actualFinder; + return this; + } + + public CheckerBuilder withExpectedFinder(BiFunction expectedFinder) { + this.expectedFinder = expectedFinder; + return this; + } + + public CheckerBuilder withExpectedList(@NotNull Iterable expectedList) { + this.expectedList = expectedList; + return this; + } + + public CheckerBuilder withExpectedMap(@NotNull Map expectedMap) { + this.expectedMap = expectedMap; + return this; + } + + public CheckerBuilder withDefValidator(BiFunction defValidator) { + this.defValidator = defValidator; + return this; + } + + public CheckerBuilder withOrdValidator(BiFunction ordValidator) { + this.ordValidator = ordValidator; + return this; + } + + public CheckerBuilder withDistValidator() { + this.distCheck = true; + return this; + } + + public CheckerBuilder withReverseOrderCheck() { + this.reverseOrderCheck = true; + return this; + } + + public CheckerBuilder withOrphanReporter( + Function, CheckerReport> orphanReporter) { + this.orphanReporter = orphanReporter; + return this; + } + + public CheckerBuilder withInvalidateDefReporter( + Function, CheckerReport> invalidateDefReporter) { + this.invalidateDefReporter = invalidateDefReporter; + return this; + } + + public CheckerBuilder withInvalidateOrdReporter( + Function, CheckerReport> invalidateOrdReporter) { + this.invalidateOrdReporter = invalidateOrdReporter; + return this; + } + + public CheckerBuilder withMissingReporter( + Function, CheckerReport> missingReporter) { + this.missingReporter = missingReporter; + return this; + } + + public CheckerBuilder withDuplicatedReporter( + Function, CheckerReport> duplicatedReporter) { + this.duplicatedReporter = duplicatedReporter; + return this; + } + + public CheckerBuilder withOrphanMsgFromActualGenerator( + Function orphanMsgFromActualGenerator) { + this.orphanMsgFromActualGenerator = orphanMsgFromActualGenerator; + return this; + } + + public CheckerBuilder withOrphanMsgFromExpectedGenerator( + Function orphanMsgFromExpectedGenerator) { + this.orphanMsgFromExpectedGenerator = orphanMsgFromExpectedGenerator; + return this; + } + + public CheckerBuilder withMissingMsgFromExpectedGenerator( + Function missingMsgFromExpectedGenerator) { + this.missingMsgFromExpectedGenerator = missingMsgFromExpectedGenerator; + return this; + } + + public CheckerBuilder withMissingMsgFromActualGenerator( + Function missingMsgFromActualGenerator) { + this.missingMsgFromActualGenerator = missingMsgFromActualGenerator; + return this; + } + + public CheckerBuilder withInvalidateDefMsgGenerator( + BiFunction invalidateDefMsgGenerator) 
{
+            this.invalidateDefMsgGenerator = invalidateDefMsgGenerator;
+            return this;
+        }
+
+        public CheckerBuilder<K, T, R> withInvalidateOrdMsgGenerator(
+            BiFunction<T, R, String> invalidateOrdMsgGenerator) {
+            this.invalidateOrdMsgGenerator = invalidateOrdMsgGenerator;
+            return this;
+        }
+    }
+
+    /**
+     * Read-only view that joins {@code left: K -> M} with {@code right: M -> V}.
+     */
+    @RequiredArgsConstructor
+    private static class ImmutableConcatMap<K, M, V> extends HashMap<K, V> {
+        private final Map<K, M> left;
+        private final Map<M, V> right;
+
+        @Override
+        public V get(Object key) {
+            return Optional
+                .ofNullable(left.get(key))
+                .map(right::get)
+                .orElse(null);
+        }
+
+        @Override
+        public Set<K> keySet() {
+            // Lookup goes through the left map, so expose its keys
+            return left.keySet();
+        }
+
+        @Override
+        public Collection<V> values() {
+            return right.values();
+        }
+
+        @Override
+        public boolean isEmpty() {
+            return left.isEmpty() || right.isEmpty();
+        }
+
+        @Override
+        public int size() {
+            return Math.min(left.size(), right.size());
+        }
+
+        @Override
+        public boolean containsKey(Object key) {
+            return left.containsKey(key) && right.containsKey(left.get(key));
+        }
+
+        @Override
+        public V put(K key, V value) {
+            throw new UnsupportedOperationException("put is not supported");
+        }
+
+        @Override
+        public void putAll(Map<? extends K, ? extends V> m) {
+            throw new UnsupportedOperationException("putAll is not supported");
+        }
+
+        @Override
+        public V remove(Object key) {
+            throw new UnsupportedOperationException("remove is not supported");
+        }
+
+        @Override
+        public void clear() {
+            throw new UnsupportedOperationException("clear is not supported");
+        }
+
+        @Override
+        public Set<Map.Entry<K, V>> entrySet() {
+            throw new UnsupportedOperationException("entrySet is not supported");
+        }
+
+        @Override
+        public boolean containsValue(Object value) {
+            throw new UnsupportedOperationException("containsValue is not supported");
+        }
+    }
+
+    @Override
+    public String remark() {
+        return String.format("|CheckCci(%s.%s)", tableName, indexName);
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CheckCciStartTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CheckCciStartTask.java
new file mode 100644
index 000000000..619985524
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CheckCciStartTask.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.task.columnar;
+
+import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName;
+import com.alibaba.polardbx.executor.gsi.CheckerManager;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import lombok.Getter;
+
+import java.sql.Connection;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * @author yaozhili
+ */
+@Getter
+@TaskName(name = "CheckCciStartTask")
+public class CheckCciStartTask extends CheckCciBaseTask {
+    public CheckCciStartTask(String schemaName, String tableName, String indexName) {
+        // Base task signature is (schemaName, tableName, indexName, extraCmd).
+        super(schemaName, tableName, indexName, null);
+    }
+
+    @Override
+    protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) {
+        final List reports = new ArrayList<>();
+        reports.add(
+            createReportRecord(
+                CheckCciMetaTask.ReportErrorType.SUMMARY,
+                CheckerManager.CheckerReportStatus.START,
+                "Start columnar index check. job id: "));
+
+        // Add reports to metadb.checker_reports
+        CheckerManager.insertReports(metaDbConnection, reports);
+    }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CheckCciTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CheckCciTask.java
new file mode 100644
index 000000000..8c5ebc703
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CheckCciTask.java
@@ -0,0 +1,133 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.columnar; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.properties.ConnectionProperties; +import com.alibaba.polardbx.common.utils.LoggerUtil; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.columnar.checker.CciChecker; +import com.alibaba.polardbx.executor.columnar.checker.CciFastChecker; +import com.alibaba.polardbx.executor.columnar.checker.ICciChecker; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.gsi.CheckerManager; +import com.alibaba.polardbx.executor.utils.ExecUtils; +import com.alibaba.polardbx.gms.topology.InstConfigAccessor; +import com.alibaba.polardbx.gms.topology.InstConfigRecord; +import com.alibaba.polardbx.gms.util.InstIdUtil; +import com.alibaba.polardbx.gms.util.MetaDbUtil; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.rel.ddl.data.CheckCciPrepareData; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; +import com.google.common.collect.ImmutableList; +import lombok.Getter; +import org.apache.calcite.sql.SqlCheckColumnarIndex; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; + +/** + * Check consistency of clustered columnar index + * + * @author yaozhili + */ +@Getter +@TaskName(name = "CheckCciTask") +public class CheckCciTask extends CheckCciBaseTask { + + private final static Logger LOG = LoggerFactory.getLogger(CheckCciTask.class); + + final List reports = new ArrayList<>(); + + public static CheckCciTask create(CheckCciPrepareData prepareData) { + return new CheckCciTask( + prepareData.getSchemaName(), + prepareData.getTableName(), + prepareData.getIndexName(), + prepareData.getExtraCmd() + ); + } + + @JSONCreator + public CheckCciTask(String schemaName, + String tableName, + String indexName, + SqlCheckColumnarIndex.CheckCciExtraCmd extraCmd) { + super(schemaName, tableName, indexName, extraCmd); + } + + @Override + protected void beforeTransaction(ExecutionContext executionContext) { + // Check. + ICciChecker checker; + if (executionContext.isEnableFastCciChecker()) { + checker = new CciFastChecker(schemaName, tableName, indexName); + } else { + checker = new CciChecker(schemaName, tableName, indexName); + } + + Runnable recover = null; + if (executionContext.isForce2pcDuringCciCheck()) { + try { + recover = ExecUtils.forceAllTrx2PC(); + } catch (Throwable t) { + recover = null; + } + } + + try { + checker.check(executionContext, recover); + } catch (Throwable t) { + reports.add( + createReportRecord( + CheckCciMetaTask.ReportErrorType.SUMMARY, + CheckerManager.CheckerReportStatus.FOUND, + "Error occurs when checking, caused by " + t.getMessage() + )); + SQLRecorderLogger.ddlLogger.error(t); + } + + List checkReports = new ArrayList<>(); + if (!checker.getCheckReports(checkReports)) { + // Inconsistency detected. 
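+                // Every inconsistency found becomes one report row with status FOUND; a
+                // SUMMARY/FINISH row is appended below either way, and duringTransaction()
+                // later persists the whole list into metadb.checker_reports in one batch.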
+            for (String error : checkReports) {
+                reports.add(createReportRecord(CheckCciMetaTask.ReportErrorType.SUMMARY,
+                    CheckerManager.CheckerReportStatus.FOUND, error));
+            }
+        }
+
+        reports.add(
+            createReportRecord(
+                CheckCciMetaTask.ReportErrorType.SUMMARY,
+                CheckerManager.CheckerReportStatus.FINISH,
+                "Data of columnar index checked."));
+    }
+
+    @Override
+    protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) {
+        // Add reports to metadb.checker_reports
+        CheckerManager.insertReports(metaDbConnection, reports);
+    }
+
+    @Override
+    public String remark() {
+        return String.format("|CciCheck(%s.%s)", tableName, indexName);
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/ColumnarVersionUtil.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/ColumnarVersionUtil.java
new file mode 100644
index 000000000..91648cc23
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/ColumnarVersionUtil.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.columnar; + +import com.alibaba.fastjson.JSONObject; +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.fastjson.annotation.JSONField; +import com.alibaba.polardbx.common.utils.HttpClientHelper; +import com.alibaba.polardbx.common.utils.TStringUtil; +import com.alibaba.polardbx.net.util.ColumnarTargetUtil; +import lombok.EqualsAndHashCode; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class ColumnarVersionUtil { + private static final String RPM_PREFIX = "t-polardbx-columnar-"; + private static final String RPM_SUFFIX = ".noarch.rpm"; + + public static String getVersion() { + String result = null; + try { + String daemonEndpoint = ColumnarTargetUtil.getDaemonMasterTarget(); + String httpResult = HttpClientHelper.doGet("http://" + daemonEndpoint + "/columnar/system/getVersion"); + final ColumnarVersions columnarVersions = JSONObject.parseObject(httpResult, ColumnarVersions.class); + + result = TStringUtil.substring( + columnarVersions.polardbxColumnarRpmVersion, RPM_PREFIX.length(), + TStringUtil.indexOf(columnarVersions.polardbxColumnarRpmVersion, RPM_SUFFIX)); + } catch (Exception e) { + log.error("get columnar version error", e); + } + return result; + } + + @Getter + @EqualsAndHashCode + static class ColumnarVersions { + private static final String FIELD_NAME_POLARDBX_COLUMNAR_IMAGE_VERSION = "polardbx_columnar_image_version"; + private static final String FIELD_NAME_POLARDBX_COLUMNAR_RPM_VERSION = "polardbx_columnar_rpm_version"; + private static final String FIELD_NAME_POLARDBX_COLUMNAR_DAEMON_VERSION = "polardbx_columnar_daemon_version"; + private static final String FIELD_NAME_POLARDBX_COLUMNAR_FW_BRANCH_NAME = "polardbx_columnar_fw_branch_name"; + private static final String FIELD_NAME_POLARDBX_VERSION = "polardbx_version"; + private static final String FIELD_NAME_POLARDBX_SQL_VERSION = "polardbx_sql_version"; + private static final String FIELD_NAME_POLARDBX_CDC_CLIENT_VERSION = "polardbx_cdc_client_version"; + + @JSONField(name = FIELD_NAME_POLARDBX_COLUMNAR_IMAGE_VERSION) + private final String polardbxColumnarImageVersion; + + @JSONField(name = FIELD_NAME_POLARDBX_COLUMNAR_RPM_VERSION) + private final String polardbxColumnarRpmVersion; + + @JSONField(name = FIELD_NAME_POLARDBX_COLUMNAR_DAEMON_VERSION) + private final String polardbxColumnarDaemonVersion; + + @JSONField(name = FIELD_NAME_POLARDBX_COLUMNAR_FW_BRANCH_NAME) + private final String polardbxColumnarFwBranchName; + + @JSONField(name = FIELD_NAME_POLARDBX_VERSION) + private final String polardbxColumnarVersion; + + @JSONField(name = FIELD_NAME_POLARDBX_SQL_VERSION) + private final String polardbxColumnarSqlVersion; + + @JSONField(name = FIELD_NAME_POLARDBX_CDC_CLIENT_VERSION) + private final String polardbxColumnarCdcClientVersion; + + @JSONCreator + public ColumnarVersions( + @JSONField(name = FIELD_NAME_POLARDBX_COLUMNAR_IMAGE_VERSION) + String polardbxColumnarImageVersion, + @JSONField(name = FIELD_NAME_POLARDBX_COLUMNAR_RPM_VERSION) + String polardbxColumnarRpmVersion, + @JSONField(name = FIELD_NAME_POLARDBX_COLUMNAR_DAEMON_VERSION) + String polardbxColumnarDaemonVersion, + @JSONField(name = FIELD_NAME_POLARDBX_COLUMNAR_FW_BRANCH_NAME) + String polardbxColumnarFwBranchName, + @JSONField(name = FIELD_NAME_POLARDBX_VERSION) + String polardbxColumnarVersion, + @JSONField(name = FIELD_NAME_POLARDBX_SQL_VERSION) + String polardbxColumnarSqlVersion, + @JSONField(name = FIELD_NAME_POLARDBX_CDC_CLIENT_VERSION) + 
String polardbxColumnarCdcClientVersion) { + this.polardbxColumnarImageVersion = polardbxColumnarImageVersion; + this.polardbxColumnarRpmVersion = polardbxColumnarRpmVersion; + this.polardbxColumnarDaemonVersion = polardbxColumnarDaemonVersion; + this.polardbxColumnarFwBranchName = polardbxColumnarFwBranchName; + this.polardbxColumnarVersion = polardbxColumnarVersion; + this.polardbxColumnarSqlVersion = polardbxColumnarSqlVersion; + this.polardbxColumnarCdcClientVersion = polardbxColumnarCdcClientVersion; + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CreateCheckCciTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CreateCheckCciTask.java new file mode 100644 index 000000000..a2dc90982 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CreateCheckCciTask.java @@ -0,0 +1,159 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.columnar; + +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.columnar.checker.CciChecker; +import com.alibaba.polardbx.executor.columnar.checker.CciFastChecker; +import com.alibaba.polardbx.executor.columnar.checker.ICciChecker; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.ExecUtils; +import com.alibaba.polardbx.gms.config.impl.InstConfUtil; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; +import lombok.Getter; +import lombok.SneakyThrows; + +import java.util.ArrayList; +import java.util.List; + +@TaskName(name = "CreateCheckCciTask") +@Getter +public class CreateCheckCciTask extends BaseDdlTask { + private static final Logger logger = LoggerFactory.getLogger(CreateCheckCciTask.class); + + private final String logicalTableName; + private final String indexName; + private final boolean skipCheck; + + public CreateCheckCciTask(String schemaName, String logicalTableName, String indexName, + boolean skipCheck) { + super(schemaName); + this.logicalTableName = logicalTableName; + this.indexName = indexName; + this.skipCheck = skipCheck; + } + + @Override + @SneakyThrows + protected void beforeTransaction(ExecutionContext executionContext) { + if (null != executionContext.getParamManager() + && executionContext.getParamManager().getBoolean(ConnectionParams.SKIP_CHECK_CCI_TASK)) { + // Session variable is true, skip it. 
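+            // Note: this session-level SKIP_CHECK_CCI_TASK takes precedence over the
+            // instance-level value read via InstConfUtil below; either being true skips the check.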
+            return;
+        }
+
+        if (InstConfUtil.getBool(ConnectionParams.SKIP_CHECK_CCI_TASK)) {
+            // Global variable is true, skip it.
+            return;
+        }
+
+        Runnable recover = null;
+        if (executionContext.isForce2pcDuringCciCheck()) {
+            recover = ExecUtils.forceAllTrx2PC();
+        }
+
+        ICciChecker checker;
+        if (executionContext.isEnableFastCciChecker()) {
+            checker = new CciFastChecker(schemaName, logicalTableName, indexName);
+        } else {
+            checker = new CciChecker(schemaName, logicalTableName, indexName);
+        }
+
+        try {
+            long start = System.nanoTime();
+            checker.check(executionContext, recover);
+            SQLRecorderLogger.ddlLogger.info((executionContext.isEnableFastCciChecker() ? "Fast " : "")
+                + "Check cci " + schemaName + "." + logicalTableName + "." + indexName
+                + " cost " + (System.nanoTime() - start) / 1_000_000 + " ms.");
+        } catch (Throwable t) {
+            throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR,
+                (executionContext.isEnableFastCciChecker() ? "Fast " : "")
+                    + "Check cci failed, caused by " + t.getMessage());
+        } finally {
+            if (null != recover) {
+                recover.run();
+            }
+        }
+
+        List reports = new ArrayList<>();
+        boolean success = true;
+        if (!checker.getCheckReports(reports)) {
+            for (String error : reports) {
+                SQLRecorderLogger.ddlLogger.error(
+                    (executionContext.isEnableFastCciChecker() ? "Fast " : "")
+                        + "Check cci " + logicalTableName + "." + indexName + " error: " + error);
+            }
+            success = false;
+        }
+
+        if (success) {
+            return;
+        }
+
+        if (executionContext.isEnableFastCciChecker()) {
+            // Fast checker failed, retry with the naive checker.
+            checker = new CciChecker(schemaName, logicalTableName, indexName);
+            // Drop the fast checker's reports so a successful retry does not resurface them.
+            reports.clear();
+            recover = null;
+            if (executionContext.isForce2pcDuringCciCheck()) {
+                recover = ExecUtils.forceAllTrx2PC();
+            }
+            try {
+                long start = System.nanoTime();
+                checker.check(executionContext, recover);
+                SQLRecorderLogger.ddlLogger.info("Check cci " + schemaName + "." + logicalTableName
+                    + "." + indexName + " cost " + (System.nanoTime() - start) / 1_000_000 + " ms.");
+            } catch (Throwable t) {
+                // The naive checker runs in this branch, so the error is not labeled "Fast".
+                throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR,
+                    "Check cci failed, caused by " + t.getMessage());
+            } finally {
+                if (null != recover) {
+                    recover.run();
+                }
+            }
+            success = true;
+            if (!checker.getCheckReports(reports)) {
+                for (String error : reports) {
+                    SQLRecorderLogger.ddlLogger.error(
+                        "Check cci " + logicalTableName + "." + indexName + " error: " + error);
+                }
+                success = false;
+            }
+        }
+
+        if (!success) {
+            throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, reports.get(0));
+        }
+    }
+
+    @Override
+    protected boolean isSkipExecute() {
+        return this.skipCheck;
+    }
+
+    @Override
+    protected boolean isSkipRollback() {
+        return this.skipCheck;
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CreateColumnarIndexValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CreateColumnarIndexValidateTask.java
new file mode 100644
index 000000000..173c94654
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CreateColumnarIndexValidateTask.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.columnar; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.executor.ddl.job.task.BaseValidateTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.ddl.job.validator.GsiValidator; +import com.alibaba.polardbx.executor.ddl.job.validator.IndexValidator; +import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import org.apache.commons.lang3.StringUtils; + +@TaskName(name = "CreateColumnarIndexValidateTask") +@Getter +public class CreateColumnarIndexValidateTask extends BaseValidateTask { + + final private String primaryTableName; + final private String indexName; + + @JSONCreator + public CreateColumnarIndexValidateTask(String schemaName, String primaryTableName, String indexName) { + super(schemaName); + this.primaryTableName = primaryTableName; + this.indexName = indexName; + if (StringUtils.isEmpty(indexName) || StringUtils.isEmpty(primaryTableName)) { + throw new TddlRuntimeException(ErrorCode.ERR_GMS_UNEXPECTED, "validate", + "The table name shouldn't be empty"); + } + } + + @Override + protected void executeImpl(ExecutionContext executionContext) { + if (!TableValidator.checkIfTableExists(schemaName, primaryTableName)) { + throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_TABLE, schemaName, primaryTableName); + } + IndexValidator.validateIndexNonExistence(schemaName, primaryTableName, indexName); + //IndexValidator.validateColumnarIndexNonExistence(schemaName, primaryTableName); + IndexValidator.validateColumnarIndexNumLimit(schemaName, primaryTableName, + executionContext.getParamManager().getLong(ConnectionParams.MAX_CCI_COUNT)); + + GsiValidator.validateGsiSupport(schemaName, executionContext); + GsiValidator.validateCreateOnGsi(schemaName, indexName, executionContext); + } + + @Override + protected String remark() { + return "|primaryTableName: " + primaryTableName; + } + +} \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CreateMockColumnarIndexTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CreateMockColumnarIndexTask.java new file mode 100644 index 000000000..9d31f80cf --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/CreateMockColumnarIndexTask.java @@ -0,0 +1,78 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.columnar; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; + +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; +import java.sql.Connection; + +@TaskName(name = "CreateMockColumnarIndexTask") +@Getter +@Setter +public class CreateMockColumnarIndexTask extends BaseDdlTask { + + private final static String HANDLER_CLASS = + "com.alibaba.polardbx.columnar.core.ddl.handler.CreateMockColumnarIndexHandle"; + + private final static String HANDLER_METHOD = "handle"; + + private String tableName; + + private String mciFormat; + + private long ddlId; + + @JSONCreator + public CreateMockColumnarIndexTask(String schemaName, String tableName, long ddlId) { + super(schemaName); + this.tableName = tableName; + this.ddlId = ddlId; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + Method method = null; + Object handler = null; + try { + Class clazz = Class.forName(HANDLER_CLASS); + Constructor constructor = clazz.getConstructor(String.class, String.class, long.class, String.class); + handler = constructor.newInstance(schemaName, tableName, ddlId, mciFormat); + method = clazz.getMethod(HANDLER_METHOD); + } catch (Exception e) { + throw new UnsupportedOperationException("create mock columnar index is unsupported", e); + } + + try { + method.invoke(handler); + } catch (Exception e) { + throw new TddlNestableRuntimeException(e); + } + } + + @Override + protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/DropColumnarTableRemoveMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/DropColumnarTableRemoveMetaTask.java new file mode 100644 index 000000000..62c051126 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/DropColumnarTableRemoveMetaTask.java @@ -0,0 +1,54 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.columnar; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.executor.ddl.job.meta.CommonMetaChanger; +import com.alibaba.polardbx.executor.ddl.job.meta.TableMetaChanger; +import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +import java.sql.Connection; + +@Getter +@TaskName(name = "DropColumnarTableRemoveMetaTask") +public class DropColumnarTableRemoveMetaTask extends BaseGmsTask { + + private final String columnarTableName; + + @JSONCreator + public DropColumnarTableRemoveMetaTask(String schemaName, String logicalTableName, String columnarTableName) { + super(schemaName, logicalTableName); + this.columnarTableName = columnarTableName; + } + + @Override + public void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + TableMetaChanger.removeColumnarTableMeta(metaDbConnection, schemaName, columnarTableName); + CommonMetaChanger.finalOperationsOnSuccess(schemaName, columnarTableName); + } + + @Override + protected void onExecutionSuccess(ExecutionContext executionContext) { + TableMetaChanger.afterRemovingTableMeta(schemaName, logicalTableName); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/DropMockColumnarIndexTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/DropMockColumnarIndexTask.java new file mode 100644 index 000000000..d69749be6 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/DropMockColumnarIndexTask.java @@ -0,0 +1,76 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.columnar; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import lombok.Setter; + +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; +import java.sql.Connection; + +@TaskName(name = "DropMockColumnarIndexTask") +@Getter +@Setter +public class DropMockColumnarIndexTask extends BaseDdlTask { + + private final static String HANDLER_CLASS = + "com.alibaba.polardbx.columnar.core.ddl.handler.DropMockColumnarIndexHandle"; + + private final static String HANDLER_METHOD = "handle"; + + private String primaryTableName; + + private String indexTableName; + + @JSONCreator + public DropMockColumnarIndexTask(String schemaName, String primaryTableName, String indexTableName) { + super(schemaName); + this.primaryTableName = primaryTableName; + this.indexTableName = indexTableName; + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + Method method = null; + Object handler = null; + try { + Class clazz = Class.forName(HANDLER_CLASS); + Constructor constructor = clazz.getConstructor(String.class, String.class, String.class); + handler = constructor.newInstance(schemaName, primaryTableName, indexTableName); + method = clazz.getMethod(HANDLER_METHOD); + } catch (Exception e) { + throw new UnsupportedOperationException("drop mock columnar index is unsupported", e); + } + + try { + method.invoke(handler); + } catch (Exception e) { + throw new TddlNestableRuntimeException(e); + } + } + + @Override + protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + throw new UnsupportedOperationException(); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/InsertColumnarIndexMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/InsertColumnarIndexMetaTask.java new file mode 100644 index 000000000..b753bc9c0 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/InsertColumnarIndexMetaTask.java @@ -0,0 +1,170 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.columnar; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.executor.common.ExecutorContext; +import com.alibaba.polardbx.executor.ddl.job.meta.GsiMetaChanger; +import com.alibaba.polardbx.executor.ddl.job.meta.TableMetaChanger; +import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.gms.GmsTableMetaManager; +import com.alibaba.polardbx.executor.gsi.GsiUtils; +import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.executor.sync.TableMetaChangeSyncAction; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.metadb.table.IndexStatus; +import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; +import com.alibaba.polardbx.gms.metadb.table.TablesExtRecord; +import com.alibaba.polardbx.gms.sync.SyncScope; +import com.alibaba.polardbx.gms.util.AppNameUtil; +import com.alibaba.polardbx.gms.util.InstIdUtil; +import com.alibaba.polardbx.optimizer.config.table.GsiMetaManager; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.google.common.collect.ImmutableList; +import lombok.Getter; + +import java.sql.Connection; +import java.util.ArrayList; +import java.util.List; + +/** + * generate & insert columnar index table's metadata based on primaryTable's metadata + *

+ * will insert into [indexes] + */ +@TaskName(name = "InsertColumnarIndexMetaTask") +@Getter +public class InsertColumnarIndexMetaTask extends BaseGmsTask { + + final String indexName; + final List columns; + final List coverings; + final boolean unique; + final String indexComment; + final String indexType; + final IndexStatus indexStatus; + Integer originTableType; + final boolean clusteredIndex; + + @JSONCreator + public InsertColumnarIndexMetaTask(String schemaName, + String logicalTableName, + String indexName, + List columns, + List coverings, + boolean unique, + String indexComment, + String indexType, + IndexStatus indexStatus, + boolean clusteredIndex) { + super(schemaName, logicalTableName); + this.indexName = indexName; + this.columns = ImmutableList.copyOf(columns); + this.coverings = ImmutableList.copyOf(coverings); + this.unique = unique; + this.indexComment = indexComment == null ? "" : indexComment; + this.indexType = indexType; + this.indexStatus = indexStatus; + this.clusteredIndex = clusteredIndex; + onExceptionTryRecoveryThenRollback(); + } + + @Override + protected void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { + final List indexRecords = new ArrayList<>(); + + final String appName = AppNameUtil.buildAppNameByInstAndDbName(InstIdUtil.getInstId(), schemaName); + final TableMeta primaryTableMeta = + GmsTableMetaManager.fetchTableMeta(metaDbConnection, + schemaName, appName, logicalTableName, null, null, true, true); + + FailPoint.assertNotNull(primaryTableMeta); + primaryTableMeta.setSchemaName(schemaName); + GsiUtils.buildIndexMetaFromPrimary( + indexRecords, + primaryTableMeta, + indexName, + columns, + coverings, + !unique, + indexComment, + indexType, + indexStatus, + clusteredIndex, + true, + null, + null + ); + + TableInfoManager tableInfoManager = new TableInfoManager(); + tableInfoManager.setConnection(metaDbConnection); + TablesExtRecord indexTablesExtRecord = + tableInfoManager.queryTableExt(schemaName, indexName, false); + if (indexTablesExtRecord != null) { + originTableType = indexTablesExtRecord.tableType; + } + tableInfoManager.setConnection(null); + + //1. insert metadata into indexes + GsiMetaChanger.addIndexMeta(metaDbConnection, schemaName, indexRecords); + + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + //2. update tables_ext.table_type to GSI + // TODO: Actually, unlike creating GSI, there is no record in tables_ext for columnar index, + // this update does nothing, maybe we should remove this step + GsiMetaChanger.changeTableToColumnar(metaDbConnection, schemaName, indexName); + + //3. notify listeners + TableMetaChanger.notifyCreateColumnarIndex(metaDbConnection, schemaName, logicalTableName); + LOGGER.info(String.format("Insert ColumnarIndex meta. 
schema:%s, table:%s, index:%s, state:%s",
+            schemaName,
+            logicalTableName,
+            indexName,
+            indexStatus.name()
+        ));
+    }
+
+    /**
+     * see undoCreateGsi()
+     */
+    @Override
+    protected void rollbackImpl(Connection metaDbConnection, ExecutionContext executionContext) {
+        if (originTableType != null) {
+            ExecutorContext
+                .getContext(schemaName)
+                .getGsiManager()
+                .getGsiMetaManager()
+                .changeTablesExtType(metaDbConnection, schemaName, indexName, originTableType);
+        }
+        GsiMetaChanger.removeIndexMeta(metaDbConnection, schemaName, logicalTableName, indexName);
+
+        // The sync has to be successful for the rollback to continue.
+        SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName), SyncScope.ALL);
+        executionContext.refreshTableMeta();
+
+        LOGGER.info(String.format("Rollback Insert ColumnarIndex meta. schema:%s, table:%s, index:%s, state:%s",
+            schemaName,
+            logicalTableName,
+            indexName,
+            indexStatus.name()
+        ));
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/RenameColumnarTablesMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/RenameColumnarTablesMetaTask.java
new file mode 100644
index 000000000..6009f4c6b
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/RenameColumnarTablesMetaTask.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.columnar; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.executor.ddl.job.meta.TableMetaChanger; +import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +import java.sql.Connection; + +@Getter +@TaskName(name = "RenameColumnarTablesMetaTask") +public class RenameColumnarTablesMetaTask extends BaseGmsTask { + private final String primaryTableName; + private final String newPrimaryTableName; + private final long versionId; + + @JSONCreator + public RenameColumnarTablesMetaTask(String schemaName, String primaryTableName, String newPrimaryTableName, + long versionId) { + super(schemaName, primaryTableName); + this.primaryTableName = primaryTableName; + this.newPrimaryTableName = newPrimaryTableName; + this.versionId = versionId; + onExceptionTryRecoveryThenRollback(); + } + + @Override + public void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + TableMetaChanger.renameColumnarTableMeta(metaDbConnection, schemaName, primaryTableName, newPrimaryTableName, + versionId, jobId); + } + + @Override + public void rollbackImpl(Connection metaDbConnection, ExecutionContext executionContext) { + TableMetaChanger.renameColumnarTableMeta(metaDbConnection, schemaName, newPrimaryTableName, primaryTableName, + versionId, jobId); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/WaitColumnarTableCreationTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/WaitColumnarTableCreationTask.java new file mode 100644 index 000000000..ea20adec7 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/columnar/WaitColumnarTableCreationTask.java @@ -0,0 +1,134 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.executor.ddl.job.task.columnar;
+
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.common.utils.GeneralUtil;
+import com.alibaba.polardbx.common.utils.logger.Logger;
+import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
+import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName;
+import com.alibaba.polardbx.executor.gms.util.ColumnarTransactionUtils;
+import com.alibaba.polardbx.executor.sync.ColumnarSnapshotUpdateSyncAction;
+import com.alibaba.polardbx.executor.sync.SyncManagerHelper;
+import com.alibaba.polardbx.gms.metadb.MetaDbDataSource;
+import com.alibaba.polardbx.gms.metadb.table.ColumnarTableMappingRecord;
+import com.alibaba.polardbx.gms.metadb.table.ColumnarTableStatus;
+import com.alibaba.polardbx.gms.metadb.table.TableInfoManager;
+import com.alibaba.polardbx.gms.sync.SyncScope;
+import com.alibaba.polardbx.gms.topology.SystemDbHelper;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.statistics.SQLRecorderLogger;
+import lombok.Getter;
+import lombok.SneakyThrows;
+
+import java.sql.Connection;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
+
+@TaskName(name = "WaitColumnarTableCreationTask")
+@Getter
+public class WaitColumnarTableCreationTask extends BaseDdlTask {
+    private static final Logger logger = LoggerFactory.getLogger(WaitColumnarTableCreationTask.class);
+
+    private final String logicalTableName;
+    private final String indexName;
+    /**
+     * FOR TEST USE ONLY!
+     * If set to true, the ddl returns success right after the CN finishes writing metadata.
+     */
+    private final boolean skipCheck;
+
+    public WaitColumnarTableCreationTask(String schemaName, String logicalTableName, String indexName,
+                                         boolean skipCheck) {
+        super(schemaName);
+        this.logicalTableName = logicalTableName;
+        this.indexName = indexName;
+        this.skipCheck = skipCheck;
+    }
+
+    @Override
+    @SneakyThrows
+    protected void beforeTransaction(ExecutionContext executionContext) {
+        // Wait for the columnar index creation to finish.
+        long start = System.nanoTime();
+        try {
+            while (true) {
+                // Always create a new metadb connection to get the latest snapshot
+                try (Connection conn = Objects.requireNonNull(MetaDbDataSource.getInstance()).getConnection()) {
+                    TableInfoManager tableInfoManager = new TableInfoManager();
+                    tableInfoManager.setConnection(conn);
+
+                    List records =
+                        tableInfoManager.queryColumnarTable(schemaName, logicalTableName, indexName);
+                    if (records.isEmpty()) {
+                        // The columnar index mapping record can no longer be found.
+                        throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR,
+                            "Columnar table mapping record not found.");
+                    }
+                    ColumnarTableMappingRecord record = records.get(0);
+                    if (ColumnarTableStatus.from(record.status) == ColumnarTableStatus.PUBLIC) {
+                        // Creation succeeded.
+                        break;
+                    }
+                    // Extra info means an error occurred while building the index;
+                    // for now simply treat any extra info as an error.
+                    if (record.extra != null && !record.extra.isEmpty()) {
+                        throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, record.extra);
+                    }
+
+                    if (executionContext.getDdlContext().isInterrupted()) {
+                        throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR,
+                            "The job '" + executionContext.getDdlContext().getJobId() + "' has been interrupted");
+                    }
+
+                    TimeUnit.MILLISECONDS.sleep(1000);
+                }
+            }
+        } finally {
+            // Log the total wait time once, when the wait loop exits.
+            SQLRecorderLogger.ddlLogger.info("Wait columnar table creation task ended, cost "
+                + ((System.nanoTime() - start) / 1_000_000) + " ms.");
+        }
+        if (executionContext.getDdlContext().isInterrupted()) {
+            throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Wait columnar table task is interrupted.");
+        }
+    }
+
+    @Override
+    protected void onExecutionSuccess(ExecutionContext executionContext) {
+        Long latestTso = ColumnarTransactionUtils.getLatestTsoFromGms();
+        if (latestTso != null) {
+            try {
+                SyncManagerHelper.sync(new ColumnarSnapshotUpdateSyncAction(latestTso),
+                    SystemDbHelper.DEFAULT_DB_NAME, SyncScope.ALL);
+            } catch (Throwable t) {
+                LOGGER.error(String.format("Error occurred while updating tso after columnar index creation, tso: %d.",
+                    latestTso));
+                throw GeneralUtil.nestedException(t);
+            }
+        }
+    }
+
+    @Override
+    protected boolean isSkipExecute() {
+        return this.skipCheck;
+    }
+
+    @Override
+    protected boolean isSkipRollback() {
+        return this.skipCheck;
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/factory/GsiTaskFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/factory/GsiTaskFactory.java
index 8ed3dba93..0f7047b54 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/factory/GsiTaskFactory.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/factory/GsiTaskFactory.java
@@ -28,31 +28,43 @@ import com.alibaba.polardbx.druid.sql.ast.statement.SQLTableElement;
 import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlCreateTableStatement;
 import com.alibaba.polardbx.druid.util.JdbcConstants;
+import com.alibaba.polardbx.executor.changeset.ChangeSetManager;
 import com.alibaba.polardbx.executor.ddl.job.builder.AlterTableBuilder;
 import com.alibaba.polardbx.executor.ddl.job.builder.DdlPhyPlanBuilder;
 import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData;
 import com.alibaba.polardbx.executor.ddl.job.factory.AlterTableJobFactory;
+import com.alibaba.polardbx.executor.ddl.job.factory.gsi.CreateGsiCheckTask;
 import com.alibaba.polardbx.executor.ddl.job.task.backfill.LogicalTableBackFillTask;
 import com.alibaba.polardbx.executor.ddl.job.task.backfill.LogicalTableColumnBackFillTask;
 import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask;
 import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcGsiDdlMarkTask;
+import com.alibaba.polardbx.executor.ddl.job.task.changset.ChangeSetCatchUpTask;
+import com.alibaba.polardbx.executor.ddl.job.task.changset.ChangeSetStartTask;
 import com.alibaba.polardbx.executor.ddl.job.task.gsi.GsiDropColumnCleanUpTask;
 import com.alibaba.polardbx.executor.ddl.job.task.gsi.GsiInsertColumnMetaTask;
 import com.alibaba.polardbx.executor.ddl.job.task.gsi.GsiUpdateIndexColumnStatusTask;
 import com.alibaba.polardbx.executor.ddl.job.task.gsi.GsiUpdateIndexStatusTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
+import com.alibaba.polardbx.executor.gsi.GsiUtils;
 import com.alibaba.polardbx.gms.metadb.table.ColumnStatus;
 import com.alibaba.polardbx.gms.metadb.table.IndexStatus;
+import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager;
 import com.alibaba.polardbx.optimizer.config.table.TableMeta;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.partition.PartitionInfo;
 import org.apache.calcite.plan.RelOptCluster;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang.StringUtils;
 
 import java.util.ArrayList;
+import
java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; + +import static com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils.genChangeSetCatchUpTasks; /** * an interesting gsi-relevant task generator @@ -89,14 +101,19 @@ public static List createGlobalIndexTasks(String schemaName, */ public static List addGlobalIndexTasks(String schemaName, String primaryTableName, + String oldIndexName, String indexName, boolean stayAtDeleteOnly, boolean stayAtWriteOnly, boolean stayAtBackFill, Map virtualColumns, + Map backfillColumnMap, + List modifyStringColumns, PhysicalPlanData physicalPlanData, TableMeta tableMeta, boolean repartition, + boolean modifyColumn, + boolean mirrorCopy, String originalDdl) { List taskList = new ArrayList<>(); @@ -143,7 +160,10 @@ public static List addGlobalIndexTasks(String schemaName, if (stayAtWriteOnly) { return taskList; } - taskList.add(new LogicalTableBackFillTask(schemaName, primaryTableName, indexName, virtualColumns)); + String backFillSourceTableName = mirrorCopy ? oldIndexName : primaryTableName; + taskList.add( + new LogicalTableBackFillTask(schemaName, backFillSourceTableName, indexName, virtualColumns, + backfillColumnMap, modifyStringColumns, false, mirrorCopy, modifyColumn)); if (stayAtBackFill) { return taskList; } @@ -159,17 +179,135 @@ public static List addGlobalIndexTasks(String schemaName, return taskList; } + public static List addGlobalIndexTasksChangeSet(String schemaName, + String primaryTableName, + String oldIndexName, + String indexName, + boolean stayAtDeleteOnly, + boolean stayAtWriteOnly, + boolean stayAtBackFill, + Map virtualColumns, + Map backfillColumnMap, + List modifyStringColumns, + boolean modifyColumn, + boolean mirrorCopy, + PhysicalPlanData physicalPlanData, + PartitionInfo indexPartitionInfo) { + List taskList = new ArrayList<>(); + // start + Long changeSetId = ChangeSetManager.getChangeSetId(); + Map> sourcePhyTableNames = GsiUtils.getPhyTables(schemaName, oldIndexName); + Map targetTableLocations = + GsiUtils.getPhysicalTableMapping(schemaName, oldIndexName, null, physicalPlanData, indexPartitionInfo); + + ChangeSetStartTask changeSetStartTask = new ChangeSetStartTask( + schemaName, oldIndexName, sourcePhyTableNames, + ComplexTaskMetaManager.ComplexTaskType.ONLINE_MODIFY_COLUMN, + changeSetId + ); + + Map catchUpTasks = genChangeSetCatchUpTasks( + schemaName, + oldIndexName, + indexName, + sourcePhyTableNames, + targetTableLocations, + ComplexTaskMetaManager.ComplexTaskType.ONLINE_MODIFY_COLUMN, + changeSetId + ); + + CreateGsiCheckTask createGsiCheckTask = + new CreateGsiCheckTask(schemaName, primaryTableName, indexName, virtualColumns, backfillColumnMap); + + DdlTask absentTask = new GsiUpdateIndexStatusTask( + schemaName, + primaryTableName, + indexName, + IndexStatus.CREATING, + IndexStatus.CHANGE_SET_START, + true + ).onExceptionTryRecoveryThenRollback(); + + DdlTask deleteOnlyTask = new GsiUpdateIndexStatusTask( + schemaName, + primaryTableName, + indexName, + IndexStatus.CHANGE_SET_START, + IndexStatus.DELETE_ONLY, + true + ).onExceptionTryRecoveryThenRollback(); + + DdlTask writeOnlyTask = new GsiUpdateIndexStatusTask( + schemaName, + primaryTableName, + indexName, + IndexStatus.DELETE_ONLY, + IndexStatus.WRITE_ONLY, + true + ).onExceptionTryRecoveryThenRollback(); + + DdlTask writeReOrgTask = new GsiUpdateIndexStatusTask( + schemaName, + primaryTableName, + indexName, + IndexStatus.WRITE_ONLY, + IndexStatus.WRITE_REORG, + true + 
).onExceptionTryRecoveryThenRollback(); + + DdlTask publicTask = new GsiUpdateIndexStatusTask( + schemaName, + primaryTableName, + indexName, + IndexStatus.WRITE_REORG, + IndexStatus.PUBLIC, + true + ).onExceptionTryRecoveryThenRollback(); + + taskList.add(changeSetStartTask); + taskList.add(absentTask); + taskList.add(new TableSyncTask(schemaName, primaryTableName)); + // backfill + taskList.add( + new LogicalTableBackFillTask(schemaName, oldIndexName, indexName, virtualColumns, backfillColumnMap, + modifyStringColumns, true, mirrorCopy, modifyColumn)); + taskList.add(catchUpTasks.get(ChangeSetManager.ChangeSetCatchUpStatus.ABSENT.toString())); + taskList.add(deleteOnlyTask); + taskList.add(new TableSyncTask(schemaName, primaryTableName)); + if (stayAtDeleteOnly) { + taskList.add(catchUpTasks.get(ChangeSetManager.ChangeSetCatchUpStatus.WRITE_ONLY_FINAL.toString())); + return taskList; + } + taskList.add(catchUpTasks.get(ChangeSetManager.ChangeSetCatchUpStatus.DELETE_ONLY.toString())); + taskList.add(writeOnlyTask); + taskList.add(new TableSyncTask(schemaName, primaryTableName)); + taskList.add(catchUpTasks.get(ChangeSetManager.ChangeSetCatchUpStatus.WRITE_ONLY_FINAL.toString())); + if (stayAtWriteOnly) { + return taskList; + } + taskList.add(createGsiCheckTask); + if (stayAtBackFill) { + return taskList; + } + taskList.add(writeReOrgTask); + taskList.add(new TableSyncTask(schemaName, primaryTableName)); + taskList.add(publicTask); + taskList.add(new TableSyncTask(schemaName, primaryTableName)); + return taskList; + } + /** * for * drop index * alter table drop index */ - public static List dropGlobalIndexTasks(String schemaName, - String primaryTableName, - String indexName) { + public static List dropIndexTasks(String schemaName, + String primaryTableName, + String indexName, + List> statusChangeList) { List taskList = new ArrayList<>(); - for (Pair statusChange : IndexStatus.dropGsiStatusChange()) { + for (Pair statusChange : statusChangeList) { DdlTask changeStatus = new GsiUpdateIndexStatusTask( schemaName, primaryTableName, @@ -185,6 +323,18 @@ public static List dropGlobalIndexTasks(String schemaName, return taskList; } + public static List dropGlobalIndexTasks(String schemaName, + String primaryTableName, + String indexName) { + return dropIndexTasks(schemaName, primaryTableName, indexName, IndexStatus.dropGsiStatusChange()); + } + + public static List dropColumnarIndexTasks(String schemaName, + String primaryTableName, + String indexName) { + return dropIndexTasks(schemaName, primaryTableName, indexName, IndexStatus.dropColumnarIndexStatusChange()); + } + /** * see changeAddColumnsStatusWithGsi() *
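 * (Editorial sketch, not part of this patch: dropIndexTasks above generalizes the old
 * dropGlobalIndexTasks by parameterizing the (before, after) status-transition list, so the
 * GSI and columnar drop paths share one walk. Assuming Pair exposes getKey()/getValue(),
 * each step expands to roughly:
 *
 *     taskList.add(new GsiUpdateIndexStatusTask(schemaName, primaryTableName, indexName,
 *         statusChange.getKey(), statusChange.getValue(), ...).onExceptionTryRecoveryThenRollback());
 *     taskList.add(new TableSyncTask(schemaName, primaryTableName));
 *
 * with IndexStatus.dropGsiStatusChange() and IndexStatus.dropColumnarIndexStatusChange()
 * supplying the respective transition chains.)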

diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/AlterPartitionCountSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/AlterPartitionCountSyncTask.java index 391b148d8..89297e316 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/AlterPartitionCountSyncTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/AlterPartitionCountSyncTask.java @@ -23,6 +23,7 @@ import com.alibaba.polardbx.executor.sync.AlterPartitionCountSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -60,7 +61,8 @@ protected void executeImpl(ExecutionContext executionContext) { executionContext.getConnId(), executionContext.getTraceId() ), - schemaName + schemaName, + SyncScope.ALL ); LOGGER.info( diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/AlterPartitionCountValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/AlterPartitionCountValidateTask.java index 118fbab54..074254607 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/AlterPartitionCountValidateTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/AlterPartitionCountValidateTask.java @@ -24,6 +24,7 @@ import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; +import org.apache.calcite.sql.SqlKind; import org.apache.commons.lang3.StringUtils; import java.util.List; @@ -67,6 +68,7 @@ public AlterPartitionCountValidateTask(String schemaName, protected void executeImpl(ExecutionContext executionContext) { TableValidator.validateTableExistence(schemaName, primaryTable, executionContext); GsiValidator.validateGsiSupport(schemaName, executionContext); + TableValidator.validateTableWithCCI(schemaName, primaryTable, executionContext, SqlKind.ALTER_TABLE); // validate current gsi existence tableNameMap.keySet().stream() diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/AlterTableRemovePartitioningValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/AlterTableRemovePartitioningValidateTask.java index c7ac968fa..b96c4b45c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/AlterTableRemovePartitioningValidateTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/AlterTableRemovePartitioningValidateTask.java @@ -23,6 +23,7 @@ import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; +import org.apache.calcite.sql.SqlKind; import java.util.List; import java.util.stream.Collectors; @@ -66,6 +67,7 @@ protected void executeImpl(ExecutionContext executionContext) { TableValidator.validateTableExistence(schemaName, primaryTable, executionContext); GsiValidator.validateGsiSupport(schemaName, executionContext); GsiValidator.validateCreateOnGsi(schemaName, indexTableName, executionContext); + TableValidator.validateTableWithCCI(schemaName, primaryTable, executionContext, SqlKind.ALTER_TABLE); 
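+        // The guard above mirrors the other repartition-style validate tasks: presumably
+        // validateTableWithCCI rejects this ALTER TABLE variant while the primary table
+        // still carries a clustered columnar index.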
if (addGsiNames != null) { addGsiNames.forEach(e -> GsiValidator.validateCreateOnGsi(schemaName, e, executionContext)); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/AlterTableRepartitionValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/AlterTableRepartitionValidateTask.java index 4d10dd12b..045cc1667 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/AlterTableRepartitionValidateTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/AlterTableRepartitionValidateTask.java @@ -34,6 +34,7 @@ import com.alibaba.polardbx.optimizer.partition.PartitionInfo; import com.google.common.collect.ImmutableList; import lombok.Getter; +import org.apache.calcite.sql.SqlKind; import org.apache.commons.lang3.StringUtils; import java.util.ArrayList; @@ -108,6 +109,7 @@ public void doValidate(ExecutionContext executionContext) { IndexValidator.validateIndexNonExistence(schemaName, primaryTableName, indexName); GsiValidator.validateGsiSupport(schemaName, executionContext); GsiValidator.validateCreateOnGsi(schemaName, indexName, executionContext); + TableValidator.validateTableWithCCI(schemaName, primaryTableName, executionContext, SqlKind.ALTER_TABLE); // validate gsi add columns if (addColumnsIndexes != null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/CciUpdateIndexStatusTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/CciUpdateIndexStatusTask.java new file mode 100644 index 000000000..436f3107a --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/CciUpdateIndexStatusTask.java @@ -0,0 +1,175 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.gsi; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.executor.sync.TableMetaChangeSyncAction; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.metadb.table.ColumnarTableStatus; +import com.alibaba.polardbx.gms.metadb.table.IndexStatus; +import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; +import com.alibaba.polardbx.gms.sync.SyncScope; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +import java.sql.Connection; + +/** + * change CCI status + *
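+ * e.g. 'drop cci' moves the columnar table status PUBLIC -> ABSENT/DROP while the index
+ * status is walked down accordingly (see the dropCci handling in executeImpl)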
+ * will update [indexes] + * will update [tables (version) ] + */ +@TaskName(name = "CciUpdateIndexStatusTask") +@Getter +public class CciUpdateIndexStatusTask extends BaseGmsTask { + + final String indexName; + final ColumnarTableStatus beforeColumnarTableStatus; + final ColumnarTableStatus afterColumnarTableStatus; + final IndexStatus beforeIndexStatus; + final IndexStatus afterIndexStatus; + final boolean needOnlineSchemaChange; + + @JSONCreator + public CciUpdateIndexStatusTask(String schemaName, + String logicalTableName, + String indexName, + ColumnarTableStatus beforeColumnarTableStatus, + ColumnarTableStatus afterColumnarTableStatus, + IndexStatus beforeIndexStatus, + IndexStatus afterIndexStatus, + boolean needOnlineSchemaChange) { + super(schemaName, logicalTableName); + this.indexName = indexName; + this.beforeColumnarTableStatus = beforeColumnarTableStatus; + this.afterColumnarTableStatus = afterColumnarTableStatus; + this.beforeIndexStatus = beforeIndexStatus; + this.afterIndexStatus = afterIndexStatus; + this.needOnlineSchemaChange = needOnlineSchemaChange; + } + + /** + * + */ + @Override + protected void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { + // Update columnar table status. + if (beforeColumnarTableStatus != afterColumnarTableStatus) { + TableInfoManager.updateColumnarTableStatus( + metaDbConnection, + schemaName, + logicalTableName, + indexName, + beforeColumnarTableStatus, + afterColumnarTableStatus); + } + + // Update columnar index status. + TableInfoManager.updateIndexStatus( + metaDbConnection, + schemaName, + logicalTableName, + indexName, + afterIndexStatus + ); + + //in the case of 'drop cci', cancel ddl is not supported once the JOB has begun + boolean dropCci = (beforeColumnarTableStatus == ColumnarTableStatus.PUBLIC) + && (afterColumnarTableStatus == ColumnarTableStatus.ABSENT + || afterColumnarTableStatus == ColumnarTableStatus.DROP); + if (dropCci) { + updateSupportedCommands(true, false, metaDbConnection); + } + + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + LOGGER.info( + String.format("[Job: %d Task: %d] Update CCI state. " + + "schema:%s, table:%s, index:%s, before columnar table state:%s, after columnar table state:%s, " + + "before index state:%s, after index state:%s", + getJobId(), + getTaskId(), + schemaName, + logicalTableName, + indexName, + beforeColumnarTableStatus.name(), + afterColumnarTableStatus.name(), + beforeIndexStatus.name(), + afterIndexStatus.name())); + } + + @Override + protected void rollbackImpl(Connection metaDbConnection, ExecutionContext executionContext) { + if (beforeColumnarTableStatus != afterColumnarTableStatus) { + TableInfoManager.updateColumnarTableStatus( + metaDbConnection, + schemaName, + logicalTableName, + indexName, + afterColumnarTableStatus, + beforeColumnarTableStatus); + } + + TableInfoManager.updateIndexStatus( + metaDbConnection, + schemaName, + logicalTableName, + indexName, + beforeIndexStatus + ); + + //sync have to be successful to continue + if (needOnlineSchemaChange) { + SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName), SyncScope.ALL); + } + + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + LOGGER.info( + String.format("[Job: %d Task: %d] Rollback Update CCI table state. 
" + + "schema:%s, table:%s, index:%s, before columnar table state:%s, after columnar table state:%s, " + + "before index state:%s, after index state:%s", + getJobId(), + getTaskId(), + schemaName, + logicalTableName, + indexName, + beforeColumnarTableStatus.name(), + afterColumnarTableStatus.name(), + beforeIndexStatus.name(), + afterIndexStatus.name())); + } + + @Override + protected void onRollbackSuccess(ExecutionContext executionContext) { + if (needOnlineSchemaChange) { + super.onRollbackSuccess(executionContext); + } + } + + @Override + protected String remark() { + return String.format("|CCI(%s) %s to %s", indexName, beforeColumnarTableStatus.name(), + afterColumnarTableStatus.name()); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/CheckColumnTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/CheckColumnTask.java index 7a3728543..5a5e50145 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/CheckColumnTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/CheckColumnTask.java @@ -16,7 +16,6 @@ package com.alibaba.polardbx.executor.ddl.job.task.gsi; -import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.executor.columns.ColumnChecker; import com.alibaba.polardbx.executor.ddl.job.task.BaseBackfillTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/CheckGsiTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/CheckGsiTask.java index f7149e283..801403838 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/CheckGsiTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/CheckGsiTask.java @@ -82,6 +82,7 @@ public class CheckGsiTask extends BaseBackfillTask { private final boolean primaryBroadCast; private final boolean gsiBroadCast; private Map virtualColumnMap; + private Map backfillColumnMap; public static CheckGsiTask create(CheckGsiPrepareData prepareData) { return new CheckGsiTask( @@ -95,12 +96,14 @@ public static CheckGsiTask create(CheckGsiPrepareData prepareData) { prepareData.getSpeedLimit(), prepareData.getSpeedMin(), prepareData.getParallelism(), - prepareData.getEarlyFailNumber() + prepareData.getEarlyFailNumber(), + prepareData.isUseBinary() ), prepareData.isCorrect(), prepareData.getExtraCmd(), false, false, + null, null ); } @@ -116,7 +119,8 @@ public CheckGsiTask(String schemaName, String extraCmd, boolean primaryBroadCast, boolean gsiBroadCast, - Map virtualColumnMap) { + Map virtualColumnMap, + Map backfillColumnMap) { super(schemaName); this.tableName = tableName; this.indexName = indexName; @@ -128,6 +132,7 @@ public CheckGsiTask(String schemaName, this.primaryBroadCast = primaryBroadCast; this.gsiBroadCast = gsiBroadCast; this.virtualColumnMap = virtualColumnMap; + this.backfillColumnMap = backfillColumnMap; } @Override @@ -140,7 +145,7 @@ protected void executeImpl(ExecutionContext ec) { return; } - if (isUseFastChecker(ec) && MapUtils.isNotEmpty(virtualColumnMap)) { + if (MapUtils.isNotEmpty(virtualColumnMap) || MapUtils.isNotEmpty(backfillColumnMap)) { throw GeneralUtil.nestedException( "Fast checker failed. 
Please try to rollback/recover this job"); } @@ -217,7 +222,7 @@ public void checkInBackfill(ExecutionContext ec) { return; } - if (isUseFastChecker(ec) && MapUtils.isNotEmpty(virtualColumnMap)) { + if (MapUtils.isNotEmpty(virtualColumnMap) || MapUtils.isNotEmpty(backfillColumnMap)) { throw GeneralUtil.nestedException( "Fast checker failed. Please try to rollback/recover this job"); } @@ -262,15 +267,12 @@ private boolean fastCheck(ExecutionContext ec) { private boolean fastCheck(ExecutionContext ec, Map> srcPhyDbAndTables, Map> dstPhyDbAndTables) { long startTime = System.currentTimeMillis(); - SQLRecorderLogger.ddlLogger.warn(MessageFormat + SQLRecorderLogger.ddlLogger.info(MessageFormat .format("FastChecker for GSI, schema [{0}] logical src table [{1}] logic dst table [{2}] start", schemaName, tableName, indexName)); - final int parallelism = ec.getParamManager().getInt(ConnectionParams.GSI_FASTCHECKER_PARALLELISM); - final int maxRetryTimes = ec.getParamManager().getInt(ConnectionParams.FASTCHECKER_RETRY_TIMES); - FastChecker fastChecker = - GsiFastChecker.create(schemaName, tableName, indexName, virtualColumnMap, parallelism, ec); + GsiFastChecker.create(schemaName, tableName, indexName, virtualColumnMap, backfillColumnMap, ec); if (dstPhyDbAndTables != null) { fastChecker.setDstPhyDbAndTables(dstPhyDbAndTables); } @@ -281,40 +283,22 @@ private boolean fastCheck(ExecutionContext ec, Map> srcPhyDb boolean fastCheckResult = false; - int tryTimes = 0; - while (tryTimes < maxRetryTimes && !fastCheckResult) { - try { - fastCheckResult = fastChecker.check(ec); - } catch (TddlNestableRuntimeException e) { - if (StringUtils.containsIgnoreCase(e.getMessage(), "acquire lock timeout")) { - //if acquire lock timeout, we will retry - if (tryTimes < maxRetryTimes - 1) { - try { - TimeUnit.MILLISECONDS.sleep(2000L * (1 + tryTimes)); - } catch (InterruptedException ex) { - throw new TddlNestableRuntimeException(ex); - } - } else { - throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_CHECKER, - "gsi fastchecker retry exceed max times", e); - } - } else { - //other exception, we simply throw out - throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_CHECKER, - "gsi fastchecker failed to check", e); - } - } finally { - tryTimes += 1; - - SQLRecorderLogger.ddlLogger.warn(MessageFormat.format( - "FastChecker for GSI, schema [{0}] logical src table [{1}] logic dst table [{2}] finish, time use [{3}], check result [{4}]", - schemaName, tableName, indexName, - (System.currentTimeMillis() - startTime) / 1000.0, - fastCheckResult ? "pass" : "not pass") - ); - if (!fastCheckResult) { - EventLogger.log(EventType.DDL_WARN, "FastChecker failed"); - } + try { + fastCheckResult = fastChecker.check(ec); + } catch (TddlNestableRuntimeException e) { + throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_CHECKER, + "gsi fastchecker failed to check", e); + } finally { + SQLRecorderLogger.ddlLogger.warn(MessageFormat.format( + "FastChecker for GSI, schema [{0}] logical src table [{1}] logic dst table [{2}] finish, time use [{3}], check result [{4}]", + schemaName, tableName, indexName, + (System.currentTimeMillis() - startTime) / 1000.0, + fastCheckResult ? 
"pass" : "not pass") + ); + if (!fastCheckResult) { + EventLogger.log(EventType.DDL_WARN, "FastChecker failed"); + } else { + EventLogger.log(EventType.DDL_INFO, "FastChecker succeed"); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/ClearAutoPartitionFlagTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/ClearAutoPartitionFlagTask.java index 3f16e3019..effa4c53a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/ClearAutoPartitionFlagTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/ClearAutoPartitionFlagTask.java @@ -22,6 +22,7 @@ import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.sync.TableMetaChangeSyncAction; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; @@ -57,7 +58,8 @@ public void rollbackImpl(Connection metaDbConnection, ExecutionContext execution FailPoint.injectRandomExceptionFromHint(executionContext); FailPoint.injectRandomSuspendFromHint(executionContext); //sync have to be successful to continue - SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName)); + SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName), + SyncScope.ALL); executionContext.refreshTableMeta(); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/CreateGsiPhyDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/CreateGsiPhyDdlTask.java index 0b84f2c6c..28be6bbab 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/CreateGsiPhyDdlTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/CreateGsiPhyDdlTask.java @@ -17,29 +17,15 @@ package com.alibaba.polardbx.executor.ddl.job.task.gsi; import com.alibaba.fastjson.annotation.JSONCreator; -import com.alibaba.polardbx.executor.ddl.job.builder.DropPhyTableBuilder; -import com.google.common.collect.Lists; import com.alibaba.polardbx.executor.ddl.job.builder.DdlPhyPlanBuilder; -import com.alibaba.polardbx.executor.ddl.job.builder.DropPartitionTableBuilder; -import com.alibaba.polardbx.executor.ddl.job.builder.gsi.DropGlobalIndexBuilder; +import com.alibaba.polardbx.executor.ddl.job.builder.DropPhyTableBuilder; import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData; import com.alibaba.polardbx.executor.ddl.job.task.BasePhyDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; -import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; -import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.core.planner.SqlConverter; import com.alibaba.polardbx.optimizer.core.rel.PhyDdlTableOperation; -import com.alibaba.polardbx.optimizer.core.rel.ReplaceTableNameWithQuestionMarkVisitor; -import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalDropTable; import lombok.Getter; -import org.apache.calcite.plan.RelOptCluster; import org.apache.calcite.rel.RelNode; -import org.apache.calcite.rel.ddl.DropTable; -import org.apache.calcite.sql.SqlDdlNodes; -import 
org.apache.calcite.sql.SqlDropTable; -import org.apache.calcite.sql.SqlIdentifier; -import org.apache.calcite.sql.parser.SqlParserPos; import java.util.ArrayList; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/CreateGsiValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/CreateGsiValidateTask.java index bf93743c1..906d8c054 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/CreateGsiValidateTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/CreateGsiValidateTask.java @@ -19,24 +19,17 @@ import com.alibaba.fastjson.annotation.JSONCreator; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; -import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.executor.ddl.job.task.BaseValidateTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.ddl.job.validator.GsiValidator; import com.alibaba.polardbx.executor.ddl.job.validator.IndexValidator; -import com.alibaba.polardbx.executor.ddl.job.validator.TableGroupValidator; import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; -import com.alibaba.polardbx.gms.tablegroup.PartitionGroupRecord; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; -import com.alibaba.polardbx.gms.tablegroup.TableGroupRecord; -import com.alibaba.polardbx.gms.util.GroupInfoUtil; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; import org.apache.commons.lang3.StringUtils; -import java.util.HashSet; import java.util.List; -import java.util.Set; @TaskName(name = "CreateGsiValidateTask") @Getter diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/DropColumnarTableHideTableMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/DropColumnarTableHideTableMetaTask.java new file mode 100644 index 000000000..c0109021c --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/DropColumnarTableHideTableMetaTask.java @@ -0,0 +1,63 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.gsi; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.executor.ddl.job.meta.TableMetaChanger; +import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.executor.sync.TableMetaChangeSyncAction; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.sync.SyncScope; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +import java.sql.Connection; + +@Getter +@TaskName(name = "DropColumnarTableHideTableMetaTask") +public class DropColumnarTableHideTableMetaTask extends BaseGmsTask { + + protected final String indexName; + + @JSONCreator + public DropColumnarTableHideTableMetaTask(String schemaName, String logicalTableName, String indexName) { + super(schemaName, logicalTableName); + this.indexName = indexName; + } + + @Override + public void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { + updateSupportedCommands(true, false, metaDbConnection); + TableMetaChanger.hideTableMeta(metaDbConnection, schemaName, indexName); + TableMetaChanger.notifyDropColumnarIndex(metaDbConnection, schemaName, logicalTableName); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + } + + @Override + public void rollbackImpl(Connection metaDbConnection, ExecutionContext executionContext) { + TableMetaChanger.showTableMeta(metaDbConnection, schemaName, indexName); + TableMetaChanger.notifyCreateColumnarIndex(metaDbConnection, schemaName, logicalTableName); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + //sync have to be successful to continue + SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, indexName), SyncScope.ALL); + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/DropGsiTableHideTableMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/DropGsiTableHideTableMetaTask.java index 695c3a4f6..fde02942a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/DropGsiTableHideTableMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/DropGsiTableHideTableMetaTask.java @@ -23,6 +23,7 @@ import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.sync.TableMetaChangeSyncAction; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -58,7 +59,7 @@ public void rollbackImpl(Connection metaDbConnection, ExecutionContext execution FailPoint.injectRandomExceptionFromHint(executionContext); FailPoint.injectRandomSuspendFromHint(executionContext); //sync have to be successful to continue - SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, indexName)); + SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, indexName), SyncScope.ALL); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/DropPartitionGsiPhyDdlTask.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/DropPartitionGsiPhyDdlTask.java index 40b17aae1..00c137fb4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/DropPartitionGsiPhyDdlTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/DropPartitionGsiPhyDdlTask.java @@ -17,21 +17,14 @@ package com.alibaba.polardbx.executor.ddl.job.task.gsi; import com.alibaba.fastjson.annotation.JSONCreator; -import com.alibaba.polardbx.executor.ddl.job.builder.DdlPhyPlanBuilder; import com.alibaba.polardbx.executor.ddl.job.builder.gsi.DropGlobalIndexBuilder; import com.alibaba.polardbx.executor.ddl.job.builder.gsi.DropPartitionGlobalIndexBuilder; -import com.alibaba.polardbx.executor.ddl.job.converter.DdlJobDataConverter; import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData; import com.alibaba.polardbx.executor.ddl.job.task.BasePhyDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; -import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.core.rel.PhyDdlTableOperation; import lombok.Getter; -import java.util.List; -import java.util.Map; - @Getter @TaskName(name = "DropPartitionGsiPhyDdlTask") public class DropPartitionGsiPhyDdlTask extends BasePhyDdlTask { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiInsertColumnMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiInsertColumnMetaTask.java index e403655b6..28b6d17ff 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiInsertColumnMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiInsertColumnMetaTask.java @@ -17,27 +17,24 @@ package com.alibaba.polardbx.executor.ddl.job.task.gsi; import com.alibaba.fastjson.annotation.JSONCreator; -import com.alibaba.polardbx.common.exception.TddlRuntimeException; -import com.alibaba.polardbx.common.exception.code.ErrorCode; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableStatement; -import com.alibaba.polardbx.executor.ddl.job.task.basic.spec.AlterTableRollbacker; -import com.alibaba.polardbx.executor.sync.SyncManagerHelper; -import com.alibaba.polardbx.executor.sync.TableMetaChangeSyncAction; -import com.alibaba.polardbx.gms.metadb.table.IndexVisibility; -import com.alibaba.polardbx.optimizer.parse.FastsqlUtils; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; import com.alibaba.polardbx.executor.common.ExecutorContext; import com.alibaba.polardbx.executor.ddl.job.meta.GsiMetaChanger; import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.gsi.GsiUtils; +import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.executor.sync.TableMetaChangeSyncAction; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.gms.metadb.table.IndexStatus; +import com.alibaba.polardbx.gms.metadb.table.IndexVisibility; +import com.alibaba.polardbx.gms.metadb.table.IndexVisibility; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.GsiMetaManager; import 
com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; import lombok.Getter; import java.sql.Connection; @@ -128,7 +125,7 @@ protected void rollbackImpl(Connection metaDbConnection, ExecutionContext execut } //sync have to be successful to continue - SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName)); + SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName), SyncScope.ALL); executionContext.refreshTableMeta(); LOGGER.info(String.format("Rollback Change GSI meta. schema:%s, table:%s, index:%s", diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiInsertIndexMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiInsertIndexMetaTask.java index 832a8dc0f..a58420e8d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiInsertIndexMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiInsertIndexMetaTask.java @@ -24,13 +24,13 @@ import com.alibaba.polardbx.executor.gms.GmsTableMetaManager; import com.alibaba.polardbx.executor.gsi.GsiUtils; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; -import com.alibaba.polardbx.executor.sync.TableMetaChangePreemptiveSyncAction; import com.alibaba.polardbx.executor.sync.TableMetaChangeSyncAction; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.gms.metadb.table.IndexStatus; import com.alibaba.polardbx.gms.metadb.table.IndexVisibility; import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; import com.alibaba.polardbx.gms.metadb.table.TablesExtRecord; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.util.AppNameUtil; import com.alibaba.polardbx.gms.util.InstIdUtil; import com.alibaba.polardbx.optimizer.config.table.GsiMetaManager; @@ -42,7 +42,7 @@ import java.sql.Connection; import java.util.ArrayList; import java.util.List; -import java.util.concurrent.TimeUnit; +import java.util.Map; /** * generate & insert gsiTable's metadata based on primaryTable's metadata @@ -67,6 +67,8 @@ public class GsiInsertIndexMetaTask extends BaseGmsTask { final boolean clusteredIndex; final IndexVisibility visibility; final boolean needOnlineSchemaChange; + final Map columnMapping; + final List addNewColumns; @JSONCreator public GsiInsertIndexMetaTask(String schemaName, @@ -80,7 +82,9 @@ public GsiInsertIndexMetaTask(String schemaName, IndexStatus indexStatus, boolean clusteredIndex, final IndexVisibility visibility, - boolean needOnlineSchemaChange) { + boolean needOnlineSchemaChange, + Map columnMapping, + List addNewColumns) { super(schemaName, logicalTableName); this.indexName = indexName; this.columns = ImmutableList.copyOf(columns); @@ -92,6 +96,8 @@ public GsiInsertIndexMetaTask(String schemaName, this.clusteredIndex = clusteredIndex; this.visibility = visibility; this.needOnlineSchemaChange = needOnlineSchemaChange; + this.columnMapping = columnMapping; + this.addNewColumns = addNewColumns; onExceptionTryRecoveryThenRollback(); } @@ -118,7 +124,10 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi indexComment, indexType, indexStatus, - clusteredIndex + clusteredIndex, + false, + columnMapping, + addNewColumns ); TableInfoManager tableInfoManager = new 
TableInfoManager(); @@ -174,7 +183,7 @@ protected void rollbackImpl(Connection metaDbConnection, ExecutionContext execut //sync have to be successful to continue if (needOnlineSchemaChange) { - SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName)); + SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName), SyncScope.ALL); executionContext.refreshTableMeta(); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiStatisticsInfoSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiStatisticsInfoSyncTask.java index 4693bc1cd..ecdd79f7e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiStatisticsInfoSyncTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiStatisticsInfoSyncTask.java @@ -21,6 +21,7 @@ import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.sync.GsiStatisticsSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -68,7 +69,8 @@ protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionC @Override protected void onExecutionSuccess(ExecutionContext executionContext) { try { - SyncManagerHelper.sync(new GsiStatisticsSyncAction(schemaName, gsiName, newValue, alterKind)); + SyncManagerHelper.sync(new GsiStatisticsSyncAction(schemaName, gsiName, newValue, alterKind), + SyncScope.ALL); } catch (Throwable ignore) { LOGGER.error( "error occurs while execute GsiStatisticsSyncAction" @@ -80,9 +82,11 @@ protected void onExecutionSuccess(ExecutionContext executionContext) { protected void onRollbackSuccess(ExecutionContext executionContext) { try { if (alterKind == GsiStatisticsSyncAction.RENAME_RECORD) { - SyncManagerHelper.sync(new GsiStatisticsSyncAction(schemaName, newValue, gsiName, alterKind)); + SyncManagerHelper.sync(new GsiStatisticsSyncAction(schemaName, newValue, gsiName, alterKind), + SyncScope.ALL); } else { - SyncManagerHelper.sync(new GsiStatisticsSyncAction(schemaName, gsiName, newValue, alterKind)); + SyncManagerHelper.sync(new GsiStatisticsSyncAction(schemaName, gsiName, newValue, alterKind), + SyncScope.ALL); } } catch (Throwable ignore) { LOGGER.error( diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiUpdateIndexColumnStatusTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiUpdateIndexColumnStatusTask.java index 106e8c412..5be448327 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiUpdateIndexColumnStatusTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiUpdateIndexColumnStatusTask.java @@ -26,6 +26,7 @@ import com.alibaba.polardbx.executor.sync.TableMetaChangeSyncAction; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.gms.metadb.table.ColumnStatus; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -109,7 +110,7 @@ protected void rollbackImpl(Connection metaDbConnection, ExecutionContext execut FailPoint.injectRandomSuspendFromHint(executionContext); //sync have to be successful to continue - SyncManagerHelper.sync(new 
TableMetaChangeSyncAction(schemaName, logicalTableName)); + SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName), SyncScope.ALL); executionContext.refreshTableMeta(); LOGGER.info(String diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiUpdateIndexStatusTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiUpdateIndexStatusTask.java index 1248c4a54..66abd3306 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiUpdateIndexStatusTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiUpdateIndexStatusTask.java @@ -24,6 +24,7 @@ import com.alibaba.polardbx.executor.sync.TableMetaChangeSyncAction; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.gms.metadb.table.IndexStatus; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -93,7 +94,7 @@ protected void rollbackImpl(Connection metaDbConnection, ExecutionContext execut //sync have to be successful to continue if (needOnlineSchemaChange) { - SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName)); + SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName), SyncScope.ALL); } FailPoint.injectRandomExceptionFromHint(executionContext); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiUpdateIndexVisibilityTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiUpdateIndexVisibilityTask.java index 23e72c9ef..5aa97fcff 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiUpdateIndexVisibilityTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/GsiUpdateIndexVisibilityTask.java @@ -23,6 +23,7 @@ import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.sync.TableMetaChangeSyncAction; import com.alibaba.polardbx.gms.metadb.table.IndexVisibility; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -88,7 +89,7 @@ protected void rollbackImpl(Connection metaDbConnection, ExecutionContext execut beforeVisibility ); - SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName)); + SyncManagerHelper.sync(new TableMetaChangeSyncAction(schemaName, logicalTableName), SyncScope.ALL); LOGGER.info( String.format("Rollback Update GSI table visibility. 
schema:%s, table:%s, index:%s, from:%s to:%s", diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/LockTableSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/LockTableSyncTask.java index 45e2fbc61..ad6518153 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/LockTableSyncTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/LockTableSyncTask.java @@ -24,6 +24,7 @@ import com.alibaba.polardbx.executor.sync.LockTableSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -62,6 +63,7 @@ protected void executeImpl(ExecutionContext executionContext) { executionContext.getTraceId() ), schemaName, + SyncScope.ALL, true ); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/ModifyPartitionKeyCutOverTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/ModifyPartitionKeyCutOverTask.java deleted file mode 100644 index 49e22280d..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/ModifyPartitionKeyCutOverTask.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.ddl.job.task.gsi; - -import com.alibaba.polardbx.common.properties.ConnectionParams; -import com.alibaba.polardbx.common.utils.GeneralUtil; -import com.alibaba.polardbx.executor.ddl.job.meta.misc.RepartitionMetaChanger; -import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; -import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; -import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; -import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; -import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import lombok.Getter; -import org.apache.commons.lang.StringUtils; - -import java.sql.Connection; -import java.util.Map; - -/** - * @author wumu - */ -@Getter -@TaskName(name = "ModifyPartitionKeyCutOverTask") -public class ModifyPartitionKeyCutOverTask extends BaseGmsTask { - private final Map tableNameMap; - private final boolean autoPartition; - private final boolean single; - private final boolean broadcast; - - public ModifyPartitionKeyCutOverTask(final String schemaName, - final String logicalTableName, - Map tableNameMap, - boolean autoPartition, - boolean single, - boolean broadcast) { - super(schemaName, logicalTableName); - this.tableNameMap = tableNameMap; - this.autoPartition = autoPartition; - this.single = single; - this.broadcast = broadcast; - onExceptionTryRollback(); - } - - @Override - protected void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { - LOGGER.info( - String.format( - "[alter table modify sharding key or add drop primary key] start change meta during cutOver for primary table: %s.%s", - schemaName, logicalTableName) - ); - updateSupportedCommands(true, false, metaDbConnection); - //allowing use hint to skip clean up stage - final String skipCutover = - executionContext.getParamManager().getString(ConnectionParams.REPARTITION_SKIP_CUTOVER); - if (StringUtils.equalsIgnoreCase(skipCutover, Boolean.TRUE.toString())) { - return; - } - - RepartitionMetaChanger.alterTaleModifyColumnCutOver( - metaDbConnection, - schemaName, - logicalTableName, - tableNameMap, - autoPartition, - single, - broadcast - ); - FailPoint.injectRandomExceptionFromHint(executionContext); - FailPoint.injectRandomSuspendFromHint(executionContext); - - LOGGER.info( - String.format( - "[alter table modify sharding key or add drop primary key] finish change meta during cutOver for primary table: %s.%s", - schemaName, logicalTableName) - ); - } - - @Override - protected void onExecutionSuccess(ExecutionContext executionContext) { - //sync for CutOver should keep atomic, so we won't do notify here - //see RepartitionSyncAction.java - } - - @Override - protected void updateTableVersion(Connection metaDbConnection) { - //sync for CutOver should keep atomic, so we won't do notify here - //see RepartitionSyncAction.java - try { - TableInfoManager.updateTableVersion4Repartition(schemaName, logicalTableName, metaDbConnection); - } catch (Exception e) { - throw GeneralUtil.nestedException(e); - } - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/ModifyPartitionKeyValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/ModifyPartitionKeyValidateTask.java deleted file mode 100644 index 7e15069e5..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/ModifyPartitionKeyValidateTask.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding 
Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.ddl.job.task.gsi; - -import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; -import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; -import lombok.Getter; - -import java.util.List; -import java.util.Map; - -/** - * @author wumu - */ -@Getter -@TaskName(name = "ModifyPartitionKeyValidateTask") -public class ModifyPartitionKeyValidateTask extends AlterPartitionCountValidateTask { - public ModifyPartitionKeyValidateTask(String schemaName, String primaryTable, - Map tableNameMap, - List tableGroupConfigList) { - super(schemaName, primaryTable, tableNameMap, tableGroupConfigList); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/RebuildTableCutOverTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/RebuildTableCutOverTask.java new file mode 100644 index 000000000..04ca018b9 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/RebuildTableCutOverTask.java @@ -0,0 +1,112 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.gsi; + +import com.alibaba.polardbx.common.eventlogger.EventLogger; +import com.alibaba.polardbx.common.eventlogger.EventType; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.ddl.job.meta.misc.RepartitionMetaChanger; +import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; +import org.apache.commons.lang.StringUtils; + +import java.sql.Connection; +import java.util.Map; + +/** + * @author wumu + */ +@Getter +@TaskName(name = "RebuildTableCutOverTask") +public class RebuildTableCutOverTask extends BaseGmsTask { + private final Map tableNameMap; + private final boolean autoPartition; + private final boolean single; + private final boolean broadcast; + + public RebuildTableCutOverTask(final String schemaName, + final String logicalTableName, + Map tableNameMap, + boolean autoPartition, + boolean single, + boolean broadcast) { + super(schemaName, logicalTableName); + this.tableNameMap = tableNameMap; + this.autoPartition = autoPartition; + this.single = single; + this.broadcast = broadcast; + onExceptionTryRollback(); + } + + @Override + protected void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { + LOGGER.info( + String.format( + "[rebuild table] start change meta during cutOver for primary table: %s.%s", + schemaName, logicalTableName) + ); + updateSupportedCommands(true, false, metaDbConnection); + //allowing use hint to skip clean up stage + final String skipCutover = + executionContext.getParamManager().getString(ConnectionParams.REPARTITION_SKIP_CUTOVER); + if (StringUtils.equalsIgnoreCase(skipCutover, Boolean.TRUE.toString())) { + return; + } + + RepartitionMetaChanger.alterTaleModifyColumnCutOver( + metaDbConnection, + schemaName, + logicalTableName, + tableNameMap, + autoPartition, + single, + broadcast + ); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + LOGGER.info( + String.format( + "[rebuild table] finish change meta during cutOver for primary table: %s.%s", + schemaName, logicalTableName) + ); + + EventLogger.log(EventType.DDL_INFO, "Online modify column success"); + } + + @Override + protected void onExecutionSuccess(ExecutionContext executionContext) { + //sync for CutOver should keep atomic, so we won't do notify here + //see RepartitionSyncAction.java + } + + @Override + protected void updateTableVersion(Connection metaDbConnection) { + //sync for CutOver should keep atomic, so we won't do notify here + //see RepartitionSyncAction.java + try { + TableInfoManager.updateTableVersion4Repartition(schemaName, logicalTableName, metaDbConnection); + } catch (Exception e) { + throw GeneralUtil.nestedException(e); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/RebuildTableValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/RebuildTableValidateTask.java new file mode 100644 index 000000000..0594a98ea --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/RebuildTableValidateTask.java 
@@ -0,0 +1,37 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.gsi; + +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; +import lombok.Getter; + +import java.util.List; +import java.util.Map; + +/** + * @author wumu + */ +@Getter +@TaskName(name = "RebuildTableValidateTask") +public class RebuildTableValidateTask extends AlterPartitionCountValidateTask { + public RebuildTableValidateTask(String schemaName, String primaryTable, + Map tableNameMap, + List tableGroupConfigList) { + super(schemaName, primaryTable, tableNameMap, tableGroupConfigList); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/RepartitionCutOverTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/RepartitionCutOverTask.java index b79a890d9..d487b25f4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/RepartitionCutOverTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/RepartitionCutOverTask.java @@ -37,18 +37,21 @@ public class RepartitionCutOverTask extends BaseGmsTask { final boolean single; final boolean broadcast; final boolean auto; + final boolean repartitionGsi; public RepartitionCutOverTask(final String schemaName, final String logicalTableName, final String targetTableName, final boolean single, final boolean broadcast, - final boolean auto) { + final boolean auto, + final boolean repartitionGsi) { super(schemaName, logicalTableName); this.targetTableName = targetTableName; this.single = single; this.broadcast = broadcast; this.auto = auto; + this.repartitionGsi = repartitionGsi; onExceptionTryRollback(); } @@ -73,7 +76,7 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi single, broadcast, auto, - false + repartitionGsi ); FailPoint.injectRandomExceptionFromHint(executionContext); FailPoint.injectRandomSuspendFromHint(executionContext); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/RepartitionSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/RepartitionSyncTask.java index f89bbcdae..eb3244d65 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/RepartitionSyncTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/RepartitionSyncTask.java @@ -24,6 +24,7 @@ import com.alibaba.polardbx.executor.sync.RepartitionSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -63,6 +64,7 @@ protected void executeImpl(ExecutionContext 
executionContext) { executionContext.getTraceId() ), schemaName, + SyncScope.ALL, true ); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/StatisticSampleTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/StatisticSampleTask.java index f224a9ed9..ce9930436 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/StatisticSampleTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/StatisticSampleTask.java @@ -19,15 +19,15 @@ import com.alibaba.fastjson.annotation.JSONCreator; import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.gms.util.StatisticUtils; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; -import com.alibaba.polardbx.optimizer.OptimizerContext; -import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticManager; -import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticUtils; +import com.alibaba.polardbx.gms.module.LogLevel; +import com.alibaba.polardbx.gms.module.LogPattern; +import com.alibaba.polardbx.gms.module.Module; +import com.alibaba.polardbx.gms.module.ModuleLogInfo; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; -import static com.alibaba.polardbx.executor.gms.util.StatisticUtils.sampleTable; - @TaskName(name = "StatisticSampleTask") @Getter public class StatisticSampleTask extends BaseDdlTask { @@ -42,10 +42,14 @@ public StatisticSampleTask(String schemaName, @Override protected void beforeTransaction(ExecutionContext executionContext) { - sampleTable(schemaName, logicalTableName); + StatisticUtils.sampleOneTable(schemaName, logicalTableName); FailPoint.injectRandomExceptionFromHint(executionContext); FailPoint.injectRandomSuspendFromHint(executionContext); + ModuleLogInfo.getInstance().logRecord(Module.STATISTICS, LogPattern.PROCESS_END, + new String[] {"ddl sample task", schemaName + "," + logicalTableName}, LogLevel.NORMAL); LOGGER.info(String.format("sample table task. 
schema:%s, table:%s", schemaName, logicalTableName)); + + FailPoint.injectExceptionFromHint("FP_STATISTIC_SAMPLE_ERROR", executionContext); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/TruncateCutOverTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/TruncateCutOverTask.java index 724cb52d0..527edc53c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/TruncateCutOverTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/TruncateCutOverTask.java @@ -89,7 +89,8 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi LOGGER.info( String - .format("finish write meta during truncate cutover for primary table: %s.%s", schemaName, logicalTableName) + .format("finish write meta during truncate cutover for primary table: %s.%s", schemaName, + logicalTableName) ); FailPoint.injectException(FP_TRUNCATE_CUTOVER_FAILED); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/TruncateSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/TruncateSyncTask.java index 4804adac6..f889ed135 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/TruncateSyncTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/TruncateSyncTask.java @@ -22,9 +22,16 @@ import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.ddl.job.validator.GsiValidator; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.executor.sync.TableGroupSyncAction; import com.alibaba.polardbx.executor.sync.TruncateSyncAction; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; +import com.alibaba.polardbx.gms.topology.DbInfoManager; +import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.tablegroup.TableGroupInfoManager; import lombok.Getter; import java.util.Set; @@ -71,8 +78,27 @@ protected void executeImpl(ExecutionContext executionContext) { executionContext.getTraceId() ), schemaName, + SyncScope.ALL, true ); + if (DbInfoManager.getInstance().isNewPartitionDb(schemaName)) { + TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(primaryTableName); + Long tableGroupId = tableMeta.getPartitionInfo().getTableGroupId(); + TableGroupInfoManager tableGroupInfoManager = + OptimizerContext.getContext(schemaName).getTableGroupInfoManager(); + TableGroupConfig tableGroupConfig = tableGroupInfoManager.getTableGroupConfigById(tableGroupId); + String targetTableGroup = tableGroupConfig.getTableGroupRecord().tg_name; + try { + SyncManagerHelper + .sync(new TableGroupSyncAction(schemaName, targetTableGroup), SyncScope.ALL); + } catch (Throwable t) { + LOGGER.error(String.format( + "error occurs while sync table group, schemaName:%s, tableGroupName:%s", schemaName, + targetTableGroup)); + throw GeneralUtil.nestedException(t); + } + } + FailPoint.injectException(FP_TRUNCATE_SYNC_FAILED); LOGGER.info( diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/TruncateTableWithGsiValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/TruncateTableWithGsiValidateTask.java index 296ecefe9..afc19988f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/TruncateTableWithGsiValidateTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/TruncateTableWithGsiValidateTask.java @@ -19,7 +19,6 @@ import com.alibaba.fastjson.annotation.JSONCreator; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.ddl.job.validator.GsiValidator; -import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -28,7 +27,7 @@ @Getter @TaskName(name = "TruncateTableWithGsiValidateTask") -public class TruncateTableWithGsiValidateTask extends DropPartitionTableWithGsiValidateTask{ +public class TruncateTableWithGsiValidateTask extends DropPartitionTableWithGsiValidateTask { @JSONCreator public TruncateTableWithGsiValidateTask(String schemaName, String primaryTable, diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/UnlockTableSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/UnlockTableSyncTask.java index 6d9f091af..d1555ac85 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/UnlockTableSyncTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/gsi/UnlockTableSyncTask.java @@ -24,6 +24,7 @@ import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.sync.UnlockTableSyncAction; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -63,6 +64,7 @@ protected void executeImpl(ExecutionContext executionContext) { executionContext.getTraceId() ), schemaName, + SyncScope.ALL, true ); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/localpartition/LocalPartitionValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/localpartition/LocalPartitionValidateTask.java index a14f90933..476f5a0fd 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/localpartition/LocalPartitionValidateTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/localpartition/LocalPartitionValidateTask.java @@ -21,6 +21,7 @@ import com.alibaba.polardbx.executor.common.ExecutorContext; import com.alibaba.polardbx.executor.ddl.job.task.BaseValidateTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; import com.alibaba.polardbx.executor.partitionmanagement.LocalPartitionManager; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.gms.metadb.table.IndexStatus; @@ -32,6 +33,7 @@ import com.alibaba.polardbx.repo.mysql.checktable.TableDescription; import com.alibaba.polardbx.repo.mysql.spi.MyRepository; import lombok.Getter; +import org.apache.calcite.sql.SqlKind; import java.util.ArrayList; import java.util.List; 
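The second thread running through this change is explicit sync scoping: every `SyncManagerHelper.sync` call now passes a `SyncScope` (here always `SyncScope.ALL`) instead of relying on an implicit default. A usage sketch of the plain metadata-change shape, with placeholder schema/table names; the action class and the two-argument call are taken verbatim from the tasks above:

```java
// Usage sketch with placeholder names; both the action and the two-argument
// sync(...) shape are taken verbatim from the tasks in this change.
import com.alibaba.polardbx.executor.sync.SyncManagerHelper;
import com.alibaba.polardbx.executor.sync.TableMetaChangeSyncAction;
import com.alibaba.polardbx.gms.sync.SyncScope;

final class SyncScopeUsageSketch {
    static void broadcastTableMetaChange(String schemaName, String logicalTableName) {
        // Scope is now explicit at every call site; ALL broadcasts the
        // invalidation to every node instead of relying on a default scope.
        SyncManagerHelper.sync(
            new TableMetaChangeSyncAction(schemaName, logicalTableName),
            SyncScope.ALL);
    }
}
```

The longer form used by the lock/repartition/truncate sync tasks, `sync(action, schemaName, SyncScope.ALL, true)`, additionally scopes the action to a schema; judging by the recurring "sync have to be successful to continue" comments, the trailing boolean appears to make sync failure fatal to the task.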
@@ -51,6 +53,7 @@ public LocalPartitionValidateTask(String schemaName, String logicalTableName) { public void executeImpl(ExecutionContext executionContext) { final TableMeta primaryTableMeta = OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(logicalTableName); + TableValidator.validateTableWithCCI(schemaName, logicalTableName, executionContext, SqlKind.LOCAL_PARTITION); final LocalPartitionDefinitionInfo definitionInfo = primaryTableMeta.getLocalPartitionDefinitionInfo(); if (definitionInfo == null) { throw new TddlNestableRuntimeException(String.format( diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/onlinemodifycolumn/OnlineModifyColumnAddMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/onlinemodifycolumn/OnlineModifyColumnAddMetaTask.java index c46c61eb1..2c9f248dc 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/onlinemodifycolumn/OnlineModifyColumnAddMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/onlinemodifycolumn/OnlineModifyColumnAddMetaTask.java @@ -17,18 +17,10 @@ package com.alibaba.polardbx.executor.ddl.job.task.onlinemodifycolumn; import com.alibaba.polardbx.executor.common.ExecutorContext; -import com.alibaba.polardbx.executor.ddl.job.meta.GsiMetaChanger; import com.alibaba.polardbx.executor.ddl.job.meta.TableMetaChanger; import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; -import com.alibaba.polardbx.executor.gsi.GsiUtils; -import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; -import com.alibaba.polardbx.gms.metadb.table.IndexStatus; -import com.alibaba.polardbx.optimizer.OptimizerContext; -import com.alibaba.polardbx.optimizer.config.table.GsiMetaManager; -import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.google.common.collect.Lists; import lombok.Getter; import java.sql.Connection; @@ -80,7 +72,7 @@ protected void rollbackImpl(Connection metaDbConnection, ExecutionContext execut TableMetaChanger.onlineModifyColumnAddColumnRollback(metaDbConnection, schemaName, logicalTableName, newColumnName, oldColumnName, coveringGsi); - for (String gsiName: coveringGsi) { + for (String gsiName : coveringGsi) { ExecutorContext .getContext(executionContext.getSchemaName()) .getGsiManager() diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/onlinemodifycolumn/OnlineModifyColumnDropMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/onlinemodifycolumn/OnlineModifyColumnDropMetaTask.java index b7c348018..9eee2ff41 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/onlinemodifycolumn/OnlineModifyColumnDropMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/onlinemodifycolumn/OnlineModifyColumnDropMetaTask.java @@ -24,8 +24,8 @@ import lombok.Getter; import java.sql.Connection; +import java.util.Collections; import java.util.List; -import java.util.Map; @Getter @TaskName(name = "OnlineModifyColumnDropMetaTask") @@ -71,7 +71,8 @@ protected void executeImpl(Connection metaDbConnection, ExecutionContext executi TableMetaChanger.onlineModifyColumnDropColumn(metaDbConnection, schemaName, logicalTableName, dbIndex, phyTableName, droppedColumn, coveringGsi, gsiDbIndex, 
gsiPhyTableName, unique, keptColumn); - CommonMetaChanger.finalOperationsOnSuccess(schemaName, logicalTableName); + CommonMetaChanger.alterTableColumnFinalOperationsOnSuccess(schemaName, logicalTableName, + Collections.singletonList(oldColumnName)); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/onlinemodifycolumn/OnlineModifyColumnSwapMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/onlinemodifycolumn/OnlineModifyColumnSwapMetaTask.java index e9d8f0526..21a5f214f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/onlinemodifycolumn/OnlineModifyColumnSwapMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/onlinemodifycolumn/OnlineModifyColumnSwapMetaTask.java @@ -16,16 +16,11 @@ package com.alibaba.polardbx.executor.ddl.job.task.onlinemodifycolumn; -import com.alibaba.polardbx.executor.common.ExecutorContext; -import com.alibaba.polardbx.executor.ddl.job.meta.GsiMetaChanger; import com.alibaba.polardbx.executor.ddl.job.meta.TableMetaChanger; import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.gsi.GsiUtils; -import com.alibaba.polardbx.gms.metadb.table.IndexStatus; -import com.alibaba.polardbx.optimizer.config.table.GsiMetaManager; import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.google.common.collect.ImmutableList; import lombok.Getter; import java.sql.Connection; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/rebalance/SyncStoragePoolTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/rebalance/SyncStoragePoolTask.java new file mode 100644 index 000000000..9fd72cd22 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/rebalance/SyncStoragePoolTask.java @@ -0,0 +1,57 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.rebalance; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.executor.ddl.job.task.BaseValidateTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.sync.AlterStoragePoolSyncAction; +import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; +import com.alibaba.polardbx.optimizer.config.schema.DefaultDbSchema; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.locality.StoragePoolManager; +import lombok.Getter; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; + +import java.util.List; + +@Getter +@TaskName(name = "SyncStoragePoolTask") +public class SyncStoragePoolTask extends BaseValidateTask { + List dnIds; + + @JSONCreator + public SyncStoragePoolTask(List dnIds) { + super(DefaultDbSchema.NAME); + this.dnIds = dnIds; + } + + @Override + public void executeImpl(ExecutionContext executionContext) { + StoragePoolManager storagePoolManager = StoragePoolManager.getInstance(); + if (!CollectionUtils.isEmpty(dnIds)) { + String dnIdStr = StringUtils.join(dnIds, ","); + storagePoolManager.shrinkStoragePoolSimply(StoragePoolManager.DEFAULT_STORAGE_POOL_NAME, dnIdStr); + } else { + storagePoolManager.autoExpandDefaultStoragePool(); + } + SyncManagerHelper.sync(new AlterStoragePoolSyncAction("", ""), SyncScope.ALL); + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/rebalance/WriteDataDistLogTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/rebalance/WriteDataDistLogTask.java index 978b97b64..3c34fee45 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/rebalance/WriteDataDistLogTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/rebalance/WriteDataDistLogTask.java @@ -28,10 +28,8 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; -import java.util.ArrayList; import java.util.HashMap; import java.util.Map; -import java.util.stream.Collectors; import static com.alibaba.polardbx.executor.balancer.Balancer.collectBalanceStatsOfDatabase; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/AddStorageInfoTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/AddStorageInfoTask.java index d7042f096..a3cb97bf1 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/AddStorageInfoTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/AddStorageInfoTask.java @@ -24,6 +24,7 @@ import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.sync.AlterStoragePoolSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.topology.DbTopologyManager; import com.alibaba.polardbx.gms.topology.StorageInfoAccessor; import com.alibaba.polardbx.gms.topology.StorageInfoExtraFieldJSON; @@ -157,12 +158,12 @@ protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionC @Override protected void onRollbackSuccess(ExecutionContext executionContext) { //ComplexTaskMetaManager.getInstance().reload(); - 
SyncManagerHelper.sync(new AlterStoragePoolSyncAction("", "")); + SyncManagerHelper.sync(new AlterStoragePoolSyncAction("", ""), SyncScope.ALL); } @Override protected void onExecutionSuccess(ExecutionContext executionContext) { - SyncManagerHelper.sync(new AlterStoragePoolSyncAction("", "")); + SyncManagerHelper.sync(new AlterStoragePoolSyncAction("", ""), SyncScope.ALL); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/AlterDatabaseLocalityTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/AlterDatabaseLocalityTask.java new file mode 100644 index 000000000..ab3fea701 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/AlterDatabaseLocalityTask.java @@ -0,0 +1,79 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.storagepool; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.gms.topology.DbInfoManager; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.locality.LocalityManager; +import lombok.Getter; + +import java.sql.Connection; +import java.util.List; + +@Getter +@TaskName(name = "AlterDatabaseLocalityTask") +// this task only adds meta to the complex_task_outline table, no need to update tableVersion, +// so there is no need to extend BaseGmsTask +public class AlterDatabaseLocalityTask extends BaseDdlTask { + + String schemaName; + + String instId; + + String targetLocality; + + @JSONCreator + public AlterDatabaseLocalityTask(String schemaName, String instId, + String targetLocality) { + super(schemaName); + this.schemaName = schemaName; + this.instId = instId; + this.targetLocality = targetLocality; + } + + public void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { + long dbId = DbInfoManager.getInstance().getDbInfo(schemaName).id; + LocalityManager.getInstance().setLocalityOfDb(dbId, targetLocality); + } + + public void rollbackImpl(Connection metaDbConnection, ExecutionContext executionContext) { +// executeImpl(metaDbConnection, executionContext); + } + + @Override + protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + executeImpl(metaDbConnection, executionContext); + } + + @Override + protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionContext executionContext) { +// rollbackImpl(metaDbConnection, executionContext); + } + + @Override + protected void onRollbackSuccess(ExecutionContext executionContext) { + //ComplexTaskMetaManager.getInstance().reload(); + } + + @Override + protected void onExecutionSuccess(ExecutionContext executionContext) { + } + +} diff --git
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/AppendStorageInfoTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/AppendStorageInfoTask.java index 64b26a565..367eba1be 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/AppendStorageInfoTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/AppendStorageInfoTask.java @@ -17,11 +17,14 @@ package com.alibaba.polardbx.executor.ddl.job.task.storagepool; import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.executor.common.StorageInfoManager; import com.alibaba.polardbx.executor.ddl.job.factory.storagepool.StoragePoolUtils; import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.sync.AlterStoragePoolSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.listener.impl.MetaDbConfigManager; +import com.alibaba.polardbx.gms.listener.impl.MetaDbDataIdBuilder; import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.topology.StorageInfoAccessor; import com.alibaba.polardbx.gms.topology.StorageInfoExtraFieldJSON; @@ -38,6 +41,8 @@ import java.util.Optional; import java.util.stream.Collectors; +import static com.alibaba.polardbx.executor.ddl.job.factory.storagepool.StoragePoolUtils.RECYCLE_STORAGE_POOL; + @Getter @TaskName(name = "AppendStorageInfoTask") // here is add meta to complex_task_outline table, no need to update tableVersion, @@ -50,18 +55,19 @@ public class AppendStorageInfoTask extends BaseDdlTask { List dnIds; - List originalStoragePoolName; + Map originalStorageInfoMap; String undeletableDnId; String storagePoolName; @JSONCreator - public AppendStorageInfoTask(String schemaName, String instId, List originalStoragePoolName, + public AppendStorageInfoTask(String schemaName, String instId, + Map originalStorageInfoMap, List dnIds, String undeletableDnId, String storagePoolName) { super(schemaName); this.schemaName = schemaName; this.instId = instId; - this.originalStoragePoolName = originalStoragePoolName; + this.originalStorageInfoMap = originalStorageInfoMap; this.dnIds = dnIds; this.undeletableDnId = undeletableDnId; this.storagePoolName = storagePoolName; @@ -74,24 +80,35 @@ public void executeImpl(Connection metaDbConnection, ExecutionContext executionC List originalStorageInfoRecords = storageInfoRecords.stream().filter(o -> dnIds.contains(o.storageInstId)).collect(Collectors.toList()); Boolean shrinkRecycleStoragePool = false; + Boolean notifyStorageInfo = false; for (StorageInfoRecord record : originalStorageInfoRecords) { + int status = record.status; StorageInfoExtraFieldJSON extras = Optional.ofNullable(record.extras).orElse(new StorageInfoExtraFieldJSON()); - if (extras.storagePoolName.equalsIgnoreCase(StoragePoolUtils.RECYCLE_STORAGE_POOL)) { + if (extras.storagePoolName.equalsIgnoreCase(RECYCLE_STORAGE_POOL)) { shrinkRecycleStoragePool = true; } + if (status != StorageInfoRecord.STORAGE_STATUS_READY) { + notifyStorageInfo = true; + storageInfoAccessor.updateStorageStatus(record.storageInstId, StorageInfoRecord.STORAGE_STATUS_READY); + } extras.setStoragePoolName(storagePoolName); storageInfoAccessor.updateStoragePoolName(record.storageInstId, extras); } StoragePoolManager storagePoolManager = StoragePoolManager.getInstance(); String 
dnIdStr = StringUtils.join(this.dnIds, ","); - if (!storagePoolName.equalsIgnoreCase(StoragePoolUtils.RECYCLE_STORAGE_POOL) && StringUtils.isEmpty( + if (!storagePoolName.equalsIgnoreCase(RECYCLE_STORAGE_POOL) && StringUtils.isEmpty( undeletableDnId)) { undeletableDnId = dnIds.get(0); } storagePoolManager.appendStoragePool(storagePoolName, dnIdStr, undeletableDnId); - if (shrinkRecycleStoragePool) { - storagePoolManager.shrinkStoragePoolSimply(StoragePoolUtils.RECYCLE_STORAGE_POOL, dnIdStr); + if (shrinkRecycleStoragePool && !storagePoolName.equalsIgnoreCase(RECYCLE_STORAGE_POOL)) { + storagePoolManager.shrinkStoragePoolSimply(RECYCLE_STORAGE_POOL, dnIdStr); + } + if (notifyStorageInfo) { + // update op-version + MetaDbConfigManager.getInstance() + .notify(MetaDbDataIdBuilder.getStorageInfoDataId(instId), metaDbConnection); } } @@ -103,27 +120,37 @@ protected void duringTransaction(Connection metaDbConnection, ExecutionContext e @Override protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionContext executionContext) { // executeImpl(metaDbConnection, executionContext); - Map originalStoragePoolMap = new HashMap<>(); - for (int i = 0; i < originalStoragePoolName.size(); i++) { - originalStoragePoolMap.put(dnIds.get(i), originalStoragePoolName.get(i)); - } StorageInfoAccessor storageInfoAccessor = new StorageInfoAccessor(); storageInfoAccessor.setConnection(metaDbConnection); - List storageInfoRecords = storageInfoAccessor.getStorageInfosByInstId(instId); - List originalStorageInfoRecords = - storageInfoRecords.stream().filter(o -> dnIds.contains(o.storageInstId)).collect(Collectors.toList()); - for (StorageInfoRecord record : originalStorageInfoRecords) { + List storageInfoRecords = storageInfoAccessor.getStorageInfosByInstId(instId) + .stream().filter(o -> dnIds.contains(o.storageInstId)).collect(Collectors.toList()); + Boolean notifyStorageInfo = false; + for (StorageInfoRecord record : storageInfoRecords) { StorageInfoExtraFieldJSON extras = Optional.ofNullable(record.extras).orElse(new StorageInfoExtraFieldJSON()); // String originalStoragePool = originalStoragePoolMap.get(record.storageInstId); - String originalStoragePool = StoragePoolUtils.RECYCLE_STORAGE_POOL; + StorageInfoRecord originalStorageInfoRecord = originalStorageInfoMap.get(record.storageInstId); + String originalStoragePool = + Optional.ofNullable(originalStorageInfoRecord.extras).orElse(new StorageInfoExtraFieldJSON()) + .getStoragePoolName(); + int originalStatus = originalStorageInfoRecord.status; extras.setStoragePoolName(originalStoragePool); storageInfoAccessor.updateStoragePoolName(record.storageInstId, extras); + if (originalStatus != StorageInfoRecord.STORAGE_STATUS_READY) { + notifyStorageInfo = true; + storageInfoAccessor.updateStorageStatus(record.storageInstId, originalStatus); + } // if(record.storageInstId.equals(undeletableDnId)){ // storageInfoAccessor.updateStorageInfoDeletable(undeletableDnId, false); // } } + if (notifyStorageInfo) { + // update op-version + MetaDbConfigManager.getInstance() + .notify(MetaDbDataIdBuilder.getStorageInfoDataId(instId), metaDbConnection); + + } StoragePoolManager storagePoolManager = StoragePoolManager.getInstance(); String dnIdStr = StringUtils.join(this.dnIds, ","); storagePoolManager.shrinkStoragePool(storagePoolName, dnIdStr, undeletableDnId); @@ -131,12 +158,12 @@ protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionC @Override protected void onRollbackSuccess(ExecutionContext executionContext) { - 
SyncManagerHelper.sync(new AlterStoragePoolSyncAction("", "")); + SyncManagerHelper.sync(new AlterStoragePoolSyncAction("", ""), SyncScope.ALL); } @Override protected void onExecutionSuccess(ExecutionContext executionContext) { - SyncManagerHelper.sync(new AlterStoragePoolSyncAction("", "")); + SyncManagerHelper.sync(new AlterStoragePoolSyncAction("", ""), SyncScope.ALL); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/DeleteAllStorageInfoTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/DeleteAllStorageInfoTask.java index b53d22e83..3c80df3e0 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/DeleteAllStorageInfoTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/DeleteAllStorageInfoTask.java @@ -21,20 +21,17 @@ import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.sync.AlterStoragePoolSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.topology.StorageInfoAccessor; import com.alibaba.polardbx.gms.topology.StorageInfoExtraFieldJSON; import com.alibaba.polardbx.gms.topology.StorageInfoRecord; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.locality.StoragePoolManager; import lombok.Getter; -import org.apache.commons.lang.StringUtils; import java.sql.Connection; import java.util.List; import java.util.Optional; -import java.util.stream.Collectors; - -import static com.alibaba.polardbx.executor.ddl.job.factory.storagepool.StoragePoolUtils.RECYCLE_STORAGE_POOL; @Getter @TaskName(name = "DeleteAllStorageInfoTask") @@ -76,12 +73,13 @@ protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionC @Override protected void onRollbackSuccess(ExecutionContext executionContext) { - SyncManagerHelper.sync(new AlterStoragePoolSyncAction("", "")); + SyncManagerHelper.sync(new AlterStoragePoolSyncAction("", ""), SyncScope.ALL); } @Override protected void onExecutionSuccess(ExecutionContext executionContext) { - SyncManagerHelper.sync(new AlterStoragePoolSyncAction("", "")); + SyncManagerHelper.sync(new AlterStoragePoolSyncAction("", ""), + SyncScope.ALL); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/DeleteStorageInfoTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/DeleteStorageInfoTask.java index f5e83f2d0..02f1bc841 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/DeleteStorageInfoTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/DeleteStorageInfoTask.java @@ -18,7 +18,6 @@ import com.alibaba.fastjson.annotation.JSONCreator; import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; -import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.sync.AlterStoragePoolSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; @@ -108,12 +107,14 @@ protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionC @Override protected void onRollbackSuccess(ExecutionContext executionContext) { - SyncManagerHelper.sync(new 
AlterStoragePoolSyncAction("", "")); + SyncManagerHelper.sync(new AlterStoragePoolSyncAction("", ""), + SyncScope.ALL); } @Override protected void onExecutionSuccess(ExecutionContext executionContext) { - SyncManagerHelper.sync(new AlterStoragePoolSyncAction("", "")); + SyncManagerHelper.sync(new AlterStoragePoolSyncAction("", ""), + SyncScope.ALL); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/DrainStorageInfoTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/DrainStorageInfoTask.java index 4d085e321..deaa22555 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/DrainStorageInfoTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/DrainStorageInfoTask.java @@ -17,7 +17,6 @@ package com.alibaba.polardbx.executor.ddl.job.task.storagepool; import com.alibaba.fastjson.annotation.JSONCreator; -import com.alibaba.polardbx.common.DefaultSchema; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.executor.ddl.job.factory.storagepool.StoragePoolUtils; @@ -25,6 +24,8 @@ import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.sync.AlterStoragePoolSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.listener.impl.MetaDbConfigManager; +import com.alibaba.polardbx.gms.listener.impl.MetaDbDataIdBuilder; import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.topology.StorageInfoAccessor; import com.alibaba.polardbx.gms.topology.StorageInfoExtraFieldJSON; @@ -41,10 +42,11 @@ import java.util.Optional; import java.util.stream.Collectors; +import static com.alibaba.polardbx.gms.topology.StorageInfoRecord.STORAGE_STATUS_NOT_READY; +import static com.alibaba.polardbx.gms.topology.StorageInfoRecord.STORAGE_STATUS_REMOVED; + @Getter @TaskName(name = "DrainStorageInfoTask") -// here is add meta to complex_task_outline table, no need to update tableVersion, -// so no need to extends from BaseGmsTask public class DrainStorageInfoTask extends BaseDdlTask { String schemaName; @@ -80,19 +82,43 @@ public void executeImpl(Connection metaDbConnection, ExecutionContext executionC String.format("storage pool '%s' doesn't contains all of storage inst: '%s'", storagePoolName, StringUtils.join(dnIds, ","))); } - - List storageInfoRecords = storageInfoAccessor.getStorageInfosByInstId(instId); - List originalStorageInfoRecords = - storageInfoRecords.stream().filter(o -> dnIds.contains(o.storageInstId)).collect(Collectors.toList()); - for (StorageInfoRecord record : originalStorageInfoRecords) { - StorageInfoExtraFieldJSON extras = - Optional.ofNullable(record.extras).orElse(new StorageInfoExtraFieldJSON()); - extras.setStoragePoolName(StoragePoolUtils.RECYCLE_STORAGE_POOL); - storageInfoAccessor.updateStoragePoolName(record.storageInstId, extras); + Boolean notifyStorageInfo = true; + if (storagePoolName.equalsIgnoreCase(StoragePoolManager.RECYCLE_STORAGE_POOL_NAME)) { + String dnIdStr = StringUtils.join(this.dnIds, ","); +// Maybe There is no need. + // but for add node to _recycle, this is neccessary. 
+ List storageInfoRecords = storageInfoAccessor.getStorageInfosByInstId(instId); + List originalStorageInfoRecords = + storageInfoRecords.stream().filter(o -> dnIds.contains(o.storageInstId)).collect(Collectors.toList()); + for (StorageInfoRecord record : originalStorageInfoRecords) { + StorageInfoExtraFieldJSON extras = + Optional.ofNullable(record.extras).orElse(new StorageInfoExtraFieldJSON()); + extras.setStoragePoolName(""); + storageInfoAccessor.updateStoragePoolName(record.storageInstId, extras); + } + for (String dnId : dnIds) { + storageInfoAccessor.updateStorageStatus(dnId, STORAGE_STATUS_REMOVED); + } + storagePoolManager.shrinkStoragePoolSimply(storagePoolName, dnIdStr); + } else { + List storageInfoRecords = storageInfoAccessor.getStorageInfosByInstId(instId); + List originalStorageInfoRecords = + storageInfoRecords.stream().filter(o -> dnIds.contains(o.storageInstId)).collect(Collectors.toList()); + for (StorageInfoRecord record : originalStorageInfoRecords) { + StorageInfoExtraFieldJSON extras = + Optional.ofNullable(record.extras).orElse(new StorageInfoExtraFieldJSON()); + extras.setStoragePoolName(StoragePoolUtils.RECYCLE_STORAGE_POOL); + storageInfoAccessor.updateStoragePoolName(record.storageInstId, extras); + } + + String dnIdStr = StringUtils.join(this.dnIds, ","); + storagePoolManager.shrinkStoragePool(storagePoolName, dnIdStr, undeletableDnId); + } + if (notifyStorageInfo) { + // update op-version + MetaDbConfigManager.getInstance() + .notify(MetaDbDataIdBuilder.getStorageInfoDataId(instId), metaDbConnection); } - - String dnIdStr = StringUtils.join(this.dnIds, ","); - storagePoolManager.shrinkStoragePool(storagePoolName, dnIdStr, undeletableDnId); } @@ -127,12 +153,12 @@ protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionC @Override protected void onRollbackSuccess(ExecutionContext executionContext) { - SyncManagerHelper.sync(new AlterStoragePoolSyncAction("", "")); + SyncManagerHelper.sync(new AlterStoragePoolSyncAction("", ""), SyncScope.ALL); } @Override protected void onExecutionSuccess(ExecutionContext executionContext) { - SyncManagerHelper.sync(new AlterStoragePoolSyncAction("", "")); + SyncManagerHelper.sync(new AlterStoragePoolSyncAction("", ""), SyncScope.ALL); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/StorageInstValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/StorageInstValidateTask.java index 2caf1dfc7..735ecf405 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/StorageInstValidateTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/storagepool/StorageInstValidateTask.java @@ -37,6 +37,7 @@ public class StorageInstValidateTask extends BaseValidateTask { private List validStorageInsts; private String schemaName; private Boolean checkAttached; + private Boolean checkAlived; private Boolean checkIdle; public StorageInstValidateTask(String schemaName, String instId, List validStorageInsts) { @@ -44,17 +45,31 @@ public StorageInstValidateTask(String schemaName, String instId, List va this.schemaName = schemaName; this.instId = instId; this.validStorageInsts = validStorageInsts; + this.checkAlived = true; this.checkAttached = true; this.checkIdle = true; } + public StorageInstValidateTask(String schemaName, String instId, List validStorageInsts, + Boolean checkAttached, Boolean checkIdle) { + super(schemaName); + 
this.schemaName = schemaName; + this.instId = instId; + this.validStorageInsts = validStorageInsts; + this.checkAlived = true; + this.checkAttached = checkAttached; + this.checkIdle = checkIdle; + } + @JSONCreator public StorageInstValidateTask(String schemaName, String instId, List validStorageInsts, + Boolean checkAlived, Boolean checkAttached, Boolean checkIdle) { super(schemaName); this.schemaName = schemaName; this.instId = instId; this.validStorageInsts = validStorageInsts; + this.checkAlived = checkAlived; this.checkAttached = checkAttached; this.checkIdle = checkIdle; } @@ -64,7 +79,7 @@ protected void duringTransaction(Connection metaDbConnection, ExecutionContext e if (GeneralUtil.isEmpty(validStorageInsts)) { throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS, "valid storage insts can't be empty"); } - StoragePoolValidator.validateStoragePool(instId, validStorageInsts, checkAttached, checkIdle); + StoragePoolValidator.validateStoragePool(instId, validStorageInsts, checkAlived, checkAttached, checkIdle); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterComplexTaskUpdateJobStatusTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterComplexTaskUpdateJobStatusTask.java index 5f4e87222..b74273602 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterComplexTaskUpdateJobStatusTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterComplexTaskUpdateJobStatusTask.java @@ -20,10 +20,10 @@ import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; -import com.alibaba.polardbx.executor.sync.TableMetaChangePreemptiveSyncAction; import com.alibaba.polardbx.executor.sync.TablesMetaChangePreemptiveSyncAction; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -122,7 +122,7 @@ protected void onRollbackSuccess(ExecutionContext executionContext) { // sync to restore the status of table meta SyncManagerHelper.sync( new TablesMetaChangePreemptiveSyncAction(schemaName, relatedLogicalTables, 1500L, 1500L, - TimeUnit.MICROSECONDS)); + TimeUnit.MICROSECONDS), SyncScope.ALL); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterJoinGroupAddMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterJoinGroupAddMetaTask.java index ebe38f46a..29a7ab541 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterJoinGroupAddMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterJoinGroupAddMetaTask.java @@ -21,6 +21,7 @@ import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.executor.ddl.ImplicitTableGroupUtil; import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; import 
com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; @@ -36,6 +37,7 @@ import com.alibaba.polardbx.gms.tablegroup.PartitionGroupRecord; import com.alibaba.polardbx.gms.tablegroup.TableGroupAccessor; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; +import com.alibaba.polardbx.gms.tablegroup.TableGroupDetailConfig; import com.alibaba.polardbx.gms.tablegroup.TableGroupRecord; import com.alibaba.polardbx.gms.tablegroup.TableGroupUtils; import com.alibaba.polardbx.gms.util.TableGroupNameUtil; @@ -145,6 +147,7 @@ public void executeImpl(Connection metaDbConnection, ExecutionContext executionC private void addNewTableGroupInfo(Set tableTobeAlter, Connection metaDbConnection, ExecutionContext executionContext) { + ImplicitTableGroupUtil.checkAutoCreateTableGroup(executionContext); TableGroupAccessor tableGroupAccessor = new TableGroupAccessor(); PartitionGroupAccessor partitionGroupAccessor = new PartitionGroupAccessor(); TablePartitionAccessor tablePartitionAccessor = new TablePartitionAccessor(); @@ -176,8 +179,8 @@ private void addNewTableGroupInfo(Set tableTobeAlter, Connection metaDbC PartitionInfo partitionInfo = executionContext.getSchemaManager(schemaName).getTable(firstTableName).getPartitionInfo(); Long oldTableGroupId = partitionInfo.getTableGroupId(); - TableGroupConfig tableGroupConfig = - TableGroupUtils.getTableGroupInfoByGroupId(metaDbConnection, oldTableGroupId); + TableGroupDetailConfig tableGroupConfig = + TableGroupUtils.getTableGroupDetailInfoByGroupId(metaDbConnection, oldTableGroupId); List partitionGroupRecords = tableGroupConfig.getPartitionGroupRecords(); Map partitionIdsMap = new HashMap<>(); for (PartitionGroupRecord partitionGroupRecord : partitionGroupRecords) { @@ -187,7 +190,8 @@ private void addNewTableGroupInfo(Set tableTobeAlter, Connection metaDbC } for (String tableName : tableTobeAlter) { Optional tablePartRecordInfoContext = - tableGroupConfig.getAllTables().stream().filter(o -> o.getTableName().equalsIgnoreCase(tableName)) + tableGroupConfig.getTablesPartRecordInfoContext().stream() + .filter(o -> o.getTableName().equalsIgnoreCase(tableName)) .findFirst(); if (!tablePartRecordInfoContext.isPresent()) { throw new TddlRuntimeException(ErrorCode.ERR_TABLEGROUP_META_TOO_OLD, diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterJoinGroupValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterJoinGroupValidateTask.java index 0fb8eaf1c..66022fdc4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterJoinGroupValidateTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterJoinGroupValidateTask.java @@ -106,7 +106,7 @@ public void executeImpl(ExecutionContext executionContext) { .getTableGroupConfigByName(tableGroupName); Set curTables = new TreeSet<>(String::compareToIgnoreCase); curTables.addAll( - tableGroupConfig.getAllTables().stream().map(o -> o.getTableName()).collect(Collectors.toSet())); + tableGroupConfig.getAllTables().stream().collect(Collectors.toSet())); if (!originTables.equals(curTables)) { throw new TddlRuntimeException(ErrorCode.ERR_TABLEGROUP_META_TOO_OLD, String.format( diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupAddMetaTask.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupAddMetaTask.java index ceb4f3af5..7a989791c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupAddMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupAddMetaTask.java @@ -32,7 +32,11 @@ import lombok.Getter; import java.sql.Connection; -import java.util.*; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.Set; @Getter @TaskName(name = "AlterTableGroupAddMetaTask") diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupAddSubTaskMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupAddSubTaskMetaTask.java index daf41f833..1102424b6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupAddSubTaskMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupAddSubTaskMetaTask.java @@ -17,7 +17,6 @@ package com.alibaba.polardbx.executor.ddl.job.task.tablegroup; import com.alibaba.fastjson.annotation.JSONCreator; -import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.BaseGmsTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupMovePartitionRefreshMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupMovePartitionRefreshMetaTask.java index 153974bd7..424f09f45 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupMovePartitionRefreshMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupMovePartitionRefreshMetaTask.java @@ -73,7 +73,7 @@ public void refreshTableGroupMeta(Connection metaDbConnection) { long tableGroupId = tableGroupConfig.getTableGroupRecord().id; - boolean isFileStore = TableGroupNameUtil.isOssTg(tableGroupName); + boolean isFileStore = TableGroupNameUtil.isFileStorageTg(tableGroupName); // map logicalTb.physicalTb to new physical db group Map phyTableToNewGroup = new TreeMap<>(String::compareToIgnoreCase); @@ -120,8 +120,7 @@ public void refreshTableGroupMeta(Connection metaDbConnection) { ColumnMetaAccessor columnMetaAccessor = new ColumnMetaAccessor(); columnMetaAccessor.setConnection(metaDbConnection); - for (TablePartRecordInfoContext tablePartRecordInfoContext : tableGroupConfig.getTables()) { - String logTb = tablePartRecordInfoContext.getTableName(); + for (String logTb : tableGroupConfig.getTables()) { List files = filesAccessor.queryByLogicalSchemaTable(schemaName, logTb); @@ -159,10 +158,9 @@ public void refreshTableGroupMeta(Connection metaDbConnection) { // 2、cleanup partition_group_delta partitionGroupAccessor.deletePartitionGroupsByTableGroupId(tableGroupId, true); - for (TablePartRecordInfoContext infoContext : tableGroupConfig.getAllTables()) { + for (String tableName : tableGroupConfig.getAllTables()) { // 3、cleanup table_partition_delta // only delete the related records - String 
tableName = infoContext.getTableName(); tablePartitionAccessor .deleteTablePartitionConfigsForDeltaTable(schemaName, tableName); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupRefreshMetaBaseTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupRefreshMetaBaseTask.java index b3525a9dd..4f620bef3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupRefreshMetaBaseTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupRefreshMetaBaseTask.java @@ -31,6 +31,7 @@ import com.alibaba.polardbx.gms.tablegroup.PartitionGroupAccessor; import com.alibaba.polardbx.gms.tablegroup.PartitionGroupRecord; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; +import com.alibaba.polardbx.gms.tablegroup.TableGroupRecord; import com.alibaba.polardbx.gms.topology.DbGroupInfoAccessor; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager; @@ -109,7 +110,8 @@ public void refreshTableGroupMeta(Connection metaDbConnection) { updateTaskStatus(metaDbConnection); - long tableGroupId = tableGroupConfig.getTableGroupRecord().id; + TableGroupRecord tableGroupRecord = tableGroupConfig.getTableGroupRecord(); + long tableGroupId = tableGroupRecord.id; /** * Fetch all pg that are to be deleted from partition_group_delta and @@ -149,9 +151,8 @@ public void refreshTableGroupMeta(Connection metaDbConnection) { } } - for (TablePartRecordInfoContext infoContext : tableGroupConfig.getAllTables()) { - String tableName = infoContext.getLogTbRec().tableName; - schemaName = infoContext.getLogTbRec().tableSchema; + for (String tableName : tableGroupConfig.getAllTables()) { + schemaName = tableGroupRecord.getSchema(); TableMeta tableMeta = OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(tableName); SQLRecorderLogger.ddlMetaLogger.info( @@ -271,8 +272,7 @@ protected void updateAllTablesVersion(Connection metaDbConnection, ExecutionCont TableGroupConfig tableGroupConfig = OptimizerContext.getContext(schemaName).getTableGroupInfoManager() .getTableGroupConfigByName(tableGroupName); SchemaManager schemaManager = executionContext.getSchemaManager(schemaName); - for (TablePartRecordInfoContext infoContext : tableGroupConfig.getAllTables()) { - String tableName = infoContext.getLogTbRec().tableName; + for (String tableName : tableGroupConfig.getAllTables()) { TableMeta tableMeta = schemaManager.getTable(tableName); if (tableMeta.isGsi()) { //all the gsi table version change will be behavior by primary table diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupRenamePartitionChangeMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupRenamePartitionChangeMetaTask.java index 87da47e11..85968751b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupRenamePartitionChangeMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupRenamePartitionChangeMetaTask.java @@ -17,8 +17,6 @@ package com.alibaba.polardbx.executor.ddl.job.task.tablegroup; import com.alibaba.fastjson.annotation.JSONCreator; -import 
com.alibaba.polardbx.common.exception.TddlRuntimeException; -import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; @@ -31,7 +29,9 @@ import com.alibaba.polardbx.gms.tablegroup.PartitionGroupAccessor; import com.alibaba.polardbx.gms.tablegroup.PartitionGroupRecord; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; +import com.alibaba.polardbx.gms.tablegroup.TableGroupDetailConfig; import com.alibaba.polardbx.gms.tablegroup.TableGroupRecord; +import com.alibaba.polardbx.gms.tablegroup.TableGroupUtils; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.SchemaManager; import com.alibaba.polardbx.optimizer.config.table.TableMeta; @@ -39,13 +39,11 @@ import com.alibaba.polardbx.optimizer.partition.PartitionByDefinition; import com.alibaba.polardbx.optimizer.partition.PartitionInfo; import com.alibaba.polardbx.optimizer.partition.PartitionSpec; -import com.alibaba.polardbx.optimizer.partition.common.PartitionLocation; import com.alibaba.polardbx.optimizer.tablegroup.TableGroupInfoManager; import lombok.Getter; import java.sql.Connection; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.TreeMap; @@ -79,8 +77,11 @@ public void executeImpl(Connection metaDbConnection, ExecutionContext executionC final TableGroupInfoManager tableGroupInfoManager = OptimizerContext.getContext(schemaName).getTableGroupInfoManager(); - final TableGroupConfig tableGroupConfig = tableGroupInfoManager.getTableGroupConfigByName(tableGroupName); - final List tablePartitionInfoRecords = tableGroupConfig.getAllTables(); + final TableGroupConfig tgConfig = tableGroupInfoManager.getTableGroupConfigByName(tableGroupName); + final TableGroupDetailConfig tableGroupConfig = + TableGroupUtils.getTableGroupDetailInfoByGroupId(null, tgConfig.getTableGroupRecord().id); + final List tablePartitionInfoRecords = + tableGroupConfig.getTablesPartRecordInfoContext(); String firstTableInTg = tablePartitionInfoRecords.get(0).getTableName(); TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(firstTableInTg); PartitionInfo partitionInfo = tableMeta.getPartitionInfo(); @@ -202,7 +203,7 @@ private void processRenameSubPartition( } } - private void processRenameFirstLevelLogicalPartition(TableGroupConfig tableGroupConfig, + private void processRenameFirstLevelLogicalPartition(TableGroupDetailConfig tableGroupConfig, PartitionInfo partitionInfo, List partitionGroupRecords, List newTablePartitionRecords, @@ -218,7 +219,7 @@ private void processRenameFirstLevelLogicalPartition(TableGroupConfig tableGroup boolean useSubPartTemplate = subPartBy.isUseSubPartTemplate(); Map> firstLevelPartitionRecords = new TreeMap<>(String::compareToIgnoreCase); - for (TablePartRecordInfoContext tablePartInfo : tableGroupConfig.getTables()) { + for (TablePartRecordInfoContext tablePartInfo : tableGroupConfig.getTablesPartRecordInfoContext()) { tablePartInfo.getPartitionRecList().stream().forEach( o -> firstLevelPartitionRecords.computeIfAbsent(o.partName, k -> new ArrayList<>()).add(o.copy())); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupSetPartitionsLocalityChangeMetaTask.java
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupSetPartitionsLocalityChangeMetaTask.java index ed5b1f5fe..795f35d7f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupSetPartitionsLocalityChangeMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupSetPartitionsLocalityChangeMetaTask.java @@ -18,21 +18,17 @@ import com.alibaba.fastjson.annotation.JSONCreator; import com.alibaba.polardbx.common.utils.GeneralUtil; -import com.alibaba.polardbx.executor.balancer.policy.PolicyUtils; import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.gms.locality.LocalityDetailInfoRecord; import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; import com.alibaba.polardbx.gms.partition.TablePartitionAccessor; -import com.alibaba.polardbx.gms.partition.TablePartitionRecord; import com.alibaba.polardbx.gms.tablegroup.PartitionGroupAccessor; -import com.alibaba.polardbx.gms.tablegroup.PartitionGroupRecord; import com.alibaba.polardbx.gms.tablegroup.TableGroupAccessor; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil; import com.alibaba.polardbx.optimizer.tablegroup.TableGroupInfoManager; import lombok.Getter; @@ -55,7 +51,8 @@ public class AlterTableGroupSetPartitionsLocalityChangeMetaTask extends BaseDdlT @JSONCreator public AlterTableGroupSetPartitionsLocalityChangeMetaTask(String schemaName, String tableGroupName, - List logicalTableNames, String partitionName, String targetLocality, + List logicalTableNames, String partitionName, + String targetLocality, List toChangeMetaLocalityItems) { super(schemaName); this.tableGroupName = tableGroupName; @@ -66,7 +63,7 @@ public AlterTableGroupSetPartitionsLocalityChangeMetaTask(String schemaName, Str this.rollback = false; } - public void executeImpl(Connection metaDbConnection, ExecutionContext executionContext){ + public void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { final TableGroupInfoManager tableGroupInfoManager = OptimizerContext.getContext(schemaName).getTableGroupInfoManager(); final TableGroupConfig tableGroupConfig = tableGroupInfoManager.getTableGroupConfigByName(tableGroupName); @@ -82,14 +79,14 @@ public void executeImpl(Connection metaDbConnection, ExecutionContext executionC LocalityDetailInfoRecord localityDetailInfoRecord = toChangeMetaLocalityItems.get(0); String targetLocality = ""; - if(rollback){ + if (rollback) { targetLocality = localityDetailInfoRecord.getLocality(); - }else{ + } else { targetLocality = this.targetLocality; } List tableNames; - try{ + try { List pgIds = new ArrayList<>(); pgIds.add(localityDetailInfoRecord.getObjectId()); tablePartitionAccessor.resetTablePartitionsLocalityByGroupIds(schemaName, pgIds, targetLocality); @@ -97,11 +94,11 @@ public void executeImpl(Connection metaDbConnection, ExecutionContext executionC tableNames = logicalTableNames; } catch (Throwable t) { LOGGER.error(String.format( - "error occurs while update tablegroup, schemaName:%s, tableGroupName:%s", - schemaName, tableGroupName)); + "error occurs while update tablegroup, 
schemaName:%s, tableGroupName:%s", + schemaName, tableGroupName)); throw GeneralUtil.nestedException(t); } - for(String table:tableNames) { + for (String table : tableNames) { try { TableInfoManager.updateTableVersion(schemaName, table, metaDbConnection); } catch (Exception e) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupValidateTask.java index 387bfb2b2..c33cc4702 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupValidateTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableGroupValidateTask.java @@ -25,10 +25,10 @@ import com.alibaba.polardbx.executor.ddl.job.task.BaseValidateTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.ddl.job.validator.TableGroupValidator; +import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; import com.alibaba.polardbx.gms.metadb.table.TablesAccessor; import com.alibaba.polardbx.gms.metadb.table.TablesRecord; -import com.alibaba.polardbx.gms.partition.TablePartRecordInfoContext; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.SchemaManager; @@ -36,6 +36,7 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.statistics.SQLRecorderLogger; import lombok.Getter; +import org.apache.calcite.sql.SqlKind; import java.sql.Connection; import java.util.Map; @@ -56,19 +57,30 @@ public class AlterTableGroupValidateTask extends BaseValidateTask { @JSONCreator public AlterTableGroupValidateTask(String schemaName, String tableGroupName, Map tablesVersion, - boolean compareTablesList, Set targetPhysicalGroups) { + boolean compareTablesList, Set targetPhysicalGroups, + boolean allowEmptyGroup) { super(schemaName); this.tableGroupName = tableGroupName; this.tablesVersion = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); this.tablesVersion.putAll(tablesVersion); this.compareTablesList = compareTablesList; this.targetPhysicalGroups = targetPhysicalGroups; + this.allowEmptyGroup = allowEmptyGroup; } @Override public void executeImpl(ExecutionContext executionContext) { TableGroupValidator - .validateTableGroupInfo(schemaName, tableGroupName, false, executionContext.getParamManager()); + .validateTableGroupInfo(schemaName, tableGroupName, allowEmptyGroup, executionContext.getParamManager()); + TableGroupConfig tableGroupConfig = + OptimizerContext.getContext(schemaName).getTableGroupInfoManager() + .getTableGroupConfigByName(tableGroupName); + + for (String primaryTableName : tableGroupConfig.getAllTables()) { + TableValidator.validateTableWithCCI(schemaName, primaryTableName, executionContext, + SqlKind.ALTER_TABLEGROUP); + } + if (GeneralUtil.isNotEmpty(tablesVersion)) { for (Map.Entry tableVersionInfo : tablesVersion.entrySet()) { Long newTableVersion = @@ -96,15 +108,10 @@ public void executeImpl(ExecutionContext executionContext) { } } if (compareTablesList) { - TableGroupConfig tableGroupConfig = - OptimizerContext.getContext(schemaName).getTableGroupInfoManager() - .getTableGroupConfigByName(tableGroupName); - Set primaryTableNames = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); 
final SchemaManager schemaManager = executionContext.getSchemaManager(schemaName); - for (TablePartRecordInfoContext tablePartRecordInfoContext : tableGroupConfig.getAllTables()) { - String primaryTableName = tablePartRecordInfoContext.getTableName(); + for (String primaryTableName : tableGroupConfig.getAllTables()) { TableMeta tableMeta = schemaManager.getTable(primaryTableName); if (tableMeta.isGsi()) { //all the gsi table version change will be behavior by primary table diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableRenamePartitionChangeMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableRenamePartitionChangeMetaTask.java index 0c5ec8d45..efc61aaac 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableRenamePartitionChangeMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableRenamePartitionChangeMetaTask.java @@ -163,7 +163,7 @@ private void processRenameSubPartition(PartitionInfo partitionInfo, } } Optional partGroupRecord = partitionGroupRecords.stream() - .filter(o -> o.partition_name.equalsIgnoreCase(subPartSpec.getName())).findFirst(); + .filter(o -> o.partition_name.equalsIgnoreCase(tablePartitionRecord.getPartName())).findFirst(); if (!partGroupRecord.isPresent()) { throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_NAME_NOT_EXISTS, String.format( diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableSetGroupAddSubTaskMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableSetGroupAddSubTaskMetaTask.java index 3cca0cd12..ddc2abf53 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableSetGroupAddSubTaskMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableSetGroupAddSubTaskMetaTask.java @@ -68,7 +68,7 @@ public void executeImpl(Connection metaDbConnection, ExecutionContext executionC TableGroupConfig tableGroupConfig = OptimizerContext.getContext(schemaName).getTableGroupInfoManager() .getTableGroupConfigByName(targetTableGroupName); if (tableGroupConfig != null && GeneralUtil.isNotEmpty(tableGroupConfig.getAllTables())) { - String firstTable = tableGroupConfig.getAllTables().get(0).getTableName(); + String firstTable = tableGroupConfig.getAllTables().get(0); JoinGroupTableDetailRecord joinGroupTableDetailRecord = joinGroupTableDetailAccessor.getJoinGroupDetailBySchemaTableName(schemaName, firstTable); if (joinGroupTableDetailRecord != null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableSetTableGroupChangeMetaOnlyTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableSetTableGroupChangeMetaOnlyTask.java index 54794844c..7d2d8e34f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableSetTableGroupChangeMetaOnlyTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/AlterTableSetTableGroupChangeMetaOnlyTask.java @@ -18,11 +18,11 @@ import com.alibaba.fastjson.annotation.JSONCreator; import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.ddl.ImplicitTableGroupUtil; 
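Across the tablegroup hunks above (AlterTableGroupValidateTask, AlterTableGroupRefreshMetaBaseTask, AlterTableSetGroupAddSubTaskMetaTask), TableGroupConfig.getAllTables() evidently now yields table names directly rather than TablePartRecordInfoContext records, with the detailed per-table partition records moved behind TableGroupDetailConfig.getTablesPartRecordInfoContext(). A hedged before/after sketch of the caller-side change; the enclosing class and method are illustrative:

import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig;

public class TableGroupIterationSketch {
    public static void visitAllTables(TableGroupConfig tableGroupConfig) {
        // Before this patch (per the removed lines above):
        //   for (TablePartRecordInfoContext ctx : tableGroupConfig.getAllTables()) {
        //       String primaryTableName = ctx.getTableName();
        //       ...
        //   }
        // After it, the config hands back the names themselves:
        for (String primaryTableName : tableGroupConfig.getAllTables()) {
            System.out.println(primaryTableName); // stand-in for per-table work (version checks, CCI validation)
        }
    }
}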
import com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.partitionmanagement.AlterTableGroupUtils; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; -import com.alibaba.polardbx.gms.locality.LocalityDesc; import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; import com.alibaba.polardbx.gms.partition.TablePartRecordInfoContext; import com.alibaba.polardbx.gms.partition.TablePartitionAccessor; @@ -61,11 +61,13 @@ public class AlterTableSetTableGroupChangeMetaOnlyTask extends BaseDdlTask { protected String logicalTable; protected boolean tableGroupExists; protected boolean reCreatePartitionGroups; + protected boolean withImplicitTableGroup; @JSONCreator public AlterTableSetTableGroupChangeMetaOnlyTask(String schemaName, String logicalTable, String curTableGroup, String targetTableGroup, boolean reCreatePartitionGroups, - boolean tableGroupExists, String curJoinGroup) { + boolean tableGroupExists, String curJoinGroup, + boolean withImplicitTableGroup) { super(schemaName); this.logicalTable = logicalTable; this.curTableGroup = curTableGroup; @@ -73,6 +75,8 @@ public AlterTableSetTableGroupChangeMetaOnlyTask(String schemaName, String logic this.reCreatePartitionGroups = reCreatePartitionGroups; this.tableGroupExists = tableGroupExists; this.curJoinGroup = curJoinGroup; + this.withImplicitTableGroup = withImplicitTableGroup; + onExceptionTryRollback(); } public void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { @@ -153,7 +157,7 @@ public void changeMeta(Connection metaDbConnection, ExecutionContext executionCo } addNewPartitionGroupFromPartitionInfo(partitionInfo, partitionGroupRecords, - tableGroupId, metaDbConnection); + tableGroupId, executionContext, metaDbConnection); updateTableVersion(metaDbConnection, schemaName, logicalTable, executionContext); } @@ -219,6 +223,7 @@ private void restoreMeta(Connection metaDbConnection, ExecutionContext execution private void addNewPartitionGroupFromPartitionInfo(PartitionInfo partitionInfo, List partitionGroupRecords, Long tableGroupId, + ExecutionContext ec, Connection connection) { PartitionGroupAccessor partitionGroupAccessor = new PartitionGroupAccessor(); TablePartitionAccessor tablePartitionAccessor = new TablePartitionAccessor(); @@ -231,7 +236,9 @@ private void addNewPartitionGroupFromPartitionInfo(PartitionInfo partitionInfo, if (!tableGroupExists) { TableGroupRecord tableGroupRecord = new TableGroupRecord(); tableGroupRecord.schema = partitionInfo.getTableSchema(); - tableGroupRecord.tg_name = String.valueOf(System.currentTimeMillis()); + tableGroupRecord.tg_name = + withImplicitTableGroup ? (StringUtils.isNotEmpty(targetTableGroup) ? 
targetTableGroup : + String.valueOf(System.currentTimeMillis())) : String.valueOf(System.currentTimeMillis()); tableGroupRecord.meta_version = 0L; if (partitionInfo.getTableType() == PartitionTableType.SINGLE_TABLE) { if (partitionInfo.getTableGroupId() != TableGroupRecord.INVALID_TABLE_GROUP_ID) { @@ -246,18 +253,25 @@ private void addNewPartitionGroupFromPartitionInfo(PartitionInfo partitionInfo, tableGroupRecord.tg_type = TableGroupRecord.TG_TYPE_PARTITION_TBL_TG; } tableGroupId = tableGroupAccessor.addNewTableGroup(tableGroupRecord); - int tgType = tableGroupRecord.tg_type; - String finalTgName = TableGroupNameUtil.autoBuildTableGroupName(tableGroupId, tgType); - String localiity = partitionInfo.getLocality(); - List tableGroupRecords = - tableGroupAccessor - .getTableGroupsBySchemaAndName(partitionInfo.getTableSchema(), finalTgName, false); - if (GeneralUtil.isNotEmpty(tableGroupRecords)) { - finalTgName = "tg" + tableGroupRecord.tg_name; + String locality = partitionInfo.getLocality(); + if (!withImplicitTableGroup || StringUtils.isEmpty(targetTableGroup)) { + if (!withImplicitTableGroup) { + ImplicitTableGroupUtil.checkAutoCreateTableGroup(ec); + } + int tgType = tableGroupRecord.tg_type; + String finalTgName = TableGroupNameUtil.autoBuildTableGroupName(tableGroupId, tgType); + List tableGroupRecords = + tableGroupAccessor + .getTableGroupsBySchemaAndName(partitionInfo.getTableSchema(), finalTgName, false); + if (GeneralUtil.isNotEmpty(tableGroupRecords)) { + finalTgName = "tg" + tableGroupRecord.tg_name; + } + tableGroupAccessor.updateTableGroupName(tableGroupId, finalTgName); + tableGroupAccessor.updateTableGroupLocalityById(tableGroupId, locality); + targetTableGroup = finalTgName;//will pass the newly created targetTableGroup to the following tasks + } else { + tableGroupAccessor.updateTableGroupLocalityById(tableGroupId, locality); } - tableGroupAccessor.updateTableGroupName(tableGroupId, finalTgName); - tableGroupAccessor.updateTableGroupLocalityById(tableGroupId, localiity); - targetTableGroup = finalTgName;//will pass the new create targetTableGroup to the following tasks } else { int tableGroupType = TableGroupRecord.TG_TYPE_PARTITION_TBL_TG; @@ -344,8 +358,7 @@ private void addNewPartitionGroupFromPartitionInfo(PartitionInfo partitionInfo, TableGroupConfig tableGroupConfig = OptimizerContext.getContext(schemaName).getTableGroupInfoManager() .getTableGroupConfigById(tableGroupId); if (tableGroupConfig != null && GeneralUtil.isNotEmpty(tableGroupConfig.getTables())) { - TablePartRecordInfoContext tablePartRecordInfoContext = tableGroupConfig.getAllTables().get(0); - String tableName = tablePartRecordInfoContext.getTableName(); + String tableName = tableGroupConfig.getAllTables().get(0); JoinGroupInfoRecord joinGroupInfoRecord = JoinGroupUtils.getJoinGroupInfoByTable(schemaName, tableName, connection); joinGroupTableDetailAccessor.deleteJoinGroupTableDetailBySchemaTable(schemaName, logicalTable); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/BackgroupRebalanceTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/BackgroupRebalanceTask.java index 2665ddace..fb22c90d3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/BackgroupRebalanceTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/BackgroupRebalanceTask.java @@ -20,8 +20,6 @@ import
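The hunk above picks a table group name in two steps: prefer the auto-built name, and when that name already exists in the schema, fall back to "tg" plus the timestamp placeholder written into tg_name at creation. A hedged sketch of just that naming decision follows; `chooseName` and its arguments are illustrative, not the real TableGroupNameUtil/TableGroupAccessor API.

```java
import java.util.Set;

class TableGroupNaming {
    static String chooseName(long tableGroupId, String timestampName, Set<String> existingNames) {
        String candidate = "tg" + tableGroupId;   // stand-in for autoBuildTableGroupName(tableGroupId, tgType)
        if (existingNames.contains(candidate)) {
            // collision: fall back to "tg" + the timestamp placeholder stored in tg_name
            candidate = "tg" + timestampName;
        }
        return candidate;
    }
}
```

The timestamp fallback works because tg_name was initialized to `String.valueOf(System.currentTimeMillis())`, which is effectively unique within one schema.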
com.alibaba.polardbx.executor.ddl.job.task.BaseDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlHelper; -import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; -import com.alibaba.polardbx.gms.tablegroup.ComplexTaskOutlineAccessor; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/JoinGroupValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/JoinGroupValidateTask.java index 43a743919..14ffddc98 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/JoinGroupValidateTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/JoinGroupValidateTask.java @@ -20,30 +20,22 @@ import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.GeneralUtil; -import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.executor.ddl.job.task.BaseValidateTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.ddl.job.validator.JoinGroupValidator; import com.alibaba.polardbx.executor.ddl.job.validator.TableGroupValidator; import com.alibaba.polardbx.gms.partition.TablePartRecordInfoContext; -import com.alibaba.polardbx.gms.tablegroup.JoinGroupInfoAccessor; import com.alibaba.polardbx.gms.tablegroup.JoinGroupInfoRecord; import com.alibaba.polardbx.gms.tablegroup.JoinGroupUtils; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.partition.PartitionInfo; import com.alibaba.polardbx.optimizer.tablegroup.TableGroupInfoManager; import lombok.Getter; import java.sql.Connection; import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; -import java.util.TreeSet; -import java.util.stream.Collectors; @Getter @TaskName(name = "JoinGroupValidateTask") @@ -54,26 +46,28 @@ public class JoinGroupValidateTask extends BaseValidateTask { private boolean onlyCompareTableGroup; @JSONCreator - public JoinGroupValidateTask(String schemaName, List tableGroups, String tableName, boolean onlyCompareTableGroup) { + public JoinGroupValidateTask(String schemaName, List tableGroups, String tableName, + boolean onlyCompareTableGroup) { super(schemaName); this.tableGroups = tableGroups; - this.tableName=tableName; + this.tableName = tableName; this.onlyCompareTableGroup = onlyCompareTableGroup; } @Override protected void duringTransaction(Connection metaDbConnection, ExecutionContext executionContext) { if (GeneralUtil.isEmpty(tableGroups)) { - throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT,"the tableGroup list can't be empty"); + throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT, "the tableGroup list can't be empty"); } String targetTableGroup = tableGroups.get(0); - TableGroupValidator.validateTableGroupInfo(schemaName, targetTableGroup, true, executionContext.getParamManager()); - TableGroupInfoManager tableGroupInfoManager = OptimizerContext.getContext(schemaName).getTableGroupInfoManager(); + 
TableGroupValidator.validateTableGroupInfo(schemaName, targetTableGroup, true, + executionContext.getParamManager()); + TableGroupInfoManager tableGroupInfoManager = + OptimizerContext.getContext(schemaName).getTableGroupInfoManager(); TableGroupConfig tableGroupConfig = tableGroupInfoManager.getTableGroupConfigByName(targetTableGroup); String targetJoinGroup = ""; if (GeneralUtil.isNotEmpty(tableGroupConfig.getTables())) { - TablePartRecordInfoContext tablePartRecordInfoContext = tableGroupConfig.getTables().get(0); - String tbName = tablePartRecordInfoContext.getTableName(); + String tbName = tableGroupConfig.getTables().get(0); TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(tbName); if (tableMeta.isGsi()) { tbName = tableMeta.getGsiTableMetaBean().gsiMetaBean.tableName; @@ -81,11 +75,12 @@ protected void duringTransaction(Connection metaDbConnection, ExecutionContext e JoinGroupInfoRecord record = JoinGroupUtils.getJoinGroupInfoByTable(schemaName, tbName, metaDbConnection); targetJoinGroup = record == null ? "" : record.joinGroupName; } - if(onlyCompareTableGroup) { + if (onlyCompareTableGroup) { if (tableGroups.size() < 2) { - throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT,"the tableGroup list should great than 2"); + throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT, + "the tableGroup list should contain at least 2 table groups"); } - for (int i=1; i< tableGroups.size();i++) { + for (int i = 1; i < tableGroups.size(); i++) { String sourceTableGroup = tableGroups.get(i); String errMsg = String.format( "The joinGroup of tableGroup:[%s] does not match the joinGroup of tableGroup[%s]", @@ -100,8 +95,9 @@ protected void duringTransaction(Connection metaDbConnection, ExecutionContext e if (tableMeta.isGsi()) { tbName = tableMeta.getGsiTableMetaBean().gsiMetaBean.tableName; } - if(GeneralUtil.isNotEmpty(tableGroupConfig.getTables())) { - JoinGroupInfoRecord joinGroupInfoRecord = JoinGroupUtils.getJoinGroupInfoByTable(schemaName, tbName, metaDbConnection); + if (GeneralUtil.isNotEmpty(tableGroupConfig.getTables())) { + JoinGroupInfoRecord joinGroupInfoRecord = + JoinGroupUtils.getJoinGroupInfoByTable(schemaName, tbName, metaDbConnection); String sourceJoinGroup = joinGroupInfoRecord == null ?
"" : joinGroupInfoRecord.joinGroupName; boolean isValid = targetJoinGroup.equalsIgnoreCase(sourceJoinGroup); if (!isValid) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/RefreshTopologyAddMetaTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/RefreshTopologyAddMetaTask.java index afa8b69d2..6da70289c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/RefreshTopologyAddMetaTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/RefreshTopologyAddMetaTask.java @@ -43,7 +43,8 @@ public RefreshTopologyAddMetaTask(String schemaName, String tableGroupName, Long @JSONCreator public RefreshTopologyAddMetaTask(String schemaName, String tableGroupName, Long tableGroupId, String sourceSql, - int status, int type, List targetDbList, List newPartitions, List localities) { + int status, int type, List targetDbList, List newPartitions, + List localities) { super(schemaName, tableGroupName, tableGroupId, sourceSql, status, type, new HashSet<>(), targetDbList, newPartitions, localities); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/RefreshTopologyValidateTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/RefreshTopologyValidateTask.java index 25e90253c..4e590c67b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/RefreshTopologyValidateTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/RefreshTopologyValidateTask.java @@ -23,23 +23,14 @@ import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.executor.ddl.job.task.BaseValidateTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; -import com.alibaba.polardbx.executor.ddl.job.validator.TableGroupValidator; -import com.alibaba.polardbx.gms.partition.TablePartRecordInfoContext; -import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.gms.topology.DbTopologyManager; -import com.alibaba.polardbx.gms.topology.GroupDetailInfoAccessor; import com.alibaba.polardbx.gms.topology.GroupDetailInfoRecord; -import com.alibaba.polardbx.gms.util.InstIdUtil; -import com.alibaba.polardbx.gms.util.MetaDbUtil; -import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; -import java.sql.Connection; import java.util.List; import java.util.Map; -import java.util.Set; @Getter @TaskName(name = "RefreshTopologyValidateTask") diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/ReloadTableMetaAfterChangeTableGroupTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/ReloadTableMetaAfterChangeTableGroupTask.java index 15ffecd55..ee08f3849 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/ReloadTableMetaAfterChangeTableGroupTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/ReloadTableMetaAfterChangeTableGroupTask.java @@ -25,6 +25,7 @@ import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.sync.TableGroupSyncAction; import 
com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; @@ -71,7 +72,8 @@ protected void reloadTableGroup() { DdlJobManager jobManager = new DdlJobManager(); List prevTasks = jobManager.getTasksFromMetaDB(getJobId(), - (new AlterTableSetTableGroupChangeMetaOnlyTask(null, null, null, null, false, false, null)).getName()); + (new AlterTableSetTableGroupChangeMetaOnlyTask(null, null, null, null, false, false, null, + false)).getName()); assert prevTasks.size() == 1; AlterTableSetTableGroupChangeMetaOnlyTask setTableGroupChangeMetaOnlyTask = (AlterTableSetTableGroupChangeMetaOnlyTask) prevTasks.get(0); @@ -84,7 +86,7 @@ protected void reloadTableGroup() { private void syncTableGroup() { try { SyncManagerHelper - .sync(new TableGroupSyncAction(schemaName, targetTableGroup)); + .sync(new TableGroupSyncAction(schemaName, targetTableGroup), SyncScope.ALL); } catch (Throwable t) { LOGGER.error(String.format( "error occurs while sync table group, schemaName:%s, tableGroupName:%s", schemaName, targetTableGroup)); @@ -92,4 +94,4 @@ private void syncTableGroup() { } } -} \ No newline at end of file +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/StorePartitionGroupLocalityTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/StorePartitionGroupLocalityTask.java index e05cc0acd..ab0ecc9e8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/StorePartitionGroupLocalityTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/StorePartitionGroupLocalityTask.java @@ -43,7 +43,7 @@ public class StorePartitionGroupLocalityTask extends BaseDdlTask { @JSONCreator public StorePartitionGroupLocalityTask(String schemaName, String tableGroupName, Long tableGroupId, List outDatedPartitionGroupIds, - List newPartitionGroupNames, List localities){ + List newPartitionGroupNames, List localities) { super(schemaName); this.tableGroupName = tableGroupName; this.tableGroupId = tableGroupId; @@ -54,10 +54,11 @@ public StorePartitionGroupLocalityTask(String schemaName, String tableGroupName, public void executeImpl(Connection metaDbConnection, ExecutionContext executionContext) { LocalityManager localityManager = LocalityManager.getInstance(); - TableGroupInfoManager tableGroupInfoManager = OptimizerContext.getContext(schemaName).getTableGroupInfoManager(); - outDatedPartitionGroupIds.forEach(id->localityManager.deleteLocalityOfPartitionGroup(id)); + TableGroupInfoManager tableGroupInfoManager = + OptimizerContext.getContext(schemaName).getTableGroupInfoManager(); + outDatedPartitionGroupIds.forEach(id -> localityManager.deleteLocalityOfPartitionGroup(id)); TableGroupConfig tableGroupConfig = tableGroupInfoManager.getTableGroupConfigByName(tableGroupName); - for(int i = 0; i < newPartitionGroupNames.size(); i++){ + for (int i = 0; i < newPartitionGroupNames.size(); i++) { Long pgId = tableGroupConfig.getPartitionGroupByName(newPartitionGroupNames.get(i)).getId(); localityManager.setLocalityOfPartitionGroup(schemaName, pgId, localities.get(i)); } @@ -91,5 +92,4 @@ protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionC rollbackImpl(metaDbConnection, executionContext); } - } diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/TableGroupSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/TableGroupSyncTask.java index 96bff18ef..1e23041c6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/TableGroupSyncTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/TableGroupSyncTask.java @@ -21,9 +21,12 @@ import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.sync.TableGroupSyncAction; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; +import java.sql.Connection; + @Getter @TaskName(name = "TableGroupSyncTask") public class TableGroupSyncTask extends BaseSyncTask { @@ -41,9 +44,14 @@ public void executeImpl(ExecutionContext executionContext) { syncTableGroup(); } - private void syncTableGroup() { + @Override + protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + syncTableGroup(); + } + + protected void syncTableGroup() { try { - SyncManagerHelper.sync(new TableGroupSyncAction(schemaName, tableGroupName), true); + SyncManagerHelper.sync(new TableGroupSyncAction(schemaName, tableGroupName), SyncScope.ALL, true); } catch (Throwable t) { LOGGER.error(String.format( "error occurs while sync table group, schemaName:%s, tableGroupName:%s", schemaName, tableGroupName)); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/TableGroupsSyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/TableGroupsSyncTask.java index 66dc1d60b..d56386e84 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/TableGroupsSyncTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/TableGroupsSyncTask.java @@ -22,9 +22,11 @@ import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.sync.TableGroupsSyncAction; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import lombok.Getter; +import java.sql.Connection; import java.util.List; @Getter @@ -34,9 +36,9 @@ public class TableGroupsSyncTask extends BaseSyncTask { List tableGroupNameList; @JSONCreator - public TableGroupsSyncTask(String schemaName, List tableGroupName) { + public TableGroupsSyncTask(String schemaName, List tableGroupNameList) { super(schemaName); - this.tableGroupNameList = tableGroupName; + this.tableGroupNameList = tableGroupNameList; } @Override @@ -44,9 +46,14 @@ public void executeImpl(ExecutionContext executionContext) { syncTableGroup(); } - private void syncTableGroup() { + @Override + protected void duringRollbackTransaction(Connection metaDbConnection, ExecutionContext executionContext) { + syncTableGroup(); + } + + protected void syncTableGroup() { try { - SyncManagerHelper.sync(new TableGroupsSyncAction(schemaName, tableGroupNameList)); + SyncManagerHelper.sync(new TableGroupsSyncAction(schemaName, tableGroupNameList), SyncScope.ALL); } catch (Throwable t) { LOGGER.error(String.format( "error occurs while sync table group, schemaName:%s, 
tableGroupName:%s", schemaName, diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/TopologySyncTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/TopologySyncTask.java index 41991cd77..aabb1a1dc 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/TopologySyncTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/TopologySyncTask.java @@ -19,8 +19,6 @@ import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.executor.ddl.job.task.BaseSyncTask; import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; -import com.alibaba.polardbx.executor.sync.SyncManagerHelper; -import com.alibaba.polardbx.executor.sync.TableGroupSyncAction; import com.alibaba.polardbx.gms.listener.impl.MetaDbConfigManager; import com.alibaba.polardbx.gms.listener.impl.MetaDbDataIdBuilder; import com.alibaba.polardbx.optimizer.context.ExecutionContext; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/TopologySyncThenReleaseXLockTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/TopologySyncThenReleaseXLockTask.java index e0db64836..4fb98fa0a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/TopologySyncThenReleaseXLockTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/tablegroup/TopologySyncThenReleaseXLockTask.java @@ -30,6 +30,7 @@ public class TopologySyncThenReleaseXLockTask extends TopologySyncTask { private String schemaXLockToRelease; + public TopologySyncThenReleaseXLockTask(String schemaName, String schemaXLockToRelease) { super(schemaName); this.schemaXLockToRelease = schemaXLockToRelease; @@ -47,7 +48,8 @@ public void executeImpl(ExecutionContext executionContext) { } } catch (Exception e) { LOGGER.error(String.format( - "error occurs while TopologySyncThenReleaseXLockTask, schemaName:%s, schemaXLockToRelease:%s", schemaName, schemaXLockToRelease)); + "error occurs while TopologySyncThenReleaseXLockTask, schemaName:%s, schemaXLockToRelease:%s", + schemaName, schemaXLockToRelease)); throw GeneralUtil.nestedException(e); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/CommitTwoPhaseDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/CommitTwoPhaseDdlTask.java new file mode 100644 index 000000000..e1255a4eb --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/CommitTwoPhaseDdlTask.java @@ -0,0 +1,116 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.twophase; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.executor.ddl.job.task.BasePhyDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlManager; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; + +@TaskName(name = "CommitTwoPhaseDdlTask") +@Getter +public class CommitTwoPhaseDdlTask extends BasePhyDdlTask { + final private String logicalTableName; + final private Map> sourcePhyTableNames; + final private ComplexTaskMetaManager.ComplexTaskType taskType; + final private Long twoPhaseDdlId; + final private String sqlTemplate; + final private int commitDelay; + + @JSONCreator + public CommitTwoPhaseDdlTask(String schemaName, String logicalTableName, + Map> sourcePhyTableNames, + ComplexTaskMetaManager.ComplexTaskType taskType, + Long twoPhaseDdlId, String sqlTemplate, + int commitDelay + ) { + super(schemaName, null); + this.logicalTableName = logicalTableName; + this.sourcePhyTableNames = sourcePhyTableNames; + this.taskType = taskType; + this.twoPhaseDdlId = twoPhaseDdlId; + this.sqlTemplate = sqlTemplate; + this.commitDelay = commitDelay; + onExceptionTryRecoveryThenPause(); + } + + @Override + public void executeImpl(ExecutionContext executionContext) { + //TODO(2pc-ddl)COMP, process exception. + // even if CN HA, we should try our best to commit. + // if partial failed, we would try to commit again. + // if partial failed and commit again failed(DN restart/DN partition) + // we would commit all that can be committed, and process inconsistency in further task. + executionContext = executionContext.copy(); + executionContext.setSchemaName(schemaName); + + TwoPhaseDdlManager twoPhaseDdlManager = TwoPhaseDdlManager.globalTwoPhaseDdlManagerMap.get(twoPhaseDdlId); + if (twoPhaseDdlManager == null) { + twoPhaseDdlManager = + new TwoPhaseDdlManager(schemaName, logicalTableName, sqlTemplate, sourcePhyTableNames, twoPhaseDdlId); + twoPhaseDdlManager.setJobId(jobId); +// Boolean allWaitCommitted = twoPhaseDdlManager.checkAllPhyDdlWaitCommitted(); +// if(allWaitCommitted) { +// throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, +// "can't find twoPhaseDdlManager, HA may has happened! 
will rollback"); +// } + } + + // should be idempotent + try { + updateSupportedCommands(true, false, null); + Thread.sleep(commitDelay * 1000L); + twoPhaseDdlManager.twoPhaseDdlCommit( + schemaName, + logicalTableName, + executionContext + ); + } catch (RuntimeException | InterruptedException | ExecutionException exception) { + throw new TddlRuntimeException( + ErrorCode.ERR_DDL_JOB_ERROR, exception.getMessage() + ); + } + + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + } + + @Override + public void rollbackImpl(ExecutionContext executionContext) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, + String.format("We don't support rollback in this phase, please continue")); + } + + public static String getTaskName() { + return "CommitTwoPhaseDdlTask"; + } + + @Override + public String remark() { + return "|commit TwoPhaseDdl, tableName: " + logicalTableName; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/CompensationPhyDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/CompensationPhyDdlTask.java new file mode 100644 index 000000000..e6bbb274b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/CompensationPhyDdlTask.java @@ -0,0 +1,204 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.twophase; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.eventlogger.EventLogger; +import com.alibaba.polardbx.common.eventlogger.EventType; +import com.alibaba.polardbx.common.exception.PhysicalDdlException; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableItem; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableStatement; +import com.alibaba.polardbx.executor.ddl.job.builder.AlterTableBuilder; +import com.alibaba.polardbx.executor.ddl.job.builder.DdlPhyPlanBuilder; +import com.alibaba.polardbx.executor.ddl.job.converter.DdlJobDataConverter; +import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData; +import com.alibaba.polardbx.executor.ddl.job.task.BasePhyDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.basic.spec.AlterTableRollbacker; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlJobManagerUtils; +import com.alibaba.polardbx.optimizer.PlannerContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.planner.SqlConverter; +import com.alibaba.polardbx.optimizer.core.rel.ReplaceTableNameWithQuestionMarkVisitor; +import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterTable; +import com.alibaba.polardbx.optimizer.parse.FastsqlParser; +import com.alibaba.polardbx.optimizer.parse.FastsqlUtils; +import com.google.common.collect.Lists; +import lombok.Getter; +import org.apache.calcite.plan.RelOptCluster; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.ddl.AlterTable; +import org.apache.calcite.sql.SqlAlterTable; +import org.apache.calcite.sql.SqlIdentifier; +import org.apache.calcite.sql.parser.SqlParserPos; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang.StringUtils; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +@Getter +@TaskName(name = "CompensationPhyDdlTask") +public class CompensationPhyDdlTask extends BasePhyDdlTask { + + private String logicalTableName; + + private String sourceSql; + + private String rollbackSql; + + private String rollbackSqlTemplate; + + public void setSourceSql(String sourceSql) { + this.sourceSql = sourceSql; + } + + public void setRollbackSql(String rollbackSql) { + this.rollbackSql = rollbackSql; + } + + public void setRollbackSqlTemplate(String rollbackSqlTemplate) { + this.rollbackSqlTemplate = rollbackSqlTemplate; + } + + @JSONCreator + public CompensationPhyDdlTask(String schemaName, String logicalTableName, PhysicalPlanData physicalPlanData) { + super(schemaName, physicalPlanData); + this.logicalTableName = logicalTableName; + onExceptionTryRecoveryThenPause(); + } + + @Override + protected List getPhysicalPlans(ExecutionContext executionContext) { + // phyDbName/phyTableName => hashCode + Map hashCodeForPhysicalTableBefore = DdlJobManagerUtils.reloadPhyTablesHashCode(jobId); + Set alterFinishedPhysicalTables = + DdlJobDataConverter.getPhysicalDoneTables(physicalPlanData, executionContext, + hashCodeForPhysicalTableBefore); + List physicalPlans = + DdlJobDataConverter.convertToPhysicalPlans(physicalPlanData, executionContext,
alterFinishedPhysicalTables); + if (!CollectionUtils.isEmpty(physicalPlans)) { + String info = + String.format("generated a non-empty compensation ddl task! There are %d physical tables that failed before.", + physicalPlans.size()); + EventLogger.log(EventType.TWO_PHASE_DDL_WARN, info); + } + return physicalPlans; + + } + + @Override + public void executeImpl(ExecutionContext executionContext) { + try { + //TODO: filter physicalPlanData by what has already been done. + super.executeImpl(executionContext); + } catch (PhysicalDdlException e) { + // we don't support rollback in this task +// int successCount = e.getSuccessCount(); +// if (successCount == 0) { +// enableRollback(this); +// } else { +// // Some physical DDLs failed && they do not support rollback, +// // so we forbid CANCEL DDL command here. +// if (!AlterTableRollbacker.checkIfRollbackable(executionContext.getDdlContext().getDdlStmt())) { +// updateSupportedCommands(true, false, null); +// } +// } + throw new PhysicalDdlException(e.getTotalCount(), e.getSuccessCount(), e.getFailCount(), + e.getErrMsg(), e.getSimpleErrMsg()); + } + } + + @Override + protected List genRollbackPhysicalPlans(ExecutionContext executionContext) { + if (StringUtils.isNotEmpty(rollbackSqlTemplate)) { + return genReversedPhysicalPlansFromTemplate(rollbackSqlTemplate, executionContext); + } + + if (StringUtils.isNotEmpty(rollbackSql)) { + return genReversedPhysicalPlans(rollbackSql, executionContext); + } + + String origSql = StringUtils.isNotEmpty(sourceSql) ? sourceSql : executionContext.getDdlContext().getDdlStmt(); + SQLAlterTableStatement alterTableStmt = (SQLAlterTableStatement) FastsqlUtils.parseSql(origSql).get(0); + if (AlterTableRollbacker.checkIfRollbackable(alterTableStmt)) { + String reversedSql = genReversedAlterTableStmt(alterTableStmt); + return genReversedPhysicalPlans(reversedSql, executionContext); + } else { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, + "The DDL job is not rollbackable because the DDL includes some operations that don't support rollback"); + } + } + + protected List genReversedPhysicalPlansFromTemplate(String reversedSqlTemplate, + ExecutionContext executionContext) { + PhysicalPlanData newPhysicalPlanData = physicalPlanData.clone(); + newPhysicalPlanData.setSqlTemplate(reversedSqlTemplate); + return DdlJobDataConverter.convertToPhysicalPlans(newPhysicalPlanData, executionContext); + } + + protected List genReversedPhysicalPlans(String reversedSql, ExecutionContext executionContext) { + ReplaceTableNameWithQuestionMarkVisitor visitor = + new ReplaceTableNameWithQuestionMarkVisitor(schemaName, executionContext); + + SqlAlterTable reversedAlterTable = + (SqlAlterTable) new FastsqlParser().parse(reversedSql, executionContext).get(0); + reversedAlterTable = (SqlAlterTable) reversedAlterTable.accept(visitor); + + SqlIdentifier tableNameNode = + new SqlIdentifier(Lists.newArrayList(schemaName, logicalTableName), SqlParserPos.ZERO); + + final RelOptCluster cluster = + SqlConverter.getInstance(executionContext).createRelOptCluster(new PlannerContext(executionContext)); + AlterTable alterTable = AlterTable.create(cluster, reversedAlterTable, tableNameNode, null); + + LogicalAlterTable logicalAlterTable = LogicalAlterTable.create(alterTable); + logicalAlterTable.prepareData(); + + DdlPhyPlanBuilder alterTableBuilder = + AlterTableBuilder.create(alterTable, logicalAlterTable.getAlterTablePreparedData(), executionContext) + .build(); + + return convertToRelNodes(alterTableBuilder.getPhysicalPlans()); + } + + protected
String genReversedAlterTableStmt(SQLAlterTableStatement alterTableStmt) { + List reversedAlterItems = new ArrayList<>(); + + for (SQLAlterTableItem alterItem : alterTableStmt.getItems()) { + List reversedItems = convertToReversedItem(alterItem); + reversedAlterItems.addAll(reversedItems); + } + + alterTableStmt.getItems().clear(); + alterTableStmt.getItems().addAll(reversedAlterItems); + + return alterTableStmt.toString(); + } + + private List convertToReversedItem(SQLAlterTableItem origAlterItem) { + // One original alter item may be reversed to multiple items. For example, + // ALTER TABLE XXX ADD COLUMN (ca INT, cb INT, cc INT) + return AlterTableRollbacker.reverse(schemaName, logicalTableName, origAlterItem); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/EmitPhysicalDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/EmitPhysicalDdlTask.java new file mode 100644 index 000000000..3f431f9ad --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/EmitPhysicalDdlTask.java @@ -0,0 +1,137 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
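The compensation task's rollback path reverses each alter item, and one original item may map to several reversed ones, as the ADD COLUMN (ca INT, cb INT, cc INT) example in the comment above notes. Below is a toy, string-level sketch of that idea only; the real AlterTableRollbacker works on parsed SQLAlterTableItem nodes, and the single reversal rule here is illustrative.

```java
import java.util.ArrayList;
import java.util.List;

class AlterReversalSketch {
    static List<String> reverse(List<String> alterItems) {
        List<String> reversed = new ArrayList<>();
        for (String item : alterItems) {
            if (item.startsWith("ADD COLUMN ")) {
                // "ADD COLUMN (a INT, b INT)" reverses to DROP COLUMN a / DROP COLUMN b
                String cols = item.substring("ADD COLUMN ".length()).replaceAll("[()]", "");
                for (String col : cols.split(",")) {
                    reversed.add("DROP COLUMN " + col.trim().split("\\s+")[0]);
                }
            } else {
                // anything without a known inverse makes the whole DDL non-rollbackable
                throw new IllegalArgumentException("not rollbackable: " + item);
            }
        }
        return reversed;
    }
}
```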
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.twophase; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.executor.ddl.job.task.BasePhyDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.ddl.newengine.cross.CrossEngineValidator; +import com.alibaba.polardbx.executor.ddl.newengine.job.DdlExceptionAction; +import com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlManager; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.fasterxml.jackson.annotation.JsonCreator; +import lombok.Getter; + +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; + +@TaskName(name = "EmitPhysicalDdlTask") +@Getter +public class EmitPhysicalDdlTask extends BasePhyDdlTask { + final private String logicalTableName; + final private Map> sourcePhyTableNames; + final private ComplexTaskMetaManager.ComplexTaskType taskType; + final private Long twoPhaseDdlId; + + final private String sqlTemplate; + ConcurrentHashMap> sourcePhyTableEmitted; + + @JsonCreator + public EmitPhysicalDdlTask(String schemaName, String logicalTableName, + Map> sourcePhyTableNames, + String sqlTemplate, + ComplexTaskMetaManager.ComplexTaskType taskType, + Long twoPhaseDdlId, + ConcurrentHashMap> sourcePhyTableEmitted + ) { + super(schemaName, null); + this.logicalTableName = logicalTableName; + this.sourcePhyTableNames = sourcePhyTableNames; + this.taskType = taskType; + this.sqlTemplate = sqlTemplate; + this.twoPhaseDdlId = twoPhaseDdlId; + this.sourcePhyTableEmitted = sourcePhyTableEmitted; + onExceptionTryRollback(); + } + + @Override + public void executeImpl(ExecutionContext executionContext) { + executionContext.setSchemaName(schemaName); + + TwoPhaseDdlManager twoPhaseDdlManager = TwoPhaseDdlManager.globalTwoPhaseDdlManagerMap.get(twoPhaseDdlId); + if (twoPhaseDdlManager == null) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, + "can't find twoPhaseDdlManager, HA may have happened!
will rollback"); + } + + // should be idempotent + try { + twoPhaseDdlManager.twoPhaseDdlEmit( + logicalTableName, + sourcePhyTableEmitted, + executionContext + ); + } catch (Exception exception) { + String cause = "unknown cause!"; + if (exception.getMessage() != null) { + cause = exception.getMessage(); + } + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, cause); + } + + if (!twoPhaseDdlManager.checkAllPhyDdlEmited(logicalTableName, executionContext)) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, + "The task ended but not all physical tables were emitted, maybe paused manually!"); + } + + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + } + + @Override + public void rollbackImpl(ExecutionContext executionContext) { + executionContext = executionContext.copy(); + executionContext.setSchemaName(schemaName); + + TwoPhaseDdlManager twoPhaseDdlManager = TwoPhaseDdlManager.globalTwoPhaseDdlManagerMap.get(twoPhaseDdlId); + if (twoPhaseDdlManager == null) { + twoPhaseDdlManager = + new TwoPhaseDdlManager(schemaName, logicalTableName, sqlTemplate, sourcePhyTableNames, twoPhaseDdlId); + twoPhaseDdlManager.setJobId(jobId); + } + try { + twoPhaseDdlManager.twoPhaseDdlRollback( + schemaName, + logicalTableName, + executionContext + ); + } catch (ExecutionException e) { + throw new RuntimeException(e); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + + } + + public static String getTaskName() { + return "EmitPhysicalDdlTask"; + } + + @Override + public String remark() { + return "|emit TwoPhaseDdl, tableName: " + logicalTableName; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/FinishTwoPhaseDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/FinishTwoPhaseDdlTask.java new file mode 100644 index 000000000..a06bf2d5b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/FinishTwoPhaseDdlTask.java @@ -0,0 +1,104 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.twophase; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.executor.ddl.job.task.BasePhyDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlManager; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; + +@TaskName(name = "FinishTwoPhaseDdlTask") +@Getter +public class FinishTwoPhaseDdlTask extends BasePhyDdlTask { + final private String logicalTableName; + final private Map> sourcePhyTableNames; + final private ComplexTaskMetaManager.ComplexTaskType taskType; + final private Long twoPhaseDdlId; + + final private String sqlTemplate; + + @JSONCreator + public FinishTwoPhaseDdlTask(String schemaName, String logicalTableName, + Map> sourcePhyTableNames, + String sqlTemplate, + ComplexTaskMetaManager.ComplexTaskType taskType, + Long twoPhaseDdlId + ) { + super(schemaName, null); + this.logicalTableName = logicalTableName; + this.sqlTemplate = sqlTemplate; + this.sourcePhyTableNames = sourcePhyTableNames; + this.taskType = taskType; + this.twoPhaseDdlId = twoPhaseDdlId; + onExceptionTryRecoveryThenPause(); + } + + @Override + public void executeImpl(ExecutionContext executionContext) { + executionContext = executionContext.copy(); + executionContext.setSchemaName(schemaName); + + TwoPhaseDdlManager twoPhaseDdlManager = TwoPhaseDdlManager.globalTwoPhaseDdlManagerMap.get(twoPhaseDdlId); + if (twoPhaseDdlManager == null) { + twoPhaseDdlManager = + new TwoPhaseDdlManager(schemaName, logicalTableName, sqlTemplate, sourcePhyTableNames, twoPhaseDdlId); + twoPhaseDdlManager.setJobId(jobId); + } + + // should be idempotent + try { + twoPhaseDdlManager.twoPhaseDdlFinish( + schemaName, + logicalTableName, + executionContext + ); + } catch (RuntimeException | InterruptedException | ExecutionException exception) { + throw new TddlRuntimeException( + ErrorCode.ERR_DDL_JOB_ERROR, exception.getMessage() + ); + //TODO, process exception. 
+ } + + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + } + + @Override + public void rollbackImpl(ExecutionContext executionContext) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, + String.format("We don't support rollback in this phase, please continue")); + } + + public static String getTaskName() { + return "FinishTwoPhaseDdlTask"; + } + + @Override + public String remark() { + return "|finish TwoPhaseDdl, tableName: " + logicalTableName; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/InitTwoPhaseDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/InitTwoPhaseDdlTask.java new file mode 100644 index 000000000..aeee96dea --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/InitTwoPhaseDdlTask.java @@ -0,0 +1,128 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.twophase; + +import com.alibaba.fastjson.JSON; +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData; +import com.alibaba.polardbx.executor.ddl.job.task.BasePhyDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlJobManagerUtils; +import com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlManager; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +import java.sql.Connection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; + +import static com.alibaba.polardbx.executor.ddl.newengine.utils.DdlHelper.genHashCodeForPhyTableDDL; + +@TaskName(name = "InitTwoPhaseDdlTask") +@Getter +public class InitTwoPhaseDdlTask extends BasePhyDdlTask { + final private String logicalTableName; + final private Map> sourcePhyTableNames; + final private ComplexTaskMetaManager.ComplexTaskType taskType; + final private Long twoPhaseDdlId; + + final private String sqlTemplate; + private Map physicalTableHashCodeMap; + + @JSONCreator + public InitTwoPhaseDdlTask(String schemaName, String logicalTableName, + Map> sourcePhyTableNames, + String sqlTemplate, + ComplexTaskMetaManager.ComplexTaskType taskType, + Long twoPhaseDdlId, + Map physicalTableHashCodeMap + ) { + super(schemaName, null); + this.logicalTableName = logicalTableName; + this.sourcePhyTableNames = sourcePhyTableNames; + this.taskType = taskType; + this.sqlTemplate = sqlTemplate; + this.twoPhaseDdlId = twoPhaseDdlId; + this.physicalTableHashCodeMap = physicalTableHashCodeMap; 
+ onExceptionTryRecoveryThenRollback(); + } + + @Override + public void executeImpl(ExecutionContext executionContext) { + executionContext = executionContext.copy(); + executionContext.setSchemaName(schemaName); + + TwoPhaseDdlManager twoPhaseDdlManager = TwoPhaseDdlManager.globalTwoPhaseDdlManagerMap.get(twoPhaseDdlId); + if (twoPhaseDdlManager == null) { + twoPhaseDdlManager = + new TwoPhaseDdlManager(schemaName, logicalTableName, sqlTemplate, sourcePhyTableNames, twoPhaseDdlId); + } + twoPhaseDdlManager.setJobId(jobId); + + // should be idempotent + twoPhaseDdlManager.twoPhaseDdlInit( + logicalTableName, + executionContext + ); + + physicalTableHashCodeMap = TwoPhaseDdlManager.calPhyTableHashCodeMap(schemaName, sourcePhyTableNames); + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + } + + @Override + public void rollbackImpl(ExecutionContext executionContext) { + executionContext = executionContext.copy(); + executionContext.setSchemaName(schemaName); + + TwoPhaseDdlManager twoPhaseDdlManager = TwoPhaseDdlManager.globalTwoPhaseDdlManagerMap.get(twoPhaseDdlId); + if (twoPhaseDdlManager == null) { + twoPhaseDdlManager = + new TwoPhaseDdlManager(schemaName, logicalTableName, sqlTemplate, sourcePhyTableNames, twoPhaseDdlId); + twoPhaseDdlManager.setJobId(jobId); + } + try { + twoPhaseDdlManager.twoPhaseDdlFinish( + schemaName, + logicalTableName, + executionContext + ); + } catch (ExecutionException e) { + throw new RuntimeException(e); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + } + + public static String getTaskName() { + return "InitTwoPhaseDdlTask"; + } + + @Override + public String remark() { + return "|init TwoPhaseDdl, tableName: " + logicalTableName; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/LogTwoPhaseDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/LogTwoPhaseDdlTask.java new file mode 100644 index 000000000..14aa35893 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/LogTwoPhaseDdlTask.java @@ -0,0 +1,98 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
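InitTwoPhaseDdlTask above snapshots a hash code per physical table before any physical DDL runs; a later pass (compare CompensationPhyDdlTask's use of reloadPhyTablesHashCode) can then treat a changed hash as evidence that a physical table already executed its DDL. The sketch below illustrates that assumed semantics only; the real hashing lives in TwoPhaseDdlManager.calPhyTableHashCodeMap, and hashing SHOW CREATE TABLE output is an assumption here.

```java
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

class PhyTableSnapshot {
    // phyDb.phyTable -> hash of the table definition before the DDL
    static Map<String, Integer> snapshot(Map<String, ? extends Iterable<String>> phyTablesByDb,
                                         Function<String, String> showCreateTable) {
        Map<String, Integer> hashes = new HashMap<>();
        phyTablesByDb.forEach((db, tables) -> {
            for (String table : tables) {
                String key = db + "." + table;
                hashes.put(key, showCreateTable.apply(key).hashCode());
            }
        });
        return hashes;
    }

    // a physical table whose definition hash changed has already run its physical DDL,
    // so a compensation pass can skip it
    static boolean alreadyDone(Map<String, Integer> before, String key, String currentDef) {
        return before.containsKey(key) && before.get(key) != currentDef.hashCode();
    }
}
```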
+ */ + +package com.alibaba.polardbx.executor.ddl.job.task.twophase; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.executor.ddl.job.task.BasePhyDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlManager; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; + +@TaskName(name = "LogTwoPhaseDdlTask") +@Getter +public class LogTwoPhaseDdlTask extends BasePhyDdlTask { + final private String logicalTableName; + final private Map> sourcePhyTableNames; + final private ComplexTaskMetaManager.ComplexTaskType taskType; + final private Long twoPhaseDdlId; + + final private String sqlTemplate; + + @JSONCreator + public LogTwoPhaseDdlTask(String schemaName, String logicalTableName, + Map> sourcePhyTableNames, + String sqlTemplate, + ComplexTaskMetaManager.ComplexTaskType taskType, + Long twoPhaseDdlId + ) { + super(schemaName, null); + this.logicalTableName = logicalTableName; + this.sqlTemplate = sqlTemplate; + this.sourcePhyTableNames = sourcePhyTableNames; + this.taskType = taskType; + this.twoPhaseDdlId = twoPhaseDdlId; + onExceptionTryRollback(); + } + + @Override + public void executeImpl(ExecutionContext executionContext) { + executionContext = executionContext.copy(); + executionContext.setSchemaName(schemaName); + + TwoPhaseDdlManager twoPhaseDdlManager = TwoPhaseDdlManager.globalTwoPhaseDdlManagerMap.get(twoPhaseDdlId); + if (twoPhaseDdlManager == null) { + twoPhaseDdlManager = + new TwoPhaseDdlManager(schemaName, logicalTableName, sqlTemplate, sourcePhyTableNames, twoPhaseDdlId); + twoPhaseDdlManager.setJobId(jobId); + } + + // should be idempotent + try { + twoPhaseDdlManager.twoPhaseDdlLog(schemaName, logicalTableName, executionContext); + } catch (RuntimeException | InterruptedException | ExecutionException exception) { + throw new TddlRuntimeException( + ErrorCode.ERR_DDL_JOB_ERROR, exception.getMessage() + ); + //TODO, process exception. + } + + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + } + + @Override + public void rollbackImpl(ExecutionContext executionContext) { + } + + public static String getTaskName() { + return "LogTwoPhaseDdlTask"; + } + + @Override + public String remark() { + return "|log TwoPhaseDdl, tableName: " + logicalTableName; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/PrepareTwoPhaseDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/PrepareTwoPhaseDdlTask.java new file mode 100644 index 000000000..6d03ce1d1 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/PrepareTwoPhaseDdlTask.java @@ -0,0 +1,104 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.twophase; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.executor.ddl.job.task.BasePhyDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlManager; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import lombok.Getter; + +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; + +@TaskName(name = "PrepareTwoPhaseDdlTask") +@Getter +public class PrepareTwoPhaseDdlTask extends BasePhyDdlTask { + final private String logicalTableName; + final private Map<String, Set<String>> sourcePhyTableNames; + final private ComplexTaskMetaManager.ComplexTaskType taskType; + final private Long twoPhaseDdlId; + final private int prepareDelay; + + @JSONCreator + public PrepareTwoPhaseDdlTask(String schemaName, String logicalTableName, + Map<String, Set<String>> sourcePhyTableNames, + ComplexTaskMetaManager.ComplexTaskType taskType, + Long twoPhaseDdlId, + int prepareDelay + ) { + super(schemaName, null); + this.logicalTableName = logicalTableName; + this.sourcePhyTableNames = sourcePhyTableNames; + this.taskType = taskType; + this.twoPhaseDdlId = twoPhaseDdlId; + this.prepareDelay = prepareDelay; + onExceptionTryRollback(); + } + + @Override + public void executeImpl(ExecutionContext executionContext) { + executionContext = executionContext.copy(); + executionContext.setSchemaName(schemaName); + + TwoPhaseDdlManager twoPhaseDdlManager = TwoPhaseDdlManager.globalTwoPhaseDdlManagerMap.get(twoPhaseDdlId); + if (twoPhaseDdlManager == null) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, + "can't find twoPhaseDdlManager, CN HA may have happened! will rollback"); + } + + // should be idempotent + try { + // TODO(2pc-ddl) COMPULSORY. + // before setting prepared, we should first check if the physical ddl is running. + Thread.sleep(prepareDelay * 1000L); + twoPhaseDdlManager.twoPhaseDdlPrepare( + schemaName, + logicalTableName, + executionContext + ); + } catch (RuntimeException | InterruptedException | ExecutionException exception) { + throw new TddlRuntimeException( + ErrorCode.ERR_DDL_JOB_ERROR, exception.getMessage() + ); + //TODO(2pc-ddl) IMPROVEMENT, process exception. + // for example, if ExecutionException or InterruptedException, we should retry.
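+ // note: twoPhaseDdlPrepare above is intended to be idempotent (see the comment before this try block), + // so a retry here would presumably just re-enter the prepare barrier rather than repeat side effects.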
+ } + + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + } + + @Override + public void rollbackImpl(ExecutionContext executionContext) { + } + + public static String getTaskName() { + return "PrepareTwoPhaseDdlTask"; + } + + @Override + public String remark() { + return "|prepare TwoPhaseDdl, tableName: " + logicalTableName; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/WaitTwoPhaseDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/WaitTwoPhaseDdlTask.java new file mode 100644 index 000000000..65612940b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/task/twophase/WaitTwoPhaseDdlTask.java @@ -0,0 +1,128 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.job.task.twophase; + +import com.alibaba.fastjson.annotation.JSONCreator; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.executor.ddl.job.task.BasePhyDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.util.TaskName; +import com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlManager; +import com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; +import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.mysql.cj.x.protobuf.PolarxDatatypes; +import io.grpc.netty.shaded.io.netty.util.internal.StringUtil; +import lombok.Getter; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; + +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlData.REACHED_BARRIER; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlData.REACHED_BARRIER_RUNNING; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlData.RUNNING; + +@TaskName(name = "WaitTwoPhaseDdlTask") +@Getter +public class WaitTwoPhaseDdlTask extends BasePhyDdlTask { + final private String logicalTableName; + final private Map<String, Set<String>> sourcePhyTableNames; + final private ComplexTaskMetaManager.ComplexTaskType taskType; + final private Long twoPhaseDdlId; + + final private String twoPhaseTaskName; + final private int waitDelay; + + @JSONCreator + public WaitTwoPhaseDdlTask(String schemaName, String logicalTableName, + Map<String, Set<String>> sourcePhyTableNames, + String twoPhaseTaskName, + ComplexTaskMetaManager.ComplexTaskType taskType, + Long twoPhaseDdlId, + int waitDelay + ) { + super(schemaName, null); + this.logicalTableName = logicalTableName; + this.sourcePhyTableNames = sourcePhyTableNames; + this.twoPhaseTaskName = twoPhaseTaskName;
+ this.taskType = taskType; + this.twoPhaseDdlId = twoPhaseDdlId; + this.waitDelay = waitDelay; + onExceptionTryRollback(); + } + + @Override + public void executeImpl(ExecutionContext executionContext) { + executionContext = executionContext.copy(); + executionContext.setSchemaName(schemaName); + + TwoPhaseDdlManager twoPhaseDdlManager = TwoPhaseDdlManager.globalTwoPhaseDdlManagerMap.get(twoPhaseDdlId); + if (twoPhaseDdlManager == null) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, + "can't find twoPhaseDdlManager, HA may have happened! will rollback"); + } + + Set<String> expectedPhyDdlState = + new HashSet<>(Arrays.asList(RUNNING, REACHED_BARRIER, REACHED_BARRIER_RUNNING)); + // should be idempotent + + int status; + try { + if (waitDelay > 0) { + Thread.sleep(waitDelay * 1000L); + } + status = twoPhaseDdlManager.twoPhaseDdlWait( + schemaName, + logicalTableName, + twoPhaseTaskName, + expectedPhyDdlState, + executionContext + ); + } catch (RuntimeException | InterruptedException | ExecutionException exception) { + throw new TddlRuntimeException( + ErrorCode.ERR_DDL_JOB_ERROR, exception.getMessage() + ); + //TODO, process exception. + } + if (status == 0) { + throw new TddlRuntimeException( + ErrorCode.ERR_DDL_JOB_ERROR, + "some physical ddls failed! maybe caused by self-killing of the multiple-phase ddl, or by other unknown causes!" + ); + } + FailPoint.injectRandomExceptionFromHint(executionContext); + FailPoint.injectRandomSuspendFromHint(executionContext); + } + + @Override + public void rollbackImpl(ExecutionContext executionContext) { + } + + public static String getTaskName() { + return "WaitTwoPhaseDdlTask"; + } + + @Override + public String remark() { + return "|wait TwoPhaseDdl, tableName: " + logicalTableName; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/ForeignKeyValidator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/ForeignKeyValidator.java index ac0326e91..8bdc5be6a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/ForeignKeyValidator.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/ForeignKeyValidator.java @@ -17,6 +17,8 @@ package com.alibaba.polardbx.executor.ddl.job.validator; import com.alibaba.polardbx.common.Engine; +import com.alibaba.polardbx.common.charset.CharsetName; +import com.alibaba.polardbx.common.charset.CollationName; import com.alibaba.polardbx.common.ddl.foreignkey.ForeignKeyData; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; @@ -27,6 +29,7 @@ import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; import com.alibaba.polardbx.optimizer.config.table.GeneratedColumnUtil; +import com.alibaba.polardbx.optimizer.config.table.GsiMetaManager; import com.alibaba.polardbx.optimizer.config.table.IndexMeta; import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; @@ -231,17 +234,10 @@ public static void validateFkConstraints(SqlCreateTable sqlCreateTableOrigin, St if (!data.refTableName.equalsIgnoreCase(tableName)) { // charset and collation must be same - - if (!StringUtils.equalsIgnoreCase(sqlCreateTable.getDefaultCharset(), - referringTableMeta.getDefaultCharset())) { - throw new TddlRuntimeException(ErrorCode.ERR_ADD_FK_CHARSET_COLLATION, - schemaName,
tableName, data.refSchema, data.refTableName); - } - if (!StringUtils.equalsIgnoreCase(sqlCreateTable.getDefaultCollation(), - referringTableMeta.getDefaultCollation())) { - throw new TddlRuntimeException(ErrorCode.ERR_ADD_FK_CHARSET_COLLATION, - schemaName, tableName, data.refSchema, data.refTableName); - } + validateCharset(schemaName, tableName, data.refSchema, data.refTableName, + sqlCreateTable.getDefaultCharset(), referringTableMeta.getDefaultCharset()); + validateCollate(schemaName, tableName, data.refSchema, data.refTableName, + sqlCreateTable.getDefaultCollation(), referringTableMeta.getDefaultCollation()); } // engine must be innodb @@ -258,21 +254,6 @@ public static void validateFkConstraints(SqlCreateTable sqlCreateTableOrigin, St def.getDataType().getTypeName().getLastName() .toUpperCase()); - String charSetName = def.getDataType().getCharSetName(); - String collationName = - def.getDataType().getCollationName(); - - RelDataTypeFactory factory = new TddlTypeFactoryImpl(TddlRelDataTypeSystemImpl.getInstance()); - boolean nullable = - Optional.ofNullable(def.getNotNull()).map(cn -> SqlColumnDeclaration.ColumnNull.NULL == cn) - .orElse(true); - RelDataType type = def.getDataType().deriveType(factory, nullable); - - if (charSetName == null && SqlTypeUtil.inCharFamily(type)) { - charSetName = sqlCreateTable.getDefaultCharset(); - collationName = sqlCreateTable.getDefaultCollation(); - } - if (!columnTypeName.equals( SqlDataTypeSpec.DrdsTypeName.from(column.getDataType().getStringSqlType().toUpperCase()))) { throw new TddlRuntimeException(ErrorCode.ERR_CHANGE_COLUMN_FK_CONSTRAINT, @@ -280,16 +261,15 @@ public static void validateFkConstraints(SqlCreateTable sqlCreateTableOrigin, St data.refTableName, data.constraint); } - if (charSetName != null && !StringUtils.equalsIgnoreCase(charSetName, - column.getDataType().getCharsetName().name())) { - throw new TddlRuntimeException(ErrorCode.ERR_ADD_FK_CHARSET_COLLATION, - schemaName, tableName, data.refSchema, data.refTableName); - } + Pair<String, String> charsetCollationName = getCharsetCollationName(def, sqlCreateTable); + if (charsetCollationName != null) { + String charSetName = charsetCollationName.getKey(); + String collationName = charsetCollationName.getValue(); - if (collationName != null && !StringUtils.equalsIgnoreCase(collationName, - column.getDataType().getCollationName().name())) { - throw new TddlRuntimeException(ErrorCode.ERR_ADD_FK_CHARSET_COLLATION, - schemaName, tableName, data.refSchema, data.refTableName); + validateCharset(schemaName, tableName, data.refSchema, data.refTableName, charSetName, + column.getDataType().getCharsetName().name()); + validateCollate(schemaName, tableName, data.refSchema, data.refTableName, collationName, + column.getDataType().getCollationName().name()); } // can not add fk on generated column @@ -319,6 +299,58 @@ public static void validateFkConstraints(SqlCreateTable sqlCreateTableOrigin, St } } + public static Pair<String, String> getCharsetCollationName(SqlColumnDeclaration def, + SqlCreateTable sqlCreateTable) { + String charSetName; + String collationName; + + CollationName collationNameOfColSpec = + CollationName.findCollationName(def.getDataType().getCollationName()); + CharsetName charsetNameOfColSpec = CollationName.getCharsetOf(collationNameOfColSpec); + + RelDataTypeFactory factory = new TddlTypeFactoryImpl(TddlRelDataTypeSystemImpl.getInstance()); + boolean nullable = + Optional.ofNullable(def.getNotNull()).map(cn -> SqlColumnDeclaration.ColumnNull.NULL == cn) + .orElse(true); + RelDataType type =
def.getDataType().deriveType(factory, nullable); + + if (charsetNameOfColSpec != null) { + charSetName = charsetNameOfColSpec.name(); + collationName = collationNameOfColSpec.name(); + } else if (SqlTypeUtil.inCharFamily(type)) { + charSetName = sqlCreateTable.getDefaultCharset(); + collationName = sqlCreateTable.getDefaultCollation(); + } else { + return null; + } + + return new Pair<>(charSetName, collationName); + } + + public static void validateCharset(String srcSchemaName, String srcTableName, String refSchemaName, + String refTableName, String srcCharSet, String refTableCharset) { + CharsetName srcTableCharsetName = CharsetName.of(srcCharSet); + CharsetName refTableCharsetName = CharsetName.of(refTableCharset); + + if (!StringUtils.equalsIgnoreCase(srcTableCharsetName.name(), + refTableCharsetName.name())) { + throw new TddlRuntimeException(ErrorCode.ERR_ADD_FK_CHARSET_COLLATION, + srcSchemaName, srcTableName, refSchemaName, refTableName); + } + } + + public static void validateCollate(String srcSchemaName, String srcTableName, String refSchemaName, + String refTableName, String srcCollate, String refTableCollate) { + CollationName srcTableCollateName = CollationName.of(srcCollate); + CollationName refTableCollateName = CollationName.of(refTableCollate); + + if (!StringUtils.equalsIgnoreCase(srcTableCollateName.name(), + refTableCollateName.name())) { + throw new TddlRuntimeException(ErrorCode.ERR_ADD_FK_CHARSET_COLLATION, + srcSchemaName, srcTableName, refSchemaName, refTableName); + } + } + public static void validateFkConstraints(SqlAlterTable sqlAlterTable, String schemaName, String tableName, ExecutionContext executionContext) { // Even disable foreign_key_checks, you still can't drop the index used for foreign key constrain. @@ -452,7 +484,7 @@ public static void validateFkConstraints(SqlAlterTable sqlAlterTable, String sch // BLOB/TEXT can not be fk columns for (String column : data.columns) { SqlDataTypeSpec.DrdsTypeName columnTypeName = SqlDataTypeSpec.DrdsTypeName.from( - tableMeta.getColumn(column).getField().getRelType().getSqlTypeName().getName().toUpperCase()); + tableMeta.getColumn(column).getDataType().getStringSqlType().toUpperCase()); if (columnTypeName.equals(SqlDataTypeSpec.DrdsTypeName.BLOB) || columnTypeName.equals( SqlDataTypeSpec.DrdsTypeName.TEXT)) { throw new TddlRuntimeException(ErrorCode.ERR_ADD_FK_CONSTRAINT, @@ -499,8 +531,8 @@ public static void validateFkConstraints(SqlAlterTable sqlAlterTable, String sch for (ColumnMeta column : referringTableMeta.getAllColumns()) { if (data.refColumns.stream().anyMatch(column.getName()::equalsIgnoreCase)) { SqlDataTypeSpec.DrdsTypeName columnTypeName = SqlDataTypeSpec.DrdsTypeName.from( - tableMeta.getColumn(columnMap.get(column.getName())).getField().getRelType().getSqlTypeName() - .getName().toUpperCase()); + tableMeta.getColumn(columnMap.get(column.getName())).getDataType().getStringSqlType() + .toUpperCase()); String charSetName = tableMeta.getColumn(columnMap.get(column.getName())).getDataType().getCharsetName().name(); @@ -536,7 +568,7 @@ public static void validateFkConstraints(SqlAlterTable sqlAlterTable, String sch refGeneratedReferencedColumns.addAll( GeneratedColumnUtil.getAllReferencedColumnByRef(referringTableMeta).keySet()); GeneratedColumnUtil.getAllReferencedColumnByRef(referringTableMeta).values() - .forEach(r -> refGeneratedReferencedColumns.addAll(r)); + .forEach(refGeneratedReferencedColumns::addAll); for (String column : data.refColumns) { if (refGeneratedReferencedColumns.contains(column)) { 
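+ // a column referenced by a generated column expression cannot back a foreign key, hence the error below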
throw new TddlRuntimeException(ErrorCode.ERR_ADD_FK_GENERATED_COLUMN, column); @@ -553,7 +585,7 @@ public static void validateFkConstraints(SqlAlterTable sqlAlterTable, String sch Set<String> generatedReferencedColumns = new TreeSet<>(String::compareToIgnoreCase); generatedReferencedColumns.addAll(GeneratedColumnUtil.getAllReferencedColumnByRef(tableMeta).keySet()); GeneratedColumnUtil.getAllReferencedColumnByRef(tableMeta).values() - .forEach(r -> generatedReferencedColumns.addAll(r)); + .forEach(generatedReferencedColumns::addAll); for (String column : data.columns) { if (generatedReferencedColumns.contains(column)) { throw new TddlRuntimeException(ErrorCode.ERR_ADD_FK_GENERATED_COLUMN, column); @@ -569,16 +601,37 @@ public static void validateAddReferredTableFkIndex(ForeignKeyData data, Executio // table referenced itself if (sqlCreateTable != null && data.refTableName.equalsIgnoreCase(tableName)) { + List<Pair<SqlIdentifier, SqlIndexDefinition>> keys = new ArrayList<>(); if (sqlCreateTable.getKeys() != null) { - for (Pair<SqlIdentifier, SqlIndexDefinition> key : sqlCreateTable.getKeys()) { - if (hasFkColumnIndex(key.getValue(), columnsHash)) { - return; - } + keys.addAll(sqlCreateTable.getKeys()); + } + if (sqlCreateTable.getUniqueKeys() != null) { + keys.addAll(sqlCreateTable.getUniqueKeys()); + } + if (sqlCreateTable.getClusteredKeys() != null) { + keys.addAll(sqlCreateTable.getClusteredKeys()); + } + if (sqlCreateTable.getGlobalKeys() != null) { + keys.addAll(sqlCreateTable.getGlobalKeys()); + } + if (sqlCreateTable.getGlobalUniqueKeys() != null) { + keys.addAll(sqlCreateTable.getGlobalUniqueKeys()); + } + + for (Pair<SqlIdentifier, SqlIndexDefinition> key : keys) { + List<String> indexColumnList = new ArrayList<>(); + key.getValue().getColumns().stream().map(c -> c.getColumnName().getLastName()) + .forEach(indexColumnList::add); + if (hasFkColumnIndex(indexColumnList, columnsHash)) { + return; + } } } if (sqlCreateTable.getPrimaryKey() != null) { - if (hasFkColumnIndex(sqlCreateTable.getPrimaryKey(), columnsHash)) { + List<String> indexColumnList = new ArrayList<>(); + sqlCreateTable.getPrimaryKey().getColumns().stream().map(c -> c.getColumnName().getLastName()) + .forEach(indexColumnList::add); + if (hasFkColumnIndex(indexColumnList, columnsHash)) { return; } } @@ -602,43 +655,34 @@ public static void validateAddReferredTableFkIndex(ForeignKeyData data, Executio List<IndexMeta> indexes = tableMeta.getIndexes(); for (IndexMeta im : indexes) { - boolean hasFkColumnIndex = true; - final List<String> indexColumnList = new ArrayList<>(); im.getKeyColumns().stream().map(ColumnMeta::getName).forEach(indexColumnList::add); - final Set<String> indexColumns = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); - indexColumns.addAll(indexColumnList); - - if (indexColumns.containsAll(columnsHash)) { - for (int i = 0; i < columnsHash.size(); i++) { - if (!columnsHash.contains(indexColumnList.get(i))) { - hasFkColumnIndex = false; - break; - } - } - } else { - hasFkColumnIndex = false; + if (hasFkColumnIndex(indexColumnList, columnsHash)) { + return; } + } - if (hasFkColumnIndex) { - return; + if (tableMeta.withGsi()) { + for (GsiMetaManager.GsiIndexMetaBean gsiIndexMetaBean : tableMeta.getGsiTableMetaBean().indexMap.values()) { + final List<String> indexColumnList = new ArrayList<>(); + gsiIndexMetaBean.indexColumns.forEach(c -> indexColumnList.add(c.columnName)); + if (hasFkColumnIndex(indexColumnList, columnsHash)) { + return; + } } } throw new TddlRuntimeException(ErrorCode.ERR_CREATE_FK_MISSING_INDEX); } - private static boolean hasFkColumnIndex(SqlIndexDefinition key, Set<String> columnsHash) { - boolean hasFkColumnIndex = true; - - final List<String> indexColumnList = new
ArrayList<>(); - key.getColumns().stream().map(c -> c.getColumnName().getLastName()) - .forEach(indexColumnList::add); - - final Set<String> indexColumns = new HashSet<>(indexColumnList); + private static boolean hasFkColumnIndex(List<String> indexColumnList, Set<String> columnsHash) { + boolean hasFkColumnIndex = false; + final Set<String> indexColumns = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); + indexColumns.addAll(indexColumnList); if (indexColumns.containsAll(columnsHash)) { + hasFkColumnIndex = true; for (int i = 0; i < columnsHash.size(); i++) { if (!columnsHash.contains(indexColumnList.get(i))) { hasFkColumnIndex = false; @@ -647,11 +691,7 @@ private static boolean hasFkColumnIndex(SqlIndexDefinition key, Set colu } } - if (hasFkColumnIndex) { - return true; - } - - return false; + return hasFkColumnIndex; } public static void validateDropReferredTableFkIndex(TableMeta tableMeta, String indexName) { @@ -674,7 +714,8 @@ public static void validateDropReferredTableFkIndex(TableMeta tableMeta, String HashSet<String> columnsHash = new HashSet<>(e.getValue().refColumns); boolean hasFkColumnIndex = true; - final Set<String> indexColumns = new HashSet<>(indexColumnList); + final Set<String> indexColumns = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); + indexColumns.addAll(indexColumnList); if (indexColumns.containsAll(columnsHash)) { for (int i = 0; i < columnsHash.size(); i++) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/GsiValidator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/GsiValidator.java index 698e2f075..b0db16c7a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/GsiValidator.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/GsiValidator.java @@ -25,23 +25,16 @@ import com.alibaba.polardbx.executor.common.ExecutorContext; import com.alibaba.polardbx.gms.metadb.limit.LimitValidator; import com.alibaba.polardbx.gms.metadb.table.IndexStatus; -import com.alibaba.polardbx.gms.metadb.table.IndexVisibility; import com.alibaba.polardbx.gms.topology.DbInfoManager; -import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.GlobalIndexMeta; -import com.alibaba.polardbx.optimizer.config.table.GsiMetaManager; import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.partition.PartitionInfo; import com.alibaba.polardbx.optimizer.rule.TddlRuleManager; import com.alibaba.polardbx.rule.TableRule; -import com.google.common.collect.ImmutableSet; import org.apache.commons.lang3.StringUtils; -import javax.annotation.concurrent.Immutable; -import java.util.EnumSet; import java.util.List; -import java.util.Objects; import static com.alibaba.polardbx.common.ddl.Attribute.RANDOM_SUFFIX_LENGTH_OF_PHYSICAL_TABLE_NAME; @@ -97,6 +90,15 @@ public static void validateGsi(String schemaName, String indexName) { } } + public static void validateGsiOrCci(String schemaName, String indexName) { + if (!TableValidator.checkTableIsGsiOrCci(schemaName, indexName)) { + final String errMsg = String.format( + "Global Secondary Index or Clustered Columnar Index %s doesn't exist", + indexName); + throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_EXECUTE, errMsg); + } + } + /** * validate if the indexName is exist */ @@ -106,7 +108,7 @@ public static void validateGsiExistence(String schemaName, ExecutionContext executionContext) { //
check if the indexName is already exist List<TableMeta> tableMetaList = - GlobalIndexMeta.getIndex(primaryTableName, schemaName, IndexStatus.ALL, executionContext); + GlobalIndexMeta.getIndex(primaryTableName, schemaName, IndexStatus.ALL, executionContext, true); for (TableMeta tableMeta : tableMetaList) { if (StringUtils.equalsIgnoreCase(tableMeta.getTableName(), indexName)) { return; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/IndexValidator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/IndexValidator.java index 09c3a115e..19e643e7e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/IndexValidator.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/IndexValidator.java @@ -104,6 +104,20 @@ public static void validateIndexNonExistence(String schemaName, String logicalTa } } + public static void validateColumnarIndexNonExistence(String schemaName, String logicalTableName) { + if (checkIfColumnarIndexExists(schemaName, logicalTableName)) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + "Columnar index on table '" + logicalTableName + "' already exists"); + } + } + + public static void validateColumnarIndexNumLimit(String schemaName, String logicalTableName, long limit) { + if (checkIfColumnarIndexNumLimit(schemaName, logicalTableName, limit)) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + "Number of columnar indexes on table '" + logicalTableName + "' reaches the limit " + limit); + } + } + public static void validateDropPrimaryKey(String indexName) { if (PRIMARY_KEY.equalsIgnoreCase(indexName)) { throw new TddlRuntimeException(ErrorCode.ERR_DROP_PRIMARY_KEY); @@ -119,6 +133,24 @@ protected Boolean invoke() { }.execute(); } + public static boolean checkIfColumnarIndexExists(String schemaName, String logicalTableName) { + return new TableInfoManagerDelegate<Boolean>(new TableInfoManager()) { + @Override + protected Boolean invoke() { + return tableInfoManager.checkIfColumnarIndexExists(schemaName, logicalTableName); + } + }.execute(); + } + + public static boolean checkIfColumnarIndexNumLimit(String schemaName, String logicalTableName, long limit) { + return new TableInfoManagerDelegate<Boolean>(new TableInfoManager()) { + @Override + protected Boolean invoke() { + return tableInfoManager.getColumnarIndexNum(schemaName, logicalTableName) >= limit; + } + }.execute(); + } + public static void validateDropLocalIndex(String schemaName, String logicalTableName, String indexName) { TableMeta tableMeta = OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(logicalTableName); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/JoinGroupValidator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/JoinGroupValidator.java index cf2389798..06b5a79d4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/JoinGroupValidator.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/JoinGroupValidator.java @@ -68,24 +68,25 @@ public static void validateJoinGroupExistence(String schemaName, String joinGrou } } - public static void validateJoinGroupInfo(String schemaName, String tableGroupName, String joinGroupName, String errMsg, ExecutionContext ec, Connection metaDbConn) { + public static void validateJoinGroupInfo(String schemaName, String tableGroupName, String joinGroupName, + String
errMsg, ExecutionContext ec, Connection metaDbConn) { TableGroupInfoManager tableGroupInfoManager = OptimizerContext.getContext(schemaName).getTableGroupInfoManager(); TableGroupConfig tableGroupConfig = tableGroupInfoManager.getTableGroupConfigByName(tableGroupName); if (tableGroupConfig != null && GeneralUtil.isNotEmpty(tableGroupConfig.getTables())) { - TablePartRecordInfoContext tablePartRecordInfoContext = tableGroupConfig.getTables().get(0); - String tableName = tablePartRecordInfoContext.getTableName(); + String tableName = tableGroupConfig.getTables().get(0); TableMeta tableMeta = ec.getSchemaManager(schemaName).getTable(tableName); if (tableMeta.isGsi()) { tableName = tableMeta.getGsiTableMetaBean().gsiMetaBean.tableName; } JoinGroupInfoRecord record = JoinGroupUtils.getJoinGroupInfoByTable(schemaName, tableName, metaDbConn); - if (record==null && StringUtils.isEmpty(joinGroupName)) { + if (record == null && StringUtils.isEmpty(joinGroupName)) { return; } - if ((record == null && StringUtils.isNotEmpty(joinGroupName)) || (record != null && StringUtils.isEmpty(joinGroupName))) { + if ((record == null && StringUtils.isNotEmpty(joinGroupName)) || (record != null && StringUtils.isEmpty( + joinGroupName))) { throw new TddlRuntimeException(ErrorCode.ERR_JOIN_GROUP_NOT_MATCH, errMsg); } boolean isValid = joinGroupName.equalsIgnoreCase(record.joinGroupName); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/SequenceValidator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/SequenceValidator.java index 17b2dbd17..de954b343 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/SequenceValidator.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/SequenceValidator.java @@ -21,6 +21,7 @@ import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.properties.DynamicConfig; import com.alibaba.polardbx.common.utils.TStringUtil; import com.alibaba.polardbx.config.ConfigDataMode; import com.alibaba.polardbx.executor.ddl.job.meta.delegate.TableInfoManagerDelegate; @@ -38,7 +39,7 @@ public class SequenceValidator { - public static void validate(SequenceBean sequence, ExecutionContext executionContext) { + public static void validate(SequenceBean sequence, ExecutionContext executionContext, boolean validateExistence) { if (sequence == null || sequence.getName() == null) { throw new SequenceException("Invalid sequence bean"); } @@ -53,7 +54,9 @@ public static void validate(SequenceBean sequence, ExecutionContext executionCon validateSimpleSequence(sequence, executionContext); - validateExistence(sequence); + if (validateExistence) { + validateExistence(sequence); + } String newSeqName = sequence.getNewName(); if (sequence.getKind() == SqlKind.RENAME_SEQUENCE && StringUtils.isEmpty(newSeqName)) { @@ -98,7 +101,7 @@ private static void validateNewSequence(SequenceBean sequence) { public static void validateSimpleSequence(SequenceBean sequence, ExecutionContext executionContext) { if (sequence == null || sequence.getKind() == null || - ConfigDataMode.isAllowSimpleSequence() || + DynamicConfig.getInstance().isAllowSimpleSequence() || executionContext.getParamManager().getBoolean(ConnectionParams.ALLOW_SIMPLE_SEQUENCE)) { return; } diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/StoragePoolValidator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/StoragePoolValidator.java index 16b9c3ae0..cfea815f8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/StoragePoolValidator.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/StoragePoolValidator.java @@ -36,22 +36,33 @@ public class StoragePoolValidator { + public static void validateStoragePool(String instId, List<String> involvedStorageInsts, Boolean checkAttached, + Boolean checkIdle) { + validateStoragePool(instId, involvedStorageInsts, false, checkAttached, checkIdle); + } + /** * check if storage inst is ready and idle. * WHEN CreateStoragePool, DropStoragePool, AlterStoragePoolAddNode */ - public static void validateStoragePool(String instId, List<String> involvedStorageInsts, Boolean checkAttached, + public static void validateStoragePool(String instId, List<String> involvedStorageInsts, Boolean checkAlive, + Boolean checkAttached, Boolean checkIdle) { - Connection conn = MetaDbDataSource.getInstance().getConnection(); - StorageInfoAccessor storageInfoAccessor = new StorageInfoAccessor(); - storageInfoAccessor.setConnection(conn); - List<StorageInfoRecord> storageInfoRecords = storageInfoAccessor.getStorageInfosByInstId(instId); - checkIfStorageInstReady(instId, conn, storageInfoRecords, involvedStorageInsts); - if (checkIdle) { - checkIfStorageInstIdle(instId, conn, storageInfoRecords, involvedStorageInsts); - } - if (checkAttached) { - checkIfStorageInstNotAttached(instId, conn, storageInfoRecords, involvedStorageInsts); + try (Connection conn = MetaDbDataSource.getInstance().getConnection()) { + StorageInfoAccessor storageInfoAccessor = new StorageInfoAccessor(); + storageInfoAccessor.setConnection(conn); + List<StorageInfoRecord> storageInfoRecords = storageInfoAccessor.getStorageInfosByInstId(instId); + if (checkAlive) { + checkIfStorageInstReady(instId, conn, storageInfoRecords, involvedStorageInsts); + } + if (checkIdle) { + checkIfStorageInstIdle(instId, conn, storageInfoRecords, involvedStorageInsts); + } + if (checkAttached) { + checkIfStorageInstNotAttached(instId, conn, storageInfoRecords, involvedStorageInsts); + } + } catch (Exception exception) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_FAILED, exception.getMessage()); } } @@ -63,11 +74,14 @@ public static void validateStoragePool(String instId, List involvedStora * check if storage inst is ready.
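+ * (the connection below is obtained via try-with-resources, so it is always released back to the MetaDB pool)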
*/ public static void validateStoragePoolReady(String instId, List<String> toCheckReadyStorageInsts) { - Connection conn = MetaDbDataSource.getInstance().getConnection(); - StorageInfoAccessor storageInfoAccessor = new StorageInfoAccessor(); - storageInfoAccessor.setConnection(conn); - List<StorageInfoRecord> storageInfoRecords = storageInfoAccessor.getStorageInfosByInstId(instId); - checkIfStorageInstReady(instId, conn, storageInfoRecords, toCheckReadyStorageInsts); + try (Connection conn = MetaDbDataSource.getInstance().getConnection()) { + StorageInfoAccessor storageInfoAccessor = new StorageInfoAccessor(); + storageInfoAccessor.setConnection(conn); + List<StorageInfoRecord> storageInfoRecords = storageInfoAccessor.getStorageInfosByInstId(instId); + checkIfStorageInstReady(instId, conn, storageInfoRecords, toCheckReadyStorageInsts); + } catch (Exception exception) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_FAILED, exception.getMessage()); + } } /** diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/TableValidator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/TableValidator.java index d74955eb2..99f237900 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/TableValidator.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/TableValidator.java @@ -65,6 +65,7 @@ import org.apache.calcite.sql.SqlCreateTable; import org.apache.calcite.sql.SqlDataTypeSpec; import org.apache.calcite.sql.SqlIdentifier; +import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlLiteral; import org.apache.calcite.sql.SqlModifyColumn; import org.apache.calcite.util.Pair; @@ -317,6 +318,15 @@ public static boolean checkTableIsGsi(String schemaName, String logicalTableName return tableMeta.isGsi(); } + public static boolean checkTableIsGsiOrCci(String schemaName, String logicalTableName) { + TableMeta tableMeta = + OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTableWithNull(logicalTableName); + if (tableMeta == null) { + return false; + } + return tableMeta.isGsi() || tableMeta.isColumnar(); + } + public static boolean checkTableWithGsi(String schemaName, String logicalTableName) { TableMeta tableMeta = OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTableWithNull(logicalTableName); @@ -326,6 +336,16 @@ public static boolean checkTableWithGsi(String schemaName, String logicalTableNa return tableMeta.withGsi(); } + public static void validateTableWithCCI(String schemaName, String logicalTableName, + ExecutionContext executionContext, SqlKind sqlKind) { + TableMeta tableMeta = + OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTableWithNull(logicalTableName); + boolean forbidDdlWithCci = executionContext.getParamManager().getBoolean(ConnectionParams.FORBID_DDL_WITH_CCI); + if (forbidDdlWithCci && tableMeta != null && tableMeta.withCci()) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_WITH_CCI, sqlKind.name()); + } + } + /** * Expect the logical table to exist, such as DROP TABLE.
*/ diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/ddl/RepartitionValidator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/ddl/RepartitionValidator.java index d74a61948..ff6bbb77c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/ddl/RepartitionValidator.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/job/validator/ddl/RepartitionValidator.java @@ -34,7 +34,6 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.partition.PartitionByDefinition; import com.alibaba.polardbx.optimizer.partition.PartitionInfo; -import com.alibaba.polardbx.optimizer.partition.PartitionInfoManager; import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil; import com.alibaba.polardbx.optimizer.rule.TddlRuleManager; import com.alibaba.polardbx.optimizer.sequence.SequenceManagerProxy; @@ -47,7 +46,6 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlEngineDagExecutor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlEngineDagExecutor.java index ec6616ec4..7ec26a7d8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlEngineDagExecutor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlEngineDagExecutor.java @@ -55,6 +55,8 @@ import com.alibaba.polardbx.executor.ddl.newengine.sync.DdlResponseSyncAction; import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlHelper; import com.alibaba.polardbx.executor.ddl.newengine.utils.TaskHelper; +import com.alibaba.polardbx.executor.ddl.sync.JobRequest; +import com.alibaba.polardbx.executor.ddl.workqueue.FastCheckerThreadPool; import com.alibaba.polardbx.executor.ddl.newengine.utils.TaskHelper; import com.alibaba.polardbx.executor.sync.ddl.RemoteDdlTaskSyncAction; import com.alibaba.polardbx.executor.sync.ddl.RemoteDdlTaskSyncAction; @@ -65,6 +67,7 @@ import com.alibaba.polardbx.gms.metadb.misc.DdlEngineRecord; import com.alibaba.polardbx.gms.metadb.misc.DdlEngineTaskRecord; import com.alibaba.polardbx.gms.sync.GmsSyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.DdlContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.statis.SQLRecord; @@ -92,6 +95,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; +import static com.alibaba.polardbx.common.properties.ConnectionProperties.SKIP_DDL_RESPONSE; import static com.alibaba.polardbx.common.properties.ConnectionProperties.SKIP_DDL_RESPONSE; import static com.alibaba.polardbx.common.ddl.newengine.DdlConstants.DDL_LEADER_ELECTION_NAME; import static com.alibaba.polardbx.common.ddl.newengine.DdlConstants.DDL_LEADER_TTL_IN_MILLIS; @@ -366,7 +370,7 @@ private void onRunning() { private void onRollingBack() { if (!allowRollback()) { - updateDdlState(DdlState.ROLLBACK_RUNNING, DdlState.ROLLBACK_PAUSED); + updateDdlState(DdlState.ROLLBACK_RUNNING, DdlState.PAUSED); return; } // Load tasks reversely if needed. @@ -529,6 +533,9 @@ private void onFinished() { // Save the result in memory as the last result. 
saveLastResult(response); + //clean fastchecker info + FastCheckerThreadPool.getInstance().invalidateTaskInfo(ddlContext.getJobId()); + // Clean the job up. try { ddlJobManager.removeJob(ddlContext.getJobId()); @@ -537,7 +544,11 @@ private void onFinished() { } if (ddlContext.getExtraCmds().containsKey(SKIP_DDL_RESPONSE)) { - return; + try { + TimeUnit.MILLISECONDS.sleep(5000L); + } catch (InterruptedException ex) { + throw new TddlNestableRuntimeException(ex); + } } // Respond to the worker. @@ -917,7 +928,7 @@ private void respond(Response response) { GmsSyncManagerHelper .sync(responseSyncAction, ddlContext.getSchemaName(), ddlContext.getResponseNode()); } else { - GmsSyncManagerHelper.sync(responseSyncAction, ddlContext.getSchemaName()); + GmsSyncManagerHelper.sync(responseSyncAction, ddlContext.getSchemaName(), SyncScope.ALL); } } catch (Throwable t) { StringBuilder errMsg = new StringBuilder(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlEngineRemoteTaskExecutor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlEngineRemoteTaskExecutor.java index d8031840f..a839482f8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlEngineRemoteTaskExecutor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlEngineRemoteTaskExecutor.java @@ -38,7 +38,8 @@ public class DdlEngineRemoteTaskExecutor { private static final Logger LOGGER = SQLRecorderLogger.ddlEngineLogger; - public static void executeRemoteTask(String schemaName, Long jobId, Long taskId, ExecutionContext executionContext){ + public static void executeRemoteTask(String schemaName, Long jobId, Long taskId, + ExecutionContext executionContext) { LoggerUtil.buildMDC(schemaName); LOGGER.info(String.format("start execute/rollback remote DDL TASK, jobId:%s, taskId:%s", jobId, taskId)); @@ -46,10 +47,10 @@ public static void executeRemoteTask(String schemaName, Long jobId, Long taskId, Optional leaseRecordOptional = new LeaseManagerImpl().acquire( schemaName, String.valueOf(taskId), DDL_LEADER_TTL_IN_MILLIS); final DdlEngineDagExecutor dagExecutor; - if(leaseRecordOptional.isPresent()){ + if (leaseRecordOptional.isPresent()) { dagExecutor = DdlEngineDagExecutor.create(jobId, executionContext); dagExecutor.getJobLease().set(leaseRecordOptional.get()); - }else { + } else { final String errMsg = "failed to acquire DDL TASK lease. 
task_id:" + taskId; LOGGER.error(errMsg); throw new TddlNestableRuntimeException(errMsg); @@ -61,11 +62,11 @@ public static void executeRemoteTask(String schemaName, Long jobId, Long taskId, try { jobLeaseSchedulerThread.scheduleAtFixedRate( - AsyncTask.build(()->{ + AsyncTask.build(() -> { Optional optional = new LeaseManagerImpl().extend(String.valueOf(taskId)); - if(optional.isPresent()){ + if (optional.isPresent()) { dagExecutor.getJobLease().compareAndSet(dagExecutor.getJobLease().get(), optional.get()); - }else { + } else { //extend job lease failed, so shutdown the scheduler thread jobLeaseSchedulerThread.shutdown(); } @@ -78,7 +79,7 @@ public static void executeRemoteTask(String schemaName, Long jobId, Long taskId, //execute task dagExecutor.executeSingleTask(taskId); LOGGER.info(String.format("execute/rollback remote DDL TASK success, jobId:%s, taskId:%s", jobId, taskId)); - }finally { + } finally { new LeaseManagerImpl().release(String.valueOf(taskId)); jobLeaseSchedulerThread.shutdown(); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlEngineRequester.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlEngineRequester.java index 2dc96d2b5..dce98748d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlEngineRequester.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlEngineRequester.java @@ -41,6 +41,7 @@ import com.google.common.cache.CacheBuilder; import com.google.common.collect.Lists; import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang.StringUtils; import java.time.Duration; import java.util.ArrayList; @@ -49,6 +50,7 @@ import java.util.Map; import java.util.stream.Collectors; +import static com.alibaba.polardbx.common.TddlConstants.INFORMATION_SCHEMA; import static com.alibaba.polardbx.common.ddl.newengine.DdlConstants.LESS_WAITING_TIME; import static com.alibaba.polardbx.common.ddl.newengine.DdlConstants.MORE_WAITING_TIME; import static com.alibaba.polardbx.common.ddl.newengine.DdlType.ALTER_TABLEGROUP; @@ -102,6 +104,11 @@ public long executeSubJob(long parentJobId, long parentTaskId, boolean forRollba } public void execute() { + if (StringUtils.equalsIgnoreCase(ddlContext.getSchemaName(), INFORMATION_SCHEMA)) { + throw DdlHelper.logAndThrowError(LOGGER, + "The DDL job can not be executed under the database 'information_schema'"); + } + ddlContext.setResources(ddlJob.getExcludeResources()); // Create a new job and put it in the queue. 
@@ -114,7 +121,7 @@ public void execute() { if (ddlContext.isAsyncMode()) { return; } - respond(ddlRequest, ddlJobManager, executionContext, true, false); + respond(ddlRequest, ddlJobManager, executionContext, true, false, ddlContext.isEnableTrace()); } public static DdlRequest notifyLeader(String schemaName, List<Long> jobId) { @@ -145,9 +152,11 @@ public static void respond(DdlRequest ddlRequest, DdlJobManager ddlJobManager, ExecutionContext executionContext, boolean checkResponseInMemory, - boolean rollbackOpt) { + boolean rollbackOpt, + boolean forceCheckResInMemory) { DdlResponse ddlResponse = - waitForComplete(ddlRequest.getJobIds(), ddlJobManager, checkResponseInMemory, rollbackOpt); + waitForComplete(ddlRequest.getJobIds(), ddlJobManager, checkResponseInMemory, rollbackOpt, + forceCheckResInMemory); Response response = ddlResponse.getResponse(ddlRequest.getJobIds().get(0)); @@ -176,7 +185,8 @@ public static void respond(DdlRequest ddlRequest, public static DdlResponse waitForComplete(List<Long> jobIds, DdlJobManager ddlJobManager, boolean checkResponseInMemory, - boolean rollbackOpt) { + boolean rollbackOpt, + boolean forceCheckResInMemory) { DdlResponse ddlResponse = new DdlResponse(); // Wait until the response is received or the job(s) failed. @@ -197,7 +207,7 @@ public static DdlResponse waitForComplete(List<Long> jobIds, // Only a worker checks if the job(s) are paused or failed, but leader // wasn't able to respond to the worker. - if (totalWaitingTime > checkInterval) { + if (totalWaitingTime > checkInterval && !forceCheckResInMemory) { // Check if the job(s) have been pended. if (ddlJobManager.checkRecords(ddlResponse, jobIds, rollbackOpt)) { // Double check to avoid miss message @@ -249,43 +259,37 @@ public static void pauseJob(Long jobId, ExecutionContext executionContext) { return; } DdlJobManager ddlJobManager = new DdlJobManager(); - List<DdlEngineRecord> records = ddlJobManager.fetchRecords(Lists.newArrayList(jobId)); - if (CollectionUtils.isEmpty(records)) { - return; - } - pauseJobs(records, false, false, executionContext); + DdlEngineRecord record = ddlJobManager.fetchRecordByJobId(jobId); + pauseJob(record, false, false, executionContext); } - public static int pauseJobs(List<DdlEngineRecord> records, boolean enableOperateSubJob, - boolean enableContinueRunningSubJob, ExecutionContext executionContext) { - int countDone = 0; - for (DdlEngineRecord record : records) { - if (record.isSubJob() && !enableOperateSubJob) { - throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Operation on subjob is not allowed"); - } + public static int pauseJob(DdlEngineRecord record, boolean enableOperateSubJob, + boolean enableContinueRunningSubJob, ExecutionContext executionContext) { + if (record == null) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "The ddl job does not exist"); + } - List<Long> pausedJobs = new ArrayList<>(); - List<String> traceIds = new ArrayList<>(); + if (record.isSubJob() && !enableOperateSubJob) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Operation on subjob is not allowed"); + } - if (enableOperateSubJob && enableContinueRunningSubJob) { - pauseJobs(record, true, pausedJobs, traceIds, true, executionContext); - } else { - pauseJobs(record, true, pausedJobs, traceIds, false, executionContext); - } + List<Long> pausedJobs = new ArrayList<>(); + List<String> traceIds = new ArrayList<>(); - Collections.reverse(pausedJobs); - DdlHelper.interruptJobs(record.schemaName, pausedJobs); - DdlHelper.killActivePhyDDLs(record.schemaName, traceIds); + if (enableOperateSubJob &&
enableContinueRunningSubJob) { + pauseJob(record, true, pausedJobs, traceIds, true, executionContext); + } else { + pauseJob(record, true, pausedJobs, traceIds, false, executionContext); + } - DdlEngineRequester.notifyLeader(executionContext.getSchemaName(), pausedJobs); + Collections.reverse(pausedJobs); + DdlEngineRequester.notifyLeader(executionContext.getSchemaName(), pausedJobs); - countDone += pausedJobs.size(); - } - return countDone; + return pausedJobs.size(); } - private static void pauseJobs(DdlEngineRecord record, boolean subJob, List<Long> pausedJobs, List<String> traceIds, - Boolean continueRunningSubJob, ExecutionContext executionContext) { + private static void pauseJob(DdlEngineRecord record, boolean subJob, List<Long> pausedJobs, List<String> traceIds, + Boolean continueRunningSubJob, ExecutionContext executionContext) { DdlState before = DdlState.valueOf(record.state); DdlState after = DdlState.PAUSE_JOB_STATE_TRANSFER.get(before); @@ -313,6 +317,10 @@ private static void pauseJobs(DdlEngineRecord record, boolean subJob, List LOGGER.info(String.format("revert job %d", record.jobId)); pausedJobs.add(record.jobId); traceIds.add(record.traceId); + + // interrupt the subjob + DdlHelper.interruptJobs(record.schemaName, Collections.singletonList(record.jobId)); + DdlHelper.killActivePhyDDLs(record.schemaName, record.traceId); } return; } @@ -323,6 +331,10 @@ private static void pauseJobs(DdlEngineRecord record, boolean subJob, List pausedJobs.add(record.jobId); traceIds.add(record.traceId); + // interrupt the parent job first + DdlHelper.interruptJobs(record.schemaName, Collections.singletonList(record.jobId)); + DdlHelper.killActivePhyDDLs(record.schemaName, record.traceId); + if (subJob) { pauseSubJobs(record.jobId, pausedJobs, traceIds, continueRunningSubJob, executionContext); } @@ -343,7 +355,7 @@ private static void pauseSubJobs(long jobId, List pausedJobs, List List<DdlEngineRecord> records = schedulerManager.fetchRecords(subJobIds); for (DdlEngineRecord record : GeneralUtil.emptyIfNull(records)) { - pauseJobs(record, false, pausedJobs, traceIds, false, executionContext); + pauseJob(record, false, pausedJobs, traceIds, false, executionContext); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlEngineScheduler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlEngineScheduler.java index 03002fb2c..4642ab233 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlEngineScheduler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlEngineScheduler.java @@ -31,6 +31,7 @@ import com.alibaba.polardbx.common.utils.thread.NamedThreadFactory; import com.alibaba.polardbx.common.utils.thread.ServerThreadPool; import com.alibaba.polardbx.executor.changeset.ChangeSetApplyExecutorMap; +import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlEngineSchedulerManager; import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlJobManager; import com.alibaba.polardbx.executor.ddl.newengine.sync.DdlRequest; import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlHelper; @@ -43,18 +44,42 @@ import com.alibaba.polardbx.gms.metadb.misc.DdlEngineRecord; import com.alibaba.polardbx.statistics.SQLRecorderLogger; import com.google.common.collect.Sets; +import org.apache.calcite.rel.metadata.BuiltInMetadata; import org.apache.commons.lang3.StringUtils; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.concurrent.*; +import
java.util.concurrent.BlockingQueue; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorCompletionService; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.Semaphore; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; -import static com.alibaba.polardbx.common.ddl.newengine.DdlConstants.*; +import static com.alibaba.polardbx.common.ddl.newengine.DdlConstants.DDL_ARCHIVE_CLEANER_NAME; +import static com.alibaba.polardbx.common.ddl.newengine.DdlConstants.DDL_DISPATCHER_NAME; +import static com.alibaba.polardbx.common.ddl.newengine.DdlConstants.DDL_LEADER_ELECTION_NAME; +import static com.alibaba.polardbx.common.ddl.newengine.DdlConstants.DDL_LEADER_KEY; +import static com.alibaba.polardbx.common.ddl.newengine.DdlConstants.DDL_LEADER_TTL_IN_MILLIS; +import static com.alibaba.polardbx.common.ddl.newengine.DdlConstants.DDL_SCHEDULER_NAME; +import static com.alibaba.polardbx.common.ddl.newengine.DdlConstants.DEFAULT_LOGICAL_DDL_PARALLELISM; +import static com.alibaba.polardbx.common.ddl.newengine.DdlConstants.DEFAULT_PAUSED_DDL_RESCHEDULE_INTERVAL_IN_MINUTES; +import static com.alibaba.polardbx.common.ddl.newengine.DdlConstants.DEFAULT_RUNNING_DDL_RESCHEDULE_INTERVAL_IN_MINUTES; +import static com.alibaba.polardbx.common.ddl.newengine.DdlConstants.MEDIAN_WAITING_TIME; +import static com.alibaba.polardbx.common.ddl.newengine.DdlConstants.MORE_WAITING_TIME; +import static com.alibaba.polardbx.common.ddl.newengine.DdlConstants.ROLLBACK_DDL_WAIT_TIMES; import static com.alibaba.polardbx.common.properties.ConnectionProperties.LOGICAL_DDL_PARALLELISM; import static com.alibaba.polardbx.gms.topology.SystemDbHelper.DEFAULT_DB_NAME; @@ -382,6 +407,12 @@ private void uniqueOffer(DdlEngineRecord record) { if (DdlEngineDagExecutorMap.contains(schemaName, record.jobId)) { return; } + // double check ddl record exists + List<DdlEngineRecord> records + = ddlJobManager.fetchRecords(Collections.singletonList(record.jobId)); + if (records == null || records.isEmpty()) { + return; + } boolean success = ddlJobDeliveryQueue.offer(record); if (!success) { LOGGER.error(String.format( diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlEngineStats.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlEngineStats.java index b6ca01d59..87e858447 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlEngineStats.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlEngineStats.java @@ -46,6 +46,15 @@ public class DdlEngineStats { public static Metric METRIC_CHECKER_ROWS_FINISHED = new Metric("CHECKER_ROWS_FINISHED"); public static Metric METRIC_CHECKER_TIME_MILLIS = new Metric("CHECKER_TIME_MILLIS"); + public static Metric METRIC_FASTCHECKER_TASK_RUNNING = new Metric("FASTCHECKER_TASK_RUNNING"); + public static Metric METRIC_FASTCHECKER_TASK_WAITING = new Metric("FASTCHECKER_TASK_WAITING"); + + public static Metric METRIC_FASTCHECKER_THREAD_POOL_NOW_SIZE = new Metric("FASTCHECKER_THREAD_POOL_NOW_SIZE"); + + public static Metric
METRIC_FASTCHECKER_THREAD_POOL_MAX_SIZE = new Metric("FASTCHECKER_THREAD_POOL_MAX_SIZE"); + + public static Metric METRIC_FASTCHECKER_THREAD_POOL_NUM = new Metric("FASTCHECKER_THREAD_POOL_NUM"); + public static Metric METRIC_BACKFILL_ROWS_FINISHED = new Metric("BACKFILL_ROWS_FINISHED"); public static Metric METRIC_BACKFILL_ROWS_SPEED = new Metric("BACKFILL_ROWS_SPEED"); public static Metric METRIC_BACKFILL_TIME_MILLIS = new Metric("BACKFILL_TIME_MILLIS"); @@ -58,6 +67,8 @@ public class DdlEngineStats { public static Metric METRIC_CHANGESET_APPLY_PARALLELISM = new Metric("CHANGESET_APPLY_PARALLELISM"); public static Metric METRIC_CHANGESET_APPLY_ROWS_SPEED = new Metric("CHANGESET_APPLY_ROWS_SPEED"); + public static Metric METRIC_TWO_PHASE_DDL_PARALLISM = new Metric("TWO_PHASE_DDL_PARALLISM"); + public static Metric METRIC_THROTTLE_RATE = new Metric.DelegatorMetric("THROTTLE_RATE", (x) -> Throttle.getTotalThrottleRate()); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlPlanScheduler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlPlanScheduler.java index 301447732..a51d6b7da 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlPlanScheduler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/DdlPlanScheduler.java @@ -33,6 +33,7 @@ import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; + import static com.alibaba.polardbx.executor.ddl.newengine.utils.DdlHelper.getInstConfigAsLong; public class DdlPlanScheduler { @@ -78,11 +79,11 @@ public void run() { return; } synchronized (DdlPlanScheduler.class) { - for (DdlPlanRecord record: ddlPlanRecordList){ + for (DdlPlanRecord record : ddlPlanRecordList) { try { RebalanceDdlPlanManager rebalanceDdlPlanManager = new RebalanceDdlPlanManager(); rebalanceDdlPlanManager.process(record); - }catch (Exception e){ + } catch (Exception e) { LOGGER.error("process ddl plan error, planId:" + record.getPlanId(), e); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/cross/GenericPhyObjectRecorder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/cross/GenericPhyObjectRecorder.java index 57dcd348e..cd98e2cbf 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/cross/GenericPhyObjectRecorder.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/cross/GenericPhyObjectRecorder.java @@ -100,10 +100,6 @@ public boolean checkIfDone() { return false; } - if (!executionContext.needToRenamePhyTables()) { - return true; - } - if (!ExecUtils.hasLeadership(schemaName)) { // The node doesn't have leadership any longer, so let's terminate current job. 
String nodeInfo = TddlNode.getNodeInfo(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/dag/TaskScheduler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/dag/TaskScheduler.java index f713bf9cf..fb84947a5 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/dag/TaskScheduler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/dag/TaskScheduler.java @@ -16,12 +16,9 @@ package com.alibaba.polardbx.executor.ddl.newengine.dag; -import com.alibaba.polardbx.executor.ddl.job.task.backfill.AlterTableGroupBackFillTask; -import com.alibaba.polardbx.executor.ddl.job.task.backfill.MoveTableBackFillTask; -import com.alibaba.polardbx.executor.ddl.newengine.DdlEngineStats; -import com.google.common.base.Joiner; import com.alibaba.polardbx.common.model.lifecycle.AbstractLifecycle; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask; +import com.google.common.base.Joiner; import org.apache.commons.collections.CollectionUtils; import java.util.ArrayList; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/AbstractDdlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/AbstractDdlTask.java index 2599112e4..f028c3710 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/AbstractDdlTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/AbstractDdlTask.java @@ -37,6 +37,7 @@ import java.sql.Connection; import java.util.ArrayList; import java.util.List; +import java.util.function.BooleanSupplier; import static com.alibaba.polardbx.executor.utils.failpoint.FailPointKey.FP_EACH_DDL_TASK_FAIL_ONCE; import static com.alibaba.polardbx.executor.utils.failpoint.FailPointKey.FP_FAIL_ON_DDL_TASK_NAME; @@ -68,14 +69,14 @@ public AbstractDdlTask(final String schemaName) { @Override public void execute(ExecutionContext executionContext) { beginExecuteTs = System.nanoTime(); - beforeTransaction(executionContext); + skipExecuteWrapper("beforeTransaction", () -> beforeTransaction(executionContext)); final DdlTask currentTask = this; DdlEngineAccessorDelegate delegate = new DdlEngineAccessorDelegate() { @Override protected Integer invoke() { int result = 0; - duringTransaction(getConnection(), executionContext); + skipExecuteWrapper("duringTransaction", () -> duringTransaction(getConnection(), executionContext)); DdlEngineTaskRecord taskRecord = TaskHelper.toDdlEngineTaskRecord(currentTask); taskRecord.setState(DdlTaskState.SUCCESS.name()); result += engineTaskAccessor.updateTask(taskRecord); @@ -96,7 +97,7 @@ protected Integer invoke() { }; delegate.execute(); //will not execute this if there's a failure - onExecutionSuccess(executionContext); + skipExecuteWrapper("onExecutionSuccess", () -> onExecutionSuccess(executionContext)); currentTask.setState(DdlTaskState.SUCCESS); endExecuteTs = System.nanoTime(); } @@ -119,13 +120,15 @@ public void handleError(ExecutionContext executionContext) { public void rollback(ExecutionContext executionContext) { beginRollbackTs = System.nanoTime(); final DdlTask currentTask = this; - beforeRollbackTransaction(executionContext); + skipRollbackWrapper("beforeRollbackTransaction", () -> beforeRollbackTransaction(executionContext)); DdlEngineAccessorDelegate delegate = new DdlEngineAccessorDelegate() { @Override protected Integer invoke() { int result = 0; - 
duringRollbackTransaction(getConnection(), executionContext); + skipRollbackWrapper( + "duringRollbackTransaction", + () -> duringRollbackTransaction(getConnection(), executionContext)); DdlEngineTaskRecord taskRecord = TaskHelper.toDdlEngineTaskRecord(currentTask); if (executionContext.getDdlContext().isRollbackToReady()) { taskRecord.setState(DdlTaskState.READY.name()); @@ -150,7 +153,7 @@ protected Integer invoke() { } }; delegate.execute(); - onRollbackSuccess(executionContext); + skipRollbackWrapper("onRollbackSuccess", () -> onRollbackSuccess(executionContext)); currentTask.setState(DdlTaskState.ROLLBACK_SUCCESS); endRollbackTs = System.nanoTime(); } @@ -368,13 +371,48 @@ private String cost(String format, Long begin, Long end) { } private String color(DdlTaskState ddlTaskState) { - if (ddlTaskState == DdlTaskState.SUCCESS) { + if (ddlTaskState == DdlTaskState.SUCCESS && !isSkipExecute()) { return "fillcolor=\"#90ee90\" style=filled"; } else if (ddlTaskState == DdlTaskState.DIRTY) { return "fillcolor=\"#fff68f\" style=filled"; - } else if (ddlTaskState == DdlTaskState.ROLLBACK_SUCCESS) { + } else if (ddlTaskState == DdlTaskState.ROLLBACK_SUCCESS && !isSkipRollback()) { return "fillcolor=\"#f08080\" style=filled"; } return ""; } + + private void skipWrapper(BooleanSupplier skip, String actMsg, Runnable action) { + if (skip.getAsBoolean()) { + LOGGER.warn( + String.format( + "%s for %s is skipped because of hint SKIP_DDL_TASKS = %s", + actMsg, + this.getName(), + this.getName())); + } else { + action.run(); + } + } + + private void skipExecuteWrapper(String step, Runnable action) { + skipWrapper(this::isSkipExecute, "execute " + step, action); + } + + private void skipRollbackWrapper(String step, Runnable action) { + skipWrapper(this::isSkipRollback, "rollback " + step, action); + } + + /** + * FOR TEST USE ONLY !! + */ + protected boolean isSkipExecute() { + return false; + } + + /** + * FOR TEST USE ONLY !! + */ + protected boolean isSkipRollback() { + return false; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4CreateColumnarIndex.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4CreateColumnarIndex.java new file mode 100644 index 000000000..dd94a0fdc --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4CreateColumnarIndex.java @@ -0,0 +1,46 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
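The AbstractDdlTask hunk above routes every lifecycle hook through skipExecuteWrapper/skipRollbackWrapper. A standalone sketch of that gate, combining a BooleanSupplier with a Runnable (names simplified, not the engine class):

```java
import java.util.function.BooleanSupplier;

abstract class SkippableTask {
    // Either logs a skip or runs the action; the supplier is evaluated per hook.
    private void skipWrapper(BooleanSupplier skip, String actMsg, Runnable action) {
        if (skip.getAsBoolean()) {
            System.out.printf("%s skipped by SKIP_DDL_TASKS hint%n", actMsg);
        } else {
            action.run();
        }
    }

    protected boolean isSkipExecute() { return false; } // overridden for tests

    void execute() {
        skipWrapper(this::isSkipExecute, "beforeTransaction", this::beforeTransaction);
        skipWrapper(this::isSkipExecute, "duringTransaction", this::duringTransaction);
        skipWrapper(this::isSkipExecute, "onExecutionSuccess", this::onExecutionSuccess);
    }

    protected abstract void beforeTransaction();
    protected abstract void duringTransaction();
    protected abstract void onExecutionSuccess();
}
```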
+ */ + +package com.alibaba.polardbx.executor.ddl.newengine.job.wrapper; + +import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableShowTableMetaTask; +import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcCreateColumnarIndexTask; +import com.alibaba.polardbx.executor.ddl.job.task.columnar.*; +import com.alibaba.polardbx.executor.ddl.job.task.gsi.CciUpdateIndexStatusTask; +import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask; +import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; +import lombok.Data; + +@Data +public class ExecutableDdlJob4CreateColumnarIndex extends ExecutableDdlJob { + + private CreateColumnarIndexValidateTask createColumnarIndexValidateTask; + private AddColumnarTablesPartitionInfoMetaTask addColumnarTablesPartitionInfoMetaTask; + private CdcCreateColumnarIndexTask cdcCreateColumnarIndexTask; + private CreateMockColumnarIndexTask createMockColumnarIndexTask; + private CreateTableShowTableMetaTask createTableShowTableMetaTask; + private InsertColumnarIndexMetaTask insertColumnarIndexMetaTask; + private WaitColumnarTableCreationTask waitColumnarTableCreationTask; + private CciUpdateIndexStatusTask changeCreatingToChecking; + private CreateCheckCciTask createCheckCciTask; + private CciUpdateIndexStatusTask cciUpdateIndexStatusTask; + + /** + * last task + */ + private DdlTask lastTask; + +} \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4CreateGsi.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4CreateGsi.java index 34019f792..0bea71c90 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4CreateGsi.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4CreateGsi.java @@ -22,7 +22,6 @@ import com.alibaba.polardbx.executor.ddl.job.task.gsi.CreateGsiPhyDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.gsi.CreateGsiValidateTask; import com.alibaba.polardbx.executor.ddl.job.task.gsi.GsiInsertIndexMetaTask; -import com.alibaba.polardbx.executor.ddl.job.task.gsi.GsiUpdateIndexStatusTask; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; import lombok.Data; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4CreateOssPartitionTable.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4CreateOssPartitionTable.java index 8099c61ea..ad16edaec 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4CreateOssPartitionTable.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4CreateOssPartitionTable.java @@ -16,15 +16,15 @@ package com.alibaba.polardbx.executor.ddl.newengine.job.wrapper; -import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.BindingArchiveTableMetaTask; -import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.CreateOssTableAddTablesMetaTask; -import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.CreateOssTableFormatTask; -import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.CreateOssTableGenerateDataTask; import 
com.alibaba.polardbx.executor.ddl.job.task.basic.CreatePartitionTableValidateTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableAddTablesPartitionInfoMetaTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTablePhyDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.CreateTableShowTableMetaTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask; +import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.BindingArchiveTableMetaTask; +import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.CreateOssTableAddTablesMetaTask; +import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.CreateOssTableFormatTask; +import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.CreateOssTableGenerateDataTask; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; import lombok.Data; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4DropColumnarIndex.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4DropColumnarIndex.java new file mode 100644 index 000000000..3e48f9d32 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4DropColumnarIndex.java @@ -0,0 +1,53 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
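The typed wrappers added in this section (ExecutableDdlJob4CreateColumnarIndex above, ExecutableDdlJob4DropColumnarIndex below) keep a named field per task in addition to the DAG itself. A hypothetical miniature of the pattern, showing why the typed handles exist:

```java
import java.util.ArrayList;
import java.util.List;

class Task { final String name; Task(String name) { this.name = name; } }

class JobWrapper {
    private final List<Task> dag = new ArrayList<>();
    private Task validateTask;   // typed handles, like the @Data fields above
    private Task finalSyncTask;

    void addSequentialTask(Task t) { dag.add(t); }

    void wire(Task validate, Task sync) {
        addSequentialTask(validate);
        addSequentialTask(sync);
        this.validateTask = validate; // same objects, but retrievable later
        this.finalSyncTask = sync;    // without searching the DAG and casting
    }

    Task getValidateTask() { return validateTask; }
}
```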
+ */ + +package com.alibaba.polardbx.executor.ddl.newengine.job.wrapper; + +import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask; +import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcDropColumnarIndexTask; +import com.alibaba.polardbx.executor.ddl.job.task.columnar.CciSchemaEvolutionTask; +import com.alibaba.polardbx.executor.ddl.job.task.columnar.DropColumnarTableRemoveMetaTask; +import com.alibaba.polardbx.executor.ddl.job.task.columnar.DropMockColumnarIndexTask; +import com.alibaba.polardbx.executor.ddl.job.task.gsi.DropColumnarTableHideTableMetaTask; +import com.alibaba.polardbx.executor.ddl.job.task.gsi.GsiDropCleanUpTask; +import com.alibaba.polardbx.executor.ddl.job.task.gsi.ValidateGsiExistenceTask; +import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; +import lombok.Data; + +/** + * plan for dropping columnar index + */ +@Data +public class ExecutableDdlJob4DropColumnarIndex extends ExecutableDdlJob { + + private ValidateGsiExistenceTask validateTask; + + private DropColumnarTableHideTableMetaTask dropColumnarTableHideTableMetaTask; + + private GsiDropCleanUpTask gsiDropCleanUpTask; + + private TableSyncTask tableSyncTaskAfterCleanUpGsiIndexesMeta; + + private CdcDropColumnarIndexTask cdcDropColumnarIndexTask; + + private DropMockColumnarIndexTask dropMockColumnarIndexTask; + + private DropColumnarTableRemoveMetaTask dropColumnarTableRemoveMetaTask; + + private CciSchemaEvolutionTask cciSchemaEvolutionTask; + + private TableSyncTask finalSyncTask; +} \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4DropGsi.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4DropGsi.java index b33a28068..960ef89a1 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4DropGsi.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4DropGsi.java @@ -26,7 +26,7 @@ import lombok.Data; /** - * plain drop GSI + * plan drop GSI */ @Data public class ExecutableDdlJob4DropGsi extends ExecutableDdlJob { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4DropPartitionGsi.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4DropPartitionGsi.java index 418a49b15..f73d40b7a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4DropPartitionGsi.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/job/wrapper/ExecutableDdlJob4DropPartitionGsi.java @@ -21,7 +21,6 @@ import com.alibaba.polardbx.executor.ddl.job.task.basic.TablesSyncTask; import com.alibaba.polardbx.executor.ddl.job.task.gsi.DropGsiPhyDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.gsi.DropGsiTableHideTableMetaTask; -import com.alibaba.polardbx.executor.ddl.job.task.gsi.DropGsiTableRemoveMetaTask; import com.alibaba.polardbx.executor.ddl.job.task.gsi.DropPartitionGsiPhyDdlTask; import com.alibaba.polardbx.executor.ddl.job.task.gsi.GsiDropCleanUpTask; import com.alibaba.polardbx.executor.ddl.job.task.gsi.ValidateGsiExistenceTask; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/meta/DdlEngineResourceManager.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/meta/DdlEngineResourceManager.java index 73feed687..0dce6b4d7 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/meta/DdlEngineResourceManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/meta/DdlEngineResourceManager.java @@ -81,7 +81,7 @@ public void acquireResource(@NotNull String schemaName, @NotNull long jobId, @NotNull Set shared, @NotNull Set exclusive) { - acquireResource(schemaName, jobId, __->false, shared, exclusive, (Connection conn) -> true); + acquireResource(schemaName, jobId, __ -> false, shared, exclusive, (Connection conn) -> true); } public void acquireResource(@NotNull String schemaName, @@ -104,7 +104,7 @@ public void acquireResource(@NotNull String schemaName, try { while (!lockManager.tryReadWriteLockBatch(schemaName, owner, readLocks, writeLocks, func)) { LocalDateTime now = LocalDateTime.now(); - if (now.minusHours(1L).isAfter(beginTs)){ + if (now.minusHours(1L).isAfter(beginTs)) { throw new TddlNestableRuntimeException("GET DDL LOCK TIMEOUT"); } @@ -127,7 +127,9 @@ public void acquireResource(@NotNull String schemaName, "tryReadWriteLockBatch failed, schemaName:[%s], jobId:[%s], retryCount:[%d], shared:[%s], exclusive:[%s], blockers:[%s]", schemaName, jobId, retryCount++, setToString(shared), setToString(exclusive), setToString(blockers)) ); - Set ddlBlockers = blockers.stream().filter(e-> StringUtils.startsWith(e, PersistentReadWriteLock.OWNER_PREFIX)).collect(Collectors.toSet()); + Set ddlBlockers = + blockers.stream().filter(e -> StringUtils.startsWith(e, PersistentReadWriteLock.OWNER_PREFIX)) + .collect(Collectors.toSet()); if (CollectionUtils.isNotEmpty(ddlBlockers)) { List blockerJobRecords = getBlockerJobRecords(schemaName, ddlBlockers); if (CollectionUtils.isEmpty(blockerJobRecords)) { @@ -178,7 +180,7 @@ public void acquireResource(@NotNull String schemaName, } } - public boolean downGradeWriteLock(Connection connection, long jobId, String writeLock){ + public boolean downGradeWriteLock(Connection connection, long jobId, String writeLock) { String owner = PersistentReadWriteLock.OWNER_PREFIX + String.valueOf(jobId); return lockManager.downGradeWriteLock(connection, owner, writeLock); } @@ -196,7 +198,7 @@ public int releaseResource(Connection connection, long jobId) { } public int releaseResource(Connection connection, long jobId, Set resouceSet) { - if(CollectionUtils.isEmpty(resouceSet)){ + if (CollectionUtils.isEmpty(resouceSet)) { return 0; } String owner = PersistentReadWriteLock.OWNER_PREFIX + String.valueOf(jobId); @@ -226,37 +228,37 @@ protected List invoke() { return result; } - private String setToString(Set lockSet){ - if(CollectionUtils.isEmpty(lockSet)){ + private String setToString(Set lockSet) { + if (CollectionUtils.isEmpty(lockSet)) { return ""; } return Joiner.on(",").join(lockSet); } - public static void startAcquiringLock(String schemaName, DdlContext ddlContext){ - synchronized (allLocksTryingToAcquire){ - if(!allLocksTryingToAcquire.containsKey(schemaName)){ + public static void startAcquiringLock(String schemaName, DdlContext ddlContext) { + synchronized (allLocksTryingToAcquire) { + if (!allLocksTryingToAcquire.containsKey(schemaName)) { allLocksTryingToAcquire.put(schemaName, new ArrayList<>()); } allLocksTryingToAcquire.get(schemaName).add(ddlContext); } } - public static void finishAcquiringLock(String schemaName, DdlContext ddlContext){ - synchronized 
(allLocksTryingToAcquire){ - if(allLocksTryingToAcquire.containsKey(schemaName)){ + public static void finishAcquiringLock(String schemaName, DdlContext ddlContext) { + synchronized (allLocksTryingToAcquire) { + if (allLocksTryingToAcquire.containsKey(schemaName)) { allLocksTryingToAcquire.get(schemaName).remove(ddlContext); - if(CollectionUtils.isEmpty(allLocksTryingToAcquire.get(schemaName))){ + if (CollectionUtils.isEmpty(allLocksTryingToAcquire.get(schemaName))) { allLocksTryingToAcquire.remove(schemaName); } } } } - public static List getAllDdlAcquiringLocks(String schemaName){ + public static List getAllDdlAcquiringLocks(String schemaName) { List result = new ArrayList<>(); - synchronized (allLocksTryingToAcquire){ - if(CollectionUtils.isEmpty(allLocksTryingToAcquire.get(schemaName))){ + synchronized (allLocksTryingToAcquire) { + if (CollectionUtils.isEmpty(allLocksTryingToAcquire.get(schemaName))) { return result; } result.addAll(allLocksTryingToAcquire.get(schemaName)); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/meta/DdlEngineSchedulerManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/meta/DdlEngineSchedulerManager.java index dd79bc05e..14c1718f2 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/meta/DdlEngineSchedulerManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/meta/DdlEngineSchedulerManager.java @@ -32,7 +32,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Set; -import java.util.stream.Collectors; public class DdlEngineSchedulerManager { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/meta/DdlJobManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/meta/DdlJobManager.java index be5c16632..3c7411584 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/meta/DdlJobManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/meta/DdlJobManager.java @@ -438,47 +438,90 @@ private List> split(List list, in public boolean removeJob(long jobId) { // Execute the following operations within a transaction. 
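The removeJob rewrite continuing below splits one long metadb transaction into a collect phase plus one short delete transaction per job id, so no single transaction grows with the number of subjobs. A condensed sketch of that two-phase shape (helper names assumed, not the real accessors):

```java
import java.util.ArrayList;
import java.util.List;

class RemoveJobSketch {
    // Phase 1: one read transaction gathers the root job plus all subjob ids.
    List<Long> collectJobAndSubJobIds(long jobId) {
        List<Long> ids = new ArrayList<>();
        // ...fetch subjobs recursively and add their ids here...
        ids.add(jobId);
        return ids;
    }

    // Phase 2: each id is validated and deleted in its own short transaction.
    boolean removeJob(long jobId) {
        for (long id : collectJobAndSubJobIds(jobId)) {
            deleteInOwnTransaction(id, id == jobId);
        }
        return true;
    }

    void deleteInOwnTransaction(long id, boolean releaseLocks) {
        // validate state is FINISHED, delete engine + task rows, and release
        // the job's resource locks only when handling the root job.
    }
}
```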
- return new DdlEngineAccessorDelegate<Boolean>() { + List<Long> jobIds = new DdlEngineAccessorDelegate<List<Long>>() { @Override - protected Boolean invoke() { - int subJobCount = 0; + protected List<Long> invoke() { + List<Long> jobList = new ArrayList<>(); // remove subjob cascade List<SubJobTask> subjobs = fetchSubJobsRecursive(jobId, engineTaskAccessor, false); for (SubJobTask subjob : GeneralUtil.emptyIfNull(subjobs)) { for (long subJobId : subjob.fetchAllSubJobs()) { - DdlEngineRecord subJobRecord = engineAccessor.query(subJobId); - validateDdlStateContains(DdlState.valueOf(subJobRecord.state), DdlState.FINISHED); - subJobCount += engineAccessor.delete(subJobId); - engineTaskAccessor.deleteByJobId(subJobId); + jobList.add(subJobId); } } - DdlEngineRecord jobRecord = engineAccessor.query(jobId); - validateDdlStateContains(DdlState.valueOf(jobRecord.state), DdlState.FINISHED); - int count = engineAccessor.delete(jobId); - engineTaskAccessor.deleteByJobId(jobId); + jobList.add(jobId); - getResourceManager().releaseResource(getConnection(), jobId); - DdlEngineStats.METRIC_DDL_JOBS_FINISHED.update(count + subJobCount); - - return count > 0; + return jobList; } }.execute(); + + jobIds.forEach(o -> { + new DdlEngineAccessorDelegate<Boolean>() { + + @Override + protected Boolean invoke() { + + DdlEngineRecord jobRecord = engineAccessor.query(o); + validateDdlStateContains(DdlState.valueOf(jobRecord.state), DdlState.FINISHED); + int count = engineAccessor.delete(o); + engineTaskAccessor.deleteByJobId(o); + + if (o == jobId) { + getResourceManager().releaseResource(getConnection(), o); + } + DdlEngineStats.METRIC_DDL_JOBS_FINISHED.update(count); + + return count > 0; + } + }.execute(); + }); + return true; } public int cleanUpArchive(long minutes) { - return new DdlEngineAccessorDelegate<Integer>() { + + List<DdlEngineRecord> archiveDdlEngineRecords = new DdlEngineAccessorDelegate<List<DdlEngineRecord>>() { + @Override + protected List<DdlEngineRecord> invoke() { + return engineAccessor.queryOutdateArchiveDDLEngine(minutes); + } + }.execute(); + deleteArchive(archiveDdlEngineRecords); + return 0; + } + + public static int cleanUpArchiveSchema(String schemaName) { + List<DdlEngineRecord> archiveDdlEngineRecords = new DdlEngineAccessorDelegate<List<DdlEngineRecord>>() { @Override - protected Integer invoke() { - int count = engineAccessor.cleanUpArchive(minutes); - return count; + protected List<DdlEngineRecord> invoke() { + return engineAccessor.queryArchive(schemaName); } }.execute(); + deleteArchive(archiveDdlEngineRecords); + return 0; + } + + private static void deleteArchive(List<DdlEngineRecord> archiveDdlEngineRecords) { + List<Long> jobIds = new ArrayList<>(archiveDdlEngineRecords.size()); + jobIds.addAll(archiveDdlEngineRecords.stream().map(o -> o.getJobId()).collect(Collectors.toList())); + if (!jobIds.isEmpty()) { + jobIds.forEach(jobId -> { + new DdlEngineAccessorDelegate<Boolean>() { + @Override + protected Boolean invoke() { + engineAccessor.deleteArchive(jobId); + engineTaskAccessor.deleteArchiveByJobId(jobId); + return true; + } + }.execute(); + }); + } } - private void validateDdlStateContains(DdlState currentState, Set<DdlState> ddlStateSet) { + protected void validateDdlStateContains(DdlState currentState, Set<DdlState> ddlStateSet) { Preconditions.checkNotNull(ddlStateSet); Preconditions.checkNotNull(currentState); if (ddlStateSet.contains(currentState)) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/meta/DdlPlanAccessorDelegate.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/meta/DdlPlanAccessorDelegate.java index 930d7eb0b..b42701073 100644 ---
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/meta/DdlPlanAccessorDelegate.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/meta/DdlPlanAccessorDelegate.java @@ -20,8 +20,6 @@ import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.gms.metadb.delegate.MetaDbAccessorWrapper; -import com.alibaba.polardbx.gms.metadb.misc.DdlEngineAccessor; -import com.alibaba.polardbx.gms.metadb.misc.DdlEngineTaskAccessor; import com.alibaba.polardbx.gms.scheduler.DdlPlanAccessor; import com.alibaba.polardbx.gms.util.MetaDbUtil; import com.alibaba.polardbx.statistics.SQLRecorderLogger; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/sync/DdlBackFillSpeedSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/sync/DdlBackFillSpeedSyncAction.java index 5de2565f1..cd5fc5552 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/sync/DdlBackFillSpeedSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/sync/DdlBackFillSpeedSyncAction.java @@ -43,7 +43,7 @@ public DdlBackFillSpeedSyncAction(Long backFillId, double speed, long totalRows) @Override public ResultCursor sync() { ArrayResultCursor resultCursor = buildResultCursor(); - for(ThrottleInfo throttleInfo: Throttle.getThrottleInfoList()){ + for (ThrottleInfo throttleInfo : Throttle.getThrottleInfoList()) { resultCursor.addRow(buildRow(throttleInfo)); } return resultCursor; @@ -58,14 +58,12 @@ public static ArrayResultCursor buildResultCursor() { return resultCursor; } - private Object[] buildRow(ThrottleInfo throttleInfo) { return new Object[] { - throttleInfo.getBackFillId(), - throttleInfo.getSpeed(), - throttleInfo.getTotalRows() + throttleInfo.getBackFillId(), + throttleInfo.getSpeed(), + throttleInfo.getTotalRows() }; } - } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/sync/DdlCacheCollectionSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/sync/DdlCacheCollectionSyncAction.java index b942ca5a5..ba3429cc9 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/sync/DdlCacheCollectionSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/sync/DdlCacheCollectionSyncAction.java @@ -22,8 +22,8 @@ import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.ddl.newengine.DdlEngineDagExecutorMap; import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlEngineResourceManager; -import com.alibaba.polardbx.gms.metadb.misc.PersistentReadWriteLock; import com.alibaba.polardbx.executor.sync.ISyncAction; +import com.alibaba.polardbx.gms.metadb.misc.PersistentReadWriteLock; import com.alibaba.polardbx.optimizer.context.DdlContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.google.common.base.Joiner; @@ -58,8 +58,8 @@ public ResultCursor sync() { // All DDls List ddlAcquiringLocksList = DdlEngineResourceManager.getAllDdlAcquiringLocks(schemaName); - if (CollectionUtils.isNotEmpty(ddlAcquiringLocksList)){ - for(DdlContext ddlContext: ddlAcquiringLocksList){ + if (CollectionUtils.isNotEmpty(ddlAcquiringLocksList)) { + for (DdlContext ddlContext : ddlAcquiringLocksList) { resultCursor.addRow(buildRowFromDdlContext(ddlContext, 
serverInfo)); } } @@ -90,7 +90,8 @@ public static ArrayResultCursor buildResultCursor() { private Object[] buildRowFromDdlContext(DdlContext ddlContext, String nodeInfo) { Set<String> blockers = - lockManager.queryBlocker(Sets.union(Sets.newHashSet(ddlContext.getSchemaName()), ddlContext.getResources())); + lockManager.queryBlocker( + Sets.union(Sets.newHashSet(ddlContext.getSchemaName()), ddlContext.getResources())); return new Object[] { ENGINE_TYPE_DAG, nodeInfo, @@ -128,8 +129,8 @@ public void setSchemaName(String schemaName) { this.schemaName = schemaName; } - private String setToString(Set<String> lockSet){ - if(CollectionUtils.isEmpty(lockSet)){ + private String setToString(Set<String> lockSet) { + if (CollectionUtils.isEmpty(lockSet)) { return ""; } return Joiner.on(",").join(lockSet); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/sync/DdlInterruptSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/sync/DdlInterruptSyncAction.java index 695485539..d21536153 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/sync/DdlInterruptSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/sync/DdlInterruptSyncAction.java @@ -16,6 +16,8 @@ package com.alibaba.polardbx.executor.ddl.newengine.sync; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.ddl.newengine.DdlEngineDagExecutor; import com.alibaba.polardbx.executor.ddl.newengine.DdlEngineDagExecutorMap; import com.alibaba.polardbx.gms.sync.IGmsSyncAction; @@ -23,6 +25,8 @@ public class DdlInterruptSyncAction implements IGmsSyncAction { + private static final Logger logger = LoggerFactory.getLogger(DdlInterruptSyncAction.class); + private DdlRequest ddlRequest; public DdlInterruptSyncAction() { @@ -42,9 +46,14 @@ public Object sync() { for (Long jobId : ddlRequest.getJobIds()) { DdlEngineDagExecutor ddlEngineDagExecutor = DdlEngineDagExecutorMap.get(schemaName, jobId); if (ddlEngineDagExecutor == null) { + logger.warn( + String.format("The ddl job %s on schema %s does not exist", jobId, schemaName)); continue; } ddlEngineDagExecutor.interrupt(); + logger.warn( + String.format("The ddl job %s on schema %s has been interrupted by DdlInterruptSyncAction", jobId, + schemaName)); } return null; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/sync/DdlResponse.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/sync/DdlResponse.java index a39f25c81..010c05fec 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/sync/DdlResponse.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/sync/DdlResponse.java @@ -16,6 +16,7 @@ package com.alibaba.polardbx.executor.ddl.newengine.sync; +import com.alibaba.fastjson.annotation.JSONCreator; import com.alibaba.polardbx.optimizer.statis.SQLTracer; import java.util.HashMap; @@ -78,6 +79,25 @@ public Response(long jobId, this.responseContent = responseContent; } + @JSONCreator + public Response(long jobId, + String schemaName, + String objectName, + String ddlType, + ResponseType responseType, + String responseContent, + Object warning, + SQLTracer tracer) { + this.jobId = jobId; + this.schemaName = schemaName; + this.objectName = objectName; + this.ddlType = ddlType; + this.responseType = responseType; + this.responseContent =
responseContent; + this.warning = warning; + this.tracer = tracer; + } + public long getJobId() { return this.jobId; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/sync/DdlResponseCollectSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/sync/DdlResponseCollectSyncAction.java index 721ffeca5..fe9b6c524 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/sync/DdlResponseCollectSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/sync/DdlResponseCollectSyncAction.java @@ -42,15 +42,16 @@ public DdlResponseCollectSyncAction(final String schemaName, final List jo public Object sync() { //fetch DDL response, filter by schemaName and jobId List responseList = DdlEngineRequester.getResponse().stream().filter( - e-> StringUtils.equalsIgnoreCase(e.getSchemaName(), schemaName) + e -> StringUtils.equalsIgnoreCase(e.getSchemaName(), schemaName) ).collect(Collectors.toList()); - if(CollectionUtils.isNotEmpty(jobIds)){ - responseList = responseList.stream().filter(e->jobIds.contains(e.getJobId())).collect(Collectors.toList()); + if (CollectionUtils.isNotEmpty(jobIds)) { + responseList = + responseList.stream().filter(e -> jobIds.contains(e.getJobId())).collect(Collectors.toList()); } //return - ArrayResultCursor resultCursor = buildResultCursor(); - responseList.stream().forEach(e-> resultCursor.addRow(buildRow(e))); + ArrayResultCursor resultCursor = buildResultCursor(); + responseList.stream().forEach(e -> resultCursor.addRow(buildRow(e))); return resultCursor; } @@ -77,6 +78,4 @@ private Object[] buildRow(DdlResponse.Response response) { }; } - - } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/utils/DdlHelper.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/utils/DdlHelper.java index be22ebe09..9f89092c3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/utils/DdlHelper.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/utils/DdlHelper.java @@ -55,6 +55,7 @@ import com.alibaba.polardbx.gms.metadb.misc.DdlEngineRecord; import com.alibaba.polardbx.gms.node.GmsNodeManager; import com.alibaba.polardbx.gms.sync.GmsSyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.topology.DbGroupInfoManager; import com.alibaba.polardbx.gms.topology.SystemDbHelper; import com.alibaba.polardbx.group.jdbc.TGroupDataSource; @@ -74,6 +75,7 @@ import com.alibaba.polardbx.rpc.compatible.XDataSource; import com.alibaba.polardbx.statistics.SQLRecorderLogger; import org.apache.calcite.rel.RelNode; +import org.apache.calcite.sql.SqlIdentifier; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.SqlShowCreateTable; import org.apache.calcite.sql.parser.SqlParserPos; @@ -361,7 +363,7 @@ public static String genHashCodeForPhyTableDDL(String schemaName, String groupNa int delay) { String phyTableDDL = null; - String sql = String.format("SHOW CREATE TABLE %s", phyTableName); + String sql = "SHOW CREATE TABLE " + phyTableName; String errMsg = String.format("fetch the DDL of %s on %s. 
Caused by: %%s", phyTableName, groupName); try (Connection conn = getPhyConnection(schemaName, groupName); @@ -513,7 +515,7 @@ private static void killActivePhyDDL(Connection conn, String connId, String grou public static void interruptJobs(String schemaName, List jobIds) { DdlRequest ddlRequest = new DdlRequest(schemaName, jobIds); - GmsSyncManagerHelper.sync(new DdlInterruptSyncAction(ddlRequest), schemaName); + GmsSyncManagerHelper.sync(new DdlInterruptSyncAction(ddlRequest), schemaName, SyncScope.ALL); } public static void killActivePhyDDLs(String schemaName, List traceIds) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/utils/DdlJobManagerUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/utils/DdlJobManagerUtils.java index 867cb2f75..ccf57afb8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/utils/DdlJobManagerUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/utils/DdlJobManagerUtils.java @@ -16,11 +16,15 @@ package com.alibaba.polardbx.executor.ddl.newengine.utils; +import com.alibaba.fastjson.JSON; +import com.alibaba.fastjson.JSONObject; import com.alibaba.polardbx.common.ddl.newengine.DdlConstants; import com.alibaba.polardbx.common.ddl.newengine.DdlState; import com.alibaba.polardbx.common.utils.TStringUtil; +import com.alibaba.polardbx.executor.ddl.job.task.twophase.InitTwoPhaseDdlTask; import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlEngineAccessorDelegate; import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlEngineSchedulerManager; +import com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils; import com.alibaba.polardbx.gms.metadb.misc.DdlEngineAccessor; import com.alibaba.polardbx.gms.metadb.misc.DdlEngineRecord; import com.alibaba.polardbx.gms.metadb.misc.DdlEngineTaskRecord; @@ -28,6 +32,8 @@ import com.alibaba.polardbx.optimizer.context.PhyDdlExecutionRecord; import java.sql.Connection; +import java.util.HashMap; +import java.util.Map; import static com.alibaba.polardbx.common.ddl.newengine.DdlConstants.SEMICOLON; import static com.alibaba.polardbx.executor.ddl.newengine.sync.DdlResponse.Response; @@ -172,6 +178,17 @@ public static void reloadPhyTablesDone(PhyDdlExecutionRecord phyDdlExecutionReco } } + public static Map reloadPhyTablesHashCode(Long jobId) { + DdlEngineTaskRecord record = + SCHEDULER_MANAGER.fetchTaskRecord(jobId, TwoPhaseDdlUtils.TWO_PHASE_DDL_INIT_TASK_NAME).get(0); + if (record != null && TStringUtil.isNotEmpty(record.value)) { + InitTwoPhaseDdlTask initTwoPhaseDdlTask = JSONObject.parseObject(record.value, InitTwoPhaseDdlTask.class); + return initTwoPhaseDdlTask.getPhysicalTableHashCodeMap(); + } else { + return new HashMap<>(); + } + } + /** * Clear physical tables done from cache. 
*/ diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/utils/TaskHelper.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/utils/TaskHelper.java index 68f20f1b1..f064cf886 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/utils/TaskHelper.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/newengine/utils/TaskHelper.java @@ -88,14 +88,14 @@ public static DdlTask fromDdlEngineTaskRecord(DdlEngineTaskRecord record) { public static CostEstimableDdlTask.CostInfo decodeCostInfo(String str) { if (StringUtils.isEmpty(str)) { - return CostEstimableDdlTask.createCostInfo(0L, 0L); + return CostEstimableDdlTask.createCostInfo(0L, 0L, 0L); } return JSONObject.parseObject(str, CostEstimableDdlTask.CostInfo.class); } public static String encodeCostInfo(CostEstimableDdlTask.CostInfo costInfo) { if (costInfo == null) { - costInfo = CostEstimableDdlTask.createCostInfo(0L, 0L); + costInfo = CostEstimableDdlTask.createCostInfo(0L, 0L, 0L); } return JSONObject.toJSONString(costInfo); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/sync/ClearPlanCacheAndBaselineSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/sync/ClearPlanCacheAndBaselineSyncAction.java new file mode 100644 index 000000000..dd9f0bd4e --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/sync/ClearPlanCacheAndBaselineSyncAction.java @@ -0,0 +1,61 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
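The TaskHelper hunk above widens createCostInfo to three zeroed fields so an empty persisted string never yields null. A standalone sketch of the same decode-with-default idiom; CostInfo's field names here are assumptions for illustration, not the real CostEstimableDdlTask class:

```java
import com.alibaba.fastjson.JSONObject;
import org.apache.commons.lang.StringUtils;

final class CostInfo {
    public long rows;
    public long size;
    public long parallelism; // stands in for the newly added third field
}

class CostCodec {
    static CostInfo decode(String str) {
        if (StringUtils.isEmpty(str)) {
            return new CostInfo(); // zeroed default instead of returning null
        }
        return JSONObject.parseObject(str, CostInfo.class);
    }
}
```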
+ */ + +package com.alibaba.polardbx.executor.ddl.sync; + +import com.alibaba.polardbx.executor.cursor.ResultCursor; +import com.alibaba.polardbx.executor.sync.ISyncAction; +import com.alibaba.polardbx.optimizer.planmanager.PlanManager; +import org.apache.commons.lang.StringUtils; + +public class ClearPlanCacheAndBaselineSyncAction implements ISyncAction { + + private String schema; + private String table; + + public ClearPlanCacheAndBaselineSyncAction() { + } + + public ClearPlanCacheAndBaselineSyncAction(String schema, String table) { + this.schema = schema; + this.table = table; + } + + @Override + public ResultCursor sync() { + if (StringUtils.isEmpty(schema) || StringUtils.isEmpty(table)) { + return null; + } + PlanManager.getInstance().invalidateTable(schema, table); + return null; + } + + public String getSchema() { + return schema; + } + + public void setSchema(String schema) { + this.schema = schema; + } + + public String getTable() { + return table; + } + + public void setTable(String table) { + this.table = table; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/sync/ClearPlanCacheSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/sync/ClearPlanCacheSyncAction.java index 73af5892d..720b8fde8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/sync/ClearPlanCacheSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/sync/ClearPlanCacheSyncAction.java @@ -18,12 +18,13 @@ import com.alibaba.polardbx.executor.cursor.ResultCursor; import com.alibaba.polardbx.executor.sync.ISyncAction; -import com.alibaba.polardbx.optimizer.OptimizerContext; -import com.alibaba.polardbx.optimizer.planmanager.PlanManager; +import com.alibaba.polardbx.optimizer.core.planner.PlanCache; +import org.apache.commons.lang.StringUtils; public class ClearPlanCacheSyncAction implements ISyncAction { private String schemaName; + private String tableName; public ClearPlanCacheSyncAction() { } @@ -32,9 +33,20 @@ public ClearPlanCacheSyncAction(String schemaName) { this.schemaName = schemaName; } + public ClearPlanCacheSyncAction(String schemaName, String tableName) { + this.schemaName = schemaName; + this.tableName = tableName; + } + @Override public ResultCursor sync() { - PlanManager.getInstance().invalidateSchema(schemaName); + if (StringUtils.isNotEmpty(tableName)) { + // sync with table name meaning seq handler had been fixed, clean baseline+plancache + PlanCache.getInstance().invalidateByTable(schemaName, tableName); + } else { + // sync without table name meaning seq handler hadn't been fixed, only clean plancache by schema + PlanCache.getInstance().invalidateBySchema(schemaName); + } return null; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/twophase/DnStats.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/twophase/DnStats.java new file mode 100644 index 000000000..5ca2738f6 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/twophase/DnStats.java @@ -0,0 +1,111 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
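ClearPlanCacheSyncAction above now picks its invalidation granularity from whether a table name was supplied. A self-contained sketch of that dispatch (PlanCacheStub stands in for the real PlanCache singleton):

```java
class PlanCacheStub {
    void invalidateByTable(String schema, String table) { /* drop plans touching one table */ }
    void invalidateBySchema(String schema) { /* drop every cached plan of the schema */ }
}

class ClearPlanCacheSketch {
    private final PlanCacheStub cache = new PlanCacheStub();

    void sync(String schemaName, String tableName) {
        if (tableName != null && !tableName.isEmpty()) {
            cache.invalidateByTable(schemaName, tableName); // targeted: table-level DDL
        } else {
            cache.invalidateBySchema(schemaName);           // broad: schema-level change
        }
    }
}
```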
* You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.twophase; + +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.gms.topology.DbTopologyManager; +import com.alibaba.polardbx.gms.topology.GroupDetailInfoAccessor; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; + +import static com.alibaba.polardbx.gms.topology.DbTopologyManager.getGroupNameToStorageInstIdMap; + +public class DnStats { + private final static Logger LOG = SQLRecorderLogger.ddlLogger; + + String storageInstId; + int maxConnection; + int maxUserConnection; + int connection; + + Long jobId; + + public DnStats(String storageInstId, int maxConnection, int maxUserConnection, int connection, Long jobId) { + this.storageInstId = storageInstId; + this.maxConnection = maxConnection; + this.maxUserConnection = maxUserConnection; + this.connection = connection; + this.jobId = jobId; + } + + public int getResidueConnection() { + if (maxUserConnection == 0) { + return maxConnection - connection; + } else { + return Math.min(maxConnection, maxUserConnection) - connection; + } + } + + public Boolean checkConnectionNum(int requiredConnectionNum) { + int residueConnectionNum = getResidueConnection(); + Boolean result = + (residueConnectionNum * 4 / 5 > requiredConnectionNum) && (residueConnectionNum - requiredConnectionNum + > 32); + String logInfo = String.format( + "job %s: check dn %s, max_connection=%d, max_user_connection=%d, used_connection=%d, require_connection=%d, check %s", + jobId, storageInstId, maxConnection, maxUserConnection, connection, requiredConnectionNum, + result); + LOG.info(logInfo); + return result; + } + + public static Map<String, String> buildGroupToDnMap(String schemaName, String tableName, + ExecutionContext executionContext) { + return getGroupNameToStorageInstIdMap(schemaName); + } + + public static Map<String, DnStats> buildDnStats(String schemaName, String tableName, + Map<String, String> groupToDnMap, + Long jobId, + ExecutionContext executionContext) { + String taskName = "TWO_PHASE_DDL_INIT_TASK"; + Map<String, String> dnToGroupMap = + groupToDnMap.keySet().stream() + .collect(Collectors.toMap(groupToDnMap::get, o -> o, (before, after) -> after)); + String showMaxConnectionSql = "show global variables like 'max_connections';"; + String showMaxUserConnectionSql = "show global variables like 'max_user_connections';"; + String showConnectionStatusSql = "show global status like 'Threads_connected';"; + ConcurrentHashMap<String, DnStats> results = new ConcurrentHashMap<>(); + dnToGroupMap.keySet().forEach(dn -> { + String groupName = dnToGroupMap.get(dn); + int maxConnection = + Integer.parseInt( + TwoPhaseDdlUtils.queryGroupBypassConnPool(executionContext, jobId, taskName, schemaName, tableName, + groupName, + showMaxConnectionSql).get(0).get("Value").toString()); + int maxUserConnection = + Integer.parseInt( + TwoPhaseDdlUtils.queryGroupBypassConnPool(executionContext, jobId, taskName, schemaName, tableName, + groupName,
showMaxUserConnectionSql).get(0).get("Value").toString()); + int connection = + Integer.parseInt( + TwoPhaseDdlUtils.queryGroupBypassConnPool(executionContext, jobId, taskName, schemaName, tableName, + groupName, + showConnectionStatusSql).get(0).get("Value").toString()); + results.put(dn, new DnStats(dn, maxConnection, maxUserConnection, connection, jobId)); + } + ); + return results; + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/twophase/TwoPhaseDdlData.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/twophase/TwoPhaseDdlData.java new file mode 100644 index 000000000..ed1dc56d0 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/twophase/TwoPhaseDdlData.java @@ -0,0 +1,116 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.ddl.twophase; + +import com.alibaba.polardbx.druid.util.StringUtils; +import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlHelper; +import org.apache.calcite.util.Pair; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class TwoPhaseDdlData { + public static Boolean initialPhyDdlSuccess(List> results) { + Boolean succeed = results.stream().allMatch(o -> o.get(1).toString().equals("1")); + return succeed; + } + + public static Boolean evolvePhyDdlSuccess(List> results) { + Boolean succeed = results.stream().allMatch(o -> o.get(0).toString().equals("1")); + return succeed; + } + + public static String REACHED_BARRIER = "REACHED_BARRIER"; + public static String REACHED_BARRIER_RUNNING = "REACHED_BARRIER_RUNNING"; + public static String RUNNING = "RUNNING"; + + public static Long REACHED_BARRIER_STATE = 1L; + + public static Long NON_REACHED_BARRIER_STATE = 0L; + + // physicalDbName / physicalTable + // phase + // state + // queryId + public static Map resultsToQueryIdMap(List> results) { + Map queryIdMap = new HashMap<>(); + for (Map row : results) { + String physicalDbTableName = (String) row.get("PHYSICAL_TABLE"); + Long queryId = (Long) row.get("PROCESS_ID"); + queryIdMap.put(physicalDbTableName, queryId); + } + return queryIdMap; + } + // physicalDbName / physicalTable + // phase + // state + // queryId + + public static Map resultsToPhysicalDdlStateMap(List> results) { + Map physicalDdlRunningStateMap = new HashMap<>(); + for (Map row : results) { + String physicalDbTableName = (String) (row.get("PHYSICAL_TABLE")); + String phyDdlState = (String) (row.get("STATE")); + physicalDdlRunningStateMap.put(physicalDbTableName, phyDdlState); + } + return physicalDdlRunningStateMap; + } + + public static String resultsToPhysicalDdlPhase(List> results) { + if (results.isEmpty()) { + return "FINISH"; + } else { + return (String) (results.get(0).get("PHASE")); + } + } + + public static Map, String> resultsToQueryIdToProcessInfoMap(List> results) { + Map, String> queryIdToProcessInfoMap = new HashMap<>(); + for (Map row : 
results) { + Long processId = (Long) (row.get("Id")); + String dbName = (String) (row.get("db")); + String processInfo = (String) (row.get("Info")); + // While check finished, the thread pool in dn may cause thread hang for a long time before return. + if (StringUtils.equalsIgnoreCase(processInfo, "NULL")) { + processInfo = null; + } + queryIdToProcessInfoMap.put(Pair.of(dbName, processId), processInfo); + } + return queryIdToProcessInfoMap; + } + + public static Map resultsToStateMap(List> results) { + Map phyTableDdlState = new HashMap<>(); + for (Map row : results) { + String physicalDbTableName = (String) (row.get("PHYSICAL_TABLE")); + String state = (String) (row.get("STATE")); + if (state.equalsIgnoreCase(REACHED_BARRIER)) { + phyTableDdlState.put(physicalDbTableName, REACHED_BARRIER_STATE); + } else { + phyTableDdlState.put(physicalDbTableName, NON_REACHED_BARRIER_STATE); + } + } + return phyTableDdlState; + } + + public static Boolean resultsToFinishSuccess(List> results) { + String firstColumnName = results.get(0).keySet().stream().findFirst().get(); + Long result = (Long) (results.get(0).get(firstColumnName)); + return result == 1; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/twophase/TwoPhaseDdlManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/twophase/TwoPhaseDdlManager.java new file mode 100644 index 000000000..dea7d0d16 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/twophase/TwoPhaseDdlManager.java @@ -0,0 +1,1097 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
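The TwoPhaseDdlData helpers above all follow one shape: fold the List of row maps returned by the DN into a typed lookup keyed by physical table. A runnable distillation (column names PHYSICAL_TABLE/STATE as in the hunk; the constants mirror REACHED_BARRIER_STATE):

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class ResultMapping {
    static final long REACHED = 1L;
    static final long NOT_REACHED = 0L;

    // Each row is one physical table's barrier status; collapse to a state map.
    static Map<String, Long> toStateMap(List<Map<String, Object>> rows) {
        Map<String, Long> state = new HashMap<>();
        for (Map<String, Object> row : rows) {
            String phyTable = (String) row.get("PHYSICAL_TABLE");
            String s = (String) row.get("STATE");
            state.put(phyTable, "REACHED_BARRIER".equalsIgnoreCase(s) ? REACHED : NOT_REACHED);
        }
        return state;
    }
}
```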
+ */ + +package com.alibaba.polardbx.executor.ddl.twophase; + +import com.alibaba.fastjson.JSON; +import com.alibaba.fastjson.serializer.SerializerFeature; +import com.alibaba.polardbx.common.IdGenerator; +import com.alibaba.polardbx.common.eventlogger.EventLogger; +import com.alibaba.polardbx.common.eventlogger.EventType; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.TStringUtil; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.MDC; +import com.alibaba.polardbx.druid.sql.ast.expr.SQLIdentifierExpr; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableStatement; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLExprTableSource; +import com.alibaba.polardbx.druid.util.StringUtils; +import com.alibaba.polardbx.executor.ddl.newengine.cross.CrossEngineValidator; +import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlHelper; +import com.alibaba.polardbx.executor.ddl.workqueue.PriorityFIFOTask; +import com.alibaba.polardbx.executor.ddl.workqueue.TwoPhaseDdlThreadPool; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.parse.FastsqlUtils; +import com.alibaba.polardbx.optimizer.utils.QueryConcurrencyPolicy; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; +import io.grpc.netty.shaded.io.netty.util.internal.StringUtil; +import org.apache.calcite.sql.SqlIdentifier; +import org.apache.calcite.util.Pair; + +import java.text.MessageFormat; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; + +import static com.alibaba.polardbx.common.properties.ConnectionParams.CHECK_PHY_CONN_NUM; +import static com.alibaba.polardbx.common.properties.ConnectionParams.EMIT_PHY_TABLE_DDL_DELAY; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlData.REACHED_BARRIER_STATE; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlData.resultsToFinishSuccess; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlData.resultsToPhysicalDdlPhase; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlData.resultsToPhysicalDdlStateMap; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlData.resultsToQueryIdMap; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlData.resultsToQueryIdToProcessInfoMap; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlData.resultsToStateMap; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.COMMIT_STATE; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.PHYSICAL_DDL_EMIT_TASK_NAME; +import static 
com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.SQL_COMMIT_TWO_PHASE_DDL; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.SQL_FINISH_TWO_PHASE_DDL; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.SQL_INIT_TWO_PHASE_DDL; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.SQL_KILL_QUERY; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.SQL_PREPARE_TWO_PHASE_DDL; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.SQL_ROLLBACK_TWO_PHASE_DDL; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.SQL_SHOW_PROCESS_LIST; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.SQL_STATS_TWO_PHASE_DDL; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.SQL_WAIT_TWO_PHASE_DDL; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.TWO_PHASE_DDL_COMMIT_TASK_NAME; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.TWO_PHASE_DDL_FINISH_TASK_NAME; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.TWO_PHASE_DDL_INIT_TASK_NAME; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.TWO_PHASE_DDL_PREPARE_TASK_NAME; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.TWO_PHASE_DDL_ROLLBACK_TASK_NAME; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.TWO_PHASE_DDL_STATS; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.TWO_PHASE_PHYSICAL_DDL_HINT_TEMPLATE; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.WAIT_ROLLBACK_STATE; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.buildPhyDbTableNameFromGroupNameAndPhyTableName; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.buildTwoPhaseKeyFromLogicalTableNameAndGroupName; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.executePhyDdlBypassConnPool; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.formatString; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.queryGroup; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.queryGroupBypassConnPool; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.updateGroupBypassConnPool; +import static com.alibaba.polardbx.gms.util.GroupInfoUtil.buildPhysicalDbNameFromGroupName; + +public class TwoPhaseDdlManager { + //TODO: what if HA happen when rollback? 
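For a concrete feel of DnStats.checkConnectionNum defined earlier: a job is admitted only if its required connections fit into four fifths of the remaining headroom and still leave more than 32 connections spare. A worked example with made-up numbers:

```java
public class HeadroomExample {
    public static void main(String[] args) {
        // residue = min(max_connections, max_user_connections) - Threads_connected
        int maxConnection = 2000, maxUserConnection = 1500, connected = 1200;
        int residue = Math.min(maxConnection, maxUserConnection) - connected; // 300
        int required = 64;
        boolean ok = (residue * 4 / 5 > required) && (residue - required > 32);
        System.out.println(ok); // true: 240 > 64 and 236 > 32
    }
}
```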
+    private final static Logger LOG = SQLRecorderLogger.ddlLogger;
+    private static final IdGenerator ID_GENERATOR = IdGenerator.getIdGenerator();
+
+    public static int DEFAULT_WAIT_TIME_MS = 400;
+
+    public static int FINISH_DDL_WAIT_TIME = 100;
+
+    public static long EMIT_PHYSICAL_DDL_CHECK_INTERVAL_MS = 200L;
+
+    public static Map<Long, TwoPhaseDdlManager> globalTwoPhaseDdlManagerMap = new ConcurrentHashMap<>();
+
+    public Map<String, Long> queryIdMap = new ConcurrentHashMap<>();
+
+    public Map<String, Map<String, String>> queryProcessInfoMap = new ConcurrentHashMap<>();
+
+    public Map<String, Map<String, String>> queryPhyDdlRunningStateMap = new ConcurrentHashMap<>();
+
+    public Map<String, String> queryPhyDdlPhaseMap = new ConcurrentHashMap<>();
+
+    private final String schemaName;
+
+    private final String tableName;
+
+    private final Long twoPhaseDdlManagerId;
+
+    private Long jobId = -1L;
+
+    List<Throwable> phyDdlExceps = new CopyOnWriteArrayList<>();
+
+    List<FutureTask<Void>> phyDdlTasks = new ArrayList<>(0);
+
+    public ConcurrentHashMap<String, Set<String>> phyTableEmitted = null;
+
+    public ConcurrentHashMap<String, String> lastEmitPhyTableNameMap = null;
+
+    public ConcurrentHashMap<String, Queue<Pair<String, FutureTask<Void>>>> phyDdlTaskGroupByGroupName = null;
+
+    String phyDdlStmt;
+
+    // physical db => set of physical tables
+    Map<String, Set<String>> sourcePhyTableNames;
+
+    public TwoPhaseDdlReporter twoPhaseDdlReporter;
+
+    AtomicReference<RunningState> runningState = new AtomicReference<>(RunningState.INIT);
+
+    enum RunningState {
+        INIT,
+        PREPARE,
+        COMMIT,
+        ROLLBACK,
+        FINISH,
+        PAUSED
+    }
+
+    public static Boolean checkEnableTwoPhaseDdlOnDn(String schemaName, String logicalTableName,
+                                                     ExecutionContext executionContext) {
+        Map<String, String> sourceGroupDnMap =
+            DnStats.buildGroupToDnMap(schemaName, logicalTableName, executionContext);
+        Map<String, String> dnToGroupMap = new HashMap<>();
+        for (String group : sourceGroupDnMap.keySet()) {
+            dnToGroupMap.put(sourceGroupDnMap.get(group), group);
+        }
+        AtomicBoolean enableTwoPhaseDdlOnDn = new AtomicBoolean(true);
+        dnToGroupMap.keySet().forEach(
+            dn -> {
+                String group = dnToGroupMap.get(dn);
+                String sql = TwoPhaseDdlUtils.SQL_SHOW_VARIABLES_LIKE_ENABLE_TWO_PHASE_DDL;
+                List<List<Object>> results =
+                    queryGroup(executionContext, -1L, "preCheck", schemaName, logicalTableName, group, sql);
+                if (!results.isEmpty()) {
+                    if (results.get(0).get(1).toString().equalsIgnoreCase("OFF")) {
+                        enableTwoPhaseDdlOnDn.set(false);
+                    }
+                } else {
+                    enableTwoPhaseDdlOnDn.set(false);
+                }
+            }
+        );
+        return enableTwoPhaseDdlOnDn.get();
+    }
+
+    public static Long generateTwoPhaseDdlManagerId(String schemaName, String tableName) {
+        Long id = ID_GENERATOR.nextId();
+        // There would be a problem if multiple CNs received this job, so we generate a globally
+        // unique id from GMS, like a sequence.
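+        // Retry until the id is successfully registered in GMS; acquireTwoPhaseDdlId returns
+        // false when another CN has already claimed the candidate id.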
+        while (!TwoPhaseDdlReporter.acquireTwoPhaseDdlId(schemaName, tableName, id)) {
+            id = ID_GENERATOR.nextId();
+        }
+        return id;
+    }
+
+    public TwoPhaseDdlManager(String schemaName, String tableName, String phyDdlStmt,
+                              Map<String, Set<String>> sourcePhyTableNames, Long twoPhaseDdlManagerId) {
+        this.schemaName = schemaName;
+        this.tableName = tableName;
+        this.phyDdlStmt = phyDdlStmt;
+        this.sourcePhyTableNames = sourcePhyTableNames;
+        this.twoPhaseDdlManagerId = twoPhaseDdlManagerId;
+        this.runningState.set(RunningState.INIT);
+        this.twoPhaseDdlReporter = new TwoPhaseDdlReporter();
+        globalTwoPhaseDdlManagerMap.put(this.twoPhaseDdlManagerId, this);
+    }
+
+    public void setJobId(Long jobId) {
+        this.jobId = jobId;
+    }
+
+    Boolean checkThreadAndConnectionEnough(String schemaName, String logicalTableName,
+                                           ExecutionContext executionContext) {
+        Map<String, String> sourceGroupDnMap =
+            DnStats.buildGroupToDnMap(schemaName, logicalTableName, executionContext);
+        Map<String, DnStats> dnStateMap =
+            DnStats.buildDnStats(schemaName, tableName, sourceGroupDnMap, jobId, executionContext);
+        Map<String, Integer> requiredConnections = new HashMap<>();
+        Boolean checkEnough = true;
+        for (String sourceGroupName : sourcePhyTableNames.keySet()) {
+            String dn = sourceGroupDnMap.get(sourceGroupName);
+            requiredConnections.put(dn,
+                requiredConnections.getOrDefault(dn, 0) + sourcePhyTableNames.get(sourceGroupName).size());
+        }
+        for (String dn : dnStateMap.keySet()) {
+            if (requiredConnections.containsKey(dn) && !dnStateMap.get(dn)
+                .checkConnectionNum(requiredConnections.get(dn))) {
+                checkEnough = false;
+            }
+        }
+        //if(check alive thread num ok){
+        //
+        //}
+        return checkEnough;
+    }
+
+    public static Map<String, String> calPhyTableHashCodeMap(String schemaName,
+                                                             Map<String, Set<String>> sourcePhyTableNames) {
+        Map<String, String> phyTableHashCodeMap = new HashMap<>();
+        sourcePhyTableNames.keySet().forEach(
+            sourceGroupName -> {
+                for (String phyTableName : sourcePhyTableNames.get(sourceGroupName)) {
+                    String hashCode = DdlHelper.genHashCodeForPhyTableDDL(schemaName, sourceGroupName,
+                        SqlIdentifier.surroundWithBacktick(phyTableName), 0);
+                    String fullPhyTableName =
+                        buildPhyDbTableNameFromGroupNameAndPhyTableName(sourceGroupName, phyTableName);
+                    phyTableHashCodeMap.put(fullPhyTableName, hashCode);
+                }
+            }
+        );
+        return phyTableHashCodeMap;
+    }
+
+    /**
+     * Init two phase ddl for a logical table.
+     */
+    public void twoPhaseDdlInit(String logicalTableName,
+                                ExecutionContext originEc) {
+        if (sourcePhyTableNames == null || sourcePhyTableNames.isEmpty()) {
+            return;
+        } //TODO: report an error when sourcePhyTableNames is invalid.
+
+        if (originEc.getParamManager().getBoolean(CHECK_PHY_CONN_NUM)) {
+            if (!checkThreadAndConnectionEnough(schemaName, logicalTableName, originEc)) {
+                throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR,
+                    "there are not enough connections or threads to alter table by consistency commit! You can use "
+                        + "/*+TDDL:cmd_extra(ENABLE_DRDS_MULTI_PHASE_DDL=false)*/ to execute this ddl. ");
+            }
+        }
+        sourcePhyTableNames.forEach((sourceGroupName, phyTableNames) -> {
+            //TODO: process the result of initPhyTableDdl.
+            initPhyTableDdl(
+                schemaName,
+                logicalTableName,
+                sourceGroupName,
+                phyTableNames,
+                originEc
+            );
+        });
+        this.runningState.set(RunningState.INIT);
+    }
+
+    // Return true when not interrupted; return false when interrupted.
+    public Boolean twoPhaseDdlEmit(String logicalTableName,
+                                   ConcurrentHashMap<String, Set<String>> phyTableEmittedInTask,
+                                   ExecutionContext originEc) throws InterruptedException {
+        // 1. Initialize some state (count), maps (sourceGroupToDnMap, dnToSourceGroupMap) and the
+        //    concurrency policy.
+        AtomicInteger count = new AtomicInteger();
+        Map<String, String> sourceGroupToDnMap = DnStats.buildGroupToDnMap(schemaName, logicalTableName, originEc);
+        Map<String, List<String>> dnToSourceGroupMap = sourceGroupToDnMap
+            .entrySet().stream().collect(Collectors.groupingBy(Map.Entry::getValue,
+                Collectors.mapping(Map.Entry::getKey, Collectors.toList())));
+        QueryConcurrencyPolicy concurrencyPolicy = getConcurrencyPolicy(originEc);
+        // 2. Check jobInterrupted.
+        Boolean jobInterrupted = CrossEngineValidator.isJobInterrupted(originEc);
+        if (!jobInterrupted) {
+            runningState.set(RunningState.INIT);
+        }
+        if (sourcePhyTableNames == null || sourcePhyTableNames.isEmpty()) {
+            return true;
+        }
+        // 3. Reload more state (groupName => phyDdlTask map, phyTableEmitted).
+        if (phyDdlTaskGroupByGroupName == null) {
+            phyDdlTaskGroupByGroupName = new ConcurrentHashMap<>();
+        }
+
+        if (this.phyTableEmitted == null) {
+            phyTableEmitted = phyTableEmittedInTask;
+        }
+        if (this.phyTableEmitted == null) {
+            phyTableEmitted = new ConcurrentHashMap<>();
+        }
+        // 4. Initialize phyTableEmitted; if everything is already emitted, return !jobInterrupted.
+        if (checkAllPhyDdlEmited(logicalTableName, originEc)) {
+            return !jobInterrupted;
+        }
+        Map<String, String> mdcContext = MDC.getCopyOfContextMap();
+        // 5. Loop over the group names:
+        for (String sourceGroupName : sourceGroupToDnMap.keySet()) {
+            Set<String> phyTableNames = sourcePhyTableNames.getOrDefault(sourceGroupName, new HashSet<>());
+            phyDdlTaskGroupByGroupName.put(sourceGroupName, new LinkedList<>());
+            if (!phyTableEmitted.containsKey(sourceGroupName)) {
+                phyTableEmitted.put(sourceGroupName, new HashSet<>());
+            }
+            // 5.2 Generate tasks for the physical tables that have not been emitted yet.
+            for (String phyTableName : phyTableNames) {
+                if (!phyTableEmitted.get(sourceGroupName).contains(phyTableName) || !isPhysicalRunning(
+                    phyTableName)) {
+                    FutureTask<Void> task = new FutureTask<>(
+                        () -> {
+                            MDC.setContextMap(mdcContext);
+                            emitPhyTableDdl(
+                                logicalTableName,
+                                PHYSICAL_DDL_EMIT_TASK_NAME,
+                                sourceGroupName, phyTableName,
+                                phyDdlStmt,
+                                originEc,
+                                phyDdlExceps,
+                                count
+                            );
+                        }, null);
+                    phyDdlTasks.add(task);
+                    phyDdlTaskGroupByGroupName.get(sourceGroupName).add(Pair.of(phyTableName, task));
+                    count.addAndGet(1);
+                }
+            }
+        }
+
+        // 6. Initialize phy table ddl state and the last emitted phy table.
+        Boolean checkPhyDdlExceps = true;
+        Map<String, Long> phyTableDdlStateMap = new HashMap<>();
+        if (lastEmitPhyTableNameMap == null) {
+            lastEmitPhyTableNameMap = new ConcurrentHashMap<>();
+        }
+        List<String> sourceGroupNames = new ArrayList<>(phyDdlTaskGroupByGroupName.keySet());
+        // 7. Loop:
+        while (count.get() > 0 && checkPhyDdlExceps && !jobInterrupted && inRunningState(runningState)) {
+            // 7.1 For each group, collect stats into phyTableDdlStateMap.
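+            // resultsToStateMap converts the stats result into (full physical table name => state);
+            // a value equal to REACHED_BARRIER_STATE means that table's ddl has reached the sync
+            // barrier, so the next task on the same group may be emitted (see 7.3.3 below).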
+            sourceGroupNames.forEach(sourceGroupName -> {
+                String sql =
+                    String.format(SQL_STATS_TWO_PHASE_DDL, schemaName,
+                        buildTwoPhaseKeyFromLogicalTableNameAndGroupName(
+                            logicalTableName, sourceGroupName
+                        ));
+                List<Map<String, Object>> results =
+                    queryGroupBypassConnPool(originEc, jobId, PHYSICAL_DDL_EMIT_TASK_NAME, schemaName,
+                        logicalTableName,
+                        sourceGroupName,
+                        sql);
+                phyTableDdlStateMap.putAll(resultsToStateMap(results));
+            });
+            // 7.2 Sort the groups on each dn by the number of remaining phyDdl tasks.
+            dnToSourceGroupMap.keySet().forEach(dnInst ->
+                dnToSourceGroupMap.get(dnInst)
+                    .sort((o1, o2) -> phyDdlTaskGroupByGroupName.get(o1).size() - phyDdlTaskGroupByGroupName.get(o2)
+                        .size()));
+
+            // 7.3 For each group:
+            sourceGroupNames.forEach(sourceGroupName -> {
+                // 7.3.1 Fetch remainPhyDdlTasks; if it is empty, remove the group from its dn's list.
+                Long phyTableDdlState = REACHED_BARRIER_STATE;
+                Queue<Pair<String, FutureTask<Void>>> remainPhyDdlTasks =
+                    phyDdlTaskGroupByGroupName.get(sourceGroupName);
+                // TODO: if the state map contains no exceptions, continue; otherwise stop and roll back.
+                String dnInst = sourceGroupToDnMap.get(sourceGroupName);
+                if (remainPhyDdlTasks.isEmpty() && dnToSourceGroupMap.get(dnInst).contains(sourceGroupName)) {
+                    dnToSourceGroupMap.get(dnInst).remove(sourceGroupName);
+                }
+                // 7.3.2 Get the state of the last emitted ddl.
+                if (lastEmitPhyTableNameMap.containsKey(sourceGroupName)) {
+                    String lastEmitPhyTableName = lastEmitPhyTableNameMap.get(sourceGroupName);
+                    String fullPhyTableName =
+                        buildPhyDbTableNameFromGroupNameAndPhyTableName(sourceGroupName, lastEmitPhyTableName);
+                    phyTableDdlState = phyTableDdlStateMap.get(fullPhyTableName);
+                }
+                // 7.3.3 If the state is ok and some physical ddl tasks remain, or the policy is
+                //       fully concurrent, poll more tasks.
+                if (REACHED_BARRIER_STATE.equals(phyTableDdlState) && !remainPhyDdlTasks.isEmpty()
+                    || concurrencyPolicy == QueryConcurrencyPolicy.CONCURRENT) {
+                    // Only the first group on each dn is considered at the INSTANCE_CONCURRENT level,
+                    // and only when it is empty will we poll the next physical table.
+                    if (concurrencyPolicy == QueryConcurrencyPolicy.INSTANCE_CONCURRENT) {
+                        if (!dnToSourceGroupMap.get(dnInst).isEmpty() && !dnToSourceGroupMap.get(dnInst).get(0)
+                            .equalsIgnoreCase(sourceGroupName)) {
+                            return;
+                        }
+                    }
+                    int delay = originEc.getParamManager().getInt(EMIT_PHY_TABLE_DDL_DELAY);
+                    if (delay > 0) {
+                        try {
+                            Thread.sleep(delay * 1000L);
+                        } catch (InterruptedException e) {
+                            throw new RuntimeException(e);
+                        }
+                    }
+                    // Poll a task from remainPhyDdlTasks and start it; count is decremented and the
+                    // last emitted table is recorded inside the task thread.
+                    Pair<String, FutureTask<Void>> remainPhyDdlTask = remainPhyDdlTasks.poll();
+                    if (remainPhyDdlTask == null) {
+                        // Nothing left on this group (possible under the CONCURRENT policy).
+                        return;
+                    }
+                    String phyTableName = remainPhyDdlTask.getKey();
+                    FutureTask<Void> task = remainPhyDdlTask.getValue();
+                    Thread thread = new Thread(task);
+                    thread.setName(String.format("MultiPhaseDdlThread_%s_%s", sourceGroupName, phyTableName));
+                    thread.start();
+                }
+            });
+            Thread.sleep(EMIT_PHYSICAL_DDL_CHECK_INTERVAL_MS);
+
+            checkPhyDdlExceps = checkPhyDdlExcepsEmpty();
+            jobInterrupted = CrossEngineValidator.isJobInterrupted(originEc);
+        }
+
+        while (checkPhyDdlExceps && !jobInterrupted && inRunningState(runningState)) {
+            if (checkAllPhyDdlEmited(logicalTableName, originEc)) {
+                break;
+            }
+            Thread.sleep(EMIT_PHYSICAL_DDL_CHECK_INTERVAL_MS);
+            checkPhyDdlExceps = checkPhyDdlExcepsEmpty();
+            jobInterrupted = CrossEngineValidator.isJobInterrupted(originEc);
+        }
+        if (!phyDdlExceps.isEmpty() || inRollbackState(runningState)) {
+            // Interrupt all.
+            phyDdlTasks.forEach(f -> {
+                try {
+                    f.cancel(true);
+                } catch (Throwable ignore) {
+                }
+            });
+            runningState.set(RunningState.ROLLBACK);
+//            twoPhaseDdlReporter.collectStatsAndUpdateState(runningState);
+            throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR,
+                StringUtil.join(",", phyDdlExceps.stream().map(
+                    Throwable::getMessage).collect(Collectors.toList())).toString());
+        } else if (jobInterrupted) {
+            // Interrupted.
+            runningState.set(RunningState.PAUSED);
+        }
+        return !jobInterrupted;
+    }
+
+    public int twoPhaseDdlPrepare(String schemaName, String logicalTable,
+                                  ExecutionContext executionContext) throws InterruptedException, ExecutionException {
+        return twoPhaseDdlEvolve(schemaName, logicalTable, SQL_PREPARE_TWO_PHASE_DDL,
+            TWO_PHASE_DDL_PREPARE_TASK_NAME,
+            executionContext, false);
+    }
+
+    public int twoPhaseDdlWait(String schemaName, String logicalTable, String taskName,
+                               Set<String> expectedPhyDdlStates,
+                               ExecutionContext executionContext) throws InterruptedException, ExecutionException {
+        checkAllPhyDdlInState(logicalTable, expectedPhyDdlStates, executionContext);
+        return twoPhaseDdlEvolve(schemaName, logicalTable, SQL_WAIT_TWO_PHASE_DDL, taskName,
+            executionContext, true);
+    }
+
+    public int twoPhaseDdlCommit(String schemaName, String logicalTable,
+                                 ExecutionContext executionContext) throws InterruptedException, ExecutionException {
+        return twoPhaseDdlCommit(schemaName, logicalTable, SQL_COMMIT_TWO_PHASE_DDL,
+            TWO_PHASE_DDL_COMMIT_TASK_NAME,
+            executionContext);
+    }
+
+    public int twoPhaseDdlFinish(String schemaName, String logicalTable,
+                                 ExecutionContext executionContext) throws InterruptedException, ExecutionException {
+        ExecutionContext ec = executionContext.copy();
+        AtomicInteger unfinishedCount = new AtomicInteger(sourcePhyTableNames.size());
+        this.runningState.set(RunningState.FINISH);
+        waitAllPhysicalDdlFinished(schemaName, logicalTable, executionContext);
+        while (unfinishedCount.get() > 0) {
+            sourcePhyTableNames.keySet().forEach(sourceGroupName -> {
+                String sql =
+                    String.format(SQL_FINISH_TWO_PHASE_DDL, schemaName,
+                        buildTwoPhaseKeyFromLogicalTableNameAndGroupName(
+                            logicalTable, sourceGroupName
+                        ));
+                List<Map<String, Object>> results =
+                    queryGroupBypassConnPool(ec, jobId, TWO_PHASE_DDL_FINISH_TASK_NAME, schemaName,
+                        logicalTable,
+                        sourceGroupName,
+                        sql);
+                if (resultsToFinishSuccess(results)) {
+                    unfinishedCount.getAndDecrement();
+                }
+            });
+            Thread.sleep(DEFAULT_WAIT_TIME_MS);
+        }
+        return 1;
+    }
+
+    public void twoPhaseDdlLog(String schemaName, String logicalTable,
+                               ExecutionContext executionContext) throws InterruptedException, ExecutionException {
+        List<Map<String, Object>> results = DdlHelper.getServerConfigManager()
+            .executeQuerySql(TwoPhaseDdlUtils.SQL_SHOW_PHYSICAL_DDL, schemaName, null);
+        // Result columns of SHOW PHYSICAL DDL:
+//        result.addColumn("PHYSICAL_DB_NAME", DataTypes.StringType);
+//        result.addColumn("PHYSICAL_TABLE_NAME", DataTypes.StringType);
+//        result.addColumn("PHASE", DataTypes.StringType);
+//        result.addColumn("STATE", DataTypes.StringType);
+//        result.addColumn("PROCESS_ID", DataTypes.LongType);
+//        result.addColumn("PROCESS_STATE", DataTypes.StringType);
+//        result.addColumn("TIME", DataTypes.LongType);
+//
+//        result.addColumn("REACHED_PREPARED_MOMENT", DataTypes.StringType);
+//        result.addColumn("REACHED_COMMIT_MOMENT", DataTypes.StringType);
+//        result.addColumn("COMMIT_MOMENT", DataTypes.StringType);
+//        result.addColumn("PREPARE_MOMENT", DataTypes.StringType);
+//        result.addColumn("PREPARED_RUNNING_CONSUME_BLOCKS", DataTypes.LongType);
+//        result.addColumn("PREPARED_RUNNING_CONSUME_TIME", DataTypes.LongType);
+//        result.addColumn("COMMIT_CONSUME_BLOCKS", DataTypes.LongType);
+//        result.addColumn("COMMIT_CONSUME_TIME", DataTypes.LongType);
+//        result.addColumn("LOCK_TABLE_TIME", DataTypes.LongType);
+        int totalPhyTableNum = results.size();
+        long maxLockTableTime = 0L;
+        long minLockTableTime = 1000_000_000_000L;
+        int finished = 0;
+        for (Map<String, Object> line : results) {
+            long lockTableTime = (Long) line.get("LOCK_TABLE_TIME");
+            if (lockTableTime >= maxLockTableTime) {
+                maxLockTableTime = lockTableTime;
+            }
+            if (lockTableTime <= minLockTableTime) {
+                minLockTableTime = lockTableTime;
+            }
+            String phase = (String) line.get("PHASE");
+            if (phase.equalsIgnoreCase("COMMIT")) {
+                finished += 1;
+            }
+        }
+        String logInfo = String.format(
+            "<MultiPhaseDdl %d> schema: %s, table: %s, sql: %s, two phase ddl task finished! finished %d physical tables, max_lock_table_time is %d, min_lock_table_time is %d",
+            jobId, schemaName, logicalTable, phyDdlStmt,
+            finished,
+            maxLockTableTime, minLockTableTime);
+        LOG.info(logInfo);
+        EventLogger.log(EventType.TWO_PHASE_DDL_INFO, logInfo);
+        String showDdlResults = JSON.toJSONString(results, SerializerFeature.DisableCircularReferenceDetect);
+        if (showDdlResults.length() > 1000_00L) {
+            showDdlResults = DdlHelper.compress(showDdlResults);
+        }
+        EventLogger.log(EventType.TWO_PHASE_DDL_INFO, showDdlResults);
+    }
+
+    public int waitAllPhysicalDdlFinished(String schemaName, String logicalTable,
+                                          ExecutionContext executionContext)
+        throws InterruptedException, ExecutionException {
+        int status = 1;
+        ExecutionContext ec = executionContext.copy();
+        if (sourcePhyTableNames == null || sourcePhyTableNames.isEmpty()) {
+            return status;
+        }
+        Set<String> unfinishedSourceGroupNames = new HashSet<>(sourcePhyTableNames.keySet());
+        while (!unfinishedSourceGroupNames.isEmpty()) {
+            collectPhysicalDdlStats(schemaName, logicalTable, executionContext);
+            List<String> unfinishedSourceGroupNameList = new ArrayList<>(unfinishedSourceGroupNames);
+            for (String sourceGroupName : unfinishedSourceGroupNameList) {
+                Map<String, String> processInfoMap = this.queryProcessInfoMap.get(sourceGroupName);
+                if (checkProcessFinished(schemaName, sourceGroupName, processInfoMap)) {
+                    unfinishedSourceGroupNames.remove(sourceGroupName);
+                }
+            }
+            Thread.sleep(DEFAULT_WAIT_TIME_MS);
+        }
+        return status;
+    }
+
+    public Boolean checkIfProcessInfoRunning(String processInfo) {
+        return !StringUtils.isEmpty(processInfo) && processInfo.contains(twoPhaseDdlManagerId.toString());
+    }
+
+    public Boolean checkProcessFinished(String schemaName, String logicalTable,
+                                        Map<String, String> processInfoMap) {
+        for (String phyTableName : processInfoMap.keySet()) {
+            String processInfo = processInfoMap.get(phyTableName);
+            // For JDBC, the thread disappears soon after the ddl ends.
+            // For xprotocol, the thread may remain alive to serve other requests.
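+            // A process entry counts as ours only while its info column still carries this
+            // manager's id, which is embedded in every physical statement via
+            // TWO_PHASE_PHYSICAL_DDL_HINT_TEMPLATE (see checkIfProcessInfoRunning).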
+            if (checkIfProcessInfoRunning(processInfo)) {
+                return false;
+            }
+        }
+        String logInfo =
+            String.format("<MultiPhaseDdl %d> %s.%s ddl finished, the process info is %s", jobId, schemaName,
+                logicalTable,
+                processInfoMap);
+        LOG.info(logInfo);
+        return true;
+    }
+
+    public void collectPhysicalDdlStats(String schemaName, String logicalTable,
+                                        ExecutionContext executionContext) {
+        if (sourcePhyTableNames == null || sourcePhyTableNames.isEmpty()) {
+            return;
+        }
+        ExecutionContext ec = executionContext.copy();
+        sourcePhyTableNames.keySet().forEach(sourceGroupName -> {
+            String sql =
+                String.format(SQL_STATS_TWO_PHASE_DDL, schemaName, buildTwoPhaseKeyFromLogicalTableNameAndGroupName(
+                    logicalTable, sourceGroupName
+                ));
+            List<Map<String, Object>> results =
+                queryGroupBypassConnPool(ec, jobId, TWO_PHASE_DDL_STATS, schemaName,
+                    logicalTable,
+                    sourceGroupName,
+                    sql);
+            Map<String, Long> queryIdMapOnSourceGroup = resultsToQueryIdMap(results);
+            Map<String, String> physicalDdlStateMapOnSourceGroup = resultsToPhysicalDdlStateMap(results);
+            String phase = resultsToPhysicalDdlPhase(results);
+            this.queryIdMap.putAll(queryIdMapOnSourceGroup);
+            this.queryPhyDdlRunningStateMap.put(sourceGroupName, physicalDdlStateMapOnSourceGroup);
+            this.queryPhyDdlPhaseMap.put(sourceGroupName, phase);
+            List<Map<String, Object>> processInfoResults =
+                queryGroupBypassConnPool(ec, jobId, TWO_PHASE_DDL_STATS, schemaName,
+                    logicalTable,
+                    sourceGroupName,
+                    SQL_SHOW_PROCESS_LIST);
+            Map<Pair<String, Long>, String> queryIdToProcessInfoMap =
+                resultsToQueryIdToProcessInfoMap(processInfoResults);
+            Map<String, String> physicalTableNameToProcessInfoMap = new HashMap<>();
+            String phyDbName = buildPhysicalDbNameFromGroupName(sourceGroupName);
+            for (String physicalTableName : queryIdMapOnSourceGroup.keySet()) {
+                Long queryId = queryIdMapOnSourceGroup.get(physicalTableName);
+                physicalTableNameToProcessInfoMap.put(physicalTableName,
+                    queryIdToProcessInfoMap.getOrDefault(Pair.of(phyDbName, queryId), null));
+            }
+            this.queryProcessInfoMap.put(sourceGroupName, physicalTableNameToProcessInfoMap);
+        });
+    }
+
+    public Boolean phyTableInCommitState(String groupName) {
+        return this.queryPhyDdlPhaseMap.get(groupName).equalsIgnoreCase(COMMIT_STATE);
+    }
+
+    public Boolean phyTableInRollbackState(String groupName) {
+        return this.queryPhyDdlRunningStateMap.get(groupName).values().stream()
+            .anyMatch(o -> o.equalsIgnoreCase(WAIT_ROLLBACK_STATE));
+    }
+
+    public int twoPhaseDdlRollback(String schemaName, String logicalTable,
+                                   ExecutionContext executionContext) throws InterruptedException, ExecutionException {
+        long startTime = System.currentTimeMillis();
+        int status = 1;
+        this.runningState.set(RunningState.ROLLBACK);
+        ExecutionContext ec = executionContext.copy();
+        if (sourcePhyTableNames == null || sourcePhyTableNames.isEmpty()) {
+            return status;
+        }
+        EventLogger.log(EventType.TWO_PHASE_DDL_INFO, "multiple phase ddl start rolling back...");
+
+        collectPhysicalDdlStats(schemaName, logicalTable, executionContext);
+        sourcePhyTableNames.forEach((sourceGroupName, phyTableNames) -> {
+            List<String> sqls = new ArrayList<>();
+            phyTableNames.forEach(phyTableName -> {
+                String fullPhyTableName =
+                    buildPhyDbTableNameFromGroupNameAndPhyTableName(sourceGroupName, phyTableName);
+                Long queryId = queryIdMap.get(fullPhyTableName);
+                String processInfo = queryProcessInfoMap.get(sourceGroupName).get(fullPhyTableName);
+                // If there is a truly running ddl, kill the query.
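+                // The query id alone is not enough: process list ids can be reused, so we also
+                // require the process info to still reference this manager before killing.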
+                if (queryId != null && queryId > 0 && checkIfProcessInfoRunning(processInfo)) {
+                    String sql = String.format(SQL_KILL_QUERY, queryId);
+                    sqls.add(sql);
+                }
+            });
+            if (!sqls.isEmpty()) {
+                String multipleKillSql = StringUtil.join(";", sqls).toString();
+                updateGroupBypassConnPool(ec, jobId, TWO_PHASE_DDL_ROLLBACK_TASK_NAME, schemaName, logicalTable,
+                    sourceGroupName, multipleKillSql);
+            }
+        });
+        sourcePhyTableNames.forEach((sourceGroupName, phyTableNames) -> {
+            String sql =
+                String.format(SQL_ROLLBACK_TWO_PHASE_DDL, schemaName, buildTwoPhaseKeyFromLogicalTableNameAndGroupName(
+                    logicalTable, sourceGroupName
+                ));
+            queryGroupBypassConnPool(ec, jobId, TWO_PHASE_DDL_ROLLBACK_TASK_NAME, schemaName,
+                logicalTable,
+                sourceGroupName,
+                sql);
+        });
+        waitAllPhysicalDdlFinished(schemaName, logicalTable, executionContext);
+        Set<String> residueSourceGroupNames = new HashSet<>(sourcePhyTableNames.keySet());
+        while (!residueSourceGroupNames.isEmpty()) {
+            String sourceGroupName = residueSourceGroupNames.iterator().next();
+            String sql =
+                String.format(SQL_FINISH_TWO_PHASE_DDL, schemaName,
+                    buildTwoPhaseKeyFromLogicalTableNameAndGroupName(
+                        logicalTable, sourceGroupName
+                    ));
+            List<Map<String, Object>> results =
+                queryGroupBypassConnPool(ec, jobId, TWO_PHASE_DDL_ROLLBACK_TASK_NAME, schemaName,
+                    logicalTable,
+                    sourceGroupName,
+                    sql);
+            if (resultsToFinishSuccess(results)) {
+                LOG.info(
+                    String.format("<MultiPhaseDdl %d> execute %s success on group %s", jobId, sql, sourceGroupName));
+                residueSourceGroupNames.remove(sourceGroupName);
+            }
+            Thread.sleep(FINISH_DDL_WAIT_TIME);
+        }
+        long finishTime = System.currentTimeMillis();
+        long duration = finishTime - startTime;
+        String logInfo =
+            String.format("<MultiPhaseDdl %d> multiple phase ddl finished rolling back, which cost %d ms", jobId,
+                duration);
+        EventLogger.log(EventType.TWO_PHASE_DDL_INFO, logInfo);
+        LOG.info(logInfo);
+        List<String> rollbackCause =
+            phyDdlExceps.stream().filter(o -> !StringUtil.isNullOrEmpty(o.getMessage())).map(o -> o.getMessage())
+                .collect(
+                    Collectors.toList());
+        String rollbackCauseString = StringUtil.join(", ", rollbackCause).toString();
+        EventLogger.log(EventType.TWO_PHASE_DDL_INFO,
+            String.format("<MultiPhaseDdl %d> multiple phase ddl rolled back because %s ", jobId,
+                rollbackCauseString));
+        return status;
+    }
+
+    public Boolean checkPhyDdlExcepsEmpty() {
+        return phyDdlExceps.isEmpty();
+//        if (!phyDdlExceps.isEmpty()) {
+//            // Interrupt all.
+//            phyDdlTasks.forEach(f -> {
+//                try {
+//                    f.cancel(true);
+//                } catch (Throwable ignore) {
+//                }
+//            });
+    }
+
+    public void initPhyTableDdl(String schemaName, String logicalTable, String sourceGroupName,
+                                Set<String> phyTableNames, ExecutionContext executionContext) {
+        ExecutionContext ec = executionContext.copy();
+        // Physical table names may contain "'", so escape them before quoting.
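+        // e.g. a physical table named t_1'x is passed to the init procedure as 't_1\'x'.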
+        List<String> phyTableNamesList =
+            phyTableNames.stream().map(o -> String.format("'%s'", o.toLowerCase().replace("'", "\\'"))).collect(
+                Collectors.toList());
+        String phyTableNameStr = TStringUtil.join(phyTableNamesList, ",");
+        String sql =
+            String.format(SQL_INIT_TWO_PHASE_DDL, schemaName, buildTwoPhaseKeyFromLogicalTableNameAndGroupName(
+                    logicalTable, sourceGroupName
+                ),
+                phyTableNameStr);
+        List<List<Object>> results =
+            queryGroup(ec, jobId, TWO_PHASE_DDL_INIT_TASK_NAME, schemaName, logicalTable, sourceGroupName, sql);
+
+        if (!TwoPhaseDdlData.initialPhyDdlSuccess(results)) {
+            throw GeneralUtil.nestedException(
+                String.format("failed to initialize two phase ddl on group(%s): %s, caused by unknown reason",
+                    sourceGroupName, formatString(sql)));
+        }
+    }
+
+    public int twoPhaseDdlEvolve(String schemaName, String logicalTable,
+                                 String sqlStmt, String taskName,
+                                 ExecutionContext executionContext,
+                                 Boolean checkDdlInterrupted) throws InterruptedException, ExecutionException {
+        int status = 1;
+        ExecutionContext ec = executionContext.copy();
+        List<Future> futures = new ArrayList<>();
+        if (sourcePhyTableNames == null || sourcePhyTableNames.isEmpty()) {
+            return status;
+        }
+
+        Map<Future, String> futureTaskMap = new HashMap<>();
+        String hint = String.format(TWO_PHASE_PHYSICAL_DDL_HINT_TEMPLATE, twoPhaseDdlManagerId);
+        sourcePhyTableNames.forEach((sourceGroupName, phyTableNames) -> {
+            String sql = hint + String.format(sqlStmt, schemaName, buildTwoPhaseKeyFromLogicalTableNameAndGroupName(
+                logicalTable, sourceGroupName
+            ));
+            FutureTask<List<List<Object>>> task = new FutureTask<>(
+                () -> queryGroup(ec, jobId, taskName, schemaName,
+                    logicalTable,
+                    sourceGroupName,
+                    sql));
+            futures.add(task);
+            futureTaskMap.put(task, sourceGroupName);
+            TwoPhaseDdlThreadPool.getInstance()
+                .executeWithContext(task, PriorityFIFOTask.TaskPriority.HIGH_PRIORITY_TASK);
+        });
+
+        while (!futures.stream().allMatch(o -> o.isDone())) {
+            Boolean phyDdlExcepsEmpty = checkPhyDdlExcepsEmpty();
+            Boolean jobInterrupted = CrossEngineValidator.isJobInterrupted(executionContext);
+            if (!phyDdlExcepsEmpty || (checkDdlInterrupted && jobInterrupted)) {
+                status = 0;
+                break;
+            } else {
+                Thread.sleep(TwoPhaseDdlManager.DEFAULT_WAIT_TIME_MS);
+            }
+        }
+
+        if (status == 0 && !futures.stream().allMatch(o -> o.isDone())) {
+            throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR,
+                StringUtil.join(",", phyDdlExceps.stream().map(
+                    Throwable::getMessage).collect(Collectors.toList())).toString());
+        }
+
+        for (Future future : futures) {
+            List<List<Object>> results = (List<List<Object>>) future.get();
+            LOG.info(String.format("<MultiPhaseDdl %d> [%s] %s FutureTask on group [%s] get result {%s}",
+                jobId, Thread.currentThread().getName(), taskName, futureTaskMap.get(future),
+                TwoPhaseDdlUtils.resultToString(results)));
+            if (!TwoPhaseDdlData.evolvePhyDdlSuccess(results)) {
+                status = 0;
+            }
+        }
+
+        if (status == 0) {
+            Boolean phyDdlExcepsEmpty = checkPhyDdlExcepsEmpty();
+            if (!phyDdlExcepsEmpty) {
+                throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR,
+                    StringUtil.join(",", phyDdlExceps.stream().map(
+                        Throwable::getMessage).collect(Collectors.toList())).toString());
+            }
+        }
+        return status;
+    }
+
+    public int twoPhaseDdlCommit(String schemaName, String logicalTable,
+                                 String sqlStmt, String taskName,
+                                 ExecutionContext executionContext) throws InterruptedException, ExecutionException {
+        // TODO(2pc-ddl): commit.
+        // First, try our best to commit, retrying up to 3 times.
+        // Then roll back the groups that still could not commit after 3 tries.
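+        // Note: groups that acknowledge the commit are removed from failedGroups, so the rollback
+        // loop below only targets groups that never reached the COMMIT phase (phyTableInCommitState).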
+        int status = 1;
+        int totalRetryTimes = 3;
+        int retryTime = 0;
+        ExecutionContext ec = executionContext.copy();
+        if (sourcePhyTableNames == null || sourcePhyTableNames.isEmpty()) {
+            return status;
+        }
+        long startTime = System.currentTimeMillis();
+        EventLogger.log(EventType.TWO_PHASE_DDL_INFO, "multiple phase ddl start committing...");
+
+        Set<String> failedGroups = new HashSet<>(sourcePhyTableNames.keySet());
+        while (retryTime < totalRetryTimes) {
+            List<Future> futures = new ArrayList<>();
+            status = 1;
+            collectPhysicalDdlStats(schemaName, logicalTable, executionContext);
+            Map<Future, String> futureTaskMap = new HashMap<>();
+            sourcePhyTableNames.forEach((sourceGroupName, phyTableNames) -> {
+                if (!failedGroups.contains(sourceGroupName)) {
+                    return;
+                }
+                String sql = String.format(sqlStmt, schemaName, buildTwoPhaseKeyFromLogicalTableNameAndGroupName(
+                    logicalTable, sourceGroupName
+                ));
+                FutureTask<List<List<Object>>> task = new FutureTask<>(
+                    () -> queryGroup(ec, jobId, taskName, schemaName,
+                        logicalTable,
+                        sourceGroupName,
+                        sql));
+                futures.add(task);
+                futureTaskMap.put(task, sourceGroupName);
+                TwoPhaseDdlThreadPool.getInstance()
+                    .executeWithContext(task, PriorityFIFOTask.TaskPriority.HIGH_PRIORITY_TASK);
+            });
+
+            while (!futures.stream().allMatch(o -> o.isDone())) {
+                Thread.sleep(TwoPhaseDdlManager.DEFAULT_WAIT_TIME_MS);
+                // We still keep waiting, to guarantee everything gets committed.
+            }
+
+            collectPhysicalDdlStats(schemaName, logicalTable, executionContext);
+
+            for (Future future : futures) {
+                List<List<Object>> results = (List<List<Object>>) future.get();
+                LOG.info(String.format("<MultiPhaseDdl %d> [%s] %s FutureTask on group [%s] get result %s",
+                    jobId, Thread.currentThread().getName(), taskName, futureTaskMap.get(future),
+                    TwoPhaseDdlUtils.resultToString(results)));
+                String group = futureTaskMap.get(future);
+                if (!TwoPhaseDdlData.evolvePhyDdlSuccess(results) || !phyTableInCommitState(group)) {
+                    status = 0;
+                    failedGroups.add(group);
+                } else {
+                    failedGroups.remove(group);
+                }
+            }
+
+            LOG.info(String.format("<MultiPhaseDdl %d> commit try time %d, get status %d",
+                jobId, retryTime, status));
+            retryTime += 1;
+            if (status == 1) {
+                break;
+            }
+        }
+        retryTime = 0;
+        while (status == 0 && retryTime < totalRetryTimes) {
+            status = 1;
+            collectPhysicalDdlStats(schemaName, logicalTable, executionContext);
+            List<Future> futures = new ArrayList<>();
+            Map<Future, String> futureTaskMap = new HashMap<>();
+            sourcePhyTableNames.forEach((sourceGroupName, phyTableNames) -> {
+                if (!failedGroups.contains(sourceGroupName)) {
+                    return;
+                }
+                String sql = String.format(SQL_ROLLBACK_TWO_PHASE_DDL, schemaName,
+                    buildTwoPhaseKeyFromLogicalTableNameAndGroupName(
+                        logicalTable, sourceGroupName
+                    ));
+                FutureTask<List<List<Object>>> task = new FutureTask<>(
+                    () -> queryGroup(ec, jobId, taskName, schemaName,
+                        logicalTable,
+                        sourceGroupName,
+                        sql));
+                futures.add(task);
+                futureTaskMap.put(task, sourceGroupName);
+                TwoPhaseDdlThreadPool.getInstance()
+                    .executeWithContext(task, PriorityFIFOTask.TaskPriority.HIGH_PRIORITY_TASK);
+            });
+
+            while (!futures.stream().allMatch(o -> o.isDone())) {
+                Thread.sleep(TwoPhaseDdlManager.DEFAULT_WAIT_TIME_MS);
+                status = 0;
+                // We still keep waiting, to guarantee everything gets rolled back.
+            }
+
+            collectPhysicalDdlStats(schemaName, logicalTable, executionContext);
+            for (Future future : futures) {
+                List<List<Object>> results = (List<List<Object>>) future.get();
+                String group = futureTaskMap.get(future);
+                LOG.info(String.format("<MultiPhaseDdl %d> [%s] %s FutureTask on group [%s] get result %s",
+                    jobId, Thread.currentThread().getName(), taskName, group,
+                    TwoPhaseDdlUtils.resultToString(results)));
+                if (!TwoPhaseDdlData.evolvePhyDdlSuccess(results) || !phyTableInRollbackState(group)) {
+                    status = 0;
+                    failedGroups.add(group);
+                } else {
+                    failedGroups.remove(group);
+                }
+            }
+            retryTime += 1;
+        }
+        long finishTime = System.currentTimeMillis();
+        long duration = finishTime - startTime;
+        EventLogger.log(EventType.TWO_PHASE_DDL_INFO,
+            String.format("multiple phase ddl finished committing, which cost %d ms", duration));
+        return status;
+    }
+
+    public String generateAlterTableStmt(String phyDdlStmt, String phyTableName) {
+        String drdsHint = String.format(TWO_PHASE_PHYSICAL_DDL_HINT_TEMPLATE, twoPhaseDdlManagerId);
+        SQLAlterTableStatement alterTable = (SQLAlterTableStatement) FastsqlUtils.parseSql(phyDdlStmt).get(0);
+        alterTable.setTableSource(new SQLExprTableSource(new SQLIdentifierExpr(
+            SqlIdentifier.surroundWithBacktick(phyTableName))));
+        String phyDdl = drdsHint + alterTable.toString();
+        return phyDdl;
+    }
+
+    public void emitPhyTableDdl(String logicalTable, String taskName, String groupName, String phyTableName,
+                                String phyDdlStmt,
+                                ExecutionContext ec,
+                                List<Throwable> exceps,
+                                AtomicInteger count) {
+        // This string is used for logging; the sql is still executed via a prepared statement.
+        String phyDdl = generateAlterTableStmt(phyDdlStmt, phyTableName);
+        String drdsHint = String.format(TWO_PHASE_PHYSICAL_DDL_HINT_TEMPLATE, twoPhaseDdlManagerId);
+        try {
+            if (inRunningState(runningState)) {
+                LOG.info(String.format(
+                    "<MultiPhaseDdl %d> [%s] %s execute physical ddl %s on group %s EMIT, logical table %s",
+                    jobId, Thread.currentThread().getName(), taskName, formatString(phyDdl), groupName,
+                    logicalTable));
+                lastEmitPhyTableNameMap.put(groupName, phyTableName);
+                count.decrementAndGet();
+                phyTableEmitted.get(groupName).add(phyTableName);
+                executePhyDdlBypassConnPool(ec, jobId, schemaName, groupName, phyDdl, drdsHint, phyTableName);
+                LOG.info(String.format(
+                    "<MultiPhaseDdl %d> [%s] %s execute physical ddl %s on group %s END, logical table %s",
+                    jobId, Thread.currentThread().getName(), taskName, formatString(phyDdl), groupName,
+                    logicalTable));
+            } else {
+                LOG.info(
+                    String.format(
+                        "<MultiPhaseDdl %d> [%s] %s skip execute physical ddl %s on group %s END, logical table %s",
+                        jobId, Thread.currentThread().getName(), taskName, formatString(phyDdl), groupName,
+                        logicalTable));
+            }
+        } catch (RuntimeException exception) {
+            LOG.info(String.format(
+                "<MultiPhaseDdl %d> [%s] %s execute physical ddl %s on group %s FAILED, logical table %s, return exception %s",
+                jobId, Thread.currentThread().getName(), taskName, formatString(phyDdl), groupName, logicalTable,
+                formatString(exception.getMessage())));
+            exceps.add(exception);
+        }
+    }
+
+    public Boolean inRunningState(AtomicReference<RunningState> runningState) {
+        Set<RunningState> continueRunningPhase = new HashSet<>(
+            Arrays.asList(RunningState.INIT, RunningState.PREPARE, RunningState.COMMIT)
+        );
+        return continueRunningPhase.contains(runningState.get());
+    }
+
+    public Boolean inRollbackState(AtomicReference<RunningState> runningState) {
+        Set<RunningState> continueRunningPhase = new HashSet<>(
+            Arrays.asList(RunningState.ROLLBACK)
+        );
+        return continueRunningPhase.contains(runningState.get());
+    }
+
+    public Boolean checkAllPhyDdlEmited(String logicalTableName,
+                                        ExecutionContext executionContext) {
+        collectPhysicalDdlStats(schemaName, logicalTableName, executionContext);
+        Boolean allPhysicalTableEmitted = true;
+        int emittedNum = 0;
+        int remainNum = 0;
+        for (String sourceGroupName : sourcePhyTableNames.keySet()) {
+            Map<String, String> queryProcessInfoOnGroup = queryProcessInfoMap.get(sourceGroupName);
+            Set<String> phyTableNames = sourcePhyTableNames.get(sourceGroupName);
+            Set<String> emittedPhyTableNames = new HashSet<>();
+            for (String phyTableName : phyTableNames) {
+                String fullPhyTableName =
+                    buildPhyDbTableNameFromGroupNameAndPhyTableName(sourceGroupName, phyTableName);
+                String processInfo = queryProcessInfoOnGroup.get(fullPhyTableName);
+                if (checkIfProcessInfoRunning(processInfo)) {
+                    emittedPhyTableNames.add(phyTableName);
+                    emittedNum++;
+                } else {
+                    allPhysicalTableEmitted = false;
+                    remainNum++;
+                }
+            }
+            phyTableEmitted.put(sourceGroupName, emittedPhyTableNames);
+        }
+        String logInfo = String.format(
+            "<MultiPhaseDdl %d> check all the physical ddl emitted for %s, %d emitted, %d remain, emitted physical table names: %s",
+            jobId, logicalTableName, emittedNum, remainNum, phyTableEmitted);
+        LOG.info(logInfo);
+        return allPhysicalTableEmitted;
+    }
+
+    public Boolean checkAllPhyDdlInState(String logicalTableName, Set<String> expectedStates,
+                                         ExecutionContext executionContext) {
+        collectPhysicalDdlStats(schemaName, logicalTableName, executionContext);
+        Boolean allPhysicalTableInState = true;
+        for (String sourceGroupName : sourcePhyTableNames.keySet()) {
+            Map<String, String> queryProcessInfoOnGroup = queryProcessInfoMap.get(sourceGroupName);
+            Map<String, String> phyDdlStateOnGroup = queryPhyDdlRunningStateMap.get(sourceGroupName);
+            Set<String> phyTableNames = sourcePhyTableNames.get(sourceGroupName);
+            for (String phyTableName : phyTableNames) {
+                String fullPhyTableName =
+                    buildPhyDbTableNameFromGroupNameAndPhyTableName(sourceGroupName, phyTableName);
+                if (queryProcessInfoOnGroup.get(fullPhyTableName) == null || !expectedStates.contains(
+                    phyDdlStateOnGroup.get(fullPhyTableName))) {
+                    allPhysicalTableInState = false;
+                }
+            }
+        }
+        return allPhysicalTableInState;
+    }
+
+    public Boolean isPhysicalRunning(String phyTableName) {
+        //TODO(2pc-ddl) IMPROVE: check whether the physical ddl is actually running.
+        //If retry is allowed, we must deal with the count problem on the DN,
+        //which requires some extra checks.
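+        // For now, optimistically treat every emitted physical ddl as still running.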
+        return true;
+    }
+
+    private QueryConcurrencyPolicy getConcurrencyPolicy(ExecutionContext executionContext) {
+        boolean mergeConcurrent = executionContext.getParamManager().getBoolean(ConnectionParams.MERGE_CONCURRENT);
+
+        boolean mergeDdlConcurrent =
+            executionContext.getParamManager().getBoolean(ConnectionParams.MERGE_DDL_CONCURRENT);
+
+        boolean sequential =
+            executionContext.getParamManager().getBoolean(ConnectionParams.SEQUENTIAL_CONCURRENT_POLICY);
+
+        if (mergeConcurrent && mergeDdlConcurrent) {
+            return QueryConcurrencyPolicy.CONCURRENT;
+        } else if (mergeConcurrent) {
+            return QueryConcurrencyPolicy.GROUP_CONCURRENT_BLOCK;
+        } else if (sequential) {
+            return QueryConcurrencyPolicy.SEQUENTIAL;
+        }
+
+        return QueryConcurrencyPolicy.INSTANCE_CONCURRENT;
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/twophase/TwoPhaseDdlReporter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/twophase/TwoPhaseDdlReporter.java
new file mode 100644
index 000000000..6d1f21d49
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/twophase/TwoPhaseDdlReporter.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.twophase;
+
+import com.alibaba.polardbx.gms.metadb.multiphase.MultiPhaseDdlInfoAccessor;
+import com.alibaba.polardbx.gms.util.MetaDbUtil;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReference;
+
+public class TwoPhaseDdlReporter {
+    public static Boolean acquireTwoPhaseDdlId(String schemaName, String logicalTableName, Long multiPhaseDdlId) {
+        // TODO(2pc-ddl): insert the record.
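+        // Uniqueness comes from an insert-ignore into the GMS metadb: zero affected rows means
+        // the id is already taken and the caller should try another one.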
+        int affectedRows = 1;
+        try (Connection connection = MetaDbUtil.getConnection()) {
+            MultiPhaseDdlInfoAccessor multiPhaseDdlInfoAccessor = new MultiPhaseDdlInfoAccessor();
+            multiPhaseDdlInfoAccessor.setConnection(connection);
+            affectedRows =
+                multiPhaseDdlInfoAccessor.insertIgnoreMultiPhaseDdlId(multiPhaseDdlId, schemaName, logicalTableName);
+        } catch (SQLException e) {
+            throw new RuntimeException(e);
+        }
+        return affectedRows > 0;
+    }
+
+    public Boolean updateStats(String phase) {
+        return true;
+    }
+
+    public Map<String, Set<String>> fetchEmittedPhyTables() {
+        Map<String, Set<String>> objectObjectHashMap = new HashMap<>();
+        return objectObjectHashMap;
+    }
+
+    public Boolean appendEmitPhyTable(String phyTable) {
+        return true;
+    }
+
+    public Boolean collectStatsAndUpdateState(AtomicReference<TwoPhaseDdlManager.RunningState> state) {
+        return true;
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/twophase/TwoPhaseDdlUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/twophase/TwoPhaseDdlUtils.java
new file mode 100644
index 000000000..1a03824da
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/twophase/TwoPhaseDdlUtils.java
@@ -0,0 +1,439 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.ddl.twophase;
+
+import com.alibaba.druid.filter.Filter;
+import com.alibaba.druid.pool.DruidAbstractDataSource;
+import com.alibaba.druid.pool.DruidDataSource;
+import com.alibaba.druid.proxy.jdbc.DataSourceProxy;
+import com.alibaba.druid.stat.JdbcDataSourceStat;
+import com.alibaba.polardbx.atom.TAtomConnectionProxy;
+import com.alibaba.polardbx.atom.TAtomDataSource;
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.common.jdbc.BytesSql;
+import com.alibaba.polardbx.common.model.Group;
+import com.alibaba.polardbx.common.properties.ConnectionParams;
+import com.alibaba.polardbx.common.utils.GeneralUtil;
+import com.alibaba.polardbx.common.utils.logger.Logger;
+import com.alibaba.polardbx.executor.common.ExecutorContext;
+import com.alibaba.polardbx.executor.spi.IGroupExecutor;
+import com.alibaba.polardbx.group.config.Weight;
+import com.alibaba.polardbx.group.jdbc.TGroupDataSource;
+import com.alibaba.polardbx.group.jdbc.TGroupDirectConnection;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.repo.mysql.spi.MyRepository;
+import com.alibaba.polardbx.rpc.compatible.XDataSource;
+import com.alibaba.polardbx.rpc.compatible.XStatement;
+import com.alibaba.polardbx.rpc.pool.XConnection;
+import com.alibaba.polardbx.statistics.SQLRecorderLogger;
+import com.google.common.collect.Maps;
+import com.mysql.jdbc.ConnectionImpl;
+import io.grpc.netty.shaded.io.netty.util.internal.StringUtil;
+
+import javax.sql.DataSource;
+import java.sql.Connection;
+import java.sql.Driver;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.Executor;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
+
+import static com.alibaba.polardbx.gms.util.GroupInfoUtil.buildGroupNameFromPhysicalDb;
+import static com.alibaba.polardbx.gms.util.GroupInfoUtil.buildPhysicalDbNameFromGroupName;
+
+public class TwoPhaseDdlUtils {
+
+    public static final String SQL_SHOW_VARIABLES_LIKE_ENABLE_TWO_PHASE_DDL =
+        "show global variables like 'enable_two_phase_ddl'";
+
+    public static final String SQL_INIT_TWO_PHASE_DDL = "call polarx.two_phase_ddl_init('%s.%s', %s)";
+
+    public static final String SQL_WAIT_TWO_PHASE_DDL = "call polarx.two_phase_ddl_wait('%s.%s')";
+
+    public static final String SQL_PREPARE_TWO_PHASE_DDL = "call polarx.two_phase_ddl_prepare('%s.%s')";
+
+    public static final String SQL_ROLLBACK_TWO_PHASE_DDL = "call polarx.two_phase_ddl_rollback('%s.%s')";
+
+    public static final String SQL_COMMIT_TWO_PHASE_DDL = "call polarx.two_phase_ddl_commit('%s.%s')";
+
+    public static final String SQL_TRACE_TWO_PHASE_DDL = "call polarx.two_phase_ddl_trace('%s.%s')";
+
+    public static final String SQL_FINISH_TWO_PHASE_DDL = "call polarx.two_phase_ddl_finish('%s.%s')";
+
+    public static final String SQL_STATS_TWO_PHASE_DDL = "call polarx.two_phase_ddl_stats('%s.%s')";
+
+    public static final String SQL_PROF_TWO_PHASE_DDL = "call polarx.two_phase_ddl_prof('%s.%s')";
+
+    public static final String SQL_STATS_FULL_TWO_PHASE_DDL = "call polarx.two_phase_ddl_stats()";
+
+    public static final String SQL_PROF_FULL_TWO_PHASE_DDL = "call polarx.two_phase_ddl_prof()";
+
+    public static final String SQL_SHOW_PROCESS_LIST = "show processlist";
+
+    public static final String SQL_KILL_QUERY = "kill %s";
+
+    public static final String SQL_SHOW_PHYSICAL_DDL = "show physical ddl";
+
+    public static final String TWO_PHASE_DDL_INIT_TASK_NAME = "InitTwoPhaseDdlTask";
+
+    public static final String TWO_PHASE_DDL_WAIT_PREPARE_TASK_NAME = "TwoPhaseDdlWaitPrepareTask";
+
+    public static final String TWO_PHASE_DDL_WAIT_COMMIT_TASK_NAME = "TwoPhaseDdlWaitCommitTask";
+
+    public static final String TWO_PHASE_DDL_COMMIT_TASK_NAME = "TwoPhaseDdlCommitTask";
+
+    public static final String TWO_PHASE_DDL_PREPARE_TASK_NAME = "TwoPhaseDdlPrepareTask";
+
+    public static final String TWO_PHASE_DDL_FINISH_TASK_NAME = "TwoPhaseDdlFinishTask";
+
+    public static final String PHYSICAL_DDL_EMIT_TASK_NAME = "PhysicalDdlEmitTask";
+
+    public static final String TWO_PHASE_DDL_ROLLBACK_TASK_NAME = "TwoPhaseDdlRollbackTask";
+
+    public static final String TWO_PHASE_DDL_STATS = "TwoPhaseDdlStats";
+
+    public static final String COMMIT_STATE = "COMMIT";
+
+    public static final String WAIT_ROLLBACK_STATE = "WAIT_ROLLBACK";
+
+    public static final String TWO_PHASE_PHYSICAL_DDL_HINT_TEMPLATE = "/* drds_two_phase_ddl(%d)*/";
+
+    public static final Long DDL_SOCKET_TIMEOUT = 3600 * 24 * 7 * 1000L;
+
+    public static final Long MS_DIV_NS = 1000_000L;
+
+    public static AtomicLong ACCUMULATE_QUERY_STATS_SQL_NUM = new AtomicLong(0);
+
+    public static List<List<Object>> queryGroup(ExecutionContext ec, Long jobId, String taskName, String schema,
+                                                String logicalTable, String groupName, String sql) {
+        List<List<Object>> result = new ArrayList<>();
+
+        ExecutorContext executorContext = ExecutorContext.getContext(schema);
+        IGroupExecutor ge = executorContext.getTopologyExecutor().getGroupExecutor(groupName);
+        Long socketTimeOut = ec.getParamManager().getLong(ConnectionParams.MERGE_DDL_TIMEOUT);
+        if (socketTimeOut == -1L) {
+            socketTimeOut = DDL_SOCKET_TIMEOUT;
+        }
+        Executor socketTimeoutExecutor = TGroupDirectConnection.socketTimeoutExecutor;
+        try (Connection conn = ge.getDataSource().getConnection()) {
+            SQLRecorderLogger.ddlLogger.info(String.format(
+                "<MultiPhaseDdl %d> [%s] [%s] get connection and send query sql [%s] on group %s for logical table %s, socket timeout %d ms.",
+                jobId, Thread.currentThread().getName(), taskName, formatString(sql), groupName, logicalTable,
+                socketTimeOut.intValue()));
+            conn.setNetworkTimeout(socketTimeoutExecutor, socketTimeOut.intValue());
+            Statement stmt = conn.createStatement();
+            try (ResultSet rs = stmt.executeQuery(sql)) {
+                int columns = rs.getMetaData().getColumnCount();
+                while (rs.next()) {
+                    List<Object> row = new ArrayList<>();
+                    for (int i = 1; i <= columns; i++) {
+                        row.add(rs.getObject(i));
+                    }
+                    result.add(row);
+                }
+            }
+            return result;
+        } catch (SQLException e) {
+            throw GeneralUtil.nestedException(
+                String.format("failed to execute on group(%s): %s, caused by: %s", groupName, formatString(sql),
+                    formatString(e.getMessage())), e);
+        }
+    }
+
+    public static void updateGroup(ExecutionContext ec, Long jobId, String schema, String groupName, String sql) {
+        ExecutorContext executorContext = ExecutorContext.getContext(schema);
+        IGroupExecutor ge = executorContext.getTopologyExecutor().getGroupExecutor(groupName);
+        Executor socketTimeoutExecutor = TGroupDirectConnection.socketTimeoutExecutor;
+        Long socketTimeOut = ec.getParamManager().getLong(ConnectionParams.MERGE_DDL_TIMEOUT);
+        if (socketTimeOut == -1L) {
+            socketTimeOut = DDL_SOCKET_TIMEOUT;
+        }
+        try (Connection conn = ge.getDataSource().getConnection()) {
+            SQLRecorderLogger.ddlLogger.info(String.format(
+                "<MultiPhaseDdl %d> [%s] get connection and send update sql [%s] on group %s. ",
+                jobId, Thread.currentThread().getName(), formatString(sql), groupName));
+            Statement stmt = conn.createStatement();
+            conn.setNetworkTimeout(socketTimeoutExecutor, socketTimeOut.intValue());
+            stmt.executeUpdate(sql);
+        } catch (SQLException e) {
+            throw GeneralUtil.nestedException(
+                String.format("failed to execute on group(%s): %s, caused by: %s", groupName,
+                    formatString(sql),
+                    formatString(e.getMessage())), e);
+        }
+    }
+
+    public static void updateGroupBypassConnPool(ExecutionContext ec, Long jobId, String taskName, String schema,
+                                                 String logicalTable, String groupName, String sql) {
+        MyRepository repo = (MyRepository) ExecutorContext.getContext(schema)
+            .getRepositoryHolder()
+            .get(Group.GroupType.MYSQL_JDBC.toString());
+        Long socketTimeOut = ec.getParamManager().getLong(ConnectionParams.SOCKET_TIMEOUT);
+        if (socketTimeOut == -1L) {
+            socketTimeOut = 900_000L;
+        }
+        Executor socketTimeoutExecutor = TGroupDirectConnection.socketTimeoutExecutor;
+        TGroupDataSource groupDataSource = (TGroupDataSource) repo.getDataSource(groupName);
+        TAtomDataSource atom = findMasterAtomForGroup(groupDataSource);
+        final DataSource dataSource = atom.getDataSource();
+        Connection conn = null;
+        try {
+            if (dataSource instanceof DruidDataSource) {
+                DruidDataSource druid = (DruidDataSource) dataSource;
+                conn = druid.createPhysicalConnection().getPhysicalConnection();
+            } else if (dataSource instanceof XDataSource) {
+                conn = dataSource.getConnection();
+            } else {
+                throw GeneralUtil.nestedException("Unknown datasource. " + dataSource.getClass());
+            }
+            SQLRecorderLogger.ddlLogger.info(String.format(
+                "<MultiPhaseDdl %d> [%s] [%s] send update sql [%s] bypass conn pool on group %s for logical table %s, with socket timeout %d ms",
+                jobId, Thread.currentThread().getName(), taskName, formatString(sql), groupName, logicalTable,
+                socketTimeOut.intValue()));
+            conn.setNetworkTimeout(socketTimeoutExecutor, socketTimeOut.intValue());
+            try (Statement stmt = conn.createStatement()) {
+                stmt.executeUpdate(sql);
+            }
+        } catch (SQLException e) {
+            throw new RuntimeException(e);
+        } finally {
+            try {
+                if (conn != null) {
+                    conn.close();
+                }
+            } catch (Exception ignored) {
+            }
+        }
+    }
+
+    public static void executePhyDdlBypassConnPool(ExecutionContext ec, Long jobId, String schema, String groupName,
+                                                   String sql,
+                                                   String hint, String physicalTableName) {
+        MyRepository repo = (MyRepository) ExecutorContext.getContext(schema)
+            .getRepositoryHolder()
+            .get(Group.GroupType.MYSQL_JDBC.toString());
+        Long socketTimeOut = ec.getParamManager().getLong(ConnectionParams.MERGE_DDL_TIMEOUT);
+        if (socketTimeOut == -1L) {
+            socketTimeOut = DDL_SOCKET_TIMEOUT;
+        }
+        Executor socketTimeoutExecutor = TGroupDirectConnection.socketTimeoutExecutor;
+        TGroupDataSource groupDataSource = (TGroupDataSource) repo.getDataSource(groupName);
+        TAtomDataSource atom = findMasterAtomForGroup(groupDataSource);
+        final DataSource dataSource = atom.getDataSource();
+        Connection conn = null;
+        try {
+            /*
+             For XConnection, we set LastException to prevent this connection from being reused by
+             another session. For JDBC, we get the physical connection directly and call close() on
+             it to close the connection rather than give it back to the connection pool.
+             In both cases, the connection is never reused.
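+             Rationale: the connection carries per-ddl session state (network timeouts, the
+             two-phase ddl hint), so handing it back to the pool could leak that state into
+             unrelated queries.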
+ */ + conn = getConnectionFromDatasource(groupDataSource, dataSource, ec); + try (Statement stmt = conn.createStatement()) { + if (stmt instanceof XStatement) { + ((XConnection) conn).setNetworkTimeoutNanos(socketTimeOut * MS_DIV_NS); + ((XConnection) conn).setLastException(new Exception("discard by multiple phase ddl emitter"), true); + SQLRecorderLogger.ddlLogger.info(String.format( + " [%s] get connection and send physical ddl [%s] on group %s, with socket timeout %d ms. ", + jobId, Thread.currentThread().getName(), formatString(sql), groupName, + socketTimeOut.intValue())); + ((XStatement) stmt).executeUpdateX(BytesSql.getBytesSql(sql), hint.getBytes()); + } else { + conn.setNetworkTimeout(socketTimeoutExecutor, socketTimeOut.intValue()); + SQLRecorderLogger.ddlLogger.info(String.format( + " [%s] get connection and send physical ddl [%s] on group %s, with socket timeout %d ms. ", + jobId, Thread.currentThread().getName(), formatString(sql), groupName, socketTimeOut)); + stmt.executeUpdate(sql); + } + } + } catch (SQLException e) { + throw GeneralUtil.nestedException( + String.format("failed to execute on group(%s): %s , Caused by: %s", groupName, formatString(sql), + formatString(e.getMessage())), e); + } finally { + try { + if (conn != null) { + conn.close(); + } + } catch (Exception ignored) { + SQLRecorderLogger.ddlLogger.info(String.format( + " [%s] close connection with exception %s. ", + jobId, Thread.currentThread().getName(), ignored)); + } + } + } + + public static List> queryGroupBypassConnPool(ExecutionContext ec, Long jobId, String taskName, + String schema, + String logicalTable, String groupName, + String sql) { + List> result = new ArrayList<>(); + MyRepository repo = (MyRepository) ExecutorContext.getContext(schema) + .getRepositoryHolder() + .get(Group.GroupType.MYSQL_JDBC.toString()); + Long socketTimeOut = ec.getParamManager().getLong(ConnectionParams.SOCKET_TIMEOUT); + if (socketTimeOut == -1L) { + socketTimeOut = 900_000L; + } + Executor socketTimeoutExecutor = TGroupDirectConnection.socketTimeoutExecutor; + TGroupDataSource groupDataSource = (TGroupDataSource) repo.getDataSource(groupName); + TAtomDataSource atom = findMasterAtomForGroup(groupDataSource); + final DataSource dataSource = atom.getDataSource(); + Connection conn = null; + try { + if (dataSource instanceof DruidDataSource) { + DruidDataSource druid = (DruidDataSource) dataSource; + conn = druid.createPhysicalConnection().getPhysicalConnection(); + } else if (dataSource instanceof XDataSource) { + conn = dataSource.getConnection(); + } else { + throw GeneralUtil.nestedException("Unknown datasource. 
" + dataSource.getClass()); + } + String statsSql = + String.format(SQL_STATS_TWO_PHASE_DDL, schema, buildTwoPhaseKeyFromLogicalTableNameAndGroupName( + logicalTable, groupName + )); + if (sql.equalsIgnoreCase(SQL_SHOW_PROCESS_LIST) || sql.equalsIgnoreCase(statsSql)) { + long time = ACCUMULATE_QUERY_STATS_SQL_NUM.incrementAndGet(); + if (time % 100L == 0) { + SQLRecorderLogger.ddlLogger.info(String.format( + " [%s] [%s] send query sql [%s] bypass conn pool on group %s for logical table %s, with socket timeout %d ms, times [%d]", + jobId, Thread.currentThread().getName(), taskName, formatString(sql), groupName, logicalTable, + socketTimeOut.intValue(), time)); + } + } else { + SQLRecorderLogger.ddlLogger.info(String.format( + " [%s] [%s] send query sql [%s] bypass conn pool on group %s for logical table %s, with socket timeout %d ms", + jobId, Thread.currentThread().getName(), taskName, formatString(sql), groupName, logicalTable, + socketTimeOut.intValue())); + } + conn.setNetworkTimeout(socketTimeoutExecutor, socketTimeOut.intValue()); + try (Statement stmt = conn.createStatement(); ResultSet rs = stmt.executeQuery(sql)) { + int columns = rs.getMetaData().getColumnCount(); + while (rs.next()) { + Map row = new HashMap<>(); + for (int i = 1; i <= columns; i++) { + String columnName = rs.getMetaData().getColumnName(i); + Object column = rs.getObject(i); + row.put(columnName, column); + } + result.add(row); + } + } + } catch (SQLException e) { + throw new RuntimeException(e); + } finally { + try { + if (conn != null) { + conn.close(); + } + } catch (Exception ignored) { + } + } + return result; + } + + public static String resultToString(List> results) { + List rowStrings = new ArrayList<>(); + for (List row : results) { + rowStrings.add("[" + (String) StringUtil.join(", ", + row.stream().map(o -> formatString(o.toString())).collect(Collectors.toList())) + "]"); + } + return "[" + (String) StringUtil.join(",", rowStrings) + "]"; + } + + public static String formatString(String origin) { + return origin.replace("\n", "\\n").replace("\t", "\\t"); + } + + public static String buildPhyDbTableNameFromGroupNameAndPhyTableName(String groupName, String phyTableName) { + String formatString = "%s/%s"; + return String.format(formatString, buildPhysicalDbNameFromGroupName(groupName).toLowerCase(), + phyTableName.toLowerCase()); + } + + public static String buildTwoPhaseKeyFromLogicalTableNameAndGroupName(String logicalTableName, String groupName) { + String formatString = "%s_%s"; + return String.format(formatString, logicalTableName, groupName); + } + + public static String buildLogicalTableNameFromTwoPhaseKeyAndPhyDbName(String keyName, String phyDbName) { + String groupName = buildGroupNameFromPhysicalDb(phyDbName); + int index = keyName.lastIndexOf("_" + groupName); + if (index == -1) { + return "__unknown_table"; + } else { + return keyName.substring(0, index); + } + } + + private static Connection getConnectionFromDatasource(TGroupDataSource groupDataSource, DataSource dataSource, + ExecutionContext ec) + throws SQLException { + Connection conn; + TGroupDirectConnection tGroupDirectConnection; + if (dataSource instanceof DruidDataSource) { + DruidDataSource druid = (DruidDataSource) dataSource; + conn = druid.createPhysicalConnection().getPhysicalConnection(); + tGroupDirectConnection = new TGroupDirectConnection(groupDataSource, conn); + } else if (dataSource instanceof XDataSource) { + conn = dataSource.getConnection(); + tGroupDirectConnection = new TGroupDirectConnection(groupDataSource, 
+    } else {
+        throw GeneralUtil.nestedException("Unknown datasource. " + dataSource.getClass());
+    }
+    if (ec.getServerVariables() != null) {
+        tGroupDirectConnection.setServerVariables(ec.getServerVariables());
+    }
+    if (ec.getEncoding() != null) {
+        tGroupDirectConnection.setEncoding(ec.getEncoding());
+    }
+    if (ec.getSqlMode() != null) {
+        tGroupDirectConnection.setSqlMode(ec.getSqlMode());
+    }
+    return conn;
+}
+
+public static TAtomDataSource findMasterAtomForGroup(TGroupDataSource groupDs) {
+    TAtomDataSource targetAtom = null;
+    Weight targetAtomWeight = null;
+    boolean isFindMaster = false;
+    List<TAtomDataSource> atomList = groupDs.getAtomDataSources();
+    Map<TAtomDataSource, Weight> atomDsWeightMaps = groupDs.getAtomDataSourceWeights();
+    for (Map.Entry<TAtomDataSource, Weight> atomWeightItem : atomDsWeightMaps.entrySet()) {
+        targetAtom = atomWeightItem.getKey();
+        targetAtomWeight = atomWeightItem.getValue();
+        if (targetAtomWeight.w > 0) {
+            isFindMaster = true;
+            break;
+        }
+    }
+
+    if (isFindMaster) {
+        return targetAtom;
+    } else {
+        throw new TddlRuntimeException(ErrorCode.ERR_ATOM_GET_CONNECTION_FAILED_UNKNOWN_REASON,
+            String.format("failed to get master of atom on group %s, dn %s", groupDs.getDbGroupKey(),
+                groupDs.getMasterDNId()));
+    }
+}
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/util/ChangeSetUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/util/ChangeSetUtils.java
index 348a03e3d..adfc9f3e0 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/util/ChangeSetUtils.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/util/ChangeSetUtils.java
@@ -16,6 +16,7 @@
 package com.alibaba.polardbx.executor.ddl.util;
 
+import com.alibaba.polardbx.common.ddl.newengine.DdlTaskState;
 import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException;
 import com.alibaba.polardbx.common.jdbc.BytesSql;
 import com.alibaba.polardbx.common.jdbc.ParameterContext;
@@ -23,6 +24,7 @@
 import com.alibaba.polardbx.common.jdbc.ZeroDate;
 import com.alibaba.polardbx.common.properties.ConnectionParams;
 import com.alibaba.polardbx.common.utils.GeneralUtil;
+import com.alibaba.polardbx.common.utils.logger.Logger;
 import com.alibaba.polardbx.executor.ExecutorHelper;
 import com.alibaba.polardbx.executor.changeset.ChangeSetManager;
 import com.alibaba.polardbx.executor.common.ExecutorContext;
@@ -31,14 +33,20 @@
 import com.alibaba.polardbx.executor.ddl.job.task.changset.ChangeSetApplyFinishTask;
 import com.alibaba.polardbx.executor.ddl.job.task.changset.ChangeSetCatchUpTask;
 import com.alibaba.polardbx.executor.ddl.job.task.changset.ChangeSetStartTask;
-import com.alibaba.polardbx.executor.ddl.job.task.shared.EmptyTask;
 import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.AlterComplexTaskUpdateJobStatusTask;
 import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask;
+import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlEngineAccessorDelegate;
+import com.alibaba.polardbx.executor.ddl.newengine.utils.TaskHelper;
 import com.alibaba.polardbx.executor.gsi.GsiUtils;
 import com.alibaba.polardbx.executor.gsi.PhysicalPlanBuilder;
 import com.alibaba.polardbx.executor.gsi.utils.Transformer;
 import com.alibaba.polardbx.executor.spi.IGroupExecutor;
+import com.alibaba.polardbx.executor.sync.SyncManagerHelper;
+import com.alibaba.polardbx.executor.sync.TablesMetaChangePreemptiveSyncAction;
 import com.alibaba.polardbx.executor.utils.ExecUtils;
+import com.alibaba.polardbx.gms.metadb.misc.DdlEngineTaskRecord;
+import 
com.alibaba.polardbx.gms.metadb.table.TableInfoManager; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager; @@ -94,17 +102,21 @@ import static com.alibaba.polardbx.common.properties.ConnectionParams.CHANGE_SET_APPLY_OPTIMIZATION; import static com.alibaba.polardbx.common.properties.ConnectionParams.CHANGE_SET_CHECK_TWICE; -import static com.alibaba.polardbx.executor.gsi.GsiUtils.RETRY_COUNT; -import static com.alibaba.polardbx.executor.gsi.GsiUtils.RETRY_WAIT; public class ChangeSetUtils { public final static String SQL_START_CHANGESET = "call polarx.changeset_start(%s, %s);"; public final static String SQL_FETCH_CHANGESET_TIMES = "call polarx.changeset_times(%s);"; public final static String SQL_FETCH_CAHNGESET = "call polarx.changeset_fetch(%s, %d);"; - public final static String SQL_STOP_CHANGESET = "call polarx.changeset_stop(%s);"; public final static String SQL_FINISH_CHANGESET = "call polarx.changeset_finish(%s);"; public final static String SQL_CALL_CHANGESET_STATS = "call polarx.changeset_stats('');"; + public static final int RETRY_COUNT = 10; + public static final long[] RETRY_WAIT = new long[RETRY_COUNT]; + + static { + IntStream.range(0, RETRY_COUNT).forEach(i -> RETRY_WAIT[i] = 500L * i); + } + public static boolean isChangeSetProcedure(ExecutionContext ec) { return ec.getParamManager().getBoolean(ConnectionParams.CN_ENABLE_CHANGESET) && ExecutorContext.getContext(ec.getSchemaName()).getStorageInfoManager().supportChangeSet(); @@ -155,6 +167,8 @@ public static CursorMeta buildCursorMeta(String schemaName, String tableName) { public static PhyTableOperation buildSelectWithInPk(String schemaName, String tableName, String grpKey, String phyTbName, + List tableColumns, + List notUsingBinaryStringColumns, List data, ExecutionContext ec, boolean lock) { if (data.isEmpty()) { return null; @@ -164,12 +178,9 @@ public static PhyTableOperation buildSelectWithInPk(String schemaName, String ta Pair, List> primaryKeys = GlobalIndexMeta.getPrimaryKeysNotOrdered(baseTableMeta); - List tableColumns = baseTableMeta.getWriteColumns() - .stream() - .map(ColumnMeta::getName) - .collect(Collectors.toList()); - - final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, ec); + final boolean useBinary = ec.getParamManager().getBoolean(ConnectionParams.BACKFILL_USING_BINARY); + final PhysicalPlanBuilder builder = + new PhysicalPlanBuilder(schemaName, useBinary, notUsingBinaryStringColumns, ec); final Pair selectWithInPk = builder .buildSelectWithInForChecker(baseTableMeta, tableColumns, primaryKeys.getKey(), "PRIMARY"); @@ -258,8 +269,10 @@ public static SqlNodeList buildInValues(Pair, List> primar return inValues; } - static public PhyTableOperation buildReplace(String schemaName, String tableName, + static public PhyTableOperation buildReplace(String schemaName, + String tableName, String indexName, String grpKey, String phyTbName, + List tableColumns, Parameters parameters, ExecutionContext ec) { int dataSize = parameters.getBatchParameters().size(); @@ -267,12 +280,7 @@ static public PhyTableOperation buildReplace(String schemaName, String tableName return null; } final SchemaManager sm = OptimizerContext.getContext(schemaName).getLatestSchemaManager(); - final TableMeta baseTableMeta = sm.getTable(tableName); - - final List tableColumns = baseTableMeta.getWriteColumns() - .stream() - 
.map(ColumnMeta::getName) - .collect(Collectors.toList()); + final TableMeta baseTableMeta = indexName == null ? sm.getTable(tableName) : sm.getTable(indexName); final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, ec); final Pair replace = @@ -308,8 +316,11 @@ static public PhyTableOperation buildReplace(String schemaName, String tableName return PhyTableOperationFactory.getInstance().buildPhyTableOperationByPhyOp(targetPhyOp, buildParams); } - public static Parameters executePhySelectPlan(PhyTableOperation selectPlan, ExecutionContext ec) { + public static Parameters executePhySelectPlan(PhyTableOperation selectPlan, + List notConvertColumns, + ExecutionContext ec) { Cursor cursor = null; + boolean useBinary = ec.getParamManager().getBoolean(ConnectionParams.BACKFILL_USING_BINARY); Parameters parameters = new Parameters(); final List> batchParams = new ArrayList<>(); try { @@ -320,7 +331,12 @@ public static Parameters executePhySelectPlan(PhyTableOperation selectPlan, Exec final Map item = new HashMap<>(columns.size()); for (int i = 0; i < columns.size(); i++) { - item.put(i + 1, Transformer.buildColumnParam(row, i)); + ColumnMeta columnMeta = columns.get(i); + String colName = columnMeta.getName(); + boolean canConvert = + useBinary && (notConvertColumns == null || !notConvertColumns.contains(colName)); + + item.put(i + 1, Transformer.buildColumnParam(row, i, canConvert)); } batchParams.add(item); } @@ -378,7 +394,8 @@ public static String buildNextDigestByTableMeta(TableMeta tableMeta) { return schemaName + "." + tableName + "#version:" + version; } - public static Map genChangeSetCatchUpTasks(String schemaName, String tableName, + public static Map genChangeSetCatchUpTasks(String schemaName, + String tableName, String indexName, Map> sourcePhyTableNames, Map targetTableLocations, ComplexTaskMetaManager.ComplexTaskType taskType, @@ -387,8 +404,9 @@ public static Map genChangeSetCatchUpTasks(String catchUpTasks.put( ChangeSetManager.ChangeSetCatchUpStatus.ABSENT.toString(), new ChangeSetCatchUpTask( - schemaName, tableName, - null, + schemaName, + tableName, + indexName, sourcePhyTableNames, targetTableLocations, ChangeSetManager.ChangeSetCatchUpStatus.ABSENT, @@ -400,8 +418,9 @@ public static Map genChangeSetCatchUpTasks(String catchUpTasks.put( ChangeSetManager.ChangeSetCatchUpStatus.DELETE_ONLY.toString(), new ChangeSetCatchUpTask( - schemaName, tableName, - null, + schemaName, + tableName, + indexName, sourcePhyTableNames, targetTableLocations, ChangeSetManager.ChangeSetCatchUpStatus.DELETE_ONLY, @@ -413,8 +432,9 @@ public static Map genChangeSetCatchUpTasks(String catchUpTasks.put( ChangeSetManager.ChangeSetCatchUpStatus.WRITE_ONLY.toString(), new ChangeSetCatchUpTask( - schemaName, tableName, - null, + schemaName, + tableName, + indexName, sourcePhyTableNames, targetTableLocations, ChangeSetManager.ChangeSetCatchUpStatus.WRITE_ONLY, @@ -426,8 +446,9 @@ public static Map genChangeSetCatchUpTasks(String catchUpTasks.put( ChangeSetManager.ChangeSetCatchUpStatus.ABSENT_FINAL.toString(), new ChangeSetCatchUpTask( - schemaName, tableName, - null, + schemaName, + tableName, + indexName, sourcePhyTableNames, targetTableLocations, ChangeSetManager.ChangeSetCatchUpStatus.ABSENT_FINAL, @@ -439,8 +460,9 @@ public static Map genChangeSetCatchUpTasks(String catchUpTasks.put( ChangeSetManager.ChangeSetCatchUpStatus.DELETE_ONLY_FINAL.toString(), new ChangeSetCatchUpTask( - schemaName, tableName, - null, + schemaName, + tableName, + indexName, sourcePhyTableNames, 
targetTableLocations, ChangeSetManager.ChangeSetCatchUpStatus.DELETE_ONLY_FINAL, @@ -452,8 +474,9 @@ public static Map genChangeSetCatchUpTasks(String catchUpTasks.put( ChangeSetManager.ChangeSetCatchUpStatus.WRITE_ONLY_FINAL.toString(), new ChangeSetCatchUpTask( - schemaName, tableName, - null, + schemaName, + tableName, + indexName, sourcePhyTableNames, targetTableLocations, ChangeSetManager.ChangeSetCatchUpStatus.WRITE_ONLY_FINAL, @@ -473,6 +496,7 @@ public static List genChangeSetOnlineSchemaChangeTasks(String schemaNam DdlTask changeSetCheckTask, DdlTask changeSetCheckTwiceTask, ChangeSetApplyFinishTask changeSetApplyFinishTask, + List outDdlTasks, ExecutionContext executionContext) { List ddlTasks = new ArrayList<>(); final boolean stayAtCreating = @@ -584,7 +608,13 @@ public static List genChangeSetOnlineSchemaChangeTasks(String schemaNam ddlTasks.add(changeSetStartTask); ddlTasks.add(absentTask); ddlTasks.add(tableSyncTasks.get(0)); - ddlTasks.add(backFillTask); + if (backFillTask != null) { + ddlTasks.add(backFillTask); + } else { + outDdlTasks.add(tableSyncTasks.get(0)); + outDdlTasks.add(catchUpTasks.get(ChangeSetManager.ChangeSetCatchUpStatus.ABSENT.toString())); + } + ddlTasks.add(catchUpTasks.get(ChangeSetManager.ChangeSetCatchUpStatus.ABSENT.toString())); ddlTasks.add(deleteOnlyTask); @@ -637,6 +667,55 @@ public static List genChangeSetOnlineSchemaChangeTasks(String schemaNam return ddlTasks; } + public static void doChangeSetSchemaChange(String schemaName, String logicalTableName, List relatedTables, + DdlTask currentTask, ComplexTaskMetaManager.ComplexTaskStatus oldStatus, + ComplexTaskMetaManager.ComplexTaskStatus newStatus) { + final Logger LOGGER = SQLRecorderLogger.ddlEngineLogger; + + new DdlEngineAccessorDelegate() { + @Override + protected Integer invoke() { + ComplexTaskMetaManager + .updateSubTasksStatusByJobIdAndObjName(currentTask.getJobId(), + schemaName, + logicalTableName, + oldStatus, + newStatus, + getConnection()); + try { + for (String tbName : relatedTables) { + TableInfoManager.updateTableVersionWithoutDataId(schemaName, tbName, getConnection()); + } + } catch (Exception e) { + throw GeneralUtil.nestedException(e); + } + currentTask.setState(DdlTaskState.DIRTY); + DdlEngineTaskRecord taskRecord = TaskHelper.toDdlEngineTaskRecord(currentTask); + return engineTaskAccessor.updateTask(taskRecord); + } + }.execute(); + + LOGGER.info( + String.format( + "Update table status[ schema:%s, table:%s, before state:%s, after state:%s]", + schemaName, + logicalTableName, + ComplexTaskMetaManager.ComplexTaskStatus.WRITE_REORG.name(), + ComplexTaskMetaManager.ComplexTaskStatus.DOING_CHECKER.name())); + + try { + SyncManagerHelper.sync( + new TablesMetaChangePreemptiveSyncAction(schemaName, relatedTables, 1500L, 1500L, + TimeUnit.SECONDS), + SyncScope.ALL); + } catch (Throwable t) { + LOGGER.error(String.format( + "error occurs while sync table meta, schemaName:%s, tableName:%s", schemaName, + logicalTableName)); + throw GeneralUtil.nestedException(t); + } + } + public static List> genBatchRowList(List rows, int batchSize) { List> RowBatchList = new ArrayList<>(); IntStream.range(0, (rows.size() + batchSize - 1) / batchSize) @@ -762,8 +841,7 @@ public static Pair getTargetGroupNameAndPhyTableName( targetGroup = targetTableLocations.get(targetTable); } else if (taskType == ComplexTaskMetaManager.ComplexTaskType.SPLIT_PARTITION || taskType == ComplexTaskMetaManager.ComplexTaskType.SPLIT_HOT_VALUE) { - targetGroup = null; - targetTable = null; + // need route } else if 
(taskType == ComplexTaskMetaManager.ComplexTaskType.MOVE_PARTITION) { // move partitions or move database targetTable = sourceTable; @@ -771,6 +849,9 @@ public static Pair getTargetGroupNameAndPhyTableName( } else if (taskType == ComplexTaskMetaManager.ComplexTaskType.MOVE_DATABASE) { targetTable = sourceTable; targetGroup = targetTableLocations.get(sourceGroup); + } else if (taskType == ComplexTaskMetaManager.ComplexTaskType.ONLINE_MODIFY_COLUMN) { + targetGroup = sourceGroup; + targetTable = targetTableLocations.get(sourceTable); } return new Pair<>(targetGroup, targetTable); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/engine/AsyncDDLManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/util/ConfigUtil.java similarity index 100% rename from polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/engine/AsyncDDLManager.java rename to polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/util/ConfigUtil.java diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/workqueue/FastCheckerThreadPool.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/workqueue/FastCheckerThreadPool.java index 5d8aba693..a8ab855f2 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/workqueue/FastCheckerThreadPool.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/workqueue/FastCheckerThreadPool.java @@ -16,33 +16,75 @@ package com.alibaba.polardbx.executor.ddl.workqueue; -import com.alibaba.polardbx.common.async.AsyncTask; +import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.properties.ConnectionProperties; +import com.alibaba.polardbx.common.utils.thread.ExecutorUtil; import com.alibaba.polardbx.common.utils.thread.NamedThreadFactory; -import com.alibaba.polardbx.common.utils.thread.ThreadCpuStatUtil; +import com.alibaba.polardbx.executor.ddl.newengine.DdlEngineStats; +import com.alibaba.polardbx.gms.config.impl.MetaDbInstConfigManager; +import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; +import com.alibaba.polardbx.gms.topology.StorageInfoAccessor; +import com.alibaba.polardbx.gms.topology.StorageInfoRecord; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import org.apache.calcite.util.Pair; -import java.util.concurrent.PriorityBlockingQueue; +import java.sql.Connection; +import java.util.List; +import java.util.Set; +import java.util.TreeSet; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; + +import static java.lang.Math.max; /** * Created by zhuqiwei. 
 *
 * @author zhuqiwei
 */
-public class FastCheckerThreadPool extends ThreadPoolExecutor {
+public class FastCheckerThreadPool {
     private static volatile FastCheckerThreadPool instance = null;
 
-    private FastCheckerThreadPool(int corePoolSize) {
-        super(corePoolSize,
-            corePoolSize,
-            0,
-            TimeUnit.SECONDS,
-            new PriorityBlockingQueue<>(),
-            new NamedThreadFactory("FastChecker-Worker", true));
-    }
+    /**
+     * fastChecker thread pools:
+     * concurrentMap< storageInstId, executor >
+     */
+    private final CaseInsensitiveConcurrentHashMap<ThreadPoolExecutor> executors;
+
+    /**
+     * this cache is used to count fastChecker's progress by ddl jobId
+     * cache < ddlJobId, FastCheckerInfo >
+     */
+    Cache<Long, FastCheckerInfo> cache;
+
+    private final ScheduledExecutorService threadPoolCleanThread =
+        ExecutorUtil.createScheduler(1,
+            new NamedThreadFactory("FastChecker-ThreadPool-Cleaner-Thread-"),
+            new ThreadPoolExecutor.DiscardPolicy());
 
     private FastCheckerThreadPool() {
-        this(Math.max(ThreadCpuStatUtil.NUM_CORES, 8));
+        executors = new CaseInsensitiveConcurrentHashMap<>();
+
+        cache = CacheBuilder.newBuilder()
+            .maximumSize(4096)
+            .expireAfterWrite(7, TimeUnit.DAYS)
+            .expireAfterAccess(7, TimeUnit.DAYS)
+            .build();
+
+        threadPoolCleanThread.scheduleAtFixedRate(
+            new OfflineStorageThreadPoolCleaner(),
+            0L,
+            3,
+            TimeUnit.DAYS
+        );
     }
 
     public static FastCheckerThreadPool getInstance() {
@@ -56,23 +98,226 @@ public static FastCheckerThreadPool getInstance() {
         return instance;
     }
 
-    @Override
-    public void execute(Runnable command) {
-        if (!(command instanceof PriorityFIFOTask)) {
-            throw new ClassCastException("Not instance of PriorityFIFOTask.");
+    /**
+     * Assign each task to the thread pool of its storage instance (storageInstName).
+     * Each element is a pair < storageInstName, task >.
+     */
+    public void submitTasks(List<Pair<String, Runnable>> storageInstNameAndTasks) {
+        synchronized (this.executors) {
+            for (Pair<String, Runnable> instNameAndTask : storageInstNameAndTasks) {
+                String instName = instNameAndTask.getKey();
+                Runnable task = instNameAndTask.getValue();
+
+                if (!executors.containsKey(instName)) {
+                    int defaultThreadPoolSize = Integer.valueOf(
+                        MetaDbInstConfigManager.getInstance().getInstProperty(
+                            ConnectionProperties.FASTCHECKER_THREAD_POOL_SIZE,
+                            ConnectionParams.FASTCHECKER_THREAD_POOL_SIZE.getDefault()
+                        )
+                    );
+
+                    ThreadPoolExecutor executor = new ThreadPoolExecutor(
+                        defaultThreadPoolSize,
+                        defaultThreadPoolSize,
+                        5, TimeUnit.MINUTES,
+                        new LinkedBlockingQueue<>(),
+                        new NamedThreadFactory(String.format("FastChecker-Thread-on-[%s]", instName), true)
+                    );
+                    /**
+                     * core threads idle for more than 5 minutes will be destroyed
+                     * */
+                    executor.allowCoreThreadTimeOut(true);
+
+                    executors.put(
+                        instName, executor
+                    );
+
+                    SQLRecorderLogger.ddlLogger.info(String.format(
+                        "FastChecker create new thread pool for storage inst [%s]",
+                        instName
+                    ));
+                    SQLRecorderLogger.ddlLogger.info(String.format(
+                        "Now FastChecker Thread Pool is %s, %s",
+                        executors.keySet(),
+                        executors.values()
+                    ));
+                }
+
+                ThreadPoolExecutor executor = executors.get(instName);
+                executor.execute(task);
+            }
+        }
+    }
+
+    public void updateStats() {
+        int taskUnfinished = 0, runningThreads = 0;
+        int threadPoolMaxSize = 0, threadPoolNowSize = 0, threadPoolNum = 0;
+        synchronized (this.executors) {
+            for (ThreadPoolExecutor executor : this.executors.values()) {
+                runningThreads += executor.getActiveCount();
+                taskUnfinished += executor.getQueue().size();
+                threadPoolMaxSize = max(threadPoolMaxSize, executor.getCorePoolSize());
+                threadPoolNowSize = max(threadPoolNowSize, executor.getPoolSize());
+            }
+            threadPoolNum = this.executors.values().size();
+        }
+
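+        // Worked example of the aggregation above, with assumed numbers: two storage
+        // pools of core size 8, each with 3 active threads and 5 queued tasks, give
+        //   runningThreads    = 3 + 3 = 6     -> METRIC_FASTCHECKER_TASK_RUNNING
+        //   taskUnfinished    = 5 + 5 = 10    -> METRIC_FASTCHECKER_TASK_WAITING
+        //   threadPoolMaxSize = max(8, 8) = 8 -> METRIC_FASTCHECKER_THREAD_POOL_MAX_SIZE
+        //   threadPoolNum     = 2             -> METRIC_FASTCHECKER_THREAD_POOL_NUM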
DdlEngineStats.METRIC_FASTCHECKER_TASK_RUNNING.set(runningThreads); + DdlEngineStats.METRIC_FASTCHECKER_TASK_WAITING.set(taskUnfinished); + DdlEngineStats.METRIC_FASTCHECKER_THREAD_POOL_NOW_SIZE.set(threadPoolNowSize); + DdlEngineStats.METRIC_FASTCHECKER_THREAD_POOL_MAX_SIZE.set(threadPoolMaxSize); + DdlEngineStats.METRIC_FASTCHECKER_THREAD_POOL_NUM.set(threadPoolNum); + } + + public void clearStats() { + DdlEngineStats.METRIC_FASTCHECKER_TASK_RUNNING.set(0); + DdlEngineStats.METRIC_FASTCHECKER_TASK_WAITING.set(0); + DdlEngineStats.METRIC_FASTCHECKER_THREAD_POOL_NOW_SIZE.set(0); + DdlEngineStats.METRIC_FASTCHECKER_THREAD_POOL_MAX_SIZE.set(0); + DdlEngineStats.METRIC_FASTCHECKER_THREAD_POOL_NUM.set(0); + } + + public void setParallelism(int parallelism) { + if (parallelism <= 0) { + return; + } + synchronized (this.executors) { + for (ThreadPoolExecutor executor : executors.values()) { + if (parallelism > executor.getMaximumPoolSize()) { + executor.setMaximumPoolSize(parallelism); + executor.setCorePoolSize(parallelism); + } else { + executor.setCorePoolSize(parallelism); + executor.setMaximumPoolSize(parallelism); + } + } + } + } + + private class OfflineStorageThreadPoolCleaner implements Runnable { + @Override + public void run() { + try { + cleanupOfflineStorageThreadPool(); + SQLRecorderLogger.ddlLogger.info( + "fastChecker threadPool cleaner is working..." + ); + } catch (Throwable t) { + SQLRecorderLogger.ddlLogger.error( + "failed to clean up fastChecker threadPool", t + ); + } + } + + private void cleanupOfflineStorageThreadPool() { + try (Connection metaDbConn = MetaDbDataSource.getInstance().getConnection()) { + StorageInfoAccessor accessor = new StorageInfoAccessor(); + accessor.setConnection(metaDbConn); + Set validStorageNameList = new TreeSet<>(String::compareToIgnoreCase); + List nameList = accessor + .getStorageInfosByInstKind(StorageInfoRecord.INST_KIND_MASTER) + .stream() + .map(StorageInfoRecord::getStorageInstId) + .collect(Collectors.toList()); + validStorageNameList.addAll(nameList); + + List tobeRemoveNameList = executors.keySet() + .stream() + .filter(x -> !validStorageNameList.contains(x)) + .collect(Collectors.toList()); + + for (String toBeRemove : tobeRemoveNameList) { + ThreadPoolExecutor executor = executors.get(toBeRemove); + executor.shutdownNow(); + executors.remove(toBeRemove); + SQLRecorderLogger.ddlLogger.info( + String.format("FastChecker shutdown and remove threadPool [%s]", toBeRemove) + ); + } + + } catch (Exception e) { + throw new TddlNestableRuntimeException("FastChecker clean offline storage thread pool failed", e); + } + } + } + + public void increaseCheckTaskInfo(long ddlJobId, int newTaskSum, int newTaskFinished) { + try { + FastCheckerInfo info = this.cache.get(ddlJobId, FastCheckerInfo::new); + info.getPhyTaskSum().addAndGet(newTaskSum); + info.getPhyTaskFinished().addAndGet(newTaskFinished); + } catch (Exception e) { + SQLRecorderLogger.ddlLogger.error( + "failed to increase fastChecker task info", e + ); } - super.execute(command); } - public Runnable executeWithContext(Runnable command, PriorityFIFOTask.TaskPriority priority) { - final Runnable task = AsyncTask.build(command); - Runnable realTask = new PriorityFIFOTask(priority) { - @Override - public void run() { - task.run(); + public void rollbackCheckTaskInfo(long ddlJobId, int alreadyTaskSum, int alreadyTaskFinished) { + try { + FastCheckerInfo info = this.cache.getIfPresent(ddlJobId); + if (info == null) { + return; } - }; - execute(realTask); - return realTask; + 
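+            // Sketch of the intended bookkeeping, with assumed numbers: a job that
+            // registered 16 physical tasks via increaseCheckTaskInfo(jobId, 16, 0) and
+            // then finished 5 of them holds counters (sum = 16, finished = 5); calling
+            // rollbackCheckTaskInfo(jobId, 16, 5) subtracts both back to (0, 0) so a
+            // retried check can publish its progress from a clean slate.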
info.getPhyTaskSum().addAndGet(-1 * alreadyTaskSum); + info.getPhyTaskFinished().addAndGet(-1 * alreadyTaskFinished); + } catch (Exception e) { + SQLRecorderLogger.ddlLogger.error( + "failed to rollback fastChecker task info", e + ); + } + } + + public FastCheckerInfo queryCheckTaskInfo(long ddlJobId) { + try { + return this.cache.getIfPresent(ddlJobId); + } catch (Exception e) { + SQLRecorderLogger.ddlLogger.error( + "failed to query fastChecker task info", e + ); + } + return null; + } + + public void invalidateTaskInfo(long ddlJobId) { + this.cache.invalidate(ddlJobId); + } + + private static class CaseInsensitiveConcurrentHashMap extends ConcurrentHashMap { + + @Override + public T put(String key, T value) { + return super.put(key.toLowerCase(), value); + } + + public T get(String key) { + return super.get(key.toLowerCase()); + } + + public boolean containsKey(String key) { + return super.containsKey(key.toLowerCase()); + } + + public T remove(String key) { + return super.remove(key.toLowerCase()); + } + } + + public static class FastCheckerInfo { + private volatile AtomicInteger phyTaskSum; + private volatile AtomicInteger phyTaskFinished; + + public FastCheckerInfo() { + phyTaskSum = new AtomicInteger(0); + phyTaskFinished = new AtomicInteger(0); + } + + public AtomicInteger getPhyTaskSum() { + return phyTaskSum; + } + + public AtomicInteger getPhyTaskFinished() { + return phyTaskFinished; + } } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/workqueue/TwoPhaseDdlThreadPool.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/workqueue/TwoPhaseDdlThreadPool.java new file mode 100644 index 000000000..d0c322abf --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/ddl/workqueue/TwoPhaseDdlThreadPool.java @@ -0,0 +1,146 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.ddl.workqueue; + +import com.alibaba.polardbx.common.async.AsyncTask; +import com.alibaba.polardbx.common.utils.thread.NamedThreadFactory; +import com.alibaba.polardbx.common.utils.thread.ThreadCpuStatUtil; +import com.alibaba.polardbx.executor.ddl.newengine.DdlEngineStats; + +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +/** + * @version 1.0 + */ +public class TwoPhaseDdlThreadPool extends ThreadPoolExecutor { + + private static final TwoPhaseDdlThreadPool INSTANCE = new TwoPhaseDdlThreadPool(); + + public TwoPhaseDdlThreadPool() { + this(Math.max(ThreadCpuStatUtil.NUM_CORES, 128)); + } + + public TwoPhaseDdlThreadPool(int corePoolSize) { + super(corePoolSize * 1, + corePoolSize * 2, + 0, + TimeUnit.SECONDS, + new SynchronousQueue<>(), + new NamedThreadFactory("2PC-Ddl-Worker", true), + new ThreadPoolExecutor.AbortPolicy()); +// super.setRejectedExecutionHandler(new WaitTimeoutRejectHandler()); +// super.setQueueCapacity(0); + } + + /** + * @param command Should be instance of PriorityFIFOTask. + */ + @Override + public void execute(Runnable command) { + if (!(command instanceof PriorityFIFOTask)) { + throw new ClassCastException("Not instance of PriorityFIFOTask."); + } + super.execute(command); + } + + /** + * executeWithContext will automatically copy the execution context. + */ + public Runnable executeWithContext(Runnable command, PriorityFIFOTask.TaskPriority priority) { + final Runnable task = AsyncTask.build(command); + Runnable realTask = new PriorityFIFOTask(priority) { + @Override + public void run() { + task.run(); + } + }; + execute(realTask); + return realTask; + } + + public static TwoPhaseDdlThreadPool getInstance() { + return INSTANCE; + } + + public static void updateStats() { + DdlEngineStats.METRIC_TWO_PHASE_DDL_PARALLISM.set(INSTANCE.getActiveCount()); + } + + /** + * Test code. 
+ */ + public static void main(String[] argv) { + TwoPhaseDdlThreadPool queue = new TwoPhaseDdlThreadPool(1); + queue.execute(new PriorityFIFOTask(PriorityFIFOTask.TaskPriority.LOW_PRIORITY_TASK) { + @Override + public void run() { + System.out.println("task1 then sleep 1s"); + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + }); + queue.execute(new PriorityFIFOTask(PriorityFIFOTask.TaskPriority.LOW_PRIORITY_TASK) { + @Override + public void run() { + System.out.println("task2 then sleep 1s"); + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + }); + + try { + queue.execute(new PriorityFIFOTask(PriorityFIFOTask.TaskPriority.MEDIUM_PRIORITY_TASK) { + @Override + public void run() { + System.out.println("task1 then sleep 1s"); + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + }); + } catch (Exception e) { + System.out.println(e.getMessage()); + } + + try { + queue.execute(new PriorityFIFOTask(PriorityFIFOTask.TaskPriority.HIGH_PRIORITY_TASK) { + @Override + public void run() { + System.out.println("task0 then sleep 1s"); + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + }); + } catch (Exception e) { + System.out.println(e.getMessage()); + } + queue.shutdown(); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/fastchecker/FastChecker.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/fastchecker/FastChecker.java index 9193938a6..f6a6c77ca 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/fastchecker/FastChecker.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/fastchecker/FastChecker.java @@ -21,45 +21,45 @@ import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.jdbc.ITransactionPolicy; -import com.alibaba.polardbx.common.jdbc.MasterSlave; import com.alibaba.polardbx.common.jdbc.ParameterContext; -import com.alibaba.polardbx.common.jdbc.ParameterMethod; import com.alibaba.polardbx.common.properties.ConnectionParams; -import com.alibaba.polardbx.common.properties.ConnectionProperties; +import com.alibaba.polardbx.common.properties.ParamManager; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.common.utils.logger.MDC; import com.alibaba.polardbx.executor.ExecutorHelper; import com.alibaba.polardbx.executor.balancer.stats.StatsUtils; import com.alibaba.polardbx.executor.common.ExecutorContext; -import com.alibaba.polardbx.executor.common.TopologyHandler; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.ddl.newengine.cross.CrossEngineValidator; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask; import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlEngineAccessorDelegate; import com.alibaba.polardbx.executor.ddl.newengine.utils.TaskHelper; +import com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils; import com.alibaba.polardbx.executor.ddl.workqueue.FastCheckerThreadPool; +import com.alibaba.polardbx.executor.ddl.workqueue.BackFillThreadPool; +import com.alibaba.polardbx.executor.ddl.workqueue.PriorityFIFOTask; import com.alibaba.polardbx.executor.gsi.CheckerManager; import 
com.alibaba.polardbx.executor.gsi.GsiUtils; import com.alibaba.polardbx.executor.gsi.PhysicalPlanBuilder; import com.alibaba.polardbx.executor.gsi.utils.Transformer; import com.alibaba.polardbx.executor.spi.ITransactionManager; -import com.alibaba.polardbx.executor.ddl.workqueue.PriorityFIFOTask; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.sync.TablesMetaChangePreemptiveSyncAction; +import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; import com.alibaba.polardbx.gms.metadb.misc.DdlEngineTaskRecord; import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; +import com.alibaba.polardbx.gms.topology.GroupDetailInfoAccessor; +import com.alibaba.polardbx.gms.topology.ServerInstIdManager; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.util.GroupInfoUtil; -import com.alibaba.polardbx.group.jdbc.TGroupDataSource; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager; import com.alibaba.polardbx.optimizer.config.table.SchemaManager; import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.rel.PhyOperationBuilderCommon; import com.alibaba.polardbx.optimizer.core.rel.PhyTableOpBuildParams; import com.alibaba.polardbx.optimizer.core.rel.PhyTableOperation; @@ -69,22 +69,17 @@ import com.alibaba.polardbx.optimizer.utils.RelUtils; import com.alibaba.polardbx.statistics.SQLRecorderLogger; import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; import org.apache.calcite.sql.OptimizerHint; import org.apache.calcite.sql.SqlSelect; import org.apache.calcite.util.Pair; import org.apache.commons.lang3.StringUtils; import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; import java.text.MessageFormat; import java.text.SimpleDateFormat; import java.util.ArrayList; +import java.util.Arrays; import java.util.Calendar; -import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.List; @@ -93,16 +88,11 @@ import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ExecutionException; import java.util.concurrent.FutureTask; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; -import static com.alibaba.polardbx.executor.gsi.GsiUtils.RETRY_WAIT; -import static com.alibaba.polardbx.executor.gsi.GsiUtils.SQLSTATE_DEADLOCK; import static java.lang.Math.max; public class FastChecker extends PhyOperationBuilderCommon { @@ -113,7 +103,6 @@ public class FastChecker extends PhyOperationBuilderCommon { private final String dstSchemaName; private final String srcLogicalTableName; private final String dstLogicalTableName; - private final Map sourceTargetGroup; private Map> srcPhyDbAndTables; private Map> dstPhyDbAndTables; private final List 
srcColumns;
@@ -123,9 +112,6 @@ public class FastChecker extends PhyOperationBuilderCommon {
 
     private final ITransactionManager tm;
 
-    private final long parallelism;
-    private final int lockTimeOut;
-
     private final PhyTableOperation planSelectHashCheckSrc;
     private final PhyTableOperation planSelectHashCheckWithUpperBoundSrc;
     private final PhyTableOperation planSelectHashCheckWithLowerBoundSrc;
@@ -141,17 +127,8 @@
     private final PhyTableOperation planSelectSampleSrc;
     private final PhyTableOperation planSelectSampleDst;
 
-    enum ParallelPolicy {
-        /**
-         * parallel by group, one group only allows single task at the same time.
-         */
-        PhyGroupParallel,
-
-        /**
-         * parallel by tables
-         */
-        PhyTableParallel
-    }
+    private volatile AtomicInteger phyTaskSum;
+    private volatile AtomicInteger phyTaskFinished;
 
     /**
      * srcColumns and dstColumns must have the same order,
@@ -161,11 +138,12 @@
     /**
     * Important: when building planSelectSampleSrc and planSelectSampleDst, the primary keys
    * passed in must keep the table's original primary-key order!
     */
-    public FastChecker(String srcSchemaName, String dstSchemaName, String srcLogicalTableName,
-                       String dstLogicalTableName, Map<String, String> sourceTargetGroup,
+    public FastChecker(String srcSchemaName, String dstSchemaName,
+                       String srcLogicalTableName, String dstLogicalTableName,
                        Map<String, Set<String>> srcPhyDbAndTables, Map<String, Set<String>> dstPhyDbAndTables,
-                       List<String> srcColumns, List<String> dstColumns, List<String> srcPks, List<String> dstPks,
-                       long parallelism, int lockTimeOut, PhyTableOperation planSelectHashCheckSrc,
+                       List<String> srcColumns, List<String> dstColumns,
+                       List<String> srcPks, List<String> dstPks,
+                       PhyTableOperation planSelectHashCheckSrc,
                        PhyTableOperation planSelectHashCheckWithUpperBoundSrc,
                        PhyTableOperation planSelectHashCheckWithLowerBoundSrc,
                        PhyTableOperation planSelectHashCheckWithLowerUpperBoundSrc,
@@ -178,7 +156,6 @@ public FastChecker(String srcSchemaName, String dstSchemaName, String srcLogical
         this.dstSchemaName = dstSchemaName;
         this.srcLogicalTableName = srcLogicalTableName;
         this.dstLogicalTableName = dstLogicalTableName;
-        this.sourceTargetGroup = sourceTargetGroup;
         this.srcPhyDbAndTables = srcPhyDbAndTables;
         this.dstPhyDbAndTables = dstPhyDbAndTables;
         this.srcColumns = srcColumns;
@@ -200,8 +177,9 @@
         this.planSelectSampleSrc = planSelectSampleSrc;
         this.planSelectSampleDst = planSelectSampleDst;
 
-        this.parallelism = parallelism;
-        this.lockTimeOut = lockTimeOut;
+        this.phyTaskSum = new AtomicInteger(0);
+        this.phyTaskFinished = new AtomicInteger(0);
+
         this.tm = ExecutorContext.getContext(srcSchemaName).getTransactionManager();
     }
 
@@ -209,9 +187,9 @@
     public static boolean isSupported(String schema) {
         return ExecutorContext.getContext(schema).getStorageInfoManager().supportFastChecker();
     }
 
-    public static FastChecker create(String schemaName, String tableName, Map<String, String> sourceTargetGroup,
+    public static FastChecker create(String schemaName, String tableName,
                                      Map<String, Set<String>> srcPhyDbAndTables,
-                                     Map<String, Set<String>> dstPhyDbAndTables, long parallelism,
+                                     Map<String, Set<String>> dstPhyDbAndTables,
                                      ExecutionContext ec) {
         final SchemaManager sm = OptimizerContext.getContext(schemaName).getLatestSchemaManager();
         final TableMeta tableMeta = sm.getTable(tableName);
@@ -223,19 +201,13 @@
         List<String> allColumns = tableMeta.getAllColumns().stream().map(ColumnMeta::getName).collect(Collectors.toList());
         final List<String> allColumnsDst = new ArrayList<>(allColumns);
-        final List<String> srcPks = getorderedPrimaryKeys(tableMeta, ec);
+        final List<String> srcPks = 
getorderedPrimaryKeys(tableMeta); final List dstPks = new ArrayList<>(srcPks); - if (parallelism <= 0) { - parallelism = Math.max(FastCheckerThreadPool.getInstance().getCorePoolSize() / 2, 1); - } - - final int lockTimeOut = ec.getParamManager().getInt(ConnectionParams.FASTCHECKER_LOCK_TIMEOUT); - final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, ec); - return new FastChecker(schemaName, schemaName, tableName, tableName, sourceTargetGroup, srcPhyDbAndTables, - dstPhyDbAndTables, allColumns, allColumnsDst, srcPks, dstPks, parallelism, lockTimeOut, + return new FastChecker(schemaName, schemaName, tableName, tableName, srcPhyDbAndTables, + dstPhyDbAndTables, allColumns, allColumnsDst, srcPks, dstPks, builder.buildSelectHashCheckForChecker(tableMeta, allColumns, srcPks, false, false), builder.buildSelectHashCheckForChecker(tableMeta, allColumns, srcPks, false, true), builder.buildSelectHashCheckForChecker(tableMeta, allColumns, srcPks, true, false), @@ -249,8 +221,8 @@ public static FastChecker create(String schemaName, String tableName, Map params, + private PhyTableOperation buildSamplePlanWithParam(String dbIndex, String phyTable, float calSamplePercentage, boolean isSrcSchema) { Map planParams = new HashMap<>(); // Physical table is 1st parameter @@ -387,55 +359,110 @@ private List> splitPhyTableIntoBatch(final Execut calSamplePercentage = maxSamplePercentage; } - PhyTableOperation plan = - buildSamplePlanWithParam(phyDbName, phyTable, new ArrayList<>(), calSamplePercentage, isSrcSchema); - SQLRecorderLogger.ddlLogger.warn(MessageFormat.format( + PhyTableOperation plan = buildSamplePlanWithParam(phyDbName, phyTable, calSamplePercentage, isSrcSchema); + SQLRecorderLogger.ddlLogger.info(MessageFormat.format( "[{0}] FastChecker {1}[{2}][{3}], begin to sample, phy table rows {4}, " + "actual sample rate {5}%, phySqlInfo: {6}, param: {7}", baseEc.getTraceId(), phyDbName, phyTable, isSrcSchema ? 
"src" : "dst", tableRowsCount, calSamplePercentage, plan.getBytesSql(), plan.getParam())); - // execute query - final List> sampleResult = GsiUtils.wrapWithSingleDbTrx(tm, baseEc, (ec) -> { - final Cursor cursor = ExecutorHelper.execute(plan, ec); - try { - return Transformer.convertUpperBoundWithDefault(cursor, (columnMeta, i) -> { - // Generate default parameter context for upper bound of empty source table - ParameterMethod defaultMethod = ParameterMethod.setString; - Object defaultValue = "0"; - - final DataType columnType = columnMeta.getDataType(); - if (DataTypeUtil.anyMatchSemantically(columnType, DataTypes.DateType, DataTypes.TimestampType, - DataTypes.DatetimeType, DataTypes.TimeType, DataTypes.YearType)) { - // For time data type, use number zero as upper bound - defaultMethod = ParameterMethod.setLong; - defaultValue = 0L; - } + List> sampledUnorderedRowsValue = new ArrayList<>(); + List> sampledUnorderedRowsPc = null; + List> returnedSampledRowsPc = new ArrayList<>(); - return new ParameterContext(defaultMethod, new Object[] {i, defaultValue}); - }); - } finally { + // execute sample query + Cursor cursor = null; + List columnMetas = null; + try { + cursor = ExecutorHelper.execute(plan, baseEc); + columnMetas = cursor.getReturnColumns(); + sampledUnorderedRowsPc = + Transformer.convertUpperBoundWithDefaultForFastChecker(cursor, false, sampledUnorderedRowsValue); + } finally { + if (cursor != null) { cursor.close(new ArrayList<>()); } - }); + } + + if (sampledUnorderedRowsPc.isEmpty() || columnMetas == null || columnMetas.isEmpty()) { + return ImmutableList.of(); + } + + final List metasForSort = columnMetas; + Map, Integer> orderedRowWithIdx = new TreeMap<>( + (r1, r2) -> { + for (int i = 0; i < metasForSort.size(); i++) { + ColumnMeta columnMeta = metasForSort.get(i); + int re = columnMeta.getDataType().compare(r1.get(i), r2.get(i)); + if (re != 0) { + return re; + } + } + return 0; + } + ); - long step = sampleResult.size() / batchNum; + //get bound and sort bound + long step = sampledUnorderedRowsValue.size() / batchNum; if (step <= 0) { return batchBoundList; } for (int i = 1; i < batchNum; i++) { long boundIndex = i * step; - if (boundIndex < sampleResult.size()) { - batchBoundList.add(sampleResult.get((int) boundIndex)); + if (boundIndex < sampledUnorderedRowsValue.size()) { + orderedRowWithIdx.put(sampledUnorderedRowsValue.get((int) boundIndex), (int) boundIndex); } } - return batchBoundList; + SQLRecorderLogger.ddlLogger.info(MessageFormat.format( + "[{0}] FastChecker {1}[{2}][{3}] sampled rows num {4}, batchNum {5}, step {6}", + baseEc.getTraceId(), phyDbName, phyTable, isSrcSchema ? "src" : "dst", + sampledUnorderedRowsValue.size(), + batchNum, + step + ) + ); + + SQLRecorderLogger.ddlLogger.info(MessageFormat.format( + "[{0}] FastChecker {1}[{2}][{3}] sampled bound after sort is {4} {5}", + baseEc.getTraceId(), phyDbName, phyTable, isSrcSchema ? "src" : "dst", + metasForSort, + orderedRowWithIdx + ) + ); + + //get ordered pc + for (Map.Entry, Integer> entry : orderedRowWithIdx.entrySet()) { + int idx = entry.getValue(); + returnedSampledRowsPc.add(sampledUnorderedRowsPc.get(idx)); + } + + SQLRecorderLogger.ddlLogger.info(MessageFormat.format( + "[{0}] FastChecker {1}[{2}][{3}] pc for check after sort is {4} {5}", + baseEc.getTraceId(), phyDbName, phyTable, isSrcSchema ? 
"src" : "dst", + metasForSort, + returnedSampledRowsPc + .stream() + .map(pcMap -> pcMap.values() + .stream() + .map(parameterContext -> { + if (parameterContext.getValue() instanceof byte[]) { + return Arrays.toString((byte[]) parameterContext.getValue()); + } else { + return parameterContext.getValue().toString(); + } + }) + .collect(Collectors.joining(", ", "[", "]")) + ) + .collect(Collectors.joining(", ")) + ) + ); + + return returnedSampledRowsPc; } private Long getPhyTableDegistByBatch(String phyDbName, String phyTable, ExecutionContext baseEc, - boolean isSrcTableTask, List> batchBoundList, - final long tableRowsCount) { + boolean isSrcTableTask, List> batchBoundList) { if (batchBoundList.isEmpty()) { return null; } @@ -487,12 +514,12 @@ private Long getPhyTableDegistByBatch(String phyDbName, String phyTable, Executi buildHashcheckPlanWithDnfParam(phyDbName, phyTable, lastBoundPc, operationLastBound, true, false)); //log batch sql - for (int i = 0; i < hashcheckPlans.size(); i++) { - SQLRecorderLogger.ddlLogger.warn(MessageFormat.format( - "[{0}] FastChecker {1}[{2}][{3}], batch {4}, phySqlInfo: {5}, param: {6}", - baseEc.getTraceId(), phyDbName, phyTable, isSrcTableTask ? "src" : "dst", i, - hashcheckPlans.get(i).getBytesSql(), hashcheckPlans.get(i).getParam())); - } +// for (int i = 0; i < hashcheckPlans.size(); i++) { +// SQLRecorderLogger.ddlLogger.info(MessageFormat.format( +// "[{0}] FastChecker {1}[{2}][{3}], batch {4}, phySqlInfo: {5}, param: {6}", +// baseEc.getTraceId(), phyDbName, phyTable, isSrcTableTask ? "src" : "dst", i, +// hashcheckPlans.get(i).getBytesSql(), hashcheckPlans.get(i).getParam())); +// } //excute for (int i = 0; i < hashcheckPlans.size(); i++) { @@ -521,13 +548,15 @@ private Long getPhyTableDegistByBatch(String phyDbName, String phyTable, Executi private Long getPhyTableDegistByFullScan(String phyDbName, String phyTable, ExecutionContext baseEc, boolean isSrcTableTask) { + + if (CrossEngineValidator.isJobInterrupted(baseEc) || Thread.currentThread().isInterrupted()) { + long jobId = baseEc.getDdlJobId(); + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, + "The job '" + jobId + "' has been cancelled"); + } + final Map params = new HashMap<>(1); params.put(1, PlannerUtils.buildParameterContextForTableName(phyTable, 1)); -// PhyTableOperation plan = -// new PhyTableOperation(isSrcTableTask ? this.planSelectHashCheckSrc : this.planSelectHashCheckDst); -// plan.setDbIndex(phyDbName); -// plan.setTableNames(ImmutableList.of(ImmutableList.of(phyTable))); -// plan.setParam(params); PhyTableOperation targetPhyOp = isSrcTableTask ? this.planSelectHashCheckSrc : this.planSelectHashCheckDst; PhyTableOpBuildParams buildParams = new PhyTableOpBuildParams(); @@ -538,7 +567,7 @@ private Long getPhyTableDegistByFullScan(String phyDbName, String phyTable, Exec PhyTableOperationFactory.getInstance().buildPhyTableOperationByPhyOp(targetPhyOp, buildParams); //log batch sql - SQLRecorderLogger.ddlLogger.warn(MessageFormat.format( + SQLRecorderLogger.ddlLogger.info(MessageFormat.format( "[{0}] FastChecker {1}[{2}][{3}], full scan, phySqlInfo: {4}", baseEc.getTraceId(), phyDbName, phyTable, isSrcTableTask ? 
"src" : "dst", plan)); @@ -546,49 +575,48 @@ private Long getPhyTableDegistByFullScan(String phyDbName, String phyTable, Exec } private Long executeHashcheckPlan(PhyTableOperation plan, ExecutionContext ec) { - return GsiUtils.retryOnException(() -> { - Cursor cursor = null; - Long result = null; - try { - cursor = ExecutorHelper.executeByCursor(plan, ec, false); - Row row; - if (cursor != null && (row = cursor.next()) != null) { - result = (Long) row.getObject(0); - while (cursor.next() != null) { - //do nothing - } - } - } finally { - if (cursor != null) { - cursor.close(new ArrayList<>()); + Cursor cursor = null; + Long result = null; + try { + cursor = ExecutorHelper.executeByCursor(plan, ec, false); + Row row; + if (cursor != null && (row = cursor.next()) != null) { + result = (Long) row.getObject(0); + while (cursor.next() != null) { + //do nothing } } - - return result; - }, (e) -> { - if (e.getSQLState() != null && e.getSQLState().equals(SQLSTATE_DEADLOCK) - && ErrorCode.ER_LOCK_DEADLOCK.getCode() == e.getErrorCode()) { - return true; + } finally { + if (cursor != null) { + cursor.close(new ArrayList<>()); } + } + return result; + } + + /** + * 1. 含local partition的表将不进行batch check (因为sample结果是乱序的) + * 2. 逻辑表为unique gsi的表将不进行batch check (因为其物理表只含主键列,但主键列没有主键属性) + */ + private boolean whetherCanSplitIntoBatch(ExecutionContext baseEc, boolean isSrc) { + //won't do sample for local partition table + TableMeta tableMeta = isSrc ? + baseEc.getSchemaManager(srcSchemaName).getTable(srcLogicalTableName) + : baseEc.getSchemaManager(dstSchemaName).getTable(dstLogicalTableName); + if (tableMeta != null && tableMeta.getLocalPartitionDefinitionInfo() != null) { return false; - }, (e, retryCount) -> { - if (retryCount < 3) { - // Only sleep on no retry operation(dead lock). - try { - TimeUnit.MILLISECONDS.sleep(RETRY_WAIT[retryCount]); - } catch (InterruptedException ex) { - // Throw it out, because this may caused by user interrupt. - throw GeneralUtil.nestedException(ex); - } - } else { - throw new TddlRuntimeException(ErrorCode.ERR_FAST_CHECKER, - "FastChecker max retry times exceeded: " + e.getMessage()); - } - }); + } + + //won't sample for missing primary key table + List pks = isSrc ? this.srcPks : this.dstPks; + if (pks.isEmpty()) { + return false; + } + return true; } // use Long to store uint64_t hash result generated by DN, since java doesn't support unsigned type. - private Pair hashcheckForSinglePhyTable(String phyDbName, String phyTable, ExecutionContext baseEc, + private Pair hashCheckForSinglePhyTable(String phyDbName, String phyTable, ExecutionContext baseEc, boolean isSrcTableTask, long maxBatchRows) { String schema = isSrcTableTask ? 
srcSchemaName : dstSchemaName; @@ -613,6 +641,10 @@ private Pair hashcheckForSinglePhyTable(String phyDbName, String boolean failedToSplitBatch = false; Long hashResult = null; + if (!whetherCanSplitIntoBatch(baseEc, isSrcTableTask)) { + needBatchCheck = false; + } + if (needBatchCheck) { long finalBatchRows = maxBatchRows; if (tableRowsCount * tableAvgRowLength > fastcheckerMaxBatchFileSize) { @@ -626,20 +658,19 @@ private Pair hashcheckForSinglePhyTable(String phyDbName, String List> batchBoundList = splitPhyTableIntoBatch(baseEc, phyDbName, phyTable, tableRowsCount, finalBatchRows, isSrcTableTask); if (!batchBoundList.isEmpty()) { - SQLRecorderLogger.ddlLogger.warn(MessageFormat.format( + SQLRecorderLogger.ddlLogger.info(MessageFormat.format( "[{0}] FastChecker start hash phy for {1}[{2}][{3}], and phy table is divided into {4} batches", baseEc.getTraceId(), phyDbName, phyTable, isSrcTableTask ? "src" : "dst", batchBoundList.size() + 1)); - hashResult = getPhyTableDegistByBatch(phyDbName, phyTable, baseEc, isSrcTableTask, batchBoundList, - tableRowsCount); + hashResult = getPhyTableDegistByBatch(phyDbName, phyTable, baseEc, isSrcTableTask, batchBoundList); } else { failedToSplitBatch = true; } } if (!needBatchCheck || failedToSplitBatch) { - SQLRecorderLogger.ddlLogger.warn(MessageFormat.format( + SQLRecorderLogger.ddlLogger.info(MessageFormat.format( "[{0}] FastChecker start hash phy for {1}[{2}][{3}], and phy table is hashed by full scan", baseEc.getTraceId(), phyDbName, phyTable, isSrcTableTask ? "src" : "dst")); @@ -647,11 +678,19 @@ private Pair hashcheckForSinglePhyTable(String phyDbName, String } - SQLRecorderLogger.ddlLogger.warn(MessageFormat.format( + SQLRecorderLogger.ddlLogger.info(MessageFormat.format( "[{0}] FastChecker finish phy hash for {1}[{2}][{3}], time use[{4}], table hash value[{5}]", baseEc.getTraceId(), phyDbName, phyTable, isSrcTableTask ? "src" : "dst", (System.currentTimeMillis() - startTime) / 1000.0, hashResult == null ? 
"null" : hashResult)); + this.phyTaskFinished.incrementAndGet(); + + FastCheckerThreadPool.getInstance().increaseCheckTaskInfo( + baseEc.getDdlJobId(), + 0, + 1 + ); + return Pair.of(hashResult, isSrcTableTask); } @@ -665,7 +704,17 @@ public boolean checkWithChangeSet(ExecutionContext baseEc, boolean stopDoubleWri baseEc.getParamManager().getInt(ConnectionParams.FASTCHECKER_BATCH_TIMEOUT_RETRY_TIMES); ExecutionContext tsoEc = baseEc.copy(); + //set trx isolation: RR tsoEc.setTxIsolation(Connection.TRANSACTION_REPEATABLE_READ); + //set share readView + tsoEc.setShareReadView(true); + //socket timeout unit: ms + ParamManager.setVal( + tsoEc.getParamManager().getProps(), + ConnectionParams.SOCKET_TIMEOUT, + Integer.toString(1000 * 60 * 60 * 24 * 7), + true + ); boolean tsoCheckResult = GsiUtils.wrapWithTransaction(tm, ITransactionPolicy.TSO, tsoEc, (ec) -> { /** @@ -675,51 +724,19 @@ public boolean checkWithChangeSet(ExecutionContext baseEc, boolean stopDoubleWri idleQueryForEachPhyDb(this.srcPhyDbAndTables, this.dstPhyDbAndTables, ec); // stop double write - final Logger LOGGER = SQLRecorderLogger.ddlEngineLogger; - final DdlTask currentTask = task; - DdlEngineAccessorDelegate delegate = new DdlEngineAccessorDelegate() { - @Override - protected Integer invoke() { - ComplexTaskMetaManager - .updateSubTasksStatusByJobIdAndObjName(task.getJobId(), - srcSchemaName, - srcLogicalTableName, - ComplexTaskMetaManager.ComplexTaskStatus.WRITE_REORG, - ComplexTaskMetaManager.ComplexTaskStatus.DOING_CHECKER, - getConnection()); - try { - for (String name : relatedTables) { - TableInfoManager.updateTableVersionWithoutDataId(srcSchemaName, name, getConnection()); - } - } catch (Exception e) { - throw GeneralUtil.nestedException(e); - } - currentTask.setState(DdlTaskState.DIRTY); - DdlEngineTaskRecord taskRecord = TaskHelper.toDdlEngineTaskRecord(currentTask); - return engineTaskAccessor.updateTask(taskRecord); - } - }; - delegate.execute(); - - LOGGER.info( - String.format( - "Update table status[ schema:%s, table:%s, before state:%s, after state:%s]", - srcSchemaName, - srcLogicalTableName, - ComplexTaskMetaManager.ComplexTaskStatus.WRITE_REORG.name(), - ComplexTaskMetaManager.ComplexTaskStatus.DOING_CHECKER.name())); - - try { - SyncManagerHelper.sync( - new TablesMetaChangePreemptiveSyncAction(srcSchemaName, relatedTables, 1500L, 1500L, - TimeUnit.SECONDS), - true); - } catch (Throwable t) { - LOGGER.error(String.format( - "error occurs while sync table meta, schemaName:%s, tableName:%s", srcSchemaName, - srcLogicalTableName)); - throw GeneralUtil.nestedException(t); - } + ChangeSetUtils.doChangeSetSchemaChange( + srcSchemaName, srcLogicalTableName, + relatedTables, task, + ComplexTaskMetaManager.ComplexTaskStatus.WRITE_REORG, + ComplexTaskMetaManager.ComplexTaskStatus.DELETE_ONLY + ); + + ChangeSetUtils.doChangeSetSchemaChange( + srcSchemaName, srcLogicalTableName, + relatedTables, task, + ComplexTaskMetaManager.ComplexTaskStatus.DELETE_ONLY, + ComplexTaskMetaManager.ComplexTaskStatus.DOING_CHECKER + ); int retryCount = 0; boolean timeoutHappened; @@ -728,9 +745,15 @@ protected Integer invoke() { do { timeoutHappened = false; try { - checkRet = parallelCheck(this.srcPhyDbAndTables, this.dstPhyDbAndTables, ec, this.parallelism, - batchSize, ParallelPolicy.PhyGroupParallel); - } catch (TddlNestableRuntimeException e) { + checkRet = parallelCheck(this.srcPhyDbAndTables, this.dstPhyDbAndTables, ec, batchSize); + } catch (Throwable e) { + //rollback task info + 
FastCheckerThreadPool.getInstance().rollbackCheckTaskInfo( + baseEc.getDdlJobId(), + this.phyTaskSum.get(), + this.phyTaskFinished.get() + ); + if (StringUtils.containsIgnoreCase(e.getMessage(), "fetch phy table digest timeout")) { timeoutHappened = true; batchSize = batchSize / 4; @@ -774,7 +797,11 @@ public boolean check(ExecutionContext baseEc) { timeoutHappened = false; try { tsoCheckResult = tsoCheck(baseEc, batchSize); - } catch (TddlNestableRuntimeException e) { + } catch (Throwable e) { + //rollback task info + FastCheckerThreadPool.getInstance() + .rollbackCheckTaskInfo(baseEc.getDdlJobId(), this.phyTaskSum.get(), this.phyTaskFinished.get()); + if (StringUtils.containsIgnoreCase(e.getMessage(), "fetch phy table digest timeout")) { timeoutHappened = true; batchSize = batchSize / 4; @@ -793,136 +820,38 @@ public boolean check(ExecutionContext baseEc) { SQLRecorderLogger.ddlLogger.warn( MessageFormat.format("[{0}] FastChecker with TsoCheck failed", baseEc.getTraceId())); } - //boolean xaCheckResult = xaCheckForIsomorphicTable(baseEc); return tsoCheckResult; } protected boolean tsoCheck(ExecutionContext baseEc, long batchSize) { ExecutionContext tsoEc = baseEc.copy(); + //set trx isolation: RR tsoEc.setTxIsolation(Connection.TRANSACTION_REPEATABLE_READ); + //set share readView + tsoEc.setShareReadView(true); + //socket timeout unit: ms + ParamManager.setVal( + tsoEc.getParamManager().getProps(), + ConnectionParams.SOCKET_TIMEOUT, + Integer.toString(1000 * 60 * 60 * 24 * 7), + true + ); boolean tsoCheckResult = GsiUtils.wrapWithTransaction(tm, ITransactionPolicy.TSO, tsoEc, (ec) -> { /** * use idle query (select ... limit 1) for each phyDB so that DN can reserve TSO timestamp, * to prevent "TSO snapshot too old" when checking process is time consuming. * */ idleQueryForEachPhyDb(this.srcPhyDbAndTables, this.dstPhyDbAndTables, ec); - return parallelCheck(this.srcPhyDbAndTables, this.dstPhyDbAndTables, ec, this.parallelism, batchSize, - ParallelPolicy.PhyGroupParallel); + return parallelCheck(this.srcPhyDbAndTables, this.dstPhyDbAndTables, ec, batchSize); }); return tsoCheckResult; } - /** - * since for scaleOut, src tables and dst tables have same structure and name (but they have different phyDb) - * we can set up readview by each pair(src table, dst table). - * So we need not lock all the table one time. Instead, we once lock a pair of them. - * step1. exec "lock tables ... read" for a pair of table(src, dst) - * step2. select 1 to establish readview - * step3. release lock - * step4. do check - * step5. go to step1 to check another pair of table. 
- */ - protected boolean xaCheckForIsomorphicTable(ExecutionContext baseEc, long maxBatchSize) { - // make sure that src and dst have same tableNum - if (srcPhyDbAndTables.size() != dstPhyDbAndTables.size()) { - return false; - } - for (Map.Entry entry : sourceTargetGroup.entrySet()) { - String srcPhyDb = entry.getKey(); - String dstPhyDb = entry.getValue(); - /** - * since for scaleOut, src tables' Name and dst tables' are same - * we sort them to match each pair - * */ - List srcPhyTables = srcPhyDbAndTables.get(srcPhyDb).stream().sorted().collect(Collectors.toList()); - List dstPhyTables = dstPhyDbAndTables.get(dstPhyDb).stream().sorted().collect(Collectors.toList()); - if (srcPhyTables.size() != dstPhyTables.size()) { - return false; - } - - boolean xaSingleResult; - for (int i = 0; i < srcPhyTables.size(); i++) { - Map> src = ImmutableMap.of(srcPhyDb, ImmutableSet.of(srcPhyTables.get(i))); - Map> dst = ImmutableMap.of(dstPhyDb, ImmutableSet.of(dstPhyTables.get(i))); - Map> needLockTables = - ImmutableMap.of(srcPhyDb, ImmutableSet.of(srcPhyTables.get(i)), dstPhyDb, - ImmutableSet.of(dstPhyTables.get(i))); - - TablesLocker locker = new TablesLocker(this.srcSchemaName, needLockTables); - try { - locker.lock(lockTimeOut); - xaSingleResult = GsiUtils.wrapWithTransaction(tm, ITransactionPolicy.XA, baseEc, (ec) -> { - try { - idleQueryForEachPhyDb(src, dst, ec); - } finally { - locker.unlock(); - } - return parallelCheck(src, dst, ec, this.parallelism, maxBatchSize, - ParallelPolicy.PhyTableParallel); - }); - } finally { - locker.unlock(); - } - if (xaSingleResult == false) { - return false; - } - } - } - return true; - } - - /** - * For GSI check or other scene, src tables and dst tables are heterogeneous, - * so we need lock all src tables and all dst tables to do check. - * step1. exec "lock tables ... read" for all src tables and dst tables. - * step2. select 1 to establish readview - * step3. release lock. - * step4. 
do check
- */
- protected boolean xaCheckForHeterogeneousTable(ExecutionContext baseEc, long maxBatchSize) {
- Map> needLockTables = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
- this.srcPhyDbAndTables.forEach((phyDb, phyTables) -> {
- if (needLockTables.containsKey(phyDb)) {
- needLockTables.get(phyDb).addAll(phyTables);
- } else {
- needLockTables.put(phyDb, new TreeSet<>(String.CASE_INSENSITIVE_ORDER));
- needLockTables.get(phyDb).addAll(phyTables);
- }
- });
- //todo: in GSI, we may not lock index table
- this.dstPhyDbAndTables.forEach((phyDb, phyTables) -> {
- if (needLockTables.containsKey(phyDb)) {
- needLockTables.get(phyDb).addAll(phyTables);
- } else {
- needLockTables.put(phyDb, new TreeSet<>(String.CASE_INSENSITIVE_ORDER));
- needLockTables.get(phyDb).addAll(phyTables);
- }
- });
-
- boolean xaCheckResult = false;
- TablesLocker locker = new TablesLocker(this.srcSchemaName, needLockTables);
- locker.lock(lockTimeOut);
-
- try {
- xaCheckResult = GsiUtils.wrapWithTransaction(tm, ITransactionPolicy.XA, baseEc, (ec) -> {
- try {
- idleQueryForEachPhyDb(this.srcPhyDbAndTables, this.dstPhyDbAndTables, ec);
- } finally {
- locker.unlock();
- }
- return parallelCheck(this.srcPhyDbAndTables, this.dstPhyDbAndTables, ec, this.parallelism, maxBatchSize,
- ParallelPolicy.PhyTableParallel);
- });
- } finally {
- locker.unlock();
- }
- return xaCheckResult;
- }
-
- private void idleQueryForEachPhyDb(Map> srcDbAndTb, Map> dstDbAndTb,
+ private void idleQueryForEachPhyDb(Map> srcDbAndTb,
+ Map> dstDbAndTb,
ExecutionContext baseEc) {
- Map, Set> phyDbAndTableGather =
+ Map, String> phyDbAndOneTable =
new TreeMap<>(new Comparator>() {
@Override
public int compare(Pair o1, Pair o2) {
@@ -934,200 +863,167 @@ public int compare(Pair o1, Pair o2) {
}
}
});
+
/**
- * inorder to establish readview,
+ * in order to establish readView,
* we only need to select * limit 1 on each phyDB.
* */
srcDbAndTb.forEach((phyDb, phyTables) -> {
- phyDbAndTableGather.put(Pair.of(phyDb, true), ImmutableSet.of(phyTables.stream().findFirst().get()));
+ phyDbAndOneTable.put(Pair.of(phyDb, true), phyTables.stream().findFirst().get());
});
-
dstDbAndTb.forEach((phyDb, phyTables) -> {
- phyDbAndTableGather.put(Pair.of(phyDb, false), ImmutableSet.of(phyTables.stream().findFirst().get()));
+ phyDbAndOneTable.put(Pair.of(phyDb, false), phyTables.stream().findFirst().get());
});
- phyDbAndTableGather.forEach((phyDb, phyTables) -> phyTables.forEach(phyTable -> {
+ for (Map.Entry, String> entry : phyDbAndOneTable.entrySet()) {
+ Pair phyDbInfoPair = entry.getKey();
+ String phyTb = entry.getValue();
final Map params = new HashMap<>(1);
- params.put(1, PlannerUtils.buildParameterContextForTableName(phyTable, 1));
- PhyTableOperation targetPhyOp = phyDb.getValue() ? this.planIdleSelectSrc : this.planIdleSelectDst;
-
-// PhyTableOperation plan =
-// new PhyTableOperation(phyDb.getValue() ? this.planIdleSelectSrc : this.planIdleSelectDst);
-// plan.setDbIndex(phyDb.getKey());
-// plan.setTableNames(ImmutableList.of(ImmutableList.of(phyTable)));
-// plan.setParam(params);
+ params.put(1, PlannerUtils.buildParameterContextForTableName(phyTb, 1));
+ PhyTableOperation targetPhyOp =
+ phyDbInfoPair.getValue() ?
this.planIdleSelectSrc : this.planIdleSelectDst; PhyTableOpBuildParams buildParams = new PhyTableOpBuildParams(); - buildParams.setGroupName(phyDb.getKey()); - buildParams.setPhyTables(ImmutableList.of(ImmutableList.of(phyTable))); + buildParams.setGroupName(phyDbInfoPair.getKey()); + buildParams.setPhyTables(ImmutableList.of(ImmutableList.of(phyTb))); buildParams.setDynamicParams(params); + PhyTableOperation plan = PhyTableOperationFactory.getInstance().buildPhyTableOperationByPhyOp(targetPhyOp, buildParams); - GsiUtils.retryOnException(() -> { + { Cursor cursor = null; try { cursor = ExecutorHelper.executeByCursor(plan, baseEc, false); while (cursor != null && cursor.next() != null) { } + } catch (Exception e) { + throw new TddlNestableRuntimeException( + String.format("FastChecker establish read view failed on group[%s]", + phyDbInfoPair.getKey()), e); } finally { if (cursor != null) { cursor.close(new ArrayList<>()); } } - return true; - }, (e) -> { - if (e.getSQLState() != null && e.getSQLState().equals(SQLSTATE_DEADLOCK) - && ErrorCode.ER_LOCK_DEADLOCK.getCode() == e.getErrorCode()) { - return true; - } - return false; - }, (e, retryCount) -> { - if (retryCount < 3) { - //sleep when dead lock. - try { - TimeUnit.MILLISECONDS.sleep(RETRY_WAIT[retryCount]); - } catch (InterruptedException ex) { - // Throw it out, because this may caused by user interrupt. - throw GeneralUtil.nestedException(ex); - } - } else { - throw new TddlRuntimeException(ErrorCode.ERR_FAST_CHECKER, - "FastChecker(idle select) max retry times exceeded: " + e.getMessage()); - } - }); - })); + } + } } - /** - * if ParallelPolicy is PhyTableParallel, we put all phyTable task into "runTasks" function, - * to parallel check(it also subject to parallelism limitation). - * if ParallelPolicy is PhyGroupParallel, we once select single phyTable task from each group. - */ - private boolean parallelCheck(Map> srcDbAndTb, Map> dstDbAndTb, - ExecutionContext baseEc, long parallelism, long maxBatchSize, ParallelPolicy policy) { - // Force master first and following will copy this EC. 
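
The new `idleQueryForEachPhyDb` above pins the TSO snapshot by running one cheap `SELECT ... LIMIT 1` per physical DB inside the check transaction, so each DN reserves the read view before the long-running digest queries start. A minimal sketch of the same pattern in plain JDBC (the connection handling, schema and table names below are illustrative stand-ins, not PolarDB-X APIs):

```java
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Map;

public final class ReadViewPinner {
    /**
     * Touch every physical DB once on the transaction's connection so the
     * storage node materializes a read view at the transaction's TSO.
     * oneTablePerPhyDb maps a physical DB name to any one of its tables.
     */
    public static void pinReadView(Connection txConn,
                                   Map<String, String> oneTablePerPhyDb) throws SQLException {
        for (Map.Entry<String, String> e : oneTablePerPhyDb.entrySet()) {
            String sql = "SELECT 1 FROM `" + e.getKey() + "`.`" + e.getValue() + "` LIMIT 1";
            try (Statement stmt = txConn.createStatement();
                 ResultSet rs = stmt.executeQuery(sql)) {
                while (rs.next()) {
                    // drain the at-most-one-row result; the side effect we want
                    // is the snapshot registration, not the data itself
                }
            }
        }
    }
}
```
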
- baseEc.getExtraCmds().put(ConnectionProperties.MASTER, true);
-
- List> result = new ArrayList<>();
+ protected boolean parallelCheck(Map> srcDbAndTb,
+ Map> dstDbAndTb,
+ ExecutionContext baseEc, long batchSize) {
+ Set allGroups = new TreeSet<>(String::compareToIgnoreCase);
+ allGroups.addAll(srcDbAndTb.keySet());
+ allGroups.addAll(dstDbAndTb.keySet());
+
+ // < groupName, storageInstId >
+ Map mapping = queryStorageInstIdByPhyGroup(allGroups);
+
+ Map>>> allFutureTasksByGroup =
+ new TreeMap<>(String::compareToIgnoreCase);
+
+ int srcTableTaskCount = 0, dstTableTaskCount = 0;
+ final Map mdcContext = MDC.getCopyOfContextMap();
+
+ for (Map.Entry> entry : srcDbAndTb.entrySet()) {
+ String srcDb = entry.getKey();
+ for (String srcTb : entry.getValue()) {
+ FutureTask> task = new FutureTask<>(
+ () -> {
+ MDC.setContextMap(mdcContext);
+ return hashCheckForSinglePhyTable(srcDb, srcTb, baseEc, true, batchSize);
+ }
+ );

- int srcTableTaskCount = 0;
- for (Set phyTables : srcDbAndTb.values()) {
- srcTableTaskCount += phyTables.size();
- }
- int dstTableTaskCount = 0;
- for (Set phyTables : dstDbAndTb.values()) {
- dstTableTaskCount += phyTables.size();
+ allFutureTasksByGroup.putIfAbsent(srcDb, new ArrayList<>());
+ allFutureTasksByGroup.get(srcDb).add(task);
+ srcTableTaskCount++;
+ }
}

- if (policy == ParallelPolicy.PhyTableParallel) {
- final List>> allFutureTasks =
- new ArrayList<>(srcTableTaskCount + dstTableTaskCount);
- final BlockingQueue blockingQueue =
- parallelism <= 0 ? null : new ArrayBlockingQueue<>((int) parallelism);
- //gather src tasks
- srcDbAndTb.forEach((phyDb, phyTables) -> phyTables.forEach(
- phyTable -> allFutureTasks.add(new FutureTask>(() -> {
- try {
- return hashcheckForSinglePhyTable(phyDb, phyTable, baseEc, true, maxBatchSize);
- } finally {
- // Poll in finally to prevent dead lock on putting blockingQueue.
- if (blockingQueue != null) {
- blockingQueue.poll(); // Parallelism control notify.
- }
+ for (Map.Entry> entry : dstDbAndTb.entrySet()) {
+ String dstDb = entry.getKey();
+ for (String dstTb : entry.getValue()) {
+ FutureTask> task = new FutureTask<>(
+ () -> {
+ MDC.setContextMap(mdcContext);
+ return hashCheckForSinglePhyTable(dstDb, dstTb, baseEc, false, batchSize);
+ }
- }))));
+ );

- dstDbAndTb.forEach(
- (phyDb, phyTables) -> phyTables.forEach(phyTable -> allFutureTasks.add(new FutureTask<>(() -> {
- try {
- return hashcheckForSinglePhyTable(phyDb, phyTable, baseEc, false, maxBatchSize);
- } finally {
- // Poll in finally to prevent dead lock on putting blockingQueue.
- if (blockingQueue != null) {
- blockingQueue.poll(); // Parallelism control notify.
- } - } - })))); + allFutureTasksByGroup.putIfAbsent(dstDb, new ArrayList<>()); + allFutureTasksByGroup.get(dstDb).add(task); + dstTableTaskCount++; + } + } - Collections.shuffle(allFutureTasks); + SQLRecorderLogger.ddlLogger.info( + MessageFormat.format( + "[{0}] FastChecker try to submit {1} tasks to fastChecker threadPool", + baseEc.getTraceId(), + srcTableTaskCount + dstTableTaskCount + ) + ); + + //update task info + this.phyTaskSum.set(srcTableTaskCount + dstTableTaskCount); + + FastCheckerThreadPool.getInstance().increaseCheckTaskInfo( + baseEc.getDdlJobId(), + this.phyTaskSum.get(), + 0 + ); + + //submit tasks to fastChecker threadPool + FastCheckerThreadPool threadPool = FastCheckerThreadPool.getInstance(); + List> allTasksByStorageInstId = new ArrayList<>(); + for (Map.Entry>>> entry : allFutureTasksByGroup.entrySet()) { + String groupName = entry.getKey(); + if (!mapping.containsKey(groupName)) { + throw new TddlRuntimeException( + ErrorCode.ERR_FAST_CHECKER, + String.format("FastChecker failed to get group-storageInstId mapping, group [%s]", groupName) + ); + } + String storageInstId = mapping.get(groupName); + for (FutureTask> task : entry.getValue()) { + allTasksByStorageInstId.add(Pair.of(storageInstId, task)); + } + } - runTasks(allFutureTasks, blockingQueue, result, parallelism); + threadPool.submitTasks(allTasksByStorageInstId); - } else if (policy == ParallelPolicy.PhyGroupParallel) { - // tablesByGroup>> - final Map>> tablesByGroup = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); - srcDbAndTb.forEach((phyDb, phyTables) -> phyTables.forEach(phyTable -> { - if (tablesByGroup.containsKey(phyDb)) { - tablesByGroup.get(phyDb).add(Pair.of(phyTable, true)); - } else { - tablesByGroup.put(phyDb, new TreeSet<>(new Comparator>() { - @Override - public int compare(Pair o1, Pair o2) { - int ret = String.CASE_INSENSITIVE_ORDER.compare(o1.getKey(), o2.getKey()); - if (ret == 0) { - ret = Boolean.compare(o1.getValue(), o2.getValue()); - } - return ret; - } - })); - tablesByGroup.get(phyDb).add(Pair.of(phyTable, true)); + List> result = new ArrayList<>(); + List>> allFutureTasks = allTasksByStorageInstId + .stream() + .map(Pair::getValue) + .map(task -> (FutureTask>) task) + .collect(Collectors.toList()); + + for (FutureTask> futureTask : allFutureTasks) { + try { + result.add(futureTask.get()); + } catch (Exception e) { + for (FutureTask> taskToBeCancel : allFutureTasks) { + try { + taskToBeCancel.cancel(true); + } catch (Exception ignore) { + } } - })); - dstDbAndTb.forEach((phyDb, phyTables) -> phyTables.forEach(phyTable -> { - if (tablesByGroup.containsKey(phyDb)) { - tablesByGroup.get(phyDb).add(Pair.of(phyTable, false)); + if (e.getMessage().toLowerCase().contains("XResult stream fetch result timeout".toLowerCase())) { + throw new TddlNestableRuntimeException("FastChecker fetch phy table digest timeout", e); } else { - tablesByGroup.put(phyDb, new TreeSet<>(new Comparator>() { - @Override - public int compare(Pair o1, Pair o2) { - int ret = String.CASE_INSENSITIVE_ORDER.compare(o1.getKey(), o2.getKey()); - if (ret == 0) { - ret = Boolean.compare(o1.getValue(), o2.getValue()); - } - return ret; - } - })); - tablesByGroup.get(phyDb).add(Pair.of(phyTable, false)); + throw new TddlNestableRuntimeException(e); } - })); - - while (!tablesByGroup.isEmpty()) { - final BlockingQueue blockingQueue = parallelism <= 0 ? 
null : new ArrayBlockingQueue<>( - (int) parallelism); - final List>> futureTasks = new ArrayList<>(); - List finishPhyDb = new ArrayList<>(); - tablesByGroup.forEach((phyDb, phyTables) -> { - if (phyTables.isEmpty()) { - finishPhyDb.add(phyDb); - } else { - Pair phyTable = phyTables.stream().findFirst().get(); - futureTasks.add(new FutureTask<>(() -> { - try { - return hashcheckForSinglePhyTable(phyDb, phyTable.getKey(), baseEc, - phyTable.getValue(), maxBatchSize); - } finally { - if (blockingQueue != null) { - blockingQueue.poll(); - } - } - })); - phyTables.remove(phyTable); - } - }); - finishPhyDb.forEach(dbName -> { - tablesByGroup.remove(dbName); - }); - - runTasks(futureTasks, blockingQueue, result, parallelism); } } List srcResult = - result.stream().filter(item -> item != null && item.getKey() != null && item.getValue()).map(Pair::getKey) + result.stream().filter(p -> p != null && p.getKey() != null && p.getValue() == true).map(Pair::getKey) .collect(Collectors.toList()); List dstResult = - result.stream().filter(item -> item != null && item.getKey() != null && !item.getValue()).map(Pair::getKey) + result.stream().filter(p -> p != null && p.getKey() != null && p.getValue() == false).map(Pair::getKey) .collect(Collectors.toList()); return srcTableTaskCount == result.stream().filter(Objects::nonNull).filter(Pair::getValue).count() @@ -1143,66 +1039,6 @@ private boolean compare(List src, List dst) { return srcCaculator.getHashVal().equals(dstCaculator.getHashVal()); } - private void runTasks(List>> futures, BlockingQueue blockingQueue, - List> result, long parallelism) { - AtomicReference excep = new AtomicReference<>(null); - if (parallelism <= 0) { - futures.forEach(task -> FastCheckerThreadPool.getInstance() - .executeWithContext(task, PriorityFIFOTask.TaskPriority.HIGH_PRIORITY_TASK)); - } else { - futures.forEach(task -> { - try { - blockingQueue.put(task); // Just put an object to get blocked when full. - } catch (Exception e) { - excep.set(e); - } - if (null == excep.get()) { - FastCheckerThreadPool.getInstance() - .executeWithContext(task, PriorityFIFOTask.TaskPriority.HIGH_PRIORITY_TASK); - } - }); - } - - if (excep.get() != null) { - // Interrupt all. - futures.forEach(f -> { - try { - f.cancel(true); - } catch (Throwable ignore) { - } - }); - } - - for (FutureTask> future : futures) { - try { - result.add(future.get()); - } catch (ExecutionException e) { - futures.forEach(f -> { - try { - f.cancel(true); - } catch (Throwable ignore) { - } - }); - if (null == excep.get()) { - excep.set(e); - } - if (e.getMessage().toLowerCase().contains("XResult stream fetch result timeout".toLowerCase())) { - throw new TddlNestableRuntimeException("fastchecker fetch phy table digest timeout", e); - } - } catch (InterruptedException e) { - futures.forEach(f -> { - try { - f.cancel(true); - } catch (Throwable ignore) { - } - }); - if (null == excep.get()) { - excep.set(e); - } - } - } - } - public void reportCheckOk(ExecutionContext ec) { final CheckerManager checkerManager = new CheckerManager(srcSchemaName); final String finishDetails = "FastChecker check OK."; @@ -1245,144 +1081,6 @@ public Long getHashVal() { } } - /** - * use TableLocker to lock phyTables. - * we save all the connections to execute unlock. 
- */ - class TablesLocker { - /** - * set lock table timeout(n seconds) in session level - */ - private static final String TABLE_LOCK_TIMEOUT = "SET SESSION LOCK_WAIT_TIMEOUT = "; - private static final String LOCK_TABLES = "LOCK TABLES "; - private static final String READ_MODE = " READ"; - private static final String UNLOCK_TABLES = "UNLOCK TABLES"; - - private final String schemaName; - private final Map> phyDbAndTables; - private Map lockConnections; - - public TablesLocker(String schemaName, Map> phyDbAndTables) { - this.schemaName = schemaName; - this.phyDbAndTables = phyDbAndTables; - this.lockConnections = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); - } - - public void lock(int timeOutSeconds) { - phyDbAndTables.forEach((phyDb, phyTables) -> { - if (phyTables.size() == 0) { - //this return only break the lambda, it will not finish lock function - return; - } - TGroupDataSource dataSource = getDataSource(phyDb); - if (dataSource == null) { - this.unlock(); - throw new TddlRuntimeException(ErrorCode.ERR_FAST_CHECKER, "FastChecker get connection fail."); - } - - boolean lockFailed = false; - long connOrignalLockWaitTimeout = -1; - Connection conn = null; - try { - conn = dataSource.getConnection(MasterSlave.MASTER_ONLY); - lockConnections.put(phyDb, conn); - - String showLockWaitTimeoutStmt = "show variables like 'lock_wait_timeout'"; - try (PreparedStatement ps = conn.prepareStatement(showLockWaitTimeoutStmt); - ResultSet rs = ps.executeQuery()) { - boolean hasNext = rs.next(); - if (hasNext) { - connOrignalLockWaitTimeout = Long.valueOf(rs.getString("Value")); - } - } - - String setLockWaitTimeOutStmt = TABLE_LOCK_TIMEOUT + timeOutSeconds; - try (PreparedStatement ps = conn.prepareStatement(setLockWaitTimeOutStmt)) { - ps.execute(); - } - - String lockTblStmt = LOCK_TABLES + String.join(READ_MODE + ", ", phyTables) + READ_MODE; - try (PreparedStatement ps = conn.prepareStatement(lockTblStmt)) { - ps.execute(); - } - - } catch (SQLException e) { - /** - * DN timeout will throw SQLException - * */ - lockFailed = true; - if (StringUtils.containsIgnoreCase(e.getMessage(), "Lock wait timeout")) { - throw new TddlRuntimeException(ErrorCode.ERR_FAST_CHECKER, "FastChecker acquire lock timeout.", - e); - } else { - throw new TddlRuntimeException(ErrorCode.ERR_FAST_CHECKER, "FastChecker acquire lock failed.", - e); - } - } catch (TddlRuntimeException e) { - /** - * CN(tddl) timeout will throw TddlRuntimeException - * */ - lockFailed = true; - if (StringUtils.containsIgnoreCase(e.getMessage(), "Query timeout")) { - throw new TddlRuntimeException(ErrorCode.ERR_FAST_CHECKER, "FastChecker acquire lock timeout.", - e); - } else { - throw new TddlRuntimeException(ErrorCode.ERR_FAST_CHECKER, "FastChecker acquire lock failed.", - e); - } - } finally { - - try { - if (connOrignalLockWaitTimeout > -1 && conn != null) { - String recoverConnLockWaitTimeOutStmt = TABLE_LOCK_TIMEOUT + connOrignalLockWaitTimeout; - try (PreparedStatement ps = conn.prepareStatement(recoverConnLockWaitTimeOutStmt)) { - ps.execute(); - } - } - } catch (Throwable ex) { - logger.warn("Failed to recover connection lock wait timeout", ex); - } - - if (lockFailed) { - this.unlock(); - } - - } - }); - } - - public void unlock() { - lockConnections.forEach((phyDb, conn) -> { - try { - String statement = UNLOCK_TABLES; - PreparedStatement ps = conn.prepareStatement(statement); - ps.execute(); - } catch (Throwable e) { - logger.warn("Failed to exec unlock tables", e); - } - }); - - lockConnections.forEach((phyDb, conn) -> { - try { 
- conn.close(); - } catch (Throwable e) { - logger.warn("Failed to close locked connections", e); - } - }); - - lockConnections.clear(); - } - - private TGroupDataSource getDataSource(String phyDb) { - TopologyHandler topology = ExecutorContext.getContext(schemaName).getTopologyHandler(); - Object dataSource = topology.get(phyDb).getDataSource(); - if (dataSource != null && dataSource instanceof TGroupDataSource) { - return (TGroupDataSource) dataSource; - } - return null; - } - } - public void setDstPhyDbAndTables(Map> dstPhyDbAndTables) { this.dstPhyDbAndTables = dstPhyDbAndTables; } @@ -1391,18 +1089,29 @@ public void setSrcPhyDbAndTables(Map> srcPhyDbAndTables) { this.srcPhyDbAndTables = srcPhyDbAndTables; } - public static List getorderedPrimaryKeys(TableMeta tableMeta, ExecutionContext ec) { - final SchemaManager sm = ec.getSchemaManager(tableMeta.getSchemaName()); + public static List getorderedPrimaryKeys(TableMeta tableMeta) { List primaryKeys = ImmutableList - .copyOf((tableMeta.isHasPrimaryKey() ? tableMeta.getPrimaryIndex().getKeyColumns() : - tableMeta.getGsiImplicitPrimaryKey()) - .stream().map(ColumnMeta::getName).collect(Collectors.toList())); - if (GeneralUtil.isEmpty(primaryKeys) && tableMeta.isGsi()) { - String primaryTable = tableMeta.getGsiTableMetaBean().gsiMetaBean.tableName; - final TableMeta primaryTableMeta = sm.getTable(primaryTable); - primaryKeys = primaryTableMeta.getPrimaryIndex().getKeyColumns().stream().map(ColumnMeta::getName) - .collect(Collectors.toList()); - } + .copyOf( + (tableMeta.isHasPrimaryKey() ? tableMeta.getPrimaryIndex().getKeyColumns() : + new ArrayList()) + .stream().map(ColumnMeta::getName).collect(Collectors.toList()) + ); return primaryKeys; } + + /** + * map < groupName, storageInstId > + */ + private Map queryStorageInstIdByPhyGroup(Set groupName) { + try (Connection metaDbConn = MetaDbDataSource.getInstance().getConnection()) { + GroupDetailInfoAccessor groupDetailInfoAccessor = new GroupDetailInfoAccessor(); + groupDetailInfoAccessor.setConnection(metaDbConn); + return groupDetailInfoAccessor.getStorageInstMappingByOnlyGroupName( + ServerInstIdManager.getInstance().getMasterInstId(), + groupName + ); + } catch (Exception e) { + throw new TddlNestableRuntimeException("FastChecker query group-storageInstId info failed", e); + } + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/ColumnarManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/ColumnarManager.java new file mode 100644 index 000000000..49de01ac8 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/ColumnarManager.java @@ -0,0 +1,130 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.gms; + +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.chunk.IntegerBlock; +import com.alibaba.polardbx.optimizer.config.table.FileMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; +import org.roaringbitmap.RoaringBitmap; + +import java.util.List; + +/** + * Columnar store management for columnar instance. + */ +public interface ColumnarManager extends ColumnarSchemaTransformer, Purgeable { + + static ColumnarManager getInstance() { + return DynamicColumnarManager.getInstance(); + } + + /** + * Reload the columnar manager of current CN, clear all cache and snapshot + */ + void reload(); + + /** + * Get the latest tso at which the columnar snapshot is visible. + * + * @return latest tso value. + */ + long latestTso(); + + /** + * Set the latest tso to this node. + * + * @param tso latest tso + */ + void setLatestTso(long tso); + + /** + * Garbage collection for version management: + * 1. merge all old bitmaps before given tso + * 2. remove useless csv data cache + * 3. remove file metas of invisible files. + * + * @param tso before which purge the old version. + */ + void purge(long tso); + + /** + * Get all the visible file-metas at the version whose tso <= given tso + * In columnar mode, we only need file name, rather than the whole file meta + * So this API is only used for OSS cold-data compatibility + * + * @param tso may larger than latest tso. + * @param logicalTable logical table name. + * @param partName part name. + * @return Collection of file-metas: orc and csv + */ + @Deprecated + Pair, List> findFiles(long tso, String logicalSchema, String logicalTable, + String partName); + + /** + * Get all the visible files at the version whose tso <= given tso + * In columnar mode, we only need file name, rather than the whole file meta. + * + * @param tso may larger than latest tso. + * @param logicalTable logical table name. + * @param partName part name. + * @return Collection of file names + */ + Pair, List> findFileNames(long tso, String logicalSchema, String logicalTable, + String partName); + + /** + * Get csv data cache of given file name in snapshot of tso. + * + * @param tso may larger than latest tso. + * @param csvFileName csv file name. + * @return Collection of csv cache data (in format of chunk) + */ + List csvData(long tso, String csvFileName); + + default List rawCsvData(long tso, String csvFileName, ExecutionContext context) { + return null; + } + + /** + * Fill the given selection array according to snapshot of tso, file name and position block + * USED FOR OLD COLUMNAR TABLE SCAN ONLY + * + * @param fileName file name. + * @param tso may larger than latest tso. + * @param selection selection array. + * @param positionBlock position block (default column in columnar store). + * @return filled size of selection array. + */ + @Deprecated + int fillSelection(String fileName, long tso, int[] selection, IntegerBlock positionBlock); + + @Deprecated + int fillSelection(String fileName, long tso, int[] selection, LongColumnVector longColumnVector, int batchSize); + + /** + * Generate delete bitmap of given file name in snapshot of tso. 
+ * + * @param tso snapshot tso + * @param fileName file name of ORC/CSV + * @return copy of corresponding delete bitmap + */ + RoaringBitmap getDeleteBitMapOf(long tso, String fileName); +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/ColumnarSchemaTransformer.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/ColumnarSchemaTransformer.java new file mode 100644 index 000000000..77e8f5542 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/ColumnarSchemaTransformer.java @@ -0,0 +1,71 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.gms; + +import com.alibaba.polardbx.executor.archive.schemaevolution.ColumnMetaWithTs; +import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; +import com.alibaba.polardbx.optimizer.config.table.FileMeta; +import com.google.common.collect.ImmutableList; +import org.jetbrains.annotations.NotNull; + +import java.util.List; +import java.util.Map; +import java.util.Optional; + +public interface ColumnarSchemaTransformer { + /** + * @return The physical column index of the file from logical columnar table column index for certain tso + */ + @Deprecated + List getPhysicalColumnIndexes(long tso, String fileName, List columnIndexes); + + /** + * @return The physical column index map of the file from logical columnar table column index for certain field id + */ + Map getPhysicalColumnIndexes(String fileName); + + /** + * @param tso TSO + * @param logicalTable logical name of columnar table + * @return The sort key column index of logical table, start from 0 + */ + List getSortKeyColumns(long tso, String logicalSchema, String logicalTable); + + /** + * Get the physical column indexes of primary key for certain file, start from 1. + */ + int[] getPrimaryKeyColumns(String fileName); + + Optional fileNameOf(String logicalSchema, long tableId, String partName, int columnarFileId); + + FileMeta fileMetaOf(String fileName); + + @NotNull + List getColumnFieldIdList(long versionId, long tableId); + + @NotNull + List getColumnMetas(long schemaTso, String logicalSchema, String logicalTable); + + @NotNull + List getColumnMetas(long schemaTso, long tableId); + + @NotNull + Map getColumnIndex(long schemaTso, long tableId); + + @NotNull + ColumnMetaWithTs getInitColumnMeta(long tableId, long fieldId); +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/ColumnarStoreUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/ColumnarStoreUtils.java new file mode 100644 index 000000000..8bcf35a80 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/ColumnarStoreUtils.java @@ -0,0 +1,41 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.gms; + +public class ColumnarStoreUtils { + /** + * The default column index of `pk` (immutable). + */ + //public static final int PK_COLUMN_INDEX = 1; + /** + * The default column index of `tso` (immutable). + */ + public static final int TSO_COLUMN_INDEX = 0; + /** + * The default column index of `pk_tso` (immutable). + */ + //public static final int PK_TSO_COLUMN_INDEX = 3; + /** + * The default column index of `position` (immutable). + */ + public static final int POSITION_COLUMN_INDEX = 1; + + /** + * number of implicit column BEFORE real physical column + */ + public static final int IMPLICIT_COLUMN_CNT = 2; +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/DynamicColumnarManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/DynamicColumnarManager.java new file mode 100644 index 000000000..258afc83e --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/DynamicColumnarManager.java @@ -0,0 +1,609 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.gms; + +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.model.lifecycle.AbstractLifecycle; +import com.alibaba.polardbx.common.oss.ColumnarFileType; +import com.alibaba.polardbx.common.oss.OSSFileType; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.archive.schemaevolution.ColumnMetaWithTs; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.chunk.IntegerBlock; +import com.alibaba.polardbx.executor.gms.util.ColumnarTransactionUtils; +import com.alibaba.polardbx.gms.metadb.table.ColumnarAppendedFilesAccessor; +import com.alibaba.polardbx.gms.metadb.table.ColumnarAppendedFilesRecord; +import com.alibaba.polardbx.gms.metadb.table.ColumnarFileMappingAccessor; +import com.alibaba.polardbx.gms.metadb.table.ColumnarFileMappingRecord; +import com.alibaba.polardbx.gms.metadb.table.FilesAccessor; +import com.alibaba.polardbx.gms.metadb.table.FilesRecord; +import com.alibaba.polardbx.gms.util.MetaDbUtil; +import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; +import com.alibaba.polardbx.optimizer.config.table.FileMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.google.common.base.Preconditions; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import com.google.common.collect.ImmutableList; +import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; +import org.jetbrains.annotations.NotNull; +import org.roaringbitmap.RoaringBitmap; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Queue; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; + +/** + * The instructions of Dynamic-columnar-manager: + * 0. (Inner) Periodically fetch the newest tso from meta db. + * 1. Find columnar-partition-snapshot of certain tso. + * 2. Get file meta from file-meta-cache and columnar-snapshot-cache use findFileNames method. + * 3. Use oss read option to generate orc-task / csv-task + * 4. Use getDeleteBitMapOf method to generate delete bitmap for each task + */ +public class DynamicColumnarManager extends AbstractLifecycle implements ColumnarManager { + private static final Logger LOGGER = LoggerFactory.getLogger("COLUMNAR_TRANS"); + + private static final DynamicColumnarManager INSTANCE = new DynamicColumnarManager(); + + public static final int MAXIMUM_FILE_META_COUNT = 1 << 18; + public static final int MAXIMUM_SIZE_OF_SNAPSHOT_CACHE = 1 << 16; + + private FileVersionStorage versionStorage; + private final Queue filesToBePurged = new LinkedBlockingQueue<>(); + private final Object minTsoLock = new Object(); + private final Object latestTsoLock = new Object(); + /** + * Cache all file-meta by its file name. + */ + private LoadingCache fileMetaCache; + /** + * Cache all Multi-version snapshot by schema name and table id. 
+ */
+ private LoadingCache, MultiVersionColumnarSnapshot> snapshotCache;
+ /**
+ * columnar schema of each tso
+ */
+ private MultiVersionColumnarSchema columnarSchema;
+ /**
+ * Cache mapping: {partition-info, file-id} -> file-name
+ */
+ private LoadingCache, Optional> fileIdMapping;
+ /**
+ * cache append file last record: {tso, filename} -> {records}
+ */
+ private LoadingCache, List> appendFileRecordCache;
+ private final AtomicLong appendFileAccessCounter = new AtomicLong();
+ private volatile Long minTso;
+ private volatile Long latestTso;
+
+ public DynamicColumnarManager() {
+ }
+
+ public static DynamicColumnarManager getInstance() {
+ if (!INSTANCE.isInited()) {
+ synchronized (INSTANCE) {
+ if (!INSTANCE.isInited()) {
+ INSTANCE.init();
+ }
+ }
+ }
+ return INSTANCE;
+ }
+
+ public static List getLatestAppendFileRecord(String fileName, Long tso) {
+ try (Connection connection = MetaDbUtil.getConnection()) {
+ ColumnarAppendedFilesAccessor columnarAppendedFilesAccessor = new ColumnarAppendedFilesAccessor();
+ columnarAppendedFilesAccessor.setConnection(connection);
+ return columnarAppendedFilesAccessor.queryByFileNameAndMaxTso(fileName, tso);
+
+ } catch (SQLException e) {
+ throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, e,
+ "failed to fetch append file record with file: " + fileName + " and tso: " + tso);
+ }
+ }
+
+ public FileVersionStorage getVersionStorage() {
+ return versionStorage;
+ }
+
+ /**
+ * This method is for unit test ONLY
+ */
+ void injectForTest(FileVersionStorage versionStorage,
+ MultiVersionColumnarSchema multiVersionColumnarSchema) {
+ this.versionStorage = versionStorage;
+ this.versionStorage.open();
+ this.columnarSchema = multiVersionColumnarSchema;
+ }
+
+ @Override
+ protected void doInit() {
+ this.versionStorage = new FileVersionStorage(this);
+ this.versionStorage.open();
+ this.columnarSchema = new MultiVersionColumnarSchema(this);
+
+ // Build file meta cache
+ this.fileMetaCache = CacheBuilder.newBuilder()
+ .maximumSize(MAXIMUM_FILE_META_COUNT)
+ .build(
+ new CacheLoader() {
+ @Override
+ public FileMeta load(@NotNull String fileName) {
+ // start with an empty list so a metadb failure below cannot NPE on isEmpty()
+ List filesRecords = new ArrayList<>();
+ try (Connection connection = MetaDbUtil.getConnection()) {
+ FilesAccessor filesAccessor = new FilesAccessor();
+ filesAccessor.setConnection(connection);
+
+ // query meta db && filter table files.
+ filesRecords = filesAccessor
+ .queryColumnarByFileName(fileName)
+ .stream()
+ .filter(filesRecord -> OSSFileType.of(filesRecord.fileType) == OSSFileType.TABLE_FILE)
+ .collect(Collectors.toList());
+ } catch (SQLException e) {
+ // ignore and fall through with the empty list.
+ }
+
+ if (!filesRecords.isEmpty()) {
+ FileMeta fileMeta = FileMeta.parseFrom(filesRecords.get(0));
+
+ // fill with column meta.
+ List columnMetas =
+ getColumnMetas(fileMeta.getSchemaTs(),
+ Long.parseLong(fileMeta.getLogicalTableName()));
+ fileMeta.initColumnMetas(ColumnarStoreUtils.IMPLICIT_COLUMN_CNT, columnMetas);
+ return fileMeta;
+ }
+
+ // not found: Guava surfaces a null load result as an unchecked InvalidCacheLoadException
+ return null;
+ }
+ }
+ );
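
Each of these metadata caches follows the same Guava `LoadingCache` idiom: a bounded cache whose values are computed on first access by a `CacheLoader`, with `invalidate` used to drop an entry when a load fails. A self-contained sketch of that idiom (the key/value types and the loader body are placeholders, not the real metadb lookup):

```java
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

import java.util.concurrent.ExecutionException;

public final class MetaCacheSketch {
    private final LoadingCache<String, Long> cache = CacheBuilder.newBuilder()
        .maximumSize(1 << 18) // bounded, in the same style as MAXIMUM_FILE_META_COUNT
        .build(new CacheLoader<String, Long>() {
            @Override
            public Long load(String fileName) {
                // placeholder for the real metadb query; returning null here
                // would surface as CacheLoader.InvalidCacheLoadException
                return (long) fileName.hashCode();
            }
        });

    public Long metaOf(String fileName) {
        try {
            return cache.get(fileName);
        } catch (ExecutionException e) {
            // same invalidate-on-failure policy as the caches above
            cache.invalidate(fileName);
            throw new RuntimeException(e.getCause());
        }
    }
}
```
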
+
+ final DynamicColumnarManager self = this;
+ // Build snapshot cache
+ this.snapshotCache = CacheBuilder.newBuilder()
+ .maximumSize(MAXIMUM_SIZE_OF_SNAPSHOT_CACHE)
+ .build(new CacheLoader, MultiVersionColumnarSnapshot>() {
+ @Override
+ public MultiVersionColumnarSnapshot load(@NotNull Pair schemaAndTableId) {
+ return new MultiVersionColumnarSnapshot(
+ self, schemaAndTableId.getKey(), schemaAndTableId.getValue()
+ );
+ }
+ });
+
+ // Build file-id mapping cache
+ this.fileIdMapping = CacheBuilder.newBuilder()
+ .maximumSize(MAXIMUM_FILE_META_COUNT)
+ .build(new CacheLoader, Optional>() {
+ @Override
+ public Optional load(@NotNull Pair key) throws Exception {
+ Integer columnarFileId = key.getValue();
+ PartitionId partitionId = key.getKey();
+ String logicalSchema = partitionId.getLogicalSchema();
+ String tableId = String.valueOf(partitionId.getTableId());
+ String partName = partitionId.getPartName();
+
+ List records;
+ try (Connection connection = MetaDbUtil.getConnection()) {
+ ColumnarFileMappingAccessor accessor = new ColumnarFileMappingAccessor();
+ accessor.setConnection(connection);
+
+ records = accessor.queryByFileId(
+ logicalSchema, tableId, partName, columnarFileId
+ );
+ }
+
+ if (records != null && !records.isEmpty()) {
+ ColumnarFileMappingRecord record = records.get(0);
+ return Optional.of(record.getFileName());
+ }
+
+ return Optional.empty();
+ }
+ });
+
+ this.appendFileRecordCache = CacheBuilder.newBuilder()
+ .maximumSize(MAXIMUM_FILE_META_COUNT)
+ .build(new CacheLoader, List>() {
+ @Override
+ public List load(@NotNull Pair tsoAndFileName) {
+ return getLatestAppendFileRecord(tsoAndFileName.getValue(), tsoAndFileName.getKey());
+ }
+ });
+
+ LOGGER.info("Columnar Manager has been initialized");
+ }
+
+ @Override
+ public void purge(long tso) {
+ // Update inner state of version chain
+ if (minTso != null && minTso >= tso) {
+ return;
+ }
+
+ synchronized (minTsoLock) {
+ if (minTso != null && minTso >= tso) {
+ return;
+ }
+
+ // update min tso before physical purge to make it safe
+ minTso = tso;
+ // this could collect all files which should be purged
+ snapshotCache.asMap().values().forEach(snapshot -> snapshot.purge(tso));
+
+ for (String fileName = nextPurgedFile(); fileName != null; fileName = nextPurgedFile()) {
+ ColumnarFileType columnarFileType =
+ ColumnarFileType.of(fileName.substring(fileName.lastIndexOf('.') + 1));
+ fileMetaCache.invalidate(fileName);
+
+ if (columnarFileType.isDeltaFile()) {
+ versionStorage.purgeByFile(fileName);
+ }
+ }
+
+ versionStorage.purge(tso);
+ // TODO(siyun): purge when CCI is dropped
+
+ // columnar fetches purge signal by SHOW COLUMNAR OFFSET
+ }
+ }
+
+ public void putPurgedFile(String fileName) {
+ filesToBePurged.add(fileName);
+ }
+
+ private String nextPurgedFile() {
+ return filesToBePurged.poll();
+ }
+
+ @Override
+ public Pair, List> findFiles(long tso, String logicalSchema, String logicalTable,
+ String partName) {
+ if (tso == Long.MIN_VALUE) {
+ return Pair.of(ImmutableList.of(), ImmutableList.of());
+ }
+ try {
+ Long tableId = getTableId(tso, logicalSchema, logicalTable);
+ MultiVersionColumnarSnapshot.ColumnarSnapshot snapshot = snapshotCache.get(
+ Pair.of(logicalSchema, tableId)
+ ).generateSnapshot(partName, tso);
+
+ List csvFiles = new
ArrayList<>(); + List orcFiles = new ArrayList<>(); + + // fetch orc file metas + snapshot.getOrcFiles().stream().map( + this::fileMetaOf + ).forEach(orcFiles::add); + + // fetch csv file metas + snapshot.getCsvFiles().stream().map( + this::fileMetaOf + ).forEach(csvFiles::add); + + return Pair.of(orcFiles, csvFiles); + } catch (Throwable e) { + throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_SNAPSHOT, e, + String.format("Failed to generate columnar snapshot of tso: %d", tso)); + } + } + + @Override + public Pair, List> findFileNames(long tso, String logicalSchema, String logicalTable, + String partName) { + if (tso == Long.MIN_VALUE) { + return Pair.of(ImmutableList.of(), ImmutableList.of()); + } + try { + Long tableId = getTableId(tso, logicalSchema, logicalTable); + MultiVersionColumnarSnapshot.ColumnarSnapshot snapshot = snapshotCache.get( + Pair.of(logicalSchema, tableId) + ).generateSnapshot(partName, tso); + return Pair.of(snapshot.getOrcFiles(), snapshot.getCsvFiles()); + } catch (Throwable e) { + throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_SNAPSHOT, e, + String.format("Failed to generate columnar snapshot of tso: %d", tso)); + } + } + + public List delFileNames(long tso, String logicalSchema, String logicalTable, + String partName) { + if (tso == Long.MIN_VALUE) { + return ImmutableList.of(); + } + + try { + MultiVersionColumnarSnapshot.ColumnarSnapshot snapshot = snapshotCache.get( + Pair.of(logicalSchema, Long.valueOf(logicalTable)) + ).generateSnapshot(partName, tso); + + return snapshot.getDelFiles(); + } catch (Throwable e) { + throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_SNAPSHOT, e, + String.format("Failed to generate columnar snapshot of tso: %d", tso)); + } + } + + @Override + public List csvData(long tso, String csvFileName) { + appendFileAccessCounter.getAndIncrement(); + try { + List appendedFilesRecords = + appendFileRecordCache.get(Pair.of(tso, csvFileName)); + if (appendedFilesRecords == null || appendedFilesRecords.isEmpty()) { + return new ArrayList<>(); + } else { + Preconditions.checkArgument(appendedFilesRecords.size() == 1); + return versionStorage.csvData(appendedFilesRecords.get(0).checkpointTso, csvFileName); + } + } catch (Throwable t) { + throw new TddlRuntimeException(ErrorCode.ERR_LOAD_CSV_FILE, t, + String.format("Failed to load csv file, filename: %s, tso: %d", csvFileName, tso)); + } + } + + @Override + public List rawCsvData(long tso, String csvFileName, ExecutionContext context) { + appendFileAccessCounter.getAndIncrement(); + try { + List appendedFilesRecords = + appendFileRecordCache.get(Pair.of(tso, csvFileName)); + if (appendedFilesRecords == null || appendedFilesRecords.isEmpty()) { + return new ArrayList<>(); + } else { + Preconditions.checkArgument(appendedFilesRecords.size() == 1); + return versionStorage.csvRawOrcTypeData(appendedFilesRecords.get(0).checkpointTso, + csvFileName, context); + } + } catch (Throwable t) { + throw new TddlRuntimeException(ErrorCode.ERR_LOAD_CSV_FILE, t, + String.format("Failed to load csv file, filename: %s, tso: %d", csvFileName, tso)); + } + } + + @Override + public int fillSelection(String fileName, long tso, int[] selection, IntegerBlock positionBlock) { + return versionStorage.fillSelection(fileName, tso, selection, positionBlock); + } + + @Override + public int fillSelection(String fileName, long tso, int[] selection, LongColumnVector longColumnVector, + int batchSize) { + return versionStorage.fillSelection(fileName, tso, selection, longColumnVector, batchSize); + } + + @Override 
+ public Optional fileNameOf(String logicalSchema, long tableId, String partName, int columnarFileId) { + PartitionId partitionId = PartitionId.of(partName, logicalSchema, tableId); + try { + return fileIdMapping.get(Pair.of(partitionId, columnarFileId)); + } catch (ExecutionException e) { + fileIdMapping.invalidate(Pair.of(partitionId, columnarFileId)); + throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_SCHEMA, e.getCause(), + "Failed to fetch file id of partition: " + e.getCause().getMessage()); + } + } + + @Override + public FileMeta fileMetaOf(String fileName) { + try { + return fileMetaCache.get(fileName); + } catch (ExecutionException e) { + fileMetaCache.invalidate(fileName); + throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_SCHEMA, e.getCause(), + "Failed to fetch file meta of file, file name: " + fileName); + } + } + + @Override + public @NotNull List getColumnFieldIdList(long versionId, long tableId) { + return columnarSchema.getColumnFieldIdList(versionId, tableId); + } + + @Override + public @NotNull List getColumnMetas(long schemaTso, String logicalSchema, String logicalTable) { + return getColumnMetas(schemaTso, getTableId(schemaTso, logicalSchema, logicalTable)); + } + + @Override + public @NotNull List getColumnMetas(long schemaTso, long tableId) { + return columnarSchema.getColumnMetas(schemaTso, tableId); + } + + @Override + public @NotNull Map getColumnIndex(long schemaTso, long tableId) { + return columnarSchema.getColumnIndexMap(schemaTso, tableId); + } + + @Override + public @NotNull ColumnMetaWithTs getInitColumnMeta(long tableId, long fieldId) { + return columnarSchema.getInitColumnMeta(tableId, fieldId); + } + + @Override + public int[] getPrimaryKeyColumns(String fileName) { + FileMeta fileMeta = fileMetaOf(fileName); + long tableId = Long.parseLong(fileMeta.getLogicalTableName()); + long schemaTso = fileMeta.getSchemaTs(); + return columnarSchema.getPrimaryKeyColumns(schemaTso, tableId); + } + + @Override + public RoaringBitmap getDeleteBitMapOf(long tso, String fileName) { + FileMeta fileMeta = fileMetaOf(fileName); + return versionStorage.getDeleteBitMap(fileMeta, tso); + } + + @Override + public long latestTso() { + if (latestTso != null) { + return latestTso; + } + // Fetch the latest tso + synchronized (latestTsoLock) { + if (latestTso != null) { + return latestTso; + } + + Long gmsLatestTso = ColumnarTransactionUtils.getLatestTsoFromGms(); + latestTso = gmsLatestTso != null ? 
gmsLatestTso : Long.MIN_VALUE;
+ return latestTso;
+ }
+ }
+
+ @Override
+ public void setLatestTso(long tso) {
+ if (latestTso == null || latestTso < tso) {
+ synchronized (latestTsoLock) {
+ if (latestTso == null || latestTso < tso) {
+ latestTso = tso;
+ }
+ }
+ }
+ }
+
+ private Long getTableId(long tso, String logicalSchema, String logicalTable) {
+ try {
+ return columnarSchema.getTableId(tso, logicalSchema, logicalTable);
+ } catch (ExecutionException e) {
+ throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_SCHEMA, e.getCause(),
+ String.format("Failed to fetch table id, tso: %d, schema name: %s, table name: %s",
+ tso, logicalSchema, logicalTable));
+ }
+ }
+
+ public List generatePacket() {
+ return versionStorage.generatePacket();
+ }
+
+ @Override
+ protected void doDestroy() {
+ versionStorage.close();
+ }
+
+ @Override
+ public List getPhysicalColumnIndexes(long tso, String tableName, List columnIndexes) {
+ // TODO(siyun): NO MOCK
+ return columnIndexes.stream().map(index -> index + ColumnarStoreUtils.IMPLICIT_COLUMN_CNT)
+ .collect(Collectors.toList());
+ }
+
+ @Override
+ public Map getPhysicalColumnIndexes(String fileName) {
+ FileMeta fileMeta = fileMetaOf(fileName);
+ long tableId = Long.parseLong(fileMeta.getLogicalTableName());
+ long schemaTso = fileMeta.getSchemaTs();
+ return columnarSchema.getColumnIndexMap(schemaTso, tableId);
+ }
+
+ @Override
+ public List getSortKeyColumns(long tso, String logicalSchema, String logicalTable) {
+ try {
+ return columnarSchema.getSortKeyColumns(tso, logicalSchema, logicalTable);
+ } catch (ExecutionException e) {
+ throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_SCHEMA, e.getCause(),
+ String.format("Failed to fetch sort key info, tso: %d, schema name: %s, table name: %s",
+ tso, logicalSchema, logicalTable));
+ }
+ }
+
+ /**
+ * Get the cached low-water mark of the columnar snapshot tso. It is refreshed
+ * about once per minute and is used to optimize incremental file loading and purge.
+ *
+ * @return a lower bound of the low-water mark
+ */
+ @NotNull
+ public Long getMinTso() {
+ if (minTso == null) {
+ synchronized (minTsoLock) {
+ if (minTso == null) {
+ minTso = ColumnarTransactionUtils.getMinColumnarSnapshotTime();
+ }
+ }
+ }
+
+ return minTso;
+ }
+
+ /**
+ * Approximate number of entries in the append-file record cache.
+ */
+ public long getLoadedAppendFileCount() {
+ return appendFileRecordCache.size();
+ }
+
+ /**
+ * TODO: version count statistics are not collected yet; always returns 0 for now.
+ */
+ public long getLoadedVersionCount() {
+ return 0;
+ }
+
+ public long getAppendFileAccessCount() {
+ return appendFileAccessCounter.get();
+ }
+
+ @Override
+ public void reload() {
+ synchronized (minTsoLock) {
+ minTso = null;
+ synchronized (latestTsoLock) {
+ latestTso = null;
+ try {
+ snapshotCache.invalidateAll();
+ fileMetaCache.invalidateAll();
+ fileIdMapping.invalidateAll();
+ appendFileRecordCache.invalidateAll();
+ filesToBePurged.clear();
+ } catch (Throwable t) {
+ // ignore
+ }
+
+ try {
+ this.columnarSchema = new MultiVersionColumnarSchema(this);
+ } catch (Throwable t) {
+ // ignore
+ }
+
+ try {
+ this.versionStorage = new FileVersionStorage(this);
+ this.versionStorage.open();
+ } catch (Throwable t) {
+ // ignore
+ }
+ }
+ }
+
+ LOGGER.info("Columnar Manager has been reloaded");
+ }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/FileVersionStorage.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/FileVersionStorage.java
new file mode 100644
index 000000000..1a8355bbb
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/FileVersionStorage.java
@@ -0,0 +1,394 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ *
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.gms; + +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.chunk.IntegerBlock; +import com.alibaba.polardbx.executor.columnar.DeletionFileReader; +import com.alibaba.polardbx.executor.operator.spill.MemorySpillerFactory; +import com.alibaba.polardbx.executor.operator.spill.SpillerFactory; +import com.alibaba.polardbx.gms.metadb.table.ColumnarAppendedFilesRecord; +import com.alibaba.polardbx.optimizer.config.table.FileMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.memory.MemoryManager; +import com.alibaba.polardbx.optimizer.memory.MemoryPool; +import com.alibaba.polardbx.optimizer.memory.OperatorMemoryAllocatorCtx; +import com.alibaba.polardbx.optimizer.spill.SpillSpaceManager; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import com.google.common.cache.RemovalListener; +import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; +import org.jetbrains.annotations.NotNull; +import org.roaringbitmap.RoaringBitmap; + +import java.io.Closeable; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.SortedMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.Lock; +import java.util.stream.Collectors; + +import static com.alibaba.polardbx.executor.gms.DynamicColumnarManager.MAXIMUM_FILE_META_COUNT; + +/** + * Version management of columnar store. + * 1. Update the delta-state according to columnar-appended-files record with give tso in a lazy way. + * 2. Maintain the multi-version csv/del data. (build/purge/read) + */ +public class FileVersionStorage implements Closeable, Purgeable { + private static final Logger LOGGER = LoggerFactory.getLogger("COLUMNAR_TRANS"); + + public static final int CSV_CHUNK_LIMIT = 1000; + + private final DynamicColumnarManager columnarManager; + + /** + * Cache the csv cached chunks of all csv files. + * This cache is HEAVY + */ + private LoadingCache csvDataMap; + + /** + * Cache the bitmaps for all table files in this schema. 
+ * This cache is HEAVY + */ + private LoadingCache delDataMap; + + /** + * Maintain the already loaded tso for each partition + */ + private LoadingCache delDataTsoMap; + + private final AtomicLong hitCount = new AtomicLong(0); + private final AtomicLong missCount = new AtomicLong(0); + private final AtomicLong openedIncrementFileCount = new AtomicLong(0); + + public FileVersionStorage(DynamicColumnarManager columnarManager) { + this.columnarManager = columnarManager; + } + + public void open() { + // TODO(siyun): memory management + + this.csvDataMap = CacheBuilder.newBuilder() + .maximumSize(MAXIMUM_FILE_META_COUNT) + // TODO(siyun): support spill to disk while being evicted + .build(new CacheLoader() { + @Override + public MultiVersionCsvData load(@NotNull String key) { + return new MultiVersionCsvData(key, openedIncrementFileCount); + } + }); + + this.delDataMap = CacheBuilder.newBuilder() + // The deletion bitmap could NOT be directly invalidated, which may lead to inconsistency + // This cache can be invalidated by purge if the whole file is invisible at minTso + .build(new CacheLoader() { + @Override + public MultiVersionDelData load(@NotNull String key) { + return new MultiVersionDelData(); + } + }); + + this.delDataTsoMap = CacheBuilder.newBuilder() + // This cache can only be invalidated if the CCI is removed + .build((new CacheLoader() { + @Override + public MultiVersionDelPartitionInfo load(@NotNull PartitionId key) throws Exception { + return new MultiVersionDelPartitionInfo(); + } + })); + } + + public void purge(long tso, String logicalTable, String partName) { + + } + + /** + * Purge all cache below tso + */ + public void purge(long tso) { + for (MultiVersionDelPartitionInfo delPartitionInfo : delDataTsoMap.asMap().values()) { + delPartitionInfo.purge(tso); + } + + for (MultiVersionDelData delData : delDataMap.asMap().values()) { + delData.purge(tso); + } + + for (MultiVersionCsvData csvData : csvDataMap.asMap().values()) { + csvData.purge(tso); + } + } + + public void purgeByFile(String fileName) { + delDataMap.invalidate(fileName); + csvDataMap.invalidate(fileName); + } + + /** + * @param tso this tso must be taken from columnar_appended_files + */ + public List csvData(long tso, String csvFileName) { + MultiVersionCsvData data; + + try { + data = csvDataMap.get(csvFileName); + } catch (ExecutionException e) { + csvDataMap.invalidate(csvFileName); + throw new TddlRuntimeException(ErrorCode.ERR_LOAD_CSV_FILE, e.getCause(), + String.format("Failed to load csv file, filename: %s, tso: %d", csvFileName, tso)); + } + + // find csv cache whose tso <= given tso + SortedMap>> chunkSortedMap = data.getChunksWithTso(tso); + + if (chunkSortedMap != null && !chunkSortedMap.isEmpty()) { + hitCount.getAndIncrement(); + return chunkSortedMap.values().stream() + .flatMap(part -> part.getValue().stream()).collect(Collectors.toList()); + } + + // Case: csv cache missed + missCount.getAndIncrement(); + Lock writeLock = data.getLock(); + writeLock.lock(); + try { + data.loadUntilTso(columnarManager.getMinTso(), tso); + + return Objects.requireNonNull(data.getChunksWithTso(tso)).values().stream() + .flatMap(part -> part.getValue().stream()).collect(Collectors.toList()); + } finally { + writeLock.unlock(); + } + } + + /** + * Read data from csv into memory, never cache. 
+ * + * @param tso this tso must be taken from columnar_appended_files + * @return data in raw orc type: long, double, or byte array + */ + public List csvRawOrcTypeData(long tso, String csvFileName, ExecutionContext context) { + return MultiVersionCsvData.loadRawOrcTypeUntilTso(tso, openedIncrementFileCount, csvFileName, context); + } + + // Used for old columnar table scan + @Deprecated + public int fillSelection(String fileName, long tso, int[] selection, LongColumnVector longColumnVector, + int batchSize) { + try { + RoaringBitmap bitmap = delDataMap.get(fileName).buildDeleteBitMap(tso); + + int selSize = 0; + for (int index = 0; + index < batchSize && selSize < selection.length; + index++) { + // for each position value in position-block + // check all bitmaps in sorted-map + if (longColumnVector.isNull[index]) { + throw GeneralUtil.nestedException("The position vector cannot be null"); + } + int position = (int) longColumnVector.vector[index]; + + if (!bitmap.contains(position)) { + selection[selSize++] = index; + } + } + + return selSize; + } catch (ExecutionException e) { + throw GeneralUtil.nestedException(e); + } + + } + + // Used for old columnar table scan + @Deprecated + public int fillSelection(String fileName, long tso, int[] selection, IntegerBlock positionBlock) { + try { + RoaringBitmap bitmap = delDataMap.get(fileName).buildDeleteBitMap(tso); + + int selSize = 0; + for (int index = 0; + index < positionBlock.getPositionCount() && selSize < selection.length; + index++) { + // for each position value in position-block + // check all bitmaps in sorted-map + int position = positionBlock.getInt(index); + + if (!bitmap.contains(position)) { + selection[selSize++] = index; + } + } + + return selSize; + } catch (ExecutionException e) { + throw GeneralUtil.nestedException(e); + } + + } + + private RoaringBitmap buildDeleteBitMap(String fileName, long tso) { + try { + return delDataMap.get(fileName).buildDeleteBitMap(tso); + } catch (ExecutionException e) { + delDataMap.invalidate(fileName); + throw new TddlRuntimeException(ErrorCode.ERR_LOAD_DEL_FILE, e.getCause(), + String.format("Failed to load delete bitmap of certain file , file name: %s, tso: %d", fileName, tso)); + } + } + + protected void loadDeleteBitMapFromFile(DeletionFileReader fileReader, ColumnarAppendedFilesRecord record) { + // Deserialize progress. 
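The two fillSelection overloads above share one idea: walk the candidate row positions and keep only those absent from the deletion bitmap, recording the surviving indexes into the selection array. A stand-alone sketch of that filter using the same org.roaringbitmap API (`SelectionSketch` and its arrays are illustrative, not part of this patch):

```java
import org.roaringbitmap.RoaringBitmap;

// Sketch of bitmap-based selection filling: keep only row positions
// that are NOT marked deleted (assumes org.roaringbitmap on the classpath).
public class SelectionSketch {
    static int fillSelection(RoaringBitmap deleted, int[] positions, int[] selection) {
        int selSize = 0;
        for (int index = 0; index < positions.length && selSize < selection.length; index++) {
            if (!deleted.contains(positions[index])) {
                selection[selSize++] = index; // selection stores indexes, not positions
            }
        }
        return selSize;
    }

    public static void main(String[] args) {
        RoaringBitmap deleted = RoaringBitmap.bitmapOf(1, 3); // rows 1 and 3 are deleted
        int[] positions = {0, 1, 2, 3, 4};
        int[] selection = new int[positions.length];
        int n = fillSelection(deleted, positions, selection);
        for (int i = 0; i < n; i++) {
            System.out.print(selection[i] + " "); // prints: 0 2 4
        }
    }
}
```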
+ DeletionFileReader.DeletionEntry entry; + int endPosition = (int) (record.appendOffset + record.appendLength); + while (fileReader.position() < endPosition && (entry = fileReader.next()) != null) { + final int fileId = entry.getFileId(); + final long delTso = entry.getTso(); + final RoaringBitmap bitmap = entry.getBitmap(); + columnarManager.fileNameOf( + record.logicalSchema, Long.parseLong(record.logicalTable), record.partName, fileId) + .ifPresent(deletedFileName -> { + try { + delDataMap.get(deletedFileName).putNewTsoBitMap(delTso, bitmap); + } catch (ExecutionException e) { + throw new TddlRuntimeException(ErrorCode.ERR_LOAD_DEL_FILE, e, + String.format("Failed to load delete bitmap file, filename: %s, fileId: %d, tso: %d", + record.fileName, fileId, delTso)); + } + }); + } + } + + public RoaringBitmap getDeleteBitMap(FileMeta fileMeta, long tso) { + MultiVersionDelPartitionInfo delInfo; + PartitionId partitionId = PartitionId.of( + fileMeta.getPartitionName(), + fileMeta.getLogicalTableSchema(), + Long.valueOf(fileMeta.getLogicalTableName())); + try { + delInfo = this.delDataTsoMap.get(partitionId); + } catch (ExecutionException e) { + this.delDataTsoMap.invalidate(partitionId); + throw new TddlRuntimeException(ErrorCode.ERR_LOAD_DEL_FILE, e.getCause(), + String.format( + "Failed to load delete bitmap of certain partition, partition name: %s, schema name: %s, table name: %s, tso: %d", + partitionId.getPartName(), partitionId.getLogicalSchema(), partitionId.getTableId(), tso)); + } + + // cache hit + if (delInfo.getLastTso() >= tso) { + hitCount.getAndIncrement(); + return buildDeleteBitMap(fileMeta.getFileName(), tso); + } + + // cache miss + missCount.getAndIncrement(); + Lock writeLock = delInfo.getLock(); + writeLock.lock(); + + try { + delInfo.loadUntilTso( + fileMeta.getLogicalTableSchema(), + fileMeta.getLogicalTableName(), + fileMeta.getPartitionName(), + columnarManager.getMinTso(), + tso, + this::loadDeleteBitMapFromFile + ); + + return buildDeleteBitMap(fileMeta.getFileName(), tso); + } finally { + writeLock.unlock(); + } + } + + @Override + public void close() { + try { + + } catch (Throwable t) { + LOGGER.error("Fail to close the file version storage", t); + } + } + + private static final int COLUMNAR_VERSION_FIELDS = 11; + + public List generatePacket() { + // 1. tso + // 2. schema + // 3. table + // 4. part + // 5. csv-delta + // 6. csv-offset + // 7. csv-length + // 8. del-delta + // 9. del-offset + // 10. del-length + // 11. 
state + // TODO(siyun): "SHOW COLUMNAR VERSION command" + return new ArrayList<>(); + } + + /** + * TODO record max size here + */ + public long getMaxCacheSize() { + long total = 0; + if (csvDataMap != null) { + total += csvDataMap.size(); + } + if (delDataMap != null) { + total += delDataMap.size(); + } + return total; + } + + public long getUsedCacheSize() { + long total = 0; + if (csvDataMap != null) { + total += csvDataMap.size(); + } + if (delDataMap != null) { + total += delDataMap.size(); + } + return total; + } + + public long getHitCount() { + return hitCount.get(); + } + + public long getMissCount() { + return missCount.get(); + } + + public long getOpenedIncrementFileCount() { + return openedIncrementFileCount.get(); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/GenericObjectListListener.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/GenericObjectListListener.java index 3c2635fb3..124e3b7dc 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/GenericObjectListListener.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/GenericObjectListListener.java @@ -24,9 +24,11 @@ import com.alibaba.polardbx.gms.listener.impl.MetaDbConfigManager; import com.alibaba.polardbx.gms.listener.impl.MetaDbDataIdBuilder; import com.alibaba.polardbx.gms.metadb.record.SystemTableRecord; +import com.alibaba.polardbx.gms.metadb.table.TableNamesRecord; import com.alibaba.polardbx.gms.topology.ConfigListenerAccessor; -import com.alibaba.polardbx.gms.topology.ConfigListenerRecord; +import com.alibaba.polardbx.gms.topology.ConfigListenerDataIdRecord; import com.alibaba.polardbx.gms.util.MetaDbUtil; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; import java.sql.Connection; import java.sql.SQLException; @@ -69,12 +71,7 @@ public void onHandleConfig(String dataId, long newOpVersion) { unbindExpiredListeners(); } - /** - * Get visible/available records. - * - * @return Record list - */ - protected abstract List fetchRecords(); + protected abstract List fetchTablesName(); /** * Get an object data id. @@ -82,7 +79,7 @@ public void onHandleConfig(String dataId, long newOpVersion) { * @param record An object record * @return The object data id */ - protected abstract String getDataId(SystemTableRecord record); + protected abstract String getDataId(String tableSchema, String tableName); /** * Get the prefix of data ids in current schema. @@ -97,19 +94,20 @@ public void onHandleConfig(String dataId, long newOpVersion) { * @param record An object record * @return The object listener */ - protected abstract ConfigListener getObjectListener(SystemTableRecord record); + protected abstract ConfigListener getObjectListener(String tableSchema, String tableName); private void bindNewListeners(boolean isInit) { // Bind newly registered dataIds and object listeners. - List records = fetchRecords(); + List records = fetchTablesName(); if (records != null && records.size() > 0) { for (SystemTableRecord record : records) { - String objectDataId = getDataId(record); + TableNamesRecord tablesNameRecord = (TableNamesRecord) record; + String objectDataId = getDataId(schemaName, tablesNameRecord.tableName); if (isInit || !objectListeners.keySet().contains(objectDataId)) { // New a specific listener for the object. - ConfigListener objectListener = getObjectListener(record); + ConfigListener objectListener = getObjectListener(schemaName, tablesNameRecord.tableName); // Bind them to enable timed task. 
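The bindNewListeners refactor above now keys everything by table name: each table yields one dataId, and a listener is bound only when that dataId has not been seen before. A compact sketch of that idempotent registration (`ListenerRegistry`, `ConfigListener`, and the sample dataId string are hypothetical; the real dataId format comes from MetaDbDataIdBuilder):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Sketch of idempotent listener binding: one listener per dataId,
// binding only dataIds not registered before.
public class ListenerRegistry {
    interface ConfigListener { void onHandleConfig(String dataId, long version); }

    private final Map<String, ConfigListener> objectListeners = new ConcurrentHashMap<>();

    void bindIfAbsent(String dataId, ConfigListener listener) {
        // computeIfAbsent keeps the first binding, mirroring the !containsKey check above
        objectListeners.computeIfAbsent(dataId, id -> {
            System.out.println("bind " + id);
            return listener;
        });
    }

    public static void main(String[] args) {
        ListenerRegistry r = new ListenerRegistry();
        r.bindIfAbsent("meta.table.db1.t1", (id, v) -> {}); // hypothetical dataId, bound once
        r.bindIfAbsent("meta.table.db1.t1", (id, v) -> {}); // no-op, already bound
    }
}
```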
CONFIG_MANAGER.bindListener(objectDataId, objectListener); @@ -175,12 +173,10 @@ private Set fetchActiveDataIds(String dataIdPrefix) { accessor.setConnection(metaDbConn); Set activeDataIds = new HashSet<>(); - List records = accessor.getDataIdsByPrefix(dataIdPrefix); + List records = accessor.getDataIdsOnlyByPrefix(dataIdPrefix); - for (ConfigListenerRecord record : records) { - if (record.status == ConfigListenerRecord.DATA_ID_STATUS_NORMAL) { - activeDataIds.add(MetaDbDataIdBuilder.formatDataId(record.dataId)); - } + for (ConfigListenerDataIdRecord record : records) { + activeDataIds.add(MetaDbDataIdBuilder.formatDataId(record.dataId)); } return activeDataIds; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/GmsTableMetaManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/GmsTableMetaManager.java index 3ee539beb..f2a868413 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/GmsTableMetaManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/GmsTableMetaManager.java @@ -35,6 +35,8 @@ import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.archive.schemaevolution.OrcColumnManager; +import com.alibaba.polardbx.executor.common.ExecutorContext; +import com.alibaba.polardbx.executor.common.RecycleBin; import com.alibaba.polardbx.executor.common.StorageInfoManager; import com.alibaba.polardbx.executor.mdl.MdlContext; import com.alibaba.polardbx.executor.mdl.MdlDuration; @@ -48,6 +50,7 @@ import com.alibaba.polardbx.gms.metadb.foreign.ForeignColsRecord; import com.alibaba.polardbx.gms.metadb.foreign.ForeignRecord; import com.alibaba.polardbx.gms.metadb.table.ColumnStatus; +import com.alibaba.polardbx.gms.metadb.table.ColumnarTableMappingRecord; import com.alibaba.polardbx.gms.metadb.table.ColumnsRecord; import com.alibaba.polardbx.gms.metadb.table.IndexStatus; import com.alibaba.polardbx.gms.metadb.table.IndexesRecord; @@ -56,11 +59,10 @@ import com.alibaba.polardbx.gms.metadb.table.TablesExtRecord; import com.alibaba.polardbx.gms.metadb.table.TablesRecord; import com.alibaba.polardbx.gms.partition.TableLocalPartitionRecord; -import com.alibaba.polardbx.gms.partition.TablePartRecordInfoContext; import com.alibaba.polardbx.gms.partition.TablePartitionRecord; import com.alibaba.polardbx.gms.tablegroup.ComplexTaskOutlineRecord; -import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; import com.alibaba.polardbx.gms.topology.DbInfoManager; +import com.alibaba.polardbx.gms.topology.DbInfoRecord; import com.alibaba.polardbx.gms.util.MetaDbUtil; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; @@ -75,6 +77,7 @@ import com.alibaba.polardbx.optimizer.config.table.SchemaManager; import com.alibaba.polardbx.optimizer.config.table.TableColumnMeta; import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.optimizer.config.table.TruncateUtil; import com.alibaba.polardbx.optimizer.core.TddlRelDataTypeSystemImpl; import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; import com.alibaba.polardbx.optimizer.exception.TableNotFoundException; @@ -86,7 +89,6 @@ import com.alibaba.polardbx.optimizer.planmanager.PlanManager; import com.alibaba.polardbx.optimizer.rule.TddlRuleManager; import com.alibaba.polardbx.optimizer.sql.sql2rel.TddlSqlToRelConverter; -import 
com.alibaba.polardbx.optimizer.tablegroup.TableGroupInfoManager; import com.alibaba.polardbx.optimizer.tablegroup.TableGroupVersionManager; import com.alibaba.polardbx.optimizer.utils.SchemaVersionManager; import com.alibaba.polardbx.rpc.client.XSession; @@ -104,7 +106,6 @@ import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.sql.type.SqlTypeUtil; import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.collections.SetUtils; import org.apache.commons.lang.exception.ExceptionUtils; import javax.sql.DataSource; @@ -130,9 +131,9 @@ import java.util.Optional; import java.util.Set; import java.util.TreeMap; -import java.util.TreeSet; import java.util.concurrent.TimeUnit; import java.util.function.Function; +import java.util.function.Predicate; import java.util.stream.Collectors; import static com.alibaba.polardbx.common.constants.SequenceAttribute.AUTO_SEQ_PREFIX; @@ -256,11 +257,10 @@ public static Map fetchTableMeta(Connection metaDbConn, meta.setSchemaName(schemaName); DataSource dataSource = MetaDbDataSource.getInstance().getDataSource(); + final boolean lowerCaseTableNames = storage.isLowerCaseTableNames(); final GsiMetaManager gsiMetaManager = new GsiMetaManager(dataSource, schemaName); - meta.setTableColumnMeta(new TableColumnMeta(schemaName, origTableName, - meta.getColumnMultiWriteSourceColumnMeta(), - meta.getColumnMultiWriteTargetColumnMeta())); + meta.setTableColumnMeta(new TableColumnMeta(meta)); meta.setGsiTableMetaBean( gsiMetaManager.getTableMeta(origTableName, IndexStatus.ALL)); meta.setComplexTaskTableMetaBean( @@ -294,6 +294,28 @@ public static Map fetchTableMeta(Connection metaDbConn, // Load lock flag. locked = (meta.getPartitionInfo().getPartFlags() & TablePartitionRecord.FLAG_LOCK) != 0; } + + if (meta.isColumnar()) { + List cciMappingRecord = + tableInfoManager.queryColumnarTableMapping(schemaName, origTableName); + + if (!CollectionUtils.isEmpty(cciMappingRecord)) { + long latestVersionId = cciMappingRecord.get(0).latestVersionId; + long tableId = cciMappingRecord.get(0).tableId; + List allColumnsIncludingInvisible = + tableInfoManager.queryColumns(schemaName, origTableName); + List allFieldIdList = + ColumnarManager.getInstance().getColumnFieldIdList(latestVersionId, tableId); + List visibleFieldIdList = new ArrayList<>(); + for (int i = 0; i < allColumnsIncludingInvisible.size(); i++) { + ColumnsRecord columnsRecord = allColumnsIncludingInvisible.get(i); + if (columnsRecord.getStatus() != ColumnStatus.ABSENT.getValue()) { + visibleFieldIdList.add(allFieldIdList.get(i)); + } + } + meta.setColumnarFieldIdList(visibleFieldIdList); + } + } } // fetch file metas for oss engine. 
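The meta.isColumnar() branch above pairs each ColumnsRecord with its field id by list position and keeps only the ids whose column status is not ABSENT. A compact, Java 16+ sketch of that parallel-list filter (`Column` is a hypothetical stand-in for ColumnsRecord plus ColumnStatus):

```java
import java.util.ArrayList;
import java.util.List;

// Sketch of the visible-field-id filter used for columnar (CCI) tables:
// columns and fieldIds are parallel lists; keep field ids of visible columns.
public class VisibleFieldIds {
    record Column(String name, boolean absent) {} // stand-in for ColumnsRecord/ColumnStatus

    static List<Long> visibleFieldIds(List<Column> columns, List<Long> fieldIds) {
        List<Long> visible = new ArrayList<>();
        for (int i = 0; i < columns.size(); i++) {
            if (!columns.get(i).absent()) {
                visible.add(fieldIds.get(i)); // position i pairs the column with its field id
            }
        }
        return visible;
    }

    public static void main(String[] args) {
        List<Column> cols = List.of(new Column("a", false), new Column("b", true), new Column("c", false));
        System.out.println(visibleFieldIds(cols, List.of(11L, 22L, 33L))); // [11, 33]
    }
}
```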
@@ -317,6 +339,151 @@ public static Map fetchTableMeta(Connection metaDbConn, return metaMap; } + public static Map fetchTableMeta(Connection metaDbConn, + String schemaName, + String appName, + List logicalTableNameList, + TddlRuleManager rule, + StorageInfoManager storage, + boolean fetchPrimaryTableMetaOnly, + boolean includeInvisiableInfo) { + TableInfoManager tableInfoManager = new TableInfoManager(); + tableInfoManager.setConnection(metaDbConn); + + boolean locked = false; + Map metaMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); + + for (String logicalTableName : GeneralUtil.emptyIfNull(logicalTableNameList)) { + + TableMeta meta = null; + String origTableName = logicalTableName; + + TablesRecord tableRecord = tableInfoManager.queryTable(schemaName, logicalTableName, false); + + if (tableRecord == null) { + // Check if there is an ongoing RENAME TABLE operation, so search with new table name. + tableRecord = tableInfoManager.queryTable(schemaName, logicalTableName, true); + + // Use original table name to find column and index meta. + if (tableRecord != null) { + origTableName = tableRecord.tableName; + } + } + + if (tableRecord != null) { + List columnsRecords; + List columnMappingRecords; + List indexesRecords; + if (includeInvisiableInfo) { + columnsRecords = + tableInfoManager.queryColumns(schemaName, origTableName); + columnMappingRecords = + tableInfoManager.queryColumnMappings(schemaName, origTableName); + indexesRecords = + tableInfoManager.queryIndexes(schemaName, origTableName); + } else { + columnsRecords = + tableInfoManager.queryVisibleColumns(schemaName, origTableName); + columnMappingRecords = + tableInfoManager.queryColumnMappings(schemaName, origTableName); + indexesRecords = + tableInfoManager.queryVisibleIndexes(schemaName, origTableName); + } + final List referencedFkRecords = + tableInfoManager.queryReferencedForeignKeys(schemaName, origTableName); + final List fkRecords = + tableInfoManager.queryForeignKeys(schemaName, origTableName); + + meta = buildTableMeta(schemaName, tableRecord, columnsRecords, indexesRecords, columnMappingRecords, + fkRecords, referencedFkRecords, tableInfoManager, logicalTableName); + + if (meta != null && !fetchPrimaryTableMetaOnly) { + + meta.setSchemaName(schemaName); + DataSource dataSource = MetaDbDataSource.getInstance().getDataSource(); + final boolean lowerCaseTableNames = storage.isLowerCaseTableNames(); + final GsiMetaManager gsiMetaManager = + new GsiMetaManager(dataSource, schemaName); + meta.setTableColumnMeta(new TableColumnMeta(meta)); + meta.setGsiTableMetaBean( + gsiMetaManager.getTableMeta(origTableName, IndexStatus.ALL)); + meta.setComplexTaskTableMetaBean( + ComplexTaskMetaManager.getComplexTaskTableMetaBean(metaDbConn, schemaName, origTableName)); + boolean isNewPartDb = DbInfoManager.getInstance().isNewPartitionDb(schemaName); + if (isNewPartDb) { + loadNewestPartitionInfo(metaDbConn, + schemaName, logicalTableName, origTableName, rule, + tableInfoManager, meta); + if (meta.getPartitionInfo() != null) { + meta.setTableGroupDigestList(TableGroupVersionManager.getTableGroupDigestList( + meta.getPartitionInfo().getTableGroupId())); + } + } else { + meta.setSchemaDigestList(SchemaVersionManager.getSchemaDigestList(schemaName)); + } + // Get auto partition mark. + final TablesExtRecord extRecord = + tableInfoManager.queryTableExt(schemaName, origTableName, false); + if (extRecord != null) { + meta.setAutoPartition(extRecord.isAutoPartition()); + // Load lock flag. 
+ locked = extRecord.isLocked(); + } + + // Auto partition flag for new partition table. + if (meta.getPartitionInfo() != null) { + meta.setAutoPartition( + (meta.getPartitionInfo().getPartFlags() & TablePartitionRecord.FLAG_AUTO_PARTITION) + != 0); + // Load lock flag. + locked = (meta.getPartitionInfo().getPartFlags() & TablePartitionRecord.FLAG_LOCK) != 0; + } + + if (meta.isColumnar()) { + List cciMappingRecord = + tableInfoManager.queryColumnarTableMapping(schemaName, origTableName); + + if (!CollectionUtils.isEmpty(cciMappingRecord)) { + long latestVersionId = cciMappingRecord.get(0).latestVersionId; + long tableId = cciMappingRecord.get(0).tableId; + List allColumnsIncludingInvisible = + tableInfoManager.queryColumns(schemaName, origTableName); + List allFieldIdList = + ColumnarManager.getInstance().getColumnFieldIdList(latestVersionId, tableId); + List visibleFieldIdList = new ArrayList<>(); + for (int i = 0; i < allColumnsIncludingInvisible.size(); i++) { + ColumnsRecord columnsRecord = allColumnsIncludingInvisible.get(i); + if (columnsRecord.getStatus() != ColumnStatus.ABSENT.getValue()) { + visibleFieldIdList.add(allFieldIdList.get(i)); + } + } + meta.setColumnarFieldIdList(visibleFieldIdList); + } + } + } + + // fetch file metas for oss engine. + if (meta != null && meta.getPartitionInfo() != null && Engine.isFileStore(meta.getEngine())) { + Map>> fileMetaSet = + FileManager.INSTANCE.getFiles(meta); + meta.setFileMetaSet(fileMetaSet); + } + } + + metaMap.put(logicalTableName, meta); + + if (meta != null && Engine.isFileStore(meta.getEngine())) { + OrcColumnManager.getINSTANCE().rebuild(schemaName, logicalTableName); + } + } + + if (locked) { + throw new RuntimeException("Table `" + logicalTableNameList + "` has been locked by logical meta lock."); + } + return metaMap; + } + + @Override protected void doInit() { if (latestTables != null) { @@ -422,7 +589,6 @@ public void toNewVersionForTableGroup(String tableName, boolean allowTwoVersion) if (!isPartDb) { tonewversion(tableName); } else { - final TableGroupInfoManager tgm = OptimizerContext.getContext(schemaName).getTableGroupInfoManager(); final TableMeta tableMeta = gtm.getTableWithNull(tableName); if (tableMeta == null || tableMeta.getPartitionInfo() == null) { @@ -430,16 +596,7 @@ public void toNewVersionForTableGroup(String tableName, boolean allowTwoVersion) return; } - long tableGroupId = tableMeta.getPartitionInfo().getTableGroupId(); - TableGroupConfig tgConfig = tgm.getTableGroupConfigById(tableGroupId); - if (tgConfig != null) { - List tableNames = - GeneralUtil.emptyIfNull(tgConfig.getTables()).stream() - .map(TablePartRecordInfoContext::getTableName) - .collect(Collectors.toList()); - - toNewVersionInTrx(tableNames, -1, allowTwoVersion); - } + toNewVersionInTrx(Collections.singletonList(tableName), -1, allowTwoVersion); } } @@ -565,6 +722,8 @@ public static TableMeta buildTableMeta(String schemaName, TablesRecord tableReco hasPrimaryKey, TableStatus.convert(tableRecord.status), tableRecord.version, tableRecord.flag); res.setId(tableRecord.id); res.setEngine(Engine.of(tableRecord.engine)); + + res.setEncryption(TableMetaParser.parseEncryption(schemaName, tableRecord.createOptions)); if (Engine.isFileStore(res.getEngine())) { res.buildFileStoreMeta(columnMappingMap, columnMetaMap); } @@ -660,7 +819,8 @@ public static ColumnMeta buildColumnMeta(ColumnsRecord record, boolean nullable = "YES".equalsIgnoreCase(record.isNullable); String typeName = record.jdbcTypeName; - if 
(TStringUtil.startsWithIgnoreCase(record.columnType, "enum(")) { + if (TStringUtil.startsWithIgnoreCase(record.columnType, "enum(") || + TStringUtil.startsWithIgnoreCase(record.columnType, "set(")) { typeName = record.columnType; } @@ -693,7 +853,8 @@ public static ColumnMeta buildColumnMeta(ColumnsRecord record, new Field(tableName, columnName, record.collationName, extra, columnDefault, calciteDataType, autoIncrement, false); - return new ColumnMeta(tableName, columnName, null, field, ColumnStatus.convert(status), flag); + return new ColumnMeta(tableName, columnName, null, field, ColumnStatus.convert(status), flag, + record.getColumnMappingName()); } @@ -766,6 +927,39 @@ public TableMeta getTable(String tableName) { return table; } + @Override + public List getAllUserTables() { + List tableMetas = Lists.newArrayList(); + for (TableMeta table : latestTables.values()) { + String tableName = table.getTableName(); + // gsi + if (table.isGsi()) { + continue; + } + if (table.isColumnar()) { + continue; + } + // dual + if (tableName.equalsIgnoreCase(DUAL)) { + continue; + } + // recycle bin + if (RecycleBin.isRecyclebinTable(tableName)) { + continue; + } + // truncate tmp table + if (TruncateUtil.isTruncateTmpPrimaryTable(tableName)) { + continue; + } + //invisible + if (table.getStatus() != TableStatus.PUBLIC) { + continue; + } + tableMetas.add(table); + } + return tableMetas; + } + @Override public void putTable(String tableName, TableMeta tableMeta) { throw new UnsupportedOperationException(); @@ -880,9 +1074,7 @@ private List fetchTableMetas() { boolean locked = false; if (meta != null) { - meta.setTableColumnMeta(new TableColumnMeta(schemaName, origTableName, - meta.getColumnMultiWriteSourceColumnMeta(), - meta.getColumnMultiWriteTargetColumnMeta())); + meta.setTableColumnMeta(new TableColumnMeta(meta)); meta.setGsiTableMetaBean( gsiMetaManager.initTableMeta(origTableName, indexRecordsTableMap.get(origTableName), @@ -937,6 +1129,28 @@ private List fetchTableMetas() { locked = (meta.getPartitionInfo().getPartFlags() & TablePartitionRecord.FLAG_LOCK) != 0; } + if (meta.isColumnar()) { + List cciMappingRecord = + tableInfoManager.queryColumnarTableMapping(schemaName, origTableName); + + if (!CollectionUtils.isEmpty(cciMappingRecord)) { + long latestVersionId = cciMappingRecord.get(0).latestVersionId; + long tableId = cciMappingRecord.get(0).tableId; + List allColumnsIncludingInvisible = + tableInfoManager.queryColumns(schemaName, origTableName); + List allFieldIdList = + ColumnarManager.getInstance().getColumnFieldIdList(latestVersionId, tableId); + List visibleFieldIdList = new ArrayList<>(); + for (int i = 0; i < allColumnsIncludingInvisible.size(); i++) { + ColumnsRecord columnsRecord = allColumnsIncludingInvisible.get(i); + if (columnsRecord.getStatus() != ColumnStatus.ABSENT.getValue()) { + visibleFieldIdList.add(allFieldIdList.get(i)); + } + } + meta.setColumnarFieldIdList(visibleFieldIdList); + } + } + } else { logger.error( "Table `" + origTableName + "` build meta error."); @@ -1083,10 +1297,6 @@ protected void loadAndCacheTableMeta(List tableNames, Connection metaDbC if (meta == null) { latestTables.remove(tableName); } else { - boolean modifyPrimaryKey = false; - TreeSet pkSet = new TreeSet<>(String::compareToIgnoreCase); - pkSet.addAll(meta.getPrimaryKeyMap().keySet()); - //create/alter table meta.setSchemaName(schemaName); latestTables.put(tableName, meta); @@ -1105,20 +1315,9 @@ protected void loadAndCacheTableMeta(List tableNames, Connection metaDbC if (!isNewPartition) { 
TableRuleManager.reload(schemaName, index.indexName); } - - // for modify partition key - if (!indexTableMeta.hasGsiImplicitPrimaryKey()) { - TreeSet gsiPkSet = new TreeSet<>(String::compareToIgnoreCase); - gsiPkSet.addAll(indexTableMeta.getPrimaryKeyMap().keySet()); - if (!SetUtils.isEqualSet(pkSet, gsiPkSet)) { - modifyPrimaryKey = true; - } - } } } } - - meta.getTableColumnMeta().setModifyPartitionKey(modifyPrimaryKey); } } } @@ -1191,7 +1390,8 @@ private void tonewversionImpl(List tableNameList, metaDbConn); } else { newSchemaManager = - new GmsTableMetaManager(oldSchemaManager, ImmutableList.of(tableName), rule, metaDbConn); + new GmsTableMetaManager(oldSchemaManager, ImmutableList.of(tableName), rule, + metaDbConn); } newSchemaManager.init(); @@ -1201,7 +1401,8 @@ private void tonewversionImpl(List tableNameList, } SQLRecorderLogger.ddlMetaLogger.info(MessageFormat.format( "{0} reload table metas for [{1}]: since meta version of table {2} change from {3} to {4}", - String.valueOf(System.identityHashCode(newSchemaManager)), tableNameList, tableName, oldVersion, + String.valueOf(System.identityHashCode(newSchemaManager)), tableNameList, tableName, + oldVersion, newVersion)); } finally { @@ -1424,7 +1625,7 @@ public GsiMetaManager.GsiMetaBean getGsi(String primaryOrIndexTableName, } @Override - public Set guessGsi(String unwrappedName) { + public Set guessGsi(String unwrappedName, Predicate filter) { DataSource dataSource = MetaDbDataSource.getInstance().getDataSource(); final GsiMetaManager gsiMetaManager = new GsiMetaManager(dataSource, schemaName); @@ -1432,8 +1633,9 @@ public Set guessGsi(String unwrappedName) { final Set gsi = new HashSet<>(); for (GsiMetaManager.GsiTableMetaBean bean : meta.getTableMeta().values()) { - if (bean.gsiMetaBean != null && TddlSqlToRelConverter.unwrapGsiName(bean.gsiMetaBean.indexName) - .equalsIgnoreCase(unwrappedName)) { + if (bean.gsiMetaBean != null + && filter.test(bean.gsiMetaBean) + && TddlSqlToRelConverter.unwrapGsiName(bean.gsiMetaBean.indexName).equalsIgnoreCase(unwrappedName)) { gsi.add(bean.gsiMetaBean.indexName); } } @@ -1575,6 +1777,20 @@ private static Map fetchColumnType(Connection conn, String actua return specialType; } + public static TableMeta fetchTableMeta(Connection metaDbConn, + String schemaName, + String appName, + String logicalTableName, + TddlRuleManager rule, + StorageInfoManager storage, + boolean fetchPrimaryTableMetaOnly, + boolean includeInvisiableInfo) { + return MetaDbUtil.queryMetaDbWrapper(metaDbConn, (conn) -> { + return fetchTableMeta(conn, schemaName, appName, Arrays.asList(logicalTableName), rule, storage, + fetchPrimaryTableMetaOnly, includeInvisiableInfo).get(logicalTableName); + }); + } + private static TableMeta fetchTableMeta(String schemaName, Connection conn, String actualTableName, String logicalTableName, Map collationType, Map specialType, diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/MultiVersionColumnarSchema.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/MultiVersionColumnarSchema.java new file mode 100644 index 000000000..a41a3083e --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/MultiVersionColumnarSchema.java @@ -0,0 +1,255 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.gms; + +import com.alibaba.polardbx.common.exception.NotSupportException; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.executor.archive.schemaevolution.ColumnMetaWithTs; +import com.alibaba.polardbx.gms.metadb.table.ColumnarTableMappingAccessor; +import com.alibaba.polardbx.gms.metadb.table.ColumnarTableMappingRecord; +import com.alibaba.polardbx.gms.metadb.table.ColumnsRecord; +import com.alibaba.polardbx.gms.metadb.table.IndexesAccessor; +import com.alibaba.polardbx.gms.metadb.table.IndexesRecord; +import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; +import com.alibaba.polardbx.gms.util.MetaDbUtil; +import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import org.jetbrains.annotations.NotNull; + +import java.sql.Connection; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.locks.Lock; +import java.util.stream.Collectors; + +import static com.alibaba.polardbx.executor.gms.DynamicColumnarManager.MAXIMUM_SIZE_OF_SNAPSHOT_CACHE; + +public class MultiVersionColumnarSchema implements Purgeable { + + private final DynamicColumnarManager columnarManager; + + /** + * Cache mapping from logical table name to table id + */ + private final LoadingCache, Long> tableMappingCache; + + /** + * Cache mapping from table id to multi-version columnar table meta + */ + private final LoadingCache columnarTableMetas; + + /** + * Cache indexes column of logical table + */ + private final LoadingCache, List> indexesColumnCache; + + public MultiVersionColumnarSchema(DynamicColumnarManager columnarManager) { + this.columnarManager = columnarManager; + + this.columnarTableMetas = CacheBuilder.newBuilder() + .maximumSize(MAXIMUM_SIZE_OF_SNAPSHOT_CACHE) + .build(new CacheLoader() { + @Override + public MultiVersionColumnarTableMeta load(@NotNull Long tableId) { + return new MultiVersionColumnarTableMeta(tableId); + } + }); + + this.tableMappingCache = CacheBuilder.newBuilder() + .maximumSize(MAXIMUM_SIZE_OF_SNAPSHOT_CACHE) + .build(new CacheLoader, Long>() { + @Override + public Long load(@NotNull Pair key) throws Exception { + String logicalSchema = key.getKey(); + String logicalTableName = key.getValue(); + + List records; + try (Connection connection = MetaDbUtil.getConnection()) { + ColumnarTableMappingAccessor accessor = new ColumnarTableMappingAccessor(); + accessor.setConnection(connection); + + // TODO(siyun): hack now, using newest table mapping + records = accessor.querySchemaIndex(logicalSchema, logicalTableName); + } + + if (records != null && !records.isEmpty()) { + ColumnarTableMappingRecord record = records.get(0); + return record.tableId; + } + + return null; + } + }); + + this.indexesColumnCache = CacheBuilder.newBuilder() + 
.maximumSize(MAXIMUM_SIZE_OF_SNAPSHOT_CACHE) + .build(new CacheLoader, List>() { + @Override + public List load(@NotNull Pair key) throws Exception { + String logicalSchema = key.getKey(); + String indexName = key.getValue(); + try (Connection connection = MetaDbUtil.getConnection()) { + IndexesAccessor indexesAccessor = new IndexesAccessor(); + indexesAccessor.setConnection(connection); + List indexRecords = + indexesAccessor.queryColumnarIndexColumnsByName(logicalSchema, indexName); + TableInfoManager tableInfoManager = new TableInfoManager(); + tableInfoManager.setConnection(connection); + List columnsRecords = + tableInfoManager.queryVisibleColumns(logicalSchema, indexName); + return indexRecords.stream().map(indexesRecord -> { + for (int i = 0; i < columnsRecords.size(); i++) { + if (indexesRecord.columnName.equalsIgnoreCase(columnsRecords.get(i).columnName)) { + return i; + } + } + return -1; + }).collect(Collectors.toList()); + } + } + }); + } + + public List getSortKeyColumns(long tso, String logicalSchema, String logicalTable) + throws ExecutionException { + return indexesColumnCache.get(Pair.of(logicalSchema, logicalTable)); + } + + public Long getTableId(long tso, String logicalSchema, String logicalTable) throws ExecutionException { + return tableMappingCache.get(Pair.of(logicalSchema, logicalTable)); + } + + private MultiVersionColumnarTableMeta getColumnarTableMeta(long tableId) { + try { + return columnarTableMetas.get(tableId); + } catch (ExecutionException e) { + columnarTableMetas.invalidate(tableId); + throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_SCHEMA, e.getCause(), + String.format("Failed to fetch column meta of table, table id: %d", tableId)); + } + } + + @NotNull + public List getColumnMetas(long schemaTso, long tableId) { + MultiVersionColumnarTableMeta columnarTableMeta = getColumnarTableMeta(tableId); + + List columnMetas = columnarTableMeta.getColumnMetaListByTso(schemaTso); + if (columnMetas != null) { + return columnMetas; + } + + // Case: column meta cache missed + Lock writeLock = columnarTableMeta.getLock(); + writeLock.lock(); + try { + columnarTableMeta.loadUntilTso(schemaTso); + return Objects.requireNonNull(columnarTableMeta.getColumnMetaListByTso(schemaTso)); + } finally { + writeLock.unlock(); + } + } + + @NotNull + public Map getColumnIndexMap(long schemaTso, long tableId) { + MultiVersionColumnarTableMeta columnarTableMeta = getColumnarTableMeta(tableId); + + Map columnIndex = columnarTableMeta.getFieldIdMapByTso(schemaTso); + if (columnIndex != null) { + return columnIndex; + } + + // Case: column cache missed + Lock writeLock = columnarTableMeta.getLock(); + writeLock.lock(); + try { + columnarTableMeta.loadUntilTso(schemaTso); + return Objects.requireNonNull(columnarTableMeta.getFieldIdMapByTso(schemaTso)); + } finally { + writeLock.unlock(); + } + } + + @NotNull + public List getColumnFieldIdList(long versionId, long tableId) { + MultiVersionColumnarTableMeta columnarTableMeta = getColumnarTableMeta(tableId); + + List fieldIdList = columnarTableMeta.getColumnFieldIdList(versionId); + if (fieldIdList != null) { + return fieldIdList; + } + + // Case:columns cache missed + Lock writeLock = columnarTableMeta.getLock(); + writeLock.lock(); + try { + columnarTableMeta.loadUntilTso(Long.MAX_VALUE); + return Objects.requireNonNull(columnarTableMeta.getColumnFieldIdList(versionId)); + } finally { + writeLock.unlock(); + } + } + + @NotNull + public ColumnMetaWithTs getInitColumnMeta(long tableId, long fieldId) { + MultiVersionColumnarTableMeta 
columnarTableMeta = getColumnarTableMeta(tableId); + + ColumnMetaWithTs columnMeta = columnarTableMeta.getInitColumnMeta(fieldId); + if (columnMeta != null) { + return columnMeta; + } + + // Case: default column meta cache missed + Lock writeLock = columnarTableMeta.getLock(); + writeLock.lock(); + try { + columnarTableMeta.loadUntilTso(Long.MAX_VALUE); + return Objects.requireNonNull(columnarTableMeta.getInitColumnMeta(fieldId)); + } finally { + writeLock.unlock(); + } + } + + public int @NotNull [] getPrimaryKeyColumns(long schemaTso, long tableId) { + MultiVersionColumnarTableMeta columnarTableMeta = getColumnarTableMeta(tableId); + + int[] primaryKeyColumns = columnarTableMeta.getPrimaryKeyColumns(schemaTso); + if (primaryKeyColumns != null) { + return primaryKeyColumns; + } + + Lock writeLock = columnarTableMeta.getLock(); + writeLock.lock(); + try { + columnarTableMeta.loadUntilTso(Long.MAX_VALUE); + return Objects.requireNonNull(columnarTableMeta.getPrimaryKeyColumns(schemaTso)); + } finally { + writeLock.unlock(); + } + } + + @Override + public void purge(long tso) { + // TODO(siyun): + throw new NotSupportException("purge columnar schema not supported now!"); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/MultiVersionColumnarSnapshot.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/MultiVersionColumnarSnapshot.java new file mode 100644 index 000000000..52896bd9d --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/MultiVersionColumnarSnapshot.java @@ -0,0 +1,242 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.gms; + +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.oss.ColumnarFileType; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.mpp.split.SplitManagerImpl; +import com.alibaba.polardbx.gms.metadb.table.FilesAccessor; +import com.alibaba.polardbx.gms.metadb.table.FilesRecordSimplified; +import com.alibaba.polardbx.gms.util.MetaDbUtil; +import lombok.Data; + +import java.sql.Connection; +import java.sql.SQLException; +import java.text.MessageFormat; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * Multi-version columnar snapshot of certain partition + * Can provide a snapshot of certain tso + */ +public class MultiVersionColumnarSnapshot implements Purgeable { + private static final Logger SPLIT_MANAGER_LOGGER = LoggerFactory.getLogger(SplitManagerImpl.class); + + final private String logicalSchema; + final private Long tableId; + + /** + * In the range of [minTso, latestTso], all files multi-version info of the partition have been loaded. + */ + private final DynamicColumnarManager columnarManager; + private volatile long latestTso = Long.MIN_VALUE; + + /** + * part_name -> (file_name -> (commit_ts, remove_ts)) + */ + private final Map>> allPartsTsoInfo = new ConcurrentHashMap<>(); + + /** + * part_name -> latest_tso + */ + private final Map latestTsoForEachPart = new ConcurrentHashMap<>(); + + public MultiVersionColumnarSnapshot(DynamicColumnarManager columnarManager, String logicalSchema, Long tableId) { + this.columnarManager = columnarManager; + this.logicalSchema = logicalSchema; + this.tableId = tableId; + } + + /** + * The snapshot of all visible orc files and csv files in given tso. + */ + @Data + public static class ColumnarSnapshot { + private final List orcFiles; + private final List csvFiles; + private final List delFiles; + + public ColumnarSnapshot() { + this.orcFiles = new ArrayList<>(); + this.csvFiles = new ArrayList<>(); + this.delFiles = new ArrayList<>(); + } + } + + /** + * Fetch delta files which satisfy: + * lastTso < commitTso <= tso AND minTso < removeTso + * OR + * lastTso < removeTso <= tso AND commitTso <= lastTso + * * 01234567890123456789 + * F1: |-------| + * F2: |-------| + * F3: |--------| + * F4: |-------------...(no removeTso) + * F5: |---| + * F6: |--| + * * 7 11 15 + * * | | | + * * | minTso| + * * latestTso | + * * tso + * In this case minTso = 9, lastTso = 7, tso = 11, we should fetch F1, F2, F4 and F5 + * After updating the tso info using these filesRecords, we can generate any snapshots in [minTso, tso] + */ + private void loadUntilTso(long tso, long minTso) { + List filesRecords = loadDeltaFilesInfoFromGms(latestTso, tso); + + for (FilesRecordSimplified fileRecord : filesRecords) { + String fileName = fileRecord.fileName; + String partName = fileRecord.partitionName; + Long commitTs = fileRecord.commitTs; + Long removeTs = fileRecord.removeTs; + + Map> tsoInfo = + allPartsTsoInfo.computeIfAbsent(partName, s -> new ConcurrentHashMap<>()); + + long curLatestTso = removeTs != null ? 
removeTs : commitTs; + + latestTsoForEachPart.compute(partName, (p, latestTsoForThisPart) -> { + if (latestTsoForThisPart == null) { + return curLatestTso; + } else { + return Math.max(latestTsoForThisPart, curLatestTso); + } + }); + + if (tsoInfo.containsKey(fileName)) { + if (removeTs == null) { + continue; + } + + if (removeTs <= minTso) { + tsoInfo.remove(fileName); + columnarManager.putPurgedFile(fileName); + } else { + tsoInfo.put(fileName, Pair.of(commitTs, removeTs)); + } + } else { + if (removeTs == null || removeTs > minTso) { + tsoInfo.put(fileName, Pair.of(commitTs, removeTs)); + } + } + } + latestTso = latestTsoForEachPart.values().stream().reduce(Long::min).orElse(Long.MIN_VALUE); + } + + private List loadDeltaFilesInfoFromGms(long lastTso, long tso) { + try (Connection connection = MetaDbUtil.getConnection()) { + FilesAccessor filesAccessor = new FilesAccessor(); + filesAccessor.setConnection(connection); + + return filesAccessor + .queryColumnarDeltaFilesByTsoAndTableId(tso, lastTso, logicalSchema, String.valueOf(tableId)); + } catch (SQLException e) { + throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_SNAPSHOT, + String.format("Failed to generate columnar snapshot, tso: %d, schema: %s, tableId: %d", + tso, logicalSchema, tableId)); + } + } + + public ColumnarSnapshot generateSnapshot(final String partitionName, final long tso) { + long ioCost = 0L; + long totalCost = 0L; + long startTime = System.nanoTime(); + long latestTsoBackUp = latestTso; + + long minTso = columnarManager.getMinTso(); + if (tso < minTso) { + throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_SNAPSHOT, + String.format("Snapshot of tso[%d] has been purged!", tso)); + } + + if (latestTso < tso) { + long startIOTime = System.nanoTime(); + synchronized (this) { + // In case the tso has not been loaded + // Assuming that the tso is reliable + if (latestTso < tso) { + loadUntilTso(tso, minTso); + } + } + ioCost = System.nanoTime() - startIOTime; + } + + ColumnarSnapshot snapshot = new ColumnarSnapshot(); + Map> tsoInfo = + allPartsTsoInfo.computeIfAbsent(partitionName, s -> new ConcurrentHashMap<>()); + tsoInfo.forEach((fileName, commitAndRemoveTs) -> { + Long commitTs = commitAndRemoveTs.getKey(); + Long removeTs = commitAndRemoveTs.getValue(); + + if (commitTs <= tso && (removeTs == null || removeTs > tso)) { + String suffix = fileName.substring(fileName.lastIndexOf('.') + 1); + ColumnarFileType columnarFileType = ColumnarFileType.of(suffix); + + switch (columnarFileType) { + case ORC: + snapshot.getOrcFiles().add(fileName); + break; + case CSV: + snapshot.getCsvFiles().add(fileName); + break; + case DEL: + snapshot.getDelFiles().add(fileName); + break; + case SET: + default: + // ignore. 
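generateSnapshot's visibility rule is the core of the MVCC file view: a file belongs to the snapshot at tso iff commitTs <= tso and (removeTs is null or removeTs > tso), and visible files are then routed by suffix into orc/csv/del buckets. A tiny sketch of that predicate and routing (Java 16+; `FileVersion` is a hypothetical stand-in, with a nullable `Long` for removeTs):

```java
import java.util.List;

// Sketch of the snapshot visibility predicate used by generateSnapshot:
// a file is visible at `tso` iff it was committed at or before `tso`
// and not yet removed at `tso` (removeTs == null means still live).
public class SnapshotVisibility {
    record FileVersion(String name, long commitTs, Long removeTs) {}

    static boolean visibleAt(FileVersion f, long tso) {
        return f.commitTs() <= tso && (f.removeTs() == null || f.removeTs() > tso);
    }

    public static void main(String[] args) {
        List<FileVersion> files = List.of(
            new FileVersion("a.orc", 5, 9L),   // removed at tso 9
            new FileVersion("b.csv", 7, null), // still live
            new FileVersion("c.del", 12, null) // committed after our tso
        );
        long tso = 8;
        for (FileVersion f : files) {
            String suffix = f.name().substring(f.name().lastIndexOf('.') + 1);
            if (visibleAt(f, tso)) {
                // routes into the orc/csv/del buckets of the snapshot
                System.out.println(suffix.toUpperCase() + " visible: " + f.name());
            }
        }
    }
}
```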
+ } + } + }); + + totalCost = System.nanoTime() - startTime; + + if (SPLIT_MANAGER_LOGGER.isDebugEnabled()) { + SPLIT_MANAGER_LOGGER.debug(MessageFormat.format("generateSnapshot for " + + "tableId = {0}, partName = {1}, " + + "tso = {2}, lastTso = {3}, " + + "totalCost = {4}, ioCost = {5}", + tableId, partitionName, tso, latestTsoBackUp, totalCost, ioCost + )); + } + + return snapshot; + } + + public synchronized void purge(long tso) { + allPartsTsoInfo.forEach((partName, snapshot) -> { + snapshot.entrySet().removeIf(entry -> { + Long removeTs = entry.getValue().getValue(); + if (removeTs != null && removeTs <= tso) { + columnarManager.putPurgedFile(entry.getKey()); + return true; + } else { + return false; + } + }); + }); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/MultiVersionColumnarTableMeta.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/MultiVersionColumnarTableMeta.java new file mode 100644 index 000000000..7d3479874 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/MultiVersionColumnarTableMeta.java @@ -0,0 +1,305 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.gms; + +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.archive.schemaevolution.ColumnMetaWithTs; +import com.alibaba.polardbx.gms.metadb.table.ColumnarColumnEvolutionAccessor; +import com.alibaba.polardbx.gms.metadb.table.ColumnarColumnEvolutionRecord; +import com.alibaba.polardbx.gms.metadb.table.ColumnarTableEvolutionAccessor; +import com.alibaba.polardbx.gms.metadb.table.ColumnarTableEvolutionRecord; +import com.alibaba.polardbx.gms.metadb.table.ColumnsRecord; +import com.alibaba.polardbx.gms.util.MetaDbUtil; +import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; +import com.alibaba.polardbx.optimizer.config.table.Field; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import org.apache.calcite.sql.type.SqlTypeName; +import org.eclipse.jetty.util.ConcurrentHashSet; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.ConcurrentSkipListSet; +import java.util.concurrent.ExecutionException; 
+import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static com.alibaba.polardbx.optimizer.config.table.OrcMetaUtils.TYPE_FACTORY; + +public class MultiVersionColumnarTableMeta implements Purgeable { + private static final Logger LOGGER = LoggerFactory.getLogger("COLUMNAR_TRANS"); + // this size is based on columnar schema compaction frequency + private static final int MAX_VERSION_SCHEMA_CACHE_SIZE = 16; + + private final long tableId; + private final ColumnMeta TSO_COLUMN; + private final ColumnMeta POSITION_COLUMN; + + // column_id -> columnMeta + // this column_id is the id of columnar_column_evolution, ranther than field_id + private final Map allColumnMetas = new ConcurrentHashMap<>(); + + private final Set primaryKeySet = ConcurrentHashMap.newKeySet(); + + // schema_ts -> version_id + // THE schema_ts COULD BE NULL, which means that the columnar has not processed with this version + // notice that only schema_ts is ordered, while version_id may be not + private final SortedMap versionIdMap = new ConcurrentSkipListMap<>(); + + // version_id -> [column_id1 ...] + // notice that columnar_table_evolution stores id rather than field_id of columnar_column_evolution + private final Map> multiVersionColumns = new ConcurrentHashMap<>(); + + // for columnar_column_evoluion: id -> field_id + private final Map columnFieldIdMap = new ConcurrentHashMap<>(); + + // field -> [column_id for version 1, column_id for version 2, ...] + private final Map> multiVersionColumnIds = new ConcurrentHashMap<>(); + + private final LoadingCache> columnMetaListByTso; + private final LoadingCache> fieldIdMapByTso; + + private final Lock lock = new ReentrantLock(); + + public MultiVersionColumnarTableMeta(long tableId) { + this.tableId = tableId; + + String tableName = String.valueOf(this.tableId); + TSO_COLUMN = new ColumnMeta(tableName, "tso", null, + new Field(TYPE_FACTORY.createSqlType(SqlTypeName.BIGINT))); + POSITION_COLUMN = new ColumnMeta(tableName, "position", null, + new Field(TYPE_FACTORY.createSqlType(SqlTypeName.BIGINT))); + + columnMetaListByTso = CacheBuilder.newBuilder() + .maximumSize(MAX_VERSION_SCHEMA_CACHE_SIZE) + .build(new CacheLoader>() { + @Override + public List load(@NotNull Long schemaTso) { + long versionId = versionIdMap.get(schemaTso); + List columns = multiVersionColumns.get(versionId); + + List columnMetas = new ArrayList<>(); + columnMetas.add(TSO_COLUMN); + columnMetas.add(POSITION_COLUMN); + columnMetas.addAll( + columns.stream().map(columnId -> allColumnMetas.get(columnId).getMeta()) + .collect(Collectors.toList()) + ); + + return columnMetas; + } + }); + + fieldIdMapByTso = CacheBuilder.newBuilder() + .maximumSize(MAX_VERSION_SCHEMA_CACHE_SIZE) + .build(new CacheLoader>() { + @Override + public Map load(@NotNull Long schemaTso) { + long versionId = versionIdMap.get(schemaTso); + List columns = multiVersionColumns.get(versionId); + + Map targetColumnIndex = new HashMap<>(); + + for (int i = 0; i < columns.size(); i++) { + targetColumnIndex.put(columnFieldIdMap.get(columns.get(i)), + i + ColumnarStoreUtils.IMPLICIT_COLUMN_CNT); + } + + return targetColumnIndex; + } + }); + } + + @Nullable + public List getColumnFieldIdList(long versionId) { + if (multiVersionColumns.isEmpty() || !multiVersionColumns.containsKey(versionId)) { + return null; + } + + return multiVersionColumns.get(versionId).stream().map(columnFieldIdMap::get) + .collect(Collectors.toList()); + } + 
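MultiVersionColumnarTableMeta resolves a schema in two hops: the ordered versionIdMap takes schema_ts to version_id, then multiVersionColumns takes version_id to the column-id list (version ids themselves may arrive out of order, so only the first map is sorted). A minimal sketch of that lookup and its not-yet-loaded signal (hypothetical class and sample ids; the null return mirrors the guard that sends callers to loadUntilTso):

```java
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListMap;

// Sketch of the two-hop schema lookup: schema_ts -> version_id (ordered),
// then version_id -> column ids (unordered).
public class SchemaVersionLookup {
    static final SortedMap<Long, Long> versionIdByTs = new ConcurrentSkipListMap<>();
    static final Map<Long, List<Long>> columnsByVersion = new ConcurrentHashMap<>();

    // Column-id list visible at schemaTso, or null if this tso is not loaded yet.
    static List<Long> columnsAt(long schemaTso) {
        if (versionIdByTs.isEmpty() || versionIdByTs.lastKey() < schemaTso) {
            return null; // caller must loadUntilTso(schemaTso) first
        }
        Long versionId = versionIdByTs.get(schemaTso); // exact schema_ts match, as above
        return versionId == null ? null : columnsByVersion.get(versionId);
    }

    public static void main(String[] args) {
        versionIdByTs.put(100L, 1L);
        columnsByVersion.put(1L, List.of(10L, 11L));
        System.out.println(columnsAt(100)); // [10, 11]
        System.out.println(columnsAt(200)); // null -> needs a load first
    }
}
```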
+ /** + * Get the map from field_id to the position of corresponding file for certain tso + * + * @param schemaTso schema tso + * @return Map: field_id -> column position in file + */ + @Nullable + public Map getFieldIdMapByTso(long schemaTso) { + if (versionIdMap.isEmpty() || versionIdMap.lastKey() < schemaTso) { + return null; + } + + try { + return fieldIdMapByTso.get(schemaTso); + } catch (ExecutionException e) { + throw new RuntimeException( + String.format( + "Failed to build field id map for CCI, table id: %d, schema tso: %d", tableId, schemaTso + ), + e + ); + } + } + + @Nullable + public List getColumnMetaListByTso(long schemaTso) { + if (versionIdMap.isEmpty() || versionIdMap.lastKey() < schemaTso) { + return null; + } + + try { + return columnMetaListByTso.get(schemaTso); + } catch (ExecutionException e) { + throw new RuntimeException( + String.format( + "Failed to build column meta for CCI, table id: %d, schema tso: %d", tableId, schemaTso + ), + e + ); + } + } + + public int @Nullable [] getPrimaryKeyColumns(long schemaTso) { + if (versionIdMap.isEmpty() || versionIdMap.lastKey() < schemaTso) { + return null; + } + + long versionId = versionIdMap.get(schemaTso); + List columns = multiVersionColumns.get(versionId); + + return IntStream.range(0, columns.size()) + .filter(index -> primaryKeySet.contains(columns.get(index))) + .map(index -> index + ColumnarStoreUtils.IMPLICIT_COLUMN_CNT + 1) + .toArray(); + } + + @Nullable + public ColumnMetaWithTs getInitColumnMeta(long fieldId) { + if (multiVersionColumnIds.containsKey(fieldId)) { + return allColumnMetas.get(multiVersionColumnIds.get(fieldId).first()); + } else { + return null; + } + } + + public void loadUntilTso(long schemaTso) { + if (!versionIdMap.isEmpty() && versionIdMap.lastKey() >= schemaTso) { + return; + } + + long latestTso = versionIdMap.isEmpty() ? 
Long.MIN_VALUE : versionIdMap.lastKey(); + // this latest version id in current memory cache is nonsense, since version id may be out of order + + List tableEvolutionRecordList; + List columnEvolutionRecordList; + + try (Connection connection = MetaDbUtil.getConnection()) { + ColumnarTableEvolutionAccessor accessor = new ColumnarTableEvolutionAccessor(); + accessor.setConnection(connection); + tableEvolutionRecordList = accessor.queryTableIdAndGreaterThanTso(tableId, latestTso); + + ColumnarColumnEvolutionAccessor columnAccessor = new ColumnarColumnEvolutionAccessor(); + columnAccessor.setConnection(connection); + // TODO(siyun): greater than certain id and order by id + columnEvolutionRecordList = columnAccessor.queryTableIdAndVersionIdsOrderById( + tableId, + tableEvolutionRecordList.stream() + // For those versions which have not been loaded + .filter(r -> !multiVersionColumns.containsKey(r.versionId)) + .map(r -> r.versionId) + .collect(Collectors.toList()) + ); + + } catch (SQLException e) { + throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_SNAPSHOT, e, + String.format("Failed to generate columnar schema of tso: %d", schemaTso)); + } + + for (ColumnarColumnEvolutionRecord columnEvolutionRecord : columnEvolutionRecordList) { + Long id = columnEvolutionRecord.id; + Long filedId = columnEvolutionRecord.fieldId; + ColumnsRecord columnsRecord = columnEvolutionRecord.columnsRecord; + ColumnMeta columnMeta = GmsTableMetaManager.buildColumnMeta( + columnsRecord, + tableEvolutionRecordList.get(0).indexName, + columnsRecord.collationName, + columnsRecord.characterSetName); + ColumnMetaWithTs columnMetaWithTs = new ColumnMetaWithTs(columnEvolutionRecord.create, columnMeta); + if ("PRI".equalsIgnoreCase(columnsRecord.columnKey)) { + primaryKeySet.add(id); + } + allColumnMetas.put(id, columnMetaWithTs); + columnFieldIdMap.put(id, filedId); + multiVersionColumnIds.compute(filedId, (k, v) -> { + if (v == null) { + SortedSet newSet = new ConcurrentSkipListSet<>(); + newSet.add(id); + return newSet; + } else { + v.add(id); + return v; + } + }); + } + + for (ColumnarTableEvolutionRecord tableRecord : tableEvolutionRecordList) { + long commitTs = tableRecord.commitTs; + long versionId = tableRecord.versionId; + multiVersionColumns.putIfAbsent(versionId, tableRecord.columns); + if (commitTs != Long.MAX_VALUE) { + versionIdMap.put(commitTs, versionId); + } + } + } + + /** + * Purge schema for outdated columnar table schema + * Noted that this tso is different from low watermark, + * since those schemas which haven't been compacted should all be reserved. + */ + @Override + public void purge(long schemaTso) { + + } + + public Lock getLock() { + return lock; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/MultiVersionCsvData.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/MultiVersionCsvData.java new file mode 100644 index 000000000..3b53988f8 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/MultiVersionCsvData.java @@ -0,0 +1,296 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.gms; + +import com.alibaba.polardbx.common.Engine; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.columnar.CSVFileReader; +import com.alibaba.polardbx.executor.columnar.RawOrcTypeCsvReader; +import com.alibaba.polardbx.executor.columnar.SimpleCSVFileReader; +import com.alibaba.polardbx.gms.metadb.table.ColumnarAppendedFilesAccessor; +import com.alibaba.polardbx.gms.metadb.table.ColumnarAppendedFilesRecord; +import com.alibaba.polardbx.gms.util.MetaDbUtil; +import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; +import com.alibaba.polardbx.optimizer.config.table.FileMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import org.jetbrains.annotations.Nullable; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.SortedMap; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +public class MultiVersionCsvData implements Purgeable { + private static final Logger LOGGER = LoggerFactory.getLogger("COLUMNAR_TRANS"); + + protected final String csvFileName; + // tso - + private SortedMap>> allChunks = new ConcurrentSkipListMap<>(); + private final AtomicLong openedFileCount; + private final Lock lock = new ReentrantLock(); + + public MultiVersionCsvData(String csvFileName, AtomicLong openedFileCount) { + this.csvFileName = csvFileName; + this.openedFileCount = openedFileCount; + } + + @Nullable + public SortedMap>> getChunksWithTso(long tso) { + // current tso is not loaded, should read from files + if (allChunks.isEmpty() || allChunks.lastKey() < tso) { + return null; + } + return allChunks.headMap(tso + 1); + } + + private List loadDeltaStateFromGms(long minTso, long latestTso, long tso) { + List appendedFilesRecords = new ArrayList<>(); + + try (Connection connection = MetaDbUtil.getConnection()) { + ColumnarAppendedFilesAccessor columnarAppendedFilesAccessor = new ColumnarAppendedFilesAccessor(); + columnarAppendedFilesAccessor.setConnection(connection); + + // single huge part below minTso + if (latestTso < minTso) { + appendedFilesRecords.addAll( + columnarAppendedFilesAccessor.queryLatestByFileNameBetweenTso(csvFileName, latestTso, minTso)); + } else { + minTso = latestTso; + } + + if (minTso < tso) { + // multi-version parts + appendedFilesRecords.addAll( + columnarAppendedFilesAccessor.queryByFileNameBetweenTso(csvFileName, minTso, tso)); + } + + } catch (SQLException e) { + throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_SNAPSHOT, e, + String.format("Failed to generate columnar snapshot of tso: %d", tso)); + } + 
+
+    public void loadUntilTso(long minTso, long tso) {
+        if (!allChunks.isEmpty() && allChunks.lastKey() >= tso) {
+            return;
+        }
+
+        minTso = Math.min(minTso, tso);
+
+        // The latest tso which has already been loaded by the version chain
+        long latestTso = allChunks.isEmpty() ? Long.MIN_VALUE : allChunks.lastKey();
+
+        long lastEndPosition = latestTso == Long.MIN_VALUE ? 0 : allChunks.get(latestTso).getKey();
+
+        List<ColumnarAppendedFilesRecord> appendedFilesRecords = loadDeltaStateFromGms(minTso, latestTso, tso);
+
+        if (!appendedFilesRecords.isEmpty()) {
+            ColumnarAppendedFilesRecord lastRecord = appendedFilesRecords.get(appendedFilesRecords.size() - 1);
+
+            FileMeta fileMeta = ColumnarManager.getInstance().fileMetaOf(csvFileName);
+            Engine engine = fileMeta.getEngine();
+            List<ColumnMeta> columnMetas = fileMeta.getColumnMetas();
+
+            long maxReadPosition = lastRecord.appendOffset + lastRecord.appendLength;
+            openedFileCount.incrementAndGet();
+            try (SimpleCSVFileReader csvFileReader = new SimpleCSVFileReader()) {
+                csvFileReader.open(new ExecutionContext(), columnMetas, FileVersionStorage.CSV_CHUNK_LIMIT, engine,
+                    csvFileName,
+                    (int) lastEndPosition,
+                    (int) (maxReadPosition - lastEndPosition));
+                for (ColumnarAppendedFilesRecord record : appendedFilesRecords) {
+                    long newEndPosition = record.appendOffset + record.appendLength;
+
+                    List<Chunk> results = new ArrayList<>();
+                    // Note: accumulating every chunk in memory may cause OOM
+                    Chunk result;
+                    while ((result = csvFileReader.nextUntilPosition(newEndPosition)) != null) {
+                        results.add(result);
+                    }
+
+                    allChunks.put(record.checkpointTso, Pair.of(newEndPosition, results));
+                }
+            } catch (Throwable t) {
+                throw new TddlRuntimeException(ErrorCode.ERR_LOAD_CSV_FILE, t,
+                    String.format("Failed to read csv file, file name: %s, last tso: %d, snapshot tso: %d",
+                        csvFileName, latestTso, tso));
+            } finally {
+                openedFileCount.decrementAndGet();
+            }
+        } else {
+            // add a new version to bump the tso when the file has not been appended,
+            // preventing repeated GMS access when there is no newer version
+            allChunks.put(tso, Pair.of(lastEndPosition, Collections.emptyList()));
+        }
+    }
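/*
 * Editor's sketch (hypothetical, not part of the patch): the call pattern implied
 * by getLock()/loadUntilTso()/getChunksWithTso(). A caller first tries the
 * lock-free read path and only takes the per-file lock to extend the version
 * chain on a miss.
 */
// SortedMap<Long, Pair<Long, List<Chunk>>> chunks = data.getChunksWithTso(tso);
// if (chunks == null) {
//     data.getLock().lock();
//     try {
//         data.loadUntilTso(minTso, tso);      // extend the chain up to tso
//         chunks = data.getChunksWithTso(tso); // now guaranteed non-null
//     } finally {
//         data.getLock().unlock();
//     }
// }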
+
+    /**
+     * Never use any cache.
+     */
+    public static List<Chunk> loadRawOrcTypeUntilTso(long tso,
+                                                     AtomicLong openedFileCount,
+                                                     String csvFileName,
+                                                     ExecutionContext context) {
+        // The latest tso which has already been loaded by the version chain
+        long latestTso = Long.MIN_VALUE;
+
+        ColumnarAppendedFilesRecord record;
+
+        try (Connection connection = MetaDbUtil.getConnection()) {
+            ColumnarAppendedFilesAccessor columnarAppendedFilesAccessor = new ColumnarAppendedFilesAccessor();
+            columnarAppendedFilesAccessor.setConnection(connection);
+            record = columnarAppendedFilesAccessor
+                .queryLatestByFileNameBetweenTso(csvFileName, latestTso, tso).get(0);
+        } catch (SQLException e) {
+            throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_SNAPSHOT, e,
+                String.format("[RawOrcType] Failed to generate columnar snapshot of tso: %d", tso));
+        }
+
+        if (null != record) {
+            FileMeta fileMeta = ColumnarManager.getInstance().fileMetaOf(csvFileName);
+            Engine engine = fileMeta.getEngine();
+            List<ColumnMeta> columnMetas = fileMeta.getColumnMetas();
+
+            long maxReadPosition = record.appendOffset + record.appendLength;
+            openedFileCount.incrementAndGet();
+            try (CSVFileReader csvFileReader = new RawOrcTypeCsvReader()) {
+                csvFileReader.open(context, columnMetas, FileVersionStorage.CSV_CHUNK_LIMIT, engine,
+                    csvFileName,
+                    0,
+                    (int) maxReadPosition);
+                List<Chunk> results = new ArrayList<>();
+                // Note: accumulating every chunk in memory may cause OOM
+                Chunk result;
+                while ((result = csvFileReader.nextUntilPosition(maxReadPosition)) != null) {
+                    results.add(result);
+                }
+                return results;
+            } catch (Throwable t) {
+                throw new TddlRuntimeException(ErrorCode.ERR_LOAD_CSV_FILE, t,
+                    String.format("[RawOrcType] Failed to read csv file, "
+                            + "file name: %s, last tso: %d, snapshot tso: %d",
+                        csvFileName, latestTso, tso));
+            } finally {
+                openedFileCount.decrementAndGet();
+            }
+        }
+        return null;
+    }
+
+    public Lock getLock() {
+        return lock;
+    }
+
+    public void purge(long tso) {
+        // Merging chunks in memory is not economical, so we reload the small leading part of the file instead
+        if (allChunks.isEmpty() || allChunks.firstKey() >= tso) {
+            return;
+        }
+
+        // skip the purge for csv files whose newer versions have not been loaded yet
+        if (allChunks.lastKey() < tso) {
+            return;
+        }
+
+        long lastPurgeTso = allChunks.firstKey();
+        long floorTso = allChunks.headMap(tso + 1).lastKey();
+
+        if (lastPurgeTso == floorTso) {
+            return;
+        }
+
+        Pair<Long, List<Chunk>> firstEntry = allChunks.get(lastPurgeTso);
+        long firstPos = firstEntry.getKey();
+        List<Chunk> firstChunk = firstEntry.getValue();
+        long purgePos = allChunks.get(floorTso).getKey();
+
+        List<Chunk> purgedChunkList = new ArrayList<>();
+        List<Chunk> orphanChunkList = new ArrayList<>();
+        // If the first chunk holds fewer than CSV_CHUNK_LIMIT rows, consider purging it together;
+        // for extremely large rows, this strategy may perform badly
+        if (firstChunk.size() == 1 && firstChunk.get(0).getPositionCount() < FileVersionStorage.CSV_CHUNK_LIMIT) {
+            firstPos = 0;
+        } else {
+            purgedChunkList.addAll(firstChunk);
+        }
+
+        long currentPos = firstPos;
+        openedFileCount.incrementAndGet();
+        try (SimpleCSVFileReader csvFileReader = new SimpleCSVFileReader()) {
+            FileMeta fileMeta = ColumnarManager.getInstance().fileMetaOf(csvFileName);
+            Engine engine = fileMeta.getEngine();
+            List<ColumnMeta> columnMetas = fileMeta.getColumnMetas();
+
+            csvFileReader.open(new ExecutionContext(), columnMetas, FileVersionStorage.CSV_CHUNK_LIMIT, engine,
+                csvFileName,
+                (int) firstPos,
+                (int) (purgePos - firstPos));
+            Chunk result;
+            while ((result = csvFileReader.next()) != null) {
+                if (result.getPositionCount() >= FileVersionStorage.CSV_CHUNK_LIMIT) {
+                    purgedChunkList.add(result);
+                    currentPos = csvFileReader.position();
+                } else {
+                    // fewer than CSV_CHUNK_LIMIT rows; may be purged next time
+                    orphanChunkList.add(result);
+                }
+            }
+        } catch (Throwable t) {
+            throw new TddlRuntimeException(ErrorCode.ERR_LOAD_CSV_FILE, t,
+                String.format(
+                    "Failed to read csv file while purging, file name: %s, last tso: %d, purge tso: %d",
+                    csvFileName, lastPurgeTso, tso));
+        } finally {
+            openedFileCount.decrementAndGet();
+        }
+
+        SortedMap<Long, Pair<Long, List<Chunk>>> newChunks = new ConcurrentSkipListMap<>();
+        if (!orphanChunkList.isEmpty()) {
+            newChunks.put(tso - 1, new Pair<>(currentPos, purgedChunkList));
+            newChunks.put(tso, new Pair<>(purgePos, orphanChunkList));
+        } else {
+            newChunks.put(tso, new Pair<>(currentPos, purgedChunkList));
+        }
+
+        // minimize the lock:
+        // since the version chain is append-only, loadUntilTso() will not affect versions before the purge tso,
+        // so only operations with a larger tso require the lock
+        lock.lock();
+        try {
+            newChunks.putAll(allChunks.tailMap(tso + 1));
+            // hot swap the cache
+            LOGGER.debug(
+                String.format("Csv purge finished: fileName: %s, versions before purge: %d, after purge: %d",
+                    csvFileName, this.allChunks.size(), newChunks.size()));
+            this.allChunks = newChunks;
+        } finally {
+            lock.unlock();
+        }
+    }
+}
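/*
 * Editor's sketch (simplified, not part of the patch): the net effect of
 * MultiVersionCsvData.purge(tso) on the version chain. Versions with checkpoint
 * tso <= purge tso collapse into at most two entries: the compacted full-size
 * chunks (keyed at tso - 1 when a trailing undersized "orphan" chunk remains,
 * otherwise at tso). Plain longs stand in for (position, chunks) pairs.
 */
// TreeMap<Long, Long> chain = new TreeMap<>(Map.of(10L, 100L, 20L, 220L, 30L, 300L, 40L, 410L));
// long purgeTso = 30L;
// TreeMap<Long, Long> purged = new TreeMap<>(chain.tailMap(purgeTso + 1)); // keep newer versions untouched
// purged.put(purgeTso, chain.floorEntry(purgeTso).getValue());            // collapse the prefix into one entry
// // chain {10, 20, 30, 40} -> purged {30, 40}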
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/MultiVersionDelData.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/MultiVersionDelData.java
new file mode 100644
index 000000000..7940e2f29
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/MultiVersionDelData.java
@@ -0,0 +1,92 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.gms;
+
+import org.roaringbitmap.RoaringBitmap;
+
+import java.util.SortedMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+public class MultiVersionDelData implements Purgeable {
+    // The merged bitmap of all deltas until the latest tso
+    private final RoaringBitmap mergedBitMap = new RoaringBitmap();
+    // tso -> delta bitmap cache
+    private final SortedMap<Long, RoaringBitmap> allBitmaps = new ConcurrentSkipListMap<>();
+    private final ReadWriteLock lock = new ReentrantReadWriteLock();
+
+    public void putNewTsoBitMap(long tso, RoaringBitmap bitmap) {
+        // this tso already exists, skip it
+        if (!allBitmaps.isEmpty() && tso <= allBitmaps.lastKey()) {
+            return;
+        }
+
+        Lock writeLock = lock.writeLock();
+        writeLock.lock();
+        try {
+            // double check
+            if (!allBitmaps.isEmpty() && tso <= allBitmaps.lastKey()) {
+                return;
+            }
+
+            allBitmaps.put(tso, bitmap);
+            mergedBitMap.or(bitmap);
+        } finally {
+            writeLock.unlock();
+        }
+    }
+
+    // TODO(siyun): optimize the creation and destruction of RoaringBitmap objects; to stay
+    // safe under concurrency, the bitmap is copied even when the latest version is being read
+    public RoaringBitmap buildDeleteBitMap(long tso) {
+        if (tso == Long.MIN_VALUE) {
+            return new RoaringBitmap();
+        }
+
+        Lock readLock = lock.readLock();
+        readLock.lock();
+        try {
+            RoaringBitmap bitmap = mergedBitMap.clone();
+
+            for (RoaringBitmap deltaBitmap : allBitmaps.tailMap(tso + 1).values()) {
+                // This has the same effect as xor in this case
+                bitmap.andNot(deltaBitmap);
+            }
+
+            return bitmap;
+        } finally {
+            readLock.unlock();
+        }
+    }
+
+    /**
+     * Merge all old bitmaps whose tso < the given tso into the first bitmap whose tso >= the given tso,
+     * and then remove all old bitmaps.
+     *
+     * @param tso purged tso value.
+     */
+    public void purge(long tso) {
+        Lock writeLock = lock.writeLock();
+        writeLock.lock();
+        try {
+            allBitmaps.headMap(tso + 1).clear();
+        } finally {
+            writeLock.unlock();
+        }
+    }
+}
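/*
 * Editor's sketch (not part of the patch): why buildDeleteBitMap() can start from
 * the merged bitmap and strip newer deltas with andNot(). Deltas mark disjoint row
 * positions (a row is deleted once), so subtracting every delta with tso > snapshot
 * tso from the union of all deltas leaves exactly the deletions visible at the snapshot.
 */
// RoaringBitmap d1 = RoaringBitmap.bitmapOf(1, 5);  // delta committed at tso 10
// RoaringBitmap d2 = RoaringBitmap.bitmapOf(7);     // delta committed at tso 20
// RoaringBitmap merged = RoaringBitmap.or(d1, d2);  // {1, 5, 7}
// RoaringBitmap atTso10 = merged.clone();
// atTso10.andNot(d2);                               // strip deltas newer than tso 10
// // atTso10 now equals d1: {1, 5}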
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/MultiVersionDelPartitionInfo.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/MultiVersionDelPartitionInfo.java
new file mode 100644
index 000000000..b1f89d629
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/MultiVersionDelPartitionInfo.java
@@ -0,0 +1,192 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.gms;
+
+import com.alibaba.polardbx.common.Engine;
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.executor.columnar.DeletionFileReader;
+import com.alibaba.polardbx.executor.columnar.SimpleDeletionFileReader;
+import com.alibaba.polardbx.gms.metadb.table.ColumnarAppendedFilesAccessor;
+import com.alibaba.polardbx.gms.metadb.table.ColumnarAppendedFilesRecord;
+import com.alibaba.polardbx.gms.util.MetaDbUtil;
+import com.google.common.base.Preconditions;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.SortedSet;
+import java.util.concurrent.ConcurrentSkipListSet;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.BiConsumer;
+import java.util.stream.Collectors;
+
+/**
+ * The set of TSOs that have been loaded for each partition's .del files
+ */
+public class MultiVersionDelPartitionInfo implements Purgeable {
+    private final SortedSet<Long> readDelTso = new ConcurrentSkipListSet<>();
+    private final Lock lock = new ReentrantLock();
+
+    // there may be more than one deletion bitmap file in a partition,
+    // so we need to cache the read position of each bitmap file
+    // file name -> end position of the last read
+    private final Map<String, Long> positionMap = new HashMap<>();
+
+    public Long getLastTso() {
+        if (readDelTso.isEmpty()) {
+            return Long.MIN_VALUE;
+        } else {
+            return readDelTso.last();
+        }
+    }
+
+    public void loadUntilTso(String logicalSchema, String logicalTable, String partitionName, long minTso, long tso,
+                             BiConsumer<DeletionFileReader, ColumnarAppendedFilesRecord> delFileConsumer) {
+        if (!readDelTso.isEmpty() && readDelTso.last() >= tso) {
+            return;
+        }
+
+        long lastTso = readDelTso.isEmpty() ? Long.MIN_VALUE : readDelTso.last();
+
+        List<String> minSnapshotDelFiles = ((DynamicColumnarManager) ColumnarManager.getInstance())
+            .delFileNames(minTso, logicalSchema, logicalTable, partitionName);
+
+        Map<String, List<ColumnarAppendedFilesRecord>> recordsForEachDelFile = new HashMap<>();
+        try (Connection connection = MetaDbUtil.getConnection()) {
+            ColumnarAppendedFilesAccessor columnarAppendedFilesAccessor = new ColumnarAppendedFilesAccessor();
+            columnarAppendedFilesAccessor.setConnection(connection);
+
+            if (lastTso < minTso) {
+                for (String delFileName : minSnapshotDelFiles) {
+                    List<ColumnarAppendedFilesRecord> records = columnarAppendedFilesAccessor
+                        .queryByFileNameAndMaxTso(delFileName, minTso);
+                    if (records == null || records.isEmpty()) {
+                        continue;
+                    }
+
+                    Preconditions.checkArgument(records.size() == 1);
+                    ColumnarAppendedFilesRecord delRecord = records.get(0);
+                    long pos = delRecord.appendOffset + delRecord.appendLength;
+
+                    if (positionMap.getOrDefault(delFileName, 0L) >= pos) {
+                        continue;
+                    }
+
+                    recordsForEachDelFile.computeIfAbsent(delFileName, s -> new ArrayList<>());
+                    recordsForEachDelFile.get(delFileName).add(delRecord);
+                }
+            } else {
+                minTso = lastTso;
+            }
+
+            if (minTso < tso) {
+                columnarAppendedFilesAccessor.queryDelByPartitionBetweenTso(
+                    logicalSchema,
+                    logicalTable,
+                    partitionName,
+                    lastTso,
+                    tso
+                ).forEach(record -> {
+                    recordsForEachDelFile.computeIfAbsent(record.fileName, s -> new ArrayList<>());
+                    recordsForEachDelFile.get(record.fileName).add(record);
+                });
+            }
+        } catch (SQLException e) {
+            throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_SNAPSHOT, e,
+                String.format("Failed to generate columnar snapshot of tso: %d", tso));
+        }
+
+        List<List<ColumnarAppendedFilesRecord>> sortedDelFileRecords =
+            recordsForEachDelFile.values().stream()
+                /* sorted by tso to make sure that the bitmaps are applied in order */
+                .sorted(Comparator.comparingLong(records -> records.get(0).checkpointTso))
+                .collect(Collectors.toList());
+
+        for (List<ColumnarAppendedFilesRecord> records : sortedDelFileRecords) {
+            ColumnarAppendedFilesRecord firstRecord = records.get(0);
+            ColumnarAppendedFilesRecord lastRecord = records.get(records.size() - 1);
+
+            Engine engine = Engine.of(firstRecord.engine);
+            String fileName = firstRecord.fileName;
+            long maxReadPosition = lastRecord.appendOffset + lastRecord.appendLength;
+            long lastEndPosition = positionMap.getOrDefault(fileName, 0L);
+
+            if (lastEndPosition >= maxReadPosition) {
+                continue;
+            }
+
+            try (SimpleDeletionFileReader fileReader = new SimpleDeletionFileReader()) {
+                try {
+                    fileReader.open(
+                        engine,
+                        fileName,
+                        (int) lastEndPosition,
+                        (int) (maxReadPosition - lastEndPosition)
+                    );
+                } catch (IOException e) {
+                    throw new TddlRuntimeException(ErrorCode.ERR_LOAD_DEL_FILE, e,
+                        String.format("Failed to open delete bitmap file, filename: %s, offset: %d, length: %d",
+                            fileName, lastEndPosition, maxReadPosition - lastEndPosition));
+                }
+
+                for (ColumnarAppendedFilesRecord record : records) {
+                    if (lastEndPosition >= record.appendOffset + record.appendLength) {
+                        continue;
+                    }
+
+                    delFileConsumer.accept(fileReader, record);
+                    readDelTso.add(record.checkpointTso);
+                }
+            } catch (Throwable t) {
+                throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_SNAPSHOT, t,
+                    String.format("Failed to generate columnar snapshot of tso: %d", tso));
+            }
+
+            positionMap.put(fileName, maxReadPosition);
+        }
+
+        if (!readDelTso.isEmpty() && readDelTso.last() < tso) {
+            readDelTso.add(tso);
+        }
+
+        // In case there are no .del files at all
+        if (readDelTso.isEmpty()) {
+            readDelTso.add(tso);
+        }
+    }
+
+    public Lock getLock() {
+        return lock;
+    }
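/*
 * Editor's sketch (hypothetical, not part of the patch): the incremental-read
 * bookkeeping used above. Because .del files are append-only, a per-file position
 * map lets each load pick up exactly the bytes added since the last load: read
 * [lastEnd, maxEnd) and advance the cursor. `readRange` is a hypothetical reader call.
 */
// Map<String, Long> positions = new HashMap<>();
// long lastEnd = positions.getOrDefault(fileName, 0L);
// long maxEnd = record.appendOffset + record.appendLength; // end of the newest append
// if (lastEnd < maxEnd) {
//     readRange(fileName, lastEnd, maxEnd - lastEnd);      // hypothetical reader call
//     positions.put(fileName, maxEnd);
// }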
+
+    public void purge(long tso) {
+        lock.lock();
+        try {
+            // must keep at least one position entry
+            readDelTso.headSet(Long.min(tso + 1, readDelTso.last())).clear();
+        } finally {
+            lock.unlock();
+        }
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/PartitionId.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/PartitionId.java
new file mode 100644
index 000000000..d701db580
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/PartitionId.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.gms;
+
+import lombok.Data;
+
+/**
+ * The unique identifier of a partition.
+ */
+@Data
+public class PartitionId {
+    private final String partName;
+    private final String logicalSchema;
+    private final Long tableId;
+
+    public PartitionId(String partName, String logicalSchema, Long tableId) {
+        this.partName = partName;
+        this.logicalSchema = logicalSchema;
+        this.tableId = tableId;
+    }
+
+    public static PartitionId of(String partName, String logicalSchema, Long tableId) {
+        return new PartitionId(partName, logicalSchema, tableId);
+    }
+}
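/*
 * Editor's note (assumption: standard Lombok semantics, not stated in the patch):
 * @Data generates equals()/hashCode() over all three fields, so PartitionId can
 * safely key a hash map; distinct instances with equal fields are treated as the
 * same key. `delDataMap` is a hypothetical name.
 */
// Map<PartitionId, MultiVersionDelData> delDataMap = new ConcurrentHashMap<>();
// delDataMap.computeIfAbsent(PartitionId.of("p1", "db1", 42L),
//     id -> new MultiVersionDelData());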
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/Purgeable.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/Purgeable.java
new file mode 100644
index 000000000..c39e2aef6
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/Purgeable.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.gms;
+
+/**
+ * Multi-version data or metadata that can be purged
+ */
+public interface Purgeable {
+    void purge(long tso);
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/TableListListener.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/TableListListener.java
index 0381bbbd1..0f4e730c5 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/TableListListener.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/TableListListener.java
@@ -22,7 +22,7 @@
 import com.alibaba.polardbx.gms.listener.impl.MetaDbDataIdBuilder;
 import com.alibaba.polardbx.gms.metadb.record.SystemTableRecord;
 import com.alibaba.polardbx.gms.metadb.table.TableInfoManager;
-import com.alibaba.polardbx.gms.metadb.table.TablesRecord;
+import com.alibaba.polardbx.gms.metadb.table.TableNamesRecord;
 import com.alibaba.polardbx.gms.util.MetaDbUtil;
 
 import java.sql.Connection;
@@ -37,17 +37,16 @@ public TableListListener(String schemaName) {
     }
 
     @Override
-    protected List<SystemTableRecord> fetchRecords() {
-        List<TablesRecord> tablesRecords = fetchVisibleRecords();
+    protected List<SystemTableRecord> fetchTablesName() {
+        List<TableNamesRecord> tablesRecords = fetchVisibleTableNames();
         List<SystemTableRecord> records = new ArrayList<>(tablesRecords.size());
         records.addAll(tablesRecords);
         return records;
     }
 
     @Override
-    protected String getDataId(SystemTableRecord record) {
-        TablesRecord tablesRecord = (TablesRecord) record;
-        return MetaDbDataIdBuilder.getTableDataId(tablesRecord.tableSchema, tablesRecord.tableName);
+    protected String getDataId(String tableSchema, String tableName) {
+        return MetaDbDataIdBuilder.getTableDataId(tableSchema, tableName);
     }
 
     @Override
@@ -56,16 +55,15 @@ protected String getDataIdPrefix() {
     }
 
     @Override
-    protected ConfigListener getObjectListener(SystemTableRecord record) {
-        TablesRecord tablesRecord = (TablesRecord) record;
-        return new TableMetaListener(tablesRecord.tableSchema, tablesRecord.tableName);
+    protected ConfigListener getObjectListener(String tableSchema, String tableName) {
+        return new TableMetaListener(tableSchema, tableName);
     }
 
-    private List<TablesRecord> fetchVisibleRecords() {
+    private List<TableNamesRecord> fetchVisibleTableNames() {
         TableInfoManager tableInfoManager = new TableInfoManager();
         try (Connection metaDbConn = MetaDbUtil.getConnection()) {
             tableInfoManager.setConnection(metaDbConn);
-            return tableInfoManager.queryVisibleTables(schemaName);
+            return tableInfoManager.queryVisibleTableNames(schemaName);
         } catch (SQLException e) {
             throw new TddlRuntimeException(ErrorCode.ERR_GMS_GET_CONNECTION, e, e.getMessage());
         } finally {
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/util/AlterPartitionKeyUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/util/AlterPartitionKeyUtils.java
index e69de29bb..27c9b35b1 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/util/AlterPartitionKeyUtils.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/util/AlterPartitionKeyUtils.java
@@ -0,0 +1,184 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.gms.util; + +import com.alibaba.polardbx.common.constants.SequenceAttribute; +import com.alibaba.polardbx.common.ddl.Job; +import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.config.ConfigDataMode; + +import com.alibaba.polardbx.executor.ddl.job.validator.ddl.RepartitionValidator; +import com.alibaba.polardbx.executor.sync.RepartitionSyncAction; +import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.metadb.table.IndexStatus; +import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; +import com.alibaba.polardbx.gms.metadb.table.TablesExtRecord; +import com.alibaba.polardbx.gms.sync.SyncScope; +import com.alibaba.polardbx.gms.util.MetaDbUtil; +import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; +import com.alibaba.polardbx.optimizer.config.table.GlobalIndexMeta; +import com.alibaba.polardbx.optimizer.config.table.GsiMetaManager; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.optimizer.context.AsyncDDLContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.sequence.SequenceManagerProxy; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; +import org.apache.calcite.sql.SqlAlterTablePartitionKey; +import org.apache.calcite.sql.SqlCreateTable; +import org.apache.calcite.sql.SqlIdentifier; +import org.apache.calcite.sql.SqlIndexColumnName; +import org.apache.calcite.sql.SqlIndexDefinition; +import org.apache.calcite.sql.parser.SqlParserPos; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang.math.RandomUtils; +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.StringUtils; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import static com.alibaba.polardbx.common.constants.SequenceAttribute.AUTO_SEQ_PREFIX; +import static com.alibaba.polardbx.common.ddl.Attribute.RANDOM_SUFFIX_LENGTH_OF_PHYSICAL_TABLE_NAME; +import static com.alibaba.polardbx.common.exception.code.ErrorCode.ERR_DDL_JOB_UNSUPPORTED; +import static com.alibaba.polardbx.executor.ddl.job.meta.misc.RepartitionMetaChanger.doCutOver; +import static com.alibaba.polardbx.executor.ddl.job.validator.ddl.RepartitionValidator.getExpectedPrimaryAndShardingKeys; +import static com.alibaba.polardbx.optimizer.config.table.GsiMetaManager.TableType.BROADCAST; +import static 
com.alibaba.polardbx.optimizer.config.table.GsiMetaManager.TableType.SHARDING;
+import static com.alibaba.polardbx.optimizer.config.table.GsiMetaManager.TableType.SINGLE;
+
+/**
+ * @author guxu
+ */
+public class AlterPartitionKeyUtils {
+
+    private static final Logger LOGGER = SQLRecorderLogger.ddlEngineLogger;
+
+    private static final int CUT_OVER_FLAG = 0x1;
+    private static final int FLAG_AUTO_PARTITION = 0x2;
+
+    /**
+     * When the partition key of a sharded table with GSIs is changed, some GSIs may not
+     * contain the table's new partition key, so those GSIs have to be rebuilt.
+     * https://yuque.antfin.com/coronadb/design/onmpll
+     */
+    private static List<Pair<String, SqlIndexDefinition>> initIndexInfoForRebuildingGsi(
+        SqlCreateTable primaryTableNode,
+        SqlAlterTablePartitionKey ast,
+        String primaryTableDefinition) {
+
+        String schemaName = ast.getOriginTableName().getComponent(0).getLastName();
+        String sourceTableName = ast.getOriginTableName().getComponent(1).getLastName();
+
+        List<Pair<String, SqlIndexDefinition>> result = new ArrayList<>();
+
+        List<org.apache.calcite.util.Pair<SqlIdentifier, SqlIndexDefinition>> globalKeys =
+            primaryTableNode.getGlobalKeys();
+        if (CollectionUtils.isEmpty(globalKeys)) {
+            return result;
+        }
+
+        Map<String, SqlIndexDefinition> gsiMap = new HashMap<>();
+        for (org.apache.calcite.util.Pair<SqlIdentifier, SqlIndexDefinition> e : globalKeys) {
+            gsiMap.put(e.getKey().getLastName(), e.getValue());
+        }
+        // columns that every GSI must contain after the repartition
+        final Set<String> expectedPkSkList = getExpectedPrimaryAndShardingKeys(
+            schemaName,
+            sourceTableName,
+            ast.isSingle(),
+            ast.isBroadcast(),
+            ast.getDbPartitionBy(),
+            ast.getTablePartitionBy()
+        );
+
+        for (Map.Entry<String, SqlIndexDefinition> entry : gsiMap.entrySet()) {
+            final String gsiName = entry.getKey();
+            final SqlIndexDefinition gsiDefinition = entry.getValue();
+
+            final Set<String> gsiAllColumns = new HashSet<>();
+            gsiAllColumns.addAll(
+                gsiDefinition.getColumns().stream().map(e -> e.getColumnNameStr().toLowerCase())
+                    .collect(Collectors.toSet())
+            );
+            if (gsiDefinition.getCovering() != null) {
+                gsiAllColumns.addAll(
+                    gsiDefinition.getCovering().stream().map(e -> e.getColumnNameStr().toLowerCase())
+                        .collect(Collectors.toSet())
+                );
+            }
+
+            final List<SqlIndexColumnName> finalCoveringColumns = new ArrayList<>();
+            if (gsiDefinition.getCovering() != null) {
+                finalCoveringColumns.addAll(gsiDefinition.getCovering());
+            }
+
+            // if the GSI is missing any expected column, add it to the covering columns of the rebuilt GSI
+            if (!CollectionUtils.isSubCollection(expectedPkSkList, gsiAllColumns)) {
+                final Collection<String> missingColumnNames = CollectionUtils.subtract(expectedPkSkList, gsiAllColumns);
+                List<SqlIndexColumnName> missingColumns = missingColumnNames.stream()
+                    .map(e -> new SqlIndexColumnName(SqlParserPos.ZERO, new SqlIdentifier(e, SqlParserPos.ZERO), null,
+                        null))
+                    .collect(Collectors.toList());
+                finalCoveringColumns.addAll(missingColumns);
+            } else {
+                // if the GSI is not missing any column, do not rebuild it
+                continue;
+            }
+
+            final String randomSuffix =
+                RandomStringUtils.randomAlphanumeric(RANDOM_SUFFIX_LENGTH_OF_PHYSICAL_TABLE_NAME).toLowerCase();
+            final String rebuildGsiName = gsiName + "_" + randomSuffix;
+            SqlIndexDefinition indexDef = SqlIndexDefinition.globalIndex(SqlParserPos.ZERO,
+                false,
+                null,
+                null,
+                null,
+                new SqlIdentifier(rebuildGsiName, SqlParserPos.ZERO),
+                (SqlIdentifier) primaryTableNode.getTargetTable(),
+                gsiDefinition.getColumns(),
+                finalCoveringColumns,
+                gsiDefinition.getDbPartitionBy(),
+                gsiDefinition.getTbPartitionBy(),
+                gsiDefinition.getTbPartitions(),
+                gsiDefinition.getPartitioning(),
+                new LinkedList<>(),
+                gsiDefinition.getTableGroupName(),
+                gsiDefinition.isWithImplicitTableGroup(),
+                true);
+            indexDef.setPrimaryTableNode(primaryTableNode);
+            indexDef.setPrimaryTableDefinition(primaryTableDefinition);
+            result.add(Pair.of(gsiName, indexDef));
+        }
+
+        return result;
+    }
+
+}
diff --git
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/util/AlterRepartitionUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/util/AlterRepartitionUtils.java index d93f0bff3..27d1df8d7 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/util/AlterRepartitionUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/util/AlterRepartitionUtils.java @@ -20,8 +20,11 @@ import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.druid.sql.SQLUtils; +import com.alibaba.polardbx.druid.sql.ast.SQLExpr; +import com.alibaba.polardbx.druid.sql.ast.expr.SQLNumberExpr; +import com.alibaba.polardbx.executor.common.ExecutorContext; +import com.alibaba.polardbx.executor.handler.LogicalShowCreateTablesForShardingDatabaseHandler; import com.alibaba.polardbx.gms.metadb.table.IndexStatus; -import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.GlobalIndexMeta; @@ -29,13 +32,13 @@ import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTablePartitionsPrepareData; +import com.alibaba.polardbx.optimizer.parse.FastsqlParser; import com.alibaba.polardbx.optimizer.partition.PartitionByDefinition; import com.alibaba.polardbx.optimizer.partition.PartitionInfo; import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil; import com.alibaba.polardbx.optimizer.partition.PartitionSpec; import com.alibaba.polardbx.optimizer.partition.common.PartSpecNormalizationParams; import com.alibaba.polardbx.optimizer.partition.common.PartitionStrategy; -import com.alibaba.polardbx.optimizer.tablegroup.TableGroupInfoManager; import com.alibaba.polardbx.optimizer.utils.RelUtils; import org.apache.calcite.sql.SqlAlterTable; import org.apache.calcite.sql.SqlAlterTablePartitionKey; @@ -48,8 +51,10 @@ import org.apache.calcite.sql.SqlIndexDefinition; import org.apache.calcite.sql.SqlLiteral; import org.apache.calcite.sql.SqlNode; +import org.apache.calcite.sql.SqlNodeList; import org.apache.calcite.sql.SqlPartition; import org.apache.calcite.sql.SqlPartitionBy; +import org.apache.calcite.sql.SqlPartitionByCoHash; import org.apache.calcite.sql.SqlPartitionByHash; import org.apache.calcite.sql.SqlPartitionByList; import org.apache.calcite.sql.SqlPartitionByRange; @@ -57,6 +62,7 @@ import org.apache.calcite.sql.SqlPartitionValueItem; import org.apache.calcite.sql.SqlSubPartition; import org.apache.calcite.sql.SqlSubPartitionBy; +import org.apache.calcite.sql.SqlSubPartitionByCoHash; import org.apache.calcite.sql.SqlSubPartitionByHash; import org.apache.calcite.sql.SqlSubPartitionByList; import org.apache.calcite.sql.SqlSubPartitionByRange; @@ -124,6 +130,43 @@ public static SqlIndexDefinition initIndexInfo(SqlCreateTable primaryTableNode, alterTablePartitionKey.getDbPartitionBy(), alterTablePartitionKey.getTablePartitionBy(), alterTablePartitionKey.getTbpartitions(), + null, + null, + false + ); + + indexDef.setBroadcast(alterTablePartitionKey.isBroadcast()); + indexDef.setSingle(alterTablePartitionKey.isSingle()); + indexDef.setPrimaryTableNode(primaryTableNode); + indexDef.setPrimaryTableDefinition(primaryTableDefinition); + return indexDef; + } + + /** + * for 
sharding db omc + */ + public static SqlIndexDefinition initIndexInfo4DrdsOmc(String newIndexName, + List indexKeys, + List coverKeys, + boolean isPrimary, + boolean isUnique, + String primaryTableDefinition, + SqlCreateTable primaryTableNode, + SqlAlterTablePartitionKey alterTablePartitionKey) { + if (StringUtils.isEmpty(newIndexName)) { + throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_TABLE, "partition table name is empty"); + } + + SqlIndexDefinition indexDef = genSqlIndexDefinition( + primaryTableNode, + indexKeys, + coverKeys, + isPrimary, + isUnique, + newIndexName, + alterTablePartitionKey.getDbPartitionBy(), + alterTablePartitionKey.getTablePartitionBy(), + alterTablePartitionKey.getTbpartitions(), null ); @@ -183,7 +226,10 @@ public static SqlIndexDefinition initIndexInfo(String schemaName, null, null, null, - alterTableNewPartition.getSqlPartition() + alterTableNewPartition.getSqlPartition(), + StringUtils.isNotEmpty(alterTableNewPartition.getTargetImplicitTableGroupName()) ? + new SqlIdentifier(alterTableNewPartition.getTargetImplicitTableGroupName(), SqlParserPos.ZERO) : null, + StringUtils.isNotEmpty(alterTableNewPartition.getTargetImplicitTableGroupName()) ); indexDef.setBroadcast(alterTableNewPartition.isBroadcast()); @@ -203,7 +249,9 @@ public static SqlIndexDefinition initIndexInfo(String newIndexName, boolean isPrimary, boolean isUnique, String primaryTableDefinition, - SqlCreateTable primaryTableNode) { + SqlCreateTable primaryTableNode, + SqlNode tableGroupName, + boolean withImplicitTablegroup) { if (StringUtils.isEmpty(newIndexName)) { throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_TABLE, "partition table name is empty"); } @@ -228,7 +276,9 @@ public static SqlIndexDefinition initIndexInfo(String newIndexName, null, null, null, - genPartitioning(partitionKeys) + genPartitioning(partitionKeys), + tableGroupName, + withImplicitTablegroup ); indexDef.setBroadcast(false); @@ -248,7 +298,9 @@ public static SqlIndexDefinition initIndexInfo(String newIndexName, boolean isUnique, String primaryTableDefinition, SqlCreateTable primaryTableNode, - SqlNode partitioning) { + SqlNode partitioning, + SqlNode tableGroup, + boolean withImplicitTablegroup) { if (StringUtils.isEmpty(newIndexName)) { throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_TABLE, "partition table name is empty"); } @@ -263,7 +315,9 @@ public static SqlIndexDefinition initIndexInfo(String newIndexName, null, null, null, - partitioning + partitioning, + tableGroup, + withImplicitTablegroup ); indexDef.setBroadcast(false); @@ -280,7 +334,9 @@ public static SqlIndexDefinition initIndexInfo(String newIndexName, public static List initIndexInfo(String schemaName, int partitions, List createGsiPrepareData, SqlCreateTable primaryTableNode, - String primaryTableDefinition) { + String primaryTableDefinition, + SqlNode tableGroupName, + boolean withImplicitTablegroup) { if (createGsiPrepareData == null || createGsiPrepareData.isEmpty()) { throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_TABLE, "partition table name is empty"); } @@ -328,7 +384,9 @@ public static List initIndexInfo(String schemaName, int part null, null, null, - sqlPartitionBy + sqlPartitionBy, + tableGroupName, + withImplicitTablegroup ); indexDef.setPrimaryTableNode(primaryTableNode); @@ -345,6 +403,17 @@ private static SqlIndexDefinition genSqlIndexDefinition(SqlCreateTable sqlCreate boolean isPrimary, boolean isUnique, String newTableName, SqlNode dbPartitionBy, SqlNode tbPartitionBy, SqlNode tbPartitions, SqlNode partitioning) { + 
return genSqlIndexDefinition(sqlCreateTable, partitionColumnList, coveringColumnList, isPrimary, isUnique, + newTableName, dbPartitionBy, tbPartitionBy, tbPartitions, partitioning, null, false); + } + + private static SqlIndexDefinition genSqlIndexDefinition(SqlCreateTable sqlCreateTable, + List partitionColumnList, + List coveringColumnList, + boolean isPrimary, boolean isUnique, String newTableName, + SqlNode dbPartitionBy, SqlNode tbPartitionBy, + SqlNode tbPartitions, SqlNode partitioning, + SqlNode tableGroup, boolean withImplicitTablegroup) { if (sqlCreateTable == null || partitionColumnList == null || partitionColumnList.isEmpty()) { return null; } @@ -377,7 +446,54 @@ private static SqlIndexDefinition genSqlIndexDefinition(SqlCreateTable sqlCreate tbPartitions, partitioning, new LinkedList<>(), + tableGroup, + withImplicitTablegroup, + true); + } + + public static SqlIndexDefinition genSqlIndexDefinition(List partitionColumnList, + List coveringColumnList, + boolean isUnique, + SqlIndexDefinition.SqlIndexType indexType, + String indexName, + String tableName, + SqlNode dbPartitionBy, + SqlNode tbPartitionBy, + SqlNode tbPartitions, + SqlNode partitioning, + boolean withImplicitTableGroup) { + if (partitionColumnList == null || partitionColumnList.isEmpty()) { + return null; + } + + List indexColumns = partitionColumnList.stream() + .map(e -> new SqlIndexColumnName(SqlParserPos.ZERO, new SqlIdentifier(e, SqlParserPos.ZERO), null, null)) + .collect(Collectors.toList()); + + List coveringColumns = coveringColumnList.stream() + .map(e -> new SqlIndexColumnName(SqlParserPos.ZERO, new SqlIdentifier(e, SqlParserPos.ZERO), null, null)) + .collect(Collectors.toList()); + + return SqlIndexDefinition.columnarIndex( + SqlParserPos.ZERO, + false, + null, + isUnique ? 
"UNIQUE" : null, + indexType, + new SqlIdentifier(indexName, SqlParserPos.ZERO), + new SqlIdentifier(tableName, SqlParserPos.ZERO), + indexColumns, + coveringColumns, + dbPartitionBy, + tbPartitionBy, + tbPartitions, + partitioning, null, + new LinkedList<>(), + null, + null, + new LinkedList<>(), + withImplicitTableGroup, true); } @@ -405,13 +521,15 @@ public static List getShardColumnsFromPartitionBy(SqlPartitionBy sqlPart columns.addAll(subPartitionBy.getColumns()); } for (SqlNode column : columns) { - if (column instanceof SqlBasicCall) { - for (SqlNode col : ((SqlBasicCall) column).operands) { - shardColumns.addAll(((SqlIdentifier) col).names); - } - } else { - shardColumns.addAll(((SqlIdentifier) column).names); - } +// if (column instanceof SqlBasicCall) { +// for (SqlNode col : ((SqlBasicCall) column).operands) { +// shardColumns.addAll(((SqlIdentifier) col).names); +// } +// } else { +// shardColumns.addAll(((SqlIdentifier) column).names); +// } + String colName = PartitionInfoUtil.findPartitionColumn(column); + shardColumns.add(colName); } return new ArrayList<>(shardColumns); } @@ -625,6 +743,66 @@ public static String genGlobalIndexName(String schema, String indexName, Executi // return sqlPartitionBy; // } + public static SqlAlterTablePartitionKey generateSqlPartitionKey(String schemaName, String tableName, + ExecutionContext executionContext) { + ExecutorContext executorContext = ExecutorContext.getContext(schemaName); + if (null == executorContext) { + throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_DATABASE, schemaName); + } + GsiMetaManager metaManager = executorContext.getGsiManager().getGsiMetaManager(); + List tableRecords = metaManager.getTableRecords(schemaName, tableName); + GsiMetaManager.TableRecord tableRecord = tableRecords.get(0); + + SQLExpr dbPartitionBy = LogicalShowCreateTablesForShardingDatabaseHandler.buildPartitionBy( + tableRecord.getDbPartitionPolicy(), + tableRecord.getDbPartitionKey(), + false + ); + + SQLExpr tbPartitionBy = LogicalShowCreateTablesForShardingDatabaseHandler.buildPartitionBy( + tableRecord.getTbPartitionPolicy(), + tableRecord.getTbPartitionKey(), + true + ); + + final SQLExpr dbPartitions = + tableRecord.getDbPartitionCount() == null ? null : new SQLNumberExpr(tableRecord.getDbPartitionCount()); + + final SQLExpr tbPartitions = + tableRecord.getTbPartitionCount() == null ? 
null : new SQLNumberExpr(tableRecord.getTbPartitionCount()); + + StringBuilder sb = new StringBuilder(); + sb.append("alter table "); + sb.append(SqlIdentifier.surroundWithBacktick(tableName)); + + if (tableRecord.getTableType() == GsiMetaManager.TableType.SHARDING.getValue() + || tableRecord.getTableType() == GsiMetaManager.TableType.GSI.getValue()) { + if (dbPartitionBy != null) { + sb.append(" dbpartition by ").append(dbPartitionBy); + if (dbPartitions != null) { + sb.append(" dbpartitions ").append(dbPartitions); + } + + if (tbPartitionBy != null) { + sb.append(" tbpartition by ").append(tbPartitionBy); + if (tbPartitions != null) { + sb.append(" tbpartitions ").append(tbPartitions); + } + } + } + } else if (tableRecord.getTableType() == GsiMetaManager.TableType.SINGLE.getValue()) { + sb.append(" single"); + } else if (tableRecord.getTableType() == GsiMetaManager.TableType.BROADCAST.getValue()) { + sb.append(" broadcast"); + } + + String sql = sb.toString(); + + SqlNodeList astList = new FastsqlParser().parse(sql, executionContext); + + return (SqlAlterTablePartitionKey) astList.get(0); + } + public static SqlPartitionBy generateSqlPartitionBy(String tableName, String tableGroupName, PartitionInfo srcPartitionInfo, @@ -705,6 +883,11 @@ public static SqlPartitionBy genPartitionBy(String tableName, sqlPartitionBy = new SqlPartitionByHash(true, false, SqlParserPos.ZERO); sourceSql = "key(" + builder + ") PARTITIONS " + refPartByDef.getPartitions().size(); break; + case CO_HASH: + sqlPartitionBy = new SqlPartitionByCoHash(SqlParserPos.ZERO); + sourceSql = + "co_hash(" + builder + ") PARTITIONS " + refPartByDef.getPartitions().size(); + break; case RANGE: sqlPartitionBy = new SqlPartitionByRange(SqlParserPos.ZERO); break; @@ -823,6 +1006,14 @@ public static SqlSubPartitionBy genSubPartitionBy(String tableName, if (refSubPartByDef.isUseSubPartTemplate()) { sourceSql += " SUBPARTITIONS " + refSubPartByDef.getPartitions().size(); } + break; + case CO_HASH: + sqlSubPartitionBy = new SqlSubPartitionByCoHash(SqlParserPos.ZERO); + sourceSql = "co_hash(" + builder + ")"; + if (refSubPartByDef.isUseSubPartTemplate()) { + sourceSql += " SUBPARTITIONS " + refSubPartByDef.getPartitions().size(); + } + break; case UDF_HASH: sqlSubPartitionBy = new SqlSubPartitionByUdfHash(SqlParserPos.ZERO); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/util/ColumnarTransactionUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/util/ColumnarTransactionUtils.java new file mode 100644 index 000000000..7eb286709 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/util/ColumnarTransactionUtils.java @@ -0,0 +1,276 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.executor.gms.util;
+
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.common.oss.ColumnarFileType;
+import com.alibaba.polardbx.common.properties.DynamicConfig;
+import com.alibaba.polardbx.common.utils.Pair;
+import com.alibaba.polardbx.executor.gms.ColumnarManager;
+import com.alibaba.polardbx.executor.sync.RequestColumnarSnapshotSeqSyncAction;
+import com.alibaba.polardbx.executor.sync.SyncManagerHelper;
+import com.alibaba.polardbx.gms.metadb.table.ColumnarAppendedFilesAccessor;
+import com.alibaba.polardbx.gms.metadb.table.ColumnarAppendedFilesRecord;
+import com.alibaba.polardbx.gms.metadb.table.ColumnarCheckpointsAccessor;
+import com.alibaba.polardbx.gms.metadb.table.ColumnarCheckpointsRecord;
+import com.alibaba.polardbx.gms.metadb.table.ColumnarTableMappingRecord;
+import com.alibaba.polardbx.gms.metadb.table.ColumnarTableStatus;
+import com.alibaba.polardbx.gms.metadb.table.FilesAccessor;
+import com.alibaba.polardbx.gms.metadb.table.OrcFileStatusRecord;
+import com.alibaba.polardbx.gms.partition.TablePartitionAccessor;
+import com.alibaba.polardbx.gms.partition.TablePartitionConfig;
+import com.alibaba.polardbx.gms.sync.IGmsSyncAction;
+import com.alibaba.polardbx.gms.sync.SyncScope;
+import com.alibaba.polardbx.gms.topology.SystemDbHelper;
+import com.alibaba.polardbx.gms.util.MetaDbUtil;
+import com.alibaba.polardbx.optimizer.core.datatype.DataTypes;
+import com.google.common.collect.ImmutableList;
+import org.jetbrains.annotations.NotNull;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.stream.Collectors;
+
+public class ColumnarTransactionUtils {
+    @NotNull
+    public static Long getMinColumnarSnapshotTime() {
+        IGmsSyncAction action = new RequestColumnarSnapshotSeqSyncAction();
+        List<List<Map<String, Object>>> results =
+            SyncManagerHelper.sync(action, SystemDbHelper.DEFAULT_DB_NAME, SyncScope.ALL);
+
+        long minSnapshotKeepTime = DynamicConfig.getInstance().getMinSnapshotKeepTime();
+        long minSnapshotTime = ColumnarManager.getInstance().latestTso() - (minSnapshotKeepTime << 22);
+
+        for (List<Map<String, Object>> nodeRows : results) {
+            if (nodeRows == null) {
+                continue;
+            }
+            for (Map<String, Object> row : nodeRows) {
+                Long time = DataTypes.LongType.convertFrom(row.get("TSO"));
+                if (time == null) {
+                    continue;
+                }
+
+                minSnapshotTime = Math.min(minSnapshotTime, time);
+            }
+        }
+        return minSnapshotTime;
+    }
+
+    public static Long getLatestTsoFromGms() {
+        try (Connection connection = MetaDbUtil.getConnection()) {
+            ColumnarCheckpointsAccessor checkpointsAccessor = new ColumnarCheckpointsAccessor();
+            checkpointsAccessor.setConnection(connection);
+
+            return checkpointsAccessor.queryLatestTso();
+        } catch (SQLException e) {
+            throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_SNAPSHOT, e,
+                "Failed to fetch latest columnar tso");
+        }
+    }
+
+    /**
+     * @return the latest tso of a checkpoint that contains only orc files (no csv files),
+     * as a pair of (InnoDB tso, columnar tso)
+     */
+    public static Pair<Long, Long> getLatestOrcCheckpointTsoFromGms() {
+        try (Connection connection = MetaDbUtil.getConnection()) {
+            ColumnarCheckpointsAccessor checkpointsAccessor = new ColumnarCheckpointsAccessor();
+            checkpointsAccessor.setConnection(connection);
+
+            return checkpointsAccessor.queryLatestTsoPair();
+        } catch (SQLException e) {
+            throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_SNAPSHOT, e,
+                "Failed to fetch latest columnar tso");
+        }
+    }
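/*
 * Editor's sketch (assumption, not stated in the patch: the TSO keeps the physical
 * timestamp in its upper bits with 22 low bits reserved for a logical sequence,
 * which is why the keep time in milliseconds is shifted left by 22 above).
 * Converting a retention window into a TSO delta then looks like:
 */
// long keepTimeMs = 60_000L;                  // retain 60s of snapshots
// long tsoDelta = keepTimeMs << 22;           // milliseconds -> TSO units
// long minReadableTso = latestTso - tsoDelta; // oldest snapshot still readable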
+
+    /**
+     * SHOW COLUMNAR STATUS also displays the creation progress of indexes that are
+     * still being built, so the tso here corresponds to a richer set of checkpoint types.
+     */
+    public static Long getLatestShowColumnarStatusTsoFromGms() {
+        try (Connection connection = MetaDbUtil.getConnection()) {
+            ColumnarCheckpointsAccessor checkpointsAccessor = new ColumnarCheckpointsAccessor();
+            checkpointsAccessor.setConnection(connection);
+
+            return checkpointsAccessor.queryLatestTsoByShowColumnarStatus();
+        } catch (SQLException e) {
+            throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_SNAPSHOT, e,
+                "Failed to fetch latest columnar tso");
+        }
+    }
+
+    public static class ColumnarIndexStatusRow {
+        public long tso;
+        public String tableSchema;
+        public String tableName;
+        public String indexName;
+        public long indexId;
+        public long partitionNum;
+
+        public String status;
+        public long orcFileNum;
+        public long orcRows;
+        public long orcFileSize;
+        public long csvFileNum;
+        public long csvRows;
+        public long csvFileSize;
+        public long delFileNum;
+        public long delRows;
+        public long delFileSize;
+    }
+
+    public static List<ColumnarIndexStatusRow> queryColumnarIndexStatus(Long tso,
+                                                                        List<ColumnarTableMappingRecord> columnarRecords) {
+        if (tso == null) {
+            return ImmutableList.of();
+        }
+
+        List<ColumnarIndexStatusRow> result = new ArrayList<>(columnarRecords.size());
+        // collect statistics for every columnar index
+        for (ColumnarTableMappingRecord record : columnarRecords) {
+            Long tableId = record.tableId;
+            String schemaName = record.tableSchema;
+            String tableName = record.tableName;
+            String indexName = record.indexName;
+            // a. fetch the columnar partition names of each table
+            List<String> partitionNameList;
+            try (Connection metaDbConn = MetaDbUtil.getConnection()) {
+                TablePartitionAccessor tablePartitionAccessor = new TablePartitionAccessor();
+                tablePartitionAccessor.setConnection(metaDbConn);
+                TablePartitionConfig tablePartitionConfig =
+                    tablePartitionAccessor.getPublicTablePartitionConfig(schemaName, indexName);
+                partitionNameList = tablePartitionConfig.getPartitionSpecConfigs().stream()
+                    .map(partitionSpecConfig -> partitionSpecConfig.getSpecConfigInfo().partName)
+                    .collect(Collectors.toList());
+            } catch (Exception e) {
+                // a table that is still being created may have been rolled back, so its
+                // partition info cannot be found; this can safely be ignored
+                if (record.status.equalsIgnoreCase(ColumnarTableStatus.CREATING.name())) {
+                    continue;
+                } else {
+                    throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC,
+                        "fail to fetch partition info by columnar index: " + record.tableSchema + "."
+                            + record.tableName
+                            + "(" + record.indexName + "(" + record.tableId + "))", e);
+                }
+            }
+            ColumnarIndexStatusRow row = new ColumnarIndexStatusRow();
+            row.tso = tso;
+            row.tableSchema = schemaName;
+            row.tableName = tableName;
+            row.indexName = indexName;
+            row.indexId = record.tableId;
+            row.partitionNum = partitionNameList.size();
+
+            try (Connection metaDbConn = MetaDbUtil.getConnection()) {
+                FilesAccessor filesAccessor = new FilesAccessor();
+                filesAccessor.setConnection(metaDbConn);
+
+                // orc file statistics
+                List<OrcFileStatusRecord> orcFileStatusRecords =
+                    filesAccessor.queryOrcFileStatusByTsoAndTableId(tso, schemaName, String.valueOf(tableId));
+                if (!orcFileStatusRecords.isEmpty()) {
+                    row.orcFileNum = orcFileStatusRecords.get(0).fileCounts;
+                    row.orcRows = orcFileStatusRecords.get(0).rowCounts;
+                    row.orcFileSize = orcFileStatusRecords.get(0).fileSizes;
+                }
+                ColumnarAppendedFilesAccessor appendedFilesAccessor = new ColumnarAppendedFilesAccessor();
+                appendedFilesAccessor.setConnection(metaDbConn);
+
+                // csv/del file statistics
+                List<ColumnarAppendedFilesRecord> appendedFilesRecords =
+                    appendedFilesAccessor.queryLastValidAppendByTsoAndTableId(tso, schemaName,
+                        String.valueOf(tableId));
+                // records are sorted by tso in descending order, so the total deleted row count only
+                // needs the latest append record of each partition's newest file, which also guards
+                // against counting multiple del files
+                Set<String> haveRecordDelRowPartitions = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
+                for (ColumnarAppendedFilesRecord appendRecord : appendedFilesRecords) {
+                    String fileName = appendRecord.fileName;
+                    String suffix = fileName.substring(fileName.lastIndexOf('.') + 1);
+                    ColumnarFileType columnarFileType = ColumnarFileType.of(suffix);
+                    switch (columnarFileType) {
+                    case CSV:
+                        row.csvFileNum++;
+                        row.csvFileSize += appendRecord.appendOffset + appendRecord.appendLength;
+                        row.csvRows += appendRecord.totalRows;
+                        break;
+                    case DEL:
+                        row.delFileNum++;
+                        row.delFileSize += appendRecord.appendOffset + appendRecord.appendLength;
+                        if (!haveRecordDelRowPartitions.contains(appendRecord.partName)) {
+                            // a del file's row count records the total deleted rows of the partition,
+                            // so only the latest record is needed
+                            row.delRows += appendRecord.totalRows;
+                            haveRecordDelRowPartitions.add(appendRecord.partName);
+                        }
+                    default:
+                        break;
+                    }
+                }
+
+            } catch (SQLException e) {
+                throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC,
+                    "fail to fetch files for Index: " + record.indexName, e);
+            }
+
+            String status = record.status;
+            // for a table that is still being created, show the progress
+            if (record.status.equalsIgnoreCase(ColumnarTableStatus.CREATING.name())) {
+                StringBuilder str = new StringBuilder();
+                str.append(ColumnarTableStatus.CREATING.name()).append(" -> { ");
+                try (Connection metaDbConn = MetaDbUtil.getConnection()) {
+
+                    ColumnarCheckpointsAccessor checkpointsAccessor = new ColumnarCheckpointsAccessor();
+                    checkpointsAccessor.setConnection(metaDbConn);
+                    // fetch the select progress
+                    List<ColumnarCheckpointsRecord> selectRecords =
+                        checkpointsAccessor.queryLastRecordByTableAndTsoAndTypes(schemaName, tableId.toString(), tso,
+                            ImmutableList.of(
+                                ColumnarCheckpointsAccessor.CheckPointType.SNAPSHOT,
+                                ColumnarCheckpointsAccessor.CheckPointType.SNAPSHOT_END));
+                    if (selectRecords.isEmpty()) {
+                        // not started yet, or the first commit has not happened
+                        str.append("select: 0/1(0%);");
+                    } else {
+                        str.append(selectRecords.get(0).extra).append(";");
+                    }
+                    // fetch the compaction progress
+                    List<ColumnarCheckpointsRecord> compactionRecords =
+                        checkpointsAccessor.queryRecordsByTableAndTsoAndTypes(schemaName, tableId.toString(), tso,
+                            ImmutableList.of(ColumnarCheckpointsAccessor.CheckPointType.SNAPSHOT_FINISHED));
+                    int finishedCompaction = Math.min(compactionRecords.size(), partitionNameList.size());
+                    int percent = 100 * finishedCompaction / partitionNameList.size();
+                    str.append(" compaction: 
").append(finishedCompaction).append('/').append(partitionNameList.size()); + str.append("(").append(percent).append("%)"); + + } catch (SQLException e) { + throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, + "fail to fetch create status by columnar index: " + record.tableSchema + "." + record.tableName + + "(" + record.indexName + "(" + record.tableId + "))", e); + } + str.append(" }"); + status = str.toString(); + } + row.status = status; + result.add(row); + } + return result; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/util/StatisticUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/util/StatisticUtils.java index 475ca94d8..b15341981 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/util/StatisticUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gms/util/StatisticUtils.java @@ -17,6 +17,7 @@ package com.alibaba.polardbx.executor.gms.util; import com.alibaba.druid.util.JdbcUtils; +import com.alibaba.druid.util.StringUtils; import com.alibaba.polardbx.common.Engine; import com.alibaba.polardbx.common.datatype.Decimal; import com.alibaba.polardbx.common.exception.TddlRuntimeException; @@ -26,11 +27,11 @@ import com.alibaba.polardbx.common.oss.OSSMetaLifeCycle; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.LoggerUtil; import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.common.utils.thread.ExecutorUtil; import com.alibaba.polardbx.druid.sql.parser.ByteString; -import com.alibaba.polardbx.druid.util.StringUtils; import com.alibaba.polardbx.executor.ExecutorHelper; import com.alibaba.polardbx.executor.PlanExecutor; import com.alibaba.polardbx.executor.common.ExecutorContext; @@ -47,6 +48,7 @@ import com.alibaba.polardbx.gms.module.LogLevel; import com.alibaba.polardbx.gms.module.Module; import com.alibaba.polardbx.gms.module.ModuleLogInfo; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.tablegroup.TableGroupLocation; import com.alibaba.polardbx.gms.topology.DbTopologyManager; import com.alibaba.polardbx.gms.topology.GroupDetailInfoExRecord; @@ -56,6 +58,7 @@ import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; import com.alibaba.polardbx.optimizer.config.table.FileMeta; import com.alibaba.polardbx.optimizer.config.table.GlobalIndexMeta; +import com.alibaba.polardbx.optimizer.config.table.IndexMeta; import com.alibaba.polardbx.optimizer.config.table.OSSOrcFileMeta; import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.config.table.statistic.Histogram; @@ -74,7 +77,6 @@ import com.alibaba.polardbx.optimizer.optimizeralert.OptimizerAlertUtil; import com.alibaba.polardbx.optimizer.partition.PartitionInfo; import com.alibaba.polardbx.optimizer.partition.PartitionInfoManager; -import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil; import com.alibaba.polardbx.optimizer.partition.PartitionSpec; import com.alibaba.polardbx.optimizer.planmanager.PlanManager; import com.alibaba.polardbx.optimizer.rule.TddlRuleManager; @@ -84,13 +86,16 @@ import com.alibaba.polardbx.rule.TableRule; import com.alibaba.polardbx.statistics.RuntimeStatHelper; import com.alibaba.polardbx.stats.MatrixStatistics; +import 
com.clearspring.analytics.stream.StreamSummary; import com.clearspring.analytics.stream.cardinality.HyperLogLog; import com.clearspring.analytics.stream.membership.BloomFilter; +import com.clearspring.analytics.util.Lists; import com.google.common.collect.Maps; import io.airlift.slice.Slice; import org.apache.calcite.plan.RelOptCluster; import org.apache.calcite.rex.RexBuilder; import org.apache.calcite.rex.RexNode; +import org.apache.calcite.sql.SqlIdentifier; import org.apache.calcite.sql.fun.SqlStdOperatorTable; import org.apache.commons.collections.CollectionUtils; import org.glassfish.jersey.internal.guava.Sets; @@ -102,21 +107,22 @@ import java.sql.Statement; import java.util.ArrayList; import java.util.Arrays; +import java.util.BitSet; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Random; import java.util.Set; +import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; import static com.alibaba.polardbx.common.properties.ConnectionParams.STATISTIC_VISIT_DN_TIMEOUT; import static com.alibaba.polardbx.common.utils.GeneralUtil.unixTimeStamp; -import static com.alibaba.polardbx.executor.scheduler.executor.statistic.StatisticSampleCollectionScheduledJob.DATA_MAX_LEN; import static com.alibaba.polardbx.gms.module.LogLevel.CRITICAL; import static com.alibaba.polardbx.gms.module.LogLevel.NORMAL; import static com.alibaba.polardbx.gms.module.LogLevel.WARNING; @@ -125,6 +131,7 @@ import static com.alibaba.polardbx.gms.module.LogPattern.PROCESS_START; import static com.alibaba.polardbx.gms.module.LogPattern.UNEXPECTED; import static com.alibaba.polardbx.gms.scheduler.ScheduledJobExecutorType.STATISTIC_ROWCOUNT_COLLECTION; +import static com.alibaba.polardbx.optimizer.config.table.statistic.StatisticUtils.DATA_MAX_LEN; import static com.alibaba.polardbx.optimizer.config.table.statistic.StatisticUtils.DEFAULT_SAMPLE_SIZE; import static com.alibaba.polardbx.optimizer.config.table.statistic.StatisticUtils.buildColumnsName; import static com.alibaba.polardbx.optimizer.config.table.statistic.StatisticUtils.getColumnMetas; @@ -135,7 +142,8 @@ */ public class StatisticUtils { - private static final Logger logger = LoggerFactory.getLogger("STATISTICS"); + // Statistics logger + public final static Logger logger = LoggerUtil.statisticsLogger; private static String PRESENT_SQL = "select sum(extent_size) as size from files where " + "logical_schema_name = '%s' and logical_table_name = '%s' and remove_ts is null and " + @@ -152,14 +160,36 @@ public class StatisticUtils { "SELECT table_schema, table_name, table_rows FROM information_schema.tables"; public static boolean forceAnalyzeColumns(String schema, String logicalTableName) { - return forceAnalyzeColumnsDdl(schema, logicalTableName, null); + return forceAnalyzeColumnsDdl(schema, logicalTableName, new ArrayList<>(), null); } - public static boolean forceAnalyzeColumnsDdl(String schema, String logicalTableName, ExecutionContext ec) { + public static boolean forceAnalyzeColumnsDdl(String schema, String logicalTableName, List errMsg, + ExecutionContext ec) { try { + long startNanos = System.nanoTime(); + // check table if exists + if (OptimizerContext.getContext(schema).getLatestSchemaManager() + .getTableWithNull(logicalTableName) == null) { + errMsg.add("FAIL skip tables that not exists:" + schema + "," + 
logicalTableName); + return false; + } + collectRowCount(schema, logicalTableName); + long endNanos = System.nanoTime(); + logger.info(String.format("Collecting row count of %s.%s consumed %.2fs", + schema, logicalTableName, (endNanos - startNanos) / 1_000_000_000D)); + + startNanos = endNanos; sampleTableDdl(schema, logicalTableName, ec); + endNanos = System.nanoTime(); + logger.info(String.format("Sampling %s.%s consumed %.2fs", + schema, logicalTableName, (endNanos - startNanos) / 1_000_000_000D)); + + startNanos = endNanos; sketchTableDdl(schema, logicalTableName, true, ec); + endNanos = System.nanoTime(); + logger.info(String.format("HLL sketch of %s.%s consumed %.2fs", + schema, logicalTableName, (endNanos - startNanos) / 1_000_000_000D)); /** persist */ persistStatistic(schema, logicalTableName, true); @@ -169,11 +199,15 @@ public static boolean forceAnalyzeColumnsDdl(String schema, String logicalTableN schema, logicalTableName, StatisticManager.getInstance().getCacheLine(schema, logicalTableName)), - schema); + schema, + SyncScope.ALL); + } catch (Exception e) { logger.error(e); + errMsg.add("FAIL " + e.getMessage()); return false; } + errMsg.add("OK"); return true; } @@ -193,7 +227,8 @@ public static void collectRowCount(String schema, String logicalTableName) throw STATISTIC_ROWCOUNT_COLLECTION + " FROM ANALYZE", schema + "," + logicalTableName + ":" + remark }, - WARNING); + WARNING, + e); throw e; } } else { @@ -235,14 +270,16 @@ public static void collectRowCount(String schema, String logicalTableName) throw STATISTIC_ROWCOUNT_COLLECTION + " FROM ANALYZE", schema + "," + logicalTableName + ":" + remark }, - WARNING); + WARNING, + e); throw e; } } sum = sumRowCount(topologyMap, rowCountMap); } - StatisticManager.CacheLine cacheLine = StatisticManager.getInstance().getCacheLine(schema, logicalTableName); + StatisticManager.CacheLine cacheLine + = StatisticManager.getInstance().getCacheLine(schema, logicalTableName); cacheLine.setRowCount(sum); ModuleLogInfo.getInstance() .logRecord( @@ -255,20 +292,30 @@ public static void collectRowCount(String schema, String logicalTableName) throw NORMAL); } - public static boolean sampleColumns(String schema, String logicalTableName) { + public static boolean sampleOneTable(String schema, String logicalTableName) { try { + // check table if exists + if (OptimizerContext.getContext(schema).getLatestSchemaManager() + .getTableWithNull(logicalTableName) == null) { + return false; + } + // don't sample oss table + if (StatisticUtils.isFileStore(schema, logicalTableName)) { + return false; + } + collectRowCount(schema, logicalTableName); - sampleTable(schema, logicalTableName); + sampleTableDdl(schema, logicalTableName, null); /** persist */ persistStatistic(schema, logicalTableName, true); /** sync other nodes */ - SyncManagerHelper.sync( + SyncManagerHelper.syncWithDefaultDB( new UpdateStatisticSyncAction( schema, logicalTableName, StatisticManager.getInstance().getCacheLine(schema, logicalTableName)), - schema); + SyncScope.ALL); } catch (Exception e) { logger.error(e); @@ -297,7 +344,8 @@ public static void persistStatistic(String schema, String logicalTableName, bool cacheLine.getTopN(columnName), cacheLine.getNullCountMap().get(columnName), cacheLine.getSampleRate(), - cacheLine.getLastModifyTime())); + cacheLine.getLastModifyTime(), + cacheLine.getExtend())); } } StatisticManager.getInstance().getSds().batchReplace(columnRowList); @@ -324,11 +372,11 @@ private static void updateMetaDbInformationSchemaTables(String schemaName, Strin new 
InformationSchemaTables(relOptCluster, relOptCluster.getPlanner().emptyTraitSet()); RexBuilder rexBuilder = relOptCluster.getRexBuilder(); RexNode filterCondition = rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, - rexBuilder.makeInputRef(informationSchemaTables, informationSchemaTables.getTableSchemaIndex()), + rexBuilder.makeInputRef(informationSchemaTables, InformationSchemaTables.getTableSchemaIndex()), rexBuilder.makeLiteral(schemaName)); if (logicalTableName != null) { RexNode tableNameFilterCondition = rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, - rexBuilder.makeInputRef(informationSchemaTables, informationSchemaTables.getTableNameIndex()), + rexBuilder.makeInputRef(informationSchemaTables, InformationSchemaTables.getTableNameIndex()), rexBuilder.makeLiteral(logicalTableName)); filterCondition = rexBuilder.makeCall(SqlStdOperatorTable.AND, filterCondition, tableNameFilterCondition); @@ -385,110 +433,126 @@ public static void sketchTableDdl(String schema, String logicalTableName, boolea return; } - List columnMetaList = getColumnMetas(false, schema, logicalTableName); - - if (columnMetaList == null || columnMetaList.isEmpty()) { - ModuleLogInfo.getInstance().logRecord(Module.STATISTICS, UNEXPECTED, - new String[] { - "statistic sketch", - "column meta is empty :" + schema + "," + logicalTableName - }, LogLevel.NORMAL); - return; + int hllParallelism = ec.getParamManager().getInt(ConnectionParams.HLL_PARALLELISM); + logger.info(String.format("Sketch table %s.%s with parallelism: %d", schema, logicalTableName, hllParallelism)); + ThreadPoolExecutor sketchHllExecutor = null; + if (hllParallelism > 1) { + sketchHllExecutor = ExecutorUtil.createExecutor("SketchHllExecutor", hllParallelism); } - Map> colMap = PlanManager.getInstance().columnsInvolvedByPlan().get(schema); - - if (colMap == null) { - colMap = Maps.newHashMap(); - } + try { + List columnMetaList = getColumnMetas(false, schema, logicalTableName); + + if (columnMetaList == null || columnMetaList.isEmpty()) { + ModuleLogInfo.getInstance().logRecord(Module.STATISTICS, UNEXPECTED, + new String[] { + "statistic sketch", + "column meta is empty :" + schema + "," + logicalTableName + }, LogLevel.NORMAL); + return; + } - Set colSet = colMap.get(logicalTableName); - Set colDoneSet = Sets.newHashSet(); + Map> colMap = PlanManager.getInstance().columnsInvolvedByPlan().get(schema); - /** - * handle columns needed by plan - */ - for (ColumnMeta columnMeta : columnMetaList) { - if (ec != null && CrossEngineValidator.isJobInterrupted(ec)) { - long jobId = ec.getDdlJobId(); - throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, - "The job '" + jobId + "' has been cancelled"); + if (colMap == null) { + colMap = Maps.newHashMap(); } - String columnName = columnMeta.getOriginColumnName(); - if (!needRebuild && (colSet == null || !colSet.contains(columnName))) { - continue; - } + Set colSet = colMap.get(logicalTableName); + Set colDoneSet = Sets.newHashSet(); - if (needRebuild) { - // analyze table would rebuild full ndv sketch info - StatisticManager.getInstance().rebuildShardParts(schema, logicalTableName, columnName); - } else { - // schedule job only update ndv sketch info - StatisticManager.getInstance().updateAllShardParts(schema, logicalTableName, columnName); - } - colDoneSet.add(columnName); - } + /** + * handle columns needed by plan + */ + for (ColumnMeta columnMeta : columnMetaList) { + if (ec != null && CrossEngineValidator.isJobInterrupted(ec)) { + long jobId = ec.getDdlJobId(); + throw new 
TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, + "The job '" + jobId + "' has been cancelled"); + } - /** - * handle columns inside index - */ - TableMeta tableMeta = OptimizerContext.getContext(schema).getLatestSchemaManager().getTable(logicalTableName); - Map>> indexColsMap = GlobalIndexMeta.getTableIndexMap(tableMeta, null); + String columnName = columnMeta.getOriginColumnName(); + if (!needRebuild && (colSet == null || !colSet.contains(columnName))) { + continue; + } - for (String tblName : indexColsMap.keySet()) { - if (ec != null && CrossEngineValidator.isJobInterrupted(ec)) { - long jobId = ec.getDdlJobId(); - throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, - "The job '" + jobId + "' has been cancelled"); + if (needRebuild) { + // analyze table would rebuild full ndv sketch info + StatisticManager.getInstance() + .rebuildShardParts(schema, logicalTableName, columnName, ec, sketchHllExecutor); + } else { + // schedule job only update ndv sketch info + StatisticManager.getInstance() + .updateAllShardParts(schema, logicalTableName, columnName, ec, sketchHllExecutor); + } + colDoneSet.add(columnName); } - // index key -> columns - Map> indexColumnMap = indexColsMap.get(tblName); - for (List cols : indexColumnMap.values()) { - if (cols != null && cols.size() == 1 && colMap.get(tblName) != null && colMap.get(tblName) - .contains(cols.iterator().next())) { - continue; + + /** + * handle columns inside index + */ + TableMeta tableMeta = + OptimizerContext.getContext(schema).getLatestSchemaManager().getTable(logicalTableName); + Map>> indexColsMap = GlobalIndexMeta.getTableIndexMap(tableMeta, null); + + for (String tblName : indexColsMap.keySet()) { + if (ec != null && CrossEngineValidator.isJobInterrupted(ec)) { + long jobId = ec.getDdlJobId(); + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, + "The job '" + jobId + "' has been cancelled"); } - for (int i = 0; i < cols.size() - 1; i++) { - String colsName = buildColumnsName(cols, i + 1); - if (colDoneSet.contains(colsName)) { + // index key -> columns + Map> indexColumnMap = indexColsMap.get(tblName); + for (List cols : indexColumnMap.values()) { + if (cols != null && cols.size() == 1 && colMap.get(tblName) != null && colMap.get(tblName) + .contains(cols.iterator().next())) { continue; } - if (needRebuild) { - // analyze table would rebuild full ndv sketch info - StatisticManager.getInstance().rebuildShardParts(schema, logicalTableName, colsName); - } else { - // schedule job only update ndv sketch info - StatisticManager.getInstance().updateAllShardParts(schema, tblName, colsName); + for (int i = 0; i < cols.size() - 1; i++) { + String colsName = buildColumnsName(cols, i + 1); + if (colDoneSet.contains(colsName)) { + continue; + } + if (needRebuild) { + // analyze table would rebuild full ndv sketch info + StatisticManager.getInstance().rebuildShardParts(schema, logicalTableName, colsName, ec, + sketchHllExecutor); + } else { + // schedule job only update ndv sketch info + StatisticManager.getInstance().updateAllShardParts(schema, tblName, colsName, ec, + sketchHllExecutor); + } + colDoneSet.add(colsName); } - colDoneSet.add(colsName); - } - String columnsName = buildColumnsName(cols); - if (!colDoneSet.contains(columnsName)) { - if (needRebuild) { - // analyze table would rebuild full ndv sketch info - StatisticManager.getInstance().rebuildShardParts(schema, logicalTableName, columnsName); - } else { - // schedule job only update ndv sketch info - StatisticManager.getInstance().updateAllShardParts(schema, 
tblName, columnsName); + String columnsName = buildColumnsName(cols); + if (!colDoneSet.contains(columnsName)) { + if (needRebuild) { + // analyze table would rebuild full ndv sketch info + StatisticManager.getInstance().rebuildShardParts(schema, logicalTableName, columnsName, ec, + sketchHllExecutor); + } else { + // schedule job only update ndv sketch info + StatisticManager.getInstance().updateAllShardParts(schema, tblName, columnsName, ec, + sketchHllExecutor); + } + colDoneSet.add(columnsName); } - colDoneSet.add(columnsName); + } + } + ModuleLogInfo.getInstance().logRecord(Module.STATISTICS, PROCESS_END, + new String[] { + "statistic sketch table ", + schema + "," + logicalTableName + ",is force:" + needRebuild + ",cols:" + String.join(";", + colDoneSet) + }, LogLevel.NORMAL); + } finally { + if (sketchHllExecutor != null) { + sketchHllExecutor.shutdown(); } } - - ModuleLogInfo.getInstance().logRecord(Module.STATISTICS, PROCESS_END, - new String[] { - "statistic sketch table ", - schema + "," + logicalTableName + ",is force:" + needRebuild + ",cols:" + String.join(";", colDoneSet) - }, LogLevel.NORMAL); - return; - } - - public static void sampleTable(String schemaName, String logicalTableName) { - sampleTableDdl(schemaName, logicalTableName, null); } public static void sampleTableDdl(String schemaName, String logicalTableName, ExecutionContext ec) { @@ -509,11 +573,12 @@ public static void sampleTableDdl(String schemaName, String logicalTableName, Ex return; } + // delete cols statistic that not exists + cacheLine.remainColumns(analyzeColumnList); + /** * prepare */ - int topNSize = InstConfUtil.getInt(ConnectionParams.TOPN_SIZE); - int topNMinNum = InstConfUtil.getInt(ConnectionParams.TOPN_MIN_NUM); float sampleRate = 1; long rowCount = cacheLine.getRowCount(); if (rowCount > 0) { @@ -612,13 +677,13 @@ public static void sampleTableDdl(String schemaName, String logicalTableName, Ex cacheLine.setNullCount(colName, nullCount); } + buildSkew(schemaName, logicalTableName, analyzeColumnList, rows, sampleRate); for (int i = 0; i < analyzeColumnList.size(); i++) { if (ec != null && CrossEngineValidator.isJobInterrupted(ec)) { long jobId = ec.getDdlJobId(); throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "The job '" + jobId + "' has been cancelled"); } - int finalI = i; List objs = rows.stream().map(r -> r.getObject(finalI)).filter(o -> o != null).collect(Collectors.toList()); @@ -642,7 +707,12 @@ public static void sampleTableDdl(String schemaName, String logicalTableName, Ex TopN topN = new TopN(dataType, sampleRateUp); objs.forEach(obj -> topN.offer(obj)); - boolean isReady = topN.build(topNSize, topNMinNum); + boolean isReady = topN.build( + canUseNewTopN(schemaName, logicalTableName, colName), + (long) (rows.size() / sampleRate / sampleRateUp), + sampleRate * sampleRateUp + ); + if (isReady) { cacheLine.setTopN(colName, topN); } else { @@ -652,7 +722,6 @@ public static void sampleTableDdl(String schemaName, String logicalTableName, Ex h.buildFromData(objs.stream().filter(d -> isReady ? 
topN.get(d) == 0 : true).toArray()); cacheLine.setHistogram(colName, h); } - cacheLine.setRowCount((long) (rows.size() / sampleRate / sampleRateUp)); cacheLine.setSampleRate(sampleRate); cacheLine.setLastModifyTime(unixTimeStamp()); @@ -663,6 +732,211 @@ public static void sampleTableDdl(String schemaName, String logicalTableName, Ex }, LogLevel.NORMAL); } + public static List> buildColumnBitSet( + String schemaName, + String logicalTableName, + List analyzeColumnList) { + TableMeta tableMeta = + OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(logicalTableName); + + // sample is derived from B+tree leaf of primary key. + // if primary key is compound, the prefix column is biased, + // don't check skewness of the column in this case + ColumnMeta biasedColumn = null; + IndexMeta primaryKey = tableMeta.getPrimaryIndex(); + if (primaryKey != null && primaryKey.getKeyColumns().size() > 1) { + biasedColumn = primaryKey.getKeyColumns().get(0); + } + + // map column to SN in select list + Map columnIdMap = Maps.newHashMap(); + for (int i = 0; i < analyzeColumnList.size(); i++) { + columnIdMap.put(analyzeColumnList.get(i), i); + } + + // find candidate hot column set + Set columnBitSet = Sets.newHashSet(); + for (IndexMeta indexMeta : tableMeta.getIndexes()) { + BitSet bit = new BitSet(); + for (ColumnMeta column : indexMeta.getKeyColumns()) { + // skip any index start with biasedColumn + if (column == biasedColumn) { + break; + } + if (!supportSkewCheck(column)) { + break; + } + Integer id = columnIdMap.get(column); + bit.set(id); + if (!columnBitSet.contains(bit)) { + columnBitSet.add((BitSet) bit.clone()); + } + } + } + + // sort according to cardinality + List orderedColumns = columnBitSet.stream().sorted((x, y) -> { + if (x.cardinality() > y.cardinality()) { + return -1; + } + if (x.cardinality() < y.cardinality()) { + return 1; + } + return x.toString().compareTo(y.toString()); + }).collect(Collectors.toList()); + List> columnSns = Lists.newArrayList(); + for (BitSet bitSet : orderedColumns) { + + List columnSn = Lists.newArrayList(); + for (int i = bitSet.nextSetBit(0); i >= 0; i = bitSet.nextSetBit(i + 1)) { + columnSn.add(i); + } + columnSns.add(columnSn); + } + return columnSns; + } + + static class ColumnListCounter { + List columnSn; + Row row; + List analyzeColumnList; + + public ColumnListCounter(List columnSn, Row row, List analyzeColumnList) { + this.columnSn = columnSn; + this.row = row; + this.analyzeColumnList = analyzeColumnList; + } + + @Override + public int hashCode() { + int hashCode = 1; + for (int sn : columnSn) { + hashCode = 31 * hashCode + Objects.hashCode(row.getObject(sn)); + } + return hashCode; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + ColumnListCounter other = (ColumnListCounter) obj; + for (int sn : columnSn) { + if (analyzeColumnList.get(sn).getDataType().compare(row.getObject(sn), other.row.getObject(sn)) != 0) { + return false; + } + } + return true; + } + } + + private static boolean orderedContain(List source, List target) { + int tLoc = 0; + for (int cur : source) { + if (cur > target.get(tLoc)) { + return false; + } + if (cur == target.get(tLoc)) { + tLoc++; + } + if (tLoc == target.size()) { + return true; + } + } + return tLoc == target.size(); + } + + public static void buildSkew( + String schemaName, + String logicalTableName, + List analyzeColumnList, + List rows, + float sampleRate) { + + if 
(rows.size() == 0) {
+            return;
+        }
+        List> columnSns = buildColumnBitSet(schemaName, logicalTableName, analyzeColumnList);
+
+        // check skew
+        List> skewCols = Lists.newArrayList();
+        List> ansColumnSns = Lists.newArrayList();
+        for (List columnSn : columnSns) {
+
+            // skip this column set if one of its supersets is already marked as hot
+            boolean covered = false;
+            for (List ansColumnSn : ansColumnSns) {
+                if (orderedContain(ansColumnSn, columnSn)) {
+                    covered = true;
+                    break;
+                }
+            }
+            if (covered) {
+                continue;
+            }
+
+            StreamSummary summary = new StreamSummary<>(10000);
+            for (Row r : rows) {
+                summary.offer(new ColumnListCounter(columnSn, r, analyzeColumnList));
+            }
+            long count = summary.topK(1).get(0).getCount();
+            // Provisional skew criterion: the most frequent value must occur more than 5 times in the
+            // sample, and its estimated total (the count scaled up by the sample rate) must exceed 10,000 rows.
+            if (count > 5 && (count / sampleRate) > 10000) {
+                ansColumnSns.add(columnSn);
+            }
+        }
+
+        for (List ansColumnSn : ansColumnSns) {
+            Set columnSet = Sets.newHashSet();
+            for (int sn : ansColumnSn) {
+                columnSet.add(analyzeColumnList.get(sn).getName().toLowerCase());
+            }
+            skewCols.add(columnSet);
+        }
+
+        StatisticManager.getInstance().getCacheLine(schemaName, logicalTableName).setSkewCols(skewCols);
+    }
+
+    private static boolean supportSkewCheck(ColumnMeta columnMeta) {
+        DataType dataType = columnMeta.getDataType();
+        return DataTypeUtil.isStringType(dataType) ||
+            DataTypeUtil.isUnderLongType(dataType) ||
+            DataTypeUtil.isDateType(dataType);
+    }
+
+    /**
+     * check whether the new topN can be used for a specific column
+     *
+     * @return true if the new topN can be used
+     */
+    public static boolean canUseNewTopN(String schemaName, String logicalTableName, String columnName) {
+        // the new topN is disabled by config
+        if (!InstConfUtil.getBool(ConnectionParams.NEW_TOPN)) {
+            return false;
+        }
+        if (StringUtils.isEmpty(columnName)) {
+            return false;
+        }
+        TableMeta tableMeta =
+            OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(logicalTableName);
+
+        // check the prefix of the primary key
+        IndexMeta primaryKey = tableMeta.getPrimaryIndex();
+        if (primaryKey != null) {
+            for (ColumnMeta column : primaryKey.getKeyColumns()) {
+                if (columnName.equalsIgnoreCase(column.getName())) {
+                    return false;
+                }
+            }
+        }
+        return true;
+    }
+
     private static Row purgeRowForHistogram(Row r, int size) {
         Row tmpRow = new ArrayRow(size, r.getParentCursorMeta());
         for (int i = 0; i < size; i++) {
@@ -721,6 +995,7 @@ private static double scanAnalyze(String schema, String logicalTableName, List()); } if (trx != null) { trx.close();
@@ -758,14 +1033,17 @@ private static double scanAnalyze(String schema, String logicalTableName, List columnMetaList, - float sampleRate) {
+    protected static String constructScanSamplingSql(String logicalTableName, List columnMetaList,
+                                                     float sampleRate) {
         StringBuilder sql = new StringBuilder();
         String cmdExtraSamplePercentage = "";
         cmdExtraSamplePercentage = ",sample_percentage=" + sampleRate * 100;
-        sql.append("/*+TDDL:cmd_extra(merge_union=false,ENABLE_DIRECT_PLAN=false" + cmdExtraSamplePercentage + ") */ "
-            + "select ");
+        sql.append(
+            "/*+TDDL:cmd_extra("
+                + "enable_post_planner=false,enable_index_selection=false,merge_union=false,enable_direct_plan=false"
+                + cmdExtraSamplePercentage + ") */ "
+                + "select ");
         boolean first = true;
         for (ColumnMeta columnMeta : columnMetaList) {
             if (first) {
@@ -773,10 +1051,10 @@ private static String constructScanSamplingSql(String logicalTableName, List 0f && sampleRate < 1f) { sql.append(" where rand() < ");
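The skew test above is, at heart, a space-saving top-k frequency check. The following standalone sketch applies the same thresholds using the com.clearspring StreamSummary that the patch itself imports; the single-String-column input is a simplification of the real ColumnListCounter tuples:

import com.clearspring.analytics.stream.Counter;
import com.clearspring.analytics.stream.StreamSummary;

import java.util.List;

public class SkewCheck {
    // simplified: the real code offers ColumnListCounter tuples rather than single strings
    static boolean isSkewed(List<String> sampledValues, float sampleRate) {
        if (sampledValues.isEmpty()) {
            return false;
        }
        StreamSummary<String> summary = new StreamSummary<>(10000);
        for (String v : sampledValues) {
            summary.offer(v);
        }
        // most frequent sampled value and its observed count
        Counter<String> top = summary.topK(1).get(0);
        long count = top.getCount();
        // same provisional criterion as above: more than 5 repeats in the sample,
        // and an estimated total beyond 10,000 rows once scaled by the sample rate
        return count > 5 && (count / sampleRate) > 10000;
    }
}

diff --git 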
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/BackfillExecutor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/BackfillExecutor.java index 55ef737b3..4322fa9b3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/BackfillExecutor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/BackfillExecutor.java @@ -28,10 +28,14 @@ import com.alibaba.polardbx.executor.backfill.Loader; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.gsi.backfill.CdasLoader; +import com.alibaba.polardbx.executor.gsi.backfill.GsiChangeSetLoader; import com.alibaba.polardbx.executor.gsi.backfill.GsiExtractor; import com.alibaba.polardbx.executor.gsi.backfill.GsiLoader; +import com.alibaba.polardbx.executor.gsi.backfill.OmcMirrorCopyExtractor; import com.alibaba.polardbx.executor.gsi.backfill.Updater; +import com.alibaba.polardbx.executor.scaleout.backfill.ChangeSetExecutor; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.utils.PhyTableOperationUtil; import com.alibaba.polardbx.statistics.SQLRecorderLogger; import org.apache.calcite.rel.RelNode; @@ -39,6 +43,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; @@ -55,7 +60,9 @@ public BackfillExecutor(BiFunction, ExecutionContext, List this.executeFunc = executeFunc; } - public int backfill(String schemaName, String primaryTable, String indexName, ExecutionContext baseEc) { + public int backfill(String schemaName, String primaryTable, String indexName, boolean useBinary, + boolean useChangeSet, boolean canUseReturning, List modifyStringColumns, + ExecutionContext baseEc) { final long batchSize = baseEc.getParamManager().getLong(ConnectionParams.GSI_BACKFILL_BATCH_SIZE); final long speedLimit = baseEc.getParamManager().getLong(ConnectionParams.GSI_BACKFILL_SPEED_LIMITATION); final long speedMin = baseEc.getParamManager().getLong(ConnectionParams.GSI_BACKFILL_SPEED_MIN); @@ -66,11 +73,29 @@ public int backfill(String schemaName, String primaryTable, String indexName, Ex } // Init extractor and loader - final Extractor extractor = - GsiExtractor - .create(schemaName, primaryTable, indexName, batchSize, speedMin, speedLimit, parallelism, baseEc); - final Loader loader = - GsiLoader.create(schemaName, primaryTable, indexName, this.executeFunc, baseEc.isUseHint(), baseEc); + + final Extractor extractor; + final Loader loader; + + if (useChangeSet) { + Map> sourcePhyTables = GsiUtils.getPhyTables(schemaName, primaryTable); + Map tableNameMapping = + GsiUtils.getPhysicalTableMapping(schemaName, primaryTable, indexName, null, null); + + extractor = ChangeSetExecutor + .create(schemaName, primaryTable, indexName, batchSize, speedMin, speedLimit, parallelism, + useBinary, modifyStringColumns, sourcePhyTables, baseEc); + loader = + GsiChangeSetLoader.create(schemaName, primaryTable, indexName, this.executeFunc, baseEc.isUseHint(), + baseEc, tableNameMapping); + } else { + extractor = + GsiExtractor.create(schemaName, primaryTable, indexName, batchSize, speedMin, speedLimit, parallelism, + useBinary, modifyStringColumns, baseEc); + loader = + GsiLoader.create(schemaName, primaryTable, indexName, this.executeFunc, baseEc.isUseHint(), + canUseReturning, baseEc); + } boolean finished; // Foreach row: lock batch -> fill into index -> release lock @@ -87,7 +112,9 @@ 
public void consume(List> batch, loader.fillIntoIndex(batch, Pair.of(baseEc, extractEcAndIndexPair.getValue()), () -> { try { // Commit and close extract statement - extractEcAndIndexPair.getKey().getTransaction().commit(); + if (!useChangeSet) { + extractEcAndIndexPair.getKey().getTransaction().commit(); + } return true; } catch (Exception e) { logger.error("Close extract statement failed!", e); @@ -108,6 +135,47 @@ public void consume(List> batch, return affectRows.get(); } + public int mirrorCopyGsiBackfill(String schemaName, String primaryTable, String indexName, boolean useChangeSet, + boolean useBinary, ExecutionContext baseEc) { + final long batchSize = baseEc.getParamManager().getLong(ConnectionParams.GSI_BACKFILL_BATCH_SIZE); + final long speedLimit = baseEc.getParamManager().getLong(ConnectionParams.GSI_BACKFILL_SPEED_LIMITATION); + final long speedMin = baseEc.getParamManager().getLong(ConnectionParams.GSI_BACKFILL_SPEED_MIN); + final long parallelism = baseEc.getParamManager().getLong(ConnectionParams.GSI_BACKFILL_PARALLELISM); + + if (null == baseEc.getServerVariables()) { + baseEc.setServerVariables(new HashMap<>()); + } + + ExecutionContext executionContext = baseEc.copy(); + PhyTableOperationUtil.disableIntraGroupParallelism(schemaName, executionContext); + + Map> sourcePhyTables = GsiUtils.getPhyTables(schemaName, primaryTable); + Map tableNameMapping = + GsiUtils.getPhysicalTableMapping(schemaName, primaryTable, indexName, null, null); + + OmcMirrorCopyExtractor extractor = + OmcMirrorCopyExtractor.create(schemaName, primaryTable, indexName, batchSize, speedMin, speedLimit, + parallelism, tableNameMapping, sourcePhyTables, useChangeSet, useBinary, baseEc); + extractor.loadBackfillMeta(executionContext); + + final AtomicInteger affectRows = new AtomicInteger(); + extractor.foreachBatch(executionContext, new BatchConsumer() { + @Override + public void consume(List> batch, + Pair> extractEcAndIndexPair) { + // pass + } + + @Override + public void consume(String sourcePhySchema, String sourcePhyTable, Cursor cursor, ExecutionContext context, + List> mockResult) { + // pass + } + }); + + return affectRows.get(); + } + public int logicalTableDataMigrationBackFill(String srcSchemaName, String dstSchemaName, String srcTableName, String dstTableName, List dstGsiNames, ExecutionContext baseEc) { @@ -117,6 +185,7 @@ public int logicalTableDataMigrationBackFill(String srcSchemaName, String dstSch final long speedMin = baseEc.getParamManager().getLong(ConnectionParams.CREATE_DATABASE_AS_BACKFILL_SPEED_MIN); final long parallelism = baseEc.getParamManager().getLong(ConnectionParams.CREATE_DATABASE_AS_BACKFILL_PARALLELISM); + final boolean useBinary = baseEc.getParamManager().getBoolean(ConnectionParams.BACKFILL_USING_BINARY); if (null == baseEc.getServerVariables()) { baseEc.setServerVariables(new HashMap<>()); @@ -130,7 +199,7 @@ public int logicalTableDataMigrationBackFill(String srcSchemaName, String dstSch copiedEc.setSchemaName(dstSchemaName); final Extractor extractor = GsiExtractor.create(srcSchemaName, srcTableName, srcTableName, batchSize, speedMin, speedLimit, parallelism, - baseEc); + useBinary, null, baseEc); final CdasLoader cdasLoader = CdasLoader.create(srcSchemaName, dstSchemaName, srcTableName, dstTableName, this.executeFunc, copiedEc.isUseHint(), copiedEc, false); @@ -202,6 +271,7 @@ public int addColumnsBackfill(String schemaName, String primaryTable, List()); @@ -215,7 +285,7 @@ public int addColumnsBackfill(String schemaName, String primaryTable, List reports) { } 
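The backfill above follows a fixed extractor/loader handshake: rows are read in primary-key-ordered batches under a lock, each batch is written into the index, and the extractor's read transaction is committed only after the batch lands (the changeset path skips that commit because it keeps its stream open). A minimal sketch, with hypothetical Extractor/Loader/Batch interfaces standing in for the real GsiExtractor/GsiLoader plumbing:

import java.util.Iterator;
import java.util.List;

public class BackfillLoop {
    // hypothetical interfaces; the real code wires GsiExtractor/GsiLoader through BatchConsumer
    interface Batch {
        List<Object[]> rows();
    }

    interface Extractor {
        Iterator<Batch> batches();       // primary-key-ordered batches
        void commitReadTrx();            // commits the current batch's read transaction
    }

    interface Loader {
        void fillIntoIndex(Batch batch); // writes one batch into the index table
    }

    static int run(Extractor extractor, Loader loader, boolean useChangeSet) {
        int affectedRows = 0;
        Iterator<Batch> it = extractor.batches();
        while (it.hasNext()) {
            Batch batch = it.next();     // locks the batch on the source side
            loader.fillIntoIndex(batch);
            if (!useChangeSet) {
                // release the source lock once the batch is safely loaded;
                // the changeset path keeps its stream open instead
                extractor.commitReadTrx();
            }
            affectedRows += batch.rows().size();
        }
        return affectedRows;
    }
}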
} + public static void insertReports(Connection conn, List reports) { + try { + batchInsert(SQL_INSERT_CHECKER_REPORT, reports, conn); + } catch (SQLException e) { + throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, e, "add reports to gms failed!"); + } + } + public List queryReports(long jobId) { return queryByJobId(SQL_SELECT_CHECKER_REPORT, jobId, CheckerReport.ORM); } @@ -360,8 +368,8 @@ private List query(String sql, Map params, Con } } - private void update(String sql, List> params, - Connection connection) throws SQLException { + private static void update(String sql, List> params, Connection connection) + throws SQLException { final int batchSize = 512; for (int i = 0; i < params.size(); i += batchSize) { try (PreparedStatement ps = connection.prepareStatement(sql)) { @@ -376,7 +384,7 @@ private void update(String sql, List> params, } } - private void batchInsert(String sql, List params, Connection connection) throws SQLException { + private static void batchInsert(String sql, List params, Connection connection) throws SQLException { update(sql, params.stream().map(Orm::params).collect(ArrayList::new, ArrayList::add, ArrayList::addAll), connection); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/GsiBackfillManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/GsiBackfillManager.java index 675796cd5..2f34476cf 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/GsiBackfillManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/GsiBackfillManager.java @@ -28,8 +28,9 @@ import com.alibaba.polardbx.common.utils.thread.ExecutorUtil; import com.alibaba.polardbx.common.utils.thread.NamedThreadFactory; import com.alibaba.polardbx.config.ConfigDataMode; -import com.alibaba.polardbx.executor.ddl.engine.AsyncDDLCache; import com.alibaba.polardbx.executor.gsi.utils.Transformer; +import com.alibaba.polardbx.executor.mpp.metadata.NotNull; +import com.alibaba.polardbx.executor.physicalbackfill.PhysicalBackfillUtils; import com.alibaba.polardbx.gms.metadb.GmsSystemTables; import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; import com.alibaba.polardbx.gms.metadb.record.SystemTableRecord; @@ -49,9 +50,6 @@ import org.apache.commons.lang3.StringUtils; import javax.sql.DataSource; - -import com.alibaba.polardbx.executor.mpp.metadata.NotNull; - import java.math.BigDecimal; import java.math.RoundingMode; import java.sql.Connection; @@ -90,6 +88,8 @@ public class GsiBackfillManager { private static final String SYSTABLE_FILE_STORAGE_BACKFILL_OBJECTS = GmsSystemTables.FILE_STORAGE_BACKFILL_OBJECTS; + private static final String SYSTABLE_PHYSICAL_BACKFILL_OBJECTS = GmsSystemTables.PHYSICAL_BACKFILL_OBJECTS; + public static final String CREATE_GSI_BACKFILL_OBJECTS_TABLE = "CREATE TABLE IF NOT EXISTS `" + SYSTABLE_BACKFILL_OBJECTS + "` (" @@ -159,6 +159,11 @@ public void register(@NotNull final String schemaName, @NotNull final DataSource try (PreparedStatement ps = conn.prepareStatement(SQL_CLEAN_OUTDATED_FILESTORAGE_LOG)) { ps.execute(); } + try (PreparedStatement ps = conn.prepareStatement( + SQL_CLEAN_OUTDATED_PHYSICAL_BACKFILL_LOG)) { + ps.execute(); + } + PhysicalBackfillUtils.destroyDataSources(); } catch (SQLException e) { throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_EXECUTE, e, @@ -688,6 +693,11 @@ private static void wrapWithTransaction(DataSource dataSource, Consumer> getPhyTables(String schemaName, String lo } } + public static Map> 
getPhyTablesDrdsOrderByName(String schemaName, String logicalTableName) { + TableRule tableRule = + OptimizerContext.getContext(schemaName).getRuleManager().getTableRule(logicalTableName); + if (tableRule != null) { + Map> topology = tableRule.getActualTopology(); + Map> ret = new HashMap<>(topology.size()); + topology.forEach((groupKey, phyTables) -> { + Set tables = new TreeSet<>(String::compareToIgnoreCase); + tables.addAll(phyTables); + ret.put(groupKey, new ArrayList<>(tables)); + }); + return ret; + } else { + Map> topology = new HashMap<>(1); + List groupTopology = new ArrayList<>(1); + groupTopology.add(logicalTableName); + topology + .put(OptimizerContext.getContext(schemaName).getRuleManager().getDefaultDbIndex(logicalTableName), + groupTopology); + return topology; + } + } + + public static Map getPhysicalTableMapping(String schemaName, String primaryTableName, + String indexName, PhysicalPlanData physicalPlanData, + PartitionInfo idxPartitionInfo) { + Map phyTableMapping = new HashMap<>(); + PartitionInfo partitionInfo = + OptimizerContext.getContext(schemaName).getPartitionInfoManager().getPartitionInfo(primaryTableName); + if (partitionInfo == null) { + Map> sourceTopology = getPhyTablesDrdsOrderByName(schemaName, primaryTableName); + Map> targetTopology; + if (indexName == null) { + targetTopology = new HashMap<>(); + Map>> topology = physicalPlanData.getTableTopology(); + topology.forEach((groupKey, phyTablesList) -> { + Set tables = new TreeSet<>(String::compareToIgnoreCase); + for (List phyTables : phyTablesList) { + tables.addAll(phyTables); + } + targetTopology.put(groupKey, new ArrayList<>(tables)); + }); + } else { + targetTopology = getPhyTablesDrdsOrderByName(schemaName, indexName); + } + + for (Map.Entry> sourceEntry : sourceTopology.entrySet()) { + String groupKey = sourceEntry.getKey(); + List sourcePhyTables = sourceEntry.getValue(); + List targetPhyTables = targetTopology.get(groupKey); + + for (int i = 0; i < sourcePhyTables.size(); ++i) { + phyTableMapping.put(sourcePhyTables.get(i), targetPhyTables.get(i)); + } + } + } else { + PartitionInfo indexPartitionInfo = + indexName == null ? idxPartitionInfo : + OptimizerContext.getContext(schemaName).getPartitionInfoManager().getPartitionInfo(indexName); + + for (PartitionSpec spec : partitionInfo.getPartitionBy().getPhysicalPartitions()) { + PartitionLocation location = spec.getLocation(); + String phyTbName = location.getPhyTableName(); + String partitionName = spec.getName(); + + for (PartitionSpec indexSpec : indexPartitionInfo.getPartitionBy().getPhysicalPartitions()) { + if (StringUtils.equalsIgnoreCase(partitionName, indexSpec.getName())) { + phyTableMapping.put(phyTbName, indexSpec.getLocation().getPhyTableName()); + break; + } + } + } + } + + return phyTableMapping; + } + /** * return group and physical tables for one logical table. 
* @@ -285,7 +365,8 @@ public static List buildIndexRecord(SqlIndexDefinition indexDef, St indexComment, seqInIndex, column, - indexDef.isClustered())); + indexDef.isClustered(), + indexDef.isColumnar())); seqInIndex++; } @@ -361,7 +442,8 @@ public static void buildIndexMeta(List indexRecords, List indexRecords, Sq indexTableName, indexTableName, nullable(columnDefMap, columnName), - "NULL", + null, 1, indexStatus, 0, - "NULL", + "", seqInIndex, columnName)); seqInIndex++; @@ -439,11 +521,40 @@ public static List buildIndexMetaByAddColumns(TableMeta primaryTabl indexTableName, indexTableName, nullable(primaryTableMeta, columnName), - "NULL", + null, + 1, + indexStatus, + 0, + "", + seqInIndex, + columnName)); + seqInIndex++; + } + return indexRecords; + } + + public static List buildIndexMetaByAddColumns(List columnNames, + String schemaName, + String tableName, + String indexTableName, + int seqInIndex, + IndexStatus indexStatus, + Map isNullable) { + final List indexRecords = new ArrayList<>(); + final String catalog = DEFAULT_CATALOG; + + for (String columnName : columnNames) { + indexRecords.add(indexCoveringRecord(catalog, + schemaName, + tableName, + indexTableName, + indexTableName, + isNullable.get(columnName).equals("YES") ? "YES" : "", + null, 1, indexStatus, 0, - "NULL", + "", seqInIndex, columnName)); seqInIndex++; @@ -468,11 +579,11 @@ public static List buildIndexMetaByAddColumns(List columnNa indexTableName, indexTableName, nullable ? "YES" : "", - "NULL", + null, 1, indexStatus, 0, - "NULL", + "", seqInIndex, columnName)); seqInIndex++; @@ -489,7 +600,10 @@ public static void buildIndexMetaFromPrimary(List indexRecords, String indexComment, String indexType, IndexStatus indexStatus, - boolean clusteredIndex) { + boolean clusteredIndex, + boolean columnarIndex, + Map columnMapping, + List addNewColumns) { final String catalog = DEFAULT_CATALOG; final String schema = sourceTableMeta.getSchemaName(); @@ -502,13 +616,21 @@ public static void buildIndexMetaFromPrimary(List indexRecords, int seqInIndex = 1; // index columns for (String column : columns) { + if (addNewColumns != null && addNewColumns.contains(column.toLowerCase())) { + // 过滤掉 add column + continue; + } + String oldColumn = column; + if (columnMapping != null && !columnMapping.isEmpty() && columnMapping.containsKey(column.toLowerCase())) { + oldColumn = columnMapping.get(column.toLowerCase()); + } indexRecords.add(indexColumnRecord(catalog, schema, tableName, nonUnique, indexName, indexTableName, - nullable(sourceTableMeta, column), + nullable(sourceTableMeta, oldColumn), indexType, indexLocation, indexStatus, @@ -516,19 +638,29 @@ public static void buildIndexMetaFromPrimary(List indexRecords, indexComment, seqInIndex, column, - clusteredIndex)); + clusteredIndex, + columnarIndex)); seqInIndex++; } if (null != covering) { // covering columns for (String column : covering) { + if (addNewColumns != null && addNewColumns.contains(column.toLowerCase())) { + // 过滤掉 add column + continue; + } + String oldColumn = column; + if (columnMapping != null && !columnMapping.isEmpty() && columnMapping.containsKey( + column.toLowerCase())) { + oldColumn = columnMapping.get(column.toLowerCase()); + } indexRecords.add(indexCoveringRecord(catalog, schema, tableName, indexName, indexTableName, - nullable(sourceTableMeta, column), + nullable(sourceTableMeta, oldColumn), indexType, indexLocation, indexStatus, @@ -677,12 +809,20 @@ private static IndexRecord indexColumnRecord(String catalog, String schema, Stri String indexName, String 
indexTableName, String nullable, String indexType, int indexLocation, IndexStatus indexStatus, long version, String indexComment, int seqInIndex, - SqlIndexColumnName column, boolean clusteredIndex) { + SqlIndexColumnName column, boolean clusteredIndex, + boolean columnarIndex) { final String columnName = column.getColumnNameStr(); final String collation = null == column.isAsc() ? null : (column.isAsc() ? "A" : "D"); final Long subPart = null == column.getLength() ? null : (RelUtils.longValue(column.getLength())); final String packed = null; final String comment = "INDEX"; + long flag = 0L; + if (clusteredIndex) { + flag |= IndexesRecord.FLAG_CLUSTERED; + } + if (columnarIndex) { + flag |= IndexesRecord.FLAG_COLUMNAR; + } return new IndexRecord(-1, catalog, schema, @@ -705,7 +845,7 @@ private static IndexRecord indexColumnRecord(String catalog, String schema, Stri indexTableName, indexStatus.getValue(), version, - clusteredIndex ? IndexesRecord.FLAG_CLUSTERED : 0L, + flag, IndexVisibility.VISIBLE.getValue()); } @@ -713,11 +853,18 @@ private static IndexRecord indexColumnRecord(String catalog, String schema, Stri String indexName, String indexTableName, String nullable, String indexType, int indexLocation, IndexStatus indexStatus, long version, String indexComment, int seqInIndex, - String columnName, boolean clusteredIndex) { + String columnName, boolean clusteredIndex, boolean columnarIndex) { final String collation = null; final Long subPart = null; final String packed = null; final String comment = "INDEX"; + long flag = 0L; + if (clusteredIndex) { + flag |= IndexesRecord.FLAG_CLUSTERED; + } + if (columnarIndex) { + flag |= IndexesRecord.FLAG_COLUMNAR; + } return new IndexRecord(-1, catalog, schema, @@ -740,7 +887,7 @@ private static IndexRecord indexColumnRecord(String catalog, String schema, Stri indexTableName, indexStatus.getValue(), version, - clusteredIndex ? 
IndexesRecord.FLAG_CLUSTERED : 0L, + flag, IndexVisibility.VISIBLE.getValue()); } @@ -896,6 +1043,11 @@ public static BackfillObjectRecord buildBackfillObjectRecord(long jobId, String extra); } + public static R wrapWithDistributedXATrx(ITransactionManager tm, ExecutionContext baseEc, + Function call) { + return wrapWithTransaction(tm, ITransactionPolicy.XA, baseEc, call); + } + public static R wrapWithDistributedTrx(ITransactionManager tm, ExecutionContext baseEc, Function call) { return wrapWithTransaction(tm, tm.getDefaultDistributedTrxPolicy(baseEc), baseEc, call); @@ -1007,4 +1159,24 @@ public static String generateRandomGsiName(String logicalSourceTableName) { String targetTableName = logicalSourceTableName + "_" + randomSuffix; return targetTableName; } + + public static List columnAst2nameStr(List columnDefList) { + if (CollectionUtils.isEmpty(columnDefList)) { + return new ArrayList<>(); + } + return columnDefList.stream() + .map(SqlIndexColumnName::getColumnNameStr) + .collect(Collectors.toList()); + } + + public static boolean isAddCci(SqlNode sqlNode, SqlAlterTable sqlAlterTable) { + boolean result = false; + if (sqlNode instanceof SqlCreateIndex) { + result = ((SqlCreateIndex) sqlNode).createCci(); + } else if (sqlNode instanceof SqlAlterTable || sqlNode instanceof SqlCreateTable) { + final SqlAddIndex addIndex = (SqlAddIndex) sqlAlterTable.getAlters().get(0); + result = addIndex.isColumnarIndex(); + } + return result; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/InsertIndexExecutor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/InsertIndexExecutor.java index 621fa024e..d101c7a4e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/InsertIndexExecutor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/InsertIndexExecutor.java @@ -205,18 +205,16 @@ public static int backfillIntoPartitionedTable(RelNode logicalInsert, String schemaName, ExecutionContext executionContext, BiFunction, ExecutionContext, List> executeFunc, - boolean mayInsertDuplicate, - PartitionInfo newPartition, - String targetGroup, - String phyTableName, - boolean mirrorCopy) { + boolean mayInsertDuplicate, PartitionInfo newPartition, + String targetGroup, String phyTableName, boolean mirrorCopy, + boolean returning, List> result) { Map>> shardResults; boolean forceReshard = executionContext.getParamManager().getBoolean(ConnectionParams.FORCE_RESHARD); boolean skipShard = mirrorCopy && StringUtils.isNotEmpty(targetGroup) && !forceReshard; if (tableMeta.getPartitionInfo().isBroadcastTable()) { // { targetDb : { targetTb : [valueIndex1, valueIndex2] } } shardResults = new HashMap<>(); - List> groupAndPhyTableList = + List> groupAndPhyTableList = PartitionInfoUtil.getInvisiblePartitionPhysicalLocation(newPartition); Parameters parameters = executionContext.getParams(); int rowCount = 0; @@ -227,13 +225,13 @@ public static int backfillIntoPartitionedTable(RelNode logicalInsert, rowCount = ((SqlCall) sqlInsert.getSource()).getOperandList().size(); } assert rowCount > 0; - for (Pair dbAndTbPair : groupAndPhyTableList) { + for (com.alibaba.polardbx.common.utils.Pair dbAndTbPair : groupAndPhyTableList) { IntStream stream = IntStream.range(0, rowCount); shardResults.computeIfAbsent(dbAndTbPair.getKey(), o -> new HashMap<>()) .computeIfAbsent(dbAndTbPair.getValue(), o -> new ArrayList<>()).addAll(stream.boxed().collect( Collectors.toList())); } - } else if (!skipShard){ + } else if (!skipShard) { // shard 
to get value indices shardResults = BuildPlanUtils.shardValues(sqlInsert, tableMeta, @@ -255,7 +253,7 @@ public static int backfillIntoPartitionedTable(RelNode logicalInsert, } } return insertIntoTable(logicalInsert, sqlInsert, tableMeta, "", schemaName, executionContext, executeFunc, - mayInsertDuplicate, shardResults); + mayInsertDuplicate, shardResults, returning, result); } public static int insertIntoTable(RelNode logicalInsert, SqlInsert sqlInsert, TableMeta tableMeta, @@ -263,19 +261,20 @@ public static int insertIntoTable(RelNode logicalInsert, SqlInsert sqlInsert, Ta ExecutionContext executionContext, BiFunction, ExecutionContext, List> executeFunc, boolean mayInsertDuplicate, - boolean mayForGsi, String partName) { + boolean mayForGsi, String partName, + boolean returning, List> result) { Map>> shardResults; boolean forceReshard = executionContext.getParamManager().getBoolean(ConnectionParams.FORCE_RESHARD); if (StringUtils.isBlank(targetGroup) || StringUtils.isBlank(phyTableName) || forceReshard) { shardResults = StringUtils.isEmpty(partName) - ? BuildPlanUtils.shardValues(sqlInsert, + ? BuildPlanUtils.shardValues(sqlInsert, tableMeta, executionContext, schemaName, null) - : BuildPlanUtils.shardValuesByPartName(sqlInsert, - tableMeta, - executionContext, - schemaName, null, partName); + : BuildPlanUtils.shardValuesByPartName(sqlInsert, + tableMeta, + executionContext, + schemaName, null, partName); } else { shardResults = new HashMap<>(); Parameters parameters = executionContext.getParams(); @@ -316,7 +315,7 @@ public static int insertIntoTable(RelNode logicalInsert, SqlInsert sqlInsert, Ta return insertIntoTable(logicalInsert, sqlInsert, tableMeta, targetGroup, schemaName, executionContext, executeFunc, - mayInsertDuplicate, shardResults); + mayInsertDuplicate, shardResults, returning, result); } public static int insertIntoTable(RelNode logicalInsert, SqlInsert sqlInsert, TableMeta tableMeta, @@ -325,9 +324,10 @@ public static int insertIntoTable(RelNode logicalInsert, SqlInsert sqlInsert, Ta BiFunction, ExecutionContext, List> executeFunc, boolean mayInsertDuplicate, boolean mayForGsi) { - return insertIntoTable(logicalInsert, sqlInsert, tableMeta, targetGroup, phyTableName, schemaName, executionContext, + return insertIntoTable(logicalInsert, sqlInsert, tableMeta, targetGroup, phyTableName, schemaName, + executionContext, executeFunc, - mayInsertDuplicate, mayForGsi, ""); + mayInsertDuplicate, mayForGsi, "", false, null); } public static int insertIntoTable(RelNode logicalInsert, SqlInsert sqlInsert, TableMeta tableMeta, @@ -335,7 +335,8 @@ public static int insertIntoTable(RelNode logicalInsert, SqlInsert sqlInsert, Ta ExecutionContext executionContext, BiFunction, ExecutionContext, List> executeFunc, boolean mayInsertDuplicate, - Map>> shardResults) { + Map>> shardResults, + boolean returning, List> result) { final RelOptCluster cluster = SqlConverter.getInstance(schemaName, executionContext).createRelOptCluster(); RelTraitSet traitSet = RelTraitSet.createEmpty(); @@ -358,17 +359,6 @@ public static int insertIntoTable(RelNode logicalInsert, SqlInsert sqlInsert, Ta SqlInsert newSqlInsert = visitor.visit(sqlInsert); BytesSql sql = RelUtils.toNativeBytesSql(newSqlInsert, DbType.MYSQL); -// PhyTableOperation phyTableModify = -// new PhyTableOperation(cluster, traitSet, rowType, null, logicalInsert); -// phyTableModify.setKind(newSqlInsert.getKind()); -// phyTableModify.setDbIndex(targetDb); -// phyTableModify.setTableNames(ImmutableList.of(ImmutableList.of(targetTb))); -// 
phyTableModify.setBytesSql(sql); -// phyTableModify.setNativeSqlNode(newSqlInsert); -// phyTableModify.setParam(outputParams); -// phyTableModify.setBatchParameters(null); - - PhyTableOpBuildParams buildParams = new PhyTableOpBuildParams(); buildParams.setSchemaName(schemaName); buildParams.setLogTables(ImmutableList.of(tableMeta.getTableName())); @@ -389,7 +379,8 @@ public static int insertIntoTable(RelNode logicalInsert, SqlInsert sqlInsert, Ta buildParams.setDynamicParams(outputParams); buildParams.setBatchParameters(null); - PhyTableOperation phyTableModify = PhyTableOperationFactory.getInstance().buildPhyTblOpByParams(buildParams); + PhyTableOperation phyTableModify = + PhyTableOperationFactory.getInstance().buildPhyTblOpByParams(buildParams); newPhysicalPlans.add(phyTableModify); } } @@ -411,6 +402,11 @@ public static int insertIntoTable(RelNode logicalInsert, SqlInsert sqlInsert, Ta } } + if (returning && result != null) { + result.addAll(ExecUtils.getReturningResultByCursors(cursors, false)); + return 0; + } + return ExecUtils.getAffectRowsByCursors(cursors, false); } @@ -466,7 +462,7 @@ private SqlInsert buildIndexLogicalPlan(SqlInsert oldSqlInsert, TableMeta indexM // an expression SqlNode left = ((SqlCall) node).getOperandList().get(0); String colName = ((SqlIdentifier) left).getLastName(); - if (indexMeta.getColumnIgnoreCase(colName) != null) { + if (indexMeta.containsColumn(colName)) { newUpdateList.add(node); } } @@ -761,7 +757,7 @@ protected static SqlNodeList buildTargetColumnList(SqlInsert oldSqlInsert, Table for (int i = 0; i < oldColumnList.size(); i++) { SqlIdentifier targetColumn = (SqlIdentifier) oldColumnList.get(i); String colName = targetColumn.getLastName(); - if (indexMeta.getColumnIgnoreCase(colName) != null) { + if (indexMeta.containsColumn(colName)) { chosenColumnIndexes.add(i); newColumnList.add(targetColumn); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/PhysicalPlanBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/PhysicalPlanBuilder.java index e3bfaca85..5b270ffd4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/PhysicalPlanBuilder.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/PhysicalPlanBuilder.java @@ -30,6 +30,7 @@ import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.TddlOperatorTable; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; import com.alibaba.polardbx.optimizer.core.dialect.DbType; import com.alibaba.polardbx.optimizer.core.planner.ExecutionPlan; import com.alibaba.polardbx.optimizer.core.planner.Planner; @@ -65,6 +66,7 @@ import org.apache.calcite.sql.SqlBasicCall; import org.apache.calcite.sql.SqlCall; import org.apache.calcite.sql.SqlDelete; +import org.apache.calcite.sql.SqlDmlKeyword; import org.apache.calcite.sql.SqlDynamicParam; import org.apache.calcite.sql.SqlIdentifier; import org.apache.calcite.sql.SqlIndexHint; @@ -78,12 +80,14 @@ import org.apache.calcite.sql.SqlSelect.LockMode; import org.apache.calcite.sql.SqlSelectWithPartition; import org.apache.calcite.sql.SqlUpdate; +import org.apache.calcite.sql.fun.SqlBinaryFunction; import org.apache.calcite.sql.fun.SqlHashCheckAggFunction; import org.apache.calcite.sql.fun.SqlStdOperatorTable; import org.apache.calcite.sql.parser.SqlParserPos; import org.apache.calcite.sql.type.SqlTypeFactoryImpl; import 
org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.calcite.util.Pair;
+import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.collections.MapUtils;
 
 import java.text.MessageFormat;
@@ -95,6 +99,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.TreeSet;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
@@ -111,10 +116,31 @@ public class PhysicalPlanBuilder extends PhyOperationBuilderCommon {
     protected Map currentParams;
     protected String schemaName;
     public SqlNode sqlNode;
+    private boolean convertToBinary;
+    private final Set notConvertColumns = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
+
+    // do not hex-encode char/varchar columns; used by the fast checker and other scenarios that need no conversion
     public PhysicalPlanBuilder(String schemaName, ExecutionContext ec) {
         this.schemaName = schemaName;
         this.ec = ec;
+        this.convertToBinary = false;
+    }
+
+    // hex-encode char/varchar columns; used by the extractor, the checker, and other scenarios that need the conversion
+    public PhysicalPlanBuilder(String schemaName, boolean convertToBinary, ExecutionContext ec) {
+        this.schemaName = schemaName;
+        this.ec = ec;
+        this.convertToBinary = convertToBinary;
+    }
+
+    public PhysicalPlanBuilder(String schemaName, boolean convertToBinary, List notConvertColumns,
+                               ExecutionContext ec) {
+        this.schemaName = schemaName;
+        this.ec = ec;
+        this.convertToBinary = convertToBinary;
+        if (CollectionUtils.isNotEmpty(notConvertColumns)) {
+            this.notConvertColumns.addAll(notConvertColumns);
+        }
     }
 
     protected void initParams(int newIndex) {
@@ -237,17 +263,25 @@ protected RelDataType buildRowTypeForSelect(List selectKeys, ColumnMeta SqlNodeList selectList) {
         final List selectColumns = new ArrayList<>();
         for (String keyName : selectKeys) {
-            selectList.add(new SqlIdentifier(keyName, SqlParserPos.ZERO));
-            selectColumns.add(tableMeta.getColumn(keyName));
-        }
-
-        if (null != rowIndex) {
-            selectColumns.add(rowIndex);
+            ColumnMeta columnMeta = tableMeta.getColumn(keyName);
+            if (convertToBinary && !notConvertColumns.contains(keyName) && DataTypeUtil.isStringType(
+                columnMeta.getDataType())) {
+                selectList.add(buildBinaryFunction(keyName));
+            } else {
+                selectList.add(new SqlIdentifier(keyName, SqlParserPos.ZERO));
+            }
+            selectColumns.add(columnMeta);
         }
 
         return CalciteUtils.switchRowType(selectColumns, typeFactory);
     }
 
+    private static SqlNode buildBinaryFunction(String columnName) {
+        SqlNodeList nodeList = new SqlNodeList(SqlParserPos.ZERO);
+        nodeList.add(new SqlIdentifier(columnName, SqlParserPos.ZERO));
+        return new SqlBasicCall(new SqlBinaryFunction(), nodeList.toArray(), SqlParserPos.ZERO);
+    }
+
     protected static SqlNode buildKeyNameNodeForInClause(List keyNames) {
         // keyNameNode
         if (keyNames.size() > 1) {
@@ -330,7 +364,7 @@ private void buildTargetColumnList(SqlNodeList oldColumnList, SqlNodeList oldExp
         for (int i = 0; i < oldColumnList.size(); i++) {
             SqlIdentifier column = (SqlIdentifier) oldColumnList.get(i);
-            if (tableMeta.getColumnIgnoreCase(column.getLastName()) != null) {
+            if (tableMeta.containsColumn(column.getLastName())) {
                 columnList.add(column);
                 expressionList.add(oldExpressionList.get(i));
             }
@@ -695,6 +729,93 @@ public PhyTableOperation buildUpdateForColumnBackfill(TableMeta tableMeta, List<
         return buildDmlPhyTblOpTemplate(tableMeta.getSchemaName(), sqlUpdate, tableMeta);
     }
 
+    /**
+     * <pre>
+     * INSERT INTO {target_physical_table} {target_columns}
+     * SELECT {source_columns} FROM {source_physical_table}
+     * WHERE (pk0, ... , pkn) > (?, ... , ?)
+     *   AND (pk0, ... , pkn) <= (?, ... , ?)
+     * ORDER BY pk0, ... , pkn
+     * </pre>
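+     * A hypothetical instance of the template above, assuming a single primary key
+     * column (id) and one data column (c1); the physical table names below are
+     * invented placeholders, not from this change:
+     * INSERT INTO tb_target_00 (id, c1)
+     * SELECT id, c1 FROM tb_source_00
+     * WHERE (id) > (?) AND (id) <= (?)
+     * ORDER BY id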
+ */ + public PhyTableOperation buildInsertSelectForOMCBackfill(TableMeta tableMeta, + List targetColumnNames, + List sourceColumnNames, + List primaryKeys, + boolean withLowerBound, boolean withUpperBound, + LockMode lockMode, boolean isInsertIgnore) { + SqlInsert sqlInsert = buildSqlInsertSelectForOMCBackfill(targetColumnNames, sourceColumnNames, + primaryKeys, withLowerBound, withUpperBound, lockMode, isInsertIgnore, tableMeta.isHasPrimaryKey()); + return buildDmlPhyTblOpTemplate(tableMeta.getSchemaName(), sqlInsert, tableMeta); + } + + public SqlInsert buildSqlInsertSelectForOMCBackfill(List targetColumnNames, + List sourceColumnNames, + List primaryKeys, + boolean withLowerBound, boolean withUpperBound, + LockMode lockMode, boolean isInsertIgnore, + boolean forceIndex) { + initParams(0); + + // build select list + SqlNodeList selectList = new SqlNodeList(SqlParserPos.ZERO); + for (String columnName : sourceColumnNames) { + selectList.add(new SqlIdentifier(columnName, SqlParserPos.ZERO)); + } + + // build target table + buildTargetTable(); + + final SqlIdentifier asNode = new SqlIdentifier("tb", SqlParserPos.ZERO); + asNode.indexNode = new SqlNodeList(ImmutableList.of( + new SqlIndexHint(SqlLiteral.createCharString("FORCE INDEX", SqlParserPos.ZERO), null, + new SqlNodeList(ImmutableList.of(SqlLiteral.createCharString("PRIMARY", SqlParserPos.ZERO)), + SqlParserPos.ZERO), SqlParserPos.ZERO)), SqlParserPos.ZERO); + final SqlNode from = + new SqlBasicCall(SqlStdOperatorTable.AS, new SqlNode[] {targetTableNode, asNode}, SqlParserPos.ZERO); + + // build where + SqlNode condition = null; + if (withLowerBound) { + // WHERE (pk0, ... , pkn) > (?, ... , ?) + condition = buildCondition(primaryKeys, SqlStdOperatorTable.GREATER_THAN); + } + + if (withUpperBound) { + // WHERE (pk0, ... , pkn) <= (?, ... , ?) + final SqlNode upperBound = buildCondition(primaryKeys, SqlStdOperatorTable.LESS_THAN_OR_EQUAL); + + condition = + null == condition ? upperBound : PlannerUtils.buildAndTree(ImmutableList.of(condition, upperBound)); + } + + SqlNode target = forceIndex ? from : targetTableNode; + final SqlSelect sqlSelect = + new SqlSelect(SqlParserPos.ZERO, null, selectList, target, condition, null, null, null, null, + null, null); + + sqlSelect.setLockMode(lockMode); + + final SqlNode targetTableParam = BuildPlanUtils.buildTargetTable(); + + final SqlNodeList targetColumnList = + new SqlNodeList(targetColumnNames.stream().map(e -> new SqlIdentifier(e, SqlParserPos.ZERO)).collect( + Collectors.toList()), SqlParserPos.ZERO); + + final SqlNodeList keywords = isInsertIgnore ? + new SqlNodeList(ImmutableList.of(SqlDmlKeyword.IGNORE.symbol(SqlParserPos.ZERO)), SqlParserPos.ZERO) + : SqlNodeList.EMPTY; + + return new SqlInsert(SqlParserPos.ZERO, + keywords, + targetTableParam, + sqlSelect, + targetColumnList, + SqlNodeList.EMPTY, + 0, + null); + } + /** *
      * UPDATE {physical_primary_table}
@@ -1169,6 +1290,72 @@ public PhyTableOperation buildSelectForBackfill(TableMeta tableMeta, List
+     * SELECT {all_columns_exists_in_index_table}
+     * FROM {physical_primary_table}
+     * WHERE (pk0, ... , pkn) > (?, ... , ?)
+     *   AND (pk0, ... , pkn) <= (?, ... , ?)
+     * ORDER BY pk0, ... , pkn
+     * LIMIT ?
+     * LOCK IN SHARE MODE
+     * </pre>
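+     * A hypothetical instance of the template above, assuming a single primary key
+     * column (id) and one index column (c1); the physical table name is an invented
+     * placeholder, not from this change:
+     * SELECT id, c1 FROM tb_primary_00
+     * WHERE (id) > (?) AND (id) <= (?)
+     * ORDER BY id
+     * LIMIT ?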
+ * + * @param tableMeta Primary table meta + * @param selectKeys Select list + * @param primaryKeys Primary key list, for condition building + * @param withLowerBound With lower bound condition + * @param withUpperBound With upper bound condition + * @return ExecutionPlan for extracting data from primary table + */ + public PhyTableOperation buildSelectUpperBoundForInsertSelectBackfill(TableMeta tableMeta, List selectKeys, + List primaryKeys, + boolean withLowerBound, + boolean withUpperBound) { + initParams(0); + + // build select list + SqlNodeList selectList = new SqlNodeList(SqlParserPos.ZERO); + RelDataType rowType = buildRowTypeForSelect(selectKeys, tableMeta, selectList); + + // build target table + buildTargetTable(); + + // build where + SqlNode condition = null; + if (withLowerBound) { + // WHERE (pk0, ... , pkn) > (?, ... , ?) + condition = buildCondition(primaryKeys, SqlStdOperatorTable.GREATER_THAN); + } + + if (withUpperBound) { + // WHERE (pk0, ... , pkn) <= (?, ... , ?) + final SqlNode upperBound = buildCondition(primaryKeys, SqlStdOperatorTable.LESS_THAN_OR_EQUAL); + + condition = + null == condition ? upperBound : PlannerUtils.buildAndTree(ImmutableList.of(condition, upperBound)); + } + + // order by primary keys + SqlNodeList orderBy = new SqlNodeList( + primaryKeys.stream().map(key -> new SqlIdentifier(key, SqlParserPos.ZERO)).collect(Collectors.toList()), + SqlParserPos.ZERO); + + // limit ? + SqlNode fetch = SqlLiteral.createExactNumeric("1", SqlParserPos.ZERO); + SqlNode offset = new SqlDynamicParam(nextDynamicIndex++, SqlParserPos.ZERO); + + final SqlSelect sqlSelect = + new SqlSelect(SqlParserPos.ZERO, null, selectList, targetTableNode, condition, null, null, null, orderBy, + offset, fetch); + + // lock mode + sqlSelect.setLockMode(LockMode.UNDEF); + + // create PhyTableOperation + return buildSelectPhyTblOpTemplate(sqlSelect, rowType, tableMeta, LockMode.UNDEF, ec); + } + public PhyTableOperation buildSelectForBackfillNotLimit(TableMeta tableMeta, List selectKeys, List primaryKeys, boolean withLowerBound, boolean withUpperBound, LockMode lockMode) { @@ -1542,7 +1729,7 @@ public Pair buildReplaceForChangeSet(TableMeta tab return new Pair<>(replacePlan, buildDmlPhyTblOpTemplate(schemaName, replacePlan, tableMeta)); } - private PhyTableOperation buildDmlPhyTblOpTemplate(String schemaName, SqlNode sqlNode, TableMeta primTblMeta) { + public PhyTableOperation buildDmlPhyTblOpTemplate(String schemaName, SqlNode sqlNode, TableMeta primTblMeta) { final RelOptCluster cluster = SqlConverter.getInstance(schemaName, ec).createRelOptCluster(); RelTraitSet traitSet = RelTraitSet.createEmpty(); RelDataType rowType = CalciteUtils.switchRowType(Collections.emptyList(), typeFactory); @@ -2129,10 +2316,7 @@ private SqlSelect buildSqlSelect(List> keys, SqlNode keyNameNode, S return sqlSelect; } - // 重要:构造sample plan时,传入的主键primaryKeys必须按原表顺序!!! - public PhyTableOperation buildSqlSelectForSample(TableMeta tableMeta, List selectKeys, - List primaryKeys, boolean withLowerBound, - boolean withUpperBound) { + public PhyTableOperation buildSqlSelectForSample(TableMeta tableMeta, List selectKeys) { initParams(0); // build select list @@ -2142,28 +2326,8 @@ public PhyTableOperation buildSqlSelectForSample(TableMeta tableMeta, List (?, ... , ?) - condition = buildCondition(primaryKeys, SqlStdOperatorTable.GREATER_THAN); - } - - if (withUpperBound) { - // WHERE (pk0, ... , pkn) <= (?, ... , ?) 
- final SqlNode upperBound = buildCondition(primaryKeys, SqlStdOperatorTable.LESS_THAN_OR_EQUAL); - - condition = - null == condition ? upperBound : PlannerUtils.buildAndTree(ImmutableList.of(condition, upperBound)); - } - - // order by primary keys - SqlNodeList orderBy = new SqlNodeList( - primaryKeys.stream().map(key -> new SqlIdentifier(key, SqlParserPos.ZERO)).collect(Collectors.toList()), - SqlParserPos.ZERO); - final SqlSelect sqlSelect = - new SqlSelect(SqlParserPos.ZERO, null, selectList, targetTableNode, condition, null, null, null, orderBy, + new SqlSelect(SqlParserPos.ZERO, null, selectList, targetTableNode, null, null, null, null, null, null, null); // create PhyTableOperation diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/backfill/CdasLoader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/backfill/CdasLoader.java index a1181de70..875168777 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/backfill/CdasLoader.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/backfill/CdasLoader.java @@ -54,6 +54,8 @@ import java.util.function.Supplier; import java.util.stream.Collectors; +import static org.apache.calcite.sql.fun.SqlStdOperatorTable.EQUALS; + /** * Created by zhuqiwei. * @@ -66,9 +68,9 @@ public class CdasLoader extends Loader { private CdasLoader(String schemaName, String tableName, SqlInsert insert, SqlInsert insertIgnore, ExecutionPlan checkerPlan, int[] checkerPkMapping, int[] checkerParamMapping, BiFunction, ExecutionContext, List> executeFunc, - int[] insertedParamMapping, boolean needParamProject) { + int[] insertedParamMapping, boolean needParamProject, String backfillReturning) { super(schemaName, tableName, insert, insertIgnore, checkerPlan, checkerPkMapping, checkerParamMapping, - executeFunc, false); + executeFunc, false, backfillReturning); this.insertedParamMapping = insertedParamMapping; this.needParamProjection = needParamProject; } @@ -80,6 +82,9 @@ public static CdasLoader create(String srcSchemaName, String dstSchemaName, Stri final OptimizerContext optimizerContextDst = OptimizerContext.getContext(dstSchemaName); final OptimizerContext optimizerContextSrc = OptimizerContext.getContext(srcSchemaName); + boolean canUseReturning = + canUseBackfillReturning(ec, dstSchemaName) && canUseBackfillReturning(ec, srcSchemaName); + // Construct target table final SqlNode targetTableParam = BuildPlanUtils.buildTargetTable(); // Construct targetColumnList @@ -150,7 +155,8 @@ public static CdasLoader create(String srcSchemaName, String dstSchemaName, Stri final TddlRuleManager tddlRuleManagerDst = optimizerContextDst.getRuleManager(); final Set filterColumns = Sets.newTreeSet(String::compareToIgnoreCase); final Set primaryKeys = Sets.newTreeSet(String::compareToIgnoreCase); - primaryKeys.addAll(GlobalIndexMeta.getPrimaryKeys(srcTableMeta)); + final List pkList = GlobalIndexMeta.getPrimaryKeys(srcTableMeta); + primaryKeys.addAll(pkList); filterColumns.addAll(primaryKeys); filterColumns.addAll(tddlRuleManagerSrc.getSharedColumns(srcTableName)); filterColumns.addAll(tddlRuleManagerDst.getSharedColumns(dstTableName)); @@ -179,7 +185,8 @@ public static CdasLoader create(String srcSchemaName, String dstSchemaName, Stri return new CdasLoader(dstSchemaName, dstTableName, sqlInsert, sqlInsertIgnore, checkerPlan, checkerPkMapping, checkerParamMapping, - executeFunc, insertMappings, needParamProject); + executeFunc, insertMappings, needParamProject, + canUseReturning ? 
String.join(",", pkList) : null); } @@ -187,8 +194,13 @@ public static CdasLoader create(String srcSchemaName, String dstSchemaName, Stri public int executeInsert(SqlInsert sqlInsert, String schemaName, String tableName, ExecutionContext executionContext, String sourceDbIndex, String phyTableName) { TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(tableName); - return InsertIndexExecutor - .insertIntoTable(null, sqlInsert, tableMeta, schemaName, executionContext, executeFunc, false); + List> returningRes = new ArrayList<>(); + int affectRows = InsertIndexExecutor + .insertIntoTable(null, sqlInsert, tableMeta, "", "", + schemaName, executionContext, executeFunc, false, true, + "", usingBackfillReturning, returningRes); + + return usingBackfillReturning ? getReturningAffectRows(returningRes, executionContext) : affectRows; } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/backfill/GsiChangeSetLoader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/backfill/GsiChangeSetLoader.java new file mode 100644 index 000000000..d76c39bdd --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/backfill/GsiChangeSetLoader.java @@ -0,0 +1,189 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.gsi.backfill; + +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.executor.backfill.Loader; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.gsi.InsertIndexExecutor; +import com.alibaba.polardbx.executor.gsi.PhysicalPlanBuilder; +import com.alibaba.polardbx.executor.partitionmanagement.backfill.AlterTableGroupLoader; +import com.alibaba.polardbx.gms.topology.DbInfoManager; +import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.config.table.GlobalIndexMeta; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.planner.ExecutionPlan; +import com.alibaba.polardbx.optimizer.partition.PartitionInfo; +import com.alibaba.polardbx.optimizer.rule.TddlRuleManager; +import com.alibaba.polardbx.optimizer.utils.BuildPlanUtils; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Sets; +import org.apache.calcite.linq4j.Ord; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.sql.SqlBasicCall; +import org.apache.calcite.sql.SqlDmlKeyword; +import org.apache.calcite.sql.SqlDynamicParam; +import org.apache.calcite.sql.SqlIdentifier; +import org.apache.calcite.sql.SqlInsert; +import org.apache.calcite.sql.SqlNode; +import org.apache.calcite.sql.SqlNodeList; +import org.apache.calcite.sql.fun.SqlStdOperatorTable; +import org.apache.calcite.sql.parser.SqlParserPos; +import org.apache.commons.lang.StringUtils; + +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.BiFunction; +import java.util.stream.Collectors; + +/** + * @author wumu + */ +public class GsiChangeSetLoader extends Loader { + private Map tableNameMapping; + + protected GsiChangeSetLoader(String schemaName, String tableName, SqlInsert insert, SqlInsert insertIgnore, + ExecutionPlan checkerPlan, + int[] checkerPkMapping, + int[] checkerParamMapping, + Map tableNameMapping, + BiFunction, ExecutionContext, List> executeFunc) { + super(schemaName, tableName, insert, insertIgnore, checkerPlan, checkerPkMapping, checkerParamMapping, + executeFunc, + true, null); + this.conflictDetection = true; + this.tableNameMapping = tableNameMapping; + } + + public static Loader create(String schemaName, String primaryTable, String indexTable, + BiFunction, ExecutionContext, List> executeFunc, + boolean useHint, ExecutionContext ec, + Map tableNameMapping) { + final OptimizerContext optimizerContext = OptimizerContext.getContext(schemaName); + + // Construct target table + final SqlNode targetTableParam = BuildPlanUtils.buildTargetTable(); + + // Construct targetColumnList + final TableMeta indexTableMeta = ec.getSchemaManager(schemaName).getTable(indexTable); + final SqlNodeList targetColumnList = new SqlNodeList( + indexTableMeta.getAllColumns() + .stream() + .filter(columnMeta -> (!columnMeta.isGeneratedColumn() && !(columnMeta.getMappingName() != null + && columnMeta.getMappingName().isEmpty()))) + .map(columnMeta -> new SqlIdentifier(columnMeta.getName(), SqlParserPos.ZERO)) + .collect(Collectors.toList()), + SqlParserPos.ZERO); + + // Construct values + final SqlNode[] dynamics = new SqlNode[targetColumnList.size()]; + for (int i = 0; i < targetColumnList.size(); i++) { + dynamics[i] = new SqlDynamicParam(i, SqlParserPos.ZERO); + } + final SqlNode row = new SqlBasicCall(SqlStdOperatorTable.ROW, 
dynamics, SqlParserPos.ZERO); + final SqlNode[] rowList = new SqlNode[] {row}; + final SqlNode values = new SqlBasicCall(SqlStdOperatorTable.VALUES, rowList, SqlParserPos.ZERO); + + // Insert + final SqlInsert sqlInsert = new SqlInsert(SqlParserPos.ZERO, + new SqlNodeList(SqlParserPos.ZERO), + targetTableParam, + values, + targetColumnList, + SqlNodeList.EMPTY, + 0, + null); + + // Construct keyword + final SqlNodeList keywords = new SqlNodeList(ImmutableList.of(SqlDmlKeyword.IGNORE.symbol(SqlParserPos.ZERO)), + SqlParserPos.ZERO); + + // Insert ignore + final SqlInsert sqlInsertIgnore = new SqlInsert(SqlParserPos.ZERO, + keywords, + targetTableParam, + values, + targetColumnList, + SqlNodeList.EMPTY, + 0, + null); + + // For duplicate check + final TableMeta primaryTableMeta = ec.getSchemaManager(schemaName).getTable(primaryTable); + final TddlRuleManager tddlRuleManager = optimizerContext.getRuleManager(); + final Set filterColumns = Sets.newTreeSet(String::compareToIgnoreCase); + final Set primaryKeys = Sets.newTreeSet(String::compareToIgnoreCase); + primaryKeys.addAll(GlobalIndexMeta.getPrimaryKeys(primaryTableMeta)); + filterColumns.addAll(primaryKeys); + filterColumns.addAll(tddlRuleManager.getSharedColumns(primaryTable)); + filterColumns.addAll(tddlRuleManager.getSharedColumns(indexTable)); + + final List filterList = ImmutableList.copyOf(filterColumns); + + // Mapping from index of filter param to index of insert param + final int[] checkerParamMapping = new int[filterList.size()]; + final int[] checkerPkMapping = new int[primaryKeys.size()]; + int pkIndex = 0; + for (Ord ordFilter : Ord.zip(filterList)) { + for (Ord ordColumn : Ord.zip(targetColumnList)) { + final String columnName = ((SqlIdentifier) ordColumn.getValue()).getSimple(); + if (ordFilter.getValue().equalsIgnoreCase(columnName)) { + checkerParamMapping[ordFilter.i] = ordColumn.i; + if (primaryKeys.contains(columnName)) { + checkerPkMapping[pkIndex++] = ordColumn.i; + } + } + } + } + + // Build select plan for duplication check + final ExecutionPlan checkerPlan = PhysicalPlanBuilder + .buildPlanForBackfillDuplicateCheck(schemaName, indexTableMeta, filterList, filterList, useHint, ec); + + return new GsiChangeSetLoader(schemaName, + indexTable, + sqlInsert, + sqlInsertIgnore, + checkerPlan, + checkerPkMapping, + checkerParamMapping, + tableNameMapping, + executeFunc); + } + + @Override + public int executeInsert(SqlInsert sqlInsert, String schemaName, String tableName, + ExecutionContext executionContext, String sourceDbIndex, String phyTableName) { + TableMeta tableMeta = OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(tableName); + if (DbInfoManager.getInstance().isNewPartitionDb(schemaName)) { + PartitionInfo newPartInfo = tableMeta.getNewPartitionInfo(); + return InsertIndexExecutor + .backfillIntoPartitionedTable(null, sqlInsert, tableMeta, schemaName, executionContext, executeFunc, + false, newPartInfo, sourceDbIndex, tableNameMapping.get(phyTableName), this.mirrorCopy, false, + null); + } else { + return InsertIndexExecutor + .insertIntoTable(null, sqlInsert, tableMeta, sourceDbIndex, tableNameMapping.get(phyTableName), + schemaName, executionContext, + executeFunc, + false, + false); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/backfill/GsiExtractor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/backfill/GsiExtractor.java index f515ff7e4..6e96efcc0 100644 --- 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/backfill/GsiExtractor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/backfill/GsiExtractor.java @@ -35,16 +35,18 @@ public class GsiExtractor extends Extractor { public GsiExtractor(String schemaName, String sourceTableName, String targetTableName, long batchSize, long speedMin, long speedLimit, - long parallelism, PhyTableOperation planSelectWithMax, + long parallelism, + boolean useBinary, + List modifyStringColumns, + PhyTableOperation planSelectWithMax, PhyTableOperation planSelectWithMin, PhyTableOperation planSelectWithMinAndMax, PhyTableOperation planSelectMaxPk, PhyTableOperation planSelectSample, - PhyTableOperation planSelectMinAndMaxSample, List primaryKeysId) { - super(schemaName, sourceTableName, targetTableName, batchSize, speedMin, speedLimit, parallelism, - planSelectWithMax, planSelectWithMin, planSelectWithMinAndMax, planSelectMaxPk, - planSelectSample, planSelectMinAndMaxSample, primaryKeysId); + super(schemaName, sourceTableName, targetTableName, batchSize, speedMin, speedLimit, parallelism, useBinary, + modifyStringColumns, planSelectWithMax, planSelectWithMin, planSelectWithMinAndMax, planSelectMaxPk, + planSelectSample, primaryKeysId); } @Override @@ -53,9 +55,10 @@ public Map> getSourcePhyTables() { } public static Extractor create(String schemaName, String sourceTableName, String targetTableName, long batchSize, - long speedMin, long speedLimit, long parallelism, ExecutionContext ec) { + long speedMin, long speedLimit, long parallelism, boolean useBinary, + List modifyStringColumns, ExecutionContext ec) { ExtractorInfo info = Extractor.buildExtractorInfo(ec, schemaName, sourceTableName, targetTableName, true); - final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, ec); + final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, useBinary, modifyStringColumns, ec); return new GsiExtractor(schemaName, sourceTableName, @@ -64,6 +67,8 @@ public static Extractor create(String schemaName, String sourceTableName, String speedMin, speedLimit, parallelism, + useBinary, + modifyStringColumns, builder.buildSelectForBackfill(info.getSourceTableMeta(), info.getTargetTableColumns(), info.getPrimaryKeys(), false, true, SqlSelect.LockMode.SHARED_LOCK), @@ -76,10 +81,7 @@ public static Extractor create(String schemaName, String sourceTableName, String true, true, SqlSelect.LockMode.SHARED_LOCK), builder.buildSelectMaxPkForBackfill(info.getSourceTableMeta(), info.getPrimaryKeys()), - builder.buildSqlSelectForSample(info.getSourceTableMeta(), info.getPrimaryKeys(), info.getPrimaryKeys(), - false, false), - builder.buildSqlSelectForSample(info.getSourceTableMeta(), info.getPrimaryKeys(), info.getPrimaryKeys(), - true, true), + builder.buildSqlSelectForSample(info.getSourceTableMeta(), info.getPrimaryKeys()), info.getPrimaryKeysId()); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/backfill/GsiLoader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/backfill/GsiLoader.java index 212ececa3..ab526dd88 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/backfill/GsiLoader.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/backfill/GsiLoader.java @@ -16,7 +16,12 @@ package com.alibaba.polardbx.executor.gsi.backfill; +import com.alibaba.polardbx.executor.backfill.Extractor; +import com.alibaba.polardbx.common.jdbc.ParameterContext; +import 
com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.executor.backfill.Loader; +import com.alibaba.polardbx.executor.common.ExecutorContext; +import com.alibaba.polardbx.executor.common.TopologyHandler; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.gsi.InsertIndexExecutor; import com.alibaba.polardbx.executor.gsi.PhysicalPlanBuilder; @@ -41,12 +46,16 @@ import org.apache.calcite.sql.fun.SqlStdOperatorTable; import org.apache.calcite.sql.parser.SqlParserPos; +import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.function.BiFunction; import java.util.stream.Collectors; import static com.alibaba.polardbx.common.TddlConstants.IMPLICIT_COL_NAME; +import static com.alibaba.polardbx.executor.columns.ColumnBackfillExecutor.isAllDnUseXDataSource; +import static org.apache.calcite.sql.fun.SqlStdOperatorTable.EQUALS; /** * Fill batch data into index table with duplication check @@ -55,14 +64,14 @@ public class GsiLoader extends Loader { private GsiLoader(String schemaName, String tableName, SqlInsert insert, SqlInsert insertIgnore, ExecutionPlan checkerPlan, int[] checkerPkMapping, int[] checkerParamMapping, - BiFunction, ExecutionContext, List> executeFunc) { + BiFunction, ExecutionContext, List> executeFunc, String backfillReturning) { super(schemaName, tableName, insert, insertIgnore, checkerPlan, checkerPkMapping, checkerParamMapping, - executeFunc, false); + executeFunc, false, backfillReturning); } public static Loader create(String schemaName, String primaryTable, String indexTable, BiFunction, ExecutionContext, List> executeFunc, - boolean useHint, ExecutionContext ec) { + boolean useHint, boolean canUseReturning, ExecutionContext ec) { final OptimizerContext optimizerContext = OptimizerContext.getContext(schemaName); // Construct target table @@ -74,7 +83,8 @@ public static Loader create(String schemaName, String primaryTable, String index final SqlNodeList targetColumnList = new SqlNodeList( indexTableMeta.getAllColumns() .stream() - .filter(columnMeta -> !columnMeta.isGeneratedColumn()) + .filter(columnMeta -> (!columnMeta.isGeneratedColumn() && !(columnMeta.getMappingName() != null + && columnMeta.getMappingName().isEmpty()))) .map(columnMeta -> new SqlIdentifier(columnMeta.getName(), SqlParserPos.ZERO)) .collect(Collectors.toList()), SqlParserPos.ZERO); @@ -117,7 +127,8 @@ public static Loader create(String schemaName, String primaryTable, String index final TddlRuleManager tddlRuleManager = optimizerContext.getRuleManager(); final Set filterColumns = Sets.newTreeSet(String::compareToIgnoreCase); final Set primaryKeys = Sets.newTreeSet(String::compareToIgnoreCase); - primaryKeys.addAll(GlobalIndexMeta.getPrimaryKeys(primaryTableMeta)); + final List pkList = Extractor.getPrimaryKeys(primaryTableMeta, ec); + primaryKeys.addAll(pkList); filterColumns.addAll(primaryKeys); filterColumns.addAll(tddlRuleManager.getSharedColumns(primaryTable)); filterColumns.addAll(tddlRuleManager.getSharedColumns(indexTable)); @@ -155,14 +166,20 @@ public static Loader create(String schemaName, String primaryTable, String index checkerPlan, checkerPkMapping, checkerParamMapping, - executeFunc); + executeFunc, + canUseReturning ? 
String.join(",", pkList) : null); } @Override public int executeInsert(SqlInsert sqlInsert, String schemaName, String tableName, ExecutionContext executionContext, String sourceDbIndex, String phyTableName) { TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(tableName); - return InsertIndexExecutor - .insertIntoTable(null, sqlInsert, tableMeta, schemaName, executionContext, executeFunc, false); + List> returningRes = new ArrayList<>(); + int affectRows = InsertIndexExecutor + .insertIntoTable(null, sqlInsert, tableMeta, "", "", + schemaName, executionContext, executeFunc, false, true, + "", usingBackfillReturning, returningRes); + + return usingBackfillReturning ? getReturningAffectRows(returningRes, executionContext) : affectRows; } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/backfill/OmcMirrorCopyExtractor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/backfill/OmcMirrorCopyExtractor.java new file mode 100644 index 000000000..68ea8c674 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/backfill/OmcMirrorCopyExtractor.java @@ -0,0 +1,393 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.gsi.backfill; + +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.jdbc.ParameterContext; +import com.alibaba.polardbx.common.properties.DynamicConfig; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.ExecutorHelper; +import com.alibaba.polardbx.executor.backfill.BatchConsumer; +import com.alibaba.polardbx.executor.backfill.Extractor; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.ddl.newengine.DdlEngineStats; +import com.alibaba.polardbx.executor.ddl.newengine.cross.CrossEngineValidator; +import com.alibaba.polardbx.executor.gsi.GsiBackfillManager; +import com.alibaba.polardbx.executor.gsi.GsiUtils; +import com.alibaba.polardbx.executor.gsi.PhysicalPlanBuilder; +import com.alibaba.polardbx.executor.utils.ExecUtils; +import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.rel.PhyTableOpBuildParams; +import com.alibaba.polardbx.optimizer.core.rel.PhyTableOperation; +import com.alibaba.polardbx.optimizer.core.rel.PhyTableOperationFactory; +import com.alibaba.polardbx.optimizer.sql.sql2rel.TddlSqlToRelConverter; +import com.alibaba.polardbx.optimizer.utils.PlannerUtils; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; +import com.google.common.collect.ImmutableList; +import org.apache.calcite.sql.SqlSelect; + +import java.text.MessageFormat; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static com.alibaba.polardbx.common.exception.code.ErrorCode.ER_LOCK_DEADLOCK; +import static com.alibaba.polardbx.common.exception.code.ErrorCode.ER_LOCK_WAIT_TIMEOUT; +import static com.alibaba.polardbx.executor.gsi.GsiUtils.SQLSTATE_DEADLOCK; +import static com.alibaba.polardbx.executor.gsi.GsiUtils.SQLSTATE_LOCK_TIMEOUT; + +/** + * @author wumu + */ +public class OmcMirrorCopyExtractor extends Extractor { + private final Map tableNameMapping; + private final Map> sourcePhyTables; + + private final PhyTableOperation planInsertSelectWithMin; + private final PhyTableOperation planInsertSelectWithMax; + private final PhyTableOperation planInsertSelectWithMinAndMax; + + final List tableColumns; + final List primaryKeys; + final Set selectKeySet; + + public OmcMirrorCopyExtractor(String schemaName, String sourceTableName, String targetTableName, long batchSize, + long speedMin, + long speedLimit, + long parallelism, + boolean useBinary, + PhyTableOperation planSelectBatchWithMax, + PhyTableOperation planSelectBatchWithMin, + PhyTableOperation planSelectBatchWithMinAndMax, + PhyTableOperation planSelectMaxPk, + PhyTableOperation planSelectSample, + PhyTableOperation planInsertSelectWithMax, + PhyTableOperation planInsertSelectWithMin, + PhyTableOperation planInsertSelectWithMinAndMax, + List primaryKeysId, + List primaryKeys, List tableColumns, + Map tableNameMapping, + Map> sourcePhyTables) { + super(schemaName, sourceTableName, targetTableName, batchSize, speedMin, speedLimit, parallelism, useBinary, + null, planSelectBatchWithMax, 
planSelectBatchWithMin, planSelectBatchWithMinAndMax, planSelectMaxPk, + planSelectSample, primaryKeysId); + + this.planInsertSelectWithMin = planInsertSelectWithMin; + this.planInsertSelectWithMax = planInsertSelectWithMax; + this.planInsertSelectWithMinAndMax = planInsertSelectWithMinAndMax; + + this.primaryKeys = primaryKeys; + this.tableColumns = tableColumns; + this.tableNameMapping = tableNameMapping; + this.sourcePhyTables = sourcePhyTables; + this.selectKeySet = new HashSet<>(primaryKeysId); + } + + public static OmcMirrorCopyExtractor create(String schemaName, String sourceTableName, String targetTableName, + long batchSize, long speedMin, long speedLimit, long parallelism, + Map tableNameMapping, + Map> sourcePhyTables, + boolean useChangeSet, boolean useBinary, + ExecutionContext ec) { + Extractor.ExtractorInfo + info = Extractor.buildExtractorInfo(ec, schemaName, sourceTableName, targetTableName, true); + final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, useBinary, ec); + + final TableMeta tableMeta = info.getSourceTableMeta(); + final List tableColumns = tableMeta.getWriteColumns() + .stream() + .map(ColumnMeta::getName) + .collect(Collectors.toList()); + + List targetTableColumns = info.getRealTargetTableColumns(); + List sourceTableColumns = info.getTargetTableColumns(); + List primaryKeys = info.getPrimaryKeys(); + + SqlSelect.LockMode lockMode = useChangeSet ? SqlSelect.LockMode.UNDEF : SqlSelect.LockMode.SHARED_LOCK; + boolean isInsertIgnore = !useChangeSet; + + return new OmcMirrorCopyExtractor(schemaName, + sourceTableName, + targetTableName, + batchSize, + speedMin, + speedLimit, + parallelism, + useBinary, + builder.buildSelectUpperBoundForInsertSelectBackfill(info.getSourceTableMeta(), + info.getTargetTableColumns(), info.getPrimaryKeys(), + false, true), + builder.buildSelectUpperBoundForInsertSelectBackfill(info.getSourceTableMeta(), + info.getTargetTableColumns(), info.getPrimaryKeys(), + true, false), + builder.buildSelectUpperBoundForInsertSelectBackfill(info.getSourceTableMeta(), + info.getTargetTableColumns(), info.getPrimaryKeys(), + true, true), + builder.buildSelectMaxPkForBackfill(info.getSourceTableMeta(), info.getPrimaryKeys()), + builder.buildSqlSelectForSample(info.getSourceTableMeta(), info.getPrimaryKeys()), + builder.buildInsertSelectForOMCBackfill(tableMeta, targetTableColumns, sourceTableColumns, + primaryKeys, false, true, lockMode, isInsertIgnore), + builder.buildInsertSelectForOMCBackfill(tableMeta, targetTableColumns, sourceTableColumns, + primaryKeys, true, false, lockMode, isInsertIgnore), + builder.buildInsertSelectForOMCBackfill(tableMeta, targetTableColumns, sourceTableColumns, + primaryKeys, true, true, lockMode, isInsertIgnore), + info.getPrimaryKeysId(), + primaryKeys, + tableColumns, + tableNameMapping, + sourcePhyTables + ); + } + + @Override + protected void foreachPhyTableBatch(String dbIndex, String phyTable, + List backfillObjects, + ExecutionContext ec, + BatchConsumer loader, + AtomicReference interrupted) { + String physicalTableName = TddlSqlToRelConverter.unwrapPhysicalTableName(phyTable); + + // Load upper bound + List upperBoundParam = + buildUpperBoundParam(backfillObjects.size(), backfillObjects, primaryKeysIdMap); + final boolean withUpperBound = GeneralUtil.isNotEmpty(upperBoundParam); + + // Init historical position mark + long successRowCount = backfillObjects.get(0).successRowCount; + AtomicReference currentSuccessRowCount = new AtomicReference<>(0L); + List lastPk = 
initSelectParam(backfillObjects, primaryKeysIdMap);
+
+        long rangeBackfillStartTime = System.currentTimeMillis();
+
+        List<Map<Integer, ParameterContext>> lastBatch = null;
+        AtomicReference<Boolean> finished = new AtomicReference<>(false);
+        long actualBatchSize = batchSize;
+        do {
+            try {
+                if (rateLimiter != null) {
+                    rateLimiter.acquire((int) actualBatchSize);
+                }
+                long start = System.currentTimeMillis();
+
+                // Dynamic adjust lower bound of rate.
+                final long dynamicRate = DynamicConfig.getInstance().getGeneralDynamicSpeedLimitation();
+                if (dynamicRate > 0) {
+                    throttle.resetMaxRate(dynamicRate);
+                }
+
+                // For next batch, build select plan and parameters
+                final PhyTableOperation selectPlan = buildSelectPlanWithParam(dbIndex,
+                    physicalTableName,
+                    actualBatchSize,
+                    Stream.concat(lastPk.stream(), upperBoundParam.stream()).collect(Collectors.toList()),
+                    GeneralUtil.isNotEmpty(lastPk),
+                    withUpperBound);
+
+                List<ParameterContext> finalLastPk = lastPk;
+                lastBatch = GsiUtils.retryOnException(
+                    // 1. Lock rows within trx1 (single db transaction)
+                    // 2. Fill into index table within trx2 (XA transaction)
+                    // 3. Trx1 commit, if (success) {trx2 commit} else {trx2 rollback}
+                    () -> GsiUtils.wrapWithSingleDbTrx(tm, ec,
+                        (selectEc) -> extract(dbIndex, physicalTableName, selectPlan, selectEc, finalLastPk,
+                            upperBoundParam, currentSuccessRowCount, finished)),
+                    e -> (GsiUtils.vendorErrorIs(e, SQLSTATE_DEADLOCK, ER_LOCK_DEADLOCK)
+                        || GsiUtils.vendorErrorIs(e, SQLSTATE_LOCK_TIMEOUT, ER_LOCK_WAIT_TIMEOUT))
+                        || e.getMessage().contains("Loader check error."),
+                    (e, retryCount) -> deadlockErrConsumer(selectPlan, ec, e, retryCount));
+
+                // For status recording
+                List<ParameterContext> beforeLastPk = lastPk;
+
+                // Build parameter for next batch
+                lastPk = buildSelectParam(lastBatch, primaryKeysId);
+
+                successRowCount += currentSuccessRowCount.get();
+
+                reporter.updatePositionMark(ec, backfillObjects, successRowCount, lastPk, beforeLastPk,
+                    finished.get(), primaryKeysIdMap);
+                // estimate the backfill speed
+                ec.getStats().backfillRows.addAndGet(currentSuccessRowCount.get());
+                DdlEngineStats.METRIC_BACKFILL_ROWS_FINISHED.update(currentSuccessRowCount.get());
+
+                if (!finished.get()) {
+                    throttle.feedback(new com.alibaba.polardbx.executor.backfill.Throttle.FeedbackStats(
+                        System.currentTimeMillis() - start, start, currentSuccessRowCount.get()));
+                }
+                DdlEngineStats.METRIC_BACKFILL_ROWS_SPEED.set((long) throttle.getActualRateLastCycle());
+
+                if (rateLimiter != null) {
+                    // Limit rate.
+                    rateLimiter.setRate(throttle.getNewRate());
+                }
+
+                // Check DDL is ongoing.
+ if (CrossEngineValidator.isJobInterrupted(ec) || Thread.currentThread().isInterrupted() + || interrupted.get()) { + long jobId = ec.getDdlJobId(); + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, + "The job '" + jobId + "' has been cancelled"); + } + if (actualBatchSize < batchSize) { + actualBatchSize = Math.min(actualBatchSize * 2, batchSize); + } + } catch (TddlRuntimeException e) { + boolean retry = (e.getErrorCode() == ErrorCode.ERR_X_PROTOCOL_BAD_PACKET.getCode() || + (e.getErrorCode() == 1153 && e.getMessage().toLowerCase().contains("max_allowed_packet")) || + (e.getSQLState() != null && e.getSQLState().equalsIgnoreCase("S1000") && e.getMessage() + .toLowerCase().contains("max_allowed_packet"))) && actualBatchSize > 1; + if (retry) { + actualBatchSize = Math.max(actualBatchSize / 8, 1); + } else { + throw e; + } + } + + // for sliding window of split + checkAndSplitBackfillObject( + dbIndex, phyTable, successRowCount, ec, rangeBackfillStartTime, lastBatch, backfillObjects); + } while (!finished.get()); + + DdlEngineStats.METRIC_BACKFILL_ROWS_SPEED.set(0); + reporter.addBackfillCount(successRowCount); + + SQLRecorderLogger.ddlLogger.warn(MessageFormat.format("[{0}] Last backfill row for {1}[{2}][{3}]: {4}", + ec.getTraceId(), + dbIndex, + phyTable, + successRowCount, + GsiUtils.rowToString(lastBatch.isEmpty() ? null : lastBatch.get(lastBatch.size() - 1)))); + } + + protected List> extract(String dbIndex, String phyTableName, + PhyTableOperation extractPlan, ExecutionContext extractEc, + List lowerBound, + List upperBound, + AtomicReference successRowCount, + AtomicReference finished) { + Cursor extractCursor = null; + // Transform + final List> result; + try { + // Extract + extractCursor = ExecutorHelper.execute(extractPlan, extractEc); + result = com.alibaba.polardbx.executor.gsi.utils.Transformer.buildBatchParam(extractCursor, useBinary, + null); + } finally { + if (extractCursor != null) { + extractCursor.close(new ArrayList<>()); + } + } + + if (result.isEmpty()) { + finished.set(true); + } else { + // build new upperBound + upperBound = buildSelectParam(result, primaryKeysId); + } + + long affectRows = + executeInsertSelect(dbIndex, tableNameMapping.get(phyTableName), phyTableName, lowerBound, upperBound, + extractEc); + successRowCount.set(affectRows); + + extractEc.getTransaction().commit(); + + return result; + } + + private long executeInsertSelect(String dbIndex, String targetPhyTable, + String sourcePhyTable, + List lowerBound, + List upperBound, + ExecutionContext executionContext) { + boolean withLowerBound = GeneralUtil.isNotEmpty(lowerBound); + boolean withUpperBound = GeneralUtil.isNotEmpty(upperBound); + if (!withUpperBound) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, + "backfill failed because the upperbound of the primary key could not be found."); + } + + PhyTableOperation updatePlan = buildInsertSelectPlanWithParam(dbIndex, sourcePhyTable, targetPhyTable, + Stream.concat(lowerBound.stream(), upperBound.stream()).collect(Collectors.toList()), withLowerBound, + true); + + Cursor extractCursor = ExecutorHelper.execute(updatePlan, executionContext); + + return ExecUtils.getAffectRowsByCursor(extractCursor); + } + + protected PhyTableOperation buildInsertSelectPlanWithParam(String dbIndex, String sourcePhyTable, + String targetPhyTable, + List params, + boolean withLowerBound, + boolean withUpperBound) { + Map planParams = new HashMap<>(); + // Physical table is 1st parameter + planParams.put(1, 
PlannerUtils.buildParameterContextForTableName(targetPhyTable, 1)); + planParams.put(2, PlannerUtils.buildParameterContextForTableName(sourcePhyTable, 2)); + + int nextParamIndex = 3; + + // Parameters for where(DNF) + final int pkNumber = params.size() / ((withLowerBound ? 1 : 0) + (withUpperBound ? 1 : 0)); + if (withLowerBound) { + for (int i = 0; i < pkNumber; ++i) { + for (int j = 0; j <= i; ++j) { + planParams.put(nextParamIndex, + new ParameterContext(params.get(j).getParameterMethod(), + new Object[] {nextParamIndex, params.get(j).getArgs()[1]})); + nextParamIndex++; + } + } + } + if (withUpperBound) { + final int base = withLowerBound ? pkNumber : 0; + for (int i = 0; i < pkNumber; ++i) { + for (int j = 0; j <= i; ++j) { + planParams.put(nextParamIndex, + new ParameterContext(params.get(base + j).getParameterMethod(), + new Object[] {nextParamIndex, params.get(base + j).getArgs()[1]})); + nextParamIndex++; + } + } + } + + PhyTableOperation targetPhyOp = !withLowerBound ? planInsertSelectWithMax : + (withUpperBound ? planInsertSelectWithMinAndMax : planInsertSelectWithMin); + PhyTableOpBuildParams buildParams = new PhyTableOpBuildParams(); + buildParams.setGroupName(dbIndex); + buildParams.setPhyTables(ImmutableList.of(ImmutableList.of(targetPhyTable, sourcePhyTable))); + buildParams.setDynamicParams(planParams); + + return PhyTableOperationFactory.getInstance().buildPhyTableOperationByPhyOp(targetPhyOp, buildParams); + } + + @Override + public Map> getSourcePhyTables() { + return sourcePhyTables; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/corrector/Corrector.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/corrector/Corrector.java index 6c7126d0d..c12cabff3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/corrector/Corrector.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/corrector/Corrector.java @@ -195,7 +195,8 @@ public void start(ExecutionContext baseEc, Checker checker) { } @Override - public boolean batch(String logTblOrIndexTbl, String dbIndex, String phyTable, ExecutionContext selectEc, Checker checker, + public boolean batch(String logTblOrIndexTbl, String dbIndex, String phyTable, ExecutionContext selectEc, + Checker checker, boolean primaryToGsi, List>> baseRows, List>> checkRows) { @@ -419,7 +420,8 @@ public void finish(ExecutionContext baseEc, Checker checker) { "Corrector."))); } - private int doDelete(String logTblOrIdxTbl, String dbIndex, String phyTable, List pks, ExecutionContext newEc) { + private int doDelete(String logTblOrIdxTbl, String dbIndex, String phyTable, List pks, + ExecutionContext newEc) { final Map planParams = new HashMap<>(); // Physical table is 1st parameter planParams.put(1, PlannerUtils.buildParameterContextForTableName(phyTable, 1)); @@ -446,7 +448,8 @@ private int doDelete(String logTblOrIdxTbl, String dbIndex, String phyTable, Lis buildParams.setGroupName(dbIndex); buildParams.setPhyTables(ImmutableList.of(ImmutableList.of(phyTable))); buildParams.setDynamicParams(planParams); - PhyTableOperation plan = PhyTableOperationFactory.getInstance().buildPhyTableOperationByPhyOp(targetPhyOp, buildParams); + PhyTableOperation plan = + PhyTableOperationFactory.getInstance().buildPhyTableOperationByPhyOp(targetPhyOp, buildParams); return applyDelete(plan, planParams, newEc); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/corrector/GsiChecker.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/corrector/GsiChecker.java index ecce37d7a..b63575615 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/corrector/GsiChecker.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/corrector/GsiChecker.java @@ -53,6 +53,7 @@ public static class Params { long speedLimit; long parallelism; long earlyFailNumber; + boolean useBinary; public static Params buildFromExecutionContext(ExecutionContext ec) { ParamManager pm = ec.getParamManager(); @@ -62,7 +63,8 @@ public static Params buildFromExecutionContext(ExecutionContext ec) { pm.getLong(ConnectionParams.GSI_CHECK_SPEED_MIN), pm.getLong(ConnectionParams.GSI_CHECK_SPEED_LIMITATION), pm.getLong(ConnectionParams.GSI_CHECK_PARALLELISM), - pm.getLong(ConnectionParams.GSI_EARLY_FAIL_NUMBER) + pm.getLong(ConnectionParams.GSI_EARLY_FAIL_NUMBER), + pm.getBoolean(ConnectionParams.BACKFILL_USING_BINARY) ); } } @@ -80,9 +82,9 @@ public GsiChecker(String schemaName, String tableName, String indexName, Comparator>> rowComparator) { super(schemaName, tableName, indexName, primaryTableMeta, gsiTableMeta, params.getBatchSize(), params.getSpeedMin(), params.getSpeedLimit(), params.getParallelism(), - primaryLock, gsiLock, planSelectWithMaxPrimary, planSelectWithMaxGsi, planSelectWithMinAndMaxPrimary, - planSelectWithMinAndMaxGsi, planSelectWithInTemplate, planSelectWithIn, planSelectMaxPk, indexColumns, - primaryKeysId, rowComparator); + params.isUseBinary(), primaryLock, gsiLock, planSelectWithMaxPrimary, planSelectWithMaxGsi, + planSelectWithMinAndMaxPrimary, planSelectWithMinAndMaxGsi, planSelectWithInTemplate, planSelectWithIn, + planSelectMaxPk, indexColumns, primaryKeysId, rowComparator); } public static Checker create(String schemaName, String tableName, String indexName, @@ -107,7 +109,7 @@ public static Checker create(String schemaName, String tableName, String indexNa } Extractor.ExtractorInfo info = Extractor.buildExtractorInfo(ec, schemaName, tableName, indexName, false, true); - final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, ec); + final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, params.isUseBinary(), ec); final Pair selectWithIn = builder .buildSelectWithInForChecker(baseTableMeta, info.getTargetTableColumns(), info.getPrimaryKeys(), diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/fastchecker/GsiFastChecker.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/fastchecker/GsiFastChecker.java index e318c4300..39a39a50a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/fastchecker/GsiFastChecker.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/fastchecker/GsiFastChecker.java @@ -19,7 +19,6 @@ import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionParams; -import com.alibaba.polardbx.executor.backfill.Extractor; import com.alibaba.polardbx.executor.fastchecker.FastChecker; import com.alibaba.polardbx.executor.gsi.GsiUtils; import com.alibaba.polardbx.executor.gsi.PhysicalPlanBuilder; @@ -28,13 +27,9 @@ import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.PhyTableOperation; -import com.alibaba.polardbx.statistics.SQLRecorderLogger; -import 
org.apache.commons.collections.MapUtils; -import java.text.MessageFormat; import java.util.ArrayList; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; @@ -43,7 +38,7 @@ public class GsiFastChecker extends FastChecker { public GsiFastChecker(String schemaName, String srcLogicalTableName, String dstLogicalTableName, Map> srcPhyDbAndTables, Map> dstPhyDbAndTables, List srcColumns, List dstColumns, List srcPks, List dstPks, - long parallelism, int lockTimeOut, PhyTableOperation planSelectHashCheckSrc, + PhyTableOperation planSelectHashCheckSrc, PhyTableOperation planSelectHashCheckWithUpperBoundSrc, PhyTableOperation planSelectHashCheckWithLowerBoundSrc, PhyTableOperation planSelectHashCheckWithLowerUpperBoundSrc, @@ -53,8 +48,8 @@ public GsiFastChecker(String schemaName, String srcLogicalTableName, String dstL PhyTableOperation planSelectHashCheckWithLowerUpperBoundDst, PhyTableOperation planIdleSelectSrc, PhyTableOperation planIdleSelectDst, PhyTableOperation planSelectSampleSrc, PhyTableOperation planSelectSampleDst) { - super(schemaName, schemaName, srcLogicalTableName, dstLogicalTableName, null, srcPhyDbAndTables, - dstPhyDbAndTables, srcColumns, dstColumns, srcPks, dstPks, parallelism, lockTimeOut, planSelectHashCheckSrc, + super(schemaName, schemaName, srcLogicalTableName, dstLogicalTableName, srcPhyDbAndTables, + dstPhyDbAndTables, srcColumns, dstColumns, srcPks, dstPks, planSelectHashCheckSrc, planSelectHashCheckWithUpperBoundSrc, planSelectHashCheckWithLowerBoundSrc, planSelectHashCheckWithLowerUpperBoundSrc, planSelectHashCheckDst, planSelectHashCheckWithUpperBoundDst, planSelectHashCheckWithLowerBoundDst, planSelectHashCheckWithLowerUpperBoundDst, planIdleSelectSrc, @@ -62,7 +57,7 @@ public GsiFastChecker(String schemaName, String srcLogicalTableName, String dstL } public static FastChecker create(String schemaName, String tableName, String indexName, - Map virtualColumnMap, long parallelism, + Map srcColumnMap, Map tarColumnMap, ExecutionContext ec) { // Build select plan final SchemaManager sm = ec.getSchemaManager(schemaName); @@ -81,30 +76,39 @@ public static FastChecker create(String schemaName, String tableName, String ind throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_CHECKER, "Incorrect GSI relationship."); } - final List indexColumns = - indexTableMeta.getAllColumns().stream().map(ColumnMeta::getName).collect(Collectors.toList()); - final List baseTableColumns = new ArrayList<>(indexColumns); + // for rebuild table + final List indexColumns = new ArrayList<>(); + final List baseTableColumns = new ArrayList<>(); + for (ColumnMeta columnMeta : indexTableMeta.getAllColumns()) { + if (columnMeta.getMappingName() != null) { + if (!columnMeta.getMappingName().isEmpty()) { + baseTableColumns.add(columnMeta.getMappingName()); + indexColumns.add(columnMeta.getName()); + } + } else { + baseTableColumns.add(columnMeta.getName()); + indexColumns.add(columnMeta.getName()); + } + } // 重要:构造planSelectSampleSrc 和 planSelectSampleDst时,传入的主键必须按原本的主键顺序! 
- final List baseTablePks = FastChecker.getorderedPrimaryKeys(baseTableMeta, ec); - final List indexTablePks = FastChecker.getorderedPrimaryKeys(indexTableMeta, ec); + final List baseTablePks = FastChecker.getorderedPrimaryKeys(baseTableMeta); + final List indexTablePks = FastChecker.getorderedPrimaryKeys(indexTableMeta); final Map> srcPhyDbAndTables = GsiUtils.getPhyTables(schemaName, tableName); final Map> dstPhyDbAndTables = GsiUtils.getPhyTables(schemaName, indexName); final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, ec); - final int lockTimeOut = ec.getParamManager().getInt(ConnectionParams.FASTCHECKER_LOCK_TIMEOUT); - return new GsiFastChecker(schemaName, tableName, indexName, srcPhyDbAndTables, dstPhyDbAndTables, - baseTableColumns, indexColumns, baseTablePks, indexTablePks, parallelism, lockTimeOut, - builder.buildSelectHashCheckForGSIChecker(baseTableMeta, baseTableColumns, virtualColumnMap, baseTablePks, + baseTableColumns, indexColumns, baseTablePks, indexTablePks, + builder.buildSelectHashCheckForGSIChecker(baseTableMeta, baseTableColumns, srcColumnMap, baseTablePks, false, false), - builder.buildSelectHashCheckForGSIChecker(baseTableMeta, baseTableColumns, virtualColumnMap, baseTablePks, + builder.buildSelectHashCheckForGSIChecker(baseTableMeta, baseTableColumns, srcColumnMap, baseTablePks, false, true), - builder.buildSelectHashCheckForGSIChecker(baseTableMeta, baseTableColumns, virtualColumnMap, baseTablePks, + builder.buildSelectHashCheckForGSIChecker(baseTableMeta, baseTableColumns, srcColumnMap, baseTablePks, true, false), - builder.buildSelectHashCheckForGSIChecker(baseTableMeta, baseTableColumns, virtualColumnMap, baseTablePks, + builder.buildSelectHashCheckForGSIChecker(baseTableMeta, baseTableColumns, srcColumnMap, baseTablePks, true, true), builder.buildSelectHashCheckForChecker(indexTableMeta, indexColumns, indexTablePks, false, false), @@ -115,7 +119,7 @@ public static FastChecker create(String schemaName, String tableName, String ind builder.buildIdleSelectForChecker(baseTableMeta, baseTableColumns), builder.buildIdleSelectForChecker(indexTableMeta, indexColumns), - builder.buildSqlSelectForSample(baseTableMeta, baseTablePks, baseTablePks, false, false), - builder.buildSqlSelectForSample(indexTableMeta, indexTablePks, indexTablePks, false, false)); + builder.buildSqlSelectForSample(baseTableMeta, baseTablePks), + builder.buildSqlSelectForSample(indexTableMeta, indexTablePks)); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/utils/Transformer.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/utils/Transformer.java index c088c0506..40c3b3295 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/utils/Transformer.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/gsi/utils/Transformer.java @@ -16,9 +16,10 @@ package com.alibaba.polardbx.executor.gsi.utils; -import com.alibaba.polardbx.common.datatype.Decimal; import com.alibaba.polardbx.common.datatype.Decimal; import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.jdbc.ParameterContext; import com.alibaba.polardbx.common.jdbc.ParameterMethod; import com.alibaba.polardbx.common.jdbc.ZeroDate; @@ -36,7 +37,7 @@ import com.google.common.base.Preconditions; import com.google.common.io.BaseEncoding; import 
io.airlift.slice.Slice; -import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang.StringUtils; import java.math.BigInteger; import java.nio.charset.Charset; @@ -44,6 +45,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.function.BiFunction; /** @@ -58,7 +60,7 @@ public class Transformer { * @param defaultGen Default upper bound generator for empty source table * @return Parameter list for data extraction */ - public static List> convertUpperBoundWithDefault(Cursor cursor, + public static List> convertUpperBoundWithDefault(Cursor cursor, boolean useBinary, BiFunction defaultGen) { final List> batchParams = new ArrayList<>(); @@ -70,7 +72,7 @@ public static List> convertUpperBoundWithDefault( final Map params = new HashMap<>(columns.size()); for (int i = 0; i < columns.size(); i++) { - ParameterContext pc = buildColumnParam(row, i); + ParameterContext pc = buildColumnParam(row, i, useBinary); final DataType columnType = columns.get(i).getDataType(); if (DataTypeUtil.anyMatchSemantically(columnType, DataTypes.FloatType, DataTypes.DoubleType)) { @@ -106,13 +108,55 @@ public static List> convertUpperBoundWithDefault( return batchParams; } + public static List> convertUpperBoundWithDefaultForFastChecker(Cursor cursor, + boolean useBinary, + List> rowValues) { + final List> batchParams = new ArrayList<>(); + + Row row; + while ((row = cursor.next()) != null) { + final List columns = row.getParentCursorMeta().getColumns(); + + final Map params = new HashMap<>(columns.size()); + final List rowValue = new ArrayList<>(); + for (int i = 0; i < columns.size(); i++) { + rowValue.add(row.getObject(i)); + if (row.getObject(i) == null) { + throw new TddlRuntimeException(ErrorCode.ERR_FAST_CHECKER, + "fastchecker failed because found null value in sampled primary keys"); + } + ParameterContext pc = buildColumnParam(row, i, useBinary); + + final DataType columnType = columns.get(i).getDataType(); + if (DataTypeUtil.anyMatchSemantically(columnType, DataTypes.FloatType, DataTypes.DoubleType)) { + if (null != pc.getArgs()[1]) { + // For float value like "-100.003", query like "c_float <= -100.003" returns nothing. 
+                        // Should replace upper bound with "c_float <= -100"
+                        pc = new ParameterContext(pc.getParameterMethod(),
+                            new Object[] {pc.getArgs()[0], Math.ceil((Double) pc.getArgs()[1])});
+                    }
+                }
+
+                params.put(i + 1, pc);
+            }
+            rowValues.add(rowValue);
+            batchParams.add(params);
+        }
+        return batchParams;
+    }
+
     /**
      * Build batch insert parameter, from the results of select
      *
      * @param cursor result cursor of select
      * @return batch parameters for insert
      */
-    public static List<Map<Integer, ParameterContext>> buildBatchParam(Cursor cursor) {
+    public static List<Map<Integer, ParameterContext>> buildBatchParam(Cursor cursor, boolean useBinary) {
+        return buildBatchParam(cursor, useBinary, null);
+    }
+
+    public static List<Map<Integer, ParameterContext>> buildBatchParam(Cursor cursor, boolean useBinary,
+                                                                       Set<String> notConvertColumns) {
         final List<Map<Integer, ParameterContext>> batchParams = new ArrayList<>();
 
         Row row;
@@ -121,8 +165,11 @@ public static List<Map<Integer, ParameterContext>> buildBatchParam(Cursor cursor
             final Map<Integer, ParameterContext> params = new HashMap<>(columns.size());
             for (int i = 0; i < columns.size(); i++) {
+                ColumnMeta columnMeta = columns.get(i);
+                String colName = columnMeta.getName();
+                boolean canConvert = useBinary && (notConvertColumns == null || !notConvertColumns.contains(colName));
 
-                final ParameterContext parameterContext = buildColumnParam(row, i);
+                final ParameterContext parameterContext = buildColumnParam(row, i, canConvert);
 
                 params.put(i + 1, parameterContext);
             }
@@ -132,6 +179,10 @@ public static List<Map<Integer, ParameterContext>> buildBatchParam(Cursor cursor
         return batchParams;
     }
 
+    public static ParameterContext buildColumnParam(Row row, int i) {
+        return buildColumnParam(row, i, false);
+    }
+
     /**
      * Build column parameter for insert, from the results of select
      *
@@ -139,7 +190,7 @@ public static List<Map<Integer, ParameterContext>> buildBatchParam(Cursor cursor
      * @param i column index, start from 0
      * @return ParameterContext for specified column
      */
-    public static ParameterContext buildColumnParam(Row row, int i) {
+    public static ParameterContext buildColumnParam(Row row, int i, boolean strToBinary) {
         DataType columnType = DataTypes.BinaryType;
         Object value = null;
         ParameterMethod method = ParameterMethod.setObject1;
@@ -186,6 +237,10 @@ public static ParameterContext buildColumnParam(Row row, int i) {
                 // Use the setBytes marker; serialize as a hex string
                 value = row.getBytes(i);
                 method = ParameterMethod.setBytes;
+            } else if (strToBinary && DataTypeUtil.isStringType(columnType)) {
+                // For string types, select the binary value and setBytes directly,
+                // avoiding lossy character-set conversion
+                value = row.getBytes(i);
+                method = ParameterMethod.setBytes;
             }
         }
     } catch (TddlNestableRuntimeException e) {
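
The float handling copied into `convertUpperBoundWithDefaultForFastChecker` deserves a standalone illustration. Because a sampled FLOAT/DOUBLE upper bound may not round-trip exactly, a predicate like `c_float <= -100.003` can miss the very row the bound was sampled from; widening the bound with `Math.ceil` makes a batch slightly larger but never loses rows. A minimal sketch of just that adjustment (stand-alone, not the project's API):

```java
// Standalone sketch of the upper-bound widening applied above (assumption:
// float/double bounds arrive as Double). ceil() can only enlarge the range,
// so the batch may grow slightly but never drops rows hashed on the source.
public class FloatBoundDemo {
    static Object widenUpperBound(Object bound) {
        return (bound instanceof Double) ? Math.ceil((Double) bound) : bound;
    }

    public static void main(String[] args) {
        // "c_float <= -100.003" may return nothing for a stored -100.003f,
        // because the column value re-reads as e.g. -100.00300140380859.
        System.out.println(widenUpperBound(-100.003)); // -100.0
        System.out.println(widenUpperBound("abc"));    // unchanged
    }
}
```
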
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/BaseDalHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/BaseDalHandler.java
index b38d3b170..f6a48b9cf 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/BaseDalHandler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/BaseDalHandler.java
@@ -19,7 +19,7 @@
 import com.alibaba.polardbx.common.jdbc.ParameterContext;
 import com.alibaba.polardbx.executor.common.ExecutorContext;
 import com.alibaba.polardbx.executor.cursor.Cursor;
-import com.alibaba.polardbx.executor.cursor.impl.MultiCursorAdapter;
+import com.alibaba.polardbx.executor.cursor.impl.GatherCursor;
 import com.alibaba.polardbx.executor.spi.IRepository;
 import com.alibaba.polardbx.executor.utils.ExecUtils;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
@@ -77,7 +77,11 @@ public Cursor buildMultiCursor(ExecutionContext executionContext, BaseDalOperati
                     .execByExecPlanNode(relNode, executionContext));
             }
         }
-        baseDalCursor = MultiCursorAdapter.wrap(inputCursors);
-        return baseDalCursor;
+
+        if (inputCursors.size() == 1) {
+            return inputCursors.get(0);
+        } else {
+            return new GatherCursor(inputCursors, executionContext);
+        }
     }
 }
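
`buildMultiCursor` previously wrapped every result list in `MultiCursorAdapter`; the replacement only pays for a gathering cursor when there is more than one physical cursor. A minimal sketch of the rule with stand-in types (the real `Cursor` and `GatherCursor` live in polardbx-executor, and the latter also takes an `ExecutionContext`):

```java
import java.util.List;

// Stand-in types for illustration only.
interface Cursor { }

class GatherCursor implements Cursor {
    GatherCursor(List<Cursor> inputs) { /* drain the inputs sequentially */ }
}

class CursorWrapDemo {
    static Cursor wrap(List<Cursor> inputCursors) {
        // Skip one layer of indirection for the common single-shard case.
        return inputCursors.size() == 1
            ? inputCursors.get(0)
            : new GatherCursor(inputCursors);
    }
}
```
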
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/DropDatabaseHandlerCommon.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/DropDatabaseHandlerCommon.java
new file mode 100644
index 000000000..f8d4e5cb9
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/DropDatabaseHandlerCommon.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.handler;
+
+import com.alibaba.polardbx.executor.cursor.Cursor;
+import com.alibaba.polardbx.executor.spi.IRepository;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import org.apache.calcite.rel.RelNode;
+
+/**
+ * @author chenmo.cm
+ */
+public abstract class DropDatabaseHandlerCommon extends HandlerCommon {
+
+    public DropDatabaseHandlerCommon(IRepository repo) {
+        super(repo);
+    }
+
+    @Override
+    public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) {
+        return null;
+    }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/HandlerCommon.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/HandlerCommon.java
index 6367c5c95..ef12f1973 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/HandlerCommon.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/HandlerCommon.java
@@ -29,6 +29,8 @@
 import com.alibaba.polardbx.common.exception.TddlRuntimeException;
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
 import com.alibaba.polardbx.common.jdbc.ParameterContext;
+import com.alibaba.polardbx.common.jdbc.Parameters;
+import com.alibaba.polardbx.common.model.Group.GroupType;
 import com.alibaba.polardbx.common.model.Group;
 import com.alibaba.polardbx.common.properties.ConnectionParams;
 import com.alibaba.polardbx.common.utils.GeneralUtil;
@@ -65,15 +67,20 @@
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil;
 import com.alibaba.polardbx.optimizer.core.rel.BaseQueryOperation;
+import com.alibaba.polardbx.optimizer.core.rel.LogicalInsert;
 import com.alibaba.polardbx.optimizer.core.rel.LogicalModify;
 import com.alibaba.polardbx.optimizer.core.rel.LogicalRelocate;
+import com.alibaba.polardbx.optimizer.core.rel.LogicalUpsert;
 import com.alibaba.polardbx.optimizer.core.rel.PhyDdlTableOperation;
 import com.alibaba.polardbx.optimizer.core.rel.PhyTableOperation;
 import com.alibaba.polardbx.optimizer.core.rel.dml.BroadcastWriter;
 import com.alibaba.polardbx.optimizer.core.rel.dml.DistinctWriter;
 import com.alibaba.polardbx.optimizer.core.rel.dml.Writer;
 import com.alibaba.polardbx.optimizer.core.rel.dml.util.SourceRows;
+import com.alibaba.polardbx.optimizer.core.rel.dml.writer.BroadcastModifyWriter;
 import com.alibaba.polardbx.optimizer.core.rel.dml.writer.RelocateWriter;
+import com.alibaba.polardbx.optimizer.core.rel.dml.writer.ShardingModifyWriter;
+import com.alibaba.polardbx.optimizer.core.rel.dml.writer.SingleModifyWriter;
 import com.alibaba.polardbx.optimizer.core.row.Row;
 import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx;
 import com.alibaba.polardbx.optimizer.memory.MemoryEstimator;
@@ -152,6 +159,9 @@ public Cursor handlePlan(RelNode logicalPlan, ExecutionContext executionContext)
 
     private void executeSubNodesBlockConcurrent(ExecutionContext executionContext, List<RelNode> subNodes,
                                                 List<Cursor> subCursors, String schemaName) {
+        if (subNodes != null && subNodes.isEmpty()) {
+            return;
+        }
         checkExecMemCost(executionContext, subNodes);
         RelNode firstOp = subNodes.get(0);
         boolean isPhyTblOp = firstOp instanceof PhyTableOperation;
@@ -580,6 +590,8 @@ protected List<RelNode> execute(RelocateWriter relocateWriter, RowSet rowSe
         final RelocateWriter rw = w.unwrap(RelocateWriter.class);
         final boolean usePartFieldChecker = rw.isUsePartFieldChecker() &&
             executionContext.getParamManager().getBoolean(ConnectionParams.DML_USE_NEW_SK_CHECKER);
+        final boolean checkJsonByStringCompare =
+            executionContext.getParamManager().getBoolean(ConnectionParams.DML_CHECK_JSON_BY_STRING_COMPARE);
 
         final List<Object> skSources = Mappings.permute(row, rw.getIdentifierKeySourceMapping());
         final List<Object> skTargets = Mappings.permute(row, rw.getIdentifierKeyTargetMapping());
@@ -614,7 +626,7 @@
             final GroupKey skTargetKey = new GroupKey(skTargets.toArray(), rw.getIdentifierKeyMetas());
             final GroupKey skSourceKey = new GroupKey(skSources.toArray(), rw.getIdentifierKeyMetas());
 
-            return skTargetKey.equalsForUpdate(skSourceKey);
+            return skTargetKey.equalsForUpdate(skSourceKey, checkJsonByStringCompare);
         }
     };
@@ -844,11 +856,29 @@ protected static ExecutionContext clearSqlMode(ExecutionContext executionContext
         if (null != serverVariables) {
             executionContext.getServerVariables().putAll(serverVariables);
         }
-        executionContext.getServerVariables().put("sql_mode", "");
+        executionContext.getServerVariables().put("sql_mode", "NO_AUTO_VALUE_ON_ZERO");
+        return executionContext;
+    }
+
+    public static ExecutionContext setChangeSetApplySqlMode(ExecutionContext executionContext) {
+        // fix the data truncate issue
+        final Map<String, Object> serverVariables = executionContext.getServerVariables();
+        executionContext.setServerVariables(new TreeMap<>(String.CASE_INSENSITIVE_ORDER));
+        if (null != serverVariables) {
+            executionContext.getServerVariables().putAll(serverVariables);
+        }
+        String sqlMode = (String) executionContext.getServerVariables().get("sql_mode");
+        if (sqlMode != null && !sqlMode.isEmpty() && (StringUtils.containsIgnoreCase(sqlMode, "STRICT_ALL_TABLES")
+            || StringUtils.containsIgnoreCase(sqlMode, "STRICT_TRANS_TABLES"))) {
+            sqlMode = "STRICT_ALL_TABLES,NO_AUTO_VALUE_ON_ZERO";
+        } else {
+            sqlMode = "NO_AUTO_VALUE_ON_ZERO";
+        }
+        executionContext.getServerVariables().put("sql_mode", sqlMode);
         return executionContext;
     }
 
-    protected static void upgradeEncoding(ExecutionContext executionContext, String schemaName, String baseTableName) {
+    public static void upgradeEncoding(ExecutionContext executionContext, String schemaName, String baseTableName) {
         final Map<String, String> columnCharacterSet = getColumnCharacterSet(executionContext, schemaName,
             baseTableName);
@@ -1190,7 +1220,9 @@ protected void beforeUpdateFkCheck(TableModify tableModify, String schemaName, S
 
         List<String> sortedColumns = getSortedColumns(true, tableMeta, data.getValue());
 
-        List<List<Object>> selectValues = getSelectValues(executionContext, schemaName,
+        ExecutionContext selectEc = executionContext.copy();
+        selectEc.setParams(new Parameters(selectEc.getParams().getCurrentParameter(), false));
+        List<List<Object>> selectValues = getSelectValues(selectEc, schemaName,
             parentTableMeta, updateValueList, tableModify, memoryAllocator, builder, shardResults,
             sortedColumns, false);
 
@@ -1211,6 +1243,22 @@ protected void beforeUpdateFkCascade(TableModify tableModify, String schemaName,
             throw new TddlRuntimeException(ErrorCode.ERR_FK_EXCEED_MAX_DEPTH);
         }
 
+        if (tableModify instanceof LogicalInsert && ((LogicalInsert) tableModify).isUpsert()) {
+            DistinctWriter writer;
+            if (!((LogicalUpsert) tableModify).isModifyPartitionKey()) {
+                writer = ((LogicalUpsert) tableModify).getPrimaryUpsertWriter().getUpdaterWriter();
+            } else {
+                writer = ((LogicalUpsert) tableModify).getPrimaryRelocateWriter().getModifyWriter();
+            }
+            if (writer instanceof SingleModifyWriter) {
+                tableModify = ((SingleModifyWriter) writer).getModify();
+            } else if (writer instanceof BroadcastModifyWriter) {
+                tableModify = ((BroadcastModifyWriter) writer).getModify();
+            } else {
+                tableModify = ((ShardingModifyWriter) writer).getModify();
+            }
+        }
+
         final MemoryPool selectValuesPool = MemoryPoolUtils.createOperatorTmpTablePool(executionContext);
         final MemoryAllocatorCtx memoryAllocator = selectValuesPool.getMemoryAllocatorCtx();
 
@@ -1281,7 +1329,9 @@ protected void beforeUpdateFkCascade(TableModify tableModify, String schemaName,
             }
         });
 
-        List<List<Object>> selectValues = getSelectValues(executionContext, schemaName,
+        ExecutionContext selectEc = executionContext.copy();
+        selectEc.setParams(new Parameters(selectEc.getParams().getCurrentParameter(), false));
+        List<List<Object>> selectValues = getSelectValues(selectEc, schemaName,
             refTableMeta, shardConditionValueList, tableModify, memoryAllocator, builder, selectShardResults,
             sortedColumns, false);
 
@@ -1289,6 +1339,8 @@ protected void beforeUpdateFkCascade(TableModify tableModify, String schemaName,
             continue;
         }
 
+        List<ColumnMeta> returnColumns = new ArrayList<>(refTableMeta.getAllColumns());
+
         switch (data.getValue().onUpdate) {
         case RESTRICT:
         case NO_ACTION:
@@ -1306,7 +1358,6 @@ protected void beforeUpdateFkCascade(TableModify tableModify, String schemaName,
                 selectValue.addAll(updateValueList.get(0));
             }
 
-            List<ColumnMeta> returnColumns = refTableMeta.getAllColumns();
             for (ColumnMeta column : refTableMeta.getAllColumns()) {
                 if (data.getValue().columns.stream().anyMatch(c -> c.equalsIgnoreCase(column.getName()))) {
                     returnColumns.add(column);
@@ -1327,7 +1378,6 @@ protected void beforeUpdateFkCascade(TableModify tableModify, String schemaName,
                 }
             }
 
-            returnColumns = refTableMeta.getAllColumns();
             for (ColumnMeta column : refTableMeta.getAllColumns()) {
                 if (data.getValue().columns.stream().anyMatch(c -> c.equalsIgnoreCase(column.getName()))) {
                     returnColumns.add(column);
@@ -1352,7 +1402,7 @@ protected void beforeUpdateFkCascade(TableModify tableModify, String schemaName,
         }
     }
 
-    protected void beforeDeleteFkCascade(LogicalModify logicalModify, String schemaName, String targetTable,
+    protected void beforeDeleteFkCascade(TableModify logicalModify, String schemaName, String targetTable,
                                          ExecutionContext executionContext, List<List<Object>> values,
                                          Map<String, Map<String, Map<String, RelNode>>> fkPlans,
                                          int depth) {
@@ -1408,7 +1458,9 @@ protected void beforeDeleteFkCascade(LogicalModify logicalModify, String schemaN
 
         List<String> sortedColumns = getSortedColumns(false, tableMeta, data.getValue());
 
-        List<List<Object>> selectValues = getSelectValues(executionContext, schemaName,
+        ExecutionContext selectEc = executionContext.copy();
+        selectEc.setParams(new Parameters(selectEc.getParams().getCurrentParameter(), false));
+        List<List<Object>> selectValues = getSelectValues(selectEc, schemaName,
             refTableMeta, conditionValueList, logicalModify, memoryAllocator, builder, shardResults,
             sortedColumns, false);
 
@@ -1420,14 +1472,14 @@ protected void beforeDeleteFkCascade(LogicalModify logicalModify, String schemaN
             selectValues.removeAll(values);
         }
 
+        List<ColumnMeta> returnColumns = new ArrayList<>(refTableMeta.getAllColumns());
+
         switch (data.getValue().onDelete) {
         case RESTRICT:
         case NO_ACTION:
             throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR,
                 "Cannot delete or update a parent row: a foreign key constraint fails");
         case CASCADE:
-            List<ColumnMeta> returnColumns = refTableMeta.getAllColumns();
-
             if (!data.getValue().isPushDown()) {
                 executeFkPlan(executionContext, returnColumns, fkPlans, schemaName, tableName, constraintName,
                     selectValues);
@@ -1441,7 +1493,6 @@ protected void beforeDeleteFkCascade(LogicalModify logicalModify, String schemaN
                 }
             }
 
-            returnColumns = refTableMeta.getAllColumns();
             for (ColumnMeta column : refTableMeta.getAllColumns()) {
                 if (data.getValue().columns.stream().anyMatch(c -> c.equalsIgnoreCase(column.getName()))) {
                     returnColumns.add(column);
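
Two sql_mode decisions in the HandlerCommon changes above are easy to miss: `clearSqlMode` now pins `NO_AUTO_VALUE_ON_ZERO` so that replaying a row with an explicit 0 in an AUTO_INCREMENT column does not generate a fresh value, and `setChangeSetApplySqlMode` additionally preserves strictness when the source session was strict. The same rule as a pure function (an illustrative extraction, not the project's code):

```java
// Sketch of the sql_mode rewriting introduced above, extracted into a
// side-effect-free function for illustration.
public class SqlModeDemo {
    static String changeSetApplySqlMode(String sessionSqlMode) {
        if (sessionSqlMode != null && !sessionSqlMode.isEmpty()
            && (sessionSqlMode.toUpperCase().contains("STRICT_ALL_TABLES")
                || sessionSqlMode.toUpperCase().contains("STRICT_TRANS_TABLES"))) {
            // keep strict semantics, but never re-generate AUTO_INCREMENT for 0
            return "STRICT_ALL_TABLES,NO_AUTO_VALUE_ON_ZERO";
        }
        return "NO_AUTO_VALUE_ON_ZERO";
    }

    public static void main(String[] args) {
        System.out.println(changeSetApplySqlMode("STRICT_TRANS_TABLES,NO_ZERO_DATE"));
        System.out.println(changeSetApplySqlMode(""));
    }
}
```
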
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalAlterInstanceHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalAlterInstanceHandler.java
new file mode 100644
index 000000000..55c3d292c
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalAlterInstanceHandler.java
@@ -0,0 +1,92 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.handler;
+
+import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException;
+import com.alibaba.polardbx.executor.ddl.job.factory.LogicalAlterInstanceReadonlyStatusFactory;
+import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob;
+import com.alibaba.polardbx.executor.ddl.newengine.job.TransientDdlJob;
+import com.alibaba.polardbx.executor.handler.ddl.LogicalCommonDdlHandler;
+import com.alibaba.polardbx.executor.spi.IRepository;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.BaseDdlOperation;
+import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterInstance;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import org.apache.calcite.sql.SqlAlterInstance;
+import org.apache.calcite.sql.SqlSetOption;
+import org.apache.commons.lang.BooleanUtils;
+
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Created by zhuqiwei.
+ *
+ * @author zhuqiwei
+ */
+public class LogicalAlterInstanceHandler extends LogicalCommonDdlHandler {
+    protected static final Map<String, Set<String>> supportedOptionAndValues
+        = ImmutableMap.of(
+        "read_only", ImmutableSet.of("false", "true")
+    );
+
+    public LogicalAlterInstanceHandler(IRepository repo) {
+        super(repo);
+    }
+
+    @Override
+    protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext executionContext) {
+        LogicalAlterInstance logicalAlterInstance = (LogicalAlterInstance) logicalDdlPlan;
+
+        SqlAlterInstance sqlAlterInstance = (SqlAlterInstance) logicalAlterInstance.relDdl.sqlNode;
+
+        for (SqlSetOption option : sqlAlterInstance.getOptitionList()) {
+            String optionName = option.getName().getSimple().toLowerCase();
+            String value = option.getValue().toString().toLowerCase();
+            if (supportedOptionAndValues.containsKey(optionName) && (supportedOptionAndValues.get(optionName).isEmpty()
+                || supportedOptionAndValues.get(optionName)
+                .contains(value))) {
+                if (optionName.equalsIgnoreCase("read_only")) {
+                    boolean readonly = BooleanUtils.toBoolean(value);
+                    return new LogicalAlterInstanceReadonlyStatusFactory(readonly).create();
+                }
+            }
+        }
+
+        return new TransientDdlJob();
+    }
+
+    @Override
+    protected boolean validatePlan(BaseDdlOperation logicalDdlPlan, ExecutionContext executionContext) {
+        LogicalAlterInstance logicalAlterInstance = (LogicalAlterInstance) logicalDdlPlan;
+
+        SqlAlterInstance sqlAlterInstance = (SqlAlterInstance) logicalAlterInstance.relDdl.sqlNode;
+
+        for (SqlSetOption option : sqlAlterInstance.getOptitionList()) {
+            String optionName = option.getName().getSimple().toLowerCase();
+            String value = option.getValue().toString().toLowerCase();
+            if (!supportedOptionAndValues.containsKey(optionName) || (!supportedOptionAndValues.get(optionName)
+                .contains(value) && !supportedOptionAndValues.get(optionName).isEmpty())) {
+                throw new TddlNestableRuntimeException(
+                    String.format("option [%s=%s] is not supported", optionName, value)
+                );
+            }
+        }
+        return false;
+    }
+}
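
`LogicalAlterInstanceHandler` drives both validation and job building from the single `supportedOptionAndValues` map, where an empty value set would mean "any value accepted". The same allow-list check with plain `java.util` collections standing in for Guava's immutables (illustrative only):

```java
import java.util.Map;
import java.util.Set;

// Sketch of the allow-list check used by validatePlan above.
public class OptionValidationDemo {
    static final Map<String, Set<String>> SUPPORTED =
        Map.of("read_only", Set.of("true", "false"));

    static void validate(String name, String value) {
        Set<String> allowed = SUPPORTED.get(name.toLowerCase());
        // by convention here, an empty value set would mean "any value accepted"
        if (allowed == null || (!allowed.isEmpty() && !allowed.contains(value.toLowerCase()))) {
            throw new IllegalArgumentException(
                String.format("option [%s=%s] is not supported", name, value));
        }
    }

    public static void main(String[] args) {
        validate("read_only", "TRUE");          // accepted
        try {
            validate("read_only", "maybe");     // rejected
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
```
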
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCancelReplicaCheckTableHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCancelReplicaCheckTableHandler.java
new file mode 100644
index 000000000..bbd0f25d3
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCancelReplicaCheckTableHandler.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.handler;
+
+import com.alibaba.fastjson.JSON;
+import com.alibaba.polardbx.common.cdc.CdcConstants;
+import com.alibaba.polardbx.common.cdc.ResultCode;
+import com.alibaba.polardbx.common.cdc.RplConstants;
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.common.utils.PooledHttpHelper;
+import com.alibaba.polardbx.druid.util.StringUtils;
+import com.alibaba.polardbx.executor.cursor.Cursor;
+import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor;
+import com.alibaba.polardbx.executor.spi.IRepository;
+import com.alibaba.polardbx.net.util.CdcTargetUtil;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.sql.SqlCancelReplicaCheck;
+import org.apache.http.entity.ContentType;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * @author yudong
+ * @since 2023/11/9 10:40
+ **/
+public class LogicalCancelReplicaCheckTableHandler extends HandlerCommon {
+
+    private static final String API_PATTERN = "http://%s/replica/fullValidation/cancel";
+
+    public LogicalCancelReplicaCheckTableHandler(IRepository repo) {
+        super(repo);
+    }
+
+    @Override
+    public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) {
+        SqlCancelReplicaCheck sqlCancelReplicaCheck =
+            (SqlCancelReplicaCheck) ((LogicalDal) logicalPlan).getNativeSqlNode();
+
+        String dbName = sqlCancelReplicaCheck.getDbName().toString();
+        if (StringUtils.isEmpty(dbName)) {
+            throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, "database cannot be empty!");
+        }
+        Map<String, String> params = new HashMap<>();
+        params.put(RplConstants.RPL_FULL_VALID_DB, dbName);
+        if (sqlCancelReplicaCheck.getTableName() != null) {
+            String tbName = sqlCancelReplicaCheck.getTableName().toString();
+            params.put(RplConstants.RPL_FULL_VALID_TB, tbName);
+        }
+
+        String daemonEndpoint = CdcTargetUtil.getDaemonMasterTarget();
+        String url = String.format(API_PATTERN, daemonEndpoint);
+        String res;
+        try {
+            res = PooledHttpHelper.doPost(url, ContentType.APPLICATION_JSON, JSON.toJSONString(params), 10000);
+        } catch (Exception e) {
+            throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, e);
+        }
+
+        ResultCode httpResult = JSON.parseObject(res, ResultCode.class);
+        if (httpResult.getCode() != CdcConstants.SUCCESS_CODE) {
+            throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, httpResult.getMsg());
+        }
+        return new AffectRowCursor(0);
+    }
+}
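
This handler and its `continue` counterpart below share one shape: collect parameters into a map, POST them as JSON to the CDC daemon, and fail unless the returned `ResultCode` signals success. A rough equivalent using only the JDK's `java.net.http` (the endpoint, port and payload here are placeholders, and the real code goes through `PooledHttpHelper`):

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Illustration of the daemon call pattern with stand-in endpoint and payload.
public class DaemonPostDemo {
    public static void main(String[] args) throws Exception {
        String url = String.format("http://%s/replica/fullValidation/cancel", "127.0.0.1:3007");
        HttpRequest request = HttpRequest.newBuilder(URI.create(url))
            .header("Content-Type", "application/json")
            .POST(HttpRequest.BodyPublishers.ofString("{\"dbName\":\"test_db\"}"))
            .build();
        HttpResponse<String> response =
            HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString());
        // the real handler parses this JSON into ResultCode and checks its code
        System.out.println(response.body());
    }
}
```
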
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalChangeMasterHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalChangeMasterHandler.java
index a1a92f0da..26c9073b8 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalChangeMasterHandler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalChangeMasterHandler.java
@@ -16,17 +16,20 @@
 package com.alibaba.polardbx.executor.handler;
 
+import com.alibaba.fastjson.JSON;
 import com.alibaba.polardbx.common.cdc.CdcConstants;
 import com.alibaba.polardbx.common.cdc.ResultCode;
 import com.alibaba.polardbx.common.exception.TddlRuntimeException;
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
 import com.alibaba.polardbx.common.utils.PooledHttpHelper;
+import com.alibaba.polardbx.common.utils.logger.Logger;
 import com.alibaba.polardbx.executor.cursor.Cursor;
 import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor;
 import com.alibaba.polardbx.executor.spi.IRepository;
 import com.alibaba.polardbx.net.util.CdcTargetUtil;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal;
+import com.alibaba.polardbx.statistics.SQLRecorderLogger;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.sql.SqlChangeMaster;
 
@@ -38,6 +41,8 @@
  */
 public class LogicalChangeMasterHandler extends LogicalReplicationBaseHandler {
 
+    private static final Logger cdcLogger = SQLRecorderLogger.cdcLogger;
+
     public LogicalChangeMasterHandler(IRepository repo) {
         super(repo);
     }
@@ -54,6 +59,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) {
                 ContentType.APPLICATION_JSON, JSON.toJSONString(sqlNode.getParams()), 10000);
         } catch (Exception e) {
+            cdcLogger.error("change master error!", e);
             throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, e);
         }
         ResultCode httpResult = JSON.parseObject(res, ResultCode.class);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalChangeReplicationFilterHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalChangeReplicationFilterHandler.java
index 7045ceb37..963ec57b5 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalChangeReplicationFilterHandler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalChangeReplicationFilterHandler.java
@@ -22,12 +22,14 @@
 import com.alibaba.polardbx.common.exception.TddlRuntimeException;
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
 import com.alibaba.polardbx.common.utils.PooledHttpHelper;
+import com.alibaba.polardbx.common.utils.logger.Logger;
 import com.alibaba.polardbx.executor.cursor.Cursor;
 import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor;
 import com.alibaba.polardbx.executor.spi.IRepository;
 import com.alibaba.polardbx.net.util.CdcTargetUtil;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal;
+import com.alibaba.polardbx.statistics.SQLRecorderLogger;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.sql.SqlChangeReplicationFilter;
 import org.apache.http.entity.ContentType;
@@ -39,7 +41,9 @@
  */
 public class LogicalChangeReplicationFilterHandler extends LogicalReplicationBaseHandler {
 
-    public LogicalChangeReplicationFilterHandler(IRepository repo){
+    private static final Logger cdcLogger = SQLRecorderLogger.cdcLogger;
+
+    public LogicalChangeReplicationFilterHandler(IRepository repo) {
         super(repo);
     }
 
@@ -55,10 +59,13 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) {
                 ContentType.APPLICATION_JSON, JSON.toJSONString(sqlNode.getParams()), 10000);
         } catch (Exception e) {
+            cdcLogger.error("change replication filter error!", e);
             throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, e);
         }
         ResultCode httpResult = JSON.parseObject(res, ResultCode.class);
         if (httpResult.getCode() != CdcConstants.SUCCESS_CODE) {
+            cdcLogger.warn(
+                "change replication filter failed! code:" + httpResult.getCode() + ", msg:" + httpResult.getMsg());
             throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, httpResult.getMsg());
         }
         return new AffectRowCursor(0);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalClearCclRulesHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalClearCclRulesHandler.java
index 4a0621206..b89984cc1 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalClearCclRulesHandler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalClearCclRulesHandler.java
@@ -20,6 +20,7 @@
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
 import com.alibaba.polardbx.common.utils.logger.Logger;
 import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
+import com.alibaba.polardbx.config.ConfigDataMode;
 import com.alibaba.polardbx.executor.cursor.Cursor;
 import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor;
 import com.alibaba.polardbx.executor.spi.IRepository;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalContinueReplicaCheckTableHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalContinueReplicaCheckTableHandler.java
new file mode 100644
index 000000000..5fa48627f
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalContinueReplicaCheckTableHandler.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.handler;
+
+import com.alibaba.fastjson.JSON;
+import com.alibaba.polardbx.common.cdc.CdcConstants;
+import com.alibaba.polardbx.common.cdc.ResultCode;
+import com.alibaba.polardbx.common.cdc.RplConstants;
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.common.utils.HttpClientHelper;
+import com.alibaba.polardbx.common.utils.PooledHttpHelper;
+import com.alibaba.polardbx.druid.util.StringUtils;
+import com.alibaba.polardbx.executor.cursor.Cursor;
+import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor;
+import com.alibaba.polardbx.executor.spi.IRepository;
+import com.alibaba.polardbx.net.util.CdcTargetUtil;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.sql.SqlContinueReplicaCheck;
+import org.apache.http.entity.ContentType;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * @author yudong
+ * @since 2023/11/9 10:40
+ **/
+public class LogicalContinueReplicaCheckTableHandler extends HandlerCommon {
+
+    private static final String API_PATTERN = "http://%s/replica/fullValidation/continue";
+
+    public LogicalContinueReplicaCheckTableHandler(IRepository repo) {
+        super(repo);
+    }
+
+    @Override
+    public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) {
+        SqlContinueReplicaCheck sqlContinueReplicaCheck =
+            (SqlContinueReplicaCheck) ((LogicalDal) logicalPlan).getNativeSqlNode();
+
+        String dbName = sqlContinueReplicaCheck.getDbName().toString();
+        if (StringUtils.isEmpty(dbName)) {
+            throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, "database cannot be empty!");
+        }
+        Map<String, String> params = new HashMap<>();
+        params.put(RplConstants.RPL_FULL_VALID_DB, dbName);
+        if (sqlContinueReplicaCheck.getTableName() != null) {
+            String tbName = sqlContinueReplicaCheck.getTableName().toString();
+            params.put(RplConstants.RPL_FULL_VALID_TB, tbName);
+        }
+
+        String daemonEndpoint = CdcTargetUtil.getDaemonMasterTarget();
+        String url = String.format(API_PATTERN, daemonEndpoint);
+        String res;
+        try {
+            res = PooledHttpHelper.doPost(url, ContentType.APPLICATION_JSON, JSON.toJSONString(params), 10000);
+        } catch (Exception e) {
+            throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, e);
+        }
+
+        ResultCode httpResult = JSON.parseObject(res, ResultCode.class);
+        if (httpResult.getCode() != CdcConstants.SUCCESS_CODE) {
+            throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, httpResult.getMsg());
+        }
+        return new AffectRowCursor(0);
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalContinueScheduleHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalContinueScheduleHandler.java
index c2fd40e71..7553992a5 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalContinueScheduleHandler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalContinueScheduleHandler.java
@@ -47,10 +47,11 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) {
         long scheduleId = pauseSchedule.getScheduleId();
         ScheduledJobsRecord record = ScheduledJobsManager.queryScheduledJobById(scheduleId);
-        if(record == null){
+        if (record == null) {
             return new AffectRowCursor(0);
         }
-        PolarPrivilegeUtils.checkPrivilege(record.getTableSchema(), record.getTableName(), PrivilegePoint.ALTER, executionContext);
+        PolarPrivilegeUtils.checkPrivilege(record.getTableSchema(), record.getTableName(), PrivilegePoint.ALTER,
+            executionContext);
 
         logger.info(String.format("continue scheduled job:[%s]", scheduleId));
         int row = ScheduledJobsManager.continueScheduledJob(scheduleId);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateCclRuleHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateCclRuleHandler.java
index 9aaf02e6f..1ea47f6fb 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateCclRuleHandler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateCclRuleHandler.java
@@ -27,6 +27,7 @@
 import com.alibaba.polardbx.common.utils.TStringUtil;
 import com.alibaba.polardbx.common.utils.logger.Logger;
 import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
+import com.alibaba.polardbx.config.ConfigDataMode;
 import com.alibaba.polardbx.executor.cursor.Cursor;
 import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor;
 import com.alibaba.polardbx.executor.spi.IRepository;
@@ -41,6 +42,8 @@
 import com.alibaba.polardbx.optimizer.parse.SqlParameterizeUtils;
 import com.alibaba.polardbx.optimizer.parse.bean.SqlParameterized;
 import com.alibaba.polardbx.optimizer.utils.CclUtils;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.sql.SqlCharStringLiteral;
 import org.apache.calcite.sql.SqlCreateCclRule;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateCclTriggerHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateCclTriggerHandler.java
index 12907719d..3651e9636 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateCclTriggerHandler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateCclTriggerHandler.java
@@ -17,14 +17,12 @@
 package com.alibaba.polardbx.executor.handler;
 
 import com.alibaba.fastjson.JSON;
-import com.alibaba.polardbx.druid.sql.ast.SqlType;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException;
 import com.alibaba.polardbx.common.exception.TddlRuntimeException;
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
 import com.alibaba.polardbx.common.utils.logger.Logger;
 import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
+import com.alibaba.polardbx.druid.sql.ast.SqlType;
 import com.alibaba.polardbx.executor.cursor.Cursor;
 import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor;
 import com.alibaba.polardbx.executor.spi.IRepository;
@@ -38,6 +36,8 @@
 import com.alibaba.polardbx.optimizer.ccl.common.CclSqlMetric;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalCcl;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.sql.SqlCharStringLiteral;
 import org.apache.calcite.sql.SqlCreateCclTrigger;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateScheduleHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateScheduleHandler.java
index 08ebd7d4e..b4deb6900 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateScheduleHandler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateScheduleHandler.java
@@ -187,10 +187,8 @@ public int createAutoSplitTableGroupScheduledJob(SqlCreateSchedule createSchedul
         }
 
         // privilege check
-        List<TablePartRecordInfoContext> allTables = tableGroupConfig.getAllTables();
-        if (CollectionUtils.isNotEmpty(allTables)) {
-            for (TablePartRecordInfoContext tablePartRecordInfoContext : allTables) {
-                final String tableName = tablePartRecordInfoContext.getTableName();
+        if (CollectionUtils.isNotEmpty(tableGroupConfig.getAllTables())) {
+            for (String tableName : tableGroupConfig.getAllTables()) {
                 PolarPrivilegeUtils.checkPrivilege(tableSchema, tableName, PrivilegePoint.ALTER, executionContext);
             }
         }
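
Both schedule handlers in this patch are updated because `TableGroupConfig.getAllTables()` now evidently yields table names directly, removing the `TablePartRecordInfoContext` hop. The simplified privilege loop in isolation (`checkPrivilege` here is a stand-in for `PolarPrivilegeUtils.checkPrivilege(schema, table, ALTER, context)`):

```java
import java.util.List;

// Sketch of the simplified privilege-check loop.
public class PrivilegeLoopDemo {
    static void checkPrivilege(String schema, String table) {
        System.out.println("checking ALTER on " + schema + "." + table);
    }

    public static void main(String[] args) {
        String tableSchema = "test_db";
        List<String> allTables = List.of("t1", "t2"); // getAllTables() now returns names
        for (String tableName : allTables) {
            checkPrivilege(tableSchema, tableName);
        }
    }
}
```
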
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateSecurityEntityHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateSecurityEntityHandler.java
new file mode 100644
index 000000000..65b47ef44
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateSecurityEntityHandler.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.handler;
+
+import com.alibaba.polardbx.common.utils.logger.Logger;
+import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
+import com.alibaba.polardbx.executor.cursor.Cursor;
+import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor;
+import com.alibaba.polardbx.executor.spi.IRepository;
+import com.alibaba.polardbx.gms.lbac.LBACPrivilegeCheckUtils;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityEntity;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityManager;
+import com.alibaba.polardbx.gms.lbac.accessor.LBACAccessorUtils;
+import com.alibaba.polardbx.lbac.LBACException;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.sql.SqlCreateSecurityEntity;
+
+import java.util.Collections;
+
+/**
+ * @author pangzhaoxing
+ */
+public class LogicalCreateSecurityEntityHandler extends HandlerCommon {
+
+    private static final Logger logger = LoggerFactory.getLogger(LogicalCreateSecurityEntityHandler.class);
+
+    public LogicalCreateSecurityEntityHandler(IRepository repo) {
+        super(repo);
+    }
+
+    @Override
+    public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) {
+        if (!LBACPrivilegeCheckUtils.isHighPrivilege(executionContext.getPrivilegeContext().getPolarUserInfo())) {
+            throw new LBACException("check privilege failed");
+        }
+
+        SqlCreateSecurityEntity createSecurityEntity =
+            (SqlCreateSecurityEntity) ((LogicalDal) logicalPlan).getNativeSqlNode();
+
+        LBACSecurityEntity.EntityType entityType =
+            LBACSecurityEntity.EntityType.valueOf(createSecurityEntity.getEntityType().getSimple().toUpperCase());
+        LBACSecurityEntity.EntityKey entityKey =
+            LBACAccessorUtils.parseEntityKey(createSecurityEntity.getEntityKey().getSimple(), entityType);
+        String attrName = createSecurityEntity.getEntityAttr().getSimple().toLowerCase();
+        if (entityType == LBACSecurityEntity.EntityType.TABLE) {
+            if (LBACSecurityManager.getInstance().getPolicy(attrName) == null) {
+                throw new LBACException("the policy does not exist");
+            }
+        } else {
+            if (LBACSecurityManager.getInstance().getLabel(attrName) == null) {
+                throw new LBACException("the label does not exist");
+            }
+        }
+
+        LBACSecurityEntity securityEntity = new LBACSecurityEntity(entityKey, entityType, attrName);
+        int affectRow =
+            LBACSecurityManager.getInstance().insertSecurityEntity(Collections.singletonList(securityEntity));
+        return new AffectRowCursor(affectRow);
+
+    }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateSecurityLabelComponentHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateSecurityLabelComponentHandler.java
new file mode 100644
index 000000000..396053602
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateSecurityLabelComponentHandler.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.handler;
+
+import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException;
+import com.alibaba.polardbx.common.utils.logger.Logger;
+import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
+import com.alibaba.polardbx.executor.cursor.Cursor;
+import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor;
+import com.alibaba.polardbx.executor.spi.IRepository;
+import com.alibaba.polardbx.gms.listener.impl.MetaDbConfigManager;
+import com.alibaba.polardbx.gms.listener.impl.MetaDbDataIdBuilder;
+import com.alibaba.polardbx.gms.metadb.MetaDbDataSource;
+import com.alibaba.polardbx.gms.lbac.LBACPrivilegeCheckUtils;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityManager;
+import com.alibaba.polardbx.gms.lbac.accessor.LBACAccessorUtils;
+import com.alibaba.polardbx.gms.lbac.accessor.LBACComponentAccessor;
+import com.alibaba.polardbx.gms.lbac.component.ComponentType;
+import com.alibaba.polardbx.gms.lbac.component.LBACSecurityLabelComponent;
+import com.alibaba.polardbx.lbac.LBACException;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.sql.SqlCreateSecurityLabelComponent;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+
+/**
+ * @author pangzhaoxing
+ */
+public class LogicalCreateSecurityLabelComponentHandler extends HandlerCommon {
+
+    private static final Logger logger = LoggerFactory.getLogger(LogicalCreateSecurityLabelComponentHandler.class);
+
+    public LogicalCreateSecurityLabelComponentHandler(IRepository repo) {
+        super(repo);
+    }
+
+    @Override
+    public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) {
+        if (!LBACPrivilegeCheckUtils.isHighPrivilege(executionContext.getPrivilegeContext().getPolarUserInfo())) {
+            throw new LBACException("check privilege failed");
+        }
+
+        SqlCreateSecurityLabelComponent createComponent =
+            (SqlCreateSecurityLabelComponent) ((LogicalDal) logicalPlan).getNativeSqlNode();
+
+        String componentName = createComponent.getComponentName().getSimple().toLowerCase(); // lower-case everything
+        String componentContent =
+            createComponent.getComponentContent().getNlsString().getValue().toLowerCase(); // lower-case everything
+        ComponentType componentType =
+            ComponentType.valueOf(createComponent.getComponentType().getSimple().toUpperCase()); // upper-case for the enum
+        LBACSecurityLabelComponent component =
+            LBACAccessorUtils.createSecurityLabelComponent(componentName, componentType, componentContent);
+        if (!LBACSecurityManager.getInstance().validateComponent(component)) {
+            throw new LBACException("security label component is invalid");
+        }
+
+        try (Connection connection = MetaDbDataSource.getInstance().getConnection()) {
+            LBACComponentAccessor slcAccessor = new LBACComponentAccessor();
+            slcAccessor.setConnection(connection);
+            int affectRow = slcAccessor.insert(component);
+
+            MetaDbConfigManager.getInstance().notify(MetaDbDataIdBuilder.getLBACSecurityDataId(), connection);
+            // wait for all CNs to reload from MetaDB
+            MetaDbConfigManager.getInstance().sync(MetaDbDataIdBuilder.getLBACSecurityDataId());
+            return new AffectRowCursor(affectRow);
+        } catch (SQLException e) {
+            throw new TddlNestableRuntimeException(e);
+        }
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateSecurityLabelHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateSecurityLabelHandler.java
new file mode 100644
index 000000000..cc0077152
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateSecurityLabelHandler.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.handler;
+
+import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException;
+import com.alibaba.polardbx.common.utils.logger.Logger;
+import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
+import com.alibaba.polardbx.executor.cursor.Cursor;
+import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor;
+import com.alibaba.polardbx.executor.spi.IRepository;
+import com.alibaba.polardbx.gms.listener.impl.MetaDbConfigManager;
+import com.alibaba.polardbx.gms.listener.impl.MetaDbDataIdBuilder;
+import com.alibaba.polardbx.gms.metadb.MetaDbDataSource;
+import com.alibaba.polardbx.gms.lbac.LBACPrivilegeCheckUtils;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityManager;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityLabel;
+import com.alibaba.polardbx.gms.lbac.accessor.LBACAccessorUtils;
+import com.alibaba.polardbx.gms.lbac.accessor.LBACLabelAccessor;
+import com.alibaba.polardbx.lbac.LBACException;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.sql.SqlCreateSecurityLabel;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+
+/**
+ * @author pangzhaoxing
+ */
+public class LogicalCreateSecurityLabelHandler extends HandlerCommon {
+
+    private static final Logger logger = LoggerFactory.getLogger(LogicalCreateSecurityLabelHandler.class);
+
+    public LogicalCreateSecurityLabelHandler(IRepository repo) {
+        super(repo);
+    }
+
+    @Override
+    public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) {
+        if (!LBACPrivilegeCheckUtils.isHighPrivilege(executionContext.getPrivilegeContext().getPolarUserInfo())) {
+            throw new LBACException("check privilege failed");
+        }
+
+        SqlCreateSecurityLabel createLabel =
+            (SqlCreateSecurityLabel) ((LogicalDal) logicalPlan).getNativeSqlNode();
+
+        String labelName = createLabel.getLabelName().getSimple().toLowerCase(); // lower-case everything
+        String policyName = createLabel.getPolicyName().getSimple().toLowerCase(); // lower-case everything
+        String labelContent = createLabel.getLabelContent().getNlsString().getValue().toLowerCase(); // lower-case everything
+        LBACSecurityLabel label = LBACAccessorUtils.createSecurityLabel(labelName, policyName, labelContent);
+        if (!LBACSecurityManager.getInstance().validateLabel(label)) {
+            throw new LBACException("security label is invalid");
+        }
+
+        try (Connection connection = MetaDbDataSource.getInstance().getConnection()) {
+            LBACLabelAccessor slAccessor = new LBACLabelAccessor();
+            slAccessor.setConnection(connection);
+            int affectRow = slAccessor.insert(label);
+
+            MetaDbConfigManager.getInstance().notify(MetaDbDataIdBuilder.getLBACSecurityDataId(), connection);
+            // wait for all CNs to reload from MetaDB
+            MetaDbConfigManager.getInstance().sync(MetaDbDataIdBuilder.getLBACSecurityDataId());
+            return new AffectRowCursor(affectRow);
+        } catch (SQLException e) {
+            throw new TddlNestableRuntimeException(e);
+        }
+    }
+}
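
All of the LBAC create handlers in this patch persist through the same sequence: bind an accessor to a MetaDB connection, insert, `notify` the LBAC data id on that same connection, then `sync` so every CN reloads before the statement returns. A skeleton of that sequence with stand-in interfaces (the real accessors and `MetaDbConfigManager` live in polardbx-gms):

```java
import java.sql.Connection;
import java.sql.SQLException;

// Stand-in accessor abstraction for illustration only.
interface Accessor<T> {
    void setConnection(Connection c);
    int insert(T row) throws SQLException;
}

class MetaDbWriteDemo {
    static <T> int insertAndBroadcast(Connection conn, Accessor<T> accessor, T row,
                                      Runnable notify, Runnable sync) throws SQLException {
        accessor.setConnection(conn);
        int affected = accessor.insert(row);
        notify.run(); // bump the config data id inside the same connection
        sync.run();   // block until every CN has reloaded the metadata
        return affected;
    }
}
```
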
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateSecurityPolicyHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateSecurityPolicyHandler.java
new file mode 100644
index 000000000..105cc7bef
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalCreateSecurityPolicyHandler.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.handler;
+
+import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException;
+import com.alibaba.polardbx.common.utils.logger.Logger;
+import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
+import com.alibaba.polardbx.executor.cursor.Cursor;
+import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor;
+import com.alibaba.polardbx.executor.spi.IRepository;
+import com.alibaba.polardbx.gms.listener.impl.MetaDbConfigManager;
+import com.alibaba.polardbx.gms.listener.impl.MetaDbDataIdBuilder;
+import com.alibaba.polardbx.gms.metadb.MetaDbDataSource;
+import com.alibaba.polardbx.gms.lbac.LBACPrivilegeCheckUtils;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityManager;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityPolicy;
+import com.alibaba.polardbx.gms.lbac.accessor.LBACAccessorUtils;
+import com.alibaba.polardbx.gms.lbac.accessor.LBACPolicyAccessor;
+import com.alibaba.polardbx.lbac.LBACException;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.sql.SqlCreateSecurityPolicy;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+
+public class LogicalCreateSecurityPolicyHandler extends HandlerCommon {
+
+    private static final Logger logger = LoggerFactory.getLogger(LogicalCreateSecurityPolicyHandler.class);
+
+    public LogicalCreateSecurityPolicyHandler(IRepository repo) {
+        super(repo);
+    }
+
+    @Override
+    public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) {
+        if (!LBACPrivilegeCheckUtils.isHighPrivilege(executionContext.getPrivilegeContext().getPolarUserInfo())) {
+            throw new LBACException("check privilege failed");
+        }
+
+        SqlCreateSecurityPolicy createPolicy =
+            (SqlCreateSecurityPolicy) ((LogicalDal) logicalPlan).getNativeSqlNode();
+
+        String policyName = createPolicy.getPolicyName().getSimple().toLowerCase(); // lower-case everything
+        String policyComponents =
+            createPolicy.getPolicyComponents().getNlsString().getValue().toLowerCase(); // lower-case everything
+        LBACSecurityPolicy policy = LBACAccessorUtils.createSecurityPolicy(policyName, policyComponents);
+        if (!LBACSecurityManager.getInstance().validatePolicy(policy)) {
+            throw new LBACException("security policy is invalid");
+        }
+
+        try (Connection connection = MetaDbDataSource.getInstance().getConnection()) {
+            LBACPolicyAccessor spAccessor = new LBACPolicyAccessor();
+            spAccessor.setConnection(connection);
+            int affectRow = spAccessor.insert(policy);
+
+            MetaDbConfigManager.getInstance().notify(MetaDbDataIdBuilder.getLBACSecurityDataId(), connection);
+            // wait for all CNs to reload from MetaDB
+            MetaDbConfigManager.getInstance().sync(MetaDbDataIdBuilder.getLBACSecurityDataId());
+            return new AffectRowCursor(affectRow);
+        } catch (SQLException e) {
+            throw new TddlNestableRuntimeException(e);
+        }
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalDropCclRuleHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalDropCclRuleHandler.java
index 78548666e..d16ed221c 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalDropCclRuleHandler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalDropCclRuleHandler.java
@@ -21,6 +21,7 @@
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
 import com.alibaba.polardbx.common.utils.logger.Logger;
 import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
+import com.alibaba.polardbx.config.ConfigDataMode;
 import com.alibaba.polardbx.executor.cursor.Cursor;
 import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor;
 import com.alibaba.polardbx.executor.spi.IRepository;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalDropScheduleHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalDropScheduleHandler.java
index 7f2fee39d..cf031eec2 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalDropScheduleHandler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalDropScheduleHandler.java
@@ -76,10 +76,8 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) {
             tableGroupInfoManager.getTableGroupConfigByName(record.getTableGroupName());
         if (tableGroupConfig != null) {
             // privilege check
-            List<TablePartRecordInfoContext> allTables = tableGroupConfig.getAllTables();
-            if (CollectionUtils.isNotEmpty(allTables)) {
-                for (TablePartRecordInfoContext tablePartRecordInfoContext : allTables) {
-                    final String tableName = tablePartRecordInfoContext.getTableName();
+            if (CollectionUtils.isNotEmpty(tableGroupConfig.getAllTables())) {
+                for (String tableName : tableGroupConfig.getAllTables()) {
                     PolarPrivilegeUtils.checkPrivilege(record.getTableSchema(), tableName, PrivilegePoint.ALTER,
                         executionContext);
                 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalDropSecurityEntityHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalDropSecurityEntityHandler.java
new file mode 100644
index 000000000..396adfc05
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalDropSecurityEntityHandler.java
@@ -0,0 +1,75 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.handler;
+
+import com.alibaba.polardbx.common.utils.logger.Logger;
+import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
+import com.alibaba.polardbx.executor.cursor.Cursor;
+import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor;
+import com.alibaba.polardbx.executor.spi.IRepository;
+import com.alibaba.polardbx.gms.lbac.LBACPrivilegeCheckUtils;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityEntity;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityManager;
+import com.alibaba.polardbx.gms.lbac.accessor.LBACAccessorUtils;
+import com.alibaba.polardbx.lbac.LBACException;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.sql.SqlCreateSecurityEntity;
+import org.apache.calcite.sql.SqlDropSecurityEntity;
+import org.apache.calcite.sql.SqlIdentifier;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * @author pangzhaoxing
+ */
+public class LogicalDropSecurityEntityHandler extends HandlerCommon {
+
+    private static final Logger logger = LoggerFactory.getLogger(LogicalDropSecurityEntityHandler.class);
+
+    public LogicalDropSecurityEntityHandler(IRepository repo) {
+        super(repo);
+    }
+
+    @Override
+    public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) {
+        if (!LBACPrivilegeCheckUtils.isHighPrivilege(executionContext.getPrivilegeContext().getPolarUserInfo())) {
+            throw new LBACException("check privilege failed");
+        }
+
+        SqlDropSecurityEntity dropSecurityEntity =
+            (SqlDropSecurityEntity) ((LogicalDal) logicalPlan).getNativeSqlNode();
+
+        List<LBACSecurityEntity> securityEntities = new ArrayList<>();
+        for (int i = 0; i < dropSecurityEntity.getEntityTypes().size(); i++) {
+            LBACSecurityEntity.EntityType entityType =
+                LBACSecurityEntity.EntityType.valueOf(
+                    dropSecurityEntity.getEntityTypes().get(i).getSimple().toUpperCase());
+            LBACSecurityEntity.EntityKey entityKey = LBACAccessorUtils.parseEntityKey(
+                dropSecurityEntity.getEntityKeys().get(i).getSimple(), entityType);
+            securityEntities.add(new LBACSecurityEntity(entityType, entityKey));
+        }
+
+        int affectRow = LBACSecurityManager.getInstance().deleteSecurityEntity(securityEntities);
+        return new AffectRowCursor(affectRow);
+
+    }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalDropSecurityLabelComponentHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalDropSecurityLabelComponentHandler.java
new file mode 100644
index 000000000..0ccc27eab
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalDropSecurityLabelComponentHandler.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.handler; + +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.druid.sql.ast.SQLName; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; +import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.gms.lbac.LBACPrivilegeCheckUtils; +import com.alibaba.polardbx.gms.lbac.LBACSecurityLabel; +import com.alibaba.polardbx.gms.lbac.LBACSecurityManager; +import com.alibaba.polardbx.gms.lbac.LBACSecurityPolicy; +import com.alibaba.polardbx.gms.lbac.component.LBACSecurityLabelComponent; +import com.alibaba.polardbx.lbac.LBACException; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.sql.SqlDropSecurityLabelComponent; +import org.apache.calcite.sql.SqlIdentifier; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; + +/** + * @author pangzhaoxing + */ +public class LogicalDropSecurityLabelComponentHandler extends HandlerCommon { + + private static final Logger logger = LoggerFactory.getLogger(LogicalDropSecurityLabelComponentHandler.class); + + public LogicalDropSecurityLabelComponentHandler(IRepository repo) { + super(repo); + } + + @Override + public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { + if (!LBACPrivilegeCheckUtils.isHighPrivilege(executionContext.getPrivilegeContext().getPolarUserInfo())) { + throw new LBACException("check privilege failed"); + } + + SqlDropSecurityLabelComponent dropSecurityLabelComponent = + (SqlDropSecurityLabelComponent) ((LogicalDal) logicalPlan).getNativeSqlNode(); + List components = new ArrayList<>(); + for (SqlIdentifier identifier : dropSecurityLabelComponent.getComponentNames()) { + String componentName = identifier.getSimple().toLowerCase(); + LBACSecurityLabelComponent securityLabelComponent = + LBACSecurityManager.getInstance().getComponent(componentName); + + if (securityLabelComponent == null) { + continue; + } + + for (LBACSecurityPolicy policy : LBACSecurityManager.getInstance().getPolicies()) { + for (String component : policy.getComponentNames()) { + if (componentName.equalsIgnoreCase(component)) { + throw new LBACException("the component is used by policy, can not be dropped"); + } + } + } + components.add(securityLabelComponent); + } + + int affectRow = LBACSecurityManager.getInstance().deleteSecurityLabelComponent(components); + return new AffectRowCursor(affectRow); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalDropSecurityLabelHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalDropSecurityLabelHandler.java new file mode 100644 index 000000000..26b50f514 --- /dev/null +++ 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalDropSecurityLabelHandler.java @@ -0,0 +1,70 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.handler; + +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; +import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.gms.lbac.LBACPrivilegeCheckUtils; +import com.alibaba.polardbx.gms.lbac.LBACSecurityLabel; +import com.alibaba.polardbx.gms.lbac.LBACSecurityManager; +import com.alibaba.polardbx.lbac.LBACException; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.sql.SqlCreateSecurityPolicy; +import org.apache.calcite.sql.SqlDropSecurityLabel; +import org.apache.calcite.sql.SqlIdentifier; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * @author pangzhaoxing + */ +public class LogicalDropSecurityLabelHandler extends HandlerCommon { + + private static final Logger logger = LoggerFactory.getLogger(LogicalDropSecurityLabelHandler.class); + + public LogicalDropSecurityLabelHandler(IRepository repo) { + super(repo); + } + + @Override + public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { + if (!LBACPrivilegeCheckUtils.isHighPrivilege(executionContext.getPrivilegeContext().getPolarUserInfo())) { + throw new LBACException("check privilege failed"); + } + + SqlDropSecurityLabel dropSecurityLabel = + (SqlDropSecurityLabel) ((LogicalDal) logicalPlan).getNativeSqlNode(); + List labels = new ArrayList<>(); + for (SqlIdentifier identifier : dropSecurityLabel.getLabelNames()) { + String labelName = identifier.getSimple().toLowerCase(); + LBACSecurityLabel securityLabel = LBACSecurityManager.getInstance().getLabel(labelName); + if (securityLabel == null) { + continue; + } + labels.add(securityLabel); + } + int affectRow = LBACSecurityManager.getInstance().deleteSecurityLabel(labels); + return new AffectRowCursor(affectRow); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalDropSecurityPolicyHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalDropSecurityPolicyHandler.java new file mode 100644 index 000000000..de8adea76 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalDropSecurityPolicyHandler.java @@ -0,0 +1,76 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.handler; + +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; +import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.gms.lbac.LBACPrivilegeCheckUtils; +import com.alibaba.polardbx.gms.lbac.LBACSecurityEntity; +import com.alibaba.polardbx.gms.lbac.LBACSecurityLabel; +import com.alibaba.polardbx.gms.lbac.LBACSecurityManager; +import com.alibaba.polardbx.gms.lbac.LBACSecurityPolicy; +import com.alibaba.polardbx.lbac.LBACException; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.sql.SqlDropSecurityLabel; +import org.apache.calcite.sql.SqlDropSecurityPolicy; +import org.apache.calcite.sql.SqlIdentifier; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * @author pangzhaoxing + */ +public class LogicalDropSecurityPolicyHandler extends HandlerCommon { + + private static final Logger logger = LoggerFactory.getLogger(LogicalDropSecurityPolicyHandler.class); + + public LogicalDropSecurityPolicyHandler(IRepository repo) { + super(repo); + } + + @Override + public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { + if (!LBACPrivilegeCheckUtils.isHighPrivilege(executionContext.getPrivilegeContext().getPolarUserInfo())) { + throw new LBACException("check privilege failed"); + } + + SqlDropSecurityPolicy dropSecurityPolicy = + (SqlDropSecurityPolicy) ((LogicalDal) logicalPlan).getNativeSqlNode(); + List policies = new ArrayList<>(); + for (SqlIdentifier identifier : dropSecurityPolicy.getPolicyNames()) { + String policyName = identifier.getSimple().toLowerCase(); + LBACSecurityPolicy securityPolicy = LBACSecurityManager.getInstance().getPolicy(policyName); + if (securityPolicy == null) { + continue; + } + + if (!securityPolicy.getLabelNames().isEmpty()) { + throw new LBACException("can not drop a policy with labels : " + policyName); + } + policies.add(securityPolicy); + } + int affectRow = LBACSecurityManager.getInstance().deleteSecurityPolicy(policies); + return new AffectRowCursor(affectRow); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalFireScheduleHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalFireScheduleHandler.java index f68f41e03..62ab4b3b7 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalFireScheduleHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalFireScheduleHandler.java @@ -21,8 +21,11 @@ import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; import com.alibaba.polardbx.executor.scheduler.ScheduledJobsManager; +import 
com.alibaba.polardbx.executor.scheduler.executor.statistic.StatisticHllScheduledJob; +import com.alibaba.polardbx.executor.scheduler.executor.statistic.StatisticSampleCollectionScheduledJob; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.executor.utils.PolarPrivilegeUtils; +import com.alibaba.polardbx.gms.scheduler.ExecutableScheduledJob; import com.alibaba.polardbx.gms.scheduler.ScheduledJobsRecord; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; @@ -45,6 +48,30 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { if (record == null) { return new AffectRowCursor(0); } + if (record.getExecutorType().equalsIgnoreCase("STATISTIC_SAMPLE_SKETCH")) { + ExecutableScheduledJob executableScheduledJob = new ExecutableScheduledJob(); + executableScheduledJob.setScheduleId(record.getScheduleId()); + executableScheduledJob.setFireTime(System.currentTimeMillis()); + executableScheduledJob.setTimeZone("SYSTEM"); + StatisticSampleCollectionScheduledJob job = + new StatisticSampleCollectionScheduledJob(executableScheduledJob); + job.setFromScheduleJob(false); + job.execute(); + + logger.info(String.format("fire scheduled job:[%s]", scheduleId)); + return new AffectRowCursor(1); + } else if (record.getExecutorType().equalsIgnoreCase("STATISTIC_HLL_SKETCH")) { + ExecutableScheduledJob executableScheduledJob = new ExecutableScheduledJob(); + executableScheduledJob.setScheduleId(record.getScheduleId()); + executableScheduledJob.setFireTime(System.currentTimeMillis()); + executableScheduledJob.setTimeZone("SYSTEM"); + StatisticHllScheduledJob job = new StatisticHllScheduledJob(executableScheduledJob); + job.setFromScheduleJob(false); + job.execute(); + + logger.info(String.format("fire scheduled job:[%s]", scheduleId)); + return new AffectRowCursor(1); + } PolarPrivilegeUtils.checkPrivilege(record.getTableSchema(), record.getTableName(), PrivilegePoint.ALTER, executionContext); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalFlushLogsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalFlushLogsHandler.java index 97651d4de..d0bb8e693 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalFlushLogsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalFlushLogsHandler.java @@ -16,13 +16,12 @@ package com.alibaba.polardbx.executor.handler; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; import com.alibaba.polardbx.common.cdc.CdcManagerHelper; -import com.alibaba.polardbx.common.cdc.DdlVisibility; import com.alibaba.polardbx.common.cdc.ICdcManager; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; import com.alibaba.polardbx.executor.spi.IRepository; @@ -31,6 +30,7 @@ import com.alibaba.polardbx.gms.metadb.cdc.BinlogStreamGroupRecord; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; import com.google.common.collect.Maps; import org.apache.calcite.rel.RelNode; 
 import org.apache.calcite.sql.SqlFlushLogs;
@@ -42,13 +42,15 @@
 import java.util.List;
 import java.util.Map;
 
+import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter;
+
 /**
  * @author chengjin
  * @since 2023/7/21 14:37
  **/
 public class LogicalFlushLogsHandler extends HandlerCommon {
 
-    private static final Logger logger = LoggerFactory.getLogger(LogicalShowCdcStorageHandler.class);
+    private static final Logger cdcLogger = SQLRecorderLogger.cdcLogger;
 
     public LogicalFlushLogsHandler(IRepository repo) {
         super(repo);
@@ -77,11 +79,11 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) {
             }
         }
 
-        Map<String, Object> extendParams = Maps.newHashMap();
+        Map<String, Object> extendParams = buildExtendParameter(executionContext);
         extendParams.put(ICdcManager.CDC_GROUP_NAME, groupName);
-        CdcManagerHelper.getInstance()
-            .notifyDdlNew("", null, SqlKind.FLUSH_LOGS.name(), executionContext.getOriginSql(), null, null, null,
-                DdlVisibility.Private, extendParams);
+        CdcManagerHelper.getInstance().notifyDdlNew("", null, SqlKind.FLUSH_LOGS.name(),
+            executionContext.getOriginSql(), null, null, null,
+            CdcDdlMarkVisibility.Private, extendParams);
         return new AffectRowCursor(0);
     }
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalGrantSecurityLabelHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalGrantSecurityLabelHandler.java
new file mode 100644
index 000000000..af81cdb2c
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalGrantSecurityLabelHandler.java
@@ -0,0 +1,111 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.handler;
+
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import com.alibaba.polardbx.executor.cursor.Cursor;
+import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor;
+import com.alibaba.polardbx.executor.spi.IRepository;
+import com.alibaba.polardbx.gms.privilege.PolarAccountInfo;
+import com.alibaba.polardbx.gms.privilege.PolarPrivManager;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityEntity;
+import com.alibaba.polardbx.gms.lbac.LBACPrivilegeCheckUtils;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityManager;
+import com.alibaba.polardbx.gms.lbac.LBACSecurityPolicy;
+import com.alibaba.polardbx.lbac.LBACException;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.sql.SqlGrantSecurityLabel;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static com.alibaba.polardbx.common.exception.code.ErrorCode.ERR_USER_NOT_EXISTS;
+
+/**
+ * @author pangzhaoxing
+ */
+public class LogicalGrantSecurityLabelHandler extends HandlerCommon {
+
+    public LogicalGrantSecurityLabelHandler(IRepository repo) {
+        super(repo);
+    }
+
+    @Override
+    public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) {
+        if (!LBACPrivilegeCheckUtils.isHighPrivilege(executionContext.getPrivilegeContext().getPolarUserInfo())) {
+            throw new LBACException("check privilege failed");
+        }
+
+        SqlGrantSecurityLabel grantLabel =
+            (SqlGrantSecurityLabel) ((LogicalDal) logicalPlan).getNativeSqlNode();
+        String policyName = grantLabel.getPolicyName().getSimple().toLowerCase();
+        String labelName = grantLabel.getLabelName().getSimple().toLowerCase();
+        String user = grantLabel.getUserName().getUser();
+        String host = grantLabel.getUserName().getHost();
+        String accessType = grantLabel.getAccessType().getSimple().toLowerCase();
+        // validate the access type
+        boolean readAccess = false;
+        boolean writeAccess = false;
+        if ("read".equalsIgnoreCase(accessType)) {
+            readAccess = true;
+        } else if ("write".equalsIgnoreCase(accessType)) {
+            writeAccess = true;
+        } else if ("all".equalsIgnoreCase(accessType)) {
+            readAccess = true;
+            writeAccess = true;
+        } else {
+            throw new LBACException("unknown access type");
+        }
+        LBACSecurityPolicy policy = LBACSecurityManager.getInstance().getPolicy(policyName);
+        if (policy == null) {
+            throw new LBACException("security policy does not exist");
+        }
+        if (LBACSecurityManager.getInstance().getLabel(labelName) == null) {
+            throw new LBACException("security label does not exist");
+        }
+        if (!policy.containLabel(labelName)) {
+            throw new LBACException("security label does not belong to the policy");
+        }
+        PolarAccountInfo accountInfo = PolarPrivManager.getInstance().getMatchUser(user, host);
+        if (accountInfo == null) {
+            throw new TddlRuntimeException(ERR_USER_NOT_EXISTS);
+        }
+
+        List<LBACSecurityEntity> esaList = new ArrayList<>(2);
+        String entityAttr = labelName;
+        if (readAccess) {
+            LBACSecurityEntity esa = new LBACSecurityEntity();
+            esa.setEntityKey(LBACSecurityEntity.EntityKey.createUserKey(accountInfo.getIdentifier(), policyName));
+            esa.setSecurityAttr(entityAttr);
+            esa.setType(LBACSecurityEntity.EntityType.USER_READ);
+            esaList.add(esa);
+        }
+        if (writeAccess) {
+            LBACSecurityEntity esa = new LBACSecurityEntity();
+            esa.setEntityKey(LBACSecurityEntity.EntityKey.createUserKey(accountInfo.getIdentifier(), policyName));
+            esa.setSecurityAttr(entityAttr);
+
esa.setType(LBACSecurityEntity.EntityType.USER_WRITE); + esaList.add(esa); + } + + int affectRow = LBACSecurityManager.getInstance().insertSecurityEntity(esaList); + return new AffectRowCursor(affectRow); + + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalImportSequenceHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalImportSequenceHandler.java new file mode 100644 index 000000000..86c58816a --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalImportSequenceHandler.java @@ -0,0 +1,114 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.handler; + +import com.alibaba.polardbx.druid.sql.SQLUtils; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.executor.utils.StandardToEnterpriseEditionUtil; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalImportSequence; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.sql.SqlImportSequence; + +import java.math.BigInteger; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; + +/** + * Created by zhuqiwei. 
+ *
+ * @author zhuqiwei
+ */
+public class LogicalImportSequenceHandler extends HandlerCommon {
+    public LogicalImportSequenceHandler(IRepository repo) {
+        super(repo);
+    }
+
+    @Override
+    public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) {
+        final LogicalImportSequence importSequence = (LogicalImportSequence) logicalPlan;
+        final SqlImportSequence sqlImportSequence = (SqlImportSequence) importSequence.getNativeSqlNode();
+
+        final String logicalDatabase = SQLUtils.normalize(sqlImportSequence.getLogicalDb());
+
+        Map<String, String> result = handleImportSequenceInSchema(logicalDatabase, executionContext);
+
+        return buildResult(result);
+    }
+
+    protected Map<String, String> handleImportSequenceInSchema(String logicalDatabase,
+                                                               ExecutionContext executionContext) {
+        Set<String> allLogicalTableWhichHasSeq =
+            StandardToEnterpriseEditionUtil.queryTableWhichHasSequence(logicalDatabase, executionContext);
+
+        String phyDatabase = StandardToEnterpriseEditionUtil.queryPhyDbNameByLogicalDbName(logicalDatabase);
+        Map<String, String> sequences =
+            StandardToEnterpriseEditionUtil.querySequenceValuesInPhysicalDatabase(phyDatabase, logicalDatabase);
+
+        Map<String, String> result = new TreeMap<>(String::compareToIgnoreCase);
+
+        for (String logicalTb : allLogicalTableWhichHasSeq) {
+            if (!sequences.containsKey(logicalTb)) {
+                result.put(logicalTb, "only contain logical table");
+            }
+        }
+
+        Iterator<Map.Entry<String, String>> iter = sequences.entrySet().iterator();
+        while (iter.hasNext()) {
+            Map.Entry<String, String> entry = iter.next();
+            String phyTb = entry.getKey();
+            if (!allLogicalTableWhichHasSeq.contains(phyTb)) {
+                result.put(phyTb, "only contain physical table");
+                iter.remove();
+            }
+        }
+
+        StandardToEnterpriseEditionUtil.updateSequence(sequences, logicalDatabase, result);
+
+        return result;
+    }
+
+    Cursor buildResult(Map<String, String> resultMap) {
+        ArrayResultCursor result = new ArrayResultCursor("Result");
+        if (!resultMap.isEmpty()) {
+            result.addColumn("table_name", DataTypes.StringType);
+            result.addColumn("state", DataTypes.StringType);
+            result.addColumn("err_msg", DataTypes.StringType);
+
+            for (Map.Entry<String, String> entry : resultMap.entrySet()) {
+                result.addRow(
+                    new Object[] {
+                        entry.getKey(),
+                        "fail",
+                        entry.getValue()
+                    }
+                );
+            }
+        } else {
+            result.addColumn("table_name", DataTypes.StringType);
+            result.addRow(new Object[] {"ALL SUCCESS"});
+        }
+
+        return result;
+    }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalPauseReplicaCheckTableHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalPauseReplicaCheckTableHandler.java
new file mode 100644
index 000000000..24d754bb3
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalPauseReplicaCheckTableHandler.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.handler;
+
+import com.alibaba.fastjson.JSON;
+import com.alibaba.polardbx.common.cdc.CdcConstants;
+import com.alibaba.polardbx.common.cdc.ResultCode;
+import com.alibaba.polardbx.common.cdc.RplConstants;
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.common.utils.HttpClientHelper;
+import com.alibaba.polardbx.common.utils.PooledHttpHelper;
+import com.alibaba.polardbx.druid.util.StringUtils;
+import com.alibaba.polardbx.executor.cursor.Cursor;
+import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor;
+import com.alibaba.polardbx.executor.spi.IRepository;
+import com.alibaba.polardbx.net.util.CdcTargetUtil;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.sql.SqlPauseReplicaCheck;
+import org.apache.http.entity.ContentType;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * @author yudong
+ * @since 2023/11/9 10:40
+ **/
+public class LogicalPauseReplicaCheckTableHandler extends HandlerCommon {
+
+    private static final String API_PATTERN = "http://%s/replica/fullValidation/pause";
+
+    public LogicalPauseReplicaCheckTableHandler(IRepository repo) {
+        super(repo);
+    }
+
+    @Override
+    public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) {
+        SqlPauseReplicaCheck sqlPauseReplicaCheck =
+            (SqlPauseReplicaCheck) ((LogicalDal) logicalPlan).getNativeSqlNode();
+
+        String dbName = sqlPauseReplicaCheck.getDbName().toString();
+        if (StringUtils.isEmpty(dbName)) {
+            throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, "database cannot be empty!");
+        }
+        Map<String, String> params = new HashMap<>();
+        params.put(RplConstants.RPL_FULL_VALID_DB, dbName);
+        if (sqlPauseReplicaCheck.getTableName() != null) {
+            String tbName = sqlPauseReplicaCheck.getTableName().toString();
+            params.put(RplConstants.RPL_FULL_VALID_TB, tbName);
+        }
+
+        String daemonEndpoint = CdcTargetUtil.getDaemonMasterTarget();
+        String url = String.format(API_PATTERN, daemonEndpoint);
+        String res;
+        try {
+            res = PooledHttpHelper.doPost(url, ContentType.APPLICATION_JSON, JSON.toJSONString(params), 10000);
+        } catch (Exception e) {
+            throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, e);
+        }
+
+        ResultCode httpResult = JSON.parseObject(res, ResultCode.class);
+        if (httpResult.getCode() != CdcConstants.SUCCESS_CODE) {
+            throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, httpResult.getMsg());
+        }
+        return new AffectRowCursor(0);
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalPauseScheduleHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalPauseScheduleHandler.java
index e38281786..ab881bfff 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalPauseScheduleHandler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalPauseScheduleHandler.java
@@ -47,10 +47,11 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) {
         long scheduleId = pauseSchedule.getScheduleId();
         ScheduledJobsRecord record = ScheduledJobsManager.queryScheduledJobById(scheduleId);
-        if(record == null){
+        if (record == null) {
             return new AffectRowCursor(0);
         }
-        PolarPrivilegeUtils.checkPrivilege(record.getTableSchema(),
record.getTableName(), PrivilegePoint.ALTER, executionContext); + PolarPrivilegeUtils.checkPrivilege(record.getTableSchema(), record.getTableName(), PrivilegePoint.ALTER, + executionContext); logger.info(String.format("pause scheduled job:[%s]", scheduleId)); int row = ScheduledJobsManager.pauseScheduledJob(scheduleId); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalRebalanceHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalRebalanceHandler.java index 74bb2082b..889bb4725 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalRebalanceHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalRebalanceHandler.java @@ -27,6 +27,7 @@ import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.balancer.BalanceOptions; import com.alibaba.polardbx.executor.balancer.Balancer; +import com.alibaba.polardbx.executor.balancer.action.ActionUtils; import com.alibaba.polardbx.executor.balancer.action.BalanceAction; import com.alibaba.polardbx.executor.balancer.policy.PolicyDrainNode; import com.alibaba.polardbx.executor.cursor.Cursor; @@ -37,7 +38,10 @@ import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; import com.alibaba.polardbx.executor.ddl.newengine.job.TransientDdlJob; +import com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils; import com.alibaba.polardbx.executor.handler.ddl.LogicalCommonDdlHandler; +import com.alibaba.polardbx.executor.physicalbackfill.PhysicalBackfillUtils; +import com.alibaba.polardbx.executor.scaleout.ScaleOutUtils; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.gms.scheduler.DdlPlanAccessor; import com.alibaba.polardbx.gms.scheduler.DdlPlanRecord; @@ -45,9 +49,11 @@ import com.alibaba.polardbx.gms.topology.DbInfoRecord; import com.alibaba.polardbx.gms.util.MetaDbUtil; import com.alibaba.polardbx.optimizer.config.schema.DefaultDbSchema; +import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.rel.ddl.BaseDdlOperation; +import com.alibaba.polardbx.optimizer.locality.StoragePoolManager; import com.alibaba.polardbx.optimizer.utils.RelUtils; import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlRebalance; @@ -58,6 +64,7 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.text.DecimalFormat; import java.util.ArrayList; import java.util.List; import java.util.concurrent.atomic.AtomicReference; @@ -103,6 +110,22 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext ec) { initSchemaName(ec); + final boolean useChangeSet = ChangeSetUtils.isChangeSetProcedure(ec); + final boolean usePhysicalBackfill = PhysicalBackfillUtils.isSupportForPhysicalBackfill(ec.getSchemaName(), ec); + if (useChangeSet && usePhysicalBackfill && !sqlRebalance.isExplain()) { + long currentMaxSlaveLatency = PhysicalBackfillUtils.getTheMaxSlaveLatency(); + long configMaxSlaveLatency = + ec.getParamManager().getLong(ConnectionParams.PHYSICAL_BACKFILL_MAX_SLAVE_LATENCY); + if (currentMaxSlaveLatency > configMaxSlaveLatency) { + throw new TddlRuntimeException(ErrorCode.ERR_REBALANCE, + "Seconds_Behind_Master is too large, please fix this 
problem before scheduling this job");
+            }
+        }
+
+        if (useChangeSet && usePhysicalBackfill) {
+            PhysicalBackfillUtils.destroyDataSources();
+        }
+
         if (sqlRebalance.isLogicalDdl() && !sqlRebalance.isExplain()) {
             return handleLogicalRebalance(sqlRebalance, ec);
         }
@@ -229,12 +252,22 @@ protected List<BalanceAction> buildActions(BaseDdlOperation logicalPlan, Executi
     }
 
     protected Cursor buildCursor(List<BalanceAction> actions, ExecutionContext ec) {
+        final boolean useChangeSet = ChangeSetUtils.isChangeSetProcedure(ec);
+        final boolean usePhysicalBackfill = PhysicalBackfillUtils.isSupportForPhysicalBackfill(ec.getSchemaName(), ec);
+        boolean physicalBackfill = useChangeSet && usePhysicalBackfill;
+
         ArrayResultCursor result = new ArrayResultCursor("Rebalance");
         result.addColumn("JOB_ID", DataTypes.LongType);
         result.addColumn("SCHEMA", DataTypes.StringType);
         result.addColumn("NAME", DataTypes.StringType);
         result.addColumn("ACTION", DataTypes.StringType);
         result.addColumn("BACKFILL_ROWS", DataTypes.LongType);
+        double speed = 0;
+        if (physicalBackfill) {
+            result.addColumn("BACKFILL_DATA_SIZE", DataTypes.LongType);
+            result.addColumn("BACKFILL_ESTIMATED_TIME", DataTypes.DoubleType);
+            speed = PhysicalBackfillUtils.netWorkSpeedTest(ec);
+        }
 
         long jobId = 0;
         if (ec.getDdlContext() != null) {
@@ -246,7 +279,29 @@ protected Cursor buildCursor(List<BalanceAction> actions, ExecutionContext ec) {
             final String name = action.getName();
             final String step = action.getStep();
             final Long backfillRows = action.getBackfillRows();
-            result.addRow(new Object[] {jobId, schema, name, step, backfillRows});
+            if (physicalBackfill) {
+                final Long backfillData = action.getDiskSize();
+                DecimalFormat df = new DecimalFormat("#.00");
+                double backfillCost = 0;
+
+                double parallel;
+                if (DbInfoManager.getInstance().isNewPartitionDb(schema)) {
+                    parallel = ScaleOutUtils.getTableGroupTaskParallelism(ec);
+                } else {
+                    parallel = ScaleOutUtils.getScaleoutTaskParallelism(ec);
+                }
+                parallel = Math.min(action.getLogicalTableCount(), parallel);
+                parallel = Math.max(1, parallel * 0.75);
+                if (speed > 0 && backfillData > 0) {
+                    backfillCost = Double.parseDouble(df.format(3 * backfillData / 1024 / speed / parallel));
+                }
+
+                result.addRow(new Object[] {
+                    jobId, schema, name, step, backfillRows, backfillData, physicalBackfill ? backfillCost : null});
+            } else {
+                result.addRow(new Object[] {
+                    jobId, schema, name, step, backfillRows});
+            }
         }
 
         return result;
@@ -268,9 +323,12 @@ private Cursor handleLogicalRebalance(SqlRebalance sqlRebalance, ExecutionContex
         if (sqlRebalance.getDrainNode() != null &&
             (sqlRebalance.isRebalanceDatabase() || sqlRebalance.isRebalanceDatabase()
                 || sqlRebalance.isRebalanceTenant() || sqlRebalance.isRebalaceTenantDb())) {
-            PolicyDrainNode.DrainNodeInfo drainNodeInfo =
-                PolicyDrainNode.DrainNodeInfo.parse(sqlRebalance.getDrainNode());
-            drainNodeInfo.validate();
+            if (!(sqlRebalance.isRebalanceTenant() && sqlRebalance.getStoragePoolName().toString().equalsIgnoreCase(
+                StoragePoolManager.RECYCLE_STORAGE_POOL_NAME))) {
+                PolicyDrainNode.DrainNodeInfo drainNodeInfo =
+                    PolicyDrainNode.DrainNodeInfo.parse(sqlRebalance.getDrainNode());
+                drainNodeInfo.validate();
+            }
         }
 
         String lockResource = sqlRebalance.getKind().name();
@@ -290,6 +348,9 @@ private Cursor handleLogicalRebalance(SqlRebalance sqlRebalance, ExecutionContex
         String resource = "";
         if (sqlRebalance.isRebalanceTableGroup()) {
             resource = String.format("tablegroup:%s", sqlRebalance.getTableGroupName());
+        } else if (sqlRebalance.isRebalanceTenant()) {
+            resource =
+                ActionUtils.genRebalanceTenantResourceName(sqlRebalance.getStoragePoolName().toString());
         }
         AtomicReference<Boolean> replicateRequest = new AtomicReference<>(false);
         String finalResource = resource;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalRebalanceMasterHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalRebalanceMasterHandler.java
index 51ac21bfb..b3aacbc1e 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalRebalanceMasterHandler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalRebalanceMasterHandler.java
@@ -22,6 +22,7 @@
 import com.alibaba.polardbx.common.exception.TddlRuntimeException;
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
 import com.alibaba.polardbx.common.utils.HttpClientHelper;
+import com.alibaba.polardbx.common.utils.logger.Logger;
 import com.alibaba.polardbx.druid.util.StringUtils;
 import com.alibaba.polardbx.executor.cursor.Cursor;
 import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor;
@@ -30,6 +31,7 @@
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal;
 import com.alibaba.polardbx.optimizer.utils.RelUtils;
+import com.alibaba.polardbx.statistics.SQLRecorderLogger;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.sql.SqlNode;
 import org.apache.calcite.sql.SqlRebalanceMaster;
@@ -40,6 +42,8 @@
 **/
 public class LogicalRebalanceMasterHandler extends HandlerCommon {
 
+    private static final Logger cdcLogger = SQLRecorderLogger.cdcLogger;
+
     public LogicalRebalanceMasterHandler(IRepository repo) {
         super(repo);
     }
@@ -60,13 +64,15 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) {
         try {
             res = HttpClientHelper.doGet("http://" + daemonEndpoint + "/system/rebalance");
         } catch (Exception e) {
+            cdcLogger.error("rebalance master error!", e);
             throw new RuntimeException("rebalance master failed", e);
         }
 
-        ResultCode resultCode = JSON.parseObject(res, ResultCode.class);
+        ResultCode resultCode = JSON.parseObject(res, ResultCode.class);
         if (resultCode.getCode() == CdcConstants.SUCCESS_CODE) {
             return new AffectRowCursor(0);
         } else {
+            cdcLogger.warn("rebalance master failed! code:" + resultCode.getCode() + ", msg:" + resultCode.getMsg());
             throw new TddlRuntimeException(ErrorCode.ERR_CDC_GENERIC, resultCode.getMsg());
         }
     }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalReplicaHashcheckHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalReplicaHashcheckHandler.java
new file mode 100644
index 000000000..928f01c82
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalReplicaHashcheckHandler.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.handler;
+
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.common.utils.Pair;
+import com.alibaba.polardbx.executor.cursor.Cursor;
+import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor;
+import com.alibaba.polardbx.executor.rplchecker.LogicalTableHashCalculator;
+import com.alibaba.polardbx.executor.spi.IRepository;
+import com.alibaba.polardbx.optimizer.config.table.ColumnMeta;
+import com.alibaba.polardbx.optimizer.config.table.SchemaManager;
+import com.alibaba.polardbx.optimizer.config.table.TableMeta;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.datatype.DataTypes;
+import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.sql.SqlBasicCall;
+import org.apache.calcite.sql.SqlIdentifier;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlNode;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.SqlReplicaHashcheck;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * @author yudong
+ * @since 2023/8/22 15:30
+ **/
+public class LogicalReplicaHashcheckHandler extends HandlerCommon {
+    public LogicalReplicaHashcheckHandler(IRepository repo) {
+        super(repo);
+    }
+
+    @Override
+    public Cursor handle(RelNode logicalPlan, ExecutionContext ec) {
+        SqlReplicaHashcheck sqlNode = (SqlReplicaHashcheck) ((LogicalDal) logicalPlan).getNativeSqlNode();
+
+        SqlIdentifier tableSource = (SqlIdentifier) sqlNode.getFrom();
+        String schema = tableSource.names.get(0);
+        String table = tableSource.getLastName();
+        final SchemaManager sm = ec.getSchemaManager(schema);
+        final TableMeta baseTableMeta = sm.getTable(table);
+
+        List<String> columnList = new ArrayList<>();
+        List<ColumnMeta> allColumns = baseTableMeta.getAllColumns();
+        for (ColumnMeta column : allColumns) {
+            // the implicit primary key is not guaranteed to be consistent between upstream and downstream, so it is excluded from the check
+            if (column.getName().equals("_drds_implicit_id_")) {
+                continue;
+            }
+            columnList.add(column.getName());
+        }
+
+        LogicalTableHashCalculator calculator =
+            new LogicalTableHashCalculator(schema, table, columnList, sqlNode.getLowerBounds(),
+                sqlNode.getUpperBounds(),
ec); + Long hash = calculator.calculateHash(); + + ArrayResultCursor result = new ArrayResultCursor("replica hashcheck"); + result.addColumn("hash", DataTypes.LongType); + result.addRow(new Object[] {hash}); + return result; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalReplicationBaseHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalReplicationBaseHandler.java index 2e46b6d0e..c48fa1789 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalReplicationBaseHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalReplicationBaseHandler.java @@ -16,9 +16,14 @@ package com.alibaba.polardbx.executor.handler; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.rpc.cdc.RplCommandResponse; +import io.grpc.Channel; +import io.grpc.ManagedChannel; import org.apache.calcite.rel.RelNode; /** diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalResetMasterHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalResetMasterHandler.java index 5babc3072..d2bd223d6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalResetMasterHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalResetMasterHandler.java @@ -22,6 +22,7 @@ import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.HttpClientHelper; +import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.druid.util.StringUtils; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; @@ -30,6 +31,7 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; import com.alibaba.polardbx.optimizer.utils.RelUtils; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.SqlResetMaster; @@ -40,6 +42,8 @@ **/ public class LogicalResetMasterHandler extends HandlerCommon { + private static final Logger cdcLogger = SQLRecorderLogger.cdcLogger; + public LogicalResetMasterHandler(IRepository repo) { super(repo); } @@ -60,30 +64,36 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { try { res = HttpClientHelper.doGet("http://" + daemonEndpoint + "/system/reset"); } catch (Exception e) { + cdcLogger.error("reset master error!", e); throw new RuntimeException("reset master error", e); } - ResultCode resultCode = JSON.parseObject(res, ResultCode.class); + ResultCode resultCode = JSON.parseObject(res, ResultCode.class); if (resultCode.getCode() != CdcConstants.SUCCESS_CODE) { + cdcLogger.warn("reset master failed! 
code:" + resultCode.getCode() + ", msg:" + resultCode.getMsg()); throw new TddlRuntimeException(ErrorCode.ERR_CDC_GENERIC, resultCode.getMsg()); } try { res = HttpClientHelper.doGet("http://" + daemonEndpoint + "/system/cleanBinlog"); } catch (Exception e) { + cdcLogger.error("clean cdc binlog error!", e); throw new RuntimeException("reset master error", e); } resultCode = JSON.parseObject(res, ResultCode.class); if (resultCode.getCode() != CdcConstants.SUCCESS_CODE) { + cdcLogger.warn("clean cdc binlog failed! code:" + resultCode.getCode() + ", msg:" + resultCode.getMsg()); throw new TddlRuntimeException(ErrorCode.ERR_CDC_GENERIC, resultCode.getMsg()); } try { res = HttpClientHelper.doGet("http://" + daemonEndpoint + "/system/start"); } catch (Exception e) { + cdcLogger.error("start master error!", e); throw new RuntimeException("reset master error", e); } resultCode = JSON.parseObject(res, ResultCode.class); if (resultCode.getCode() != CdcConstants.SUCCESS_CODE) { + cdcLogger.warn("start master failed! code:" + resultCode.getCode() + ", msg:" + resultCode.getMsg()); throw new TddlRuntimeException(ErrorCode.ERR_CDC_GENERIC, resultCode.getMsg()); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalResetReplicaCheckTableHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalResetReplicaCheckTableHandler.java new file mode 100644 index 000000000..ed4ecc8f9 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalResetReplicaCheckTableHandler.java @@ -0,0 +1,84 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.executor.handler;
+
+import com.alibaba.fastjson.JSON;
+import com.alibaba.polardbx.common.cdc.CdcConstants;
+import com.alibaba.polardbx.common.cdc.ResultCode;
+import com.alibaba.polardbx.common.cdc.RplConstants;
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.common.utils.HttpClientHelper;
+import com.alibaba.polardbx.common.utils.PooledHttpHelper;
+import com.alibaba.polardbx.druid.util.StringUtils;
+import com.alibaba.polardbx.executor.cursor.Cursor;
+import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor;
+import com.alibaba.polardbx.executor.spi.IRepository;
+import com.alibaba.polardbx.net.util.CdcTargetUtil;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.sql.SqlResetReplicaCheck;
+import org.apache.http.entity.ContentType;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * @author yudong
+ * @since 2023/11/9 10:42
+ **/
+public class LogicalResetReplicaCheckTableHandler extends HandlerCommon {
+
+    private static final String API_PATTERN = "http://%s/replica/fullValidation/reset";
+
+    public LogicalResetReplicaCheckTableHandler(IRepository repo) {
+        super(repo);
+    }
+
+    @Override
+    public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) {
+        SqlResetReplicaCheck sqlResetReplicaCheck =
+            (SqlResetReplicaCheck) ((LogicalDal) logicalPlan).getNativeSqlNode();
+
+        String dbName = sqlResetReplicaCheck.getDbName().toString();
+        if (StringUtils.isEmpty(dbName)) {
+            throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, "database cannot be empty!");
+        }
+        Map<String, String> params = new HashMap<>();
+        params.put(RplConstants.RPL_FULL_VALID_DB, dbName);
+        if (sqlResetReplicaCheck.getTableName() != null) {
+            String tbName = sqlResetReplicaCheck.getTableName().toString();
+            params.put(RplConstants.RPL_FULL_VALID_TB, tbName);
+        }
+
+        String daemonEndpoint = CdcTargetUtil.getDaemonMasterTarget();
+        String url = String.format(API_PATTERN, daemonEndpoint);
+        String res;
+        try {
+            res = PooledHttpHelper.doPost(url, ContentType.APPLICATION_JSON, JSON.toJSONString(params), 10000);
+        } catch (Exception e) {
+            throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, e);
+        }
+
+        ResultCode httpResult = JSON.parseObject(res, ResultCode.class);
+        if (httpResult.getCode() != CdcConstants.SUCCESS_CODE) {
+            throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, httpResult.getMsg());
+        }
+        return new AffectRowCursor(0);
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalResetSlaveHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalResetSlaveHandler.java
index afc401673..402e44d06 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalResetSlaveHandler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalResetSlaveHandler.java
@@ -23,12 +23,14 @@
 import com.alibaba.polardbx.common.exception.TddlRuntimeException;
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
 import com.alibaba.polardbx.common.utils.PooledHttpHelper;
+import com.alibaba.polardbx.common.utils.logger.Logger;
 import com.alibaba.polardbx.executor.cursor.Cursor;
 import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor;
 import com.alibaba.polardbx.executor.spi.IRepository;
 import com.alibaba.polardbx.net.util.CdcTargetUtil;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal;
+import com.alibaba.polardbx.statistics.SQLRecorderLogger;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.sql.SqlResetSlave;
 import org.apache.http.entity.ContentType;
@@ -39,6 +41,8 @@
 */
 public class LogicalResetSlaveHandler extends LogicalReplicationBaseHandler {
 
+    private static final Logger cdcLogger = SQLRecorderLogger.cdcLogger;
+
     public LogicalResetSlaveHandler(IRepository repo) {
         super(repo);
     }
@@ -57,10 +61,12 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) {
                 ContentType.APPLICATION_JSON, JSON.toJSONString(sqlNode.getParams()), 10000);
         } catch (Exception e) {
+            cdcLogger.error("reset slave error!", e);
             throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, e);
         }
         ResultCode httpResult = JSON.parseObject(res, ResultCode.class);
         if (httpResult.getCode() != CdcConstants.SUCCESS_CODE) {
+            cdcLogger.warn("reset slave failed! code:" + httpResult.getCode() + ", msg:" + httpResult.getMsg());
             throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, httpResult.getMsg());
         }
         return new AffectRowCursor(0);
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalRestartMasterHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalRestartMasterHandler.java
index 48731d4d0..b87e07712 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalRestartMasterHandler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalRestartMasterHandler.java
@@ -22,6 +22,7 @@
 import com.alibaba.polardbx.common.exception.TddlRuntimeException;
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
 import com.alibaba.polardbx.common.utils.HttpClientHelper;
+import com.alibaba.polardbx.common.utils.logger.Logger;
 import com.alibaba.polardbx.druid.util.StringUtils;
 import com.alibaba.polardbx.executor.cursor.Cursor;
 import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor;
@@ -30,6 +31,7 @@
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal;
 import com.alibaba.polardbx.optimizer.utils.RelUtils;
+import com.alibaba.polardbx.statistics.SQLRecorderLogger;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.sql.SqlNode;
 import org.apache.calcite.sql.SqlRestartMaster;
@@ -40,6 +42,8 @@
 **/
 public class LogicalRestartMasterHandler extends HandlerCommon {
 
+    private static final Logger cdcLogger = SQLRecorderLogger.cdcLogger;
+
     public LogicalRestartMasterHandler(IRepository repo) {
         super(repo);
     }
@@ -60,13 +64,15 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) {
         try {
             res = HttpClientHelper.doGet("http://" + daemonEndpoint + "/system/restart");
         } catch (Exception e) {
+            cdcLogger.error("restart master error!", e);
             throw new RuntimeException("restart master failed", e);
         }
 
-        ResultCode resultCode = JSON.parseObject(res, ResultCode.class);
+        ResultCode resultCode = JSON.parseObject(res, ResultCode.class);
         if (resultCode.getCode() == CdcConstants.SUCCESS_CODE) {
             return new AffectRowCursor(0);
         } else {
+            cdcLogger.warn("restart master
code:" + resultCode.getCode() + ", msg:" + resultCode.getMsg()); throw new TddlRuntimeException(ErrorCode.ERR_CDC_GENERIC, resultCode.getMsg()); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalRevokeSecurityLabelHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalRevokeSecurityLabelHandler.java new file mode 100644 index 000000000..9484257c9 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalRevokeSecurityLabelHandler.java @@ -0,0 +1,103 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.handler; + +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; +import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.gms.lbac.LBACPrivilegeCheckUtils; +import com.alibaba.polardbx.gms.lbac.LBACSecurityEntity; +import com.alibaba.polardbx.gms.lbac.LBACSecurityManager; +import com.alibaba.polardbx.gms.lbac.LBACSecurityPolicy; +import com.alibaba.polardbx.gms.privilege.PolarAccountInfo; +import com.alibaba.polardbx.gms.privilege.PolarPrivManager; +import com.alibaba.polardbx.lbac.LBACException; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.sql.SqlDropSecurityLabel; +import org.apache.calcite.sql.SqlRevokeSecurityLabel; + +import java.util.ArrayList; +import java.util.List; + +import static com.alibaba.polardbx.common.exception.code.ErrorCode.ERR_USER_NOT_EXISTS; + +/** + * @author pangzhaoxing + */ +public class LogicalRevokeSecurityLabelHandler extends HandlerCommon { + + public LogicalRevokeSecurityLabelHandler(IRepository repo) { + super(repo); + } + + @Override + public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { + if (!LBACPrivilegeCheckUtils.isHighPrivilege(executionContext.getPrivilegeContext().getPolarUserInfo())) { + throw new LBACException("check privilege failed"); + } + + SqlRevokeSecurityLabel revokeLabel = + (SqlRevokeSecurityLabel) ((LogicalDal) logicalPlan).getNativeSqlNode(); + + String policyName = revokeLabel.getPolicyName().getSimple().toLowerCase(); + String accessType = revokeLabel.getAccessType().getSimple().toLowerCase(); + String user = revokeLabel.getUserName().getUser(); + String host = revokeLabel.getUserName().getHost(); + + boolean readAccess = false; + boolean writeAccess = false; + if ("read".equalsIgnoreCase(accessType)) { + readAccess = true; + } else if ("write".equalsIgnoreCase(accessType)) { + writeAccess = true; + } else if ("all".equalsIgnoreCase(accessType)) { + readAccess = true; + writeAccess = true; + } else { + throw new LBACException("unknown access type"); + } + + LBACSecurityPolicy 
policy = LBACSecurityManager.getInstance().getPolicy(policyName); + if (policy == null) { + return new AffectRowCursor(0); + } + PolarAccountInfo accountInfo = PolarPrivManager.getInstance().getMatchUser(user, host); + if (accountInfo == null) { + return new AffectRowCursor(0); + } + + List esaList = new ArrayList<>(2); + if (readAccess) { + LBACSecurityEntity esa = new LBACSecurityEntity(); + esa.setEntityKey(LBACSecurityEntity.EntityKey.createUserKey(accountInfo.getIdentifier(), policyName)); + esa.setType(LBACSecurityEntity.EntityType.USER_READ); + esaList.add(esa); + } + if (writeAccess) { + LBACSecurityEntity esa = new LBACSecurityEntity(); + esa.setEntityKey(LBACSecurityEntity.EntityKey.createUserKey(accountInfo.getIdentifier(), policyName)); + esa.setType(LBACSecurityEntity.EntityType.USER_WRITE); + esaList.add(esa); + } + + int affectRow = LBACSecurityManager.getInstance().deleteSecurityEntity(esaList); + return new AffectRowCursor(affectRow); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalSetCdcGlobalHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalSetCdcGlobalHandler.java index d5efca243..4c83ca0db 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalSetCdcGlobalHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalSetCdcGlobalHandler.java @@ -18,14 +18,14 @@ import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; import com.alibaba.polardbx.executor.spi.IRepository; -import com.alibaba.polardbx.gms.metadb.cdc.BinlogSystemConfigAccessor; +import com.alibaba.polardbx.gms.metadb.cdc.CdcConfigAccessor; import com.alibaba.polardbx.gms.util.MetaDbUtil; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.SqlSetCdcGlobal; @@ -33,6 +33,7 @@ import java.sql.Connection; import java.util.List; +import java.util.Properties; /** * @author yudong @@ -40,7 +41,7 @@ **/ public class LogicalSetCdcGlobalHandler extends HandlerCommon { - private static final Logger logger = LoggerFactory.getLogger(LogicalShowCdcStorageHandler.class); + private static final Logger cdcLogger = SQLRecorderLogger.cdcLogger; public LogicalSetCdcGlobalHandler(IRepository repo) { super(repo); @@ -54,19 +55,19 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { String configKeyPrefix = with == null ? 
"" : with.toString().replace("'", "") + ":"; try (Connection metaDbConn = MetaDbUtil.getConnection()) { - BinlogSystemConfigAccessor accessor = new BinlogSystemConfigAccessor(); + CdcConfigAccessor accessor = new CdcConfigAccessor(); accessor.setConnection(metaDbConn); - MetaDbUtil.beginTransaction(metaDbConn); + Properties props = new Properties(); for (Pair pair : variableAssignmentList) { String key = pair.getKey().toString(); key = key.replace("'", ""); String configKey = configKeyPrefix + key; String configValue = pair.getValue().toString(); - accessor.insert(configKey, configValue); + props.setProperty(configKey, configValue); } - MetaDbUtil.commit(metaDbConn); + accessor.updateInstConfigValue(props); } catch (Exception e) { - logger.error("set cdc global error", e); + cdcLogger.error("set cdc global error", e); throw new TddlNestableRuntimeException(); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowBinaryLogsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowBinaryLogsHandler.java index a6958781d..b038b04b4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowBinaryLogsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowBinaryLogsHandler.java @@ -26,13 +26,17 @@ import com.alibaba.polardbx.rpc.CdcRpcClient; import com.alibaba.polardbx.rpc.cdc.BinaryLog; import com.alibaba.polardbx.rpc.cdc.CdcServiceGrpc.CdcServiceBlockingStub; +import com.alibaba.polardbx.rpc.cdc.FullBinaryLog; import com.alibaba.polardbx.rpc.cdc.Request; import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.SqlShowBinaryLogs; +import org.apache.commons.lang3.StringUtils; import java.util.Iterator; +import static com.alibaba.polardbx.executor.utils.CdcExeUtil.tryExtractStreamNameFromUser; + /** * created by ziyang.lb */ @@ -46,15 +50,35 @@ public LogicalShowBinaryLogsHandler(IRepository repo) { public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { SqlShowBinaryLogs sqlShowBinaryLogs = (SqlShowBinaryLogs) ((LogicalShow) logicalPlan).getNativeSqlNode(); SqlNode with = sqlShowBinaryLogs.getWith(); - String streamName = with == null ? "" : RelUtils.lastStringValue(with); + String streamName = + with == null ? tryExtractStreamNameFromUser(executionContext) : RelUtils.lastStringValue(with); CdcServiceBlockingStub cdcServiceBlockingStub = - with == null ? CdcRpcClient.getCdcRpcClient().getCdcServiceBlockingStub() : + StringUtils.isBlank(streamName) ? 
CdcRpcClient.getCdcRpcClient().getCdcServiceBlockingStub() : CdcRpcClient.getCdcRpcClient().getCdcServiceBlockingStub(streamName); - Iterator logs = cdcServiceBlockingStub.showBinaryLogs( - Request.newBuilder().setStreamName(streamName).build()); - CdcResultCursor result = new CdcResultCursor("SHOW BINARY LOGS", logs, cdcServiceBlockingStub.getChannel()); - result.addColumn("Log_name", DataTypes.StringType); - result.addColumn("File_size", DataTypes.LongType); + + CdcResultCursor result; + if (sqlShowBinaryLogs.isFull()) { + Iterator fullLogs = cdcServiceBlockingStub.showFullBinaryLogs( + Request.newBuilder().setStreamName(streamName).build()); + result = new CdcResultCursor("SHOW FULL BINARY LOGS", fullLogs, cdcServiceBlockingStub.getChannel()); + result.addColumn("Log_name", DataTypes.StringType); + result.addColumn("File_size", DataTypes.LongType); + result.addColumn("Create_time", DataTypes.StringType); + result.addColumn("Last_modify_time", DataTypes.StringType); + result.addColumn("First_event_time", DataTypes.StringType); + result.addColumn("Last_event_time", DataTypes.StringType); + result.addColumn("Last_tso", DataTypes.StringType); + result.addColumn("Upload_status", DataTypes.StringType); + result.addColumn("File_location", DataTypes.StringType); + result.addColumn("ExtInfo", DataTypes.StringType); + } else { + Iterator logs = cdcServiceBlockingStub.showBinaryLogs( + Request.newBuilder().setStreamName(streamName).build()); + result = new CdcResultCursor("SHOW BINARY LOGS", logs, cdcServiceBlockingStub.getChannel()); + result.addColumn("Log_name", DataTypes.StringType); + result.addColumn("File_size", DataTypes.LongType); + } + result.initMeta(); return result; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowBinaryStreamsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowBinaryStreamsHandler.java index 9a7ce310e..7ed0ed32a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowBinaryStreamsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowBinaryStreamsHandler.java @@ -18,7 +18,6 @@ import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.spi.IRepository; @@ -27,14 +26,19 @@ import com.alibaba.polardbx.gms.util.MetaDbUtil; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; +import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalShow; +import com.alibaba.polardbx.optimizer.utils.RelUtils; import org.apache.calcite.rel.RelNode; +import org.apache.calcite.sql.SqlNode; +import org.apache.calcite.sql.SqlShowBinaryStreams; import java.sql.Connection; import java.sql.SQLException; import java.util.List; public class LogicalShowBinaryStreamsHandler extends HandlerCommon { - private static final Logger logger = LoggerFactory.getLogger(LogicalShowBinaryStreamsHandler.class); + private static final Logger cdcLogger = SQLRecorderLogger.cdcLogger; public LogicalShowBinaryStreamsHandler(IRepository repo) { super(repo); @@ -42,6 +46,11 @@ public LogicalShowBinaryStreamsHandler(IRepository 
repo) { @Override public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { + SqlShowBinaryStreams sqlShowBinaryStreams = + (SqlShowBinaryStreams) ((LogicalShow) logicalPlan).getNativeSqlNode(); + SqlNode with = sqlShowBinaryStreams.getWith(); + String groupName = with == null ? null : RelUtils.lastStringValue(with); + ArrayResultCursor result = new ArrayResultCursor("SHOW BINARY STREAMS"); result.addColumn("Group", DataTypes.StringType); result.addColumn("Stream", DataTypes.StringType); @@ -52,7 +61,12 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { BinlogStreamAccessor binlogStreamAccessor = new BinlogStreamAccessor(); try (Connection metaDbConn = MetaDbUtil.getConnection()) { binlogStreamAccessor.setConnection(metaDbConn); - List streams = binlogStreamAccessor.listAllStream(); + List streams; + if (groupName == null) { + streams = binlogStreamAccessor.listAllStream(); + } else { + streams = binlogStreamAccessor.listStreamInGroup(groupName); + } if (streams == null) { throw new TddlNestableRuntimeException("binlog multi stream is not support..."); } @@ -61,7 +75,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { stream.getGroupName(), stream.getStreamName(), stream.getFileName(), stream.getPosition()}); } } catch (SQLException e) { - logger.error("get binlog x stream fail", e); + cdcLogger.error("get binlog x stream fail", e); } return result; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowBinlogEventsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowBinlogEventsHandler.java index 09ec97526..ee196c99a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowBinlogEventsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowBinlogEventsHandler.java @@ -31,9 +31,12 @@ import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.SqlNodeList; import org.apache.calcite.sql.SqlShowBinlogEvents; +import org.apache.commons.lang3.StringUtils; import java.util.Iterator; +import static com.alibaba.polardbx.executor.utils.CdcExeUtil.tryExtractStreamNameFromUser; + public class LogicalShowBinlogEventsHandler extends HandlerCommon { public LogicalShowBinlogEventsHandler(IRepository repo) { super(repo); @@ -48,15 +51,19 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { offset = ((SqlNodeList) sqlShowBinlogEvents.getLimit()).get(0); rowCount = ((SqlNodeList) sqlShowBinlogEvents.getLimit()).get(1); } + + String fileName = sqlShowBinlogEvents.getLogName() == null ? "" : + RelUtils.lastStringValue(sqlShowBinlogEvents.getLogName()); SqlNode with = sqlShowBinlogEvents.getWith(); - String streamName = with == null ? "" : RelUtils.lastStringValue(with); + String streamName = with == null ? + extractStreamName(fileName, executionContext) : RelUtils.lastStringValue(with); + CdcServiceBlockingStub cdcServiceBlockingStub = - with == null ? CdcRpcClient.getCdcRpcClient().getCdcServiceBlockingStub() : + StringUtils.isBlank(streamName) ? CdcRpcClient.getCdcRpcClient().getCdcServiceBlockingStub() : CdcRpcClient.getCdcRpcClient().getCdcServiceBlockingStub(streamName); Iterator binlogEvents = cdcServiceBlockingStub.showBinlogEvents( ShowBinlogEventsRequest.newBuilder() - .setLogName(sqlShowBinlogEvents.getLogName() == null ? 
"" - : RelUtils.lastStringValue(sqlShowBinlogEvents.getLogName())) + .setLogName(fileName) .setPos(sqlShowBinlogEvents.getPos() == null ? -1 : RelUtils.longValue(sqlShowBinlogEvents.getPos()).intValue()) .setOffset(offset == null ? -1 : RelUtils.longValue(offset).intValue()) @@ -74,4 +81,17 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { result.initMeta(); return result; } + + private String extractStreamName(String fileName, ExecutionContext executionContext) { + if (StringUtils.isNotBlank(fileName)) { + int idx = fileName.lastIndexOf('_'); + if (idx == -1) { + return ""; + } else { + return fileName.substring(0, idx); + } + } else { + return tryExtractStreamNameFromUser(executionContext); + } + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCclRuleHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCclRuleHandler.java index 87ecc3658..b36c9a1f2 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCclRuleHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCclRuleHandler.java @@ -23,6 +23,7 @@ import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.config.ConfigDataMode; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.spi.IRepository; @@ -30,11 +31,14 @@ import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.gms.metadb.ccl.CclRuleAccessor; import com.alibaba.polardbx.gms.metadb.ccl.CclRuleRecord; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.topology.SystemDbHelper; import com.alibaba.polardbx.gms.util.MetaDbUtil; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalCcl; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlDal; import org.apache.calcite.sql.SqlShowCclRule; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCclTriggerHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCclTriggerHandler.java index 81f21f943..a073e8737 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCclTriggerHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCclTriggerHandler.java @@ -17,12 +17,11 @@ package com.alibaba.polardbx.executor.handler; import com.alibaba.fastjson.JSON; -import com.alibaba.polardbx.druid.sql.ast.SqlType; -import com.google.common.collect.Lists; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.druid.sql.ast.SqlType; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.spi.IRepository; @@ -33,6 +32,7 @@ import 
com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalCcl; +import com.google.common.collect.Lists; import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlDal; import org.apache.calcite.sql.SqlShowCclTrigger; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCdcStorageHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCdcStorageHandler.java index ec6ad99de..a32f2df08 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCdcStorageHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCdcStorageHandler.java @@ -18,7 +18,6 @@ import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.spi.IRepository; @@ -27,6 +26,7 @@ import com.alibaba.polardbx.gms.util.MetaDbUtil; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; import org.apache.calcite.rel.RelNode; import java.sql.Connection; @@ -38,7 +38,7 @@ **/ public class LogicalShowCdcStorageHandler extends HandlerCommon { - private static final Logger logger = LoggerFactory.getLogger(LogicalShowCdcStorageHandler.class); + private static final Logger cdcLogger = SQLRecorderLogger.cdcLogger; public LogicalShowCdcStorageHandler(IRepository repo) { super(repo); @@ -59,7 +59,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { } } } catch (Exception e) { - logger.error("get final task info error", e); + cdcLogger.error("show cdc storage error!", e); throw new TddlNestableRuntimeException(e); } return result; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCreateTableGroupHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCreateTableGroupHandler.java index 5cde572c9..e1cf34444 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCreateTableGroupHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCreateTableGroupHandler.java @@ -36,11 +36,13 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalShow; +import com.alibaba.polardbx.optimizer.tablegroup.TableGroupUtils; import com.alibaba.polardbx.optimizer.utils.RelUtils; import com.alibaba.polardbx.repo.mysql.common.ResultSetHelper; import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlShowCreateTable; import org.apache.calcite.sql.SqlShowCreateTableGroup; +import org.apache.commons.lang3.StringUtils; import java.util.ArrayList; import java.util.List; @@ -75,8 +77,17 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { if (tableGroupConfig == null) { throw new TddlRuntimeException(ErrorCode.ERR_TABLE_GROUP_NOT_EXISTS, tableGroupName); } + String partitionDef = 
TableGroupUtils.getPartitionDefinition(tableGroupConfig, executionContext); + String createTableGroup; + if (!StringUtils.isEmpty(partitionDef)) { + createTableGroup = "CREATE TABLEGROUP `" + tableGroupName.toUpperCase() + "` " + partitionDef; + } else { + createTableGroup = "CREATE TABLEGROUP `" + tableGroupName.toUpperCase() + "`"; + } result.addRow( - new Object[] {tableGroupName, tableGroupConfig.getTableGroupRecord().getPartition_definition()}); + new Object[] { + tableGroupName, + createTableGroup}); return result; } else { throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCreateTableHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCreateTableHandler.java index c7f61072f..1b67ec98f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCreateTableHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCreateTableHandler.java @@ -16,27 +16,45 @@ package com.alibaba.polardbx.executor.handler; +import com.alibaba.polardbx.common.charset.CharsetName; +import com.alibaba.polardbx.common.charset.CollationName; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.TStringUtil; +import com.alibaba.polardbx.config.ConfigDataMode; +import com.alibaba.polardbx.druid.DbType; import com.alibaba.polardbx.druid.sql.SQLUtils; +import com.alibaba.polardbx.druid.sql.ast.SQLCurrentTimeExpr; +import com.alibaba.polardbx.druid.sql.ast.SQLExpr; +import com.alibaba.polardbx.druid.sql.ast.SQLIndexDefinition; +import com.alibaba.polardbx.druid.sql.ast.expr.SQLIdentifierExpr; import com.alibaba.polardbx.druid.sql.ast.statement.SQLColumnDefinition; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLNotNullConstraint; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLSelectOrderByItem; import com.alibaba.polardbx.druid.sql.ast.statement.SQLTableElement; +import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.MySqlPrimaryKey; import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.MysqlForeignKey; import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlCreateTableStatement; +import com.alibaba.polardbx.druid.sql.dialect.mysql.parser.MySqlExprParser; +import com.alibaba.polardbx.druid.sql.parser.ByteString; import com.alibaba.polardbx.druid.util.JdbcConstants; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.gms.metadb.table.ColumnsRecord; +import com.alibaba.polardbx.gms.metadb.table.TablesRecord; import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalShow; import com.alibaba.polardbx.repo.mysql.common.ResultSetHelper; import org.apache.calcite.rel.RelNode; +import org.apache.calcite.sql.SqlIdentifier; +import org.apache.commons.lang3.StringUtils; import java.util.ArrayList; import java.util.List; -import static org.apache.calcite.sql.SqlIdentifier.surroundWithBacktick; +import java.util.Locale; +import java.util.Optional; +import java.util.stream.Collectors; /** * @author mengshi @@ -50,6 +68,7 @@ public LogicalShowCreateTableHandler(IRepository repo) { super(repo); partitionDatabaseHandler = new LogicalShowCreateTablesForPartitionDatabaseHandler(repo); shardingDatabaseHandler = new 
LogicalShowCreateTablesForShardingDatabaseHandler(repo); + } @Override @@ -96,7 +115,7 @@ public static String reorgLogicalColumnOrder(String schemaName, String logicalTa for (SQLTableElement tableElement : createTableStmt.getTableElementList()) { if (tableElement instanceof SQLColumnDefinition) { String physicalColumnName = - SQLUtils.normalize(((SQLColumnDefinition) tableElement).getColumnName()); + SQLUtils.normalizeNoTrim(((SQLColumnDefinition) tableElement).getColumnName()); if (TStringUtil.equalsIgnoreCase(physicalColumnName, logicalColumn.columnName)) { newTableElements.add(tableElement); break; @@ -117,4 +136,81 @@ public static String reorgLogicalColumnOrder(String schemaName, String logicalTa return createTableStmt.toString(); } + public static MySqlCreateTableStatement fetchShowCreateTableFromMetaDb(String schemaName, String tableName, + ExecutionContext executionContext) { + final MySqlCreateTableStatement createTableStmt = new MySqlCreateTableStatement(); + createTableStmt.setTableName(tableName); + // Always show columns in logical column order no matter what the mode. + List logicalColumnsInOrder = + ResultSetHelper.fetchLogicalColumnsInOrder(schemaName, tableName); + + List primaryKeys = new ArrayList<>(); + for (ColumnsRecord logicalColumn : logicalColumnsInOrder) { + SQLColumnDefinition sqlColumnDefinition = new SQLColumnDefinition(); + sqlColumnDefinition.setName(SqlIdentifier.surroundWithBacktick(logicalColumn.columnName)); + sqlColumnDefinition.setDbType(DbType.mysql); + MySqlExprParser parser = new MySqlExprParser(ByteString.from(logicalColumn.columnType)); + sqlColumnDefinition.setDataType(parser.parseDataType()); + + if (logicalColumn.columnDefault != null) { + SQLExpr sqlExpr = SQLUtils.toDefaultSQLExpr( + sqlColumnDefinition.getDataType(), logicalColumn.columnDefault, logicalColumn.flag); + if (sqlExpr != null) { + sqlColumnDefinition.setDefaultExpr(sqlExpr); + } + + if (sqlExpr instanceof SQLCurrentTimeExpr) { + if (!StringUtils.isEmpty(logicalColumn.extra) && logicalColumn.extra.toUpperCase(Locale.ROOT) + .contains("ON UPDATE")) { + sqlColumnDefinition.setOnUpdate(sqlExpr); + } + } + } + + if (!StringUtils.isEmpty(logicalColumn.columnComment)) { + sqlColumnDefinition.setComment(logicalColumn.columnComment); + } + + if ("NO".equalsIgnoreCase(logicalColumn.isNullable)) { + sqlColumnDefinition.addConstraint(new SQLNotNullConstraint()); + } + + if ("PRI".equalsIgnoreCase(logicalColumn.columnKey)) { + primaryKeys.add(SqlIdentifier.surroundWithBacktick(logicalColumn.columnName)); + } + boolean autoIncrement = TStringUtil.equalsIgnoreCase(logicalColumn.extra, "auto_increment"); + if (autoIncrement) { + sqlColumnDefinition.setAutoIncrement(true); + } + createTableStmt.addColumn(sqlColumnDefinition); + } + + //PRIMARY + List colNames = primaryKeys.stream() + .map(e -> new SQLSelectOrderByItem(new SQLIdentifierExpr(e))) + .collect(Collectors.toList()); + MySqlPrimaryKey newPrimaryKey = new MySqlPrimaryKey(); + SQLIndexDefinition indexDefinition = newPrimaryKey.getIndexDefinition(); + indexDefinition.setKey(true); + indexDefinition.setType("PRIMARY"); + indexDefinition.getColumns().addAll(colNames); + createTableStmt.getTableElementList().add(newPrimaryKey); + + TablesRecord tablesRecord = + ResultSetHelper.fetchLogicalTableRecord(schemaName, tableName); + if (!StringUtils.isEmpty(tablesRecord.engine)) { + createTableStmt.addOption("ENGINE", new SQLIdentifierExpr(tablesRecord.engine)); + } + + if (!StringUtils.isEmpty(tablesRecord.tableCollation)) { + String 
tableCharacterSet = Optional.ofNullable(tablesRecord.tableCollation) + .map(CollationName::getCharsetOf) + .map(Enum::name) + .orElse(CharsetName.DEFAULT_CHARACTER_SET); + createTableStmt.addOption("CHARACTER SET", new SQLIdentifierExpr(tableCharacterSet)); + createTableStmt.addOption("COLLATE", new SQLIdentifierExpr(tablesRecord.tableCollation)); + } + return createTableStmt; + } + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCreateTablesForPartitionDatabaseHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCreateTablesForPartitionDatabaseHandler.java index 6a98f8a73..86d8536f3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCreateTablesForPartitionDatabaseHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCreateTablesForPartitionDatabaseHandler.java @@ -16,15 +16,21 @@ package com.alibaba.polardbx.executor.handler; +import com.alibaba.polardbx.common.ColumnarTableOptions; import com.alibaba.polardbx.common.Engine; import com.alibaba.polardbx.common.TddlConstants; +import com.alibaba.polardbx.common.constants.SequenceAttribute; import com.alibaba.polardbx.common.ddl.foreignkey.ForeignKeyData; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.properties.ConnectionProperties; import com.alibaba.polardbx.common.utils.TStringUtil; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.config.ConfigDataMode; import com.alibaba.polardbx.druid.sql.SQLUtils; +import com.alibaba.polardbx.druid.sql.ast.SQLExpr; import com.alibaba.polardbx.druid.sql.ast.SQLName; import com.alibaba.polardbx.druid.sql.ast.SQLOrderingSpecification; import com.alibaba.polardbx.druid.sql.ast.SQLPartitionBy; @@ -51,10 +57,18 @@ import com.alibaba.polardbx.executor.common.ExecutorContext; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.ddl.ImplicitTableGroupUtil; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.gms.locality.LocalityDesc; +import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; +import com.alibaba.polardbx.gms.metadb.seq.SequencesAccessor; +import com.alibaba.polardbx.gms.metadb.seq.SequencesRecord; +import com.alibaba.polardbx.gms.metadb.table.ColumnarTableEvolutionAccessor; +import com.alibaba.polardbx.gms.metadb.table.ColumnarTableEvolutionRecord; import com.alibaba.polardbx.gms.metadb.table.IndexStatus; import com.alibaba.polardbx.gms.metadb.table.IndexVisibility; +import com.alibaba.polardbx.gms.metadb.table.TablesAccessor; +import com.alibaba.polardbx.gms.metadb.table.TablesRecord; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.optimizer.OptimizerContext; @@ -73,6 +87,7 @@ import com.alibaba.polardbx.optimizer.partition.PartitionInfo; import com.alibaba.polardbx.optimizer.partition.PartitionInfoManager; import com.alibaba.polardbx.optimizer.rule.TddlRuleManager; +import com.alibaba.polardbx.optimizer.sequence.SequenceManagerProxy; import com.alibaba.polardbx.optimizer.sql.sql2rel.TddlSqlToRelConverter; import 
com.alibaba.polardbx.optimizer.utils.RelUtils; import com.alibaba.polardbx.optimizer.view.InformationSchemaViewManager; @@ -86,9 +101,13 @@ import org.apache.calcite.sql.SqlShowCreateTable; import org.apache.calcite.sql.parser.SqlParserPos; import org.apache.calcite.sql.validate.SqlValidatorImpl; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; +import java.sql.Connection; +import java.sql.SQLException; import java.util.ArrayList; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -96,10 +115,14 @@ import java.util.TreeSet; import java.util.stream.Collectors; +import static java.lang.Long.max; + /** * @author mengshi */ public class LogicalShowCreateTablesForPartitionDatabaseHandler extends HandlerCommon { + private static final Logger logger = + LoggerFactory.getLogger(LogicalShowCreateTablesForPartitionDatabaseHandler.class); public LogicalShowCreateTablesForPartitionDatabaseHandler(IRepository repo) { super(repo); @@ -133,11 +156,19 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { result.addColumn("Create Table", DataTypes.StringType, false); result.initMeta(); - String sql = fetchShowCreateTableFromPhy(schemaName, tableName, showCreateTable, show, executionContext); - - sql = LogicalShowCreateTableHandler.reorgLogicalColumnOrder(schemaName, tableName, sql); + final boolean outputMySQLIndent = + executionContext.getParamManager().getBoolean(ConnectionParams.OUTPUT_MYSQL_INDENT); - result.initMeta(); + String sql; + if (executionContext.getParamManager().getBoolean(ConnectionParams.ENABLE_LOGICAL_TABLE_META) || + ConfigDataMode.isColumnarMode()) { + MySqlCreateTableStatement tableStatement = LogicalShowCreateTableHandler.fetchShowCreateTableFromMetaDb( + schemaName, tableName, executionContext); + sql = tableStatement.toString(); + } else { + sql = fetchShowCreateTableFromPhy(schemaName, tableName, showCreateTable, show, executionContext); + sql = LogicalShowCreateTableHandler.reorgLogicalColumnOrder(schemaName, tableName, sql); + } StringBuilder partitionStr = new StringBuilder(); TddlRuleManager tddlRuleManager = OptimizerContext.getContext(schemaName).getRuleManager(); @@ -157,6 +188,8 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { .getGsiManager() .getGsiTableAndIndexMeta(schemaName, tableName, IndexStatus.ALL); + boolean containImplicitColumn = false; + boolean containAutoIncrement = false; // handle implicit pk MySqlCreateTableStatement createTable = (MySqlCreateTableStatement) SQLUtils.parseStatementsWithDefaultFeatures(sql, JdbcConstants.MYSQL).get(0) @@ -171,6 +204,10 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { SQLColumnDefinition sqlColumnDefinition = (SQLColumnDefinition) sqlTableElement; String columnName = SQLUtils.normalizeNoTrim(sqlColumnDefinition.getColumnName()); ColumnMeta columnMeta = tableMeta.getColumnIgnoreCase(columnName); + if (sqlColumnDefinition.isAutoIncrement()) { + containAutoIncrement = true; + } + if (columnMeta != null && columnMeta.isBinaryDefault()) { // handle binary default value SQLHexExpr newDefaultVal = new SQLHexExpr(columnMeta.getField().getDefault()); @@ -184,6 +221,11 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { } } + if (sqlTableElement instanceof SQLColumnDefinition + && SqlValidatorImpl.isImplicitKey(((SQLColumnDefinition) sqlTableElement).getNameAsString())) { + containImplicitColumn = true; + } + 
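The hunk that follows reworks how SHOW CREATE TABLE decides whether to emit an AUTO_INCREMENT option, based on sequence metadata rather than on the physical table (see the translated comment in the hunk). Here is a self-contained sketch of that hide/show decision, with the record fields flattened into longs; the real code reads them as strings from SequencesRecord and falls back to hiding whenever parsing fails, and the numbers in main are illustrative only, not PolarDB-X defaults.

public class AutoIncrementDisplaySketch {
    enum SeqType { NEW, SIMPLE, GROUP }

    /** Returns true when SHOW CREATE TABLE should omit the AUTO_INCREMENT option. */
    static boolean hideAutoIncrement(SeqType type, long value, long startWith,
                                     long unitCount, long unitIndex, long innerStep) {
        switch (type) {
        case NEW:
        case SIMPLE:
            // the value never moved past START WITH, so the sequence was never used
            return value == startWith;
        case GROUP:
            // below the initial bound means nextval was never issued for this table
            return value < (unitIndex + unitCount) * innerStep;
        default:
            return true; // unknown type: err on the side of hiding, like the real code
        }
    }

    public static void main(String[] args) {
        // a GROUP sequence with unitIndex=0, unitCount=1, innerStep=100000 has bound 100000
        System.out.println(hideAutoIncrement(SeqType.GROUP, 100000, 0, 1, 0, 100000)); // false: used
        System.out.println(hideAutoIncrement(SeqType.GROUP, 0, 0, 1, 0, 100000));      // true: unused
        System.out.println(hideAutoIncrement(SeqType.SIMPLE, 1, 1, 0, 0, 0));          // true: unused
    }
}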
if (sqlTableElement instanceof SQLColumnDefinition && SqlValidatorImpl.isImplicitKey(((SQLColumnDefinition) sqlTableElement).getNameAsString()) && !needShowImplicitId(executionContext) && !showCreateTable.isFull()) { @@ -285,7 +327,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { List indexDefs = buildIndexDefs(schemaName, gsiMeta, tableName, tableMeta, localIndexes, showCreateTable.isFull(), - needShowHashByRange); + needShowHashByRange, executionContext); createTable.getTableElementList().addAll(indexDefs); if (tableMeta.isAutoPartition() && showCreateTable.isFull()) { @@ -294,6 +336,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { // fix table options Engine engine = tableMeta.getEngine(); + SQLAssignItem autoIncrementOption = null; for (SQLAssignItem tableOption : createTable.getTableOptions()) { if (tableOption.getTarget().toString().equalsIgnoreCase("ENGINE")) { if (tableOption.getValue() == null || !tableOption.getValue().toString() @@ -301,10 +344,116 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { tableOption.setValue(new SQLCharExpr(engine.name())); } } + + if (tableOption.getTarget().toString().equalsIgnoreCase("AUTO_INCREMENT")) { + autoIncrementOption = tableOption; + continue; + } + } + + /** + * Handle AUTO_INCREMENT separately: + * 1. The value is taken from the sequence manager, not from the physical table. + * 2. If the `_drds_implicit_id_` column exists, the AUTO_INCREMENT value is not shown. + * 3. For group, simple and new sequences: if the table's sequence has never been used, SHOW CREATE TABLE does not display the AUTO_INCREMENT value (consistent with MySQL). + * */ + if (!containImplicitColumn && containAutoIncrement) { + String sequenceName = SequenceAttribute.AUTO_SEQ_PREFIX + tableName; + + SequencesAccessor sequencesAccessor = new SequencesAccessor(); + boolean hide = false; + List<SequencesRecord> sequence = null; + try (Connection metaDbConn = MetaDbDataSource.getInstance().getConnection()) { + sequencesAccessor.setConnection(metaDbConn); + String whereClause = String.format(" where name = \"%s\"", sequenceName); + sequence = sequencesAccessor.show(schemaName, whereClause); + } catch (SQLException e) { + logger.error("failed to query sequence info", e); + hide = true; + } + + Long valueInMeta = null; + if (sequence != null && sequence.size() == 1) { + SequencesRecord sequencesRecord = sequence.get(0); + SequenceAttribute.Type type = SequenceAttribute.Type.fromString(sequencesRecord.type); + if (type == SequenceAttribute.Type.NEW || type == SequenceAttribute.Type.SIMPLE) { + Long startWith = null; + boolean parseSucceed = true; + try { + valueInMeta = Long.parseLong(sequencesRecord.value); + startWith = Long.parseLong(sequencesRecord.startWith); + } catch (Exception ignore) { + hide = true; + parseSucceed = false; + } + + if (parseSucceed && valueInMeta.equals(startWith)) { + hide = true; + } + } else if (type == SequenceAttribute.Type.GROUP) { + Long unitCount = null, unitIndex = null, innerStep = null; + boolean parseSucceed = true; + try { + valueInMeta = Long.parseLong(sequencesRecord.value); + unitCount = Long.parseLong(sequencesRecord.unitCount); + unitIndex = Long.parseLong(sequencesRecord.unitIndex); + innerStep = Long.parseLong(sequencesRecord.innerStep); + } catch (Exception ignore) { + hide = true; + parseSucceed = false; + } + //value is still the initial value, meaning the sequence has not been used yet and can be hidden + if (parseSucceed) { + Long initBound = (unitIndex + unitCount) * innerStep; + if (valueInMeta < initBound) { + hide = true; + } + } + } + } else { + hide = true; + } + + if (!hide) { + Long seqVal = SequenceManagerProxy.getInstance() +
.currValue(schemaName, sequenceName) + 1L; + if (valueInMeta != null) { + seqVal = max(seqVal, valueInMeta); + } + if (autoIncrementOption != null) { + autoIncrementOption.setValue(new SQLIntegerExpr(seqVal)); + } else { + autoIncrementOption = new SQLAssignItem(); + autoIncrementOption.setTarget(new SQLIdentifierExpr("AUTO_INCREMENT")); + autoIncrementOption.setValue(new SQLIntegerExpr(seqVal)); + createTable.getTableOptions().add(autoIncrementOption); + } + } else { + Iterator iterator = createTable.getTableOptions().iterator(); + while (iterator.hasNext()) { + SQLAssignItem option = iterator.next(); + if (option.getTarget().toString().equalsIgnoreCase("AUTO_INCREMENT")) { + iterator.remove(); + } + } + } + } else { + Iterator iterator = createTable.getTableOptions().iterator(); + while (iterator.hasNext()) { + SQLAssignItem option = iterator.next(); + if (option.getTarget().toString().equalsIgnoreCase("AUTO_INCREMENT")) { + iterator.remove(); + } + } + } + + //handle lbac attr + if (showCreateTable.isFull()) { + LogicalShowCreateTablesForShardingDatabaseHandler.buildLBACAttr(createTable, schemaName, tableName); } //sql = createTable.toString(); - sql = createTable.toSqlString(needShowHashByRange); + sql = createTable.toSqlString(needShowHashByRange, outputMySQLIndent); String tableLocality = partitionInfoManager.getPartitionInfo(tableName).getLocality(); LocalityDesc localityDesc = LocalityInfoUtils.parse(tableLocality); @@ -320,6 +469,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { } sql = sql + buildTableGroupInfo(schemaName, showCreateTable, partInfo); + sql = tryAttachImplicitTableGroupInfo(executionContext, schemaName, tableName, sql); result.addRow(new Object[] {tableName, sql}); return result; @@ -425,10 +575,13 @@ public List buildIndexDefs(String schemaName, TableMeta meta, List localIndexes, boolean full, - boolean needShowHashByRange) { + boolean needShowHashByRange, + ExecutionContext executionContext) { + boolean enableUseKeyForAllLocalIndex = + executionContext.getParamManager().getBoolean(ConnectionParams.ENABLE_USE_KEY_FOR_ALL_LOCAL_INDEX); Set ignoredLocalIndexNames = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); List indexDefs = new ArrayList<>(); - if (meta.withGsi()) { + if (meta.withGsi() && gsiMeta.getTableMeta() != null && !gsiMeta.getTableMeta().isEmpty()) { final GsiMetaManager.GsiTableMetaBean mainTableMeta = gsiMeta.getTableMeta().get(mainTableName); for (Map.Entry entry : mainTableMeta.indexMap.entrySet()) { final String indexName = entry.getKey(); @@ -491,6 +644,8 @@ public List buildIndexDefs(String schemaName, final MySqlUnique indeDef = new MySqlUnique(); indeDef.getIndexDefinition().setIndex(true); + boolean isAutoPartTbl = meta.isAutoPartition(); + useKeyInsteadOfIndexIfNeed(full, enableUseKeyForAllLocalIndex, isAutoPartTbl, indeDef); if (!indexMeta.nonUnique) { indeDef.getIndexDefinition().setType("UNIQUE"); } @@ -511,8 +666,22 @@ public List buildIndexDefs(String schemaName, } else { indeDef.getCovering().addAll(coveringColumns); } + indeDef.setColumnar(indexMeta.columnarIndex); + if (full && indexMeta.columnarIndex) { + // set options + Map options = getColumnarIndexOptions(indexMeta.tableSchema, indexMeta.indexName); + if (options != null) { + indeDef.setDictionaryColumns(options.get(ColumnarTableOptions.DICTIONARY_COLUMNS)); + } + TablesAccessor tablesAccessor = new TablesAccessor(); + String engine = getColumnarIndexEngine(schemaName, indexMeta.indexName, tablesAccessor); + if (engine != null) { + 
indeDef.setEngineName(new SQLIdentifierExpr(engine)); + } + } + if (!coveringColumns.isEmpty() || full || !meta.isAutoPartition()) { - if (!indeDef.isClustered()) { + if (!indeDef.isClustered() && !indeDef.isColumnar()) { indeDef.setGlobal(true); // Set one of global or clustered. } } @@ -526,12 +695,14 @@ public List buildIndexDefs(String schemaName, indexName = SQLUtils.normalizeNoTrim(key.getName().getSimpleName()); if (meta.isAutoPartition() || full) { key.getIndexDefinition().setLocal(true); + removeLocalKeyWordOnShowCreateTableIfNeed(full, enableUseKeyForAllLocalIndex, key); } } else if (localIndexElement instanceof MySqlTableIndex) { MySqlTableIndex key = (MySqlTableIndex) localIndexElement; indexName = SQLUtils.normalizeNoTrim(key.getName().getSimpleName()); if (meta.isAutoPartition() || full) { key.setLocal(true); + removeLocalKeyWordOnShowCreateTableIfNeed(full, enableUseKeyForAllLocalIndex, key); } } else { continue; @@ -548,6 +719,37 @@ public List buildIndexDefs(String schemaName, return indexDefs; } + private Map getColumnarIndexOptions(String schemaName, String indexName) { + ColumnarTableEvolutionAccessor accessor = new ColumnarTableEvolutionAccessor(); + try (Connection metaDbConn = MetaDbDataSource.getInstance().getConnection()) { + accessor.setConnection(metaDbConn); + List records = + accessor.querySchemaIndexLatest(schemaName, indexName); + if (CollectionUtils.isEmpty(records)) { + logger.error("empty columnar_table_evolution record: " + indexName); + return null; + } + return records.get(0).options; + } catch (SQLException e) { + logger.error("failed to query columnar option info", e); + } + return null; + } + + public String getColumnarIndexEngine(String schemaName, String indexName, TablesAccessor accessor) { + try (Connection metaDbConn = MetaDbDataSource.getInstance().getConnection()) { + accessor.setConnection(metaDbConn); + TablesRecord record = accessor.query(schemaName, indexName, false); + if (record == null) { + return null; + } + return record.engine; + } catch (Throwable ex) { + logger.error("failed to query columnar engine info", ex); + } + return null; + } + public SQLPartitionBy buildSqlPartitionBy(GsiMetaManager.GsiTableMetaBean indexTableMeta, boolean needShowHashByRange) { String indexName = indexTableMeta.gsiMetaBean.indexName; @@ -556,7 +758,8 @@ public SQLPartitionBy buildSqlPartitionBy(GsiMetaManager.GsiTableMetaBean indexT OptimizerContext.getContext(schemaName).getPartitionInfoManager(); PartitionInfo partitionInfo = partitionInfoManager.getPartitionInfo(indexName); if (partitionInfo != null) { - boolean usePartitionBy = partitionInfo.isGsi() || partitionInfo.isPartitionedTable(); + boolean usePartitionBy = partitionInfo.isGsi() || partitionInfo.isPartitionedTable() || + partitionInfo.isColumnar(); ByteString byteString = ByteString.from( usePartitionBy ? 
partitionInfo.showCreateTablePartitionDefInfo(needShowHashByRange, "\t\t") : ""); final MySqlCreateTableParser createParser = new MySqlCreateTableParser(byteString); @@ -571,4 +774,56 @@ private boolean needShowImplicitId(ExecutionContext executionContext) { Object value = executionContext.getExtraCmds().get(ConnectionProperties.SHOW_IMPLICIT_ID); return value != null && Boolean.parseBoolean(value.toString()); } + + private String tryAttachImplicitTableGroupInfo(ExecutionContext executionContext, String schemaName, + String tableName, String sql) { + Object value = executionContext.getExtraCmds().get(ConnectionProperties.SHOW_IMPLICIT_TABLE_GROUP); + if (value != null && Boolean.parseBoolean(value.toString())) { + return ImplicitTableGroupUtil.tryAttachImplicitTableGroup(schemaName, tableName, sql); + } + return sql; + } + + private void useKeyInsteadOfIndexIfNeed( + boolean full, + boolean enableUseKeyForAllLocalIndex, + boolean isAutoPartTbl, + MySqlUnique indexDef) { + if (enableUseKeyForAllLocalIndex && isAutoPartTbl && !full) { + indexDef.getIndexDefinition().setIndex(false); + indexDef.getIndexDefinition().setKey(true); + } + } + + private void removeLocalKeyWordOnShowCreateTableIfNeed(boolean showFullCreateTable, + boolean enableUseKeyForAllLocalIndex, + SQLTableElement keyAst) { + if (enableUseKeyForAllLocalIndex && !showFullCreateTable) { + if (keyAst instanceof MySqlTableIndex) { + MySqlTableIndex key = (MySqlTableIndex) keyAst; + key.setLocal(false); + SQLExpr oldCommentExpr = key.getComment(); + SQLExpr newCommentExpr = null; + if (oldCommentExpr == null) { + newCommentExpr = new SQLCharExpr("LOCAL INDEX"); + } else { + String oldCommentStr = SQLUtils.normalizeNoTrim(((SQLCharExpr) oldCommentExpr).getText()); + newCommentExpr = new SQLCharExpr(oldCommentStr + ", LOCAL INDEX"); + } + key.setComment(newCommentExpr); + } else if (keyAst instanceof MySqlKey) { + MySqlKey key = (MySqlKey) keyAst; + key.getIndexDefinition().setLocal(false); + SQLExpr oldCommentExpr = key.getComment(); + SQLExpr newCommentExpr = null; + if (oldCommentExpr == null) { + newCommentExpr = new SQLCharExpr("LOCAL KEY"); + } else { + String oldCommentStr = SQLUtils.normalizeNoTrim(((SQLCharExpr) oldCommentExpr).getText()); + newCommentExpr = new SQLCharExpr(oldCommentStr + ", LOCAL KEY"); + } + key.setComment(newCommentExpr); + } + } + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCreateTablesForShardingDatabaseHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCreateTablesForShardingDatabaseHandler.java index eb21013b7..560e67c67 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCreateTablesForShardingDatabaseHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowCreateTablesForShardingDatabaseHandler.java @@ -21,6 +21,7 @@ import com.alibaba.polardbx.common.ddl.foreignkey.ForeignKeyData; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.properties.ConnectionProperties; import com.alibaba.polardbx.common.utils.TStringUtil; import com.alibaba.polardbx.common.utils.logger.Logger; @@ -38,6 +39,7 @@ import com.alibaba.polardbx.druid.sql.ast.expr.SQLIntegerExpr; import com.alibaba.polardbx.druid.sql.ast.expr.SQLMethodInvokeExpr; import 
com.alibaba.polardbx.druid.sql.ast.expr.SQLNumberExpr; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAssignItem; import com.alibaba.polardbx.druid.sql.ast.statement.SQLColumnDefinition; import com.alibaba.polardbx.druid.sql.ast.statement.SQLExprTableSource; import com.alibaba.polardbx.druid.sql.ast.statement.SQLSelectOrderByItem; @@ -57,8 +59,14 @@ import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.gms.locality.LocalityDesc; +import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; +import com.alibaba.polardbx.gms.metadb.seq.SequencesAccessor; +import com.alibaba.polardbx.gms.metadb.seq.SequencesRecord; import com.alibaba.polardbx.gms.metadb.table.IndexStatus; import com.alibaba.polardbx.gms.metadb.table.IndexVisibility; +import com.alibaba.polardbx.gms.lbac.LBACSecurityManager; +import com.alibaba.polardbx.gms.lbac.LBACSecurityLabel; +import com.alibaba.polardbx.gms.lbac.LBACSecurityPolicy; import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.schema.InformationSchema; @@ -112,12 +120,17 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.StringUtils; +import java.sql.Connection; +import java.sql.SQLException; import java.util.ArrayList; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; +import static java.lang.Math.max; + /** * @author chenmo.cm */ @@ -632,6 +645,9 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { .getGsiManager() .getGsiTableAndIndexMeta(schemaName, tableName, IndexStatus.ALL); + boolean containImplicitColumn = false; + boolean containAutoIncrement = false; + // handle implicit pk final MySqlCreateTableStatement createTable = (MySqlCreateTableStatement) SQLUtils.parseStatementsWithDefaultFeatures(sql, @@ -643,6 +659,9 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { for (SQLTableElement sqlTableElement : createTable.getTableElementList()) { if (tableMeta != null && sqlTableElement instanceof SQLColumnDefinition) { SQLColumnDefinition sqlColumnDefinition = (SQLColumnDefinition) sqlTableElement; + if (sqlColumnDefinition.isAutoIncrement()) { + containAutoIncrement = true; + } String columnName = SQLUtils.normalizeNoTrim(sqlColumnDefinition.getColumnName()); ColumnMeta columnMeta = tableMeta.getColumnIgnoreCase(columnName); if (columnMeta != null && columnMeta.isBinaryDefault()) { @@ -658,6 +677,11 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { } } + if (sqlTableElement instanceof SQLColumnDefinition + && SqlValidatorImpl.isImplicitKey(((SQLColumnDefinition) sqlTableElement).getNameAsString())) { + containImplicitColumn = true; + } + if (sqlTableElement instanceof SQLColumnDefinition && SqlValidatorImpl.isImplicitKey(((SQLColumnDefinition) sqlTableElement).getNameAsString()) && !needShowImplicitId(executionContext)) { @@ -683,7 +707,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { // Only single to single table was allowed to create foreign keys. 
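The hunk that follows applies the same AUTO_INCREMENT rework to sharding databases. When the value is shown, it is derived from the live sequence plus the metadb snapshot, as in this minimal sketch; displayedAutoIncrement is a hypothetical helper name, the real code inlines this logic around SequenceManagerProxy.currValue.

public class AutoIncrementValueSketch {
    /**
     * Next value to display: current sequence value + 1, but never below the
     * value recorded in metadb (valueInMeta may be absent when parsing failed).
     */
    static long displayedAutoIncrement(long currValue, Long valueInMeta) {
        long next = currValue + 1L;
        return valueInMeta == null ? next : Math.max(next, valueInMeta);
    }

    public static void main(String[] args) {
        System.out.println(displayedAutoIncrement(41L, null)); // 42
        System.out.println(displayedAutoIncrement(41L, 100L)); // 100
    }
}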
String defaultGroupName = tddlRuleManager.getDefaultDbIndex().toLowerCase(); String phyReferencedTableName = - SQLUtils.normalize(foreignKey.getReferencedTableName().getSimpleName()); + SQLUtils.normalizeNoTrim(foreignKey.getReferencedTableName().getSimpleName()); String fullQualifiedPhyRefTableName = defaultGroupName + "." + phyReferencedTableName; Set logicalTableNames = @@ -791,7 +815,125 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { createTable.getTableElementList().addAll(gsiDefs); } - sql = createTable.toString(); + //handle lbac attr + buildLBACAttr(createTable, schemaName, tableName); + + /** + * Fix the AUTO_INCREMENT display issue: + * 1. The value is taken from the real sequence value in the sequence manager, not from the AUTO_INCREMENT value carried by the physical table. + * 2. If the `_drds_implicit_id_` column exists, the AUTO_INCREMENT value is not shown. + * 3. For group, new and simple sequences: if the table has never triggered sequence.nextval, the AUTO_INCREMENT value is not displayed (consistent with MySQL). + * */ + SQLAssignItem autoIncrementOption = createTable + .getTableOptions() + .stream() + .filter(option -> { + if (option instanceof SQLAssignItem) { + if (option.getTarget().toString().equalsIgnoreCase("AUTO_INCREMENT")) { + return true; + } + } + return false; + }) + .findFirst() + .orElse(null); + + if (!containImplicitColumn && containAutoIncrement) { + String sequenceName = SequenceAttribute.AUTO_SEQ_PREFIX + tableName; + + SequencesAccessor sequencesAccessor = new SequencesAccessor(); + + boolean hide = false; + List<SequencesRecord> sequence = null; + try (Connection metaDbConn = MetaDbDataSource.getInstance().getConnection()) { + sequencesAccessor.setConnection(metaDbConn); + String whereClause = String.format(" where name = \"%s\"", sequenceName); + sequence = sequencesAccessor.show(schemaName, whereClause); + } catch (SQLException e) { + logger.error("failed to query sequence info", e); + hide = true; + } + + Long valueInMeta = null; + if (sequence != null && sequence.size() == 1) { + SequencesRecord sequencesRecord = sequence.get(0); + SequenceAttribute.Type type = Type.fromString(sequencesRecord.type); + if (type == Type.NEW || type == Type.SIMPLE) { + Long startWith = null; + boolean parseSucceed = true; + try { + valueInMeta = Long.parseLong(sequencesRecord.value); + startWith = Long.parseLong(sequencesRecord.startWith); + } catch (Exception ignore) { + hide = true; + parseSucceed = false; + } + + if (parseSucceed && valueInMeta.equals(startWith)) { + hide = true; + } + } else if (type == Type.GROUP) { + Long unitCount = null, unitIndex = null, innerStep = null; + boolean parseSucceed = true; + try { + valueInMeta = Long.parseLong(sequencesRecord.value); + unitCount = Long.parseLong(sequencesRecord.unitCount); + unitIndex = Long.parseLong(sequencesRecord.unitIndex); + innerStep = Long.parseLong(sequencesRecord.innerStep); + } catch (Exception ignore) { + hide = true; + parseSucceed = false; + } + //value is still the initial value, meaning the sequence has not been used yet and can be hidden + if (parseSucceed) { + Long initBound = (unitIndex + unitCount) * innerStep; + if (valueInMeta < initBound) { + hide = true; + } + } + } + } else { + hide = true; + } + + if (!hide) { + Long seqVal = SequenceManagerProxy.getInstance() +
} else { + Iterator iterator = createTable.getTableOptions().iterator(); + while (iterator.hasNext()) { + SQLAssignItem option = iterator.next(); + if (option.getTarget().toString().equalsIgnoreCase("AUTO_INCREMENT")) { + iterator.remove(); + } + } + } + } else { + Iterator iterator = createTable.getTableOptions().iterator(); + while (iterator.hasNext()) { + SQLAssignItem option = iterator.next(); + if (option.getTarget().toString().equalsIgnoreCase("AUTO_INCREMENT")) { + iterator.remove(); + } + } + } + +// sql = createTable.toString(); + final boolean outputMySQLIndent = + executionContext.getParamManager().getBoolean(ConnectionParams.OUTPUT_MYSQL_INDENT); + sql = createTable.toSqlString(false, outputMySQLIndent); + // Sharding table or single table with broadcast if (tableRule != null) { // Check if any implicit key exists. If yes, it means that @@ -804,9 +946,20 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { // Get the corresponding sequence type. String seqName = SequenceAttribute.AUTO_SEQ_PREFIX + tableName; try { - SequenceAttribute.Type seqType = - SequenceManagerProxy.getInstance().checkIfExists(schemaName, seqName); - if (seqType != SequenceAttribute.Type.NA) { + String sequenceName = SequenceAttribute.AUTO_SEQ_PREFIX + tableName; + SequencesAccessor sequencesAccessor = new SequencesAccessor(); + List sequence = null; + try (Connection metaDbConn = MetaDbDataSource.getInstance().getConnection()) { + sequencesAccessor.setConnection(metaDbConn); + String whereClause = String.format(" where name = \"%s\"", sequenceName); + sequence = sequencesAccessor.show(schemaName, whereClause); + } catch (SQLException e) { + logger.error("failed to query sequence info", e); + } + Type seqType = + sequence != null && sequence.size() == 1 ? Type.fromString(sequence.get(0).type) : + Type.NA; + if (seqType != Type.NA) { // Replace it with extended syntax. String replacement = SequenceAttribute.EXTENDED_AUTO_INC_SYNTAX + seqType; sql = StringUtils.replaceOnce(sql, @@ -851,6 +1004,33 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { } } + public static void buildLBACAttr(MySqlCreateTableStatement createTable, String schemaName, String tableName) { + //for unit test + if (MetaDbDataSource.getInstance() == null) { + return; + } + //handle lbac attr + LBACSecurityPolicy policy = LBACSecurityManager.getInstance().getTablePolicy(schemaName, tableName); + if (policy != null) { + String policyName = policy.getPolicyName(); + createTable.addOption("security policy", new SQLIdentifierExpr(policyName)); + for (SQLTableElement sqlTableElement : createTable.getTableElementList()) { + if (sqlTableElement instanceof SQLColumnDefinition) { + String columnName = ((SQLColumnDefinition) sqlTableElement).getColumnName(); + if (columnName.startsWith("`")) { + columnName = columnName.substring(1, columnName.length() - 1); + } + LBACSecurityLabel + label = LBACSecurityManager.getInstance().getColumnLabel(schemaName, tableName, columnName); + if (label != null) { + ((SQLColumnDefinition) sqlTableElement).setSecuredWith( + new SQLIdentifierExpr(label.getLabelName())); + } + } + } + } + } + public List buildGsiDefs(String schemaName, GsiMetaBean gsiMeta, String mainTableName, boolean full) { final GsiMetaManager.GsiTableMetaBean mainTableMeta = gsiMeta.getTableMeta().get(mainTableName); @@ -976,7 +1156,11 @@ public SQLExpr buildPartitionBy(GsiMetaManager.GsiTableMetaBean indexTableMeta, final String policy = onTable ? 
indexTableMeta.tbPartitionPolicy : indexTableMeta.dbPartitionPolicy; final String key = onTable ? indexTableMeta.tbPartitionKey : indexTableMeta.dbPartitionKey; - if (null == indexTableMeta || TStringUtil.isBlank(policy)) { + return buildPartitionBy(policy, key, onTable); + } + + public static SQLExpr buildPartitionBy(String policy, String key, boolean onTable) { + if (TStringUtil.isBlank(policy)) { return null; } @@ -1015,7 +1199,7 @@ public SQLExpr buildPartitionBy(GsiMetaManager.GsiTableMetaBean indexTableMeta, return partitionBy; } - public boolean isSingleParam(String partitionPolicy) { + public static boolean isSingleParam(String partitionPolicy) { return TStringUtil.startsWithIgnoreCase(partitionPolicy, "hash") || TStringUtil.startsWithIgnoreCase(partitionPolicy, "yyyymm_opt") || TStringUtil.startsWithIgnoreCase(partitionPolicy, "yyyydd_opt") diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowDsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowDsHandler.java index 7638055eb..e18fc78c1 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowDsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowDsHandler.java @@ -18,9 +18,6 @@ import com.alibaba.polardbx.atom.TAtomDataSource; import com.alibaba.polardbx.atom.config.TAtomDsConfDO; -import com.alibaba.polardbx.group.config.Weight; -import com.alibaba.polardbx.group.jdbc.DataSourceWrapper; -import com.alibaba.polardbx.group.jdbc.TGroupDataSource; import com.alibaba.polardbx.common.model.Group; import com.alibaba.polardbx.common.model.Matrix; import com.alibaba.polardbx.common.utils.GeneralUtil; @@ -36,6 +33,9 @@ import com.alibaba.polardbx.gms.topology.GroupDetailInfoExRecord; import com.alibaba.polardbx.gms.util.GroupInfoUtil; import com.alibaba.polardbx.gms.util.InstIdUtil; +import com.alibaba.polardbx.group.config.Weight; +import com.alibaba.polardbx.group.jdbc.DataSourceWrapper; +import com.alibaba.polardbx.group.jdbc.TGroupDataSource; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.locality.StoragePoolManager; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowFilesHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowFilesHandler.java index c1ad6b7e7..590284bc3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowFilesHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowFilesHandler.java @@ -143,7 +143,8 @@ private Cursor handlePartitionedTable(TableMeta tableMeta, ExecutionContext exec rowCount, createTime, ossOrcFileMeta.getCommitTs() == null ? 
null : - sdf.format(new Date(ossOrcFileMeta.getCommitTs() >> ITimestampOracle.BITS_LOGICAL_TIME)) + sdf.format( + new Date(ossOrcFileMeta.getCommitTs() >> ITimestampOracle.BITS_LOGICAL_TIME)) }); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowGlobalDeadlocksHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowGlobalDeadlocksHandler.java index 7b40352ee..a15c65213 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowGlobalDeadlocksHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowGlobalDeadlocksHandler.java @@ -24,6 +24,7 @@ import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.utils.ExecUtils; import com.alibaba.polardbx.executor.utils.transaction.DeadlockParser; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.google.common.collect.ImmutableList; @@ -70,7 +71,8 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { } } else { // Otherwise, get deadlock information from leader - results = SyncManagerHelper.sync(new FetchDeadlockInfoSyncAction(schemaName), schemaName); + results = SyncManagerHelper.sync(new FetchDeadlockInfoSyncAction(schemaName), schemaName, + SyncScope.MASTER_ONLY); } if (CollectionUtils.isNotEmpty(results)) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowHtcHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowHtcHandler.java index 27f81e616..c80b432c3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowHtcHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowHtcHandler.java @@ -39,7 +39,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Objects; /** * @author chenmo.cm diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowIndexHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowIndexHandler.java index 72b82dad9..bd2638908 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowIndexHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowIndexHandler.java @@ -38,6 +38,7 @@ import com.alibaba.polardbx.optimizer.view.PolarDbXSystemTableView; import com.alibaba.polardbx.optimizer.view.SystemTableView; import com.alibaba.polardbx.optimizer.view.PerformanceSchemaViewManager; +import com.alibaba.polardbx.optimizer.view.SystemTableView; import com.alibaba.polardbx.optimizer.view.ViewManager; import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlShow; @@ -107,7 +108,7 @@ private Cursor handleForShardingDatabase(RelNode logicalPlan, ExecutionContext e result.addColumn("Seq_in_index", null, DataTypes.IntegerType); result.addColumn("Column_name", null, DataTypes.StringType); result.addColumn("Collation", null, DataTypes.StringType); - result.addColumn("Cardinality", null, DataTypes.IntegerType); + result.addColumn("Cardinality", null, DataTypes.LongType); result.addColumn("Sub_part", null, DataTypes.IntegerType); result.addColumn("Packed", null, DataTypes.StringType); result.addColumn("Null", null, 
DataTypes.StringType); @@ -121,7 +122,9 @@ private Cursor handleForShardingDatabase(RelNode logicalPlan, ExecutionContext e if (IMPLICIT_COL_NAME.equalsIgnoreCase(row.getString(4))) { continue; } - result.addRow(row.getValues().toArray()); + final Object[] objects = row.getValues().toArray(); + objects[0] = tableName.toLowerCase(); + result.addRow(objects); } indexFromMain.close(new ArrayList<>()); @@ -225,7 +228,7 @@ private Cursor handleForPartitionDatabase(RelNode logicalPlan, ExecutionContext result.addColumn("Seq_in_index", null, DataTypes.IntegerType); result.addColumn("Column_name", null, DataTypes.StringType); result.addColumn("Collation", null, DataTypes.StringType); - result.addColumn("Cardinality", null, DataTypes.IntegerType); + result.addColumn("Cardinality", null, DataTypes.LongType); result.addColumn("Sub_part", null, DataTypes.IntegerType); result.addColumn("Packed", null, DataTypes.StringType); result.addColumn("Null", null, DataTypes.StringType); @@ -240,6 +243,7 @@ private Cursor handleForPartitionDatabase(RelNode logicalPlan, ExecutionContext continue; } final Object[] objects = row.getValues().toArray(); + objects[0] = tableName.toLowerCase(); final String indexName = objects[2].toString(); if (indexName.startsWith(AUTO_LOCAL_INDEX_PREFIX)) { objects[2] = indexName.substring(AUTO_LOCAL_INDEX_PREFIX.length()); // Fake one. diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowMasterStatusHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowMasterStatusHandler.java index 0697fefdc..0d74282b9 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowMasterStatusHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowMasterStatusHandler.java @@ -25,6 +25,7 @@ import com.alibaba.polardbx.optimizer.utils.RelUtils; import com.alibaba.polardbx.rpc.CdcRpcClient; import com.alibaba.polardbx.rpc.cdc.CdcServiceGrpc.CdcServiceBlockingStub; +import com.alibaba.polardbx.rpc.cdc.FullMasterStatus; import com.alibaba.polardbx.rpc.cdc.MasterStatus; import com.alibaba.polardbx.rpc.cdc.Request; import io.grpc.Channel; @@ -32,10 +33,10 @@ import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.SqlShowMasterStatus; +import org.apache.commons.lang3.StringUtils; + +import static com.alibaba.polardbx.executor.utils.CdcExeUtil.tryExtractStreamNameFromUser; -/** - * - */ public class LogicalShowMasterStatusHandler extends HandlerCommon { public LogicalShowMasterStatusHandler(IRepository repo) { super(repo); @@ -46,22 +47,59 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { SqlShowMasterStatus sqlShowMasterStatus = (SqlShowMasterStatus) ((LogicalShow) logicalPlan).getNativeSqlNode(); SqlNode with = sqlShowMasterStatus.getWith(); - String streamName = with == null ? "" : RelUtils.lastStringValue(with); + String streamName = + with == null ? tryExtractStreamNameFromUser(executionContext) : RelUtils.lastStringValue(with); CdcServiceBlockingStub cdcServiceBlockingStub = - with == null ? CdcRpcClient.getCdcRpcClient().getCdcServiceBlockingStub() : + StringUtils.isBlank(streamName) ? 
CdcRpcClient.getCdcRpcClient().getCdcServiceBlockingStub() : CdcRpcClient.getCdcRpcClient().getCdcServiceBlockingStub(streamName); - MasterStatus masterStatus = cdcServiceBlockingStub.showMasterStatus( - Request.newBuilder().setStreamName(streamName).build()); - ArrayResultCursor result = new ArrayResultCursor("SHOW MASTER STATUS"); - result.addColumn("File", DataTypes.StringType); - result.addColumn("Position", DataTypes.LongType); - result.addColumn("Binlog_Do_DB", DataTypes.StringType); - result.addColumn("Binlog_Ignore_DB", DataTypes.StringType); - result.addColumn("Executed_Gtid_Set", DataTypes.StringType); - result.initMeta(); - result.addRow(new Object[] { - masterStatus.getFile(), masterStatus.getPosition(), masterStatus.getBinlogDoDB(), - masterStatus.getBinlogIgnoreDB(), masterStatus.getExecutedGtidSet()}); + + ArrayResultCursor result = null; + if (sqlShowMasterStatus.isFull()) { + FullMasterStatus fullMasterStatus = + cdcServiceBlockingStub.showFullMasterStatus(Request.newBuilder().setStreamName(streamName).build()); + result = new ArrayResultCursor("SHOW FULL MASTER STATUS"); + result.addColumn("File", DataTypes.StringType); + result.addColumn("Position", DataTypes.LongType); + result.addColumn("LastTso", DataTypes.StringType); + result.addColumn("DelayTimeMs", DataTypes.LongType); + result.addColumn("AvgRevEps", DataTypes.LongType); + result.addColumn("AvgRevBps", DataTypes.LongType); + result.addColumn("AvgWriteEps", DataTypes.LongType); + result.addColumn("AvgWriteBps", DataTypes.LongType); + result.addColumn("AvgWriteTps", DataTypes.LongType); + result.addColumn("AvgUploadBps", DataTypes.LongType); + result.addColumn("AvgDumpBps", DataTypes.LongType); + result.addColumn("ExtInfo", DataTypes.StringType); + result.initMeta(); + result.addRow(new Object[] { + fullMasterStatus.getFile(), + fullMasterStatus.getPosition(), + fullMasterStatus.getLastTso(), + fullMasterStatus.getDelayTime(), + fullMasterStatus.getAvgRevEps(), + fullMasterStatus.getAvgRevBps(), + fullMasterStatus.getAvgWriteEps(), + fullMasterStatus.getAvgWriteBps(), + fullMasterStatus.getAvgWriteTps(), + fullMasterStatus.getAvgUploadBps(), + fullMasterStatus.getAvgDumpBps(), + fullMasterStatus.getExtInfo() + }); + } else { + MasterStatus masterStatus = cdcServiceBlockingStub.showMasterStatus( + Request.newBuilder().setStreamName(streamName).build()); + result = new ArrayResultCursor("SHOW MASTER STATUS"); + result.addColumn("File", DataTypes.StringType); + result.addColumn("Position", DataTypes.LongType); + result.addColumn("Binlog_Do_DB", DataTypes.StringType); + result.addColumn("Binlog_Ignore_DB", DataTypes.StringType); + result.addColumn("Executed_Gtid_Set", DataTypes.StringType); + result.initMeta(); + result.addRow(new Object[] { + masterStatus.getFile(), masterStatus.getPosition(), masterStatus.getBinlogDoDB(), + masterStatus.getBinlogIgnoreDB(), masterStatus.getExecutedGtidSet()}); + } + Channel channel = cdcServiceBlockingStub.getChannel(); if (channel instanceof ManagedChannel) { ((ManagedChannel) channel).shutdown(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowPartitionsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowPartitionsHandler.java index dae8e49df..b7612b550 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowPartitionsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowPartitionsHandler.java @@ -19,14 +19,10 
@@ import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.TStringUtil; -import com.alibaba.polardbx.executor.common.ExecutorContext; -import com.alibaba.polardbx.executor.common.TopologyHandler; import com.alibaba.polardbx.executor.cursor.Cursor; -import com.alibaba.polardbx.executor.cursor.ExecutorCursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaPartitionsMetaHandler; import com.alibaba.polardbx.executor.spi.IRepository; -import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.GsiMetaManager; import com.alibaba.polardbx.optimizer.config.table.TableMeta; @@ -34,21 +30,14 @@ import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.function.calc.scalar.CanAccessTable; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalShow; -import com.alibaba.polardbx.optimizer.partition.PartitionByDefinition; import com.alibaba.polardbx.optimizer.partition.PartitionInfo; import com.alibaba.polardbx.optimizer.partition.PartitionInfoManager; -import com.alibaba.polardbx.optimizer.partition.PartitionSpec; -import com.alibaba.polardbx.optimizer.partition.common.PartitionTableType; -import com.alibaba.polardbx.optimizer.tablegroup.TableGroupInfoManager; import com.alibaba.polardbx.optimizer.utils.RelUtils; import com.alibaba.polardbx.rule.TableRule; import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlShowPartitions; import org.apache.commons.lang.StringUtils; -import java.util.ArrayList; -import java.util.List; -import java.util.Locale; import java.util.Map; /** diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowPartitionsHeatmapHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowPartitionsHeatmapHandler.java index 9d838f1bf..8127ee641 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowPartitionsHeatmapHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowPartitionsHeatmapHandler.java @@ -16,11 +16,6 @@ package com.alibaba.polardbx.executor.handler; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; - import com.alibaba.fastjson.JSON; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; @@ -33,20 +28,24 @@ import com.alibaba.polardbx.executor.sync.FetchPartitionHeatmapSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.utils.ExecUtils; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalShow; import com.alibaba.polardbx.optimizer.utils.RelUtils; - import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlShowPartitionsHeatmap; import org.apache.commons.collections.CollectionUtils; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + 
/** * @author ximing.yd - * @date 2022/1/5 7:15 下午 */ public class LogicalShowPartitionsHeatmapHandler extends HandlerCommon { @@ -80,7 +79,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { } else { // Otherwise, get information from leader results = SyncManagerHelper.sync(new FetchPartitionHeatmapSyncAction(schemaName, timeRange, type), - schemaName); + schemaName, SyncScope.MASTER_ONLY); } if (CollectionUtils.isNotEmpty(results)) { @@ -89,7 +88,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { continue; } for (Map row : info) { - final String heatmap = (String)row.get("HEATMAP"); + final String heatmap = (String) row.get("HEATMAP"); result.addRow(new Object[] {heatmap}); } } @@ -99,8 +98,8 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { } private Map getParamMap(RelNode logicalPlan) { - final LogicalShow show = (LogicalShow)logicalPlan; - final SqlShowPartitionsHeatmap showPartitionsHeatmap = (SqlShowPartitionsHeatmap)show.getNativeSqlNode(); + final LogicalShow show = (LogicalShow) logicalPlan; + final SqlShowPartitionsHeatmap showPartitionsHeatmap = (SqlShowPartitionsHeatmap) show.getNativeSqlNode(); String originTimeRange = RelUtils.lastStringValue(showPartitionsHeatmap.getOperandList().get(0)); String originType = RelUtils.lastStringValue(showPartitionsHeatmap.getOperandList().get(1)); String timeRange = VisualConstants.LAST_ONE_HOURS; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowProfileHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowProfileHandler.java index fcbeec296..cef5964e3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowProfileHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowProfileHandler.java @@ -26,6 +26,7 @@ import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.executor.sync.ISyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalShow; @@ -280,7 +281,8 @@ protected List>> doShowProfileSyncAction(String schemaN } catch (Exception e) { throw new TddlRuntimeException(ErrorCode.ERR_CONFIG, e, e.getMessage()); } - List>> results = SyncManagerHelper.sync(showProfileSyncAction, schemaName); + List>> results = SyncManagerHelper.sync(showProfileSyncAction, schemaName, + SyncScope.CURRENT_ONLY); return results; } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowPruneTraceHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowPruneTraceHandler.java new file mode 100644 index 000000000..5784c4484 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowPruneTraceHandler.java @@ -0,0 +1,74 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.handler; + +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.optimizer.statis.ColumnarPruneRecord; +import org.apache.calcite.rel.RelNode; + +import java.util.Collection; + +/** + * @author jilong.ljl + */ +public class LogicalShowPruneTraceHandler extends HandlerCommon { + public LogicalShowPruneTraceHandler(IRepository repo) { + super(repo); + } + + @Override + public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { + ArrayResultCursor result = new ArrayResultCursor("TRACE"); + result.addColumn("TABLE_NAME", DataTypes.StringType); + result.addColumn("FILTER", DataTypes.StringType); + result.addColumn("INIT_TIME(NS)", DataTypes.StringType); + result.addColumn("PRUNE_TIME(NS)", DataTypes.StringType); + result.addColumn("FILE_NUM", DataTypes.StringType); + result.addColumn("STRIPE_NUM", DataTypes.StringType); + result.addColumn("RG_NUM", DataTypes.StringType); + result.addColumn("RG_LEFT_NUM", DataTypes.StringType); + result.addColumn("SORT_KEY_PRUNE_NUM", DataTypes.StringType); + result.addColumn("ZONE_MAP_PRUNE_NUM", DataTypes.StringType); + result.addColumn("BITMAP_PRUNE_NUM", DataTypes.StringType); + + result.initMeta(); + + Collection ops = null; + if (executionContext.getColumnarTracer() != null) { + ops = executionContext.getColumnarTracer().pruneRecords(); + for (ColumnarPruneRecord op : ops) { + result.addRow(new Object[] { + op.getTableName(), + op.getFilter(), + op.initIndexTime, + op.indexPruneTime, + op.fileNum, + op.stripeNum, + op.rgNum, + op.rgLeftNum, + op.sortKeyPruneNum, + op.zoneMapPruneNum, + op.bitMapPruneNum}); + } + } + return result; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowReplicaCheckDiffHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowReplicaCheckDiffHandler.java new file mode 100644 index 000000000..9c9de6983 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowReplicaCheckDiffHandler.java @@ -0,0 +1,103 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.handler; + +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.druid.util.StringUtils; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.gms.metadb.GmsSystemTables; +import com.alibaba.polardbx.gms.util.MetaDbUtil; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.sql.SqlShowReplicaCheckDiff; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +/** + * @author yudong + * @since 2023/11/9 10:41 + **/ +public class LogicalShowReplicaCheckDiffHandler extends HandlerCommon { + + private static final Logger logger = LoggerFactory.getLogger(LogicalShowReplicaCheckDiffHandler.class); + + public LogicalShowReplicaCheckDiffHandler(IRepository repo) { + super(repo); + } + + @Override + public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { + SqlShowReplicaCheckDiff sqlShowReplicaCheckDiff = + (SqlShowReplicaCheckDiff) ((LogicalDal) logicalPlan).getNativeSqlNode(); + String dbName = sqlShowReplicaCheckDiff.getDbName().toString(); + if (StringUtils.isEmpty(dbName)) { + throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, "database cannot be empty!"); + } + String tbName = null; + String sql; + if (sqlShowReplicaCheckDiff.getTableName() != null) { + tbName = sqlShowReplicaCheckDiff.getTableName().toString(); + sql = String.format("SELECT * FROM " + GmsSystemTables.RPL_FULL_VALID_DIFF_TABLE + + " WHERE `dst_logical_db` = '%s' AND `dst_logical_table` = '%s'", dbName, tbName); + } else { + sql = String.format("SELECT * FROM " + GmsSystemTables.RPL_FULL_VALID_DIFF_TABLE + + " WHERE `dst_logical_db` = '%s'", dbName); + } + + final ArrayResultCursor result = new ArrayResultCursor("CHECK REPLICA TABLE SHOW DIFF"); + result.addColumn("DATABASE", DataTypes.StringType); + result.addColumn("TABLE", DataTypes.StringType); + result.addColumn("ERROR_TYPE", DataTypes.StringType); + result.addColumn("STATUS", DataTypes.StringType); + result.addColumn("SRC_KEY_NAME", DataTypes.StringType); + result.addColumn("SRC_KEY_VAL", DataTypes.StringType); + result.addColumn("DST_KEY_NAME", DataTypes.StringType); + result.addColumn("DST_KEY_VAL", DataTypes.StringType); + result.initMeta(); + + try (Connection metaDbConn = MetaDbUtil.getConnection()) { + PreparedStatement statement = metaDbConn.prepareStatement(sql); + ResultSet rs = statement.executeQuery(); + while (rs.next()) { + result.addRow(new Object[] { + rs.getString("dst_logical_db"), + rs.getString("dst_logical_table"), + rs.getString("error_type"), + rs.getString("status"), + rs.getString("src_key_name"), + rs.getString("src_key_val"), + rs.getString("dst_key_name"), + rs.getString("dst_key_val") + }); + } + return result; + } catch (SQLException ex) { + logger.error("get replica diff failed!", ex); + throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, ex); + } + } +} diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowReplicaCheckProgressHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowReplicaCheckProgressHandler.java new file mode 100644 index 000000000..e7c1f7e96 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowReplicaCheckProgressHandler.java @@ -0,0 +1,114 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.handler; + +import com.alibaba.fastjson.JSON; +import com.alibaba.polardbx.common.cdc.CdcConstants; +import com.alibaba.polardbx.common.cdc.ResultCode; +import com.alibaba.polardbx.common.cdc.RplConstants; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.utils.PooledHttpHelper; +import com.alibaba.polardbx.druid.util.StringUtils; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.net.util.CdcTargetUtil; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; +import lombok.Data; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.sql.SqlShowReplicaCheckProgress; +import org.apache.http.entity.ContentType; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * @author yudong + * @since 2023/11/9 10:41 + **/ +public class LogicalShowReplicaCheckProgressHandler extends HandlerCommon { + + private static final String API_PATTERN = "http://%s/replica/fullValidation/progress"; + + public LogicalShowReplicaCheckProgressHandler(IRepository repo) { + super(repo); + } + + @Override + public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { + SqlShowReplicaCheckProgress sqlShowReplicaCheckProgress = + (SqlShowReplicaCheckProgress) ((LogicalDal) logicalPlan).getNativeSqlNode(); + + String dbName = sqlShowReplicaCheckProgress.getDbName().toString(); + if (StringUtils.isEmpty(dbName)) { + throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, "database cannot be empty!"); + } + Map params = new HashMap<>(); + params.put(RplConstants.RPL_FULL_VALID_DB, dbName); + if (sqlShowReplicaCheckProgress.getTableName() != null) { + String tbName = sqlShowReplicaCheckProgress.getTableName().toString(); + params.put(RplConstants.RPL_FULL_VALID_TB, tbName); + } + + String daemonEndpoint = CdcTargetUtil.getDaemonMasterTarget(); + String url = String.format(API_PATTERN, daemonEndpoint); + String res; + try { + res = PooledHttpHelper.doPost(url, ContentType.APPLICATION_JSON, JSON.toJSONString(params), 10000); + } catch (Exception e) { + throw new 
TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, e); + } + + ResultCode httpResult = JSON.parseObject(res, ResultCode.class); + if (httpResult.getCode() != CdcConstants.SUCCESS_CODE) { + throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, httpResult.getMsg()); + } + + try { + final ArrayResultCursor result = new ArrayResultCursor("CHECK REPLICA TABLE SHOW PROGRESS"); + result.addColumn("DATABASE", DataTypes.StringType); + result.addColumn("TABLE", DataTypes.StringType); + result.addColumn("STAGE", DataTypes.StringType); + result.addColumn("STATUS", DataTypes.StringType); + result.addColumn("SUMMARY", DataTypes.StringType); + result.initMeta(); + + String jsonStr = (String) httpResult.getData(); + List infos = JSON.parseArray(jsonStr, ReplicaFullValidProgressInfo.class); + + infos.forEach(r -> result.addRow( + new Object[] {r.getDbName(), r.getTbName(), r.getStage(), r.getStatus(), r.getSummary()})); + return result; + } catch (Exception e) { + throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, e); + } + } + + @Data + private static class ReplicaFullValidProgressInfo { + String dbName; + String tbName; + String stage; + String status; + String summary; + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowSlaveStatusHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowSlaveStatusHandler.java index 9d68debc7..95ff08464 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowSlaveStatusHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowSlaveStatusHandler.java @@ -23,6 +23,7 @@ import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.PooledHttpHelper; +import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.spi.IRepository; @@ -31,6 +32,7 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlShowSlaveStatus; import org.apache.commons.collections.CollectionUtils; @@ -46,6 +48,8 @@ */ public class LogicalShowSlaveStatusHandler extends LogicalReplicationBaseHandler { + private static final Logger cdcLogger = SQLRecorderLogger.cdcLogger; + public LogicalShowSlaveStatusHandler(IRepository repo) { super(repo); } @@ -62,10 +66,12 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { ContentType.APPLICATION_JSON, JSON.toJSONString(sqlNode.getParams()), 10000); } catch (Exception e) { + cdcLogger.error("show slave status error!", e); throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, e); } ResultCode httpResult = JSON.parseObject(res, ResultCode.class); if (httpResult.getCode() != CdcConstants.SUCCESS_CODE) { + cdcLogger.warn("show slave status failed! 
code:" + httpResult.getCode() + ", msg:" + httpResult.getMsg()); throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, httpResult.getMsg()); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowSlowHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowSlowHandler.java index 308a32e7b..915277a48 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowSlowHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowSlowHandler.java @@ -24,7 +24,7 @@ import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.executor.sync.ISyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; -import com.alibaba.polardbx.gms.privilege.PolarPrivUtil; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalShow; @@ -97,7 +97,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { } List>> results = - SyncManagerHelper.sync(showSlowAction, executionContext.getSchemaName()); + SyncManagerHelper.sync(showSlowAction, executionContext.getSchemaName(), SyncScope.CURRENT_ONLY); int size = 0; for (List> rs : results) { if (rs == null) { @@ -160,7 +160,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { } List>> results = SyncManagerHelper.sync(showPhysicalSlowAction, - executionContext.getSchemaName()); + executionContext.getSchemaName(), SyncScope.NOT_COLUMNAR_SLAVE); int size = 0; for (List> rs : results) { if (rs == null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowStatsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowStatsHandler.java index ba24930c3..fcaf22031 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowStatsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowStatsHandler.java @@ -25,6 +25,7 @@ import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.executor.sync.ISyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalShow; @@ -80,7 +81,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { double connectionCreatePerSecond = 0; List>> results = - SyncManagerHelper.sync(showStatsAction, executionContext.getSchemaName()); + SyncManagerHelper.sync(showStatsAction, executionContext.getSchemaName(), SyncScope.ALL); long activeConnection = 0; long totalRequest = 0; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowTableAccessHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowTableAccessHandler.java index 40e237342..32cb6f73d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowTableAccessHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowTableAccessHandler.java @@ -25,6 +25,7 @@ import 
com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.executor.sync.ISyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.statis.PlanAccessStat; @@ -72,7 +73,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { } List>> tableAccessStatOfAllCn = - SyncManagerHelper.sync(showTableAccessAction, executionContext.getSchemaName()); + SyncManagerHelper.sync(showTableAccessAction, executionContext.getSchemaName(), SyncScope.ALL); ArrayResultCursor cursor = new ArrayResultCursor("SHOW_TABLE_ACCESS"); cursor.addColumn("TABLE_SCHEMA", DataTypes.StringType); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowVariablesHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowVariablesHandler.java deleted file mode 100644 index cff1d4517..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalShowVariablesHandler.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.handler; - -import com.alibaba.polardbx.common.constants.TransactionAttribute; -import com.alibaba.polardbx.common.jdbc.BatchInsertPolicy; -import com.alibaba.polardbx.config.InstanceRoleManager; -import com.alibaba.polardbx.executor.cursor.Cursor; -import com.alibaba.polardbx.executor.cursor.ExecutorCursor; -import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; -import com.alibaba.polardbx.executor.operator.FilterExec; -import com.alibaba.polardbx.executor.operator.ResultSetCursorExec; -import com.alibaba.polardbx.executor.spi.IRepository; -import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.core.TddlRelDataTypeSystemImpl; -import com.alibaba.polardbx.optimizer.core.TddlTypeFactoryImpl; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.optimizer.core.expression.calc.IExpression; -import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalShow; -import com.alibaba.polardbx.optimizer.utils.RelUtils; -import com.alibaba.polardbx.optimizer.utils.RexUtils; -import org.apache.calcite.rel.RelNode; -import org.apache.calcite.rex.RexBuilder; -import org.apache.calcite.rex.RexNode; -import org.apache.calcite.sql.SqlShowVariables; -import org.apache.calcite.sql.fun.SqlStdOperatorTable; -import org.apache.calcite.sql.type.SqlTypeName; - -import java.util.Arrays; -import java.util.Map.Entry; - -/** - * @author chenmo.cm - */ -public class LogicalShowVariablesHandler extends HandlerCommon { - - public LogicalShowVariablesHandler(IRepository repo) { - super(repo); - } - - @Override - public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { - final LogicalShow show = (LogicalShow) logicalPlan; - - ArrayResultCursor cursor = new ArrayResultCursor("VARIABLES"); - cursor.addColumn("Variable_name", DataTypes.StringType); - cursor.addColumn("Value", DataTypes.StringType); - cursor.initMeta(); - - if (executionContext.getServerVariables() != null) { - // SHOW VARIABLES - for (Entry entry : executionContext.getServerVariables().entrySet()) { - cursor.addRow(new Object[] {entry.getKey(), entry.getValue()}); - } - } - - if (executionContext.getExtraServerVariables() != null) { - // SHOW EXTRA VARIABLES - for (Entry entry : executionContext.getExtraServerVariables().entrySet()) { - cursor.addRow(new Object[] {entry.getKey(), entry.getValue()}); - } - } - - // DRDS_TRANSACTION_POLICY - cursor.addRow(new Object[] { - TransactionAttribute.DRDS_TRANSACTION_POLICY, - executionContext.getConnection().getTrxPolicy().toString()}); - - // BATCH_INSERT_POLICY - cursor.addRow(new Object[] { - BatchInsertPolicy.getVariableName(), - executionContext.getConnection().getBatchInsertPolicy(executionContext.getExtraCmds()).getName()}); - - // DRDS_INSTANCE_ROLE - cursor.addRow(new Object[] { - InstanceRoleManager.INSTANCE_ROLE_VARIABLE, - InstanceRoleManager.INSTANCE.getInstanceRole()}); - - // SHARE_READ_VIEW - cursor.addRow(new Object[] { - TransactionAttribute.SHARE_READ_VIEW, - executionContext.isShareReadView()}); - - final SqlShowVariables showVariables = (SqlShowVariables) show.getNativeSqlNode(); - if (showVariables.like != null) { - final String pattern = RelUtils.stringValue(showVariables.like); - RexBuilder rexBuilder = new RexBuilder(new TddlTypeFactoryImpl(TddlRelDataTypeSystemImpl.getInstance())); - RexNode likeCondition = rexBuilder.makeCall( - SqlStdOperatorTable.LIKE, - Arrays - 
.asList(rexBuilder.makeInputRef(rexBuilder.getTypeFactory().createSqlType(SqlTypeName.VARCHAR), 0), - rexBuilder.makeLiteral(pattern))); - IExpression expression = RexUtils.buildRexNode(likeCondition, executionContext); - - FilterExec filterExec = - new FilterExec(new ResultSetCursorExec(cursor, executionContext, Long.MAX_VALUE), expression, - null, executionContext); - return new ExecutorCursor(filterExec, cursor.getMeta()); - } - - return cursor; - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalSlowSqlCclHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalSlowSqlCclHandler.java index 2bdbc6b7f..1948588a4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalSlowSqlCclHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalSlowSqlCclHandler.java @@ -17,15 +17,12 @@ package com.alibaba.polardbx.executor.handler; import com.alibaba.fastjson.JSON; -import com.alibaba.polardbx.druid.sql.ast.SqlType; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.utils.thread.ThreadCpuStatUtil; +import com.alibaba.polardbx.druid.sql.ast.SqlType; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.ResultCursor; import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; @@ -40,6 +37,7 @@ import com.alibaba.polardbx.gms.metadb.ccl.CclRuleRecord; import com.alibaba.polardbx.gms.metadb.ccl.CclTriggerAccessor; import com.alibaba.polardbx.gms.metadb.ccl.CclTriggerRecord; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.topology.SystemDbHelper; import com.alibaba.polardbx.gms.util.InstIdUtil; import com.alibaba.polardbx.gms.util.MetaDbUtil; @@ -52,10 +50,13 @@ import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalCcl; import com.alibaba.polardbx.optimizer.core.row.Row; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; import org.apache.calcite.rel.RelNode; +import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.SqlShowCclRule; import org.apache.calcite.sql.SqlSlowSqlCcl; -import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.SqlSpecialIdentifier; import org.apache.calcite.sql.parser.SqlParserPos; import org.apache.commons.collections.CollectionUtils; @@ -151,8 +152,6 @@ public Cursor handleGo(SqlSlowSqlCcl easyCcl, ExecutionContext executionContext) int maxConcurrency = ThreadCpuStatUtil.NUM_CORES / 2; int maxCclRule = DEFAULT_MAX_CCL_RULE; long slowSqlTime = executionContext.getParamManager().getLong(ConnectionParams.SLOW_SQL_TIME); - slowSqlTime = (Long) executionContext.getUserDefVariables().getOrDefault("SLOW_SQL_TIME", slowSqlTime); - slowSqlTime = (Long) executionContext.getUserDefVariables().getOrDefault("slow_sql_time", slowSqlTime); if (slowSqlTime > (long) Integer.MAX_VALUE) { slowSqlTime = Integer.MAX_VALUE; } @@ -213,7 +212,8 @@ public Cursor handleGo(SqlSlowSqlCcl easyCcl, ExecutionContext executionContext) throw new 
TddlRuntimeException(ErrorCode.ERR_CONFIG, e, e.getMessage()); } - List>> processListResults = SyncManagerHelper.sync(showProcesslistSyncAction); + List>> processListResults = SyncManagerHelper.sync(showProcesslistSyncAction, + SyncScope.CURRENT_ONLY); for (List> nodeRows : processListResults) { if (nodeRows == null) { continue; @@ -280,7 +280,8 @@ public Cursor handleGo(SqlSlowSqlCcl easyCcl, ExecutionContext executionContext) } catch (Exception e) { throw new TddlRuntimeException(ErrorCode.ERR_CONFIG, e, e.getMessage()); } - List>> results = SyncManagerHelper.sync(killSyncAction, schema); + List>> results = SyncManagerHelper.sync( + killSyncAction, schema, SyncScope.CURRENT_ONLY); for (List> result : results) { count += (Integer) result.iterator().next().get(ResultCursor.AFFECT_ROW); } @@ -514,7 +515,7 @@ private Cursor getPlanCacheCursor(ExecutionContext executionContext) { List>> results = SyncManagerHelper.sync(new FetchPlanCacheSyncAction(schemaName, false), - schemaName); + schemaName, SyncScope.CURRENT_ONLY); for (List> nodeRows : results) { if (nodeRows == null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalStartMasterHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalStartMasterHandler.java index fb997a4b9..9b5467232 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalStartMasterHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalStartMasterHandler.java @@ -22,6 +22,7 @@ import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.HttpClientHelper; +import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.druid.util.StringUtils; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; @@ -30,6 +31,7 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; import com.alibaba.polardbx.optimizer.utils.RelUtils; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.SqlStartMaster; @@ -40,6 +42,8 @@ **/ public class LogicalStartMasterHandler extends HandlerCommon { + private static final Logger cdcLogger = SQLRecorderLogger.cdcLogger; + public LogicalStartMasterHandler(IRepository repo) { super(repo); } @@ -60,13 +64,15 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { try { res = HttpClientHelper.doGet("http://" + daemonEndpoint + "/system/start"); } catch (Exception e) { + cdcLogger.error("start master error!", e); throw new RuntimeException("start master failed", e); } - ResultCode resultCode = JSON.parseObject(res, ResultCode.class); + ResultCode resultCode = JSON.parseObject(res, ResultCode.class); if (resultCode.getCode() == CdcConstants.SUCCESS_CODE) { return new AffectRowCursor(0); } else { + cdcLogger.warn("start master failed! 
code:" + resultCode.getCode() + ", msg:" + resultCode.getMsg()); throw new TddlRuntimeException(ErrorCode.ERR_CDC_GENERIC, resultCode.getMsg()); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalStartReplicaCheckTableHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalStartReplicaCheckTableHandler.java new file mode 100644 index 000000000..dcb9500b1 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalStartReplicaCheckTableHandler.java @@ -0,0 +1,87 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.handler; + +import com.alibaba.fastjson.JSON; +import com.alibaba.polardbx.common.cdc.CdcConstants; +import com.alibaba.polardbx.common.cdc.ResultCode; +import com.alibaba.polardbx.common.cdc.RplConstants; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.utils.HttpClientHelper; +import com.alibaba.polardbx.common.utils.PooledHttpHelper; +import com.alibaba.polardbx.druid.util.StringUtils; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; +import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.net.util.CdcTargetUtil; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.sql.SqlStartReplicaCheck; +import org.apache.http.entity.ContentType; + +import java.util.HashMap; +import java.util.Map; + +/** + * @author yudong + * @since 2023/11/9 10:39 + **/ +public class LogicalStartReplicaCheckTableHandler extends HandlerCommon { + + private static final String API_PATTERN = "http://%s/replica/fullValidation/create"; + + public LogicalStartReplicaCheckTableHandler(IRepository repo) { + super(repo); + } + + @Override + public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { + SqlStartReplicaCheck sqlStartReplicaCheck = + (SqlStartReplicaCheck) ((LogicalDal) logicalPlan).getNativeSqlNode(); + + String dbName = sqlStartReplicaCheck.getDbName().toString(); + Map params = new HashMap<>(); + params.put(RplConstants.RPL_FULL_VALID_DB, dbName); + if (sqlStartReplicaCheck.getTableName() != null) { + String tbName = sqlStartReplicaCheck.getTableName().toString(); + params.put(RplConstants.RPL_FULL_VALID_TB, tbName); + } + if (sqlStartReplicaCheck.getChannel() != null) { + String channel = sqlStartReplicaCheck.getChannel().toString(); + params.put(RplConstants.CHANNEL, channel); + } else { + params.put(RplConstants.CHANNEL, ""); + } + + String daemonEndpoint = CdcTargetUtil.getDaemonMasterTarget(); + String url = String.format(API_PATTERN, daemonEndpoint); + String res; + try { + res = 
PooledHttpHelper.doPost(url, ContentType.APPLICATION_JSON, JSON.toJSONString(params), 10000); + } catch (Exception e) { + throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, e); + } + + ResultCode httpResult = JSON.parseObject(res, ResultCode.class); + if (httpResult.getCode() != CdcConstants.SUCCESS_CODE) { + throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, httpResult.getMsg()); + } + return new AffectRowCursor(0); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalStartSlaveHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalStartSlaveHandler.java index c67f690c7..30dda4592 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalStartSlaveHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalStartSlaveHandler.java @@ -22,12 +22,14 @@ import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.PooledHttpHelper; +import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.net.util.CdcTargetUtil; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlStartSlave; import org.apache.http.entity.ContentType; @@ -39,7 +41,9 @@ */ public class LogicalStartSlaveHandler extends LogicalReplicationBaseHandler { - public LogicalStartSlaveHandler(IRepository repo){ + private static final Logger cdcLogger = SQLRecorderLogger.cdcLogger; + + public LogicalStartSlaveHandler(IRepository repo) { super(repo); } @@ -55,10 +59,12 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { ContentType.APPLICATION_JSON, JSON.toJSONString(sqlNode.getParams()), 10000); } catch (Exception e) { + cdcLogger.error("start slave error!", e); throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, e); } ResultCode httpResult = JSON.parseObject(res, ResultCode.class); if (httpResult.getCode() != CdcConstants.SUCCESS_CODE) { + cdcLogger.warn("start slave failed! 
code:" + httpResult.getCode() + ", msg:" + httpResult.getMsg()); throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, httpResult.getMsg()); } return new AffectRowCursor(0); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalStopMasterHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalStopMasterHandler.java index 2cfe57aeb..44ba2f058 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalStopMasterHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalStopMasterHandler.java @@ -22,6 +22,7 @@ import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.HttpClientHelper; +import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.druid.util.StringUtils; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; @@ -30,6 +31,7 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; import com.alibaba.polardbx.optimizer.utils.RelUtils; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.SqlStopMaster; @@ -40,6 +42,8 @@ **/ public class LogicalStopMasterHandler extends HandlerCommon { + private static final Logger cdcLogger = SQLRecorderLogger.cdcLogger; + public LogicalStopMasterHandler(IRepository repo) { super(repo); } @@ -60,11 +64,13 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { try { res = HttpClientHelper.doGet("http://" + daemonEndpoint + "/system/stop"); } catch (Exception e) { + cdcLogger.error("stop master error!", e); throw new RuntimeException("stop master failed", e); } - ResultCode resultCode = JSON.parseObject(res, ResultCode.class); + ResultCode resultCode = JSON.parseObject(res, ResultCode.class); if (resultCode.getCode() == CdcConstants.SUCCESS_CODE) { + cdcLogger.warn("stop slave failed! 
code:" + resultCode.getCode() + ", msg:" + resultCode.getMsg()); return new AffectRowCursor(0); } else { throw new TddlRuntimeException(ErrorCode.ERR_CDC_GENERIC, resultCode.getMsg()); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalStopSlaveHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalStopSlaveHandler.java index 019064fbb..1496eea78 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalStopSlaveHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/LogicalStopSlaveHandler.java @@ -22,12 +22,14 @@ import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.PooledHttpHelper; +import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.net.util.CdcTargetUtil; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlStopSlave; import org.apache.http.entity.ContentType; @@ -39,7 +41,9 @@ */ public class LogicalStopSlaveHandler extends LogicalReplicationBaseHandler { - public LogicalStopSlaveHandler(IRepository repo){ + private static final Logger cdcLogger = SQLRecorderLogger.cdcLogger; + + public LogicalStopSlaveHandler(IRepository repo) { super(repo); } @@ -55,10 +59,12 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { ContentType.APPLICATION_JSON, JSON.toJSONString(sqlNode.getParams()), 10000); } catch (Exception e) { + cdcLogger.error("stop slave error!", e); throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, e); } ResultCode httpResult = JSON.parseObject(res, ResultCode.class); if (httpResult.getCode() != CdcConstants.SUCCESS_CODE) { + cdcLogger.warn("stop slave failed! 
code:" + httpResult.getCode() + ", msg:" + httpResult.getMsg()); throw new TddlRuntimeException(ErrorCode.ERR_REPLICATION_RESULT, httpResult.getMsg()); } return new AffectRowCursor(0); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ShowTransHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ShowTransHandler.java index 587a50bbd..1b060b0f3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ShowTransHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ShowTransHandler.java @@ -23,6 +23,7 @@ import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.executor.sync.ISyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalShow; @@ -52,6 +53,7 @@ public ShowTransHandler(IRepository repo) { public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { final LogicalShow show = (LogicalShow) logicalPlan; final SqlShowTrans showTrans = (SqlShowTrans) show.getNativeSqlNode(); + final boolean isColumnar = showTrans.isColumnar(); ArrayResultCursor result = new ArrayResultCursor("TRANSACTIONS"); result.addColumn("TRANS_ID", DataTypes.StringType); @@ -59,16 +61,20 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { result.addColumn("DURATION_MS", DataTypes.LongType); result.addColumn("STATE", DataTypes.StringType); result.addColumn("PROCESS_ID", DataTypes.LongType); + if (isColumnar) { + result.addColumn("TSO", DataTypes.LongType); + } ISyncAction syncAction; try { - syncAction = (ISyncAction) showTransSyncActionClass.getConstructor(String.class) - .newInstance(executionContext.getSchemaName()); + syncAction = (ISyncAction) showTransSyncActionClass.getConstructor(String.class, boolean.class) + .newInstance(executionContext.getSchemaName(), isColumnar); } catch (Exception e) { throw new TddlRuntimeException(ErrorCode.ERR_CONFIG, e, e.getMessage()); } - List>> results = SyncManagerHelper.sync(syncAction, executionContext.getSchemaName()); + List>> results = SyncManagerHelper.sync(syncAction, executionContext.getSchemaName(), + SyncScope.ALL); for (List> rs : results) { if (rs == null) { @@ -80,7 +86,13 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { final long duration = (Long) row.get("DURATION_MS"); final String state = (String) row.get("STATE"); final long processId = (Long) row.get("PROCESS_ID"); - result.addRow(new Object[] {transId, type, duration, state, processId}); + + if (isColumnar) { + final long tso = (Long) row.get("TSO"); + result.addRow(new Object[] {transId, type, duration, state, processId, tso}); + } else { + result.addRow(new Object[] {transId, type, duration, state, processId}); + } } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ShowTransStatsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ShowTransStatsHandler.java index 308688cef..b3a3ec7f6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ShowTransStatsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ShowTransStatsHandler.java @@ -24,6 +24,7 @@ import 
com.alibaba.polardbx.executor.statistic.StatisticsUtils; import com.alibaba.polardbx.executor.sync.ISyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.stats.TransStatsColumn; @@ -83,7 +84,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { } final String schema = executionContext.getSchemaName(); - List>> results = SyncManagerHelper.sync(syncAction, schema); + List>> results = SyncManagerHelper.sync(syncAction, schema, SyncScope.CURRENT_ONLY); List allStats = new ArrayList<>(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/VirtualViewHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/VirtualViewHandler.java index 1dc67cb96..8e6bc2974 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/VirtualViewHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/VirtualViewHandler.java @@ -29,6 +29,8 @@ import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaCheckRoutinesHandler; import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaCollationsCharsetHandler; import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaCollationsHandler; +import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaColumnarIndexStatusHandler; +import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaColumnarStatusHandler; import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaCreateDatabaseAsBackFillHandler; import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaCreateDatabaseHandler; import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaDdlPlanHandler; @@ -76,6 +78,7 @@ import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaQueryInfoHandler; import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaReactorPerfHandler; import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaRebalanceBackFillHandler; +import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaRebalanceProgressHandler; import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaReplicaStatHandler; import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaRoutinesHandler; import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaSPMHandler; @@ -84,8 +87,11 @@ import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaSchemataHandler; import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaSequencesHandler; import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaSessionPerfHandler; +import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaShowHelpHandler; import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaStatementSummaryHandler; import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaStatementSummaryHistoryHandler; +import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaStatisticsDataHandler; +import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaStatisticsHandler; import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaStorageHandler; import 
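
SHOW TRANS above gains a columnar variant: a TSO column is appended only when isColumnar is set, the sync action is constructed reflectively through the new (String, boolean) constructor, and each sync call now names an explicit scope (SyncScope.ALL for SHOW TRANS, SyncScope.CURRENT_ONLY for SHOW TRANS STATS). A sketch of the assembly, assuming the usual List<List<Map<String, Object>>> shape of sync results:

```java
// Scoped sync plus optional TSO column, condensed from the two hunks above.
ISyncAction syncAction = (ISyncAction) showTransSyncActionClass
    .getConstructor(String.class, boolean.class)             // new two-arg ctor
    .newInstance(executionContext.getSchemaName(), isColumnar);

List<List<Map<String, Object>>> results =
    SyncManagerHelper.sync(syncAction, executionContext.getSchemaName(),
        SyncScope.ALL);                                      // fan out to all nodes

for (List<Map<String, Object>> rs : results) {
    if (rs == null) {
        continue;
    }
    for (Map<String, Object> row : rs) {
        // extract transId, type, duration, state, processId as above, then:
        if (isColumnar) {
            final long tso = (Long) row.get("TSO");          // columnar-only column
            // result.addRow(new Object[] {..., tso});
        }
    }
}
```

The reflective constructor lookup keeps the executor module free of a compile-time dependency on the transaction module that provides the sync action class.
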
com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaStoragePoolInfoHandler; import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaStoragePropertiesHandler; @@ -137,6 +143,9 @@ public VirtualViewHandler(IRepository repo) { super(repo); subHandler = new ArrayList<>(); subHandler.add(new VirtualStatisticHandler(this)); + subHandler.add(new InformationSchemaStatisticsDataHandler(this)); + subHandler.add(new InformationSchemaStatisticsHandler(this)); + subHandler.add(new InformationSchemaSchemataHandler(this)); subHandler.add(new InformationSchemaTablesHandler(this)); subHandler.add(new InformationSchemaSchemataHandler(this)); subHandler.add(new InformationSchemaInformationSchemaTablesHandler(this)); @@ -150,6 +159,8 @@ public VirtualViewHandler(IRepository repo) { subHandler.add(new InformationSchemaWorkloadHandler(this)); subHandler.add(new InformationSchemaQueryInfoHandler(this)); subHandler.add(new InformationSchemaGlobalIndexesHandler(this)); + subHandler.add(new InformationSchemaColumnarIndexStatusHandler(this)); + subHandler.add(new InformationSchemaColumnarStatusHandler(this)); subHandler.add(new InformationSchemaMetadataLockHandler(this)); subHandler.add(new InformationSchemaModuleHandler(this)); subHandler.add(new InformationSchemaModuleEventHandler(this)); @@ -218,6 +229,8 @@ public VirtualViewHandler(IRepository repo) { subHandler.add(new InformationSchemaTraceHandler(this)); subHandler.add(new InformationSchemaReplicaStatHandler(this)); subHandler.add(new InformationSchemaOptimizerAlertHandler(this)); + subHandler.add(new InformationSchemaRebalanceProgressHandler(this)); + subHandler.add(new InformationSchemaShowHelpHandler(this)); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterFileStoragHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterFileStoragHandler.java index 6a5ed1f6a..c39bc2425 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterFileStoragHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterFileStoragHandler.java @@ -31,7 +31,6 @@ /** * @author chenzilin - * @date 2022/2/14 17:08 */ public class LogicalAlterFileStoragHandler extends LogicalCommonDdlHandler { @@ -42,14 +41,16 @@ public LogicalAlterFileStoragHandler(IRepository repo) { @Override protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext executionContext) { LogicalAlterFileStorage logicalAlterFileStorage = - (LogicalAlterFileStorage) logicalDdlPlan; + (LogicalAlterFileStorage) logicalDdlPlan; logicalAlterFileStorage.preparedData(); // god privilege check. 
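
New INFORMATION_SCHEMA views become visible purely by registering a subhandler in the VirtualViewHandler constructor, as the hunks above do for the statistics, columnar-status, rebalance-progress and SHOW HELP views. (The @@ -137 hunk incidentally registers InformationSchemaSchemataHandler a second time; with first-match dispatch the duplicate should be harmless, but it is redundant.) A hypothetical skeleton of such a subhandler; the isSupport/handle split follows the existing subhandler convention, and the view class name here is illustrative:

```java
// Hypothetical subhandler skeleton matching the registrations above.
public class InformationSchemaRebalanceProgressHandler extends BaseVirtualViewSubClassHandler {

    public InformationSchemaRebalanceProgressHandler(VirtualViewHandler virtualViewHandler) {
        super(virtualViewHandler);
    }

    @Override
    public boolean isSupport(VirtualView virtualView) {
        // claim exactly one view so the dispatch loop stays unambiguous
        return virtualView instanceof InformationSchemaRebalanceProgress;
    }

    @Override
    public Cursor handle(VirtualView virtualView, ExecutionContext executionContext,
                         ArrayResultCursor cursor) {
        // fill one row per rebalance job here
        return cursor;
    }
}
```
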
TableValidator.checkGodPrivilege(executionContext); if (logicalAlterFileStorage.relDdl instanceof AlterFileStorageAsOfTimestamp) { - return new AlterFileStorageAsOfTimestampJobFactory(logicalAlterFileStorage.getPreparedData(), executionContext).create(); + return new AlterFileStorageAsOfTimestampJobFactory(logicalAlterFileStorage.getPreparedData(), + executionContext).create(); } else if (logicalAlterFileStorage.relDdl instanceof AlterFileStoragePurgeBeforeTimestamp) { - return new AlterFileStoragePurgeBeforeTimestampJobFactory(logicalAlterFileStorage.getPreparedData(), executionContext).create(); + return new AlterFileStoragePurgeBeforeTimestampJobFactory(logicalAlterFileStorage.getPreparedData(), + executionContext).create(); } else if (logicalAlterFileStorage.relDdl instanceof AlterFileStorageBackup) { return new AlterFileStorageBackupJobFactory(logicalAlterFileStorage.getPreparedData(), executionContext).create(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterFunctionHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterFunctionHandler.java index b369ee03e..8f0871427 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterFunctionHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterFunctionHandler.java @@ -16,21 +16,17 @@ package com.alibaba.polardbx.executor.handler.ddl; -import com.alibaba.polardbx.druid.sql.SQLUtils; -import com.alibaba.polardbx.druid.sql.ast.SQLName; import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.udf.AlterFunctionModifyMetaTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.udf.AlterProcedureTask; +import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcAlterFunctionMarkTask; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; -import com.alibaba.polardbx.executor.pl.PLUtils; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.ddl.BaseDdlOperation; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterFunction; -import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterProcedure; import org.apache.calcite.sql.SqlAlterFunction; -import org.apache.calcite.sql.SqlAlterProcedure; import java.util.ArrayList; import java.util.List; @@ -46,6 +42,7 @@ public DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext exec List taskList = new ArrayList<>(); taskList.add(getAlterFunctionTask(logicalDdlPlan, executionContext)); + taskList.add(getCdcAlterFunctionMarkTask(logicalDdlPlan)); executableDdlJob.addSequentialTasks(taskList); return executableDdlJob; @@ -58,4 +55,9 @@ private AlterFunctionModifyMetaTask getAlterFunctionTask(BaseDdlOperation logica alterFunction.getFunctionName(), alterFunction.getText()); } + + private CdcAlterFunctionMarkTask getCdcAlterFunctionMarkTask(BaseDdlOperation logicalDdlPlan) { + SqlAlterFunction alterFunction = ((LogicalAlterFunction) logicalDdlPlan).getSqlAlterFunction(); + return new CdcAlterFunctionMarkTask(logicalDdlPlan.getSchemaName(), alterFunction.getFunctionName()); + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterProcedureHandler.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterProcedureHandler.java index ffcdca99d..909adae80 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterProcedureHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterProcedureHandler.java @@ -19,6 +19,7 @@ import com.alibaba.polardbx.druid.sql.SQLUtils; import com.alibaba.polardbx.druid.sql.ast.SQLName; import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.udf.AlterProcedureTask; +import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcAlterProcedureMarkTask; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; @@ -43,6 +44,7 @@ public DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext exec List taskList = new ArrayList<>(); taskList.add(getAlterProcedureTask(logicalDdlPlan, executionContext)); + taskList.add(getCdcAlterProcedureMarkTask(logicalDdlPlan, executionContext)); executableDdlJob.addSequentialTasks(taskList); return executableDdlJob; @@ -58,4 +60,12 @@ private AlterProcedureTask getAlterProcedureTask(BaseDdlOperation logicalDdlPlan procedureSchema, SQLUtils.normalize(procedureName.getSimpleName()), alterProcedure.getText()); } + + private CdcAlterProcedureMarkTask getCdcAlterProcedureMarkTask(BaseDdlOperation logicalDdlPlan, + ExecutionContext executionContext) { + SqlAlterProcedure alterProcedure = ((LogicalAlterProcedure) logicalDdlPlan).getSqlAlterProcedure(); + SQLName procedureName = alterProcedure.getProcedureName(); + String procedureSchema = PLUtils.getProcedureSchema(procedureName, executionContext.getSchemaName()); + return new CdcAlterProcedureMarkTask(procedureSchema, SQLUtils.normalize(procedureName.getSimpleName())); + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterStoragePoolHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterStoragePoolHandler.java index 309e303f9..081f1d467 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterStoragePoolHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterStoragePoolHandler.java @@ -19,20 +19,33 @@ import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.balancer.action.BalanceAction; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.ddl.job.factory.storagepool.AlterStoragePoolAddNodeJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.storagepool.AlterStoragePoolDrainNodeJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.storagepool.CreateStoragePoolJobFactory; +import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; +import com.alibaba.polardbx.executor.ddl.newengine.job.TransientDdlJob; +import com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils; import com.alibaba.polardbx.executor.handler.LogicalRebalanceHandler; +import 
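
Both ALTER FUNCTION and ALTER PROCEDURE above now append a CDC mark task after the metadata task, so that downstream CDC replication observes the PL change only once the metadata update has succeeded. The ordering guarantee is exactly what addSequentialTasks provides:

```java
// From LogicalAlterProcedureHandler above: meta change first, CDC mark second.
List<DdlTask> taskList = new ArrayList<>();
taskList.add(getAlterProcedureTask(logicalDdlPlan, executionContext));          // 1. modify meta
taskList.add(getCdcAlterProcedureMarkTask(logicalDdlPlan, executionContext));   // 2. mark for CDC
executableDdlJob.addSequentialTasks(taskList);                                  // strict order
```
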
com.alibaba.polardbx.executor.physicalbackfill.PhysicalBackfillUtils; +import com.alibaba.polardbx.executor.scaleout.ScaleOutUtils; import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.rel.ddl.BaseDdlOperation; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterStoragePool; +import org.apache.calcite.rel.RelNode; import java.sql.Connection; import java.sql.ResultSet; import java.sql.Statement; +import java.text.DecimalFormat; +import java.util.List; public class LogicalAlterStoragePoolHandler extends LogicalCommonDdlHandler { @@ -85,4 +98,53 @@ private boolean releaseLock(Connection conn, String lockResource) { return false; } } +// +// @Override +// public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { +// BaseDdlOperation logicalDdlPlan = (BaseDdlOperation) logicalPlan; +// +// initDdlContext(logicalDdlPlan, executionContext); +// +// // Validate the plan on file storage first +// TableValidator.validateTableEngine(logicalDdlPlan, executionContext); +// // Validate the plan first and then return immediately if needed. +// boolean returnImmediately = validatePlan(logicalDdlPlan, executionContext); +// +// boolean isNewPartDb = DbInfoManager.getInstance().isNewPartitionDb(logicalDdlPlan.getSchemaName()); +// +// if (isNewPartDb) { +// setPartitionDbIndexAndPhyTable(logicalDdlPlan); +// } else { +// setDbIndexAndPhyTable(logicalDdlPlan); +// } +// +// // Build a specific DDL job by subclass. +// DdlJob ddlJob = returnImmediately ? +// new TransientDdlJob() : +// buildDdlJob(logicalDdlPlan, executionContext); +// +// // Validate the DDL job before request. +// validateJob(logicalDdlPlan, ddlJob, executionContext); +// +// +// // Handle the client DDL request on the worker side. +// handleDdlRequest(ddlJob, executionContext); +// +// if (executionContext.getDdlContext().isSubJob()) { +// return buildSubJobResultCursor(ddlJob, executionContext); +// } +// return buildCursor(logicalDdlPlan, ddlJob, executionContext); +// } +// +// protected Cursor buildCursor(RelNode logicalPlan, DdlJob ddlJob, ExecutionContext ec) { +// +// ArrayResultCursor result = new ArrayResultCursor("AlterStoragePool"); +// result.addColumn("PLAN_ID", DataTypes.LongType); +// Long planId = fetchPlanIdFromMetaDb(ddlJob, ec); +// +// +// +// +// return result; +// } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableAddPartitionProxyHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableAddPartitionProxyHandler.java index d596c8e4e..8d3996712 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableAddPartitionProxyHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableAddPartitionProxyHandler.java @@ -68,7 +68,6 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e * sql 'split partition default_part into (partition p2 values in(3,4), partition p3 values in(5,6), partition default_part values in(default))' * 2. 
use new sql to build split partition subJob * */ - String splitSql; if (!isAddSubPartition) { splitSql = AlterTableGroupUtils.convertAddListRelToSplitListSql(alterTableAddPartition, true, diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableExtractPartitionHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableExtractPartitionHandler.java index adf2c83e0..f31f18fb0 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableExtractPartitionHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableExtractPartitionHandler.java @@ -19,7 +19,6 @@ import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.executor.ddl.job.factory.AlterTableExtractPartitionJobFactory; -import com.alibaba.polardbx.executor.ddl.job.factory.AlterTableGroupExtractPartitionJobFactory; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; import com.alibaba.polardbx.executor.partitionmanagement.AlterTableGroupUtils; import com.alibaba.polardbx.executor.spi.IRepository; @@ -27,14 +26,8 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.ddl.BaseDdlOperation; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterTableExtractPartition; -import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterTableGroupExtractPartition; -import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterTableSplitPartitionByHotValue; import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableExtractPartitionPreparedData; -import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTableGroupExtractPartitionPreparedData; import org.apache.calcite.rel.ddl.AlterTable; -import org.apache.calcite.sql.SqlAlterTable; -import org.apache.calcite.sql.SqlAlterTableGroup; -import org.apache.calcite.sql.SqlAlterTableSplitPartitionByHotValue; import org.apache.calcite.sql.SqlIdentifier; import org.apache.calcite.util.Util; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableGroupMovePartitionHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableGroupMovePartitionHandler.java index 0f7be22db..760fbd1a2 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableGroupMovePartitionHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableGroupMovePartitionHandler.java @@ -23,6 +23,7 @@ import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; import com.alibaba.polardbx.executor.ddl.newengine.job.TransientDdlJob; import com.alibaba.polardbx.executor.partitionmanagement.AlterTableGroupUtils; +import com.alibaba.polardbx.executor.physicalbackfill.PhysicalBackfillUtils; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.optimizer.context.ExecutionContext; @@ -58,7 +59,9 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e return new TransientDdlJob(); } - logicalAlterTableGroupMovePartition.preparedData(executionContext); + boolean usePhysicalBackfill = + PhysicalBackfillUtils.isSupportForPhysicalBackfill(logicalDdlPlan.getSchemaName(), executionContext); + 
logicalAlterTableGroupMovePartition.preparedData(executionContext, usePhysicalBackfill); //CheckOSSArchiveUtil.checkWithoutOSS(logicalAlterTableGroupMovePartition.getPreparedData()); return AlterTableGroupMovePartitionJobFactory .create(logicalAlterTableGroupMovePartition.relDdl, logicalAlterTableGroupMovePartition.getPreparedData(), diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableHandler.java index d0f24c8c5..3340a094f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableHandler.java @@ -22,26 +22,39 @@ import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.properties.ConnectionProperties; +import com.alibaba.polardbx.common.properties.ParamManager; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.druid.sql.SQLUtils; import com.alibaba.polardbx.druid.sql.ast.SQLIndexDefinition; +import com.alibaba.polardbx.druid.sql.ast.SQLName; import com.alibaba.polardbx.druid.sql.ast.SQLStatement; +import com.alibaba.polardbx.druid.sql.ast.expr.SQLIdentifierExpr; +import com.alibaba.polardbx.druid.sql.ast.expr.SQLNullExpr; import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableAddColumn; import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableAddConstraint; import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableAddIndex; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableDropColumnItem; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableDropPrimaryKey; import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableItem; import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableStatement; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLCharacterDataType; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLColumnConstraint; import com.alibaba.polardbx.druid.sql.ast.statement.SQLColumnDefinition; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLColumnPrimaryKey; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLColumnReference; import com.alibaba.polardbx.druid.sql.ast.statement.SQLCreateTableStatement; import com.alibaba.polardbx.druid.sql.ast.statement.SQLNotNullConstraint; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLSelectOrderByItem; import com.alibaba.polardbx.druid.sql.ast.statement.SQLTableElement; import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.MySqlKey; import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.MySqlPrimaryKey; import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.MySqlUnique; import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlAlterTableChangeColumn; +import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlAlterTableModifyColumn; import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlCreateTableStatement; import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlTableIndex; import 
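
MOVE PARTITION above now probes once, up front, whether the schema supports physical backfill (file-level copy rather than row-by-row backfill, as the PhysicalBackfillUtils name suggests) and threads the answer into preparedData, so the job factory can plan accordingly. The shape, taken from the hunk:

```java
// Decide the backfill flavor once, then let preparedData shape the job.
boolean usePhysicalBackfill =
    PhysicalBackfillUtils.isSupportForPhysicalBackfill(logicalDdlPlan.getSchemaName(),
        executionContext);
logicalAlterTableGroupMovePartition.preparedData(executionContext, usePhysicalBackfill);
```
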
com.alibaba.polardbx.druid.util.JdbcConstants; @@ -52,6 +65,7 @@ import com.alibaba.polardbx.executor.ddl.job.builder.DdlPhyPlanBuilder; import com.alibaba.polardbx.executor.ddl.job.builder.gsi.CreateGlobalIndexBuilder; import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData; +import com.alibaba.polardbx.executor.ddl.job.factory.AlterTableAddLogicalForeignKeyJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.AlterTableGeneratedColumnJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.AlterTableJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.AlterTableOnlineModifyColumnJobFactory; @@ -61,15 +75,18 @@ import com.alibaba.polardbx.executor.ddl.job.factory.gsi.AlterGsiVisibilityJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.gsi.CreatePartitionGsiJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.gsi.DropGsiJobFactory; -import com.alibaba.polardbx.executor.ddl.job.factory.gsi.ModifyPartitionKeyJobFactory; +import com.alibaba.polardbx.executor.ddl.job.factory.gsi.RebuildTableJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.gsi.RenameGsiJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.gsi.RepartitionJobFactory; +import com.alibaba.polardbx.executor.ddl.job.factory.gsi.columnar.CreateColumnarIndexJobFactory; +import com.alibaba.polardbx.executor.ddl.job.factory.gsi.columnar.DropColumnarIndexJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.oss.AlterTableAsOfTimeStampJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.oss.AlterTableDropOssFileJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.oss.AlterTablePurgeBeforeTimeStampJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.oss.MoveOSSDataJobFactory; import com.alibaba.polardbx.executor.ddl.job.task.basic.AlterColumnDefaultTask; import com.alibaba.polardbx.executor.ddl.job.task.basic.TableSyncTask; +import com.alibaba.polardbx.executor.ddl.job.task.basic.UpdateTablesVersionTask; import com.alibaba.polardbx.executor.ddl.job.task.gsi.GsiStatisticsInfoSyncTask; import com.alibaba.polardbx.executor.ddl.job.task.gsi.StatisticSampleTask; import com.alibaba.polardbx.executor.ddl.job.task.gsi.ValidateTableVersionTask; @@ -93,6 +110,7 @@ import com.alibaba.polardbx.executor.handler.LogicalShowCreateTableHandler; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.executor.sync.GsiStatisticsSyncAction; +import com.alibaba.polardbx.executor.utils.DdlUtils; import com.alibaba.polardbx.gms.metadb.table.ColumnsRecord; import com.alibaba.polardbx.gms.tablegroup.PartitionGroupRecord; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; @@ -117,6 +135,7 @@ import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterTable; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterTableGroupAddPartition; import com.alibaba.polardbx.optimizer.core.rel.ddl.data.AlterTablePreparedData; +import com.alibaba.polardbx.optimizer.core.rel.ddl.data.RebuildTablePrepareData; import com.alibaba.polardbx.optimizer.core.rel.ddl.data.RenameLocalIndexPreparedData; import com.alibaba.polardbx.optimizer.core.rel.ddl.data.RenameTablePreparedData; import com.alibaba.polardbx.optimizer.core.rel.ddl.data.RepartitionPrepareData; @@ -133,6 +152,7 @@ import com.alibaba.polardbx.optimizer.utils.RelUtils; import com.google.common.collect.ImmutableList; import com.alibaba.polardbx.optimizer.core.row.Row; +import com.alibaba.polardbx.optimizer.parse.FastsqlParser; import 
com.alibaba.polardbx.optimizer.parse.FastsqlUtils; import com.alibaba.polardbx.optimizer.parse.TableMetaParser; import com.alibaba.polardbx.optimizer.parse.visitor.ContextParameters; @@ -142,9 +162,13 @@ import com.alibaba.polardbx.optimizer.partition.PartitionSpec; import com.alibaba.polardbx.optimizer.partition.common.PartitionStrategy; import com.alibaba.polardbx.optimizer.rule.TddlRuleManager; +import com.alibaba.polardbx.optimizer.sql.sql2rel.TddlSqlToRelConverter; +import com.alibaba.polardbx.optimizer.utils.ForeignKeyUtils; +import com.alibaba.polardbx.rule.TableRule; import com.alibaba.polardbx.optimizer.utils.ForeignKeyUtils; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; +import com.google.common.collect.Maps; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.ddl.AlterTable; import org.apache.calcite.rex.RexNode; @@ -174,11 +198,16 @@ import java.text.MessageFormat; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; +import java.util.Locale; import java.util.Map; +import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; @@ -186,6 +215,8 @@ import java.util.stream.Collectors; import static com.alibaba.polardbx.common.TddlConstants.IMPLICIT_COL_NAME; +import static com.alibaba.polardbx.common.TddlConstants.IMPLICIT_KEY_NAME; +import static com.alibaba.polardbx.executor.gms.util.AlterRepartitionUtils.generateSqlPartitionKey; import static com.alibaba.polardbx.executor.gms.util.AlterRepartitionUtils.getShardColumnsFromPartitionBy; public class LogicalAlterTableHandler extends LogicalCommonDdlHandler { @@ -197,6 +228,13 @@ public LogicalAlterTableHandler(IRepository repo) { @Override protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext executionContext) { + final Long versionId = DdlUtils.generateVersionId(executionContext); + return doBuildDdlJob(logicalDdlPlan, versionId, executionContext); + } + + protected DdlJob doBuildDdlJob(BaseDdlOperation logicalDdlPlan, + Long ddlVersionId, + ExecutionContext executionContext) { LogicalAlterTable logicalAlterTable = (LogicalAlterTable) logicalDdlPlan; if (logicalAlterTable.isAllocateLocalPartition()) { @@ -225,12 +263,21 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e if (logicalAlterTable.isRepartition() || logicalAlterTable.isCreateGsi() - || logicalAlterTable.isCreateClusteredIndex()) { + || logicalAlterTable.isCreateClusteredIndex() + || logicalAlterTable.isCreateCci()) { initPrimaryTableDefinition(logicalAlterTable, executionContext); } + logicalAlterTable.validateColumnar(); + + if (logicalAlterTable.validateOnlineModify(executionContext, false) + || logicalAlterTable.autoConvertToOmc(executionContext)) { + return buildRebuildTableJob(logicalAlterTable, true, executionContext); + } + logicalAlterTable = rewriteExpressionIndex(logicalAlterTable, executionContext); logicalAlterTable.prepareData(); + logicalAlterTable.setDdlVersionId(ddlVersionId); if (logicalAlterTable.isDropFile()) { return buildDropFileJob(logicalDdlPlan, executionContext, logicalAlterTable); @@ -244,9 +291,13 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e return buildAlterTablePurgeBeforeTimeStamp(logicalDdlPlan, executionContext, logicalAlterTable); } else if (logicalAlterTable.isRepartition()) { 
return buildRepartitionJob(logicalAlterTable, executionContext); + } else if (logicalAlterTable.isCreateCci()) { + return buildCreateCciJob(logicalAlterTable, executionContext); } else if (logicalAlterTable.isCreateGsi() || logicalAlterTable.isCreateClusteredIndex()) { return buildCreateGsiJob(logicalAlterTable, executionContext); + } else if (logicalAlterTable.isDropCci()) { + return buildDropCciJob(logicalAlterTable, executionContext); } else if (logicalAlterTable.isDropGsi()) { return buildDropGsiJob(logicalAlterTable, executionContext); } else if (logicalAlterTable.isAlterTableRenameGsi()) { @@ -256,9 +307,9 @@ } else if (logicalAlterTable.isAlterIndexVisibility()) { return buildAlterIndexVisibilityJob(logicalAlterTable, executionContext); } else { - if (logicalAlterTable.getAlterTablePreparedData().isOnlineModifyColumn() - || logicalAlterTable.getAlterTablePreparedData().isOnlineChangeColumn()) { - return buildAlterTableOnlineModifyColumnJob(logicalAlterTable, executionContext); + if (logicalAlterTable.getAlterTablePreparedData().isNeedRepartition()) { + // for drop primary key / add primary key + return buildRebuildTableJob(logicalAlterTable, false, executionContext); } else { return buildAlterTableJob(logicalAlterTable, executionContext); } @@ -576,6 +627,7 @@ private DdlJob buildAlterTableEngineJob(BaseDdlOperation logicalDdlPlan, Executi case LOCAL_DISK: case EXTERNAL_DISK: case NFS: + case ABS: // innodb -> file store return new LogicalAlterTableEngineHandler(repo, sourceEngine, targetEngine) .buildDdlJob(logicalDdlPlan, executionContext); @@ -588,6 +640,7 @@ private DdlJob buildAlterTableEngineJob(BaseDdlOperation logicalDdlPlan, Executi case OSS: case LOCAL_DISK: case EXTERNAL_DISK: + case ABS: case NFS: { switch (targetEngine) { case INNODB: @@ -798,6 +851,7 @@ private DdlJob buildAlterTableOnlineModifyColumnJob(LogicalAlterTable logicalAlt } private DdlJob buildAlterTableJob(LogicalAlterTable logicalAlterTable, ExecutionContext executionContext) { + // Needs refactoring: we should not build the job this way. 
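
doBuildDdlJob above now funnels three situations into a single rebuild-table path: explicit online column modification, ALTERs auto-converted to OMC, and repartitioning changes such as dropping and re-adding the primary key. The decision order, condensed from the surrounding hunks with the other branches elided:

```java
// Dispatch order condensed from doBuildDdlJob above; the boolean tells
// the rebuild path whether it was triggered by OMC (true) or by a
// repartitioning ALTER such as drop/add primary key (false).
if (logicalAlterTable.validateOnlineModify(executionContext, false)
    || logicalAlterTable.autoConvertToOmc(executionContext)) {
    return buildRebuildTableJob(logicalAlterTable, true, executionContext);
}
// ... repartition / CCI / GSI / rename / visibility branches elided ...
if (logicalAlterTable.getAlterTablePreparedData().isNeedRepartition()) {
    return buildRebuildTableJob(logicalAlterTable, false, executionContext);
}
return buildAlterTableJob(logicalAlterTable, executionContext);
```
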
AlterTablePreparedData alterTablePreparedData = logicalAlterTable.getAlterTablePreparedData(); AlterTableWithGsiPreparedData gsiData = logicalAlterTable.getAlterTableWithGsiPreparedData(); @@ -825,49 +879,6 @@ private DdlJob buildAlterTableJob(LogicalAlterTable logicalAlterTable, Execution logicalAlterTable, executionContext); ddlJob = alterTableJobFactory.create(); - } else if (alterTablePreparedData.isNeedRepartition()) { - // need repartition to modify column - Map tableNameMap = new HashMap<>(); - Map virtualColumnMap = new HashMap<>(); - Map columnNewDef = new HashMap<>(); - AtomicBoolean primaryKeyChanged = new AtomicBoolean(false); - initPrimaryTableDefinition4ModifySk(logicalAlterTable, executionContext, gsiData, tableNameMap, - virtualColumnMap, columnNewDef, primaryKeyChanged); - - if (primaryKeyChanged.get()) { - // alter table drop add primary key, but primary key is not changed - return new TransientDdlJob(); - } - - logicalAlterTable.prepareModifySk(alterTablePreparedData.getNewTableMeta()); - - List globalIndexesPreparedData = - logicalAlterTable.getCreateGlobalIndexesPreparedData(); - - Map globalIndexPrepareData = new HashMap<>(); - for (CreateGlobalIndexPreparedData createGsiPreparedData : globalIndexesPreparedData) { - DdlPhyPlanBuilder builder = CreateGlobalIndexBuilder.create( - logicalAlterTable.relDdl, - createGsiPreparedData, - executionContext).build(); - - globalIndexPrepareData.put(createGsiPreparedData, builder.genPhysicalPlanData()); - } - - ModifyPartitionKeyJobFactory jobFactory = new ModifyPartitionKeyJobFactory( - logicalAlterTable.getSchemaName(), - logicalAlterTable.getTableName(), - tableNameMap, - globalIndexPrepareData, - virtualColumnMap, - columnNewDef, - physicalPlanData, - executionContext - ); - jobFactory.setAlterDefaultColumns(alterTablePreparedData.getAlterDefaultColumns()); - jobFactory.setNeedDropImplicitKey(alterTablePreparedData.isNeedDropImplicitKey()); - - ddlJob = jobFactory.create(); } else if (logicalAlterTable.isAddGeneratedColumn() || logicalAlterTable.isDropGeneratedColumn()) { if (logicalAlterTable.isDropGeneratedColumn()) { List colNames = new ArrayList<>(); @@ -946,9 +957,19 @@ private DdlJob buildAlterTableJob(LogicalAlterTable logicalAlterTable, Execution new AlterTableGeneratedColumnJobFactory(physicalPlanData, alterTablePreparedData, logicalAlterTable, executionContext); ddlJob = jobFactory.create(); + } else if (logicalAlterTable.isAddLogicalForeignKeyOnly()) { + AlterTableAddLogicalForeignKeyJobFactory alterTableAddLogicalForeignKeyJobFactory = + new AlterTableAddLogicalForeignKeyJobFactory(physicalPlanData, alterTablePreparedData, + logicalAlterTable, executionContext); + ddlJob = alterTableAddLogicalForeignKeyJobFactory.create(); } else { + ParamManager paramManager = executionContext.getParamManager(); + boolean supportTwoPhaseDdl = paramManager.getBoolean(ConnectionParams.ENABLE_DRDS_MULTI_PHASE_DDL); + String finalStatus = paramManager.getString(ConnectionParams.TWO_PHASE_DDL_FINAL_STATUS); AlterTableJobFactory alterTableJobFactory = new AlterTableJobFactory(physicalPlanData, alterTablePreparedData, logicalAlterTable, executionContext); + alterTableJobFactory.setSupportTwoPhaseDdl(supportTwoPhaseDdl); + alterTableJobFactory.setFinalStatus(finalStatus); TableMeta tableMeta = OptimizerContext.getContext(alterTablePreparedData.getSchemaName()).getLatestSchemaManager() @@ -956,6 +977,9 @@ private DdlJob buildAlterTableJob(LogicalAlterTable logicalAlterTable, Execution if (tableMeta.isGsi()) { 
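
For the plain ALTER path above, two connection parameters now opt a statement into the multi-phase physical DDL engine; both parameter names appear verbatim in the hunk, while the comments are interpretation:

```java
// Two-phase DDL wiring from buildAlterTableJob above.
ParamManager paramManager = executionContext.getParamManager();
boolean supportTwoPhaseDdl =
    paramManager.getBoolean(ConnectionParams.ENABLE_DRDS_MULTI_PHASE_DDL);
String finalStatus =
    paramManager.getString(ConnectionParams.TWO_PHASE_DDL_FINAL_STATUS);

AlterTableJobFactory alterTableJobFactory = new AlterTableJobFactory(
    physicalPlanData, alterTablePreparedData, logicalAlterTable, executionContext);
alterTableJobFactory.setSupportTwoPhaseDdl(supportTwoPhaseDdl);  // enable per statement
alterTableJobFactory.setFinalStatus(finalStatus);                // phase to stop at (assumption)
```
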
alterTableJobFactory.withAlterGsi(true, tableMeta.getGsiTableMetaBean().gsiMetaBean.tableName); } + if (tableMeta.isColumnar() || tableMeta.withColumnar()) { + alterTablePreparedData.setColumnar(true); + } ddlJob = alterTableJobFactory.create(); } @@ -1009,33 +1033,33 @@ private DdlJob buildAlterTableJob(LogicalAlterTable logicalAlterTable, Execution createIndexWithGsi.getLocalIndexPreparedDataList().stream().anyMatch(x -> x.canEquals(alter))) { return true; } - return alter.isNeedRepartition(); + if (alter.isColumnar()) { + return true; + } + return false; }); for (AlterTablePreparedData clusteredTable : alterOnGsi) { - if (!clusteredTable.isNeedRepartition()) { - clusteredTable.setColumnAfterAnother(new ArrayList<>()); - clusteredTable.setIsGsi(true); + clusteredTable.setColumnAfterAnother(new ArrayList<>()); + clusteredTable.setIsGsi(true); - DdlPhyPlanBuilder builder = - AlterTableBuilder.create(logicalAlterTable.relDdl, clusteredTable, executionContext).build(); + DdlPhyPlanBuilder builder = + AlterTableBuilder.create(logicalAlterTable.relDdl, clusteredTable, executionContext).build(); - PhysicalPlanData clusterIndexPlan = builder.genPhysicalPlanData(); - clusterIndexPlan.setSequence(null); + PhysicalPlanData clusterIndexPlan = builder.genPhysicalPlanData(); + clusterIndexPlan.setSequence(null); - AlterTableJobFactory jobFactory = - new AlterTableJobFactory(clusterIndexPlan, clusteredTable, logicalAlterTable, executionContext); - jobFactory.validateExistence(false); - jobFactory.withAlterGsi(true, alterTablePreparedData.getTableName()); + AlterTableJobFactory jobFactory = + new AlterTableJobFactory(clusterIndexPlan, clusteredTable, logicalAlterTable, executionContext); + jobFactory.validateExistence(false); + jobFactory.withAlterGsi(true, alterTablePreparedData.getTableName()); - ExecutableDdlJob clusterIndexJob = jobFactory.create(); - ddlJob.appendJob(clusterIndexJob); - } + ExecutableDdlJob clusterIndexJob = jobFactory.create(); + ddlJob.appendJob(clusterIndexJob); } } - if (!alterTablePreparedData.isNeedRepartition() - && CollectionUtils.isNotEmpty(alterTablePreparedData.getAlterDefaultColumns())) { + if (CollectionUtils.isNotEmpty(alterTablePreparedData.getAlterDefaultColumns())) { String schemaName = physicalPlanData.getSchemaName(); String logicalTableName = physicalPlanData.getLogicalTableName(); @@ -1053,6 +1077,151 @@ private DdlJob buildAlterTableJob(LogicalAlterTable logicalAlterTable, Execution ddlJob.labelAsTail(endAlterColumnDefaultSyncTask); } + // update primary table version + if (!alterTablePreparedData.isNeedRepartition()) { + String schemaName = physicalPlanData.getSchemaName(); + String logicalTableName = physicalPlanData.getLogicalTableName(); + + UpdateTablesVersionTask updateTablesVersionTask = + new UpdateTablesVersionTask(schemaName, + Collections.singletonList(logicalTableName)); + TableSyncTask finalSyncTask = new TableSyncTask(schemaName, logicalTableName); + + ddlJob.appendTask(updateTablesVersionTask); + ddlJob.addTaskRelationship(updateTablesVersionTask, finalSyncTask); + } + + tableVersions.put(alterTablePreparedData.getTableName(), + alterTablePreparedData.getTableVersion()); + ValidateTableVersionTask validateTableVersionTask = + new ValidateTableVersionTask(alterTablePreparedData.getSchemaName(), tableVersions); + + ddlJob.addTask(validateTableVersionTask); + ddlJob.addTaskRelationship(validateTableVersionTask, ddlJob.getHead()); + + return ddlJob; + } + + private void validateGenerateColumn(LogicalAlterTable logicalAlterTable, List columns, + 
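
The tail of buildAlterTableJob above now brackets every non-repartition ALTER with version bookkeeping: a ValidateTableVersionTask is wired in front of the job head so the job aborts early if the table changed since planning, and an UpdateTablesVersionTask plus TableSyncTask run at the tail so every node reloads the bumped version. Sketch of the wiring, using the variables from the hunk:

```java
// Version bookkeeping around the ALTER job, from the hunk above.
UpdateTablesVersionTask updateTablesVersionTask =
    new UpdateTablesVersionTask(schemaName, Collections.singletonList(logicalTableName));
TableSyncTask finalSyncTask = new TableSyncTask(schemaName, logicalTableName);
ddlJob.appendTask(updateTablesVersionTask);                         // bump version at the tail
ddlJob.addTaskRelationship(updateTablesVersionTask, finalSyncTask); // then resync all nodes

ValidateTableVersionTask validateTableVersionTask =
    new ValidateTableVersionTask(alterTablePreparedData.getSchemaName(), tableVersions);
ddlJob.addTask(validateTableVersionTask);
ddlJob.addTaskRelationship(validateTableVersionTask, ddlJob.getHead()); // validate first
```
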
ExecutionContext ec) { + Pair primaryTableInfo = genPrimaryTableInfo(logicalAlterTable, ec); + + final List statementList = + SQLUtils.parseStatementsWithDefaultFeatures(primaryTableInfo.getKey(), JdbcConstants.MYSQL); + final MySqlCreateTableStatement createTableStmt = (MySqlCreateTableStatement) statementList.get(0); + + final boolean enableWithGenCol = + ec.getParamManager().getBoolean(ConnectionParams.ENABLE_OMC_WITH_GEN_COL); + + // Check if table has any generated column + for (SQLColumnDefinition columnDefinition : createTableStmt.getColumnDefinitions()) { + if (columnDefinition.getGeneratedAlawsAs() != null) { + if (!enableWithGenCol) { + // For now, we do not allow OMC on table with generated column, because on mysql we can not add or + // drop column before a generated column using inplace algorithm + throw new TddlRuntimeException(ErrorCode.ERR_OPTIMIZER, + String.format("Can not modify column [%s] on table with generated column [%s].", + columns, columnDefinition.getColumnName())); + } + + String expr = columnDefinition.getGeneratedAlawsAs().toString(); + Set refCols = GeneratedColumnUtil.getReferencedColumns(expr); + + for (String column : columns) { + if (refCols.contains(column)) { + throw new TddlRuntimeException(ErrorCode.ERR_OPTIMIZER, + String.format("Can not modify column [%s] referenced by a generated column [%s].", + column, columnDefinition.getColumnName())); + } + } + } + } + } + + private DdlJob buildRebuildTableJob(LogicalAlterTable logicalAlterTable, boolean omc, ExecutionContext ec) { + if (omc) { + logicalAlterTable.prepareOnlineModifyColumn(); + } + + ec.getParamManager().getProps() + .put(ConnectionProperties.ONLY_MANUAL_TABLEGROUP_ALLOW, Boolean.FALSE.toString()); + + AlterTablePreparedData alterTablePreparedData = logicalAlterTable.getAlterTablePreparedData(); + AlterTableWithGsiPreparedData gsiData = logicalAlterTable.getAlterTableWithGsiPreparedData(); + + DdlPhyPlanBuilder alterTableBuilder = + AlterTableBuilder.create(logicalAlterTable.relDdl, alterTablePreparedData, ec).build(); + PhysicalPlanData physicalPlanData = alterTableBuilder.genPhysicalPlanData(); + + List changedColumns = new ArrayList<>(); + if (GeneralUtil.isNotEmpty(alterTablePreparedData.getUpdatedColumns())) { + changedColumns.addAll(alterTablePreparedData.getUpdatedColumns()); + } + + if (GeneralUtil.isNotEmpty(alterTablePreparedData.getChangedColumns())) { + changedColumns.addAll(alterTablePreparedData.getChangedColumns().stream().map(Pair::getKey).collect( + Collectors.toList())); + } + + // validate generate column + validateGenerateColumn(logicalAlterTable, changedColumns, ec); + + if (ec.getDdlContext().getDdlStmt().contains(ForeignKeyUtils.PARTITION_FK_SUB_JOB)) { + ec.getDdlContext().setFkRepartition(true); + } + + AtomicBoolean primaryKeyNotChanged = new AtomicBoolean(false); + RebuildTablePrepareData rebuildTablePrepareData = new RebuildTablePrepareData(); + initPrimaryTableDefinition4RebuildTable(logicalAlterTable, ec, gsiData, primaryKeyNotChanged, + rebuildTablePrepareData); + + if (primaryKeyNotChanged.get()) { + // alter table drop add primary key, but primary key is not changed + return new TransientDdlJob(); + } + + logicalAlterTable.prepareModifySk(alterTablePreparedData.getNewTableMeta()); + + List globalIndexesPreparedData = + logicalAlterTable.getCreateGlobalIndexesPreparedData(); + + Map relatedTableGroupInfo = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); + globalIndexesPreparedData.stream().forEach(o -> relatedTableGroupInfo.putAll(o.getRelatedTableGroupInfo())); 
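
validateGenerateColumn above enforces the OMC restriction on generated columns: unless ENABLE_OMC_WITH_GEN_COL is set, a table containing any generated column is rejected outright, and even with the switch on, a column referenced by a generated-column expression may not be modified. (getGeneratedAlawsAs, spelling included, is the accessor name the Druid AST actually exposes, as the hunk shows.) The core check:

```java
// Core of validateGenerateColumn above: refuse to modify any column
// that a generated-column expression references.
for (SQLColumnDefinition columnDefinition : createTableStmt.getColumnDefinitions()) {
    if (columnDefinition.getGeneratedAlawsAs() == null) {
        continue;                                   // not a generated column
    }
    String expr = columnDefinition.getGeneratedAlawsAs().toString();
    Set<String> refCols = GeneratedColumnUtil.getReferencedColumns(expr);
    for (String column : columns) {                 // columns being modified
        if (refCols.contains(column)) {
            throw new TddlRuntimeException(ErrorCode.ERR_OPTIMIZER,
                String.format("Can not modify column [%s] referenced by a generated column [%s].",
                    column, columnDefinition.getColumnName()));
        }
    }
}
```
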
+ + Map indexTablePreparedDataMap = new LinkedHashMap<>(); + + Map globalIndexPrepareData = new HashMap<>(); + for (CreateGlobalIndexPreparedData createGsiPreparedData : globalIndexesPreparedData) { + createGsiPreparedData.getRelatedTableGroupInfo().putAll(relatedTableGroupInfo); + DdlPhyPlanBuilder builder = CreateGlobalIndexBuilder.create( + logicalAlterTable.relDdl, + createGsiPreparedData, + indexTablePreparedDataMap, + ec).build(); + + indexTablePreparedDataMap.put(createGsiPreparedData.getIndexTableName(), createGsiPreparedData); + globalIndexPrepareData.put(createGsiPreparedData, builder.genPhysicalPlanData()); + } + + RebuildTableJobFactory jobFactory = new RebuildTableJobFactory( + logicalAlterTable.getSchemaName(), + logicalAlterTable.getTableName(), + globalIndexPrepareData, + rebuildTablePrepareData, + physicalPlanData, + ec + ); + jobFactory.setAlterDefaultColumns(alterTablePreparedData.getAlterDefaultColumns()); + jobFactory.setNeedDropImplicitKey(alterTablePreparedData.isNeedDropImplicitKey()); + jobFactory.setChangedColumns(changedColumns); + + ExecutableDdlJob ddlJob = jobFactory.create(); + Optional opt = globalIndexesPreparedData.stream().filter(o -> o.isNeedToGetTableGroupLock()).findAny(); + if (opt.isPresent()) { + //create tablegroup firstly + return ddlJob; + } + Map tableVersions = new HashMap<>(); tableVersions.put(alterTablePreparedData.getTableName(), alterTablePreparedData.getTableVersion()); ValidateTableVersionTask validateTableVersionTask = @@ -1130,6 +1299,21 @@ private DdlJob buildRepartitionJob(LogicalAlterTable logicalAlterTable, Executio ).create(); } + private DdlJob buildCreateCciJob(LogicalAlterTable logicalAlterTable, + ExecutionContext executionContext) { + final CreateGlobalIndexPreparedData cciPreparedData = logicalAlterTable + .getAlterTableWithGsiPreparedData() + .getCreateIndexWithGsiPreparedData() + .getGlobalIndexPreparedData(); + + ExecutableDdlJob cciJob = CreateColumnarIndexJobFactory.create4CreateCci( + logicalAlterTable.relDdl, + cciPreparedData, + executionContext); + + return cciJob; + } + private DdlJob buildCreateGsiJob(LogicalAlterTable logicalAlterTable, ExecutionContext executionContext) { AlterTableWithGsiPreparedData alterTableWithGsiPreparedData = logicalAlterTable.getAlterTableWithGsiPreparedData(); @@ -1163,7 +1347,7 @@ private DdlJob buildCreateGsiJob(LogicalAlterTable logicalAlterTable, ExecutionC } gsiJob.addSequentialTasksAfter(gsiJob.getTail(), Lists.newArrayList(new StatisticSampleTask( globalIndexPreparedData.getSchemaName(), - globalIndexPreparedData.getIndexTableName() + globalIndexPreparedData.getPrimaryTableName() ))); gsiJob.appendTask( new GsiStatisticsInfoSyncTask( @@ -1229,6 +1413,25 @@ private DdlJob buildDropGsiJob(LogicalAlterTable logicalAlterTable, ExecutionCon return baseJob; } + public DdlJob buildDropCciJob(LogicalAlterTable logicalAlterTable, ExecutionContext executionContext) { + final AlterTableWithGsiPreparedData alterTableWithGsiPreparedData = + logicalAlterTable.getAlterTableWithGsiPreparedData(); + final DropIndexWithGsiPreparedData dropIndexWithGsiPreparedData = + alterTableWithGsiPreparedData.getDropIndexWithGsiPreparedData(); + final DropGlobalIndexPreparedData preparedData = dropIndexWithGsiPreparedData.getGlobalIndexPreparedData(); + + final Map tableVersions = new HashMap<>(); + tableVersions.put(preparedData.getPrimaryTableName(), preparedData.getTableVersion()); + final ValidateTableVersionTask validateTableVersionTask = + new ValidateTableVersionTask(preparedData.getSchemaName(), 
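
Clustered columnar indexes get their own job factory above: buildCreateCciJob hands the prepared data to CreateColumnarIndexJobFactory instead of the GSI factory, and the adjacent GSI hunk retargets StatisticSampleTask from the index table to the primary table. The CCI creation path, condensed:

```java
// buildCreateCciJob, condensed from the hunk above.
final CreateGlobalIndexPreparedData cciPreparedData = logicalAlterTable
    .getAlterTableWithGsiPreparedData()
    .getCreateIndexWithGsiPreparedData()
    .getGlobalIndexPreparedData();

return CreateColumnarIndexJobFactory.create4CreateCci(
    logicalAlterTable.relDdl, cciPreparedData, executionContext);
```

The matching buildDropCciJob below follows the same split, prefixing the drop job with a ValidateTableVersionTask so a stale plan cannot drop an index on a table whose metadata has moved on.
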
tableVersions); + + ExecutableDdlJob cciJob = DropColumnarIndexJobFactory.create(preparedData, executionContext, false, true); + cciJob.addTask(validateTableVersionTask); + cciJob.addTaskRelationship(validateTableVersionTask, cciJob.getHead()); + + return cciJob; + } + private DdlJob buildRenameGsiJob(LogicalAlterTable logicalAlterTable, ExecutionContext executionContext) { AlterTableWithGsiPreparedData alterTableWithGsiPreparedData = logicalAlterTable.getAlterTableWithGsiPreparedData(); @@ -1432,21 +1635,19 @@ private void checkAutoIncrement4Cdc(String sqlCreateTable) { } } - private void initPrimaryTableDefinition4ModifySk(BaseDdlOperation logicalDdlPlan, - ExecutionContext executionContext, - AlterTableWithGsiPreparedData gsiData, - Map tableNameMap, - Map virtualColumnMap, - Map columnNewDef, - AtomicBoolean primaryKeyChanged) { + private void initPrimaryTableDefinition4RebuildTable(BaseDdlOperation logicalDdlPlan, + ExecutionContext executionContext, + AlterTableWithGsiPreparedData gsiData, + AtomicBoolean primaryKeyNotChanged, + RebuildTablePrepareData rebuildTablePrepareData) { String schemaName = logicalDdlPlan.getSchemaName(); String tableName = logicalDdlPlan.getTableName(); List oldPrimaryKeys = new ArrayList<>(); AlterTablePreparedData alterTablePreparedData = ((LogicalAlterTable) logicalDdlPlan).getAlterTablePreparedData(); Pair primaryTableInfo = - genPrimaryTableInfoAfterModifyColumn(logicalDdlPlan, executionContext, virtualColumnMap, columnNewDef, - oldPrimaryKeys); + genPrimaryTableInfoAfterModifyColumn(logicalDdlPlan, executionContext, oldPrimaryKeys, + rebuildTablePrepareData); checkAutoIncrement4Cdc(primaryTableInfo.getKey()); @@ -1456,9 +1657,338 @@ private void initPrimaryTableDefinition4ModifySk(BaseDdlOperation logicalDdlPlan SqlAlterTable ast = (SqlAlterTable) logicalDdlPlan.getNativeSqlNode(); + List gsiList; + if (DbInfoManager.getInstance().isNewPartitionDb(schemaName)) { + gsiList = buildIndexDefinition4Auto(schemaName, tableName, logicalDdlPlan, executionContext, + gsiData, alterTablePreparedData, rebuildTablePrepareData, primaryKeyNotChanged, oldPrimaryKeys, + primaryTableInfo, ast); + } else { + gsiList = buildIndexDefinition4Drds(schemaName, tableName, executionContext, + gsiData, alterTablePreparedData, rebuildTablePrepareData, primaryTableInfo, ast); + } + + List sqlAddIndexList = gsiList.stream().map(e -> + StringUtils.equalsIgnoreCase(e.getType(), "UNIQUE") ? 
+ new SqlAddUniqueIndex(SqlParserPos.ZERO, e.getIndexName(), e) : + new SqlAddIndex(SqlParserPos.ZERO, e.getIndexName(), e) + ).collect(Collectors.toList()); + + ast.getSkAlters().addAll(sqlAddIndexList); + } + + protected Pair genPrimaryTableInfoAfterModifyColumn(BaseDdlOperation logicalDdlPlan, + ExecutionContext executionContext, + List oldPrimaryKeys, + RebuildTablePrepareData rebuildTablePrepareData) { + Map virtualColumnMap = rebuildTablePrepareData.getVirtualColumnMap(); + Map columnNewDef = rebuildTablePrepareData.getColumnNewDef(); + Map backfillColumnMap = rebuildTablePrepareData.getBackfillColumnMap(); + List modifyStringColumns = rebuildTablePrepareData.getModifyStringColumns(); + List addNewColumns = rebuildTablePrepareData.getAddNewColumns(); + List dropOldColumns = rebuildTablePrepareData.getDropColumns(); + + Pair primaryTableInfo = genPrimaryTableInfo(logicalDdlPlan, executionContext); + oldPrimaryKeys.addAll(primaryTableInfo.getValue().getPrimaryKey().getColumns() + .stream().map(e -> e.getColumnNameStr().toLowerCase()).collect(Collectors.toList())); + + final List statementList = + SQLUtils.parseStatementsWithDefaultFeatures(primaryTableInfo.getKey(), JdbcConstants.MYSQL); + final MySqlCreateTableStatement stmt = (MySqlCreateTableStatement) statementList.get(0); + + SqlAlterTable sqlAlterTable = (SqlAlterTable) logicalDdlPlan.getNativeSqlNode(); + final List alterStatement = + SQLUtils.parseStatementsWithDefaultFeatures(sqlAlterTable.getSourceSql(), JdbcConstants.MYSQL); + final SQLAlterTableStatement alterStmt = (SQLAlterTableStatement) alterStatement.get(0); + + List columnsDef = new ArrayList<>(); + Set dropColumns = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); + Map newColumnDefinitionMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); + Map newColumnDefinitionMap4AddColumn = + new TreeMap<>(String.CASE_INSENSITIVE_ORDER); + Map newColumnAfter = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); + Map newColumnFirst = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); + Map newColumnAfter4AddColumn = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); + Map newColumnFirst4AddColumn = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); + for (SQLAlterTableItem sqlAlterTableItem : alterStmt.getItems()) { + String colName; + SQLName afterColumn; + boolean first; + SQLColumnDefinition newColumnDefinition; + if (sqlAlterTableItem instanceof MySqlAlterTableModifyColumn) { + MySqlAlterTableModifyColumn modifyColumn = (MySqlAlterTableModifyColumn) sqlAlterTableItem; + newColumnDefinition = modifyColumn.getNewColumnDefinition(); + colName = SQLUtils.normalizeNoTrim(newColumnDefinition.getColumnName()); + afterColumn = modifyColumn.getAfterColumn(); + first = modifyColumn.isFirst(); + + newColumnDefinitionMap.put(colName, newColumnDefinition); + if (afterColumn != null) { + newColumnAfter.put(colName, SQLUtils.normalizeNoTrim(afterColumn.getSimpleName())); + } + if (first) { + newColumnFirst.put(colName, true); + } + columnsDef.add(colName.toLowerCase()); + } else if (sqlAlterTableItem instanceof MySqlAlterTableChangeColumn) { + MySqlAlterTableChangeColumn changeColumn = (MySqlAlterTableChangeColumn) sqlAlterTableItem; + newColumnDefinition = changeColumn.getNewColumnDefinition(); + colName = SQLUtils.normalizeNoTrim(changeColumn.getColumnName().getSimpleName()); + afterColumn = changeColumn.getAfterColumn(); + first = changeColumn.isFirst(); + + backfillColumnMap.put(colName.toLowerCase(), + SQLUtils.normalizeNoTrim(newColumnDefinition.getColumnName()).toLowerCase()); + + 
newColumnDefinitionMap.put(colName, newColumnDefinition); + if (afterColumn != null) { + newColumnAfter.put(colName, SQLUtils.normalizeNoTrim(afterColumn.getSimpleName())); + } + if (first) { + newColumnFirst.put(colName, true); + } + columnsDef.add(colName.toLowerCase()); + } else if (sqlAlterTableItem instanceof SQLAlterTableAddColumn) { + SQLAlterTableAddColumn addColumn = (SQLAlterTableAddColumn) sqlAlterTableItem; + newColumnDefinition = addColumn.getColumns().get(0); + colName = SQLUtils.normalizeNoTrim(newColumnDefinition.getColumnName()); + afterColumn = addColumn.getAfterColumn(); + first = addColumn.isFirst(); + + newColumnDefinitionMap4AddColumn.put(colName, newColumnDefinition); + if (afterColumn != null) { + newColumnAfter4AddColumn.put(colName, SQLUtils.normalizeNoTrim(afterColumn.getSimpleName())); + } + if (first) { + newColumnFirst4AddColumn.put(colName, true); + } + columnsDef.add(colName.toLowerCase()); + addNewColumns.add(colName.toLowerCase()); + } else if (sqlAlterTableItem instanceof SQLAlterTableDropColumnItem) { + SQLAlterTableDropColumnItem dropColumnItem = (SQLAlterTableDropColumnItem) sqlAlterTableItem; + colName = SQLUtils.normalizeNoTrim(dropColumnItem.getColumns().get(0).getSimpleName()); + + dropColumns.add(colName.toLowerCase()); + dropOldColumns.add(colName.toLowerCase()); + } else if (sqlAlterTableItem instanceof SQLAlterTableDropPrimaryKey) { + final Iterator it = stmt.getTableElementList().iterator(); + while (it.hasNext()) { + SQLTableElement tableElement = it.next(); + if (tableElement instanceof SQLColumnDefinition) { + final SQLColumnDefinition columnDefinition = (SQLColumnDefinition) tableElement; + if (null != columnDefinition.getConstraints()) { + final Iterator constraintIt = + columnDefinition.getConstraints().iterator(); + while (constraintIt.hasNext()) { + final SQLColumnConstraint constraint = constraintIt.next(); + if (constraint instanceof SQLColumnPrimaryKey) { + constraintIt.remove(); + } else if (constraint instanceof SQLColumnReference) { + // remove foreign key + constraintIt.remove(); + } + } + } + } else if (tableElement instanceof MySqlPrimaryKey) { + it.remove(); + } + } + } else if (sqlAlterTableItem instanceof SQLAlterTableAddConstraint) { + SQLAlterTableAddConstraint addConstraint = (SQLAlterTableAddConstraint) sqlAlterTableItem; + if (addConstraint.getConstraint() instanceof MySqlPrimaryKey) { + MySqlPrimaryKey addPrimaryKey = (MySqlPrimaryKey) addConstraint.getConstraint(); + final Iterator it = stmt.getTableElementList().iterator(); + Set colNameSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); + addPrimaryKey.getColumns() + .forEach(e -> colNameSet.add(SQLUtils.normalizeNoTrim(e.getExpr().toString()))); + + boolean hasImplicitKey = false; + while (it.hasNext()) { + SQLTableElement tableElement = it.next(); + if (tableElement instanceof SQLColumnDefinition) { + final SQLColumnDefinition columnDefinition = (SQLColumnDefinition) tableElement; + if (colNameSet.contains(SQLUtils.normalizeNoTrim(columnDefinition.getColumnName()))) { + SQLNotNullConstraint sqlNotNullConstraint = new SQLNotNullConstraint(); + columnDefinition.addConstraint(sqlNotNullConstraint); + if (columnDefinition.getDefaultExpr() instanceof SQLNullExpr) { + columnDefinition.setDefaultExpr(null); + } + colNameSet.remove(SQLUtils.normalizeNoTrim(columnDefinition.getColumnName())); + } + if (com.alibaba.polardbx.druid.util.StringUtils.equalsIgnoreCase(IMPLICIT_COL_NAME, + SQLUtils.normalizeNoTrim(columnDefinition.getName().getSimpleName()))) { + hasImplicitKey 
= true;
+ }
+ } else if (tableElement instanceof MySqlPrimaryKey) {
+ it.remove();
+ }
+ }
+
+ if (!colNameSet.isEmpty()) {
+ throw new TddlRuntimeException(ErrorCode.ERR_OPTIMIZER,
+ "Unknown column " + colNameSet);
+ }
+
+ // add new primary key
+ List colNames = addPrimaryKey.getColumns().stream()
+ .map(e -> new SQLSelectOrderByItem(new SQLIdentifierExpr(e.getExpr().toString())))
+ .collect(Collectors.toList());
+
+ MySqlPrimaryKey newPrimaryKey = new MySqlPrimaryKey();
+ SQLIndexDefinition indexDefinition = newPrimaryKey.getIndexDefinition();
+ indexDefinition.setKey(true);
+ indexDefinition.setType("PRIMARY");
+
+ indexDefinition.getColumns().addAll(colNames);
+
+ stmt.getTableElementList().add(newPrimaryKey);
+
+ // add local index for implicit key (auto increment)
+ if (hasImplicitKey) {
+ MySqlKey implicitKey = new MySqlKey();
+ implicitKey.setName(IMPLICIT_KEY_NAME);
+ SQLIndexDefinition indexDef = implicitKey.getIndexDefinition();
+ indexDef.setKey(true);
+ indexDef.getColumns().add(new SQLSelectOrderByItem(new SQLIdentifierExpr(IMPLICIT_COL_NAME)));
+
+ stmt.getTableElementList().add(implicitKey);
+ }
+ }
+ }
+ }
+
+ List oldTableElementList = stmt.getTableElementList();
+ List newTableElementList = new ArrayList<>();
+ Map newColumnDefinitionMapTmp = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
+ newColumnDefinitionMapTmp.putAll(newColumnDefinitionMap);
+
+ // handle CHANGE/MODIFY COLUMN and DROP COLUMN first
+ for (SQLTableElement oldTableElement : oldTableElementList) {
+ SQLTableElement tableElement = oldTableElement.clone();
+ if (tableElement instanceof SQLColumnDefinition) {
+ final SQLColumnDefinition columnDefinition = (SQLColumnDefinition) tableElement;
+ final String columnName = SQLUtils.normalizeNoTrim(columnDefinition.getName().getSimpleName());
+ boolean autoIncrement = columnDefinition.isAutoIncrement();
+ boolean isStringType = (columnDefinition.getDataType() instanceof SQLCharacterDataType);
+
+ if (dropColumns.contains(columnName)) {
+ // ignore
+ } else if (newColumnDefinitionMap.containsKey(columnName)) {
+ SQLColumnDefinition newColumnDefinition = newColumnDefinitionMap.get(columnName);
+ newTableElementList.add(newColumnDefinition);
+ if (!autoIncrement) {
+ // prepare for the checker
+ String colNameStr = columnName.toLowerCase();
+ virtualColumnMap.put(colNameStr, GsiUtils.generateRandomGsiName(colNameStr));
+ columnNewDef.put(colNameStr,
+ TableColumnUtils.getDataDefFromColumnDefNoDefault(newColumnDefinition));
+ }
+ if (!newColumnAfter.containsKey(columnName) && !newColumnFirst.containsKey(columnName)) {
+ newColumnDefinitionMap.remove(columnName);
+ columnsDef.remove(columnName.toLowerCase());
+ }
+ if (isStringType) {
+ modifyStringColumns.add(columnName.toLowerCase());
+ }
+ } else {
+ newTableElementList.add(tableElement);
+ }
+ } else if (tableElement instanceof MySqlKey) {
+ final MySqlKey mySqlKey = (MySqlKey) tableElement;
+ SQLIndexDefinition sqlIndexDefinition = mySqlKey.getIndexDefinition();
+ List columns = sqlIndexDefinition.getColumns();
+
+ final Iterator it = columns.iterator();
+ while (it.hasNext()) {
+ SQLSelectOrderByItem column = it.next();
+ String columnName = SQLUtils.normalizeNoTrim(column.getExpr().toString());
+ if (dropColumns.contains(columnName)) {
+ it.remove();
+ } else if (newColumnDefinitionMapTmp.containsKey(columnName)) {
+ SQLColumnDefinition newColumnDefinition = newColumnDefinitionMapTmp.get(columnName);
+ column.setExpr(new SQLIdentifierExpr(newColumnDefinition.getColumnName()));
+ }
+ }
+ newTableElementList.add(tableElement);
+ } else {
+ newTableElementList.add(tableElement);
+ }
+ }
+
+ for (String colName : columnsDef) {
+ int idx = findIdxFromTableElementList(newTableElementList, SQLUtils.normalizeNoTrim(colName));
+ // handle ADD COLUMN
+ if (newColumnDefinitionMap4AddColumn.containsKey(colName)) {
+ if (idx != newTableElementList.size()) {
+ // the column being added already exists
+ throw new TddlRuntimeException(ErrorCode.ERR_OPTIMIZER,
+ String.format("Duplicate column name '%s'", colName));
+ } else if (!newColumnAfter4AddColumn.containsKey(colName)
+ && !newColumnFirst4AddColumn.containsKey(colName)) {
+ newTableElementList.add(newColumnDefinitionMap4AddColumn.get(colName));
+ } else if (newColumnFirst4AddColumn.containsKey(colName)) {
+ // handle ADD COLUMN ... FIRST ordering
+ newTableElementList.add(0, newColumnDefinitionMap4AddColumn.get(colName));
+ } else {
+ // handle ADD COLUMN ... AFTER ordering
+ String afterColName = newColumnAfter4AddColumn.get(colName);
+ int afterIdx = findIdxFromTableElementList(newTableElementList, afterColName);
+ if (afterIdx != newTableElementList.size()) {
+ newTableElementList.add(afterIdx + 1, newColumnDefinitionMap4AddColumn.get(colName));
+ }
+ }
+ } else {
+ // remove the old column definition
+ SQLColumnDefinition newColDef = newColumnDefinitionMap.get(colName);
+ int removeItemIdx =
+ findIdxFromTableElementList(newTableElementList,
+ SQLUtils.normalizeNoTrim(newColDef.getColumnName()));
+ if (removeItemIdx != newTableElementList.size()) {
+ newTableElementList.remove(removeItemIdx);
+ }
+
+ if (!newColumnAfter.containsKey(colName) && !newColumnFirst.containsKey(colName)) {
+ newTableElementList.add(newColDef);
+ } else if (newColumnFirst.containsKey(colName)) {
+ // handle CHANGE/MODIFY ... FIRST ordering
+ newTableElementList.add(0, newColDef);
+ } else {
+ // handle CHANGE/MODIFY ... AFTER ordering
+ String afterColName = newColumnAfter.get(colName);
+ int afterIdx = findIdxFromTableElementList(newTableElementList, afterColName);
+ if (afterIdx != newTableElementList.size()) {
+ newTableElementList.add(afterIdx + 1, newColDef);
+ }
+ }
+ }
+ }
+
+ stmt.setTableElementList(newTableElementList);
+
+ String sourceSql = stmt.toString();
+ SqlCreateTable primaryTableNode =
+ (SqlCreateTable) new FastsqlParser().parse(sourceSql, executionContext).get(0);
+
+ return new Pair<>(sourceSql, primaryTableNode);
+ }
+
+ private List buildIndexDefinition4Auto(String schemaName, String tableName,
+ BaseDdlOperation logicalDdlPlan,
+ ExecutionContext executionContext,
+ AlterTableWithGsiPreparedData gsiData,
+ AlterTablePreparedData alterTablePreparedData,
+ RebuildTablePrepareData rebuildTablePrepareData,
+ AtomicBoolean primaryKeyNotChanged,
+ List oldPrimaryKeys,
+ Pair primaryTableInfo,
+ SqlAlterTable ast) {
+ Map tableNameMap = rebuildTablePrepareData.getTableNameMap();
+ Map tableNameMapReverse = rebuildTablePrepareData.getTableNameMapReverse();
+ Map needReHash = rebuildTablePrepareData.getNeedReHash();
+ List dropColumns = rebuildTablePrepareData.getDropColumns();
+ boolean isUnique = false;
List gsiList = new ArrayList<>();
- if (alterTablePreparedData.isNeedRepartition()) {
+ {
String targetTableName;
List indexKeys = new ArrayList<>();
List coveringKeys = new ArrayList<>();
@@ -1467,10 +1997,10 @@ private void initPrimaryTableDefinition4ModifySk(BaseDdlOperation logicalDdlPlan
PartitionInfo refPartitionInfo = tableMeta.getPartitionInfo();
TableGroupConfig tableGroupConfig =
- executionContext.getSchemaManager().getTddlRuleManager().getTableGroupInfoManager()
+ executionContext.getSchemaManager(schemaName).getTddlRuleManager().getTableGroupInfoManager()
.getTableGroupConfigById(refPartitionInfo.getTableGroupId()); String tableGroupName = tableGroupConfig.getTableGroupRecord().getTg_name(); - String firstTbInTg = tableGroupConfig.getTables().get(0).getTableName(); + String firstTbInTg = tableGroupConfig.getTables().get(0); PartitionInfo firstTblIgPartInfo = executionContext.getSchemaManager(schemaName).getTable(firstTbInTg).getPartitionInfo(); @@ -1497,7 +2027,7 @@ private void initPrimaryTableDefinition4ModifySk(BaseDdlOperation logicalDdlPlan } } - primaryKeyChanged.set( + primaryKeyNotChanged.set( primaryKeyChanged(oldPrimaryKeys, alterTablePreparedData.getAddedPrimaryKeyColumns())); for (SqlNode item : sqlPartitionBy.getColumns()) { @@ -1529,6 +2059,8 @@ private void initPrimaryTableDefinition4ModifySk(BaseDdlOperation logicalDdlPlan } tableNameMap.put(tableName, targetTableName); + tableNameMapReverse.put(targetTableName, tableName); + needReHash.put(targetTableName, alterTablePreparedData.isNeedRepartition()); if (!alterTablePreparedData.isKeepPartitionKeyRange() && (refPartitionInfo.getPartitionBy().getStrategy() == PartitionStrategy.KEY @@ -1540,6 +2072,10 @@ private void initPrimaryTableDefinition4ModifySk(BaseDdlOperation logicalDdlPlan sqlPartitionBy = null; } + SqlIdentifier tgName = alterTablePreparedData.isKeepPartitionKeyRange() ? + new SqlIdentifier(tableGroupName, SqlParserPos.ZERO) : null; + + boolean withImplicitTg = StringUtils.isNotEmpty(ast.getTargetImplicitTableGroupName()); SqlIndexDefinition repartitionGsi = AlterRepartitionUtils.initIndexInfo( targetTableName, @@ -1549,20 +2085,21 @@ private void initPrimaryTableDefinition4ModifySk(BaseDdlOperation logicalDdlPlan isUnique, primaryTableInfo.getKey(), primaryTableInfo.getValue(), - sqlPartitionBy + sqlPartitionBy, + withImplicitTg ? 
new SqlIdentifier(ast.getTargetImplicitTableGroupName(), SqlParserPos.ZERO) :
+ tgName,
+ withImplicitTg
);
repartitionGsi.setBroadcast(refPartitionInfo.isBroadcastTable());
repartitionGsi.setSingle(refPartitionInfo.isSingleTable());
gsiList.add(repartitionGsi);
}
-
+ Set indexNames = new TreeSet<>(String::compareToIgnoreCase);
+ indexNames.addAll(ast.getIndexTableGroupMap().keySet());
if (gsiData != null) {
List globalIndexPreparedDataList = gsiData.getGlobalIndexPreparedData();
for (AlterTablePreparedData globalIndexPreparedData : globalIndexPreparedDataList) {
- if (!globalIndexPreparedData.isNeedRepartition()) {
- continue;
- }
String indexName = globalIndexPreparedData.getTableName();
TableMeta indexMeta = executionContext.getSchemaManager(schemaName).getTable(indexName);
PartitionInfo refPartitionInfo = indexMeta.getPartitionInfo();
@@ -1572,7 +2109,7 @@ private void initPrimaryTableDefinition4ModifySk(BaseDdlOperation logicalDdlPlan
executionContext.getSchemaManager(schemaName).getTddlRuleManager().getTableGroupInfoManager()
.getTableGroupConfigById(indexTgId);
String tableGroupName = tableGroupConfig.getTableGroupRecord().getTg_name();
- String firstTbInTg = tableGroupConfig.getTables().get(0).getTableName();
+ String firstTbInTg = tableGroupConfig.getTables().get(0);
PartitionInfo firstTablePartInfo =
executionContext.getSchemaManager(schemaName).getTable(firstTbInTg).getPartitionInfo();
@@ -1610,9 +2147,9 @@ private void initPrimaryTableDefinition4ModifySk(BaseDdlOperation logicalDdlPlan
}
GsiMetaManager.GsiTableMetaBean gsiMeta = indexMeta.getGsiTableMetaBean();
- List gsiIndexKeys = gsiMeta.gsiMetaBean.indexColumns
+ List gsiIndexKeys = gsiMeta.gsiMetaBean.getIndexColumns()
.stream().map(e -> e.columnName.toLowerCase()).collect(Collectors.toList());
- List gsiCoveringKeys = gsiMeta.gsiMetaBean.coveringColumns
+ List gsiCoveringKeys = gsiMeta.gsiMetaBean.getCoveringColumns()
.stream().map(e -> e.columnName.toLowerCase()).collect(Collectors.toList());
isUnique = !gsiMeta.gsiMetaBean.nonUnique;
@@ -1625,9 +2162,35 @@ private void initPrimaryTableDefinition4ModifySk(BaseDdlOperation logicalDdlPlan
}
}
+ gsiIndexKeys = gsiIndexKeys.stream()
+ .filter(e -> !dropColumns.contains(e.toLowerCase()))
+ .map(e -> rebuildTablePrepareData.getBackfillColumnMap().getOrDefault(e.toLowerCase(), e))
+ .collect(Collectors.toList());
+
+ gsiCoveringKeys = gsiCoveringKeys.stream()
+ .filter(e -> !dropColumns.contains(e.toLowerCase()))
+ .map(e -> rebuildTablePrepareData.getBackfillColumnMap().getOrDefault(e.toLowerCase(), e))
+ .collect(Collectors.toList());
+
+ if (gsiMeta.gsiMetaBean.clusteredIndex) {
+ // clustered indexes must carry the newly added columns as well
+ gsiCoveringKeys.addAll(rebuildTablePrepareData.getAddNewColumns());
+ }
+
String targetGsiName = GsiUtils.generateRandomGsiName(indexName);
tableNameMap.put(indexName, targetGsiName);
+ tableNameMapReverse.put(targetGsiName, indexName);
+ needReHash.put(targetGsiName, globalIndexPreparedData.isNeedRepartition());
+ SqlIdentifier tgName = globalIndexPreparedData.isKeepPartitionKeyRange() ?
+ new SqlIdentifier(tableGroupName, SqlParserPos.ZERO) : null;
+
+ String logicalIndexName = TddlSqlToRelConverter.unwrapGsiName(indexName);
+ String targetTableGroupName = ast.getIndexTableGroupMap().get(logicalIndexName);
+ boolean indexWithImplicitTg = (targetTableGroupName != null);
+ if (indexWithImplicitTg) {
+ indexNames.remove(logicalIndexName);
+ }
SqlIndexDefinition repartitionGsiInfo =
AlterRepartitionUtils.initIndexInfo(
targetGsiName,
@@ -1637,18 +2200,146 @@ private void initPrimaryTableDefinition4ModifySk(BaseDdlOperation logicalDdlPlan
isUnique,
primaryTableInfo.getKey(),
primaryTableInfo.getValue(),
- sqlPartitionBy
+ sqlPartitionBy,
+ // TODO: may specify the GSI's implicit table group
+ indexWithImplicitTg ? new SqlIdentifier(targetTableGroupName, SqlParserPos.ZERO) : tgName,
+ indexWithImplicitTg
);
gsiList.add(repartitionGsiInfo);
+ if (!globalIndexPreparedData.isNeedRepartition() && indexWithImplicitTg
+ && !tableGroupName.equalsIgnoreCase(targetTableGroupName)) {
+ needReHash.put(targetGsiName, true);
+ }
}
}
-
+ if (!indexNames.isEmpty()) {
+ throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT,
+ indexNames + " does not exist or does not need repartitioning");
+ }
List sqlAddIndexList = gsiList.stream().map(e ->
StringUtils.equalsIgnoreCase(e.getType(), "UNIQUE") ?
new SqlAddUniqueIndex(SqlParserPos.ZERO, e.getIndexName(), e) :
new SqlAddIndex(SqlParserPos.ZERO, e.getIndexName(), e)
).collect(Collectors.toList());
- ast.getSkAlters().addAll(sqlAddIndexList);
+ return gsiList;
+ }
+
+ private List buildIndexDefinition4Drds(String schemaName, String tableName,
+ ExecutionContext executionContext,
+ AlterTableWithGsiPreparedData gsiData,
+ AlterTablePreparedData alterTablePreparedData,
+ RebuildTablePrepareData rebuildTablePrepareData,
+ Pair primaryTableInfo,
+ SqlAlterTable ast) {
+ Map tableNameMap = rebuildTablePrepareData.getTableNameMap();
+ Map tableNameMapReverse = rebuildTablePrepareData.getTableNameMapReverse();
+ Map needReHash = rebuildTablePrepareData.getNeedReHash();
+ List dropColumns = rebuildTablePrepareData.getDropColumns();
+
+ List gsiList = new ArrayList<>();
+
+ {
+ TableRule tableRule =
+ executionContext.getSchemaManager(schemaName).getTddlRuleManager().getTableRule(tableName);
+ String targetTableName = GsiUtils.generateRandomGsiName(tableName);
+ tableNameMap.put(tableName, targetTableName);
+ tableNameMapReverse.put(targetTableName, tableName);
+ needReHash.put(targetTableName, tableRule.isBroadcast() || alterTablePreparedData.isNeedRepartition());
+ ast.setLogicalSecondaryTableName(targetTableName);
+
+ SqlAlterTablePartitionKey sqlAlterTablePartitionKey =
+ generateSqlPartitionKey(schemaName, tableName, executionContext);
+ sqlAlterTablePartitionKey.setLogicalSecondaryTableName(targetTableName);
+
+ gsiList.add(AlterRepartitionUtils.initIndexInfo(primaryTableInfo.getValue(), sqlAlterTablePartitionKey,
+ primaryTableInfo.getKey()));
+ }
+
+ if (gsiData != null) {
+ List globalIndexPreparedDataList = gsiData.getGlobalIndexPreparedData();
+ for (AlterTablePreparedData globalIndexPreparedData : globalIndexPreparedDataList) {
+ String indexName = globalIndexPreparedData.getTableName();
+
+ String targetGsiName = GsiUtils.generateRandomGsiName(indexName);
+ tableNameMap.put(indexName, targetGsiName);
+ tableNameMapReverse.put(targetGsiName, indexName);
+ needReHash.put(targetGsiName, globalIndexPreparedData.isNeedRepartition());
+
+ TableRule tableRule =
+ executionContext.getSchemaManager(schemaName).getTddlRuleManager().getTableRule(indexName);
+ TableMeta indexMeta = executionContext.getSchemaManager(schemaName).getTable(indexName);
+
+ GsiMetaManager.GsiTableMetaBean gsiMeta = indexMeta.getGsiTableMetaBean();
+ List gsiIndexKeys = gsiMeta.gsiMetaBean.indexColumns
+ .stream().map(e -> e.columnName.toLowerCase()).collect(Collectors.toList());
+ List gsiCoveringKeys = gsiMeta.gsiMetaBean.coveringColumns
+ .stream().map(e -> e.columnName.toLowerCase()).collect(Collectors.toList());
+ boolean isUnique = !gsiMeta.gsiMetaBean.nonUnique;
+
+ List shardingKeys = tableRule.getShardColumns();
+
+ for (String shardingKey : shardingKeys) {
+ if (!gsiIndexKeys.contains(shardingKey.toLowerCase())
+ && !gsiCoveringKeys.contains(shardingKey.toLowerCase())) {
+ gsiCoveringKeys.add(shardingKey.toLowerCase());
+ }
+ }
+
+ gsiIndexKeys = gsiIndexKeys.stream()
+ .filter(e -> !dropColumns.contains(e.toLowerCase()))
+ .map(e -> rebuildTablePrepareData.getBackfillColumnMap().getOrDefault(e.toLowerCase(), e))
+ .collect(Collectors.toList());
+
+ gsiCoveringKeys = gsiCoveringKeys.stream()
+ .filter(e -> !dropColumns.contains(e.toLowerCase()))
+ .map(e -> rebuildTablePrepareData.getBackfillColumnMap().getOrDefault(e.toLowerCase(), e))
+ .collect(Collectors.toList());
+
+ if (gsiMeta.gsiMetaBean.clusteredIndex) {
+ // clustered indexes must carry the newly added columns as well
+ gsiCoveringKeys.addAll(rebuildTablePrepareData.getAddNewColumns());
+ }
+
+ SqlAlterTablePartitionKey sqlAlterTablePartitionKey =
+ generateSqlPartitionKey(schemaName, indexName, executionContext);
+
+ gsiList.add(
+ AlterRepartitionUtils.initIndexInfo4DrdsOmc(targetGsiName, gsiIndexKeys, gsiCoveringKeys, false,
+ isUnique, primaryTableInfo.getKey(), primaryTableInfo.getValue(), sqlAlterTablePartitionKey));
+ }
+ }
+
+ return gsiList;
+ }
+
+ private static int findIdxFromTableElementList(List tableElementList, String columnName) {
+ int idx = 0;
+ for (SQLTableElement tableElement : tableElementList) {
+ if (tableElement instanceof SQLColumnDefinition) {
+ final SQLColumnDefinition columnDefinition = (SQLColumnDefinition) tableElement;
+ String curColumnName = SQLUtils.normalizeNoTrim(columnDefinition.getColumnName());
+ if (com.alibaba.polardbx.druid.util.StringUtils.equalsIgnoreCase(curColumnName, columnName)) {
+ break;
+ }
+ }
+ idx++;
+ }
+ return idx;
+ }
+
+ public List buildIndexDefinition4AutoForTest(String schemaName, String tableName,
+ BaseDdlOperation logicalDdlPlan,
+ ExecutionContext executionContext,
+ AlterTableWithGsiPreparedData gsiData,
+ AlterTablePreparedData alterTablePreparedData,
+ RebuildTablePrepareData rebuildTablePrepareData,
+ AtomicBoolean primaryKeyNotChanged,
+ List oldPrimaryKeys,
+ Pair primaryTableInfo,
+ SqlAlterTable ast) {
+ return buildIndexDefinition4Auto(schemaName, tableName, logicalDdlPlan, executionContext, gsiData,
+ alterTablePreparedData, rebuildTablePrepareData, primaryKeyNotChanged, oldPrimaryKeys, primaryTableInfo,
+ ast);
}
}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableModifyPartitionHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableModifyPartitionHandler.java
index 8535a2f68..56961a914 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableModifyPartitionHandler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableModifyPartitionHandler.java
@@ -22,7 +22,6 @@ import
com.alibaba.polardbx.executor.ddl.job.factory.AlterTableModifyPartitionJobFactory; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; import com.alibaba.polardbx.executor.spi.IRepository; -import com.alibaba.polardbx.executor.utils.StringUtils; import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; @@ -35,14 +34,12 @@ import com.alibaba.polardbx.optimizer.partition.common.PartitionStrategy; import com.alibaba.polardbx.optimizer.partition.pruning.SearchDatumComparator; import com.alibaba.polardbx.optimizer.partition.pruning.SearchDatumInfo; -import groovy.sql.Sql; import org.apache.calcite.rel.ddl.AlterTable; import org.apache.calcite.sql.SqlAlterTable; import org.apache.calcite.sql.SqlAlterTableModifyPartitionValues; import org.apache.calcite.sql.SqlIdentifier; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.SqlPartition; -import org.apache.calcite.sql.SqlPartitionValue; import org.apache.calcite.sql.SqlSubPartition; import org.apache.calcite.util.Util; @@ -217,11 +214,11 @@ protected boolean validatePlan(BaseDdlOperation logicalDdlPlan, ExecutionContext // partName)); // } - if (tableMeta.withGsi()) { + if (tableMeta.withGsi() && !tableMeta.withCci()) { throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT, String.format("it's not support to drop value when table[%s] with GSI", logicalTableName)); } - if (tableMeta.isGsi()) { + if (tableMeta.isGsi() && !tableMeta.isColumnar()) { throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_MODIFY_PARTITION_DROP_VALUE, String.format("it's not support to drop value for global index[%s]", logicalTableName)); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableMovePartitionHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableMovePartitionHandler.java index 79f979c16..e9ffa1e59 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableMovePartitionHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableMovePartitionHandler.java @@ -24,6 +24,7 @@ import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; import com.alibaba.polardbx.executor.ddl.newengine.job.TransientDdlJob; import com.alibaba.polardbx.executor.partitionmanagement.AlterTableGroupUtils; +import com.alibaba.polardbx.executor.physicalbackfill.PhysicalBackfillUtils; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; import com.alibaba.polardbx.gms.topology.DbInfoManager; @@ -59,7 +60,9 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e return new TransientDdlJob(); } - logicalAlterTableMovePartition.preparedData(executionContext); + boolean usePhysicalBackfill = + PhysicalBackfillUtils.isSupportForPhysicalBackfill(logicalDdlPlan.getSchemaName(), executionContext); + logicalAlterTableMovePartition.preparedData(executionContext, usePhysicalBackfill); return AlterTableMovePartitionJobFactory.create(alterTable, (AlterTableMovePartitionPreparedData) logicalAlterTableMovePartition.getPreparedData(), executionContext); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTablePartitionCountHandler.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTablePartitionCountHandler.java index 1d8c8c58b..9698a7ed3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTablePartitionCountHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTablePartitionCountHandler.java @@ -16,7 +16,6 @@ package com.alibaba.polardbx.executor.handler.ddl; -import com.alibaba.polardbx.common.exception.NotSupportException; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionParams; @@ -45,6 +44,7 @@ import org.apache.calcite.sql.SqlAddUniqueIndex; import org.apache.calcite.sql.SqlAlterTablePartitionCount; import org.apache.calcite.sql.SqlCreateTable; +import org.apache.calcite.sql.SqlIdentifier; import org.apache.calcite.sql.SqlIndexDefinition; import org.apache.calcite.sql.parser.SqlParserPos; import org.apache.commons.lang3.StringUtils; @@ -53,6 +53,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.TreeMap; import java.util.stream.Collectors; public class LogicalAlterTablePartitionCountHandler extends LogicalCommonDdlHandler { @@ -91,12 +92,15 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e logicalAlterTablePartitionCount.getCreateGlobalIndexesPreparedData(); Map globalIndexPrepareData = new HashMap<>(); + Map indexTablePreparedDataMap = + new TreeMap<>(String::compareToIgnoreCase); for (CreateGlobalIndexPreparedData createGsiPreparedData : globalIndexesPreparedData) { DdlPhyPlanBuilder builder = CreateGlobalIndexBuilder.create( logicalAlterTablePartitionCount.relDdl, createGsiPreparedData, + indexTablePreparedDataMap, executionContext).build(); - + indexTablePreparedDataMap.put(createGsiPreparedData.getIndexTableName(), createGsiPreparedData); globalIndexPrepareData.put(createGsiPreparedData, builder.genPhysicalPlanData()); } @@ -119,6 +123,8 @@ private void initNewTableDefinition(BaseDdlOperation logicalDdlPlan, String primaryTableName = ast.getPrimaryTableName(); int partitionCnt = ast.getPartitionCount(); + boolean withImplicitTg = StringUtils.isNotEmpty(ast.getTargetImplicitTableGroupName()); + // logical table name --> new logical table name List createGsiPrepareDataList = new ArrayList<>(); @@ -182,7 +188,9 @@ private void initNewTableDefinition(BaseDdlOperation logicalDdlPlan, partitionCnt, createGsiPrepareDataList, primaryTableInfo.getValue(), - primaryTableInfo.getKey() + primaryTableInfo.getKey(), + withImplicitTg ? 
new SqlIdentifier(ast.getTargetImplicitTableGroupName(), SqlParserPos.ZERO) : null, + withImplicitTg ); List sqlAddIndexList = repartitionGsi.stream().map(e -> diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableRemovePartitioningHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableRemovePartitioningHandler.java index 0a158ff99..10c2bb47b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableRemovePartitioningHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableRemovePartitioningHandler.java @@ -22,7 +22,6 @@ import com.alibaba.polardbx.executor.ddl.job.builder.gsi.CreateGlobalIndexBuilder; import com.alibaba.polardbx.executor.ddl.job.converter.PhysicalPlanData; import com.alibaba.polardbx.executor.ddl.job.factory.gsi.RemovePartitioningJobFactory; -import com.alibaba.polardbx.executor.ddl.job.factory.gsi.RepartitionJobFactory; import com.alibaba.polardbx.executor.ddl.job.validator.IndexValidator; import com.alibaba.polardbx.executor.ddl.job.validator.ddl.RepartitionValidator; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; @@ -30,36 +29,30 @@ import com.alibaba.polardbx.executor.gms.util.AlterRepartitionUtils; import com.alibaba.polardbx.executor.gsi.GsiUtils; import com.alibaba.polardbx.executor.spi.IRepository; -import com.alibaba.polardbx.gms.metadb.table.IndexStatus; -import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; import com.alibaba.polardbx.optimizer.config.table.IndexMeta; import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.ddl.BaseDdlOperation; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterTableRemovePartitioning; -import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterTableRepartition; -import com.alibaba.polardbx.optimizer.core.rel.ddl.data.RepartitionPrepareData; import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.CreateGlobalIndexPreparedData; +import com.alibaba.polardbx.optimizer.partition.PartitionInfo; import com.alibaba.polardbx.optimizer.sql.sql2rel.TddlSqlToRelConverter; -import com.google.common.collect.ImmutableMap; import org.apache.calcite.sql.SqlAddIndex; import org.apache.calcite.sql.SqlAddUniqueIndex; import org.apache.calcite.sql.SqlAlterTableRemovePartitioning; -import org.apache.calcite.sql.SqlAlterTableRepartition; import org.apache.calcite.sql.SqlCreateTable; +import org.apache.calcite.sql.SqlIdentifier; import org.apache.calcite.sql.SqlIndexDefinition; import org.apache.calcite.sql.parser.SqlParserPos; import org.apache.commons.lang3.StringUtils; import java.util.ArrayList; -import java.util.Formatter; import java.util.HashMap; import java.util.List; -import java.util.Locale; import java.util.Map; -import java.util.Random; import java.util.Set; +import java.util.TreeMap; import java.util.TreeSet; import java.util.stream.Collectors; @@ -102,12 +95,15 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e Map globalIndexPrepareData = new HashMap<>(); + Map indexTablePreparedDataMap = + new TreeMap<>(String::compareToIgnoreCase); for (CreateGlobalIndexPreparedData createGsiPreparedData : globalIndexPreparedDataList) { DdlPhyPlanBuilder builder = CreateGlobalIndexBuilder.create( 
logicalAlterTableRemovePartitioning.relDdl, createGsiPreparedData, + indexTablePreparedDataMap, executionContext).build(); - + indexTablePreparedDataMap.put(createGsiPreparedData.getIndexTableName(), createGsiPreparedData); globalIndexPrepareData.put(createGsiPreparedData, builder.genPhysicalPlanData()); } @@ -132,6 +128,8 @@ private void initPrimaryTableDefinition(BaseDdlOperation logicalDdlPlan, SqlAlterTableRemovePartitioning ast = (SqlAlterTableRemovePartitioning) logicalDdlPlan.getNativeSqlNode(); + boolean withImplicitTg = StringUtils.isNotEmpty(ast.getTargetImplicitTableGroupName()); + // for primary table String primaryTableName = ast.getOriginTableName().getLastName(); String targetTableName = @@ -149,7 +147,9 @@ private void initPrimaryTableDefinition(BaseDdlOperation logicalDdlPlan, true, false, primaryTableInfo.getKey(), - primaryTableInfo.getValue() + primaryTableInfo.getValue(), + withImplicitTg ? new SqlIdentifier(ast.getTargetImplicitTableGroupName(), SqlParserPos.ZERO) : null, + withImplicitTg ); gsiList.add(repartitionGsi); @@ -191,7 +191,9 @@ private void initPrimaryTableDefinition(BaseDdlOperation logicalDdlPlan, false, indexMeta.isUniqueIndex(), primaryTableInfo.getKey(), - primaryTableInfo.getValue() + primaryTableInfo.getValue(), + null, + false ); // prepare for drop gsi columns diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableRenamePartitionHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableRenamePartitionHandler.java index e0aefe57b..b5b09a7b7 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableRenamePartitionHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableRenamePartitionHandler.java @@ -16,8 +16,6 @@ package com.alibaba.polardbx.executor.handler.ddl; -import com.alibaba.polardbx.common.exception.TddlRuntimeException; -import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.executor.ddl.job.factory.AlterTableRenamePartitionJobFactory; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; import com.alibaba.polardbx.executor.partitionmanagement.AlterTableGroupUtils; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableRepartitionHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableRepartitionHandler.java index a8ae047c4..424680342 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableRepartitionHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableRepartitionHandler.java @@ -30,6 +30,7 @@ import com.alibaba.polardbx.executor.gms.util.AlterRepartitionUtils; import com.alibaba.polardbx.executor.gsi.GsiUtils; import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.executor.utils.DdlUtils; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.PlannerContext; @@ -81,12 +82,13 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e SqlAlterTableRepartition ast = (SqlAlterTableRepartition) logicalAlterTableRepartition.relDdl.sqlNode; - if (ast.isAlignToTableGroup()) { - String schemaName = logicalDdlPlan.getSchemaName(); - String tableName = 
logicalAlterTableRepartition.getTableName(); - - TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(tableName); + String schemaName = logicalDdlPlan.getSchemaName(); + String tableName = logicalAlterTableRepartition.getTableName(); + TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(tableName); + boolean repartitionGsi = false; + if (ast.isAlignToTableGroup()) { + repartitionGsi = tableMeta.isGsi(); String tableGroup = ast.getTableGroupName().getLastName(); TableGroupInfoManager tgInfoManager = OptimizerContext.getContext(schemaName).getTableGroupInfoManager(); TableGroupConfig tableGroupConfig = tgInfoManager.getTableGroupConfigByName(tableGroup); @@ -100,7 +102,7 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e String.format("the tablegroup:[%s] is empty, it's not expected", tableGroup)); } - String firstTbInTg = tableGroupConfig.getTables().get(0).getTableName(); + String firstTbInTg = tableGroupConfig.getTables().get(0); TableMeta refTableMeta = executionContext.getSchemaManager(schemaName).getTable(firstTbInTg); SqlPartitionBy sqlPartitionBy = AlterRepartitionUtils.generateSqlPartitionBy(tableName, tableGroup, @@ -136,11 +138,13 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e // prepare data for local indexes logicalAlterTableRepartition.prepareLocalIndexData(); RepartitionPrepareData repartitionPrepareData = logicalAlterTableRepartition.getRepartitionPrepareData(); + repartitionPrepareData.setRepartitionGsi(repartitionGsi); globalIndexPreparedData.setRepartitionPrepareData(repartitionPrepareData); DdlPhyPlanBuilder builder = CreateGlobalIndexBuilder.create( logicalAlterTableRepartition.relDdl, globalIndexPreparedData, + null, executionContext).build(); PhysicalPlanData physicalPlanData = builder.genPhysicalPlanData(); @@ -162,10 +166,10 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e return new TransientDdlJob(); } + final Long versionId = DdlUtils.generateVersionId(executionContext); + logicalAlterTableRepartition.setDdlVersionId(versionId); + // get foreign keys - String schemaName = logicalDdlPlan.getSchemaName(); - String tableName = logicalAlterTableRepartition.getTableName(); - TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(tableName); logicalAlterTableRepartition.prepareForeignKeyData(tableMeta, ast); return new RepartitionJobFactory( @@ -206,28 +210,6 @@ private void initPrimaryTableDefinition(BaseDdlOperation logicalDdlPlan, Executi ast.getAlters().addAll(sqlAddIndexList); } - private SqlPartitionBy generateSqlPartitionBy(String schemaName, String tableName, - PartitionInfo referPartitionInfo) { - SqlPartitionBy sqlPartitionBy; - switch (referPartitionInfo.getPartitionBy().getStrategy()) { - case HASH: - sqlPartitionBy = new SqlPartitionByHash(false, false, SqlParserPos.ZERO); - break; - case KEY: - sqlPartitionBy = new SqlPartitionByHash(true, false, SqlParserPos.ZERO); - break; - case RANGE: - case RANGE_COLUMNS: - sqlPartitionBy = new SqlPartitionByRange(SqlParserPos.ZERO); - break; - case LIST: - case LIST_COLUMNS: - sqlPartitionBy = new SqlPartitionByList(SqlParserPos.ZERO); - break; - } - return null; - } - @Override protected boolean validatePlan(BaseDdlOperation logicalDdlPlan, ExecutionContext executionContext) { return false; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableSetTableGroupHandler.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableSetTableGroupHandler.java index 07c67daf7..3fa6b27dc 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableSetTableGroupHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalAlterTableSetTableGroupHandler.java @@ -90,8 +90,8 @@ protected boolean validatePlan(BaseDdlOperation logicalDdlPlan, ExecutionContext } AlterTableSetTableGroupPreparedData preparedData = logicalAlterTableSetTableGroup.getPreparedData(); String tableGroup = ((AlterTableSetTableGroup) (logicalDdlPlan.relDdl)).getTableGroupName(); - if (StringUtils.isNotEmpty(tableGroup)) { - TableGroupValidator.validateTableGroupInfo(preparedData.getSchemaName(), tableGroup, true, + if (StringUtils.isNotEmpty(tableGroup) && !preparedData.isImplicit()) { + TableGroupValidator.validateTableGroupInfo(logicalDdlPlan.getSchemaName(), tableGroup, true, executionContext.getParamManager()); TableGroupConfig tableGroupConfig = OptimizerContext.getContext(logicalDdlPlan.getSchemaName()).getTableGroupInfoManager() diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCheckCciHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCheckCciHandler.java new file mode 100644 index 000000000..510225c96 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCheckCciHandler.java @@ -0,0 +1,174 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.handler.ddl; + +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.ddl.job.task.columnar.CheckCciMetaTask; +import com.alibaba.polardbx.executor.ddl.job.task.columnar.CheckCciStartTask; +import com.alibaba.polardbx.executor.ddl.job.task.columnar.CheckCciTask; +import com.alibaba.polardbx.executor.ddl.job.task.gsi.ClearCheckReportTask; +import com.alibaba.polardbx.executor.ddl.job.task.gsi.ShowCheckReportTask; +import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; +import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory; +import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; +import com.alibaba.polardbx.executor.ddl.newengine.job.TransientDdlJob; +import com.alibaba.polardbx.executor.gsi.CheckerManager; +import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.optimizer.core.rel.ddl.BaseDdlOperation; +import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalCheckCci; +import com.alibaba.polardbx.optimizer.core.rel.ddl.data.CheckCciPrepareData; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Sets; + +import java.util.Collections; +import java.util.List; + +/** + * CHECK COLUMNAR INDEX + */ +public class LogicalCheckCciHandler extends LogicalCommonDdlHandler { + + public LogicalCheckCciHandler(IRepository repo) { + super(repo); + } + + @Override + protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext ec) { + final LogicalCheckCci logicalCheckCci = (LogicalCheckCci) logicalDdlPlan; + final CheckCciPrepareData prepareData = logicalCheckCci.prepareData(ec); + + // Use TransientDdlJob for CHECK COLUMNAR INDEX SHOW + ExecutableDdlJob job = new TransientDdlJob(); + + if (prepareData.isCheck()) { + // CHECK COLUMNAR INDEX [CHECK] + CheckCciStartTask checkCciStartTask = + new CheckCciStartTask(prepareData.getSchemaName(), prepareData.getTableName(), + prepareData.getIndexName()); + CheckCciTask checkTask = CheckCciTask.create(prepareData); + + job = new ExecutableDdlJob(); + job.addSequentialTasks(ImmutableList.of(checkCciStartTask, checkTask)); + } else if (prepareData.isMeta()) { + // CHECK COLUMNAR INDEX META + final CheckCciMetaTask checkCciMetaTask = CheckCciMetaTask.create(prepareData); + + job = new ExecutableDdlJob(); + job.addSequentialTasks(ImmutableList.of(checkCciMetaTask)); + } + + final String fullTableName = + DdlJobFactory.concatWithDot(prepareData.getSchemaName(), prepareData.getTableName()); + final String fullIndexName = + DdlJobFactory.concatWithDot(prepareData.getSchemaName(), prepareData.getIndexName()); + job.addExcludeResources(Sets.newHashSet(fullTableName, fullIndexName)); + + return job; + } + + @Override + protected Cursor buildResultCursor(BaseDdlOperation baseDdl, DdlJob ddlJob, ExecutionContext ec) { + final LogicalCheckCci ddl = (LogicalCheckCci) baseDdl; + final boolean async = ec.getDdlContext().isAsyncMode(); + + final CheckCciPrepareData prepareData = ddl.prepareData(ec); + final String humanReadableIndexName = prepareData.humanReadableIndexName(); + + String finalResult = ""; + List checkerReports = Collections.emptyList(); + if 
(prepareData.isClear()) { + // Clear report from metadb + ClearCheckReportTask clear = new ClearCheckReportTask( + prepareData.getSchemaName(), + prepareData.getTableName(), + prepareData.getIndexName() + ); + clear.clear(); + finalResult = clear.getFinalResult(); + } else if (prepareData.isNeedReport(async)) { + // Return report as result of current CHECK COLUMNAR INDEX statement + ShowCheckReportTask show = new ShowCheckReportTask( + prepareData.getSchemaName(), + prepareData.getTableName(), + prepareData.getIndexName() + ); + show.show(ec); + finalResult = show.getFinalResult(); + checkerReports = show.getCheckerReports(); + } else if (!async) { + throw new TddlRuntimeException( + ErrorCode.ERR_DDL_JOB_ERROR, + "unknown checker action " + prepareData.getExtraCmd()); + } + + // Generate result cursor + ArrayResultCursor result = new ArrayResultCursor("checkColumnarIndex"); + if (async) { + generateResultForAsyncExecution(humanReadableIndexName, result); + } else { + addToResultCursor(humanReadableIndexName, finalResult, checkerReports, result); + } + return result; + } + + private static void generateResultForAsyncExecution(String humanReadableIndexName, ArrayResultCursor result) { + // Add column meta + result.addColumn("Table", DataTypes.StringType); + result.addColumn("Op", DataTypes.StringType); + result.addColumn("Msg_type", DataTypes.StringType); + result.addColumn("Msg_text", DataTypes.StringType); + + // Add result + result.addRow(new Object[] { + humanReadableIndexName, + "check CCI", + "status", + String.format( + "Use SHOW DDL to get checking status. Use SQL: CHECK COLUMNAR INDEX %s SHOW; to get result.", + humanReadableIndexName)}); + } + + private static void addToResultCursor(String humanReadableIndexName, + String finalResult, + List checkerReports, + ArrayResultCursor result) { + // Add column meta + result.addColumn("CCI", DataTypes.StringType); + result.addColumn("error_type", DataTypes.StringType); + result.addColumn("status", DataTypes.StringType); + result.addColumn("primary_key", DataTypes.StringType); + result.addColumn("details", DataTypes.StringType); + + // Add report records + for (CheckerManager.CheckerReport item : checkerReports) { + result.addRow(new Object[] { + humanReadableIndexName, + item.getErrorType(), + CheckerManager.CheckerReportStatus.of(item.getStatus()).name(), + item.getPrimaryKey(), + item.getDetails()}); + } + + // Add final result + result.addRow(new Object[] {humanReadableIndexName, "SUMMARY", "--", "--", finalResult}); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCheckGsiHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCheckGsiHandler.java index 631bb1829..18591f59e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCheckGsiHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCheckGsiHandler.java @@ -91,7 +91,7 @@ protected Cursor buildResultCursor(BaseDdlOperation baseDdl, DdlJob ddlJob, Exec CheckGsiPrepareData prepareData = ddl.prepareData(ec); String prettyTableName = prepareData.prettyTableName(); - String finalResult; + String finalResult = ""; List checkerReports = Collections.emptyList(); // query result from metadb @@ -113,7 +113,7 @@ protected Cursor buildResultCursor(BaseDdlOperation baseDdl, DdlJob ddlJob, Exec show.show(ec); finalResult = show.getFinalResult(); checkerReports = show.getCheckerReports(); - } else { + } else if (!async) { 
throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "unknown checker action"); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalClearFileStorageHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalClearFileStorageHandler.java new file mode 100644 index 000000000..77594ddf8 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalClearFileStorageHandler.java @@ -0,0 +1,38 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.handler.ddl; + +import com.alibaba.polardbx.executor.ddl.job.factory.ClearFileStorageJobFactory; +import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; +import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.rel.ddl.BaseDdlOperation; +import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalClearFileStorage; + +public class LogicalClearFileStorageHandler extends LogicalCommonDdlHandler { + + public LogicalClearFileStorageHandler(IRepository repo) { + super(repo); + } + + @Override + protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext executionContext) { + LogicalClearFileStorage logicalClearFileStorage = (LogicalClearFileStorage) logicalDdlPlan; + logicalClearFileStorage.preparedData(); + return new ClearFileStorageJobFactory(logicalClearFileStorage.getPreparedData(), executionContext).create(); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCommonDdlHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCommonDdlHandler.java index 1fb242879..028a9cdf3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCommonDdlHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCommonDdlHandler.java @@ -19,31 +19,34 @@ import com.alibaba.polardbx.common.ddl.newengine.DdlConstants; import com.alibaba.polardbx.common.ddl.newengine.DdlType; import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; -import com.alibaba.polardbx.common.exception.TddlRuntimeException; -import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.CaseInsensitive; import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.common.utils.TStringUtil; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.druid.DbType; import com.alibaba.polardbx.druid.sql.SQLUtils; -import com.alibaba.polardbx.druid.sql.ast.SQLIndexDefinition; +import com.alibaba.polardbx.druid.sql.ast.SQLExpr; +import 
com.alibaba.polardbx.druid.sql.ast.SQLName; import com.alibaba.polardbx.druid.sql.ast.SQLStatement; import com.alibaba.polardbx.druid.sql.ast.expr.SQLIdentifierExpr; -import com.alibaba.polardbx.druid.sql.ast.expr.SQLNullExpr; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLColumnConstraint; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLColumnDefinition; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLColumnPrimaryKey; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLColumnReference; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLNotNullConstraint; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLSelectOrderByItem; +import com.alibaba.polardbx.druid.sql.ast.expr.SQLMethodInvokeExpr; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableAddConstraint; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableAddIndex; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableItem; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAlterTableStatement; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLConstraint; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLConstraintImpl; import com.alibaba.polardbx.druid.sql.ast.statement.SQLTableElement; import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.MySqlKey; import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.MySqlPrimaryKey; -import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlAlterTableChangeColumn; -import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlAlterTableModifyColumn; +import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.MySqlUnique; +import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.MysqlForeignKey; import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlCreateTableStatement; -import com.alibaba.polardbx.druid.util.JdbcConstants; +import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlTableIndex; +import com.alibaba.polardbx.druid.sql.parser.SQLParserUtils; +import com.alibaba.polardbx.druid.sql.visitor.VisitorFeature; import com.alibaba.polardbx.druid.util.StringUtils; import com.alibaba.polardbx.executor.common.RecycleBin; import com.alibaba.polardbx.executor.common.RecycleBinManager; @@ -57,12 +60,11 @@ import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask; import com.alibaba.polardbx.executor.ddl.newengine.job.TransientDdlJob; import com.alibaba.polardbx.executor.ddl.newengine.serializable.SerializableClassMapper; -import com.alibaba.polardbx.executor.gsi.GsiUtils; import com.alibaba.polardbx.executor.handler.HandlerCommon; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.optimizer.OptimizerContext; -import com.alibaba.polardbx.optimizer.config.table.TableColumnUtils; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.DdlContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; @@ -72,6 +74,7 @@ import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalCreateTable; import com.alibaba.polardbx.optimizer.core.row.Row; import com.alibaba.polardbx.optimizer.parse.FastsqlParser; +import com.alibaba.polardbx.optimizer.parse.FastsqlUtils; import com.alibaba.polardbx.optimizer.partition.PartitionInfo; import com.alibaba.polardbx.optimizer.partition.PartitionInfoManager; import 
com.alibaba.polardbx.optimizer.partition.common.PartitionLocation; @@ -87,7 +90,6 @@ import org.apache.calcite.sql.SqlChangeColumn; import org.apache.calcite.sql.SqlColumnDeclaration; import org.apache.calcite.sql.SqlCreateTable; -import org.apache.calcite.sql.SqlDropPrimaryKey; import org.apache.calcite.sql.SqlIdentifier; import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlModifyColumn; @@ -100,16 +102,15 @@ import org.apache.commons.collections.CollectionUtils; import java.util.ArrayList; -import java.util.Iterator; +import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.TreeSet; -import java.util.stream.Collectors; -import static com.alibaba.polardbx.common.TddlConstants.IMPLICIT_COL_NAME; -import static com.alibaba.polardbx.common.TddlConstants.IMPLICIT_KEY_NAME; +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcSqlUtils.SQL_PARSE_FEATURES; import static com.alibaba.polardbx.executor.handler.LogicalShowCreateTableHandler.reorgLogicalColumnOrder; +import static com.alibaba.polardbx.executor.handler.ddl.LogicalCreateTableHandler.generateCreateTableSqlForLike; +import static com.alibaba.polardbx.optimizer.sql.sql2rel.TddlSqlToRelConverter.unwrapGsiName; public abstract class LogicalCommonDdlHandler extends HandlerCommon { @@ -231,7 +232,9 @@ protected void initDdlContext(BaseDdlOperation logicalDdlPlan, ExecutionContext executionContext.setDdlContext(ddlContext); - rewriteOriginSqlWithForeignKey(logicalDdlPlan, executionContext, schemaName, objectName); + ForeignKeyUtils.rewriteOriginSqlWithForeignKey(logicalDdlPlan, executionContext, schemaName, objectName); + + rewriteOriginSqlToCdcMarkSql(logicalDdlPlan, executionContext, schemaName, objectName); } protected void handleDdlRequest(DdlJob ddlJob, ExecutionContext executionContext) { @@ -332,246 +335,179 @@ protected Pair genPrimaryTableInfo(BaseDdlOperation logi } } - protected Pair genPrimaryTableInfoAfterModifyColumn(BaseDdlOperation logicalDdlPlan, - ExecutionContext executionContext, - Map virtualColumnMap, - Map columnNewDef, - List oldPrimaryKeys) { - Pair primaryTableInfo = genPrimaryTableInfo(logicalDdlPlan, executionContext); - oldPrimaryKeys.addAll(primaryTableInfo.getValue().getPrimaryKey().getColumns() - .stream().map(e -> e.getColumnNameStr().toLowerCase()).collect(Collectors.toList())); + protected boolean isAvailableForRecycleBin(String tableName, ExecutionContext executionContext) { + final String appName = executionContext.getAppName(); + final RecycleBin recycleBin = RecycleBinManager.instance.getByAppName(appName); + return executionContext.getParamManager().getBoolean(ConnectionParams.ENABLE_RECYCLEBIN) && + !RecycleBin.isRecyclebinTable(tableName) && + recycleBin != null && !recycleBin.hasForeignConstraint(appName, tableName); + } + + // rewrite sql for cdc, @see com.alibaba.polardbx.cdc.ImplicitTableGroupUtil + // rewrite reason : adding a name for anonymous indexes, then we can add implicit table-group for the indexes + protected void rewriteOriginSqlToCdcMarkSql(BaseDdlOperation logicalDdlPlan, ExecutionContext ec, + String schemaName, String tableName) { + // some sql has no schema name. + // e.g. 
rebalance database policy='partition_balance' + if (StringUtils.isEmpty(logicalDdlPlan.getSchemaName())) { + return; + } - SqlAlterTable sqlAlterTable = (SqlAlterTable) logicalDdlPlan.getNativeSqlNode(); + boolean isNewPartDb = DbInfoManager.getInstance().isNewPartitionDb(logicalDdlPlan.getSchemaName()); - final List statementList = - SQLUtils.parseStatementsWithDefaultFeatures(primaryTableInfo.getKey(), JdbcConstants.MYSQL); - final MySqlCreateTableStatement stmt = (MySqlCreateTableStatement) statementList.get(0); - - boolean isFirst = false; - String alterSourceSql; - SqlIdentifier colName = null; - SqlIdentifier afterColName = null; - SqlColumnDeclaration newColumnDef = null; - SQLColumnDefinition newColumnDefinition = null; - for (SqlAlterSpecification sqlAlterSpecification : sqlAlterTable.getAlters()) { - if (sqlAlterSpecification instanceof SqlModifyColumn) { - SqlModifyColumn sqlModifyColumn = (SqlModifyColumn) sqlAlterSpecification; - isFirst = sqlModifyColumn.isFirst(); - afterColName = sqlModifyColumn.getAfterColumn(); - colName = sqlModifyColumn.getColName(); - alterSourceSql = sqlModifyColumn.getSourceSql(); - - final List alterStatement = - SQLUtils.parseStatementsWithDefaultFeatures(alterSourceSql, JdbcConstants.MYSQL); - newColumnDefinition = ((MySqlAlterTableModifyColumn) alterStatement.get(0) - .getChildren().get(1)).getNewColumnDefinition(); - } else if (sqlAlterSpecification instanceof SqlChangeColumn) { - SqlChangeColumn sqlChangeColumn = (SqlChangeColumn) sqlAlterSpecification; - if (!sqlChangeColumn.getOldName() - .equalsDeep(sqlChangeColumn.getNewName(), Litmus.IGNORE, EqualsContext.DEFAULT_EQUALS_CONTEXT)) { - throw new TddlRuntimeException(ErrorCode.ERR_OPTIMIZER, - "Do not support changing column name of sharding key"); + // only auto-mode databases support this rewrite, because drds mode supports neither table groups nor using the same GSI name in different tables + // rewrite create-like to the actual create sql so an implicit table group can be added + // because the target table's table-group may be different from the base table's table-group + if (logicalDdlPlan instanceof LogicalCreateTable) { + LogicalCreateTable logicalCreateTable = (LogicalCreateTable) logicalDdlPlan; + SqlCreateTable sqlCreateTable = (SqlCreateTable) logicalCreateTable.relDdl.sqlNode; + if (sqlCreateTable.getLikeTableName() != null) { + if (isNewPartDb) { + final String sourceCreateTableSql = generateCreateTableSqlForLike(sqlCreateTable, ec); + MySqlCreateTableStatement stmt = + (MySqlCreateTableStatement) FastsqlUtils.parseSql(sourceCreateTableSql).get(0); + stmt.getTableSource() + .setSimpleName(SqlIdentifier.surroundWithBacktick(logicalCreateTable.getTableName())); + ec.getDdlContext().setCdcRewriteDdlStmt(stmt.toString(VisitorFeature.OutputHashPartitionsByRange)); } + return; + } + } - isFirst = sqlChangeColumn.isFirst(); - afterColName = sqlChangeColumn.getAfterColumn(); - colName = sqlChangeColumn.getOldName(); - alterSourceSql = sqlChangeColumn.getSourceSql(); - - final List alterStatement = - SQLUtils.parseStatementsWithDefaultFeatures(alterSourceSql, JdbcConstants.MYSQL); - newColumnDefinition = ((MySqlAlterTableChangeColumn) alterStatement.get(0) - .getChildren().get(1)).getNewColumnDefinition(); - } else if (sqlAlterSpecification instanceof SqlDropPrimaryKey) { - final Iterator it = stmt.getTableElementList().iterator(); - while (it.hasNext()) { - SQLTableElement tableElement = it.next(); - if (tableElement instanceof SQLColumnDefinition) { - final SQLColumnDefinition columnDefinition = 
(SQLColumnDefinition) tableElement; - if (null != columnDefinition.getConstraints()) { - final Iterator constraintIt = - columnDefinition.getConstraints().iterator(); - while (constraintIt.hasNext()) { - final SQLColumnConstraint constraint = constraintIt.next(); - if (constraint instanceof SQLColumnPrimaryKey) { - constraintIt.remove(); - } else if (constraint instanceof SQLColumnReference) { - // remove foreign key - constraintIt.remove(); - } - } - } - } else if (tableElement instanceof MySqlPrimaryKey) { - it.remove(); + String originSql = ec.getOriginSql(); + final List statementList = + SQLParserUtils.createSQLStatementParser(originSql, DbType.mysql, SQL_PARSE_FEATURES).parseStatementList(); + if (statementList.isEmpty()) { + return; + } + + if (logicalDdlPlan.getDdlType() == DdlType.CREATE_TABLE && + statementList.get(0) instanceof MySqlCreateTableStatement) { + // MySqlExplainStatement also belongs to CREATE_TABLE + final MySqlCreateTableStatement stmt = (MySqlCreateTableStatement) statementList.get(0); + + Set indexNamesSet = new HashSet<>(); + for (final SQLTableElement tableElement : stmt.getTableElementList()) { + if (tableElement instanceof SQLConstraintImpl) { + final SQLConstraintImpl constraint = (SQLConstraintImpl) tableElement; + final SQLName indexName = constraint.getName(); + if (indexName != null && indexName.getSimpleName() != null + && !indexName.getSimpleName().isEmpty()) { + indexNamesSet.add(indexName.getSimpleName()); } } - continue; - } else if (sqlAlterSpecification instanceof SqlAddPrimaryKey) { - SqlAddPrimaryKey sqlAddPrimaryKey = (SqlAddPrimaryKey) sqlAlterSpecification; - final Iterator it = stmt.getTableElementList().iterator(); - Set colNameSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); - sqlAddPrimaryKey.getColumns().forEach(e -> colNameSet.add(e.getColumnNameStr())); - - boolean hasImplicitKey = false; - while (it.hasNext()) { - SQLTableElement tableElement = it.next(); - if (tableElement instanceof SQLColumnDefinition) { - final SQLColumnDefinition columnDefinition = (SQLColumnDefinition) tableElement; - if (colNameSet.contains(SQLUtils.normalizeNoTrim(columnDefinition.getColumnName()))) { - SQLNotNullConstraint sqlNotNullConstraint = new SQLNotNullConstraint(); - columnDefinition.addConstraint(sqlNotNullConstraint); - if (columnDefinition.getDefaultExpr() instanceof SQLNullExpr) { - columnDefinition.setDefaultExpr(null); + } + + for (final SQLTableElement tableElement : stmt.getTableElementList()) { + if (tableElement instanceof SQLConstraintImpl) { + final SQLConstraintImpl constraint = (SQLConstraintImpl) tableElement; + + // Assign name if no name. 
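> Reviewer note: the naming rules applied by the assignment logic that follows are easier to see in isolation. Below is a minimal, self-contained sketch — `AnonymousIndexNaming` and `deriveIndexName` are hypothetical names for illustration only, and the `SQLUtils.normalizeNoTrim` quoting step is omitted — of how an anonymous index inherits its first column's name, how collisions pick up a `_2`, `_3`, ... suffix, and how the fallbacks `<table>_ibfk_N` (anonymous foreign keys) and `i_N` (no usable column expression) are chosen.

```java
import java.util.HashSet;
import java.util.Set;

// Hedged sketch of the naming scheme used by this patch; the helper is
// illustrative and not part of the actual code.
class AnonymousIndexNaming {
    static String deriveIndexName(String baseName, String tableName,
                                  boolean isForeignKey, Set<String> taken) {
        if (baseName != null) {
            if (taken.add(baseName)) {
                return baseName;                     // first use keeps the plain column name
            }
            int prob = 2;                            // collisions become "<base>_2", "<base>_3", ...
            while (!taken.add(baseName + "_" + prob)) {
                ++prob;
            }
            return baseName + "_" + prob;
        }
        // no usable column expression: fall back to MySQL-style generated names
        String prefix = isForeignKey ? tableName.toLowerCase() + "_ibfk_" : "i_";
        int prob = isForeignKey ? 1 : 0;             // foreign-key numbering starts at 1
        while (!taken.add(prefix + prob)) {
            ++prob;
        }
        return prefix + prob;
    }

    public static void main(String[] args) {
        Set<String> taken = new HashSet<>();
        System.out.println(deriveIndexName("c1", "t1", false, taken)); // c1
        System.out.println(deriveIndexName("c1", "t1", false, taken)); // c1_2
        System.out.println(deriveIndexName(null, "t1", true, taken));  // t1_ibfk_1
    }
}
```

The fallbacks mirror MySQL's own generated constraint names, presumably so the CDC-marked SQL stays recognizable to downstream consumers.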
+ if (!(tableElement instanceof MySqlPrimaryKey) && + (null == constraint.getName() || constraint.getName().getSimpleName().isEmpty())) { + String baseName = null; + int prob = 0; + if (tableElement instanceof MySqlKey) { + baseName = ((MySqlKey) tableElement).getColumns().get(0).getExpr().toString(); + } else if (tableElement instanceof MySqlTableIndex) { + SQLExpr expr = ((MySqlTableIndex) tableElement).getColumns().get(0).getExpr(); + if (expr instanceof SQLMethodInvokeExpr) { + baseName = ((SQLMethodInvokeExpr) expr).getMethodName(); + } else { + baseName = expr.toString(); } - colNameSet.remove(SQLUtils.normalizeNoTrim(columnDefinition.getColumnName())); } - if (StringUtils.equalsIgnoreCase(IMPLICIT_COL_NAME, - SQLUtils.normalizeNoTrim(columnDefinition.getName().getSimpleName()))) { - hasImplicitKey = true; + if (baseName != null) { + baseName = SQLUtils.normalizeNoTrim(baseName); + if (!indexNamesSet.contains(baseName)) { + constraint.setName(baseName); + indexNamesSet.add(baseName); + } else { + prob = 2; + baseName = baseName + "_"; + while (indexNamesSet.contains(baseName + prob)) { + ++prob; + } + constraint.setName(baseName + prob); + indexNamesSet.add(baseName + prob); + } + } else { + if (tableElement instanceof MysqlForeignKey) { + baseName = tableName.toLowerCase() + "_ibfk_"; + prob++; + } else { + baseName = "i_"; + } + while (indexNamesSet.contains(baseName + prob)) { + ++prob; + } + constraint.setName(baseName + prob); + indexNamesSet.add(baseName + prob); } - } else if (tableElement instanceof MySqlPrimaryKey) { - it.remove(); } } - - if (!colNameSet.isEmpty()) { - throw new TddlRuntimeException(ErrorCode.ERR_OPTIMIZER, - "Unknown column " + colNameSet); - } - - // add new primary key - List colNames = sqlAddPrimaryKey.getColumns().stream() - .map(e -> new SQLSelectOrderByItem(new SQLIdentifierExpr(e.getColumnNameStr()))) - .collect(Collectors.toList()); - - MySqlPrimaryKey newPrimaryKey = new MySqlPrimaryKey(); - SQLIndexDefinition indexDefinition = newPrimaryKey.getIndexDefinition(); - indexDefinition.setKey(true); - indexDefinition.setType("PRIMARY"); - - indexDefinition.getColumns().addAll(colNames); - - stmt.getTableElementList().add(newPrimaryKey); - - // add local index for implicit key (auto increment) - if (hasImplicitKey) { - MySqlKey implicitKey = new MySqlKey(); - implicitKey.setName(IMPLICIT_KEY_NAME); - SQLIndexDefinition indexDef = implicitKey.getIndexDefinition(); - indexDef.setKey(true); - indexDef.getColumns().add(new SQLSelectOrderByItem(new SQLIdentifierExpr(IMPLICIT_COL_NAME))); - - stmt.getTableElementList().add(implicitKey); - } - continue; - } else { - continue; } - - int first = -1; - int m = 0; - boolean isAutoIncrement = false; - for (; m < stmt.getTableElementList().size(); ++m) { - SQLTableElement tableElement = stmt.getTableElementList().get(m); - if (tableElement instanceof SQLColumnDefinition) { - first = first == -1 ? 
m : first; - final SQLColumnDefinition columnDefinition = (SQLColumnDefinition) tableElement; - final String columnName = SQLUtils.normalizeNoTrim(columnDefinition.getName().getSimpleName()); - if (columnName.equalsIgnoreCase(colName.getLastName())) { - isAutoIncrement = columnDefinition.isAutoIncrement(); - stmt.getTableElementList().remove(m); - break; - } - } + ec.getDdlContext().setCdcRewriteDdlStmt(stmt.toString(VisitorFeature.OutputHashPartitionsByRange)); + } else if (logicalDdlPlan.getDdlType() == DdlType.ALTER_TABLE && + statementList.get(0) instanceof SQLAlterTableStatement) { + final SQLAlterTableStatement stmt = (SQLAlterTableStatement) statementList.get(0); + + final Set existsNames = new TreeSet<>(CaseInsensitive.CASE_INSENSITIVE_ORDER); + TableMeta tableMeta = OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(tableName); + tableMeta.getSecondaryIndexes().forEach(meta -> existsNames.add(meta.getPhysicalIndexName())); + if (tableMeta.getGsiTableMetaBean() != null && tableMeta.getGsiTableMetaBean().indexMap != null) { + tableMeta.getGsiTableMetaBean().indexMap.forEach((k, v) -> existsNames.add(unwrapGsiName(k))); } - int n = 0; - if (afterColName != null) { - for (; n < stmt.getTableElementList().size(); ++n) { - SQLTableElement tableElement = stmt.getTableElementList().get(n); - if (tableElement instanceof SQLColumnDefinition) { - final SQLColumnDefinition columnDefinition = (SQLColumnDefinition) tableElement; - final String columnName = - SQLUtils.normalizeNoTrim(columnDefinition.getName().getSimpleName()); - if (columnName.equalsIgnoreCase(afterColName.getLastName())) { - break; + for (final SQLAlterTableItem item : stmt.getItems()) { + if (item instanceof SQLAlterTableAddIndex) { + SQLName indexName = ((SQLAlterTableAddIndex) item).getName(); + + if (null == indexName || null == indexName.getSimpleName() || indexName.getSimpleName().isEmpty()) { + String realName; + String baseName = ((SQLAlterTableAddIndex) item).getColumns().get(0).getExpr().toString(); + baseName = SQLUtils.normalizeNoTrim(baseName); + if (!existsNames.contains(baseName)) { + realName = baseName; + existsNames.add(realName); + } else { + baseName = baseName + "_"; + int prob = 2; + while (existsNames.contains(baseName + prob)) { + ++prob; + } + realName = baseName + prob; + existsNames.add(realName); + } + ((SQLAlterTableAddIndex) item).setName(new SQLIdentifierExpr(realName)); + } + } else if (item instanceof SQLAlterTableAddConstraint) { + SQLConstraint constraint = ((SQLAlterTableAddConstraint) item).getConstraint(); + SQLName indexName = constraint.getName(); + + if (constraint instanceof MySqlUnique && (null == indexName || null == indexName.getSimpleName() + || indexName.getSimpleName().isEmpty())) { + String realName; + String baseName = ((MySqlUnique) ((SQLAlterTableAddConstraint) item).getConstraint()) + .getIndexDefinition().getColumns().get(0).getExpr().toString(); + baseName = SQLUtils.normalizeNoTrim(baseName); + + if (!existsNames.contains(baseName)) { + realName = baseName; + } else { + baseName = baseName + "_"; + int prob = 2; + while (existsNames.contains(baseName + prob)) { + ++prob; + } + realName = baseName + prob; + existsNames.add(realName); } + ((SQLAlterTableAddConstraint) item).getConstraint().setName(new SQLIdentifierExpr(realName)); } } } - - if (isFirst) { - stmt.getTableElementList().add(first, newColumnDefinition); - } else if (afterColName != null) { - stmt.getTableElementList().add(n + 1, newColumnDefinition); - } else { - 
stmt.getTableElementList().add(m, newColumnDefinition); - } - - if (!isAutoIncrement) { - String colNameStr = colName.getLastName(); - virtualColumnMap.put(colNameStr, GsiUtils.generateRandomGsiName(colNameStr)); - columnNewDef.put(colNameStr, TableColumnUtils.getDataDefFromColumnDefNoDefault(newColumnDefinition)); - } - } - - String sourceSql = stmt.toString(); - SqlCreateTable primaryTableNode = - (SqlCreateTable) new FastsqlParser().parse(sourceSql, executionContext).get(0); - - return new Pair<>(sourceSql, primaryTableNode); - } - - protected boolean isAvailableForRecycleBin(String tableName, ExecutionContext executionContext) { - final String appName = executionContext.getAppName(); - final RecycleBin recycleBin = RecycleBinManager.instance.getByAppName(appName); - return executionContext.getParamManager().getBoolean(ConnectionParams.ENABLE_RECYCLEBIN) && - !RecycleBin.isRecyclebinTable(tableName) && - recycleBin != null && !recycleBin.hasForeignConstraint(appName, tableName); - } - - protected void rewriteOriginSqlWithForeignKey(BaseDdlOperation logicalDdlPlan, ExecutionContext ec, - String schemaName, String tableName) { - // rewrite origin sql for different naming behaviours in 5.7 & 8.0 - boolean createTableWithFk = logicalDdlPlan.getDdlType() == DdlType.CREATE_TABLE - && !((LogicalCreateTable) logicalDdlPlan).getSqlCreateTable().getAddedForeignKeys().isEmpty(); - boolean alterTableAddFk = - logicalDdlPlan.getDdlType() == DdlType.ALTER_TABLE && logicalDdlPlan instanceof LogicalAlterTable - && ((LogicalAlterTable) logicalDdlPlan).getSqlAlterTable().getAlters().size() == 1 - && ((LogicalAlterTable) logicalDdlPlan).getSqlAlterTable().getAlters().get(0).getKind() - == SqlKind.ADD_FOREIGN_KEY; - boolean alterTableDropFk = - logicalDdlPlan.getDdlType() == DdlType.ALTER_TABLE && logicalDdlPlan instanceof LogicalAlterTable - && ((LogicalAlterTable) logicalDdlPlan).getSqlAlterTable().getAlters().size() == 1 - && ((LogicalAlterTable) logicalDdlPlan).getSqlAlterTable().getAlters().get(0).getKind() - == SqlKind.DROP_FOREIGN_KEY; - if (createTableWithFk) { - ec.getDdlContext().setForeignKeyOriginalSql( - ((LogicalCreateTable) logicalDdlPlan).getSqlCreateTable().toString()); - } else if (alterTableAddFk) { - final SqlAlterTable sqlTemplate = ((LogicalAlterTable) logicalDdlPlan).getSqlAlterTable(); - - SqlAddForeignKey sqlAddForeignKey = - (SqlAddForeignKey) ((LogicalAlterTable) logicalDdlPlan).getSqlAlterTable().getAlters().get(0); - // create foreign key constraints symbol - String symbol = - ForeignKeyUtils.getForeignKeyConstraintName(schemaName, tableName); - if (sqlAddForeignKey.getConstraint() == null) { - sqlAddForeignKey.setConstraint(new SqlIdentifier(SQLUtils.normalizeNoTrim(symbol), SqlParserPos.ZERO)); - } - SqlPrettyWriter writer = new SqlPrettyWriter(MysqlSqlDialect.DEFAULT); - writer.setAlwaysUseParentheses(true); - writer.setSelectListItemsOnSeparateLines(false); - writer.setIndentation(0); - final int leftPrec = sqlTemplate.getOperator().getLeftPrec(); - final int rightPrec = sqlTemplate.getOperator().getRightPrec(); - sqlTemplate.getAlters().clear(); - sqlTemplate.getAlters().add(sqlAddForeignKey); - sqlTemplate.unparse(writer, leftPrec, rightPrec, true); - - ec.getDdlContext().setForeignKeyOriginalSql(writer.toSqlString().getSql()); - } else if (alterTableDropFk) { - ec.getDdlContext().setForeignKeyOriginalSql(ec.getOriginSql()); + ec.getDdlContext().setCdcRewriteDdlStmt(stmt.toString(VisitorFeature.OutputHashPartitionsByRange)); } } } diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateDatabaseHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateDatabaseHandler.java index a8950f7ab..d496545e0 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateDatabaseHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateDatabaseHandler.java @@ -16,17 +16,12 @@ package com.alibaba.polardbx.executor.handler.ddl; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; import com.alibaba.polardbx.common.cdc.CdcManagerHelper; -import com.alibaba.polardbx.common.cdc.DdlVisibility; -import com.alibaba.polardbx.common.charset.CharsetName; -import com.alibaba.polardbx.common.charset.CollationName; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.properties.ConnectionProperties; -import com.alibaba.polardbx.common.properties.DynamicConfig; -import com.alibaba.polardbx.config.ConfigDataMode; -import com.alibaba.polardbx.config.ConfigDataMode.Mode; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; import com.alibaba.polardbx.executor.handler.HandlerCommon; @@ -69,6 +64,11 @@ public LogicalCreateDatabaseHandler(IRepository repo) { @Override public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { + return handleByGms(logicalPlan, executionContext); + } + + public Cursor handleByGms(RelNode logicalPlan, ExecutionContext executionContext) { + final LogicalCreateDatabase createDatabase = (LogicalCreateDatabase) logicalPlan; final SqlCreateDatabase sqlCreateDatabase = (SqlCreateDatabase) createDatabase.getNativeSqlNode(); final LocalityManager lm = LocalityManager.getInstance(); @@ -134,6 +134,12 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { } } else { defaultSingle = false; + if (!localityDesc.holdEmptyLocality()) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + "database of drds mode doesn't support locality specification!" 
+ ); + + } } CreateDbInfo createDbInfo = DbTopologyManager.initCreateDbInfo( @@ -146,7 +152,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { DbEventUtil.logFirstAutoDbCreationEvent(createDbInfo); CdcManagerHelper.getInstance() .notifyDdl(dbName, null, sqlCreateDatabase.getKind().name(), executionContext.getOriginSql(), - DdlVisibility.Public, buildExtendParameter(executionContext)); + null, CdcDdlMarkVisibility.Public, buildExtendParameter(executionContext)); if (!finalLocalityDesc.holdEmptyDnList()) { lm.setLocalityOfDb(dbId, finalLocalityDesc.toString()); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateDatabaseLikeAsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateDatabaseLikeAsHandler.java index bf75950eb..2c6732728 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateDatabaseLikeAsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateDatabaseLikeAsHandler.java @@ -144,7 +144,7 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e // } final TableMeta tm = schemaManager.getTable(tbName); rows = (long) tm.getRowCount(null); - CostEstimableDdlTask.CostInfo info = CostEstimableDdlTask.createCostInfo(rows, null); + CostEstimableDdlTask.CostInfo info = CostEstimableDdlTask.createCostInfo(rows, null, null); allTableCostInfos.put(tbName, info); } ); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateFileStorageHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateFileStorageHandler.java index 133e0c4ab..b5db773dc 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateFileStorageHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateFileStorageHandler.java @@ -33,7 +33,6 @@ import java.util.HashMap; import java.util.Map; import java.util.concurrent.ExecutionException; -import java.util.stream.Collectors; public class LogicalCreateFileStorageHandler extends LogicalCommonDdlHandler { public LogicalCreateFileStorageHandler(IRepository repo) { @@ -63,6 +62,7 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e // check fileStorageInfoKey Map with = new HashMap<>(); + Map azureSettings = null; for (Map.Entry e : createFileStorage.getWith().entrySet()) { String key = e.getKey(); String value = e.getValue(); @@ -71,8 +71,34 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e throw new TddlRuntimeException(ErrorCode.ERR_EXECUTE_ON_OSS, "error key: " + key); } with.put(fileStorageInfoKey, value); + if (fileStorageInfoKey == FileStorageInfoKey.AZURE_CONNECTION_STRING) { + azureSettings = parseConnectionString(value); + } } - return new CreateFileStorageJobFactory(engine, with, executionContext).create(); + return new CreateFileStorageJobFactory(engine, with, azureSettings, executionContext).create(); + } + + private static Map parseConnectionString( + String connectionString) { + Map parts = new HashMap<>(); + // Split the connection string on semicolons + String[] pairs = connectionString.split(";"); + for (String pair : pairs) { + // Split each pair on the first equals sign to separate key and value + int idx = pair.indexOf('='); + if (idx > 0 && idx < pair.length() - 1) { + // Ensure 
that the key and value are non-empty + String key = pair.substring(0, idx); + String value = pair.substring(idx + 1); + FileStorageInfoKey.AzureConnectionStringKey azureKey; + if ((azureKey = FileStorageInfoKey.AzureConnectionStringKey.of(key)) == null) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTE_ON_OSS, + "error key of connection string: " + key); + } + parts.put(azureKey, value); + } + } + return parts; } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateIndexHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateIndexHandler.java index d63952bd2..ae5a98e7c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateIndexHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateIndexHandler.java @@ -33,6 +33,7 @@ import com.alibaba.polardbx.druid.util.JdbcConstants; import com.alibaba.polardbx.executor.ddl.job.factory.CreateIndexJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.gsi.CreatePartitionGsiJobFactory; +import com.alibaba.polardbx.executor.ddl.job.factory.gsi.columnar.CreateColumnarIndexJobFactory; import com.alibaba.polardbx.executor.ddl.job.task.gsi.StatisticSampleTask; import com.alibaba.polardbx.executor.ddl.job.task.gsi.ValidateTableVersionTask; import com.alibaba.polardbx.executor.ddl.job.validator.IndexValidator; @@ -41,6 +42,7 @@ import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.executor.utils.DdlUtils; import com.alibaba.polardbx.optimizer.PlannerContext; import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; import com.alibaba.polardbx.optimizer.config.table.GeneratedColumnUtil; @@ -89,8 +91,12 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e boolean globalIndex = logicalCreateIndex.isClustered() || logicalCreateIndex.isGsi(); boolean expressionIndex = isExpressionIndex(logicalCreateIndex, executionContext); + boolean isColumnar = logicalCreateIndex.isColumnar(); + final Long versionId = DdlUtils.generateVersionId(executionContext); - if (expressionIndex) { + if (isColumnar) { + return buildCreateColumnarIndexJob(logicalCreateIndex, versionId, executionContext); + } else if (expressionIndex) { if (!executionContext.getParamManager().getBoolean(ConnectionParams.ENABLE_CREATE_EXPRESSION_INDEX)) { throw new TddlRuntimeException(ErrorCode.ERR_OPTIMIZER, "create expression index is not enabled"); } @@ -98,13 +104,13 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e if (globalIndex) { return buildCreateGlobalExpressionIndexJob(); } else { - return buildCreateLocalExpressionIndexJob(logicalCreateIndex, executionContext); + return buildCreateLocalExpressionIndexJob(logicalCreateIndex, versionId, executionContext); } } else { if (globalIndex) { - return buildCreateGsiJob(logicalCreateIndex, executionContext); + return buildCreateGsiJob(logicalCreateIndex, versionId, executionContext); } else { - return buildCreateLocalIndexJob(logicalCreateIndex, executionContext); + return buildCreateLocalIndexJob(logicalCreateIndex, versionId, executionContext); } } } @@ -124,8 +130,11 @@ protected boolean validatePlan(BaseDdlOperation logicalDdlPlan, ExecutionContext return false; } - private DdlJob 
buildCreateLocalIndexJob(LogicalCreateIndex logicalCreateIndex, ExecutionContext executionContext) { + private DdlJob buildCreateLocalIndexJob(LogicalCreateIndex logicalCreateIndex, + Long ddlVersionId, + ExecutionContext executionContext) { logicalCreateIndex.prepareData(); + logicalCreateIndex.setDdlVersionId(ddlVersionId); ExecutableDdlJob localIndexJob = CreateIndexJobFactory.createLocalIndex( logicalCreateIndex.relDdl, logicalCreateIndex.getNativeSqlNode(), @@ -151,11 +160,34 @@ private DdlJob buildCreateLocalIndexJob(LogicalCreateIndex logicalCreateIndex, E return localIndexJob; } - private DdlJob buildCreateGsiJob(LogicalCreateIndex logicalCreateIndex, ExecutionContext executionContext) { + private DdlJob buildCreateColumnarIndexJob(LogicalCreateIndex logicalCreateIndex, + Long ddlVersionId, + ExecutionContext executionContext) { initPrimaryTableDefinition(logicalCreateIndex, executionContext); // Should prepare data after initializing the primary table definition. logicalCreateIndex.prepareData(); + logicalCreateIndex.setDdlVersionId(ddlVersionId); + + CreateIndexWithGsiPreparedData preparedData = logicalCreateIndex.getCreateIndexWithGsiPreparedData(); + CreateGlobalIndexPreparedData globalIndexPreparedData = preparedData.getGlobalIndexPreparedData(); + + ExecutableDdlJob cciJob = CreateColumnarIndexJobFactory.create4CreateCci( + logicalCreateIndex.relDdl, + globalIndexPreparedData, + executionContext); + + return cciJob; + } + + private DdlJob buildCreateGsiJob(LogicalCreateIndex logicalCreateIndex, + Long ddlVersionId, + ExecutionContext executionContext) { + initPrimaryTableDefinition(logicalCreateIndex, executionContext); + + // Should prepare data after initializing the primary table definition. + logicalCreateIndex.prepareData(); + logicalCreateIndex.setDdlVersionId(ddlVersionId); CreateIndexWithGsiPreparedData preparedData = logicalCreateIndex.getCreateIndexWithGsiPreparedData(); CreateGlobalIndexPreparedData globalIndexPreparedData = preparedData.getGlobalIndexPreparedData(); @@ -184,7 +216,7 @@ private DdlJob buildCreateGsiJob(LogicalCreateIndex logicalCreateIndex, Executio } gsiJob.addSequentialTasksAfter(gsiJob.getTail(), Lists.newArrayList(new StatisticSampleTask( globalIndexPreparedData.getSchemaName(), - globalIndexPreparedData.getIndexTableName() + globalIndexPreparedData.getPrimaryTableName() ))); return gsiJob; } @@ -202,6 +234,10 @@ private void initPrimaryTableDefinition(LogicalCreateIndex logicalDdlPlan, Execu sqlCreateIndex = logicalDdlPlan.getSqlCreateIndex(); sqlCreateIndex.setPrimaryTableDefinition(primaryTableInfo.getKey()); sqlCreateIndex.setPrimaryTableNode(primaryTableInfo.getValue()); + + sqlCreateIndex = logicalDdlPlan.getNormalizedOriginalDdl(); + sqlCreateIndex.setPrimaryTableDefinition(primaryTableInfo.getKey()); + sqlCreateIndex.setPrimaryTableNode(primaryTableInfo.getValue()); } private boolean isExpressionIndex(LogicalCreateIndex logicalCreateIndex, ExecutionContext executionContext) { @@ -252,6 +288,7 @@ private boolean isExpressionIndex(LogicalCreateIndex logicalCreateIndex, Executi } private DdlJob buildCreateLocalExpressionIndexJob(LogicalCreateIndex logicalCreateIndex, + Long ddlVersionId, ExecutionContext executionContext) { String schemaName = logicalCreateIndex.getSchemaName(); String tableName = logicalCreateIndex.getTableName(); @@ -290,7 +327,7 @@ private DdlJob buildCreateLocalExpressionIndexJob(LogicalCreateIndex logicalCrea logicalAlterTable.setRewrittenAlterSql(true); LogicalAlterTableHandler logicalAlterTableHandler = new 
LogicalAlterTableHandler(repo); - return logicalAlterTableHandler.buildDdlJob(logicalAlterTable, executionContext); + return logicalAlterTableHandler.doBuildDdlJob(logicalAlterTable, ddlVersionId, executionContext); } private DdlJob buildCreateGlobalExpressionIndexJob() { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateMaterializedViewHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateMaterializedViewHandler.java index 99cb2f1eb..aeeac8f1e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateMaterializedViewHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateMaterializedViewHandler.java @@ -16,12 +16,13 @@ package com.alibaba.polardbx.executor.handler.ddl; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; import com.alibaba.polardbx.common.ddl.newengine.DdlType; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.properties.ConnectionProperties; -import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.TStringUtil; import com.alibaba.polardbx.druid.sql.ast.statement.SQLCreateViewStatement; import com.alibaba.polardbx.executor.ExecutorHelper; @@ -34,6 +35,7 @@ import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.executor.sync.CreateViewSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.group.jdbc.TGroupDataSource; import com.alibaba.polardbx.group.jdbc.TGroupDirectConnection; @@ -44,7 +46,6 @@ import com.alibaba.polardbx.optimizer.context.DdlContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.Blob; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; import com.alibaba.polardbx.optimizer.core.dialect.DbType; import com.alibaba.polardbx.optimizer.core.planner.ExecutionPlan; import com.alibaba.polardbx.optimizer.core.planner.Planner; @@ -69,12 +70,14 @@ import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rel.type.RelDataTypeField; import org.apache.calcite.sql.SqlCreateTable; +import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.TDDLSqlSelect; import java.sql.PreparedStatement; import java.util.ArrayList; import java.util.List; +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; import static com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx.BLOCK_SIZE; public class LogicalCreateMaterializedViewHandler extends LogicalCreateTableHandler { @@ -154,9 +157,9 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { } else { logicalCreateeRelNode.prepareData(executionContext); if (!isNewPartDb) { - ddlJob = buildCreateTableJob(logicalCreateeRelNode, executionContext); + ddlJob = buildCreateTableJob(logicalCreateeRelNode, executionContext, null); } else { - ddlJob = buildCreatePartitionTableJob(logicalCreateeRelNode, executionContext); + ddlJob = buildCreatePartitionTableJob(logicalCreateeRelNode, executionContext, null); } } @@ -171,6 +174,9 
@@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { } catch (Throwable t) { throw new RuntimeException(t); } + + markDdlForCdc(executionContext, schemaName, tableName, executionContext.getOriginSql()); + if (!logicalCreateTable.bRefresh) { syncView(logicalCreateTable, executionContext, columnNameList); } @@ -369,6 +375,20 @@ public void syncView(final LogicalCreateMaterializedView materializedView, Execu + "write"); } - SyncManagerHelper.sync(new CreateViewSyncAction(schemaName, viewName), schemaName); + SyncManagerHelper.sync(new CreateViewSyncAction(schemaName, viewName), schemaName, SyncScope.CURRENT_ONLY); + } + + //TODO cdc@shengyu + private void markDdlForCdc(ExecutionContext executionContext, String schemaName, String viewName, String ddlSql) { + CdcManagerHelper.getInstance().notifyDdlNew( + schemaName, + viewName, + SqlKind.CREATE_MATERIALIZED_VIEW.name(), + ddlSql, + DdlType.UNSUPPORTED, + null, + null, + CdcDdlMarkVisibility.Protected, + buildExtendParameter(executionContext)); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateTableHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateTableHandler.java index 47c83c509..a470f649e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateTableHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateTableHandler.java @@ -18,9 +18,6 @@ import com.alibaba.polardbx.common.ArchiveMode; import com.alibaba.polardbx.common.Engine; -import com.alibaba.polardbx.common.cdc.CdcManagerHelper; -import com.alibaba.polardbx.common.cdc.DdlVisibility; -import com.alibaba.polardbx.common.constants.SequenceAttribute; import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; @@ -29,11 +26,9 @@ import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.TStringUtil; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLSelectQuery; import com.alibaba.polardbx.druid.sql.SQLUtils; import com.alibaba.polardbx.druid.sql.ast.SQLPartitionBy; import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlCreateTableStatement; -import com.alibaba.polardbx.druid.sql.parser.ByteString; import com.alibaba.polardbx.executor.common.ExecutorContext; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; @@ -46,9 +41,10 @@ import com.alibaba.polardbx.executor.ddl.job.factory.CreateTableJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.CreateTableSelectJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.CreateTableWithGsiJobFactory; +import com.alibaba.polardbx.executor.ddl.job.factory.PureCdcDdlMark4CreateTableJobFactory; +import com.alibaba.polardbx.executor.ddl.job.factory.ReimportTableJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.oss.CreatePartitionOssTableJobFactory; import com.alibaba.polardbx.executor.ddl.job.task.basic.InsertIntoTask; -import com.alibaba.polardbx.executor.ddl.job.task.basic.LogicalInsertTask; import com.alibaba.polardbx.executor.ddl.job.validator.ColumnValidator; import com.alibaba.polardbx.executor.ddl.job.validator.ConstraintValidator; import 
com.alibaba.polardbx.executor.ddl.job.validator.ForeignKeyValidator; @@ -56,26 +52,28 @@ import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; import com.alibaba.polardbx.executor.ddl.newengine.job.TransientDdlJob; -import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4CreateGsi; -import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4CreatePartitionGsi; -import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4CreatePartitionTable; -import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4CreateTable; -import com.alibaba.polardbx.executor.ddl.newengine.job.wrapper.ExecutableDdlJob4InsertOverwrite; import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlHelper; import com.alibaba.polardbx.executor.handler.LogicalShowCreateTableHandler; import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.executor.utils.DdlUtils; +import com.alibaba.polardbx.executor.utils.StandardToEnterpriseEditionUtil; +import com.alibaba.polardbx.gms.locality.LocalityDesc; import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; +import com.alibaba.polardbx.gms.metadb.limit.LimitValidator; import com.alibaba.polardbx.gms.metadb.misc.DdlEngineTaskAccessor; import com.alibaba.polardbx.gms.metadb.misc.DdlEngineTaskRecord; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; +import com.alibaba.polardbx.gms.topology.DbGroupInfoRecord; import com.alibaba.polardbx.gms.topology.DbInfoManager; +import com.alibaba.polardbx.gms.topology.DbTopologyManager; import com.alibaba.polardbx.gms.util.TableGroupNameUtil; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.PlannerContext; +import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; import com.alibaba.polardbx.optimizer.config.table.DefaultExprUtil; import com.alibaba.polardbx.optimizer.config.table.GeneratedColumnUtil; -import com.alibaba.polardbx.optimizer.context.DdlContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.CursorMeta; import com.alibaba.polardbx.optimizer.core.planner.ExecutionPlan; import com.alibaba.polardbx.optimizer.core.planner.Planner; import com.alibaba.polardbx.optimizer.core.planner.SqlConverter; @@ -83,6 +81,7 @@ import com.alibaba.polardbx.optimizer.core.rel.ddl.BaseDdlOperation; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalCreateTable; import com.alibaba.polardbx.optimizer.core.rel.ddl.data.CreateTablePreparedData; +import com.alibaba.polardbx.optimizer.core.rel.ddl.data.LikeTableInfo; import com.alibaba.polardbx.optimizer.core.rel.ddl.data.gsi.CreateTableWithGsiPreparedData; import com.alibaba.polardbx.optimizer.core.row.Row; import com.alibaba.polardbx.optimizer.parse.FastsqlParser; @@ -91,16 +90,14 @@ import com.alibaba.polardbx.optimizer.parse.visitor.FastSqlToCalciteNodeVisitor; import com.alibaba.polardbx.optimizer.partition.PartitionInfo; import com.alibaba.polardbx.optimizer.partition.common.PartitionTableType; -import com.alibaba.polardbx.optimizer.utils.RelUtils; -import groovy.sql.Sql; +import com.alibaba.polardbx.optimizer.tablegroup.TableGroupInfoManager; +import com.alibaba.polardbx.optimizer.tablegroup.TableGroupUtils; +import io.grpc.netty.shaded.io.netty.util.internal.StringUtil; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.core.DDL; import 
org.apache.calcite.rel.ddl.CreateTable; -import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rel.type.RelDataTypeField; -import com.alibaba.polardbx.optimizer.tablegroup.TableGroupInfoManager; -import org.apache.calcite.rel.core.DDL; -import org.apache.calcite.rel.ddl.CreateTable; +import org.apache.calcite.rel.type.RelDataTypeFieldImpl; import org.apache.calcite.rex.RexNode; import org.apache.calcite.sql.SqlCall; import org.apache.calcite.sql.SqlColumnDeclaration; @@ -109,30 +106,29 @@ import org.apache.calcite.sql.SqlIdentifier; import org.apache.calcite.sql.SqlIndexColumnName; import org.apache.calcite.sql.SqlIndexDefinition; -import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlLiteral; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.SqlNodeList; -import org.apache.calcite.sql.SqlSelect; -import org.apache.calcite.sql.SqlNode; +import org.apache.calcite.sql.SqlPartition; import org.apache.calcite.sql.SqlPartitionBy; +import org.apache.calcite.sql.SqlPartitionByHash; +import org.apache.calcite.sql.SqlPartitionValue; +import org.apache.calcite.sql.SqlPartitionValueItem; +import org.apache.calcite.sql.SqlSelect; import org.apache.calcite.sql.SqlShowCreateTable; -import org.apache.calcite.sql.dialect.MysqlSqlDialect; +import org.apache.calcite.sql.SqlSubPartition; import org.apache.calcite.sql.SqlSubPartitionBy; +import org.apache.calcite.sql.SqlSubPartitionByHash; +import org.apache.calcite.sql.dialect.MysqlSqlDialect; import org.apache.calcite.sql.parser.SqlParserPos; import org.apache.calcite.sql.pretty.SqlPrettyWriter; import org.apache.calcite.sql.type.BasicSqlType; -import org.apache.calcite.sql.type.SqlTypeFamily; -import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.util.Pair; -import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; import org.apache.commons.lang3.StringUtils; import java.sql.Connection; import java.util.ArrayList; import java.util.List; -import java.util.ArrayList; -import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -175,6 +171,33 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e sqlCreateTable = (SqlCreateTable) logicalCreateTable.relDdl.sqlNode; + // return a job factory that only performs the cdc ddl mark for CREATE TABLE IF NOT EXISTS when the table already exists + final String schemaName = logicalDdlPlan.getSchemaName(); + final String logicalTableName = logicalDdlPlan.getTableName(); + boolean tableExists = TableValidator.checkIfTableExists(schemaName, logicalTableName); + if (tableExists && sqlCreateTable.isIfNotExists()) { + LimitValidator.validateTableNameLength(schemaName); + LimitValidator.validateTableNameLength(logicalTableName); + + // Prompt "show warning" only. 
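> To make the new branch concrete before the warning is stored below: with this change, re-issuing `CREATE TABLE IF NOT EXISTS` against an existing table no longer marks CDC inline during validation; it returns a job that only writes the CDC DDL mark, and the duplicate-table message surfaces through `SHOW WARNINGS` rather than as an error. A hypothetical JDBC session (the table name `t1` and an open `Connection` are assumptions for illustration):

```java
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

// Illustrative client-side view of the behavior; `t1` is assumed to exist already.
class IfNotExistsWarningDemo {
    static void demo(Connection conn) throws SQLException {
        try (Statement stmt = conn.createStatement()) {
            // succeeds without error; only a CDC DDL mark job runs server-side
            stmt.executeUpdate("CREATE TABLE IF NOT EXISTS t1 (id bigint PRIMARY KEY)");
            try (ResultSet rs = stmt.executeQuery("SHOW WARNINGS")) {
                while (rs.next()) {
                    System.out.println(rs.getString("Message")); // Table 't1' already exists
                }
            }
        }
    }
}
```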
+ DdlHelper.storeFailedMessage(schemaName, ERROR_TABLE_EXISTS, + " Table '" + logicalTableName + "' already exists", executionContext); + executionContext.getDdlContext().setUsingWarning(true); + + return new PureCdcDdlMark4CreateTableJobFactory(schemaName, logicalTableName).create(); + } + + //import table, reimport table + boolean importTable = executionContext.getParamManager().getBoolean(ConnectionParams.IMPORT_TABLE); + boolean reImportTable = executionContext.getParamManager().getBoolean(ConnectionParams.REIMPORT_TABLE); + if (importTable) { + logicalCreateTable.setImportTable(true); + } + if (reImportTable) { + logicalCreateTable.setReImportTable(true); + } + + LikeTableInfo likeTableInfo = null; if (sqlCreateTable.getLikeTableName() != null) { final String sourceCreateTableSql = generateCreateTableSqlForLike(sqlCreateTable, executionContext); MySqlCreateTableStatement stmt = @@ -213,9 +236,20 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e "cannot create table using ARCHIVE_MODE if the engine of target table is INNODB."); } + List dictColumns; + if ((dictColumns = sqlCreateTable.getDictColumns()) != null) { + createTableLikeSqlAst.setDictColumns(dictColumns); + } + + if (!Engine.isFileStore(engine) && dictColumns != null) { + throw GeneralUtil.nestedException( + "cannot create table with DICTIONARY_COLUMNS if the engine of target table is INNODB."); + } + SqlIdentifier sourceTableName = (SqlIdentifier) sqlCreateTable.getLikeTableName(); String sourceSchema = sourceTableName.names.size() > 1 ? sourceTableName.names.get(0) : executionContext.getSchemaName(); + likeTableInfo = new LikeTableInfo(sourceSchema, sourceTableName.getLastName()); SqlIdentifier targetTableName = (SqlIdentifier) sqlCreateTable.getName(); String targetSchema = @@ -340,6 +374,8 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e expandTableGroupDefinition(logicalCreateTable.relDdl, logicalCreateTable.getSchemaName(), executionContext); } logicalCreateTable.prepareData(executionContext); + final Long versionId = DdlUtils.generateVersionId(executionContext); + logicalCreateTable.setDdlVersionId(versionId); if (flag) { String createTableSql = logicalCreateTable.getNativeSql(); String insertIntoSql = logicalCreateTable.getCreateTablePreparedData().getSelectSql(); @@ -349,15 +385,15 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e boolean isNewPartDb = DbInfoManager.getInstance().isNewPartitionDb(logicalCreateTable.getSchemaName()); if (!isNewPartDb) { if (logicalCreateTable.isWithGsi()) { - return buildCreateTableWithGsiJob(logicalCreateTable, executionContext); + return buildCreateTableWithGsiJob(logicalCreateTable, executionContext, likeTableInfo); } else { - return buildCreateTableJob(logicalCreateTable, executionContext); + return buildCreateTableJob(logicalCreateTable, executionContext, likeTableInfo); } } else { if (logicalCreateTable.isWithGsi()) { - return buildCreatePartitionTableWithGsiJob(logicalCreateTable, executionContext); + return buildCreatePartitionTableWithGsiJob(logicalCreateTable, executionContext, likeTableInfo); } else { - return buildCreatePartitionTableJob(logicalCreateTable, executionContext); + return buildCreatePartitionTableJob(logicalCreateTable, executionContext, likeTableInfo); } } } @@ -376,7 +412,7 @@ private void expandTableGroupDefinition(DDL createTable, String schemaName, Exec SqlCreateTable sqlCreateTable = (SqlCreateTable) createTable.sqlNode; String tableGroupName = 
sqlCreateTable.getTableGroupName() == null ? null : ((SqlIdentifier) sqlCreateTable.getTableGroupName()).getLastName(); - if (StringUtils.isEmpty(tableGroupName)) { + if (StringUtils.isEmpty(tableGroupName) || sqlCreateTable.isWithImplicitTableGroup()) { return; } @@ -386,35 +422,56 @@ private void expandTableGroupDefinition(DDL createTable, String schemaName, Exec if (tableGroupConfig == null) { throw new TddlRuntimeException(ErrorCode.ERR_TABLE_GROUP_NOT_EXISTS, tableGroupName); } - String partitionDef = tableGroupConfig.getPreDefinePartitionInfo(); + String partitionDef = TableGroupUtils.getPreDefinePartitionInfo(tableGroupConfig, ec); if (StringUtils.isEmpty(partitionDef)) { return; } - SqlPartitionBy tableSqlPartitionBy = (SqlPartitionBy) sqlCreateTable.getSqlPartition(); - SQLPartitionBy fastSqlPartitionBy = FastsqlUtils.parsePartitionBy(partitionDef, true); - FastSqlToCalciteNodeVisitor visitor = - new FastSqlToCalciteNodeVisitor(new ContextParameters(false), ec); - fastSqlPartitionBy.accept(visitor); - SqlPartitionBy tableGroupSqlPartitionBy = (SqlPartitionBy) visitor.getSqlNode(); + boolean isSingle = tableGroupConfig.getTableGroupRecord().isSingleTableGroup() + || tableGroupConfig.isEmpty() && partitionDef.trim().equalsIgnoreCase("SINGLE"); + if (!isSingle) { + + SqlPartitionBy tableSqlPartitionBy = (SqlPartitionBy) sqlCreateTable.getSqlPartition(); + SQLPartitionBy fastSqlPartitionBy = FastsqlUtils.parsePartitionBy(partitionDef, true); + if (!tableGroupConfig.isEmpty()) { + //i.e. partition by udf_hash(Mymurmurhash64var(c1)), Mymurmurhash64var(int) is not a valid column definition, + // but it will be as part of result when /*+TDDL:cmd_extra(SHOW_HASH_PARTITIONS_BY_RANGE=TRUE)*/ show create full table + fastSqlPartitionBy.getColumnsDefinition().clear(); + } + if (fastSqlPartitionBy.getSubPartitionBy() != null) { + fastSqlPartitionBy.getSubPartitionBy().getColumnsDefinition().clear(); + } + FastSqlToCalciteNodeVisitor visitor = + new FastSqlToCalciteNodeVisitor(new ContextParameters(false), ec); + fastSqlPartitionBy.accept(visitor); + SqlPartitionBy tableGroupSqlPartitionBy = (SqlPartitionBy) visitor.getSqlNode(); - validateSqlPartitionBy(tableGroupSqlPartitionBy, sqlCreateTable); + validateSqlPartitionBy(tableGroupSqlPartitionBy, sqlCreateTable); - tableGroupSqlPartitionBy.getColumns().clear(); - tableGroupSqlPartitionBy.getColumns().addAll(tableSqlPartitionBy.getColumns()); + tableGroupSqlPartitionBy.getColumns().clear(); + tableGroupSqlPartitionBy.getColumns().addAll(tableSqlPartitionBy.getColumns()); - SqlSubPartitionBy sqlSubPartitionBy = tableGroupSqlPartitionBy.getSubPartitionBy(); - if (sqlSubPartitionBy != null) { - sqlSubPartitionBy.getColumns().clear(); - sqlSubPartitionBy.getColumns().addAll(tableSqlPartitionBy.getSubPartitionBy().getColumns()); - } - sqlCreateTable.setSqlPartition(tableGroupSqlPartitionBy); + SqlSubPartitionBy sqlSubPartitionBy = tableGroupSqlPartitionBy.getSubPartitionBy(); + if (sqlSubPartitionBy != null) { + sqlSubPartitionBy.getColumns().clear(); + sqlSubPartitionBy.getColumns().addAll(tableSqlPartitionBy.getSubPartitionBy().getColumns()); + } - SqlConverter sqlConverter = SqlConverter.getInstance(schemaName, ec); - PlannerContext plannerContext = PlannerContext.getPlannerContext(createTable.getCluster()); + normlizePartitionBy(tableGroupSqlPartitionBy); + normlizeSubPartitionBy(sqlSubPartitionBy); - Map partRexInfoCtx = sqlConverter.convertPartition(tableGroupSqlPartitionBy, plannerContext); + 
sqlCreateTable.setSqlPartition(tableGroupSqlPartitionBy); - ((CreateTable) (createTable)).getPartBoundExprInfo().putAll(partRexInfoCtx); + SqlConverter sqlConverter = SqlConverter.getInstance(schemaName, ec); + PlannerContext plannerContext = PlannerContext.getPlannerContext(createTable.getCluster()); + + Map partRexInfoCtx = + sqlConverter.convertPartition(tableGroupSqlPartitionBy, plannerContext); + + ((CreateTable) (createTable)).getPartBoundExprInfo().putAll(partRexInfoCtx); + } else { + sqlCreateTable.setSingle(true); + sqlCreateTable.setSqlPartition(null); + } } private void validateSqlPartitionBy(SqlPartitionBy tableGroupSqlPartitionBy, SqlCreateTable sqlCreateTable) { @@ -449,8 +506,6 @@ private void validateSqlPartitionBy(SqlPartitionBy tableGroupSqlPartitionBy, Sql } } - tableSqlPartitionBy.getColumns(); - sqlCreateTable.getColDefs(); SqlSubPartitionBy tgSqlSubPartitionBy = tableGroupSqlPartitionBy.getSubPartitionBy(); SqlSubPartitionBy tbSqlSubPartitionBy = tableSqlPartitionBy.getSubPartitionBy(); if (tgSqlSubPartitionBy != null && tbSqlSubPartitionBy == null) { @@ -491,6 +546,55 @@ protected boolean validatePlan(BaseDdlOperation logicalDdlPlan, ExecutionContext final String logicalTableName = logicalDdlPlan.getTableName(); boolean isNewPart = DbInfoManager.getInstance().isNewPartitionDb(schemaName); + //validate import/reimport table + boolean isImportTable = executionContext.getParamManager().getBoolean(ConnectionParams.IMPORT_TABLE); + boolean reImportTable = executionContext.getParamManager().getBoolean(ConnectionParams.REIMPORT_TABLE); + if (isImportTable || reImportTable) { + String locality = sqlCreateTable.getLocality(); + if (StringUtil.isNullOrEmpty(locality)) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + "physical table's locality is required when import table"); + } + LocalityDesc localityDesc = LocalityDesc.parse(locality); + if (localityDesc.getDnList().size() != 1) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + "only one DN Id is allowed"); + } + String dnName = localityDesc.getDnList().get(0); + List dbGroupInfoRecords = + DbTopologyManager.getAllDbGroupInfoRecordByInstId(schemaName, dnName); + if (dbGroupInfoRecords.size() != 1) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + "it's not allowed to import table when db group size exceeds 1"); + } + + String phyDbName = dbGroupInfoRecords.get(0).phyDbName; + + Set phyTables = + StandardToEnterpriseEditionUtil.queryPhysicalTableListFromPhysicalDabatase(dnName, phyDbName); + if (!phyTables.contains(logicalTableName)) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + String.format("physical table [%s] not found when %s table in schema [%s] instantId [%s]", + logicalTableName, + isImportTable ? "import" : "reimport", + schemaName, dnName) + ); + } + + if (reImportTable) { + Set logicalTables = + StandardToEnterpriseEditionUtil.getTableNamesFromLogicalDatabase(schemaName, executionContext); + if (!logicalTables.contains(logicalTableName)) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + String.format("logical table [%s] not found when reimport table in schema [%s]", + logicalTableName, schemaName)); + } + } + + //use database-level locality, so no need of table-level locality + sqlCreateTable.setLocality(null); + } + if (isNewPart) { if (sqlCreateTable.isBroadCast()) { String tableGroupName = sqlCreateTable.getTableGroupName() == null ? 
null : @@ -548,16 +652,9 @@ protected boolean validatePlan(BaseDdlOperation logicalDdlPlan, ExecutionContext boolean tableExists = TableValidator.checkIfTableExists(schemaName, logicalTableName); if (tableExists && sqlCreateTable.isIfNotExists()) { - DdlContext ddlContext = executionContext.getDdlContext(); - CdcManagerHelper.getInstance().notifyDdlNew(schemaName, logicalTableName, SqlKind.CREATE_TABLE.name(), - ddlContext.getDdlStmt(), ddlContext.getDdlType(), null, null, - DdlVisibility.Public, buildExtendParameter(executionContext)); - - // Prompt "show warning" only. - DdlHelper.storeFailedMessage(schemaName, ERROR_TABLE_EXISTS, - " Table '" + logicalTableName + "' already exists", executionContext); - executionContext.getDdlContext().setUsingWarning(true); - return true; + // do nothing + } else if (reImportTable) { + // do nothing } else if (tableExists) { throw new TddlRuntimeException(ERR_TABLE_ALREADY_EXISTS, logicalTableName); } @@ -574,7 +671,8 @@ protected boolean validatePlan(BaseDdlOperation logicalDdlPlan, ExecutionContext return false; } - protected DdlJob buildCreateTableJob(LogicalCreateTable logicalCreateTable, ExecutionContext executionContext) { + protected DdlJob buildCreateTableJob(LogicalCreateTable logicalCreateTable, ExecutionContext executionContext, + LikeTableInfo likeTableInfo) { CreateTablePreparedData createTablePreparedData = logicalCreateTable.getCreateTablePreparedData(); DdlPhyPlanBuilder createTableBuilder = @@ -588,7 +686,9 @@ protected DdlJob buildCreateTableJob(LogicalCreateTable logicalCreateTable, Exec createTablePreparedData.getSpecialDefaultValueFlags(), createTablePreparedData.getAddedForeignKeys(), physicalPlanData, - executionContext + createTablePreparedData.getDdlVersionId(), + executionContext, + likeTableInfo ); logicalCreateTable.setAffectedRows(ret.getAffectRows()); if (createTablePreparedData.getSelectSql() != null) { @@ -599,7 +699,8 @@ protected DdlJob buildCreateTableJob(LogicalCreateTable logicalCreateTable, Exec } protected DdlJob buildCreatePartitionTableJob(LogicalCreateTable logicalCreateTable, - ExecutionContext executionContext) { + ExecutionContext executionContext, + LikeTableInfo likeTableInfo) { PartitionTableType partitionTableType = PartitionTableType.SINGLE_TABLE; if (logicalCreateTable.isPartitionTable()) { partitionTableType = PartitionTableType.PARTITION_TABLE; @@ -613,15 +714,17 @@ protected DdlJob buildCreatePartitionTableJob(LogicalCreateTable logicalCreateTa new CreatePartitionTableBuilder(logicalCreateTable.relDdl, createTablePreparedData, executionContext, partitionTableType).build(); PhysicalPlanData physicalPlanData = createTableBuilder.genPhysicalPlanData(); + Engine tableEngine = ((SqlCreateTable) logicalCreateTable.relDdl.sqlNode).getEngine(); ArchiveMode archiveMode = ((SqlCreateTable) logicalCreateTable.relDdl.sqlNode).getArchiveMode(); + List dictColumns = ((SqlCreateTable) logicalCreateTable.relDdl.sqlNode).getDictColumns(); if (Engine.isFileStore(tableEngine)) { CreatePartitionOssTableJobFactory ret = new CreatePartitionOssTableJobFactory( createTablePreparedData.isAutoPartition(), createTablePreparedData.isTimestampColumnDefault(), createTablePreparedData.getSpecialDefaultValues(), createTablePreparedData.getSpecialDefaultValueFlags(), - physicalPlanData, executionContext, createTablePreparedData, tableEngine, archiveMode); + physicalPlanData, executionContext, createTablePreparedData, tableEngine, archiveMode, dictColumns); if (createTablePreparedData.getSelectSql() != null) { 
ret.setSelectSql(createTablePreparedData.getSelectSql()); } @@ -630,24 +733,38 @@ protected DdlJob buildCreatePartitionTableJob(LogicalCreateTa } PartitionInfo partitionInfo = createTableBuilder.getPartitionInfo(); - CreatePartitionTableJobFactory ret = new CreatePartitionTableJobFactory( - createTablePreparedData.isAutoPartition(), createTablePreparedData.isTimestampColumnDefault(), - createTablePreparedData.getSpecialDefaultValues(), - createTablePreparedData.getSpecialDefaultValueFlags(), - createTablePreparedData.getAddedForeignKeys(), - physicalPlanData, executionContext, createTablePreparedData, partitionInfo); - if (createTablePreparedData.getSelectSql() != null) { - ret.setSelectSql(createTablePreparedData.getSelectSql()); + if (logicalCreateTable.isReImportTable()) { + return new ReimportTableJobFactory(createTablePreparedData.isAutoPartition(), + createTablePreparedData.isTimestampColumnDefault(), + createTablePreparedData.getSpecialDefaultValues(), + createTablePreparedData.getSpecialDefaultValueFlags(), + createTablePreparedData.getAddedForeignKeys(), + physicalPlanData, executionContext, createTablePreparedData, partitionInfo).create(); + + } else { + CreatePartitionTableJobFactory ret = new CreatePartitionTableJobFactory( + createTablePreparedData.isAutoPartition(), createTablePreparedData.isTimestampColumnDefault(), + createTablePreparedData.getSpecialDefaultValues(), + createTablePreparedData.getSpecialDefaultValueFlags(), + createTablePreparedData.getAddedForeignKeys(), + physicalPlanData, executionContext, createTablePreparedData, partitionInfo, likeTableInfo); + + if (createTablePreparedData.getSelectSql() != null) { + ret.setSelectSql(createTablePreparedData.getSelectSql()); + } + logicalCreateTable.setAffectedRows(ret.getAffectRows()); + return ret.create(); } - logicalCreateTable.setAffectedRows(ret.getAffectRows()); - return ret.create(); } private DdlJob buildCreateTableWithGsiJob(LogicalCreateTable logicalCreateTable, - ExecutionContext executionContext) { + ExecutionContext executionContext, + LikeTableInfo likeTableInfo) { CreateTableWithGsiPreparedData createTableWithGsiPreparedData = logicalCreateTable.getCreateTableWithGsiPreparedData(); + createTableWithGsiPreparedData.getPrimaryTablePreparedData().setLikeTableInfo(likeTableInfo); + CreateTableWithGsiJobFactory ret = new CreateTableWithGsiJobFactory( logicalCreateTable.relDdl, createTableWithGsiPreparedData, @@ -661,9 +778,11 @@ private DdlJob buildCreateTableWithGsiJob(LogicalCreateTable logicalCreateTable, } private DdlJob buildCreatePartitionTableWithGsiJob(LogicalCreateTable logicalCreateTable, - ExecutionContext executionContext) { + ExecutionContext executionContext, + LikeTableInfo likeTableInfo) { CreateTableWithGsiPreparedData createTableWithGsiPreparedData = logicalCreateTable.getCreateTableWithGsiPreparedData(); + createTableWithGsiPreparedData.getPrimaryTablePreparedData().setLikeTableInfo(likeTableInfo); CreatePartitionTableWithGsiJobFactory ret = new CreatePartitionTableWithGsiJobFactory( logicalCreateTable.relDdl, createTableWithGsiPreparedData, @@ -758,8 +877,15 @@ public LogicalCreateTable generatePlan(LogicalCreateTable logicalCreateTable, Ex RelNode selectRelNode = selectPlan.getPlan(); SqlSelect selectFinshed = (SqlSelect) selectPlan.getAst(); SqlNodeList selectList = selectFinshed.getSelectList(); + // get the row types of the select result - List selectRowTypes = selectRelNode.getRowType().getFieldList(); + List columnMetaList = selectPlan.getCursorMeta().getColumns(); + List selectRowTypes = new 
ArrayList<>(columnMetaList.size()); + for (int i = 0; i < columnMetaList.size(); i++) { + ColumnMeta columnMeta = columnMetaList.get(i); + selectRowTypes.add( + new RelDataTypeFieldImpl(columnMeta.getOriginColumnName(), i, columnMeta.getField().getRelType())); + } List<Pair<SqlIdentifier, SqlColumnDeclaration>> createCols = sqlCreateTable.getColDefs(); @@ -892,4 +1018,64 @@ record = accessor.archiveQuery(jobId, taskId); return new AffectRowCursor(affectRows); } + protected void normlizePartitionBy(SqlPartitionBy partitionBy) { + boolean key = partitionBy instanceof SqlPartitionByHash && + ((SqlPartitionByHash) partitionBy).isKey(); + if (key) { + int partCols = partitionBy.getColumns().size(); + for (SqlNode sqlPartition : partitionBy.getPartitions()) { + SqlPartitionValue sqlPartitionValue = ((SqlPartition) sqlPartition).getValues(); + while (sqlPartitionValue.getItems().size() < partCols) { + SqlPartitionValueItem item = + new SqlPartitionValueItem(new SqlIdentifier("MAXVALUE", SqlParserPos.ZERO)); + item.setMaxValue(true); + sqlPartitionValue.getItems().add(item); + } + } + } + SqlSubPartitionBy subPartitionBy = partitionBy.getSubPartitionBy(); + if (subPartitionBy == null) { + return; + } + boolean keySubPart = subPartitionBy instanceof SqlSubPartitionByHash && + ((SqlSubPartitionByHash) subPartitionBy).isKey(); + if (!keySubPart) { + return; + } + int subPartCols = subPartitionBy.getColumns().size(); + for (SqlNode sqlPartition : partitionBy.getPartitions()) { + List<SqlNode> subPartitions = ((SqlPartition) sqlPartition).getSubPartitions(); + for (SqlNode sqlSubPartition : subPartitions) { + SqlPartitionValue sqlPartitionValue = ((SqlSubPartition) sqlSubPartition).getValues(); + while (sqlPartitionValue.getItems().size() < subPartCols) { + SqlPartitionValueItem item = + new SqlPartitionValueItem(new SqlIdentifier("MAXVALUE", SqlParserPos.ZERO)); + item.setMaxValue(true); + sqlPartitionValue.getItems().add(item); + } + } + + } + } + + protected void normlizeSubPartitionBy(SqlSubPartitionBy subPartitionBy) { + if (subPartitionBy == null) { + return; + } + boolean key = subPartitionBy instanceof SqlSubPartitionByHash && + ((SqlSubPartitionByHash) subPartitionBy).isKey(); + if (!key) { + return; + } + int partCols = subPartitionBy.getColumns().size(); + for (SqlNode sqlPartition : subPartitionBy.getSubPartitions()) { + SqlPartitionValue sqlPartitionValue = ((SqlSubPartition) sqlPartition).getValues(); + while (sqlPartitionValue.getItems().size() < partCols) { + SqlPartitionValueItem item = + new SqlPartitionValueItem(new SqlIdentifier("MAXVALUE", SqlParserPos.ZERO)); + item.setMaxValue(true); + sqlPartitionValue.getItems().add(item); + } + } + } }
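The two normalization helpers above pad the bound values of KEY-style (sub)partitions with MAXVALUE items until every bound carries one item per partition column. A hedged illustration (the partition spec below is hypothetical; only the sentinel construction is taken from the code above):

```java
// A two-column KEY partitioning whose bound lists a single value, e.g.
//   PARTITION BY KEY (a, b) (PARTITION p0 VALUES LESS THAN (10))
// is normalized so the bound is full width:
//   PARTITION BY KEY (a, b) (PARTITION p0 VALUES LESS THAN (10, MAXVALUE))
// Each sentinel item is built exactly as in the helpers:
SqlPartitionValueItem item =
    new SqlPartitionValueItem(new SqlIdentifier("MAXVALUE", SqlParserPos.ZERO));
item.setMaxValue(true);
sqlPartitionValue.getItems().add(item);
```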
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateViewHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateViewHandler.java index c92dc8127..9697ee131 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateViewHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalCreateViewHandler.java @@ -16,66 +16,48 @@ package com.alibaba.polardbx.executor.handler.ddl; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLCreateViewStatement; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; -import com.alibaba.polardbx.common.properties.ConnectionProperties; -import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.common.utils.logger.LoggerFactory; -import com.alibaba.polardbx.executor.cursor.Cursor; -import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; -import com.alibaba.polardbx.executor.handler.HandlerCommon; +import com.alibaba.polardbx.executor.ddl.job.factory.CreateViewJobFactory; +import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; +import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; import com.alibaba.polardbx.executor.spi.IRepository; -import com.alibaba.polardbx.executor.sync.CreateViewSyncAction; -import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.core.dialect.DbType; -import com.alibaba.polardbx.optimizer.core.planner.ExecutionPlan; -import com.alibaba.polardbx.optimizer.core.planner.Planner; -import com.alibaba.polardbx.optimizer.core.planner.SqlConverter; +import com.alibaba.polardbx.optimizer.core.rel.ddl.BaseDdlOperation; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalCreateView; -import com.alibaba.polardbx.optimizer.parse.FastsqlParser; -import com.alibaba.polardbx.optimizer.parse.FastsqlUtils; -import com.alibaba.polardbx.optimizer.planmanager.PlanManagerUtil; -import com.alibaba.polardbx.optimizer.utils.RelUtils; import com.alibaba.polardbx.optimizer.view.ViewManager; -import org.apache.calcite.rel.RelNode; -import org.apache.calcite.rel.type.RelDataType; -import org.apache.calcite.sql.SqlNode; -import org.apache.calcite.sql.TDDLSqlSelect; import java.io.UnsupportedEncodingException; -import java.util.List; /** * @author dylan */ -public class LogicalCreateViewHandler extends HandlerCommon { - - private static final Logger logger = LoggerFactory.getLogger(LogicalCreateViewHandler.class); - +public class LogicalCreateViewHandler extends LogicalCommonDdlHandler { private static final int MAX_VIEW_NAME_LENGTH = 64; - private static final int MAX_VIEW_NUMBER = 10000; + public static final int MAX_VIEW_NUMBER = 10000; public LogicalCreateViewHandler(IRepository repo) { super(repo); } @Override - public Cursor handle(final RelNode logicalPlan, ExecutionContext executionContext) { - - LogicalCreateView logicalCreateView = (LogicalCreateView) logicalPlan; + protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext executionContext) { + return new CreateViewJobFactory((LogicalCreateView) logicalDdlPlan, executionContext).create(); + } + @Override + protected boolean validatePlan(BaseDdlOperation logicalDdlPlan, ExecutionContext executionContext) { + LogicalCreateView logicalCreateView = (LogicalCreateView) logicalDdlPlan; String schemaName = logicalCreateView.getSchemaName(); - String viewName = logicalCreateView.getViewName(); + String viewName = logicalCreateView.getTableName(); boolean isReplace = logicalCreateView.isReplace(); - List<String> columnList = logicalCreateView.getColumnList(); - String viewDefinition = RelUtils.toNativeSql(logicalCreateView.getDefinition(), DbType.MYSQL); - String planString = null; - String planType = null; + + // Note: since we reuse the new DDL engine logic, the view name is validated as a table name + TableValidator.validateTableName(viewName); + TableValidator.validateTableNameLength(viewName); if (!checkUtf8(viewName)) { throw new TddlRuntimeException(ErrorCode.ERR_VIEW, @@ -86,40 +68,9 @@ public Cursor handle(final RelNode 
logicalPlan, ExecutionContext executionContex } ViewManager viewManager = OptimizerContext.getContext(schemaName).getViewManager(); - if (viewManager.count(schemaName) > MAX_VIEW_NUMBER) { throw new TddlRuntimeException(ErrorCode.ERR_VIEW, "View number at most " + MAX_VIEW_NUMBER); } - - if (logicalCreateView.getDefinition() instanceof TDDLSqlSelect) { - TDDLSqlSelect tddlSqlSelect = (TDDLSqlSelect) logicalCreateView.getDefinition(); - if (tddlSqlSelect.getHints() != null && tddlSqlSelect.getHints().size() != 0) { - String withHintSql = - ((SQLCreateViewStatement) FastsqlUtils.parseSql(executionContext.getSql()).get(0)).getSubQuery() - .toString(); - // FIXME: by now only support SMP plan. - executionContext.getExtraCmds().put(ConnectionProperties.ENABLE_MPP, false); - executionContext.getExtraCmds().put(ConnectionProperties.ENABLE_PARAMETER_PLAN, false); - ExecutionPlan executionPlan = - Planner.getInstance().plan(withHintSql, executionContext.copy()); - if (PlanManagerUtil.canConvertToJson(executionPlan, executionContext.getParamManager())) { - planString = PlanManagerUtil.relNodeToJson(executionPlan.getPlan()); - planType = "SMP"; - } - } - } - - if (columnList != null) { - SqlNode ast = new FastsqlParser().parse(viewDefinition).get(0); - SqlConverter converter = SqlConverter.getInstance(schemaName, executionContext); - SqlNode validatedNode = converter.validate(ast); - RelDataType rowType = converter.toRel(validatedNode).getRowType(); - if (rowType.getFieldCount() != columnList.size()) { - throw new TddlRuntimeException(ErrorCode.ERR_VIEW, - "View's SELECT and view's field list have different column counts"); - } - } - // check view name TableMeta tableMeta; try { @@ -136,32 +87,7 @@ public Cursor handle(final RelNode logicalPlan, ExecutionContext executionContex throw new TddlRuntimeException(ErrorCode.ERR_VIEW, "table '" + viewName + "' already exists "); } } - - boolean success = false; - - if (isReplace) { - success = viewManager - .replace(viewName, columnList, viewDefinition, executionContext.getConnection().getUser(), planString, - planType); - } else { - if (viewManager.select(viewName) != null) { - throw new TddlRuntimeException(ErrorCode.ERR_VIEW, "table '" + viewName + "' already exists "); - } - success = viewManager - .insert(viewName, columnList, viewDefinition, executionContext.getConnection().getUser(), planString, - planType); - } - - if (!success) { - throw new TddlRuntimeException(ErrorCode.ERR_VIEW, - "create view fail for " + viewManager.getSystemTableView().getTableName() + " can not " - + "write"); - - } - - SyncManagerHelper.sync(new CreateViewSyncAction(schemaName, viewName), schemaName); - - return new AffectRowCursor(new int[] {0}); + return super.validatePlan(logicalDdlPlan, executionContext); } private boolean checkUtf8(String s) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropDatabaseHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropDatabaseHandler.java index 33c4548b6..d20768d0d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropDatabaseHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropDatabaseHandler.java @@ -17,8 +17,8 @@ package com.alibaba.polardbx.executor.handler.ddl; import com.alibaba.polardbx.common.TddlConstants; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; import com.alibaba.polardbx.common.cdc.CdcManagerHelper; -import 
com.alibaba.polardbx.common.cdc.DdlVisibility; import com.alibaba.polardbx.common.ddl.foreignkey.ForeignKeyData; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; @@ -31,22 +31,19 @@ import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.accessor.PlParameterAccessor; import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.accessor.ProcedureAccessor; import com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils; +import com.alibaba.polardbx.executor.handler.DropDatabaseHandlerCommon; import com.alibaba.polardbx.executor.handler.HandlerCommon; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.executor.sync.BaselineInvalidateSchemaSyncAction; import com.alibaba.polardbx.executor.sync.DropDbRelatedProcedureSyncAction; import com.alibaba.polardbx.executor.sync.GsiStatisticsSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; -import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; import com.alibaba.polardbx.gms.sync.SyncScope; -import com.alibaba.polardbx.gms.topology.DbInfoAccessor; import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.gms.topology.DbInfoRecord; import com.alibaba.polardbx.gms.topology.DbTopologyManager; import com.alibaba.polardbx.gms.topology.DropDbInfo; import com.alibaba.polardbx.gms.util.MetaDbUtil; -import com.alibaba.polardbx.optimizer.OptimizerContext; -import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalDropDatabase; import com.alibaba.polardbx.optimizer.locality.LocalityManager; @@ -75,6 +72,10 @@ public LogicalDropDatabaseHandler(IRepository repo) { @Override public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { + return handleByGms(logicalPlan, executionContext); + } + + public Cursor handleByGms(RelNode logicalPlan, ExecutionContext executionContext) { final LogicalDropDatabase dropDatabase = (LogicalDropDatabase) logicalPlan; final SqlDropDatabase sqlDropDatabase = (SqlDropDatabase) dropDatabase.getNativeSqlNode(); final LocalityManager localityManager = LocalityManager.getInstance(); @@ -82,34 +83,21 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { final String dbName = sqlDropDatabase.getDbName().getSimple(); final DbInfoRecord dbInfo = DbInfoManager.getInstance().getDbInfo(dbName); - validateChangeSetExists(dbName, executionContext); + // validateChangeSetExists(dbName, executionContext); final ITimestampOracle timestampOracle = executionContext.getTransaction().getTransactionManagerUtil().getTimestampOracle(); long ts = timestampOracle.nextTimestamp(); - // not allow to drop ref database first even check_foreign_key is off - DbInfoAccessor dbInfoAccessor = new DbInfoAccessor(); - dbInfoAccessor.setConnection(MetaDbDataSource.getInstance().getConnection()); - if (dbInfoAccessor.getDbInfoByDbNameForUpdate(dbName) != null - && OptimizerContext.getContext(dbName) != null) { - for (TableMeta tableMeta : OptimizerContext.getContext(dbName).getLatestSchemaManager().getAllTables()) { - for (Map.Entry e : tableMeta.getReferencedForeignKeys().entrySet()) { - String referredSchemaName = e.getValue().schema; - if (OptimizerContext.getContext(referredSchemaName) != null && - !referredSchemaName.equalsIgnoreCase(dbName)) { - String referencedSchemaName = e.getValue().schema; - String referencedTableName = 
e.getValue().tableName; - String constraint = e.getValue().constraint; - throw new TddlRuntimeException(ErrorCode.ERR_DROP_TABLE_FK_CONSTRAINT, tableMeta.getTableName(), - constraint, referencedSchemaName, referencedTableName); - } - } - } - } + DbTopologyManager.checkRefForeignKeyWhenDropDatabase(dbName); boolean isDropIfExists = sqlDropDatabase.isIfExists(); DropDbInfo dropDbInfo = new DropDbInfo(); + boolean isImportDatabase = executionContext.getParamManager().getBoolean(ConnectionParams.IMPORT_DATABASE); + if (isImportDatabase) { + dropDbInfo.setReservePhyDb(true); + } + dropDbInfo.setDbName(dbName); dropDbInfo.setDropIfExists(isDropIfExists); dropDbInfo.setAllowDropForce( @@ -118,12 +106,13 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { dropDbInfo.setTs(ts); Long socketTimeout = executionContext.getParamManager().getLong(ConnectionParams.SOCKET_TIMEOUT); dropDbInfo.setSocketTimeout(socketTimeout == null ? -1 : socketTimeout); + DbTopologyManager.dropLogicalDb(dropDbInfo); CdcManagerHelper.getInstance() .notifyDdl(dbName, null, sqlDropDatabase.getKind().name(), executionContext.getOriginSql(), - DdlVisibility.Public, buildExtendParameter(executionContext)); + null, CdcDdlMarkVisibility.Public, buildExtendParameter(executionContext)); - SyncManagerHelper.syncWithDefaultDB(new BaselineInvalidateSchemaSyncAction(dbName)); + SyncManagerHelper.syncWithDefaultDB(new BaselineInvalidateSchemaSyncAction(dbName), SyncScope.ALL); if (dbInfo != null) { localityManager.deleteLocalityOfDb(dbInfo.id); @@ -137,7 +126,8 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { } private void dropGsiStatistic(String dbName) { - SyncManagerHelper.sync(new GsiStatisticsSyncAction(dbName, null, null, GsiStatisticsSyncAction.DELETE_SCHEMA)); + SyncManagerHelper.sync(new GsiStatisticsSyncAction(dbName, null, null, GsiStatisticsSyncAction.DELETE_SCHEMA), + SyncScope.ALL); } private void dropRelatedProcedures(String dbName) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropIndexHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropIndexHandler.java index 4fe92e930..6e0e4b8fd 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropIndexHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropIndexHandler.java @@ -19,6 +19,7 @@ import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.executor.ddl.job.factory.DropIndexJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.gsi.DropGsiJobFactory; +import com.alibaba.polardbx.executor.ddl.job.factory.gsi.columnar.DropColumnarIndexJobFactory; import com.alibaba.polardbx.executor.ddl.job.task.basic.SubJobTask; import com.alibaba.polardbx.executor.ddl.job.task.gsi.ValidateTableVersionTask; import com.alibaba.polardbx.executor.ddl.job.validator.IndexValidator; @@ -27,6 +28,7 @@ import com.alibaba.polardbx.executor.ddl.newengine.job.DdlTask; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.executor.utils.DdlUtils; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.ddl.BaseDdlOperation; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalDropIndex; @@ -59,11 +61,15 @@ public LogicalDropIndexHandler(IRepository repo) { @Override 
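In the DROP DATABASE hunk above, the IMPORT_DATABASE flag makes the drop keep the physical schema (setReservePhyDb(true)), so a logical database created by an import can be detached without destroying data. A hedged usage sketch, preceding the buildDdlJob override that follows (the /*+TDDL:...*/ hint form mirrors the import-table hints later in this patch; the exact syntax is an assumption):

```java
// Hypothetical JDBC usage (java.sql.Connection/Statement/DriverManager):
// drop only the logical metadata, reserving the physical database on the DN.
try (Connection conn = DriverManager.getConnection(url, user, password);
     Statement stmt = conn.createStatement()) {
    stmt.execute("/*+TDDL: IMPORT_DATABASE=true*/ DROP DATABASE imported_db");
}
```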
protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext executionContext) { + final Long versionId = DdlUtils.generateVersionId(executionContext); LogicalDropIndex logicalDropIndex = (LogicalDropIndex) logicalDdlPlan; logicalDropIndex.prepareData(); + logicalDropIndex.setDdlVersionId(versionId); if (logicalDropIndex.isGsi()) { return buildDropGsiJob(logicalDropIndex, executionContext); + } else if (logicalDropIndex.isColumnar()) { + return buildDropColumnarIndexJob(logicalDropIndex, executionContext); } else { return buildDropLocalIndexJob(logicalDropIndex, executionContext); } @@ -167,4 +173,20 @@ public static DdlTask genRenameLocalIndexTask(RenameLocalIndexPreparedData renam return null; } + public DdlJob buildDropColumnarIndexJob(LogicalDropIndex logicalDropIndex, ExecutionContext executionContext) { + DropIndexWithGsiPreparedData dropIndexPreparedData = logicalDropIndex.getDropIndexWithGsiPreparedData(); + DropGlobalIndexPreparedData preparedData = dropIndexPreparedData.getGlobalIndexPreparedData(); + + ExecutableDdlJob gsiJob = DropColumnarIndexJobFactory.create(preparedData, executionContext, false, true); + + Map tableVersions = new HashMap<>(); + tableVersions.put(preparedData.getPrimaryTableName(), preparedData.getTableVersion()); + ValidateTableVersionTask validateTableVersionTask = + new ValidateTableVersionTask(preparedData.getSchemaName(), tableVersions); + + gsiJob.addTask(validateTableVersionTask); + gsiJob.addTaskRelationship(validateTableVersionTask, gsiJob.getHead()); + + return gsiJob; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropMaterializedViewHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropMaterializedViewHandler.java index f391caafd..00fe3d0d6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropMaterializedViewHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropMaterializedViewHandler.java @@ -16,6 +16,8 @@ package com.alibaba.polardbx.executor.handler.ddl; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; import com.alibaba.polardbx.common.ddl.newengine.DdlType; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; @@ -28,6 +30,7 @@ import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.executor.sync.DropViewSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.PlannerContext; @@ -38,12 +41,16 @@ import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalDropMaterializedView; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalDropTable; import com.alibaba.polardbx.optimizer.parse.FastsqlParser; -import com.alibaba.polardbx.optimizer.view.DrdsSystemTableView; +import com.alibaba.polardbx.optimizer.view.PolarDbXSystemTableView; +import com.alibaba.polardbx.optimizer.view.SystemTableView; import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlDropTable; +import org.apache.calcite.sql.SqlKind; import java.util.ArrayList; +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + public class 
LogicalDropMaterializedViewHandler extends LogicalDropTableHandler { public LogicalDropMaterializedViewHandler(IRepository repo) { @@ -102,6 +109,8 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { // Handle the client DDL request on the worker side. handleDdlRequest(ddlJob, executionContext); + markDdlForCdc(executionContext, schemaName, tableName, executionContext.getOriginSql()); + //sync all nodes syncView(schemaName, tableName + "_Materialized"); @@ -110,7 +119,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { public void syncView(String schemaName, String viewName) { - DrdsSystemTableView.Row row = OptimizerContext.getContext(schemaName).getViewManager().select(viewName); + SystemTableView.Row row = OptimizerContext.getContext(schemaName).getViewManager().select(viewName); if (row == null) { throw new TddlRuntimeException(ErrorCode.ERR_VIEW, "Unknown view " + viewName); } @@ -119,11 +128,25 @@ public void syncView(String schemaName, String viewName) { if (!success) { throw new TddlRuntimeException(ErrorCode.ERR_VIEW, - "drop view fail for " + DrdsSystemTableView.TABLE_NAME + " can not write"); + "drop view fail for " + PolarDbXSystemTableView.TABLE_NAME + " can not write"); } ArrayList viewList = new ArrayList<>(); viewList.add(viewName); - SyncManagerHelper.sync(new DropViewSyncAction(schemaName, viewList), schemaName); + SyncManagerHelper.sync(new DropViewSyncAction(schemaName, viewList), schemaName, SyncScope.CURRENT_ONLY); + } + + //TODO cdc@shengyu + private void markDdlForCdc(ExecutionContext executionContext, String schemaName, String viewName, String ddlSql) { + CdcManagerHelper.getInstance().notifyDdlNew( + schemaName, + viewName, + SqlKind.DROP_MATERIALIZED_VIEW.name(), + ddlSql, + DdlType.UNSUPPORTED, + null, + null, + CdcDdlMarkVisibility.Protected, + buildExtendParameter(executionContext)); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropProcedureHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropProcedureHandler.java index e4b989212..a1fd2b8a2 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropProcedureHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropProcedureHandler.java @@ -16,23 +16,15 @@ package com.alibaba.polardbx.executor.handler.ddl; -import com.alibaba.polardbx.common.exception.TddlRuntimeException; -import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.druid.sql.SQLUtils; -import com.alibaba.polardbx.druid.sql.ast.SQLName; -import com.alibaba.polardbx.druid.sql.ast.expr.SQLPropertyExpr; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLDropProcedureStatement; -import com.alibaba.polardbx.druid.sql.parser.SQLParserFeature; import com.alibaba.polardbx.executor.ddl.job.factory.DropProcedureJobFactory; import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; import com.alibaba.polardbx.executor.spi.IRepository; -import com.alibaba.polardbx.executor.pl.ProcedureManager; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.ddl.BaseDdlOperation; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalDropProcedure; -import 
com.alibaba.polardbx.optimizer.parse.FastsqlUtils; import org.apache.calcite.sql.SqlDropProcedure; public class LogicalDropProcedureHandler extends LogicalCommonDdlHandler { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropTableHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropTableHandler.java index e4802b993..730625c05 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropTableHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropTableHandler.java @@ -17,8 +17,6 @@ package com.alibaba.polardbx.executor.handler.ddl; import com.alibaba.polardbx.common.Engine; -import com.alibaba.polardbx.common.cdc.CdcManagerHelper; -import com.alibaba.polardbx.common.cdc.DdlVisibility; import com.alibaba.polardbx.common.ddl.foreignkey.ForeignKeyData; import com.alibaba.polardbx.common.ddl.newengine.DdlConstants; import com.alibaba.polardbx.common.exception.TddlRuntimeException; @@ -35,6 +33,7 @@ import com.alibaba.polardbx.executor.ddl.job.factory.DropPartitionTableWithGsiJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.DropTableJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.DropTableWithGsiJobFactory; +import com.alibaba.polardbx.executor.ddl.job.factory.PureCdcDdlMark4DropTableJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.RecycleOssTableJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.RenameTableJobFactory; import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcTruncateWithRecycleMarkTask; @@ -42,14 +41,14 @@ import com.alibaba.polardbx.executor.ddl.job.validator.TableValidator; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; -import com.alibaba.polardbx.executor.ddl.newengine.job.TransientDdlJob; import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlHelper; import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.executor.utils.DdlUtils; +import com.alibaba.polardbx.gms.metadb.limit.LimitValidator; import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.archive.CheckOSSArchiveUtil; import com.alibaba.polardbx.optimizer.config.table.TableMeta; -import com.alibaba.polardbx.optimizer.context.DdlContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.ddl.BaseDdlOperation; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalDropTable; @@ -60,7 +59,6 @@ import org.apache.calcite.rel.ddl.RenameTable; import org.apache.calcite.sql.SqlDropTable; import org.apache.calcite.sql.SqlIdentifier; -import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.SqlRenameTable; import org.apache.calcite.sql.parser.SqlParserPos; @@ -68,8 +66,6 @@ import java.util.HashMap; import java.util.Map; -import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; - public class LogicalDropTableHandler extends LogicalCommonDdlHandler { public LogicalDropTableHandler(IRepository repo) { @@ -79,6 +75,12 @@ public LogicalDropTableHandler(IRepository repo) { @Override protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext executionContext) { LogicalDropTable logicalDropTable = (LogicalDropTable) 
logicalDdlPlan; + + boolean importTable = executionContext.getParamManager().getBoolean(ConnectionParams.IMPORT_TABLE); + if (importTable) { + logicalDropTable.setImportTable(true); + } + if (executionContext.getParamManager().getBoolean(ConnectionParams.PURGE_FILE_STORAGE_TABLE) && logicalDropTable.isPurge()) { LogicalRenameTableHandler.makeTableVisible(logicalDropTable.getSchemaName(), @@ -88,10 +90,23 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e if (logicalDropTable.ifExists()) { if (!TableValidator.checkIfTableExists(logicalDdlPlan.getSchemaName(), logicalDropTable.getTableName())) { - return new TransientDdlJob(); + LimitValidator.validateTableNameLength(logicalDdlPlan.getSchemaName()); + LimitValidator.validateTableNameLength(logicalDropTable.getTableName()); + + // Prompt "show warning" only. + DdlHelper.storeFailedMessage(logicalDdlPlan.getSchemaName(), DdlConstants.ERROR_UNKNOWN_TABLE, + "Unknown table '" + logicalDdlPlan.getSchemaName() + + "." + logicalDropTable.getTableName() + "'", executionContext); + executionContext.getDdlContext().setUsingWarning(true); + + return new PureCdcDdlMark4DropTableJobFactory(logicalDdlPlan.getSchemaName(), + logicalDropTable.getTableName()).create(); } } + final Long versionId = DdlUtils.generateVersionId(executionContext); + logicalDropTable.setDdlVersionId(versionId); + boolean isNewPartDb = DbInfoManager.getInstance().isNewPartitionDb(logicalDropTable.getSchemaName()); CheckOSSArchiveUtil.checkWithoutOSS(logicalDropTable.getSchemaName(), logicalDropTable.getTableName()); @@ -143,19 +158,15 @@ protected boolean validatePlan(BaseDdlOperation logicalDdlPlan, ExecutionContext final String schemaName = logicalDdlPlan.getSchemaName(); final String logicalTableName = logicalDdlPlan.getTableName(); + boolean isImportTable = executionContext.getParamManager().getBoolean(ConnectionParams.IMPORT_TABLE); + TableValidator.validateTableName(logicalTableName); final boolean tableExists = TableValidator.checkIfTableExists(schemaName, logicalTableName); if (!tableExists && sqlDropTable.isIfExists()) { - DdlContext ddlContext = executionContext.getDdlContext(); - CdcManagerHelper.getInstance().notifyDdlNew(schemaName, logicalTableName, SqlKind.DROP_TABLE.name(), - ddlContext.getDdlStmt(), ddlContext.getDdlType(), null, null, - DdlVisibility.Public, buildExtendParameter(executionContext)); - - // Prompt "show warning" only. - DdlHelper.storeFailedMessage(schemaName, DdlConstants.ERROR_UNKNOWN_TABLE, - "Unknown table '" + schemaName + "." 
+ logicalTableName + "'", executionContext); - executionContext.getDdlContext().setUsingWarning(true); + // do nothing + } else if (isImportTable) { + //do nothing } else if (!tableExists) { throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_TABLE, schemaName, logicalTableName); } @@ -235,8 +246,10 @@ private DdlJob buildRenameTableJob(LogicalRenameTable logicalRenameTable, Execut RenameTableBuilder.create(logicalRenameTable.relDdl, renameTablePreparedData, executionContext).build(); PhysicalPlanData physicalPlanData = renameTableBuilder.genPhysicalPlanData(); + physicalPlanData.setRenamePhyTable(renameTablePreparedData.isNeedRenamePhyTable()); + Long versionId = DdlUtils.generateVersionId(executionContext); - return new RenameTableJobFactory(physicalPlanData, executionContext).create(); + return new RenameTableJobFactory(physicalPlanData, executionContext, versionId).create(); } private DdlJob buildRecycleFileStorageTableJob(LogicalDropTable logicalDropTable, @@ -268,6 +281,7 @@ public static DdlJob buildOssRecycleTableJob(LogicalRenameTable logicalRenameTab renameTablePreparedData, executionContext).build(); PhysicalPlanData physicalPlanData = renameTableBuilder.genPhysicalPlanData(); + physicalPlanData.setRenamePhyTable(renameTablePreparedData.isNeedRenamePhyTable()); Map tableVersions = new HashMap<>(); @@ -296,7 +310,8 @@ protected DdlJob buildDropPartitionTableJob(LogicalDropTable logicalDropTable, E ValidateTableVersionTask validateTableVersionTask = new ValidateTableVersionTask(dropTablePreparedData.getSchemaName(), tableVersions); - ExecutableDdlJob result = new DropPartitionTableJobFactory(physicalPlanData, executionContext).create(); + ExecutableDdlJob result = + new DropPartitionTableJobFactory(physicalPlanData, executionContext, dropTablePreparedData).create(); result.addTask(validateTableVersionTask); result.addTaskRelationship(validateTableVersionTask, result.getHead()); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropViewHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropViewHandler.java index 2be10c133..824a597fa 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropViewHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalDropViewHandler.java @@ -16,18 +16,11 @@ package com.alibaba.polardbx.executor.handler.ddl; -import com.alibaba.polardbx.common.exception.TddlRuntimeException; -import com.alibaba.polardbx.common.exception.code.ErrorCode; -import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.common.utils.logger.LoggerFactory; -import com.alibaba.polardbx.executor.cursor.Cursor; -import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; -import com.alibaba.polardbx.executor.handler.HandlerCommon; +import com.alibaba.polardbx.executor.ddl.job.factory.DropViewJobFactory; +import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; import com.alibaba.polardbx.executor.spi.IRepository; -import com.alibaba.polardbx.executor.sync.DropViewSyncAction; -import com.alibaba.polardbx.executor.sync.SyncManagerHelper; -import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.rel.ddl.BaseDdlOperation; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalDropView; import com.alibaba.polardbx.optimizer.view.PolarDbXSystemTableView; import 
com.alibaba.polardbx.optimizer.view.SystemTableView; @@ -38,41 +31,15 @@ /** * @author dylan */ -public class LogicalDropViewHandler extends HandlerCommon { - - private static final Logger logger = LoggerFactory.getLogger(LogicalDropViewHandler.class); +public class LogicalDropViewHandler extends LogicalCommonDdlHandler { public LogicalDropViewHandler(IRepository repo) { super(repo); } @Override - public Cursor handle(final RelNode logicalPlan, ExecutionContext executionContext) { - - LogicalDropView logicalDropView = (LogicalDropView) logicalPlan; - - String schemaName = logicalDropView.getSchemaName(); - String viewName = logicalDropView.getViewName(); - - if (!logicalDropView.isIfExists()) { - SystemTableView.Row row = OptimizerContext.getContext(schemaName).getViewManager().select(viewName); - if (row == null) { - throw new TddlRuntimeException(ErrorCode.ERR_VIEW, "Unknown view " + viewName); - } - } - - boolean success = OptimizerContext.getContext(schemaName).getViewManager().delete(viewName); - - if (!success) { - throw new TddlRuntimeException(ErrorCode.ERR_VIEW, - "drop view fail for " + PolarDbXSystemTableView.TABLE_NAME + " can not write"); - } - - ArrayList viewList = new ArrayList<>(); - viewList.add(viewName); - SyncManagerHelper.sync(new DropViewSyncAction(schemaName, viewList), schemaName); - - return new AffectRowCursor(new int[] {0}); + protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext executionContext) { + return new DropViewJobFactory((LogicalDropView) logicalDdlPlan).create(); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalImportDatabaseHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalImportDatabaseHandler.java new file mode 100644 index 000000000..365e4fad7 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalImportDatabaseHandler.java @@ -0,0 +1,475 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.handler.ddl; + +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.charset.MySQLCharsetDDLValidator; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.druid.sql.SQLUtils; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlHelper; +import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.executor.utils.ExecUtils; +import com.alibaba.polardbx.executor.utils.ImportTableTaskManager; +import com.alibaba.polardbx.executor.utils.StandardToEnterpriseEditionUtil; +import com.alibaba.polardbx.gms.locality.LocalityDesc; +import com.alibaba.polardbx.gms.topology.CreateDbInfo; +import com.alibaba.polardbx.gms.topology.DbGroupInfoRecord; +import com.alibaba.polardbx.gms.topology.DbInfoManager; +import com.alibaba.polardbx.gms.topology.DbInfoRecord; +import com.alibaba.polardbx.gms.topology.DbTopologyManager; +import com.alibaba.polardbx.gms.topology.ImportTableResult; +import com.alibaba.polardbx.gms.topology.StorageInfoRecord; +import com.alibaba.polardbx.gms.util.DbEventUtil; +import com.alibaba.polardbx.gms.util.DbNameUtil; +import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.config.table.SchemaManager; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalImportDatabase; +import com.alibaba.polardbx.optimizer.locality.LocalityManager; +import com.alibaba.polardbx.optimizer.partition.common.PartitionTableType; +import com.alibaba.polardbx.optimizer.utils.KeyWordsUtil; +import com.google.common.base.Predicate; +import com.google.common.collect.ImmutableList; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.sql.SqlImportDatabase; +import org.apache.commons.lang.StringUtils; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.FutureTask; +import java.util.stream.Collectors; + +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; + +/** + * Created by zhuqiwei. 
+ * + * @author zhuqiwei + */ +public class LogicalImportDatabaseHandler extends LogicalCreateDatabaseHandler { + private final static Logger logger = LoggerFactory.getLogger(LogicalImportDatabaseHandler.class); + + public LogicalImportDatabaseHandler(IRepository repo) { + super(repo); + } + + @Override + public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { + final LogicalImportDatabase importDatabase = (LogicalImportDatabase) logicalPlan; + final SqlImportDatabase sqlImportDatabase = (SqlImportDatabase) importDatabase.getNativeSqlNode(); + + handleImportDatabase(sqlImportDatabase, executionContext); + + Map<String, ImportTableResult> result = handleImportTablesParallel(sqlImportDatabase, executionContext); + + return buildResult(result); + } + + protected void handleImportDatabase(SqlImportDatabase sqlImportDatabase, ExecutionContext executionContext) { + + validateImportDatabase(sqlImportDatabase, executionContext); + + String logicalDbName = SQLUtils.normalize(sqlImportDatabase.getDstLogicalDb()); + String phyDbName = SQLUtils.normalize(sqlImportDatabase.getSrcPhyDb()); + String locality = SQLUtils.normalize(sqlImportDatabase.getLocality()); + LocalityDesc localityDesc = LocalityDesc.parse(locality); + + String dnName = localityDesc.getDnList().get(0); + Predicate<StorageInfoRecord> predLocality = (x -> localityDesc.matchStorageInstance(x.getInstanceId())); + + Map<String, String> schemata = StandardToEnterpriseEditionUtil.queryDatabaseSchemata(dnName, phyDbName); + if (schemata.isEmpty()) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + String.format("failed to query information on phy database [%s]", phyDbName)); + } + Boolean encryption = null; + // for MySQL 8.0 + if (schemata.get("DEFAULT_ENCRYPTION") != null && + (schemata.get("DEFAULT_ENCRYPTION").equalsIgnoreCase("YES") || schemata.get("DEFAULT_ENCRYPTION").equalsIgnoreCase("TRUE"))) { + encryption = true; + } + String charset = schemata.get("DEFAULT_CHARACTER_SET_NAME"); + String collate = schemata.get("DEFAULT_COLLATION_NAME"); + int dbType = DbInfoRecord.DB_TYPE_NEW_PART_DB; + Long socketTimeout = executionContext.getParamManager().getLong(ConnectionParams.SOCKET_TIMEOUT);
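The encryption flag just derived is deliberately tri-state; a condensed sketch of the same interpretation (parseDefaultEncryption is an illustrative helper, not part of the patch):

```java
// DEFAULT_ENCRYPTION only exists on MySQL 8.0 schemata: null means
// unknown/pre-8.0, while YES/TRUE marks an encrypted physical database.
static Boolean parseDefaultEncryption(Map<String, String> schemata) {
    String v = schemata.get("DEFAULT_ENCRYPTION");
    return v != null && (v.equalsIgnoreCase("YES") || v.equalsIgnoreCase("TRUE"))
        ? Boolean.TRUE : null;
}
```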
+ long socketTimeoutVal = socketTimeout == null ? -1 : socketTimeout; + CreateDbInfo createDbInfo = StandardToEnterpriseEditionUtil.initCreateDbInfo( + logicalDbName, phyDbName, charset, collate, encryption, localityDesc, predLocality, dbType, + socketTimeoutVal, sqlImportDatabase.isExistStillImport() + ); + + long dbId = DbTopologyManager.createLogicalDb(createDbInfo); + DbEventUtil.logStandardToEnterpriseEditionEvent(logicalDbName, phyDbName); + CdcManagerHelper.getInstance() + .notifyDdl(logicalDbName, null, sqlImportDatabase.getKind().name(), executionContext.getOriginSql(), + null, CdcDdlMarkVisibility.Public, buildExtendParameter(executionContext)); + + LocalityManager.getInstance().setLocalityOfDb(dbId, locality); + } + + protected void validateImportDatabase(SqlImportDatabase sqlImportDatabase, ExecutionContext executionContext) { + String logicalDbName = SQLUtils.normalize(sqlImportDatabase.getDstLogicalDb()); + String phyDbName = SQLUtils.normalize(sqlImportDatabase.getSrcPhyDb()); + String locality = SQLUtils.normalize(sqlImportDatabase.getLocality()); + LocalityDesc localityDesc = LocalityDesc.parse(locality); + String storageInstId = localityDesc.getDnList().get(0); + + // Only one DN may be specified per import + if (localityDesc.getDnList().size() != 1) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + "only one DN Id is allowed"); + } + + //validate db name + if (!DbNameUtil.validateDbName(logicalDbName, KeyWordsUtil.isKeyWord(logicalDbName))) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + String.format("Failed to import database because the db name [%s] is invalid", logicalDbName)); + } + + //validate db count + int normalDbCnt = DbTopologyManager.getNormalDbCountFromMetaDb(); + int maxDbCnt = DbTopologyManager.maxLogicalDbCount; + if (normalDbCnt >= maxDbCnt) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + String.format( + "Failed to import database because there are too many databases, the max count of database is %s", + maxDbCnt)); + } + + //validate "exist still import" + if (!sqlImportDatabase.isExistStillImport()) { + // A physical database that already belongs to a logical database must not be imported as a new logical database + List<DbGroupInfoRecord> dbGroupInfoRecords = + DbTopologyManager.getAllDbGroupInfoRecordByInstIdAndPhyDbName(phyDbName, storageInstId); + if (dbGroupInfoRecords.size() > 0) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, String.format( + "It's not allowed to import phy database [%s] because it belongs to another logical db", phyDbName + )); + } + + // A physical database must not be imported into an existing normal logical database + if (null != DbInfoManager.getInstance().getDbInfo(logicalDbName)) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, String.format( + "It's not allowed to import phy database on existing logical database [%s]", logicalDbName + )); + } + } else { + // Both the physical and the logical database must come from a previous import + List<DbGroupInfoRecord> dbGroupInfoRecords = + DbTopologyManager.getAllDbGroupInfoRecordByInstIdAndPhyDbName(phyDbName, storageInstId); + if (dbGroupInfoRecords.isEmpty()) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, String.format( + "phy database [%s] must belong to a logical database", phyDbName + )); + } + + if (null == DbInfoManager.getInstance().getDbInfo(logicalDbName)) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, String.format( + "logical database [%s] must exist", logicalDbName + )); + }
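The two validation modes above mirror each other exactly; condensed, with require() as a hypothetical assertion helper and the other names taken from the method:

```java
boolean phyDbClaimed = !DbTopologyManager
    .getAllDbGroupInfoRecordByInstIdAndPhyDbName(phyDbName, storageInstId).isEmpty();
boolean logicalDbExists = DbInfoManager.getInstance().getDbInfo(logicalDbName) != null;
if (!sqlImportDatabase.isExistStillImport()) {
    require(!phyDbClaimed && !logicalDbExists); // fresh import: both sides must be free
} else {
    require(phyDbClaimed && logicalDbExists);   // re-import: both sides must already exist
}
```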
 + + // Partitioned tables and broadcast tables are not allowed in the logical database + Set<String> tablesInLogicalDatabase = StandardToEnterpriseEditionUtil + .getTableNamesFromLogicalDatabase(logicalDbName, executionContext); + final SchemaManager sm = OptimizerContext.getContext(logicalDbName).getLatestSchemaManager(); + for (String table : tablesInLogicalDatabase) { + TableMeta tableMeta = sm.getTable(table); + if (tableMeta == null) { + continue; + } + if (tableMeta.getPartitionInfo().getTableType() != PartitionTableType.SINGLE_TABLE) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, String.format( + "import database failed because of invalid table type, table [%s] is %s", table, + tableMeta.getPartitionInfo().getTableType())); + } + } + } + + //validate charset + Map<String, String> schemata = StandardToEnterpriseEditionUtil.queryDatabaseSchemata(storageInstId, phyDbName); + if (schemata.isEmpty()) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + String.format("failed to query information on phy database [%s]", phyDbName)); + } + Boolean encryption = schemata.get("DEFAULT_ENCRYPTION") != null && + !(schemata.get("DEFAULT_ENCRYPTION").equalsIgnoreCase("NO") || schemata.get("DEFAULT_ENCRYPTION").equalsIgnoreCase("FALSE")); + String charset = schemata.get("DEFAULT_CHARACTER_SET_NAME"); + String collate = schemata.get("DEFAULT_COLLATION_NAME"); + validateCharset(charset, collate); + } + + protected Map<String, ImportTableResult> handleImportTablesParallel(SqlImportDatabase sqlImportDatabase, + ExecutionContext executionContext) { + + String logicalDbName = sqlImportDatabase.getDstLogicalDb(); + String phyDbName = sqlImportDatabase.getSrcPhyDb(); + String locality = SQLUtils.normalize(sqlImportDatabase.getLocality()); + LocalityDesc localityDesc = LocalityDesc.parse(locality); + String storageInstId = localityDesc.getDnList().get(0); + + Set<String> tablesInLogicalDatabase = StandardToEnterpriseEditionUtil + .getTableNamesFromLogicalDatabase(logicalDbName, executionContext); + + Set<String> tablesInPhysicalDatabase = StandardToEnterpriseEditionUtil + .queryPhysicalTableListFromPhysicalDabatase(storageInstId, phyDbName); + + // Logical meta info still exists, but the physical table has been dropped + Set<String> needCleanTables = new TreeSet<>(String::compareToIgnoreCase); + // The physical table exists, but there is no logical meta info + Set<String> needCreateTables = new TreeSet<>(String::compareToIgnoreCase); + // Both logical meta info and the physical table exist, but CHECK TABLE fails, so the meta info must be rebuilt + Set<String> needRecreateTables = new TreeSet<>(String::compareToIgnoreCase); + + for (String phyTable : tablesInPhysicalDatabase) { + if (!tablesInLogicalDatabase.contains(phyTable)) { + needCreateTables.add(phyTable); + } + } + + for (String logicalTable : tablesInLogicalDatabase) { + if (!tablesInPhysicalDatabase.contains(logicalTable)) { + needCleanTables.add(logicalTable); + } + } + + List<String> needCheckTables = tablesInLogicalDatabase.stream() + .filter(tb -> !needCleanTables.contains(tb)).collect(Collectors.toList()); + for (String needCheckTable : needCheckTables) { + if (!StandardToEnterpriseEditionUtil.logicalCheckTable( + needCheckTable, logicalDbName + )) { + needRecreateTables.add(needCheckTable); + } + } + + Map<String, ImportTableResult> importTableResults = new CaseInsensitiveConcurrentHashMap<>(); + int parallelism = executionContext.getParamManager().getInt(ConnectionParams.IMPORT_TABLE_PARALLELISM); + ImportTableTaskManager manager = null; + try { + manager = new ImportTableTaskManager(parallelism); + List<FutureTask<Object>> taskList = new ArrayList<>(); + + for (String needCreateTable : needCreateTables) { + taskList.add(new FutureTask<>(() -> { + importOneTable(storageInstId, logicalDbName, phyDbName, needCreateTable, false, importTableResults); + return new Object(); + })); + } + + for (String needReCreateTable : needRecreateTables) { + taskList.add(new FutureTask<>(() -> { + importOneTable(storageInstId, logicalDbName, phyDbName, needReCreateTable, true, + importTableResults); + return new Object(); + })); + }
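For orientation, the reconciliation above is plain set arithmetic over case-insensitive table names (a restatement of the loops, not new behavior):

```java
// needCreateTables   = physical \ logical                       -> create missing metadata
// needCleanTables    = logical  \ physical                      -> drop stale metadata
// needRecreateTables = intersection failing logicalCheckTable   -> rebuild metadata
Set<String> needCreate = new TreeSet<>(String::compareToIgnoreCase);
needCreate.addAll(tablesInPhysicalDatabase);
needCreate.removeAll(tablesInLogicalDatabase);
```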
 + + for (String needCleanTable : needCleanTables) { + taskList.add(new FutureTask<>(() -> { + cleanOneTable(logicalDbName, needCleanTable, importTableResults); + return new Object(); + })); + } + + for (FutureTask<Object> task : taskList) { + manager.execute(task); + } + + // wait for all tasks to finish + for (FutureTask<Object> task : taskList) { + try { + task.get(); + } catch (Exception e) { + logger.error(String.format("import table failed. " + task), e); + } + } + + } catch (Exception e) { + logger.error(String.format("import database failed. "), e); + } finally { + if (manager != null) { + manager.shutdown(); + } + } + + return importTableResults; + } + + protected void importOneTable(String dnName, String logicalDbName, String phyDbName, String tableName, + boolean reimport, Map<String, ImportTableResult> result) { + final String importTableHint = "/*+TDDL: import_table=true CN_FOREIGN_KEY_CHECKS=0*/ "; + final String reimportTableHint = "/*+TDDL: reimport_table=true CN_FOREIGN_KEY_CHECKS=0*/ "; + final String action = (reimport ? "reimport table" : "import table"); + + String createTableSql = StandardToEnterpriseEditionUtil.queryCreateTableSql( + dnName, phyDbName, tableName + ); + + LocalityDesc localityDesc = new LocalityDesc(ImmutableList.of(dnName)); + + String finalSql = null; + try { + String normalizedCreateSql = + StandardToEnterpriseEditionUtil.normalizePhyTableStructure(tableName, createTableSql, + localityDesc.toString()); + + finalSql = (reimport ? reimportTableHint : importTableHint) + normalizedCreateSql; + + DdlHelper.getServerConfigManager().executeBackgroundSql(finalSql, logicalDbName, null); + } catch (Exception e) { + // collect the failure details + ImportTableResult importTableResult = new ImportTableResult(); + importTableResult.setTableName(tableName); + importTableResult.setAction(action); + importTableResult.setPhysicalCreateTableSql(createTableSql); + importTableResult.setLogicalCreateTableSql(finalSql); + importTableResult.setErrMsg(e.getMessage()); + + logger.error(String.format("import table %s failed. ", tableName), e); + + result.put(tableName, importTableResult); + } + }
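importOneTable above differs from the cleanOneTable helper that follows only in the TDDL hint and the statement body; a hedged sketch of the background SQL it ends up submitting (the table definition is hypothetical):

```java
// A reimport swaps the hint for /*+TDDL: reimport_table=true CN_FOREIGN_KEY_CHECKS=0*/
String sql = "/*+TDDL: import_table=true CN_FOREIGN_KEY_CHECKS=0*/ "
    + "CREATE TABLE `t_order` (`id` bigint NOT NULL, PRIMARY KEY (`id`))";
DdlHelper.getServerConfigManager().executeBackgroundSql(sql, logicalDbName, null);
```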
", logicalTableName), e); + + result.put(logicalTableName, importTableResult); + } + } + + private void validateCharset(String charset, String collate) { + boolean useMySql80 = ExecUtils.isMysql80Version(); + if (!MySQLCharsetDDLValidator.checkCharsetSupported(charset, collate, true)) { + throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, + String.format( + "The specified charset[%s] or collate[%s] is not supported", + charset, collate)); + } + if (MySQLCharsetDDLValidator.checkIfMySql80NewCollation(collate) && !useMySql80) { + throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, + String.format( + "The specified charset[%s] or collate[%s] is only supported for mysql 8.0", + charset, collate)); + } + if (!MySQLCharsetDDLValidator.checkCharset(charset)) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + String.format( + "Unknown character set: %s", + charset)); + } + + if (!StringUtils.isEmpty(collate)) { + if (!MySQLCharsetDDLValidator.checkCollation(collate)) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + String.format( + "Unknown collation: %s", + collate)); + } + + if (!MySQLCharsetDDLValidator.checkCharsetCollation(charset, collate)) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + String.format( + "Unknown character set and collation: %s %s", + charset, collate)); + } + } + } + + Cursor buildResult(Map resultMap) { + ArrayResultCursor result = new ArrayResultCursor("Result"); + if (!resultMap.isEmpty()) { + result.addColumn("table_name", DataTypes.StringType); + result.addColumn("action", DataTypes.StringType); + result.addColumn("state", DataTypes.StringType); + result.addColumn("physical_sql", DataTypes.StringType); + result.addColumn("logical_sql", DataTypes.StringType); + result.addColumn("err_msg", DataTypes.StringType); + + for (ImportTableResult record : resultMap.values()) { + result.addRow( + new Object[] { + record.getTableName(), + record.getAction(), + "fail", + record.getPhysicalCreateTableSql(), + record.getLogicalCreateTableSql(), + record.getErrMsg() + } + ); + } + } else { + result.addColumn("state", DataTypes.StringType); + result.addRow(new Object[] {"ALL SUCCESS"}); + } + + return result; + } + + public static class CaseInsensitiveConcurrentHashMap extends ConcurrentHashMap { + + @Override + public T put(String key, T value) { + return super.put(key.toLowerCase(), value); + } + + public T get(String key) { + return super.get(key.toLowerCase()); + } + + public boolean containsKey(String key) { + return super.containsKey(key.toLowerCase()); + } + + public T remove(String key) { + return super.remove(key.toLowerCase()); + } + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalMergeTableGroupHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalMergeTableGroupHandler.java index 838cb85fd..b9dc7c867 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalMergeTableGroupHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalMergeTableGroupHandler.java @@ -22,7 +22,6 @@ import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; -import com.alibaba.polardbx.executor.ddl.job.factory.AlterJoinGroupJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.MergeTableGroupJobFactory; import 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalMergeTableGroupHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalMergeTableGroupHandler.java index 838cb85fd..b9dc7c867 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalMergeTableGroupHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalMergeTableGroupHandler.java @@ -22,7 +22,6 @@ import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; -import com.alibaba.polardbx.executor.ddl.job.factory.AlterJoinGroupJobFactory; import com.alibaba.polardbx.executor.ddl.job.factory.MergeTableGroupJobFactory; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; import com.alibaba.polardbx.executor.ddl.newengine.job.TransientDdlJob; @@ -33,10 +32,8 @@ import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.ddl.BaseDdlOperation; -import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterJoinGroup; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalMergeTableGroup; import com.alibaba.polardbx.optimizer.core.rel.ddl.data.MergeTableGroupPreparedData; -import org.apache.calcite.sql.SqlAlterTableGroup; /** * Created by luoyanxin. diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalOptimizeTableHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalOptimizeTableHandler.java index 677aff971..0f731f44c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalOptimizeTableHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalOptimizeTableHandler.java @@ -18,20 +18,15 @@ import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; import com.alibaba.polardbx.common.properties.ConnectionParams; -import com.alibaba.polardbx.common.properties.IntConfigParam; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.ddl.job.builder.DirectPhysicalSqlPlanBuilder; import com.alibaba.polardbx.executor.ddl.job.task.basic.OptimizeTablePhyDdlTask; -import com.alibaba.polardbx.executor.ddl.job.task.localpartition.LocalPartitionPhyDdlTask; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; -import com.alibaba.polardbx.executor.ddl.newengine.job.TransientDdlJob; -import com.alibaba.polardbx.executor.gsi.GsiManager; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.optimizer.config.table.GlobalIndexMeta; -import com.alibaba.polardbx.optimizer.config.table.GsiMetaManager; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.rel.ddl.BaseDdlOperation; @@ -49,7 +44,6 @@ import org.apache.commons.collections.CollectionUtils; import java.util.ArrayList; -import java.util.Collections; import java.util.List; /** @@ -70,17 +64,19 @@ public LogicalOptimizeTableHandler(IRepository repo) { protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext ec) { LogicalOptimizeTable logicalOptimizeTable = (LogicalOptimizeTable) logicalDdlPlan; SqlOptimizeTableDdl sqlOptimizeTableDdl = (SqlOptimizeTableDdl) logicalOptimizeTable.getNativeSqlNode(); - List<Pair<String, String>> tableNameList = extractTableList(sqlOptimizeTableDdl.getTableNames(), ec.getSchemaName(), ec); + List<Pair<String, String>> tableNameList = + extractTableList(sqlOptimizeTableDdl.getTableNames(), ec.getSchemaName(), ec); final int parallelism = ec.getParamManager().getInt(ConnectionParams.OPTIMIZE_TABLE_PARALLELISM); - if(parallelism < 1 || parallelism > 4096){ throw new TddlNestableRuntimeException("OPTIMIZE_TABLE_PARALLELISM must in range 0-4096"); } + if (parallelism < 1 || parallelism > 4096) { throw new TddlNestableRuntimeException("OPTIMIZE_TABLE_PARALLELISM must be in the range [1, 4096]"); } ExecutableDdlJob result = new ExecutableDdlJob(); - for (Pair targetTable: tableNameList){ OptimizeTablePhyDdlTask phyDdlTask = - genPhyDdlTask(logicalDdlPlan.relDdl, targetTable.getKey(), targetTable.getValue(), OPTIMIZE_TABLE_DDL_TEMPLATE, ec); + for 
(Pair targetTable : tableNameList) { OptimizeTablePhyDdlTask phyDdlTask = - genPhyDdlTask(logicalDdlPlan.relDdl, targetTable.getKey(), targetTable.getValue(), OPTIMIZE_TABLE_DDL_TEMPLATE, ec); + genPhyDdlTask(logicalDdlPlan.relDdl, targetTable.getKey(), targetTable.getValue(), + OPTIMIZE_TABLE_DDL_TEMPLATE, ec); final String fullTableName = DdlJobFactory.concatWithDot(targetTable.getKey(), targetTable.getValue()); ExecutableDdlJob job = new ExecutableDdlJob(); job.addSequentialTasks(Lists.newArrayList(phyDdlTask.partition(parallelism))); @@ -101,44 +97,47 @@ protected Cursor buildResultCursor(BaseDdlOperation logicalDdlPlan, DdlJob ddlJo LogicalOptimizeTable logicalOptimizeTable = (LogicalOptimizeTable) logicalDdlPlan; SqlOptimizeTableDdl sqlOptimizeTableDdl = (SqlOptimizeTableDdl) logicalOptimizeTable.getNativeSqlNode(); - List> tableNameList = extractTableList(sqlOptimizeTableDdl.getTableNames(), ec.getSchemaName(), ec); + List> tableNameList = + extractTableList(sqlOptimizeTableDdl.getTableNames(), ec.getSchemaName(), ec); - for (Pair targetTable: tableNameList){ + for (Pair targetTable : tableNameList) { final String fullTableName = DdlJobFactory.concatWithDot(targetTable.getKey(), targetTable.getValue()); - result.addRow(new Object[]{ - fullTableName, - "optimize", - "note", - "Table does not support optimize, doing recreate + analyze instead" + result.addRow(new Object[] { + fullTableName, + "optimize", + "note", + "Table does not support optimize, doing recreate + analyze instead" }); - result.addRow(new Object[]{ - fullTableName, - "optimize", - "status", - "OK" + result.addRow(new Object[] { + fullTableName, + "optimize", + "status", + "OK" }); } return result; } - private OptimizeTablePhyDdlTask genPhyDdlTask(DDL ddl, String schemaName, String tableName, String phySql, ExecutionContext executionContext){ - ddl.sqlNode = SqlPhyDdlWrapper.createForAllocateLocalPartition(new SqlIdentifier(tableName, SqlParserPos.ZERO), phySql); + private OptimizeTablePhyDdlTask genPhyDdlTask(DDL ddl, String schemaName, String tableName, String phySql, + ExecutionContext executionContext) { + ddl.sqlNode = + SqlPhyDdlWrapper.createForAllocateLocalPartition(new SqlIdentifier(tableName, SqlParserPos.ZERO), phySql); DirectPhysicalSqlPlanBuilder builder = new DirectPhysicalSqlPlanBuilder( - ddl, new ReorganizeLocalPartitionPreparedData(schemaName, tableName), executionContext + ddl, new ReorganizeLocalPartitionPreparedData(schemaName, tableName), executionContext ); builder.build(); OptimizeTablePhyDdlTask phyDdlTask = new OptimizeTablePhyDdlTask(schemaName, builder.genPhysicalPlanData()); return phyDdlTask; } - - private List> extractTableList(List tableNameSqlNodeList, String currentSchemaName, ExecutionContext ec){ - if(CollectionUtils.isEmpty(tableNameSqlNodeList)){ + private List> extractTableList(List tableNameSqlNodeList, String currentSchemaName, + ExecutionContext ec) { + if (CollectionUtils.isEmpty(tableNameSqlNodeList)) { return new ArrayList<>(); } List> result = new ArrayList<>(); - for(SqlNode sqlNode: tableNameSqlNodeList){ + for (SqlNode sqlNode : tableNameSqlNodeList) { String schema = currentSchemaName; if (!((SqlIdentifier) sqlNode).isSimple()) { schema = ((SqlIdentifier) sqlNode).names.get(0); @@ -147,8 +146,8 @@ private List> extractTableList(List tableNameSqlNo result.add(Pair.of(schema, table)); List gsiNames = GlobalIndexMeta.getPublishedIndexNames(table, schema, ec); - if(CollectionUtils.isNotEmpty(gsiNames)){ - for(String gsi: gsiNames){ + if 
(CollectionUtils.isNotEmpty(gsiNames)) { + for (String gsi : gsiNames) { result.add(Pair.of(schema, gsi)); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalRenameTableHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalRenameTableHandler.java index f765b58df..47671afc5 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalRenameTableHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalRenameTableHandler.java @@ -17,6 +17,8 @@ package com.alibaba.polardbx.executor.handler.ddl; import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.executor.ddl.job.builder.DdlPhyPlanBuilder; import com.alibaba.polardbx.executor.ddl.job.builder.RenameTableBuilder; @@ -29,7 +31,9 @@ import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.executor.utils.DdlUtils; import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; +import com.alibaba.polardbx.gms.lbac.LBACSecurityManager; import com.alibaba.polardbx.gms.util.MetaDbUtil; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.ddl.BaseDdlOperation; @@ -74,6 +78,11 @@ protected boolean validatePlan(BaseDdlOperation logicalDdlPlan, ExecutionContext TableValidator.validateTableNamesForRename(schemaName, sourceTableName, targetTableName); + if (LBACSecurityManager.getInstance().getTablePolicy(schemaName, sourceTableName) != null) { + throw new TddlRuntimeException(ErrorCode.ERR_LBAC, + "table with security policy is not allowed to be renamed"); + } + return false; } @@ -87,6 +96,7 @@ private DdlJob buildRenameTableJob(LogicalRenameTable logicalRenameTable, Execut if (executionContext.getParamManager().getBoolean(ConnectionParams.FLASHBACK_RENAME)) { physicalPlanData.setFlashbackRename(true); } + physicalPlanData.setRenamePhyTable(renameTablePreparedData.isNeedRenamePhyTable()); Map tableVersions = new HashMap<>(); @@ -94,8 +104,9 @@ private DdlJob buildRenameTableJob(LogicalRenameTable logicalRenameTable, Execut renameTablePreparedData.getTableVersion()); ValidateTableVersionTask validateTableVersionTask = new ValidateTableVersionTask(renameTablePreparedData.getSchemaName(), tableVersions); + Long versionId = DdlUtils.generateVersionId(executionContext); - ExecutableDdlJob result = new RenameTableJobFactory(physicalPlanData, executionContext).create(); + ExecutableDdlJob result = new RenameTableJobFactory(physicalPlanData, executionContext, versionId).create(); result.addTask(validateTableVersionTask); result.addTaskRelationship(validateTableVersionTask, result.getHead()); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalSequenceDdlHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalSequenceDdlHandler.java index aee4b62e8..fa7911832 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalSequenceDdlHandler.java +++ 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalSequenceDdlHandler.java @@ -16,138 +16,49 @@ package com.alibaba.polardbx.executor.handler.ddl; -import com.alibaba.polardbx.common.constants.SequenceAttribute.Type; -import com.alibaba.polardbx.common.exception.TddlRuntimeException; -import com.alibaba.polardbx.common.exception.code.ErrorCode; -import com.alibaba.polardbx.common.logger.LoggerInit; -import com.alibaba.polardbx.common.properties.ConnectionParams; -import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.common.utils.TStringUtil; -import com.alibaba.polardbx.config.ConfigDataMode; -import com.alibaba.polardbx.executor.cursor.Cursor; -import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; +import com.alibaba.polardbx.executor.ddl.job.factory.LogicalSequenceDdlJobFactory; import com.alibaba.polardbx.executor.ddl.job.validator.SequenceValidator; -import com.alibaba.polardbx.executor.ddl.sync.ClearPlanCacheSyncAction; -import com.alibaba.polardbx.executor.gms.util.SequenceUtil; -import com.alibaba.polardbx.executor.handler.HandlerCommon; +import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; import com.alibaba.polardbx.executor.spi.IRepository; -import com.alibaba.polardbx.executor.sync.SequenceSyncAction; -import com.alibaba.polardbx.executor.sync.SyncManagerHelper; -import com.alibaba.polardbx.gms.metadb.seq.SequenceBaseRecord; -import com.alibaba.polardbx.gms.metadb.seq.SequencesAccessor; -import com.alibaba.polardbx.gms.sync.SyncScope; -import com.alibaba.polardbx.gms.util.MetaDbUtil; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.rel.ddl.BaseDdlOperation; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalSequenceDdl; -import com.alibaba.polardbx.optimizer.sequence.SequenceManagerProxy; -import com.alibaba.polardbx.sequence.exception.SequenceException; -import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SequenceBean; -import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlSequence; -import java.sql.Connection; -import java.sql.SQLException; - -import static com.alibaba.polardbx.common.constants.SequenceAttribute.AUTO_SEQ_PREFIX; - -public class LogicalSequenceDdlHandler extends HandlerCommon { +public class LogicalSequenceDdlHandler extends LogicalCommonDdlHandler { public LogicalSequenceDdlHandler(IRepository repo) { super(repo); } @Override - public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { - final LogicalSequenceDdl sequenceDdl = (LogicalSequenceDdl) logicalPlan; - final SequenceBean sequence = ((SqlSequence) sequenceDdl.relDdl.sqlNode).getSequenceBean(); - - String schemaName = sequence.getSchemaName(); + protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext executionContext) { + final LogicalSequenceDdl sequenceDdl = (LogicalSequenceDdl) logicalDdlPlan; + final SequenceBean sequenceBean = ((SqlSequence) sequenceDdl.relDdl.sqlNode).getSequenceBean(); + String schemaName = sequenceBean.getSchemaName(); if (TStringUtil.isBlank(schemaName)) { schemaName = executionContext.getSchemaName(); - sequence.setSchemaName(schemaName); + sequenceBean.setSchemaName(schemaName); } - String seqName = sequence.getName(); - - SequenceValidator.validate(sequence, executionContext); - - Cursor cursor = handleSequence(sequence, executionContext); - - LoggerInit.TDDL_SEQUENCE_LOG.info(String.format("Sequence operation %s for %s was successful in %s", - 
sequence.getKind(), seqName, schemaName)); - SyncManagerHelper.sync(new SequenceSyncAction(schemaName, seqName), schemaName, SyncScope.CURRENT_ONLY); - - // Clean up plan cache, but avoid unnecessary cleanup since it's schema-level. - if (TStringUtil.startsWithIgnoreCase(seqName, AUTO_SEQ_PREFIX) && - sequence.getKind() == SqlKind.CREATE_SEQUENCE) { - SyncManagerHelper.sync(new ClearPlanCacheSyncAction(schemaName), schemaName); - } - - return cursor; + return new LogicalSequenceDdlJobFactory(schemaName, + sequenceDdl.getTableName() == null ? "" : sequenceDdl.getTableName(), sequenceBean, + executionContext).create(); } - private Cursor handleSequence(SequenceBean sequence, ExecutionContext executionContext) { - int affectedRows = 0; - - try (Connection metaDbConn = MetaDbUtil.getConnection()) { - SequencesAccessor sequencesAccessor = new SequencesAccessor(); - sequencesAccessor.setConnection(metaDbConn); - - final String seqSchema = sequence.getSchemaName(); - final String seqName = sequence.getName(); - - SequenceBaseRecord record = SequenceUtil.convert(sequence, null, executionContext); - - long newSeqCacheSize = executionContext.getParamManager().getLong(ConnectionParams.NEW_SEQ_CACHE_SIZE); - newSeqCacheSize = newSeqCacheSize < 1 ? 0 : newSeqCacheSize; - - switch (sequence.getKind()) { - case CREATE_SEQUENCE: - affectedRows = sequencesAccessor.insert(record, newSeqCacheSize, - SequenceUtil.buildFailPointInjector(executionContext)); - break; - case ALTER_SEQUENCE: - boolean alterWithoutTypeChange = true; - - if (sequence.getToType() != null && sequence.getToType() != Type.NA) { - Pair recordPair = - SequenceUtil.change(sequence, null, executionContext); - if (recordPair != null) { - affectedRows = sequencesAccessor.change(recordPair, newSeqCacheSize, - SequenceUtil.buildFailPointInjector(executionContext)); - alterWithoutTypeChange = false; - } - } - - if (alterWithoutTypeChange) { - Type existingType = SequenceManagerProxy.getInstance().checkIfExists(seqSchema, seqName); - if (existingType != Type.TIME) { - affectedRows = sequencesAccessor.update(record, newSeqCacheSize); - } - } - - break; - case DROP_SEQUENCE: - if (!ConfigDataMode.isSupportDropAutoSeq() - && TStringUtil.startsWithIgnoreCase(seqName, AUTO_SEQ_PREFIX)) { - throw new SequenceException( - "A sequence associated with a table is not allowed to be dropped separately"); - } - - affectedRows = sequencesAccessor.delete(record); - - break; - case RENAME_SEQUENCE: - affectedRows = sequencesAccessor.rename(record); - break; - default: - throw new SequenceException("Unexpected operation: " + sequence.getKind()); - } - } catch (SQLException e) { - throw new TddlRuntimeException(ErrorCode.ERR_GMS_GET_CONNECTION, e, e.getMessage()); + @Override + protected boolean validatePlan(BaseDdlOperation logicalDdlPlan, ExecutionContext executionContext) { + final LogicalSequenceDdl sequenceDdl = (LogicalSequenceDdl) logicalDdlPlan; + final SequenceBean sequenceBean = ((SqlSequence) sequenceDdl.relDdl.sqlNode).getSequenceBean(); + String schemaName = sequenceBean.getSchemaName(); + if (TStringUtil.isBlank(schemaName)) { + schemaName = executionContext.getSchemaName(); + sequenceBean.setSchemaName(schemaName); } - return new AffectRowCursor(new int[] {affectedRows}); + SequenceValidator.validate(sequenceBean, executionContext, true); + return false; } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalTruncateTableHandler.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalTruncateTableHandler.java index b0dcb0995..4d66e304b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalTruncateTableHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/ddl/LogicalTruncateTableHandler.java @@ -86,6 +86,7 @@ import java.util.Objects; import java.util.TreeMap; +import static com.alibaba.polardbx.common.cdc.ICdcManager.DEFAULT_DDL_VERSION_ID; import static com.alibaba.polardbx.executor.ddl.job.factory.CreateTableJobFactory.CREATE_TABLE_SYNC_TASK; import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcTruncateWithRecycleMarkTask.CDC_RECYCLE_HINTS; @@ -115,7 +116,7 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e } } } else { - if (logicalTruncateTable.isWithGsi()) { + if (logicalTruncateTable.isWithGsi() || logicalTruncateTable.hasColumnarIndex()) { return buildTruncateTableWithGsiJob(logicalTruncateTable, true, executionContext); } else { return buildTruncatePartitionTableJob(logicalTruncateTable, executionContext); @@ -204,7 +205,8 @@ private ExecutableDdlJob buildCreateTableJob(LogicalCreateTable logicalCreateTab Map>> tableTopology = createTableBuilder.getTableTopology(); List physicalPlans = createTableBuilder.getPhysicalPlans(); - PhysicalPlanData physicalPlanData = DdlJobDataConverter.convertToPhysicalPlanData(tableTopology, physicalPlans); + PhysicalPlanData physicalPlanData = + DdlJobDataConverter.convertToPhysicalPlanData(tableTopology, physicalPlans, executionContext); return new CreateTableJobFactory( createTablePreparedData.isAutoPartition(), @@ -213,8 +215,10 @@ private ExecutableDdlJob buildCreateTableJob(LogicalCreateTable logicalCreateTab createTablePreparedData.getSpecialDefaultValueFlags(), createTablePreparedData.getAddedForeignKeys(), physicalPlanData, + DEFAULT_DDL_VERSION_ID, executionContext, - true + true, + null ).create(); } @@ -316,10 +320,17 @@ public LogicalCreateTable generateLogicalCreateTmpTable(String schemaName, Strin entry.getValue().getIndexTablePreparedData().getPartitionInfo()); } - rewritePartitions(sqlCreateTable.getGlobalKeys(), gsiPartitionInfoMap, executionContext); - rewritePartitions(sqlCreateTable.getGlobalUniqueKeys(), gsiPartitionInfoMap, executionContext); - rewritePartitions(sqlCreateTable.getClusteredKeys(), gsiPartitionInfoMap, executionContext); - rewritePartitions(sqlCreateTable.getClusteredUniqueKeys(), gsiPartitionInfoMap, executionContext); + sqlCreateTable.setGlobalKeys( + rewritePartitions(sqlCreateTable.getGlobalKeys(), gsiPartitionInfoMap, executionContext)); + sqlCreateTable.setGlobalUniqueKeys( + rewritePartitions(sqlCreateTable.getGlobalUniqueKeys(), gsiPartitionInfoMap, executionContext)); + sqlCreateTable.setClusteredKeys( + rewritePartitions(sqlCreateTable.getClusteredKeys(), gsiPartitionInfoMap, executionContext)); + sqlCreateTable.setClusteredUniqueKeys( + rewritePartitions(sqlCreateTable.getClusteredUniqueKeys(), gsiPartitionInfoMap, executionContext)); + + // skip columnar indexes + sqlCreateTable.setColumnarKeys(null); } ExecutionPlan createTablePlan = Planner.getInstance().getPlan(sqlCreateTable, plannerContext); @@ -404,11 +415,15 @@ private List> renameKeys(List> keys, - Map gsiPartitionInfoMap, ExecutionContext executionContext) { + private List> rewritePartitions( + List> keys, + Map gsiPartitionInfoMap, + ExecutionContext executionContext) { if (null == keys) { - return; + return keys; } + + 
final List> result = new ArrayList<>(); for (Pair key : keys) { String indexName = key.getKey().getLastName(); PartitionInfo partitionInfo = gsiPartitionInfoMap.get(indexName); @@ -419,8 +434,12 @@ private void rewritePartitions(List> key SqlNode partitioning = FastSqlConstructUtils.convertPartitionBy(partitionBy, new ContextParameters(false), executionContext); - key.getValue().setPartitioning(partitioning); + result.add(Pair.of(key.getKey(), key.getValue().replacePartitioning(partitioning))); + } else { + result.add(key); } } + + return result; } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaAutoSplitScheduleHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaAutoSplitScheduleHandler.java index 2f01eff97..19a833475 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaAutoSplitScheduleHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaAutoSplitScheduleHandler.java @@ -87,26 +87,5 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, return cursor; } - - Set getFilterValues(VirtualView virtualView, int index, ExecutionContext executionContext) { - List indexList = virtualView.getIndex().get(index); - - Map params = executionContext.getParams().getCurrentParameter(); - - Set tableNames = new HashSet<>(); - if (CollectionUtils.isNotEmpty(indexList)) { - for (Object obj : indexList) { - if (obj instanceof RexDynamicParam) { - String tableName = String.valueOf(params.get(((RexDynamicParam) obj).getIndex() + 1).getValue()); - tableNames.add(tableName.toLowerCase()); - } else if (obj instanceof RexLiteral) { - String tableName = ((RexLiteral) obj).getValueAs(String.class); - tableNames.add(tableName.toLowerCase()); - } - } - } - - return tableNames; - } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaCclRuleHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaCclRuleHandler.java index a0e4473c2..5ddf633b3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaCclRuleHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaCclRuleHandler.java @@ -16,7 +16,6 @@ package com.alibaba.polardbx.executor.handler.subhandler; -import com.google.common.collect.Lists; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.handler.LogicalShowCclRuleHandler; @@ -24,6 +23,7 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.view.InformationSchemaCclRules; import com.alibaba.polardbx.optimizer.view.VirtualView; +import com.google.common.collect.Lists; import org.apache.calcite.sql.SqlShowCclRule; import org.apache.calcite.sql.SqlSpecialIdentifier; import org.apache.calcite.sql.parser.SqlParserPos; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaCclTriggerHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaCclTriggerHandler.java index 32681084a..97c2eac7c 100644 --- 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaCclTriggerHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaCclTriggerHandler.java @@ -16,7 +16,6 @@ package com.alibaba.polardbx.executor.handler.subhandler; -import com.google.common.collect.Lists; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.handler.LogicalShowCclTriggerHandler; @@ -24,6 +23,7 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.view.InformationSchemaCclTriggers; import com.alibaba.polardbx.optimizer.view.VirtualView; +import com.google.common.collect.Lists; import org.apache.calcite.sql.SqlShowCclTrigger; import org.apache.calcite.sql.SqlSpecialIdentifier; import org.apache.calcite.sql.parser.SqlParserPos; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaColumnStatisticsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaColumnStatisticsHandler.java index e69de29bb..efdae7c07 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaColumnStatisticsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaColumnStatisticsHandler.java @@ -0,0 +1,112 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.handler.subhandler; + +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.handler.VirtualViewHandler; +import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.config.schema.InformationSchema; +import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticManager; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.row.Row; +import com.alibaba.polardbx.optimizer.view.InformationSchemaColumnStatistics; +import com.alibaba.polardbx.optimizer.view.InformationSchemaTables; +import com.alibaba.polardbx.optimizer.view.VirtualView; + +import java.util.ArrayList; + +import static com.alibaba.polardbx.common.TddlConstants.IMPLICIT_COL_NAME; + +/** + * @author shengyu + */ +public class InformationSchemaColumnStatisticsHandler extends BaseVirtualViewSubClassHandler { + + private static final Logger logger = LoggerFactory.getLogger(InformationSchemaColumnStatisticsHandler.class); + + public InformationSchemaColumnStatisticsHandler( + VirtualViewHandler virtualViewHandler) { + super(virtualViewHandler); + } + + @Override + public boolean isSupport(VirtualView virtualView) { + return virtualView instanceof InformationSchemaColumnStatistics; + } + + @Override + public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { + InformationSchemaColumnStatistics informationSchemaColumnStatistics + = (InformationSchemaColumnStatistics) virtualView; + InformationSchemaTables informationSchemaTables = + new InformationSchemaTables(informationSchemaColumnStatistics.getCluster(), + informationSchemaColumnStatistics.getTraitSet()); + + informationSchemaTables.copyFilters(informationSchemaColumnStatistics); + + Cursor tablesCursor = null; + + try { + tablesCursor = virtualViewHandler.handle(informationSchemaTables, executionContext); + + Row row; + while ((row = tablesCursor.next()) != null) { + String tableSchema = row.getString(1); + String tableName = row.getString(2); + if (InformationSchema.NAME.equalsIgnoreCase(tableSchema)) { + continue; + + } + + try { + TableMeta tableMeta = + OptimizerContext.getContext(tableSchema).getLatestSchemaManager().getTable(tableName); + + for (ColumnMeta columnMeta : tableMeta.getAllColumns()) { + String columnName = columnMeta.getName(); + if (columnName.equalsIgnoreCase(IMPLICIT_COL_NAME)) { + continue; + } + String histogramStr = + StatisticManager.getInstance().getHistogramSerializable(tableSchema, tableName, columnName); + if (histogramStr != null) { + cursor.addRow(new Object[] { + tableSchema, + tableName, + columnName, + histogramStr + }); + } + } + } catch (Throwable t) { + logger.error(t); + } + } + } finally { + if (tablesCursor != null) { + tablesCursor.close(new ArrayList<>()); + } + } + + return cursor; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaColumnarIndexStatusHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaColumnarIndexStatusHandler.java new file mode 100644 index 
000000000..d5901426b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaColumnarIndexStatusHandler.java @@ -0,0 +1,293 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.handler.subhandler; + +import com.alibaba.polardbx.common.jdbc.ParameterContext; +import com.alibaba.polardbx.common.oss.ColumnarFileType; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.balancer.stats.StatsUtils; +import com.alibaba.polardbx.executor.common.ExecutorContext; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.handler.VirtualViewHandler; +import com.alibaba.polardbx.gms.metadb.table.ColumnarTableMappingAccessor; +import com.alibaba.polardbx.gms.metadb.table.IndexStatus; +import com.alibaba.polardbx.gms.util.MetaDbUtil; +import com.alibaba.polardbx.optimizer.config.table.GsiMetaManager; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.function.calc.scalar.filter.Like; +import com.alibaba.polardbx.optimizer.view.InformationSchemaColumnarIndexStatus; +import com.alibaba.polardbx.optimizer.view.VirtualView; +import io.airlift.slice.DataSize; +import org.apache.commons.lang.StringUtils; +import org.jetbrains.annotations.NotNull; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; +import java.util.stream.Collectors; + +public class InformationSchemaColumnarIndexStatusHandler extends BaseVirtualViewSubClassHandler { + + private static final Logger logger = LoggerFactory.getLogger(InformationSchemaColumnarIndexStatusHandler.class); + + private static final String QUERY_ORC_FILES_SQL_FORMAT = + "SELECT logical_table_name AS table_id," + + " count(*) AS file_count, sum(table_rows) AS row_count, sum(extent_size) AS file_size FROM files" + + " WHERE logical_table_name IN (%s) AND RIGHT(file_name, 3) = 'orc'" + + " GROUP BY table_id"; + + private static final String QUERY_CSV_DEL_FILES_SQL_FORMAT = + "SELECT f.logical_table_name AS table_id, RIGHT(f.file_name, 3) AS suffix, COUNT(*) AS file_count, " + + "SUM(caf.total_rows) AS row_count, SUM(caf.append_offset + caf.append_length) AS file_size FROM files f " + + "INNER JOIN (SELECT file_name, MAX(checkpoint_tso) AS max_checkpoint_tso FROM columnar_appended_files " + + "GROUP BY file_name) latest_checkpoint ON f.file_name = latest_checkpoint.file_name " + + "INNER JOIN columnar_appended_files caf ON 
f.file_name = caf.file_name " + + "AND caf.checkpoint_tso = latest_checkpoint.max_checkpoint_tso " + + "WHERE f.logical_table_name IN (%s) AND RIGHT(f.file_name, 3) IN ('csv', 'del') " + + "GROUP BY f.logical_table_name, RIGHT(f.file_name, 3)"; + + public InformationSchemaColumnarIndexStatusHandler(VirtualViewHandler virtualViewHandler) { + super(virtualViewHandler); + } + + @Override + public boolean isSupport(VirtualView virtualView) { + return virtualView instanceof InformationSchemaColumnarIndexStatus; + } + + @Override + public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { + final int schemaIndex = InformationSchemaColumnarIndexStatus.getTableSchemaIndex(); + final int tableIndex = InformationSchemaColumnarIndexStatus.getTableNameIndex(); + Map params = executionContext.getParams().getCurrentParameter(); + // only new partitioning db + Set schemaNames = new TreeSet<>(String::compareToIgnoreCase); + schemaNames.addAll(StatsUtils.getDistinctSchemaNames()); + + schemaNames = virtualView.applyFilters(schemaIndex, params, schemaNames); + + // tableIndex + Set indexTableNames = virtualView.getEqualsFilterValues(tableIndex, params); + // tableLike + String tableLike = virtualView.getLikeString(tableIndex, params); + + queryColumnarIndexStatus(schemaNames, indexTableNames, tableLike, cursor, executionContext); + + return cursor; + } + + private void queryColumnarIndexStatus(Set schemaNames, Set logicalTableNames, String tableLike, + ArrayResultCursor cursor, ExecutionContext executionContext) { + + List columnarIndexInfoList = + getColumnarIndexInfoList(schemaNames, logicalTableNames, tableLike, executionContext); + + Map indexInfoMap = queryTableIdMap(columnarIndexInfoList); + + if (indexInfoMap.isEmpty()) { + return; + } + + Map indexStatusMap = queryFullStatus(indexInfoMap); + + fillIndexStatus(indexInfoMap, indexStatusMap, cursor); + } + + private void fillIndexStatus(Map indexInfoMap, + Map indexStatusMap, ArrayResultCursor cursor) { + indexStatusMap.forEach((tableId, indexStatus) -> { + ColumnarIndexInfo indexInfo = indexInfoMap.get(tableId); + + if (indexInfo == null) { + return; + } + + ColumnarFilesStatus orcStatus = indexStatus.orcStatus; + ColumnarFilesStatus csvStatus = indexStatus.csvStatus; + ColumnarFilesStatus delStatus = indexStatus.delStatus; + + long orcSize = orcStatus == null ? 0 : orcStatus.fileSize; + long csvSize = csvStatus == null ? 0 : csvStatus.fileSize; + long delSize = delStatus == null ? 0 : delStatus.fileSize; + + cursor.addRow(new Object[] { + indexInfo.tableSchema, + indexInfo.tableName, + indexInfo.indexName, + indexInfo.indexStatus.name(), + orcStatus == null ? 0L : orcStatus.fileCount, + orcStatus == null ? 0L : orcStatus.rowCount, + orcSize, + csvStatus == null ? 0L : csvStatus.fileCount, + csvStatus == null ? 0L : csvStatus.rowCount, + csvSize, + delStatus == null ? 0L : delStatus.fileCount, + delStatus == null ? 
0L : delStatus.rowCount,
+ delSize,
+ orcSize + csvSize + delSize
+ });
+ });
+ }
+
+ @NotNull
+ private Map<Long, ColumnarIndexInfo> queryTableIdMap(List<ColumnarIndexInfo> columnarIndexInfoList) {
+ try (Connection connection = MetaDbUtil.getConnection()) {
+ ColumnarTableMappingAccessor tableMappingAccessor = new ColumnarTableMappingAccessor();
+ tableMappingAccessor.setConnection(connection);
+
+ return tableMappingAccessor.querySchemaTableIndexes(
+ columnarIndexInfoList.stream().map(ColumnarIndexInfo::toKey).collect(Collectors.toList())
+ ).stream().collect(Collectors.toMap(
+ record -> record.tableId,
+ record -> new ColumnarIndexInfo(record.tableSchema, record.tableName, record.indexName,
+ IndexStatus.valueOf(record.status))
+ ));
+ } catch (Throwable t) {
+ throw GeneralUtil.nestedException(t);
+ }
+ }
+
+ @NotNull
+ private Map<Long, ColumnarIndexStatus> queryFullStatus(Map<Long, ColumnarIndexInfo> indexInfoMap) {
+ Map<Long, ColumnarIndexStatus> indexStatusMap = new HashMap<>();
+ try (Connection connection = MetaDbUtil.getConnection()) {
+ // query orc status
+ String sql = String.format(QUERY_ORC_FILES_SQL_FORMAT, StringUtils.join(indexInfoMap.keySet(), ","));
+
+ try (Statement stmt = connection.createStatement(); ResultSet rs = stmt.executeQuery(sql)) {
+ while (rs.next()) {
+ Long tableId = Long.valueOf(rs.getString("table_id"));
+ long fileCount = rs.getLong("file_count");
+ long rowCount = rs.getLong("row_count");
+ long fileSize = rs.getLong("file_size");
+
+ indexStatusMap.computeIfAbsent(tableId,
+ id -> new ColumnarIndexStatus()
+ ).orcStatus = new ColumnarFilesStatus(fileCount, rowCount, fileSize);
+ }
+ }
+
+ // query csv and del status; this may be costly, since it joins columnar_appended_files
+ sql = String.format(QUERY_CSV_DEL_FILES_SQL_FORMAT, StringUtils.join(indexInfoMap.keySet(), ","));
+
+ try (Statement stmt = connection.createStatement(); ResultSet rs = stmt.executeQuery(sql)) {
+ while (rs.next()) {
+ Long tableId = Long.valueOf(rs.getString("table_id"));
+ String suffix = rs.getString("suffix");
+ long fileCount = rs.getLong("file_count");
+ long rowCount = rs.getLong("row_count");
+ long fileSize = rs.getLong("file_size");
+
+ ColumnarFilesStatus filesStatus = new ColumnarFilesStatus(fileCount, rowCount, fileSize);
+ ColumnarIndexStatus indexStatus =
+ indexStatusMap.computeIfAbsent(tableId, id -> new ColumnarIndexStatus());
+
+ switch (ColumnarFileType.of(suffix)) {
+ case CSV:
+ indexStatus.csvStatus = filesStatus;
+ break;
+ case DEL:
+ indexStatus.delStatus = filesStatus;
+ break;
+ default:
+ }
+ }
+ }
+ } catch (Throwable t) {
+ throw GeneralUtil.nestedException(t);
+ }
+ return indexStatusMap;
+ }
+
+ @NotNull
+ private List<ColumnarIndexInfo> getColumnarIndexInfoList(Set<String> schemaNames, Set<String> logicalTableNames,
+ String tableLike, ExecutionContext executionContext) {
+ final GsiMetaManager metaManager =
+ ExecutorContext.getContext(executionContext.getSchemaName()).getGsiManager().getGsiMetaManager();
+
+ // TODO(siyun): should fetch index that already dropped?
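+ // Load the GSI meta for the requested schemas/tables; the loop below keeps only
+ // columnar indexes and applies the optional LIKE pattern to the index's primary table name.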
+ final GsiMetaManager.GsiMetaBean meta = metaManager.getAllGsiMetaBean(schemaNames, logicalTableNames); + + List columnarIndexInfoList = new ArrayList<>(); + + for (GsiMetaManager.GsiTableMetaBean tableMetaBean : meta.getTableMeta().values()) { + GsiMetaManager.GsiIndexMetaBean indexMetaBean = tableMetaBean.gsiMetaBean; + if (indexMetaBean != null && indexMetaBean.columnarIndex) { + if (tableLike != null && !new Like().like(indexMetaBean.tableName, tableLike)) { + continue; + } + + columnarIndexInfoList.add(new ColumnarIndexInfo( + indexMetaBean.tableSchema, + indexMetaBean.tableName, + indexMetaBean.indexName, + indexMetaBean.indexStatus) + ); + } + } + + return columnarIndexInfoList; + } + + private static class ColumnarIndexInfo { + final public String tableSchema; + final public String tableName; + final public String indexName; + final public IndexStatus indexStatus; + + private ColumnarIndexInfo(String tableSchema, String tableName, String indexName, IndexStatus indexStatus) { + this.tableSchema = tableSchema; + this.tableName = tableName; + this.indexName = indexName; + this.indexStatus = indexStatus; + } + + public Pair> toKey() { + return Pair.of(tableSchema, Pair.of(tableName, indexName)); + } + } + + private static class ColumnarIndexStatus { + public ColumnarFilesStatus orcStatus; + public ColumnarFilesStatus csvStatus; + public ColumnarFilesStatus delStatus; + } + + private static class ColumnarFilesStatus { + final public long fileCount; + final public long rowCount; + final public long fileSize; + + private ColumnarFilesStatus(long fileCount, long rowCount, long fileSize) { + this.fileCount = fileCount; + this.rowCount = rowCount; + this.fileSize = fileSize; + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaColumnarStatusHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaColumnarStatusHandler.java new file mode 100644 index 000000000..108484671 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaColumnarStatusHandler.java @@ -0,0 +1,144 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.executor.handler.subhandler;
+
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.common.jdbc.ParameterContext;
+import com.alibaba.polardbx.common.utils.logger.Logger;
+import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
+import com.alibaba.polardbx.executor.cursor.Cursor;
+import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor;
+import com.alibaba.polardbx.executor.gms.util.ColumnarTransactionUtils;
+import com.alibaba.polardbx.executor.handler.VirtualViewHandler;
+import com.alibaba.polardbx.gms.metadb.table.ColumnarTableMappingAccessor;
+import com.alibaba.polardbx.gms.metadb.table.ColumnarTableMappingRecord;
+import com.alibaba.polardbx.gms.metadb.table.ColumnarTableStatus;
+import com.alibaba.polardbx.gms.util.MetaDbUtil;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.view.InformationSchemaColumnarStatus;
+import com.alibaba.polardbx.optimizer.view.VirtualView;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+
+public class InformationSchemaColumnarStatusHandler extends BaseVirtualViewSubClassHandler {
+
+ private static final Logger logger = LoggerFactory.getLogger(InformationSchemaColumnarStatusHandler.class);
+
+ public InformationSchemaColumnarStatusHandler(VirtualViewHandler virtualViewHandler) {
+ super(virtualViewHandler);
+ }
+
+ @Override
+ public boolean isSupport(VirtualView virtualView) {
+ return virtualView instanceof InformationSchemaColumnarStatus;
+ }
+
+ @Override
+ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) {
+ final int tsoIndex = InformationSchemaColumnarStatus.getTsoIndex();
+ final int schemaIndex = InformationSchemaColumnarStatus.getSchemaIndex();
+ final int tableIndex = InformationSchemaColumnarStatus.getTableIndex();
+ final int indexIndex = InformationSchemaColumnarStatus.getIndexNameIndex();
+
+ Map<Integer, ParameterContext> params = executionContext.getParams().getCurrentParameter();
+
+ // Tso
+ Long tso = null;
+ Set<String> tsoFilter = virtualView.getEqualsFilterValues(tsoIndex, params);
+ if (tsoFilter.isEmpty()) {
+ // default to the latest TSO
+ tso = ColumnarTransactionUtils.getLatestShowColumnarStatusTsoFromGms();
+ } else {
+ tso = Long.valueOf(tsoFilter.iterator().next());
+ }
+
+ if (tso == null) {
+ tso = Long.MIN_VALUE;
+ }
+
+ // 1. fetch all columnar index tables
+ List<ColumnarTableMappingRecord> columnarRecords = new ArrayList<>();
+ try (Connection metaDbConn = MetaDbUtil.getConnection()) {
+
+ ColumnarTableMappingAccessor tableMappingAccessor = new ColumnarTableMappingAccessor();
+ tableMappingAccessor.setConnection(metaDbConn);
+
+ // also list tables that are still being created, so the build progress is visible
+ columnarRecords.addAll(tableMappingAccessor.queryByStatus(ColumnarTableStatus.PUBLIC.name()));
+ columnarRecords.addAll(tableMappingAccessor.queryByStatus(ColumnarTableStatus.CREATING.name()));
+
+ } catch (SQLException e) {
+ throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, e, "Failed to fetch columnar indexes");
+ }
+ Set<String> schemaNames = new TreeSet<>(String::compareToIgnoreCase);
+ Set<String> tableNames = new TreeSet<>(String::compareToIgnoreCase);
+ Set<String> indexNames = new TreeSet<>(String::compareToIgnoreCase);
+
+ for (ColumnarTableMappingRecord record : columnarRecords) {
+ schemaNames.add(record.tableSchema);
+ tableNames.add(record.tableName);
+ indexNames.add(record.indexName);
+ }
+
+ // apply the filter conditions to determine which indexes to report
+ schemaNames = virtualView.applyFilters(schemaIndex, params, schemaNames);
+ tableNames = virtualView.applyFilters(tableIndex, params, tableNames);
+ indexNames = virtualView.applyFilters(indexIndex, params, indexNames);
+
+ List<ColumnarTableMappingRecord> needReadIndexRecords = new ArrayList<>();
+ for (ColumnarTableMappingRecord record : columnarRecords) {
+ if (schemaNames.contains(record.tableSchema)
+ && tableNames.contains(record.tableName)
+ && indexNames.contains(record.indexName)) {
+ needReadIndexRecords.add(record);
+ }
+ }
+
+ List rows =
+ ColumnarTransactionUtils.queryColumnarIndexStatus(tso, needReadIndexRecords);
+
+ rows.forEach(row -> cursor.addRow(new Object[] {
+ row.tso,
+ row.tableSchema,
+ row.tableName,
+ row.indexName,
+ row.indexId,
+ row.partitionNum,
+ row.csvFileNum,
+ row.csvRows,
+ row.csvFileSize,
+ row.orcFileNum,
+ row.orcRows,
+ row.orcFileSize,
+ row.delFileNum,
+ row.delRows,
+ row.delFileSize,
+ row.csvRows + row.orcRows - row.delRows,
+ row.csvFileSize + row.orcFileSize + row.delFileSize,
+ row.status
+ }));
+
+ return cursor;
+ }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaColumnsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaColumnsHandler.java
index e69de29bb..9153ef430 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaColumnsHandler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaColumnsHandler.java
@@ -0,0 +1,169 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.alibaba.polardbx.executor.handler.subhandler; + +import com.alibaba.druid.util.JdbcUtils; +import com.alibaba.polardbx.common.jdbc.MasterSlave; +import com.alibaba.polardbx.common.jdbc.ParameterContext; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.common.ExecutorContext; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.handler.VirtualViewHandler; +import com.alibaba.polardbx.group.jdbc.TGroupDataSource; +import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.function.calc.scalar.CanAccessTable; +import com.alibaba.polardbx.optimizer.view.InformationSchemaColumns; +import com.alibaba.polardbx.optimizer.view.VirtualView; +import org.apache.commons.lang.StringUtils; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.Statement; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; + +/** + * @author shengyu + */ +public class InformationSchemaColumnsHandler extends BaseVirtualViewSubClassHandler { + + private static final Logger logger = LoggerFactory.getLogger(InformationSchemaColumnsHandler.class); + + public InformationSchemaColumnsHandler(VirtualViewHandler virtualViewHandler) { + super(virtualViewHandler); + } + + @Override + public boolean isSupport(VirtualView virtualView) { + return virtualView instanceof InformationSchemaColumns; + } + + @Override + public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { + final int schemaIndex = InformationSchemaColumns.getTableSchemaIndex(); + final int tableIndex = InformationSchemaColumns.getTableNameIndex(); + Map params = executionContext.getParams().getCurrentParameter(); + Set schemaNames = + virtualView.applyFilters(schemaIndex, params, OptimizerContext.getActiveSchemaNames()); + + // tableIndex + Set indexTableNames = virtualView.getEqualsFilterValues(tableIndex, params); + // tableLike + String tableLike = virtualView.getLikeString(tableIndex, params); + + for (String schemaName : schemaNames) { + Map>> groupToPair = + virtualViewHandler.getGroupToPair(schemaName, indexTableNames, tableLike, + executionContext.isTestMode()); + + for (String groupName : groupToPair.keySet()) { + + TGroupDataSource groupDataSource = + (TGroupDataSource) ExecutorContext.getContext(schemaName).getTopologyExecutor() + .getGroupExecutor(groupName).getDataSource(); + + String actualDbName = groupDataSource.getConfigManager().getDataSource(MasterSlave.MASTER_ONLY) + .getDsConfHandle().getRunTimeConf().getDbName(); + + Set> collection = groupToPair.get(groupName); + + if (collection.isEmpty()) { + continue; + } + + StringBuilder stringBuilder = new StringBuilder(); + stringBuilder.append("select * from information_schema.columns where table_schema = "); + stringBuilder.append("'"); + stringBuilder.append(actualDbName.replace("'", "\\'")); + stringBuilder.append("' and table_name in ("); + + boolean first = true; + + // physicalTableName -> logicalTableName + Map physicalTableToLogicalTable = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); + for (Pair pair : collection) { + String logicalTableName = pair.getKey(); + String physicalTableName = pair.getValue(); + 
physicalTableToLogicalTable.put(physicalTableName, logicalTableName);
+ if (!first) {
+ stringBuilder.append(", ");
+ }
+ first = false;
+ stringBuilder.append("'");
+ stringBuilder.append(physicalTableName.replace("'", "\\'"));
+ stringBuilder.append("'");
+ }
+ stringBuilder.append(")");
+
+ Connection conn = null;
+ Statement stmt = null;
+ ResultSet rs = null;
+ try {
+ conn = groupDataSource.getConnection();
+ stmt = conn.createStatement();
+ rs = stmt.executeQuery(stringBuilder.toString());
+ while (rs.next()) {
+ String logicalTableName =
+ physicalTableToLogicalTable.get(rs.getString("TABLE_NAME"));
+
+ if (!CanAccessTable.verifyPrivileges(schemaName, logicalTableName, executionContext)) {
+ continue;
+ }
+
+ cursor.addRow(new Object[] {
+ rs.getObject("TABLE_CATALOG"),
+ StringUtils.lowerCase(schemaName),
+ StringUtils.lowerCase(logicalTableName),
+ rs.getObject("COLUMN_NAME"),
+ rs.getObject("ORDINAL_POSITION"),
+ rs.getObject("COLUMN_DEFAULT"),
+ rs.getObject("IS_NULLABLE"),
+ rs.getObject("DATA_TYPE"),
+ rs.getObject("CHARACTER_MAXIMUM_LENGTH"),
+ rs.getObject("CHARACTER_OCTET_LENGTH"),
+ rs.getObject("NUMERIC_PRECISION"),
+ rs.getObject("NUMERIC_SCALE"),
+ rs.getObject("DATETIME_PRECISION"),
+ rs.getObject("CHARACTER_SET_NAME"),
+ rs.getObject("COLLATION_NAME"),
+ rs.getObject("COLUMN_TYPE"),
+ rs.getObject("COLUMN_KEY"),
+ rs.getObject("EXTRA"),
+ rs.getObject("PRIVILEGES"),
+ rs.getObject("COLUMN_COMMENT"),
+ null,
+ // GENERATION_EXPRESSION: MySQL 5.6 does not have this column, so we return null by default
+ });
+ }
+ } catch (Throwable t) {
+ logger.error(t);
+ } finally {
+ JdbcUtils.close(rs);
+ JdbcUtils.close(stmt);
+ JdbcUtils.close(conn);
+ }
+ }
+ }
+
+ return cursor;
+ }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaCreateDatabaseAsBackFillHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaCreateDatabaseAsBackFillHandler.java
index c35899b0d..89ab1b5d4 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaCreateDatabaseAsBackFillHandler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaCreateDatabaseAsBackFillHandler.java
@@ -16,13 +16,11 @@
 package com.alibaba.polardbx.executor.handler.subhandler;
-import com.alibaba.fastjson.JSON;
 import com.alibaba.polardbx.common.ddl.newengine.DdlState;
 import com.alibaba.polardbx.common.ddl.newengine.DdlType;
 import com.alibaba.polardbx.common.utils.GeneralUtil;
 import com.alibaba.polardbx.common.utils.logger.Logger;
 import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
-import com.alibaba.polardbx.executor.backfill.Throttle;
 import com.alibaba.polardbx.executor.backfill.ThrottleInfo;
 import com.alibaba.polardbx.executor.cursor.Cursor;
 import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaDdlPlanHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaDdlPlanHandler.java
index 186141818..10e815056 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaDdlPlanHandler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaDdlPlanHandler.java
@@ -17,11 +17,8 @@
 package
com.alibaba.polardbx.executor.handler.subhandler; import com.alibaba.polardbx.common.ddl.newengine.DdlPlanState; -import com.alibaba.polardbx.common.ddl.newengine.DdlState; import com.alibaba.polardbx.common.ddl.newengine.DdlTaskState; -import com.alibaba.polardbx.common.ddl.newengine.DdlType; import com.alibaba.polardbx.common.utils.GeneralUtil; -import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.cursor.Cursor; @@ -29,30 +26,18 @@ import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlEngineSchedulerManager; import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.gms.metadb.misc.DdlEngineAccessor; -import com.alibaba.polardbx.gms.metadb.misc.DdlEngineRecord; import com.alibaba.polardbx.gms.metadb.misc.DdlEngineTaskRecord; import com.alibaba.polardbx.gms.scheduler.DdlPlanAccessor; import com.alibaba.polardbx.gms.scheduler.DdlPlanRecord; -import com.alibaba.polardbx.gms.tablegroup.ComplexTaskOutlineRecord; import com.alibaba.polardbx.gms.util.MetaDbUtil; -import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.view.InformationSchemaDdlPlan; -import com.alibaba.polardbx.optimizer.view.InformationSchemaMoveDatabase; import com.alibaba.polardbx.optimizer.view.VirtualView; -import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import java.sql.Connection; import java.sql.SQLException; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.HashMap; import java.util.List; -import java.util.Map; - -import static com.alibaba.polardbx.common.ddl.newengine.DdlConstants.NONE; -import static com.alibaba.polardbx.common.ddl.newengine.DdlConstants.PERCENTAGE; /** * Created by luoyanxin. 
@@ -68,7 +53,6 @@ public InformationSchemaDdlPlanHandler(VirtualViewHandler virtualViewHandler) { super(virtualViewHandler); } - @Override public boolean isSupport(VirtualView virtualView) { return virtualView instanceof InformationSchemaDdlPlan; @@ -89,9 +73,11 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, if (jobId > 0 && !success) { progress = getTaskProgress(jobId); } - Object[] row = new Object[] {ddlPlanRecord.getId(), ddlPlanRecord.getPlanId(), ddlPlanRecord.getJobId(), + Object[] row = new Object[] { + ddlPlanRecord.getId(), ddlPlanRecord.getPlanId(), ddlPlanRecord.getJobId(), ddlPlanRecord.getTableSchema(), ddlPlanRecord.getDdlStmt(), ddlPlanRecord.getState(), - ddlPlanRecord.getDdlType(), progress, ddlPlanRecord.getRetryCount(), ddlPlanRecord.getResult(), ddlPlanRecord.getExtras(), + ddlPlanRecord.getDdlType(), progress, ddlPlanRecord.getRetryCount(), ddlPlanRecord.getResult(), + ddlPlanRecord.getExtras(), ddlPlanRecord.getGmtCreate(), ddlPlanRecord.getGmtModified(), ddlPlanRecord.getResource()}; cursor.addRow(row); @@ -117,11 +103,11 @@ private int getTaskProgress(long jobId) { finishedCount++; } } - if(totalCount == 0){ + if (totalCount == 0) { return 0; } return finishedCount * 100 / totalCount; - }catch (Exception e){ + } catch (Exception e) { LOGGER.error("get task progress error, jobId:" + jobId, e); return 0; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaDnPerfHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaDnPerfHandler.java index 394b67b4e..bdd080433 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaDnPerfHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaDnPerfHandler.java @@ -23,6 +23,7 @@ import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.sync.ISyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.view.InformationSchemaDnPerf; @@ -64,7 +65,7 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, } catch (Exception e) { throw new TddlRuntimeException(ErrorCode.ERR_CONFIG, e, e.getMessage()); } - List>> results = SyncManagerHelper.sync(syncAction); + List>> results = SyncManagerHelper.sync(syncAction, SyncScope.CURRENT_ONLY); for (List> rs : results) { if (rs == null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaFileStorageFilesMetaHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaFileStorageFilesMetaHandler.java index 58bd03ac5..14427fd08 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaFileStorageFilesMetaHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaFileStorageFilesMetaHandler.java @@ -22,9 +22,9 @@ import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; -import 
com.alibaba.polardbx.gms.engine.FileStorageMetaStore; import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.gms.engine.FileStorageFilesMetaRecord; +import com.alibaba.polardbx.gms.engine.FileStorageMetaStore; import com.alibaba.polardbx.gms.engine.FileSystemGroup; import com.alibaba.polardbx.gms.engine.FileSystemManager; import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; @@ -37,7 +37,6 @@ /** * @author chenzilin - * @date 2022/3/16 10:34 */ public class InformationSchemaFileStorageFilesMetaHandler extends BaseVirtualViewSubClassHandler { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaFileStorageHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaFileStorageHandler.java index af0a82a11..ebd666036 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaFileStorageHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaFileStorageHandler.java @@ -49,16 +49,16 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, group.getMaster().getWorkingDirectory(), engine, "MASTER", - FileSystemManager.getReadLockCount(engine), - FileSystemManager.isWriteLocked(engine) ? 1 : 0 + 0, + 0 }); for (FileSystem slave : group.getSlaves()) { cursor.addRow(new Object[] { slave.getWorkingDirectory(), engine, "SLAVE", - FileSystemManager.getReadLockCount(engine), - FileSystemManager.isWriteLocked(engine) ? 1 : 0 + 0, + 0 }); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaFunctionCacheCapacityHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaFunctionCacheCapacityHandler.java index 592f7059b..9cba7e2ec 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaFunctionCacheCapacityHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaFunctionCacheCapacityHandler.java @@ -21,6 +21,7 @@ import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.sync.FetchFunctionCacheCapacitySyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.view.InformationSchemaFunctionCacheCapacity; @@ -41,7 +42,8 @@ public boolean isSupport(VirtualView virtualView) { @Override public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { - List>> results = SyncManagerHelper.sync(new FetchFunctionCacheCapacitySyncAction()); + List>> results = SyncManagerHelper.sync(new FetchFunctionCacheCapacitySyncAction(), + SyncScope.ALL); for (List> nodeRows : results) { if (nodeRows == null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaFunctionCacheHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaFunctionCacheHandler.java index 1cc4f5a17..d7e5c6fd3 100644 --- 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaFunctionCacheHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaFunctionCacheHandler.java @@ -22,6 +22,7 @@ import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.sync.FetchFunctionCacheSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.view.InformationSchemaFunctionCache; @@ -44,7 +45,7 @@ public boolean isSupport(VirtualView virtualView) { public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { List>> results = SyncManagerHelper.sync(new FetchFunctionCacheSyncAction(), - TddlConstants.INFORMATION_SCHEMA); + TddlConstants.INFORMATION_SCHEMA, SyncScope.ALL); for (List> nodeRows : results) { if (nodeRows == null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaGlobalIndexesHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaGlobalIndexesHandler.java index 08a1bd123..61e2d9ab2 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaGlobalIndexesHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaGlobalIndexesHandler.java @@ -18,7 +18,6 @@ import com.alibaba.polardbx.common.jdbc.ParameterContext; import com.alibaba.polardbx.common.jdbc.Parameters; -import com.alibaba.polardbx.common.jdbc.RawString; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.common.ExecutorContext; @@ -30,6 +29,7 @@ import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.gms.metadb.table.GsiStatisticsAccessorDelegate; import com.alibaba.polardbx.gms.metadb.table.IndexesRecord; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.config.table.GsiMetaManager; import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticManager; import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticResult; @@ -43,16 +43,12 @@ import com.google.common.collect.ImmutableSet; import org.apache.calcite.plan.RelOptCluster; import org.apache.calcite.rex.RexBuilder; -import org.apache.calcite.rex.RexDynamicParam; -import org.apache.calcite.rex.RexLiteral; import org.apache.calcite.rex.RexNode; import org.apache.calcite.sql.fun.SqlStdOperatorTable; -import org.apache.commons.collections.CollectionUtils; import java.math.BigDecimal; import java.util.ArrayList; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -94,8 +90,8 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, // schemaName -> gsi table beans Map> schemaToGsiTables = meta.getTableMeta().values().stream() - // only collect gsi table bean - .filter(bean -> bean.gsiMetaBean != null) + // only collect gsi table bean, excluding columnar index + .filter(bean -> bean.gsiMetaBean != null && !bean.gsiMetaBean.columnarIndex) // group by schema name 
.collect(Collectors.groupingBy(bean -> bean.tableSchema, Collectors.toList())); @@ -128,7 +124,7 @@ private void generateRows(String schemaName, // filter condition 1: TABLE_SCHEMA = {schemaName} RexNode filterCondition = rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, - rexBuilder.makeInputRef(informationSchemaTables, informationSchemaTables.getTableSchemaIndex()), + rexBuilder.makeInputRef(informationSchemaTables, InformationSchemaTables.getTableSchemaIndex()), rexBuilder.makeLiteral(schemaName)); // filter condition 2: TABLE_NAME IN (tableName1, tableName2, ...) @@ -136,7 +132,7 @@ gsiTableBeans.forEach(bean -> tableNameLiterals.add(rexBuilder.makeLiteral(bean.tableName))); RexNode inCondition = rexBuilder.makeCall(SqlStdOperatorTable.IN, - rexBuilder.makeInputRef(informationSchemaTables, informationSchemaTables.getTableNameIndex()), + rexBuilder.makeInputRef(informationSchemaTables, InformationSchemaTables.getTableNameIndex()), rexBuilder.makeCall(SqlStdOperatorTable.ROW, tableNameLiterals)); // final filter condition: condition 1 AND condition 2 @@ -145,7 +141,7 @@ informationSchemaTables.setIncludeGsi(true); // GSI table name -> GSI size in MB - Map gsiSizes = new HashMap<>(gsiTableBeans.size()); + Map gsiSizes = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); // If some GSI cannot get size, just set its size to NULL. // In this way, all GSI tables are made sure to be presented in the final result. Cursor cursor = null; @@ -187,7 +183,7 @@ //query gsi statistics info Map> schemaAndGsis = new TreeMap<>(String::compareToIgnoreCase); gsiTableBeans.forEach(tableBean -> { - if (tableBean != null && tableBean.gsiMetaBean != null) { + if (tableBean != null && tableBean.gsiMetaBean != null && !tableBean.gsiMetaBean.columnarIndex) { GsiMetaManager.GsiIndexMetaBean indexBean = tableBean.gsiMetaBean; String schema = indexBean.tableSchema; String index = indexBean.indexName; @@ -197,11 +193,12 @@ schemaAndGsis.get(schema).add(index); } }); - GsiStatisticsManager statisticsManager = GsiStatisticsManager.getInstance(); - if (statisticsManager.enableGsiStatisticsCollection()) { + + if (GsiStatisticsManager.enableGsiStatisticsCollection()) { for (String schemaToSync : schemaAndGsis.keySet()) { SyncManagerHelper.sync( - new GsiStatisticsSyncAction(schemaToSync, null, null, GsiStatisticsSyncAction.QUERY_RECORD)); + new GsiStatisticsSyncAction(schemaToSync, null, null, GsiStatisticsSyncAction.QUERY_RECORD), + SyncScope.ALL); } } @@ -226,7 +223,7 @@ // make sure all GSI tables are presented in the final result gsiTableBeans.forEach(tableBean -> { - if (tableBean != null && tableBean.gsiMetaBean != null) { + if (tableBean != null && tableBean.gsiMetaBean != null && !tableBean.gsiMetaBean.columnarIndex) { GsiMetaManager.GsiIndexMetaBean indexBean = tableBean.gsiMetaBean; // visit count, last access time String schema = indexBean.tableSchema; @@ -288,28 +285,8 @@ * @return empty set if no schema names specified in conditions */ Set getEqualSchemaNames(VirtualView virtualView, ExecutionContext executionContext) { - InformationSchemaGlobalIndexes informationSchemaGlobalIndexes = (InformationSchemaGlobalIndexes) virtualView; - // get schema indexes in filter condition, objects in {schemaIndexList} are like '?0', '?1' - List schemaIndexList =
virtualView.getIndex() - .get(informationSchemaGlobalIndexes.getTableSchemaIndex()); - - // use parameters to map the objects in {schemaIndexList} to schema names: '?0' -> 'db1' Map params = executionContext.getParams().getCurrentParameter(); - - Set schemaNames = new HashSet<>(); - if (CollectionUtils.isNotEmpty(schemaIndexList)) { - for (Object obj : schemaIndexList) { - if (obj instanceof RexDynamicParam) { - String schemaName = String.valueOf(params.get(((RexDynamicParam) obj).getIndex() + 1).getValue()); - schemaNames.add(schemaName.toLowerCase()); - } else if (obj instanceof RexLiteral) { - String schemaName = ((RexLiteral) obj).getValueAs(String.class); - schemaNames.add(schemaName.toLowerCase()); - } - } - } - - return schemaNames; + return virtualView.getEqualsFilterValues(InformationSchemaGlobalIndexes.getTableSchemaIndex(), params); } /** @@ -320,36 +297,8 @@ Set getEqualTableNames(VirtualView virtualView, ExecutionContext execut * @return empty set if no table names specified in conditions */ Set getEqualTableNames(VirtualView virtualView, ExecutionContext executionContext) { - InformationSchemaGlobalIndexes informationSchemaGlobalIndexes = (InformationSchemaGlobalIndexes) virtualView; - // get schema indexes in filter condition, objects in {schemaIndexList} are like '?0', '?1' - List tableIndexList = virtualView.getIndex() - .get(informationSchemaGlobalIndexes.getTableNameIndex()); - - // use parameters to map the objects in {tableIndexList} to table names: '?0' -> 'db1' Map params = executionContext.getParams().getCurrentParameter(); - - Set tableNames = new HashSet<>(); - if (CollectionUtils.isNotEmpty(tableIndexList)) { - for (Object obj : tableIndexList) { - if (obj instanceof RexDynamicParam) { - if (params.get(((RexDynamicParam) obj).getIndex() + 1).getValue() instanceof RawString) { - RawString rawString = (RawString) params.get(((RexDynamicParam) obj).getIndex() + 1).getValue(); - for (Object o : rawString.getObjList()) { - tableNames.add(String.valueOf(o).toLowerCase()); - } - } else { - String tableName = - String.valueOf(params.get(((RexDynamicParam) obj).getIndex() + 1).getValue()); - tableNames.add(tableName.toLowerCase()); - } - } else if (obj instanceof RexLiteral) { - String tableName = ((RexLiteral) obj).getValueAs(String.class); - tableNames.add(tableName.toLowerCase()); - } - } - } - - return tableNames; + return virtualView.getEqualsFilterValues(InformationSchemaGlobalIndexes.getTableNameIndex(), params); } private List queryGsiStatisticsByCondition(Set schemaNames, Set tableNames, diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaInnodbBufferHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaInnodbBufferHandler.java index 977a248a2..93b3f7ebe 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaInnodbBufferHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaInnodbBufferHandler.java @@ -16,6 +16,7 @@ package com.alibaba.polardbx.executor.handler.subhandler; +import com.alibaba.druid.util.JdbcUtils; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaInnodbLockWaitsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaInnodbLockWaitsHandler.java index ba8e6338e..5f989ce00 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaInnodbLockWaitsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaInnodbLockWaitsHandler.java @@ -16,14 +16,14 @@ package com.alibaba.polardbx.executor.handler.subhandler; -import com.alibaba.polardbx.executor.utils.ExecUtils; -import com.alibaba.polardbx.executor.utils.transaction.TransactionUtils; -import com.alibaba.polardbx.executor.utils.transaction.TrxLookupSet; -import com.alibaba.polardbx.group.jdbc.TGroupDataSource; import com.alibaba.polardbx.common.jdbc.IConnection; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.handler.VirtualViewHandler; +import com.alibaba.polardbx.executor.utils.ExecUtils; +import com.alibaba.polardbx.executor.utils.transaction.TransactionUtils; +import com.alibaba.polardbx.executor.utils.transaction.TrxLookupSet; +import com.alibaba.polardbx.group.jdbc.TGroupDataSource; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.view.InformationSchemaInnodbLockWaits; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaInnodbLocksHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaInnodbLocksHandler.java index 142145f75..d57a2efd8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaInnodbLocksHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaInnodbLocksHandler.java @@ -16,14 +16,14 @@ package com.alibaba.polardbx.executor.handler.subhandler; -import com.alibaba.polardbx.executor.utils.ExecUtils; -import com.alibaba.polardbx.executor.utils.transaction.TransactionUtils; -import com.alibaba.polardbx.executor.utils.transaction.TrxLookupSet; -import com.alibaba.polardbx.group.jdbc.TGroupDataSource; import com.alibaba.polardbx.common.jdbc.IConnection; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.handler.VirtualViewHandler; +import com.alibaba.polardbx.executor.utils.ExecUtils; +import com.alibaba.polardbx.executor.utils.transaction.TransactionUtils; +import com.alibaba.polardbx.executor.utils.transaction.TrxLookupSet; +import com.alibaba.polardbx.group.jdbc.TGroupDataSource; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import 
com.alibaba.polardbx.optimizer.view.InformationSchemaInnodbLocks; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaInnodbTrxHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaInnodbTrxHandler.java index b7911cc07..3c8f3a129 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaInnodbTrxHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaInnodbTrxHandler.java @@ -16,14 +16,14 @@ package com.alibaba.polardbx.executor.handler.subhandler; -import com.alibaba.polardbx.executor.utils.ExecUtils; -import com.alibaba.polardbx.executor.utils.transaction.TransactionUtils; -import com.alibaba.polardbx.executor.utils.transaction.TrxLookupSet; -import com.alibaba.polardbx.group.jdbc.TGroupDataSource; import com.alibaba.polardbx.common.jdbc.IConnection; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.handler.VirtualViewHandler; +import com.alibaba.polardbx.executor.utils.ExecUtils; +import com.alibaba.polardbx.executor.utils.transaction.TransactionUtils; +import com.alibaba.polardbx.executor.utils.transaction.TrxLookupSet; +import com.alibaba.polardbx.group.jdbc.TGroupDataSource; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.view.InformationSchemaInnodbTrx; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaJoinGroupHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaJoinGroupHandler.java index 7cb597ed7..8a859a959 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaJoinGroupHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaJoinGroupHandler.java @@ -18,7 +18,6 @@ import com.alibaba.polardbx.common.jdbc.ParameterContext; import com.alibaba.polardbx.common.utils.GeneralUtil; -import com.alibaba.polardbx.executor.balancer.stats.StatsUtils; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.handler.VirtualViewHandler; @@ -27,19 +26,13 @@ import com.alibaba.polardbx.gms.tablegroup.JoinGroupUtils; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.optimizer.core.function.calc.scalar.filter.Like; import com.alibaba.polardbx.optimizer.view.InformationSchemaJoinGroup; -import com.alibaba.polardbx.optimizer.view.InformationSchemaTableDetail; import com.alibaba.polardbx.optimizer.view.VirtualView; -import org.apache.calcite.rex.RexDynamicParam; -import org.apache.calcite.rex.RexLiteral; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeSet; -import java.util.stream.Collectors; /** * Created by luoyanxin. 
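// The hunk below collapses the hand-rolled RexDynamicParam / RexLiteral / Like
// handling into a single VirtualView helper. A condensed sketch of the resulting
// call pattern, with the generic types (dropped in this rendering) restored, and
// assuming, as the deleted code implies, that applyFilters narrows the candidate
// set by both the pushed-down equality values and the LIKE pattern for the column:
int schemaIndex = InformationSchemaJoinGroup.getTableSchemaIndex();
Map<Integer, ParameterContext> params = executionContext.getParams().getCurrentParameter();
schemaNames = virtualView.applyFilters(schemaIndex, params, schemaNames);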
@@ -58,54 +51,17 @@ public boolean isSupport(VirtualView virtualView) { @Override public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { - InformationSchemaJoinGroup informationSchemaJoinGroup = (InformationSchemaJoinGroup) virtualView; - List tableSchemaIndexValue = - virtualView.getIndex().get(informationSchemaJoinGroup.getTableSchemaIndex()); - - Object tableSchemaLikeValue = - virtualView.getLike().get(informationSchemaJoinGroup.getTableSchemaIndex()); - // only new partitioning db Set schemaNames = new TreeSet<>(String::compareToIgnoreCase); schemaNames.addAll(JoinGroupUtils.getDistinctSchemaNames(null)); + int schemaIndex = InformationSchemaJoinGroup.getTableSchemaIndex(); Map params = executionContext.getParams().getCurrentParameter(); - // schemaIndex - Set indexSchemaNames = new HashSet<>(); - if (tableSchemaIndexValue != null && !tableSchemaIndexValue.isEmpty()) { - for (Object obj : tableSchemaIndexValue) { - if (obj instanceof RexDynamicParam) { - String schemaName = String.valueOf(params.get(((RexDynamicParam) obj).getIndex() + 1).getValue()); - indexSchemaNames.add(schemaName.toLowerCase()); - } else if (obj instanceof RexLiteral) { - String schemaName = ((RexLiteral) obj).getValueAs(String.class); - indexSchemaNames.add(schemaName.toLowerCase()); - } - } - schemaNames = schemaNames.stream() - .filter(schemaName -> indexSchemaNames.contains(schemaName.toLowerCase())) - .collect(Collectors.toSet()); - } - - // schemaLike - String schemaLike = null; - if (tableSchemaLikeValue != null) { - if (tableSchemaLikeValue instanceof RexDynamicParam) { - schemaLike = - String.valueOf(params.get(((RexDynamicParam) tableSchemaLikeValue).getIndex() + 1).getValue()); - } else if (tableSchemaLikeValue instanceof RexLiteral) { - schemaLike = ((RexLiteral) tableSchemaLikeValue).getValueAs(String.class); - } - if (schemaLike != null) { - final String likeArg = schemaLike; - schemaNames = schemaNames.stream().filter(schemaName -> new Like(null, null).like(schemaName, likeArg)).collect( - Collectors.toSet()); - } - } + schemaNames = virtualView.applyFilters(schemaIndex, params, schemaNames); List joinGroupInfoRecords = JoinGroupUtils.getAllJoinGroupInfos(null); for (JoinGroupInfoRecord joinGroupInfoRecord : GeneralUtil.emptyIfNull(joinGroupInfoRecords)) { - if(!schemaNames.contains(joinGroupInfoRecord.tableSchema) ) { + if (!schemaNames.contains(joinGroupInfoRecord.tableSchema)) { continue; } List joinGroupTableDetailRecords = diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaLocalPartitionsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaLocalPartitionsHandler.java index 62ac3d325..773a7997b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaLocalPartitionsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaLocalPartitionsHandler.java @@ -35,12 +35,9 @@ import com.alibaba.polardbx.repo.mysql.checktable.LocalPartitionDescription; import com.alibaba.polardbx.repo.mysql.checktable.TableDescription; import com.alibaba.polardbx.repo.mysql.spi.MyRepository; -import org.apache.calcite.rex.RexDynamicParam; -import org.apache.calcite.rex.RexLiteral; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; -import java.util.HashSet; import java.util.List; import 
java.util.Map; import java.util.Set; @@ -62,13 +59,12 @@ public boolean isSupport(VirtualView virtualView) { @Override public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { - - InformationSchemaLocalPartitions localPartitionView = (InformationSchemaLocalPartitions) virtualView; + Map params = executionContext.getParams().getCurrentParameter(); Set equalSchemaNames = - getFilterValues(virtualView, localPartitionView.getTableSchemaIndex(), executionContext); + virtualView.getEqualsFilterValues(InformationSchemaLocalPartitions.getTableSchemaIndex(), params); Set equalTableNames = - getFilterValues(virtualView, localPartitionView.getTableNameIndex(), executionContext); + virtualView.getEqualsFilterValues(InformationSchemaLocalPartitions.getTableNameIndex(), params); if (CollectionUtils.isEmpty(equalSchemaNames) || CollectionUtils.size(equalSchemaNames) != 1) { throw new TddlNestableRuntimeException("table_schema must be specified"); @@ -141,26 +137,5 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, return cursor; } - - Set getFilterValues(VirtualView virtualView, int index, ExecutionContext executionContext) { - List indexList = virtualView.getIndex().get(index); - - Map params = executionContext.getParams().getCurrentParameter(); - - Set tableNames = new HashSet<>(); - if (CollectionUtils.isNotEmpty(indexList)) { - for (Object obj : indexList) { - if (obj instanceof RexDynamicParam) { - String tableName = String.valueOf(params.get(((RexDynamicParam) obj).getIndex() + 1).getValue()); - tableNames.add(tableName.toLowerCase()); - } else if (obj instanceof RexLiteral) { - String tableName = ((RexLiteral) obj).getValueAs(String.class); - tableNames.add(tableName.toLowerCase()); - } - } - } - - return tableNames; - } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaLocalPartitionsScheduleHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaLocalPartitionsScheduleHandler.java index 4de0ce851..cfad8bf3a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaLocalPartitionsScheduleHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaLocalPartitionsScheduleHandler.java @@ -89,26 +89,5 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, return cursor; } - - Set getFilterValues(VirtualView virtualView, int index, ExecutionContext executionContext) { - List indexList = virtualView.getIndex().get(index); - - Map params = executionContext.getParams().getCurrentParameter(); - - Set tableNames = new HashSet<>(); - if (CollectionUtils.isNotEmpty(indexList)) { - for (Object obj : indexList) { - if (obj instanceof RexDynamicParam) { - String tableName = String.valueOf(params.get(((RexDynamicParam) obj).getIndex() + 1).getValue()); - tableNames.add(tableName.toLowerCase()); - } else if (obj instanceof RexLiteral) { - String tableName = ((RexLiteral) obj).getValueAs(String.class); - tableNames.add(tableName.toLowerCase()); - } - } - } - - return tableNames; - } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaLocalityInfoHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaLocalityInfoHandler.java index 2f76dc8f0..2d92333aa 100644 
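// The two deletions above retire per-handler copies of getFilterValues in favor of
// VirtualView.getEqualsFilterValues. For reference, a sketch of what each removed
// helper did: unwrap every pushed-down equality operand (Calcite's RexDynamicParam
// or RexLiteral, with PolarDB-X's ParameterContext assumed imported) and collect
// its lower-cased string value.
static Set<String> equalsFilterValues(List<Object> operands, Map<Integer, ParameterContext> params) {
    Set<String> values = new HashSet<>();
    for (Object obj : operands) {
        if (obj instanceof RexDynamicParam) {
            // plan-level dynamic params are 0-based; the parameter map is 1-based
            Object value = params.get(((RexDynamicParam) obj).getIndex() + 1).getValue();
            values.add(String.valueOf(value).toLowerCase());
        } else if (obj instanceof RexLiteral) {
            values.add(((RexLiteral) obj).getValueAs(String.class).toLowerCase());
        }
    }
    return values;
}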
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaLocalityInfoHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaLocalityInfoHandler.java @@ -128,9 +128,7 @@ private void handleNewPartitionDatabase(ExecutionContext executionContext, Strin partitionGroupRecordList = tableGroupConfig.getPartitionGroupRecords(); objectName = tableGroupConfig.getTableGroupRecord().getTg_name(); locality = tableGroupConfig.getLocalityDesc().toString(); - List tableList = - tableGroupConfig.getAllTables().stream().map(o -> o.getTableName()).collect(Collectors.toList()); - String tableListString = String.join(",", tableList); + String tableListString = String.join(",", tableGroupConfig.getAllTables()); result.addRow( new Object[] {schemaName, "tablegroup", objectName, objectId, "", locality, tableListString, ""}); for (PartitionGroupRecord partitionGroupRecord : partitionGroupRecordList) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaMetadataLockHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaMetadataLockHandler.java index 495692f57..67cd47455 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaMetadataLockHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaMetadataLockHandler.java @@ -23,6 +23,7 @@ import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.sync.ISyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.view.InformationSchemaMetadataLock; @@ -64,7 +65,7 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, } catch (Exception e) { throw new TddlRuntimeException(ErrorCode.ERR_CONFIG, e, e.getMessage()); } - List>> results = SyncManagerHelper.sync(syncAction); + List>> results = SyncManagerHelper.sync(syncAction, SyncScope.CURRENT_ONLY); for (List> rs : results) { if (rs == null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaModuleEventHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaModuleEventHandler.java index e390410ae..0817b4467 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaModuleEventHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaModuleEventHandler.java @@ -24,10 +24,10 @@ import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.sync.ISyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.topology.SystemDbHelper; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.optimizer.view.InformationSchemaModule; import com.alibaba.polardbx.optimizer.view.InformationSchemaModuleEvent; import 
com.alibaba.polardbx.optimizer.view.VirtualView; import com.google.common.collect.Lists; @@ -75,7 +75,7 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, } List>> results = - SyncManagerHelper.sync(showSyncAction, SystemDbHelper.INFO_SCHEMA_DB_NAME); + SyncManagerHelper.sync(showSyncAction, SystemDbHelper.INFO_SCHEMA_DB_NAME, SyncScope.CURRENT_ONLY); List rows = Lists.newArrayList(); for (List> nodeRows : results) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaModuleHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaModuleHandler.java index 68bd460cb..4baa5d4b7 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaModuleHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaModuleHandler.java @@ -24,11 +24,11 @@ import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.sync.ISyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.topology.SystemDbHelper; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.view.InformationSchemaModule; -import com.alibaba.polardbx.optimizer.view.InformationSchemaWorkload; import com.alibaba.polardbx.optimizer.view.VirtualView; import java.util.List; @@ -75,7 +75,7 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, } List>> results = - SyncManagerHelper.sync(showSyncAction, SystemDbHelper.INFO_SCHEMA_DB_NAME); + SyncManagerHelper.sync(showSyncAction, SystemDbHelper.INFO_SCHEMA_DB_NAME, SyncScope.CURRENT_ONLY); for (List> nodeRows : results) { if (nodeRows == null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaOptimizerAlertHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaOptimizerAlertHandler.java index e6d022e9e..25be56bde 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaOptimizerAlertHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaOptimizerAlertHandler.java @@ -21,6 +21,7 @@ import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.sync.OptimizerAlertViewSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.view.InformationSchemaOptimizerAlert; @@ -43,7 +44,7 @@ public boolean isSupport(VirtualView virtualView) { @Override public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { List>> results = SyncManagerHelper.syncWithDefaultDB( - new OptimizerAlertViewSyncAction()); + new OptimizerAlertViewSyncAction(), SyncScope.CURRENT_ONLY); for (List> nodeRows : results) { if (nodeRows == null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaPartitionsHandler.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaPartitionsHandler.java index df991707c..8da6ee0e0 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaPartitionsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaPartitionsHandler.java @@ -22,31 +22,20 @@ import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.handler.VirtualViewHandler; -import com.alibaba.polardbx.executor.utils.ExecUtils; import com.alibaba.polardbx.executor.utils.PartitionMetaUtil; import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.optimizer.config.table.SchemaManager; -import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.optimizer.core.function.calc.scalar.filter.Like; import com.alibaba.polardbx.optimizer.partition.PartitionInfo; -import com.alibaba.polardbx.optimizer.partition.common.PartitionTableType; import com.alibaba.polardbx.optimizer.view.InformationSchemaPartitions; -import com.alibaba.polardbx.optimizer.view.InformationSchemaTableDetail; import com.alibaba.polardbx.optimizer.view.VirtualView; -import org.apache.calcite.rex.RexDynamicParam; -import org.apache.calcite.rex.RexLiteral; import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeSet; -import java.util.stream.Collectors; /** * @author chenghui.lch @@ -64,72 +53,20 @@ public boolean isSupport(VirtualView virtualView) { @Override public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { - - InformationSchemaPartitions informationSchemaPartitions = (InformationSchemaPartitions) virtualView; - // only new partitioning db Set schemaNames = new TreeSet<>(String::compareToIgnoreCase); schemaNames.addAll(StatsUtils.getDistinctSchemaNames()); - List tableSchemaIndexValue = - virtualView.getIndex().get(informationSchemaPartitions.getTableSchemaIndex()); - - Object tableSchemaLikeValue = - virtualView.getLike().get(informationSchemaPartitions.getTableSchemaIndex()); - - List tableNameIndexValue = - virtualView.getIndex().get(informationSchemaPartitions.getTableNameIndex()); - - Object tableNameLikeValue = - virtualView.getLike().get(informationSchemaPartitions.getTableNameIndex()); - + final int schemaIndex = InformationSchemaPartitions.getTableSchemaIndex(); + final int tableIndex = InformationSchemaPartitions.getTableNameIndex(); Map params = executionContext.getParams().getCurrentParameter(); - // schemaIndex - Set indexSchemaNames = new HashSet<>(); - if (tableSchemaIndexValue != null && !tableSchemaIndexValue.isEmpty()) { - for (Object obj : tableSchemaIndexValue) { - ExecUtils.handleTableNameParams(obj, params, indexSchemaNames); - } - schemaNames = schemaNames.stream() - .filter(schemaName -> indexSchemaNames.contains(schemaName.toLowerCase())) - .collect(Collectors.toSet()); - } - - // schemaLike - String schemaLike = null; - if (tableSchemaLikeValue != null) { - if (tableSchemaLikeValue instanceof RexDynamicParam) { - schemaLike = - 
String.valueOf(params.get(((RexDynamicParam) tableSchemaLikeValue).getIndex() + 1).getValue()); - } else if (tableSchemaLikeValue instanceof RexLiteral) { - schemaLike = ((RexLiteral) tableSchemaLikeValue).getValueAs(String.class); - } - if (schemaLike != null) { - final String likeArg = schemaLike; - schemaNames = schemaNames.stream().filter(schemaName -> new Like().like(schemaName, likeArg)).collect( - Collectors.toSet()); - } - } + schemaNames = virtualView.applyFilters(schemaIndex, params, schemaNames); // tableIndex - Set indexTableNames = new HashSet<>(); - if (tableNameIndexValue != null && !tableNameIndexValue.isEmpty()) { - for (Object obj : tableNameIndexValue) { - ExecUtils.handleTableNameParams(obj, params, indexTableNames); - } - } - + Set indexTableNames = virtualView.getEqualsFilterValues(tableIndex, params); // tableLike - String tableLike = null; - if (tableNameLikeValue != null) { - if (tableNameLikeValue instanceof RexDynamicParam) { - tableLike = - String.valueOf(params.get(((RexDynamicParam) tableNameLikeValue).getIndex() + 1).getValue()); - } else if (tableNameLikeValue instanceof RexLiteral) { - tableLike = ((RexLiteral) tableNameLikeValue).getValueAs(String.class); - } - } + String tableLike = virtualView.getLikeString(tableIndex, params); handlePartitionsStat(schemaNames, indexTableNames, tableLike, executionContext, cursor); return cursor; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaPlanCacheCapacityHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaPlanCacheCapacityHandler.java index 18f5bb964..d5d75a588 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaPlanCacheCapacityHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaPlanCacheCapacityHandler.java @@ -21,8 +21,8 @@ import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.sync.FetchPlanCacheCapacitySyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.topology.SystemDbHelper; -import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.view.InformationSchemaPlanCacheCapacity; @@ -30,7 +30,6 @@ import java.util.List; import java.util.Map; -import java.util.Set; /** * Fetch the plan cache entry count and total capacity under each schema @@ -49,7 +48,8 @@ public boolean isSupport(VirtualView virtualView) { @Override public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { List>> results = - SyncManagerHelper.sync(new FetchPlanCacheCapacitySyncAction(), SystemDbHelper.INFO_SCHEMA_DB_NAME); + SyncManagerHelper.sync(new FetchPlanCacheCapacitySyncAction(), SystemDbHelper.INFO_SCHEMA_DB_NAME, + SyncScope.CURRENT_ONLY); for (List> nodeRows : results) { if (nodeRows == null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaPlanCacheHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaPlanCacheHandler.java index 34d0ad510..6be9271d2 100644 ---
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaPlanCacheHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaPlanCacheHandler.java @@ -21,8 +21,8 @@ import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.sync.FetchPlanCacheSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.topology.SystemDbHelper; -import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.view.InformationSchemaPlanCache; @@ -30,7 +30,6 @@ import java.util.List; import java.util.Map; -import java.util.Set; /** * @author dylan @@ -48,44 +47,36 @@ public boolean isSupport(VirtualView virtualView) { @Override public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { + List>> results = SyncManagerHelper.sync(new FetchPlanCacheSyncAction(""), + SystemDbHelper.INFO_SCHEMA_DB_NAME, SyncScope.CURRENT_ONLY); - Set schemaNames = OptimizerContext.getActiveSchemaNames(); - for (String schemaName : schemaNames) { - - if (SystemDbHelper.CDC_DB_NAME.equalsIgnoreCase(schemaName)) { + for (List> nodeRows : results) { + if (nodeRows == null) { continue; } - List>> results = SyncManagerHelper.sync(new FetchPlanCacheSyncAction(schemaName), - schemaName); - - for (List> nodeRows : results) { - if (nodeRows == null) { - continue; - } - - for (Map row : nodeRows) { + for (Map row : nodeRows) { - final String host = DataTypes.StringType.convertFrom(row.get("COMPUTE_NODE")); - final String tableNames = DataTypes.StringType.convertFrom(row.get("TABLE_NAMES")); - final String id = DataTypes.StringType.convertFrom(row.get("ID")); - final Long hitCount = DataTypes.LongType.convertFrom(row.get("HIT_COUNT")); - final String sql = DataTypes.StringType.convertFrom(row.get("SQL")); - final Long typeDigest = DataTypes.LongType.convertFrom(row.get("TYPE_DIGEST")); - final String plan = DataTypes.StringType.convertFrom(row.get("PLAN")); - String parameter = DataTypes.StringType.convertFrom(row.get("PARAMETER")); - cursor.addRow(new Object[] { - host, - schemaName, - tableNames, - id, - hitCount, - sql, - typeDigest, - plan, - parameter - }); - } + final String host = DataTypes.StringType.convertFrom(row.get("COMPUTE_NODE")); + final String schemaName = DataTypes.StringType.convertFrom(row.get("SCHEMA_NAME")); + final String tableNames = DataTypes.StringType.convertFrom(row.get("TABLE_NAMES")); + final String id = DataTypes.StringType.convertFrom(row.get("ID")); + final Long hitCount = DataTypes.LongType.convertFrom(row.get("HIT_COUNT")); + final String sql = DataTypes.StringType.convertFrom(row.get("SQL")); + final Long typeDigest = DataTypes.LongType.convertFrom(row.get("TYPE_DIGEST")); + final String plan = DataTypes.StringType.convertFrom(row.get("PLAN")); + String parameter = DataTypes.StringType.convertFrom(row.get("PARAMETER")); + cursor.addRow(new Object[] { + host, + schemaName, + tableNames, + id, + hitCount, + sql, + typeDigest, + plan, + parameter + }); } } return cursor; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaPolardbxTrxHandler.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaPolardbxTrxHandler.java index 4b0a59d6a..336375c3e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaPolardbxTrxHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaPolardbxTrxHandler.java @@ -23,6 +23,7 @@ import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.sync.ISyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.view.InformationSchemaPolardbxTrx; import com.alibaba.polardbx.optimizer.view.VirtualView; @@ -66,7 +67,7 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, } final String schema = executionContext.getSchemaName(); - List>> results = SyncManagerHelper.sync(syncAction, schema); + List>> results = SyncManagerHelper.sync(syncAction, schema, SyncScope.ALL); for (List> rs : results) { if (rs == null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaProcedureCacheCapacityHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaProcedureCacheCapacityHandler.java index 6ff824443..bf0284009 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaProcedureCacheCapacityHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaProcedureCacheCapacityHandler.java @@ -21,6 +21,7 @@ import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.sync.FetchProcedureCacheCapacitySyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.view.InformationSchemaProcedureCacheCapacity; @@ -41,7 +42,8 @@ public boolean isSupport(VirtualView virtualView) { @Override public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { - List>> results = SyncManagerHelper.sync(new FetchProcedureCacheCapacitySyncAction()); + List>> results = SyncManagerHelper.sync(new FetchProcedureCacheCapacitySyncAction(), + SyncScope.NOT_COLUMNAR_SLAVE); for (List> nodeRows : results) { if (nodeRows == null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaProcedureCacheHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaProcedureCacheHandler.java index 8ffe7c1bb..c9949e9fc 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaProcedureCacheHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaProcedureCacheHandler.java @@ -22,6 +22,7 @@ import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.sync.FetchProcedureCacheSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import 
com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.view.InformationSchemaProcedureCache; @@ -44,7 +45,7 @@ public boolean isSupport(VirtualView virtualView) { public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { List>> results = SyncManagerHelper.sync(new FetchProcedureCacheSyncAction(), - TddlConstants.INFORMATION_SCHEMA); + TddlConstants.INFORMATION_SCHEMA, SyncScope.ALL); for (List> nodeRows : results) { if (nodeRows == null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaQueryInfoHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaQueryInfoHandler.java index 66c434d58..45d2a21c6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaQueryInfoHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaQueryInfoHandler.java @@ -24,6 +24,7 @@ import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.sync.ISyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.view.InformationSchemaQueryInfo; @@ -70,7 +71,7 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, } List>> results = SyncManagerHelper.sync(showQueryListSyncAction, - executionContext.getSchemaName()); + executionContext.getSchemaName(), SyncScope.ALL); for (List> nodeRows : results) { if (nodeRows == null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaReactorPerfHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaReactorPerfHandler.java index 95bf1d15f..c34cc0b24 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaReactorPerfHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaReactorPerfHandler.java @@ -23,6 +23,7 @@ import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.sync.ISyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.view.InformationSchemaReactorPerf; @@ -64,7 +65,7 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, } catch (Exception e) { throw new TddlRuntimeException(ErrorCode.ERR_CONFIG, e, e.getMessage()); } - List>> results = SyncManagerHelper.sync(syncAction); + List>> results = SyncManagerHelper.sync(syncAction, SyncScope.CURRENT_ONLY); for (List> rs : results) { if (rs == null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaRebalanceBackFillHandler.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaRebalanceBackFillHandler.java index 02f233d0c..9a0926c4a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaRebalanceBackFillHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaRebalanceBackFillHandler.java @@ -27,7 +27,6 @@ import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.ddl.job.task.CostEstimableDdlTask; import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlJobManager; -import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlPlanManager; import com.alibaba.polardbx.executor.ddl.newengine.sync.DdlBackFillSpeedSyncAction; import com.alibaba.polardbx.executor.ddl.newengine.utils.TaskHelper; import com.alibaba.polardbx.executor.gsi.GsiBackfillManager; @@ -35,7 +34,6 @@ import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.gms.metadb.misc.DdlEngineRecord; import com.alibaba.polardbx.gms.metadb.misc.DdlEngineTaskRecord; -import com.alibaba.polardbx.gms.scheduler.DdlPlanRecord; import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.topology.SystemDbHelper; import com.alibaba.polardbx.optimizer.context.ExecutionContext; @@ -47,7 +45,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.stream.Collectors; /** @@ -61,7 +58,6 @@ public InformationSchemaRebalanceBackFillHandler(VirtualViewHandler virtualViewH super(virtualViewHandler); } - @Override public boolean isSupport(VirtualView virtualView) { return virtualView instanceof InformationSchemaRebalanceBackFill; @@ -72,13 +68,14 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, return buildRebalanceBackFillView(cursor); } - public static Cursor buildRebalanceBackFillView(ArrayResultCursor cursor){ + public static Cursor buildRebalanceBackFillView(ArrayResultCursor cursor) { GsiBackfillManager backfillManager = new GsiBackfillManager(SystemDbHelper.DEFAULT_DB_NAME); DdlJobManager ddlJobManager = new DdlJobManager(); List ddlRecordList = ddlJobManager.fetchRecords(DdlState.ALL_STATES); //filter out REBALANCE JOBs - ddlRecordList = ddlRecordList.stream().filter(e -> DdlType.valueOf(e.ddlType) == DdlType.REBALANCE).collect(Collectors.toList()); - if(CollectionUtils.isEmpty(ddlRecordList)){ + ddlRecordList = ddlRecordList.stream().filter(e -> DdlType.valueOf(e.ddlType) == DdlType.REBALANCE) + .collect(Collectors.toList()); + if (CollectionUtils.isEmpty(ddlRecordList)) { return cursor; } @@ -89,13 +86,14 @@ public static Cursor buildRebalanceBackFillView(ArrayResultCursor cursor){ List allTasks = ddlJobManager.fetchAllSuccessiveTaskByJobId(jobId); List allRootTasks = - allTasks.stream().filter(e -> e.getJobId() == jobId).collect(Collectors.toList()); + allTasks.stream().filter(e -> e.getJobId() == jobId).collect(Collectors.toList()); //all submitted BackFill tasks, and yet there may be some BackFill tasks that haven't been submitted List allBackFillTasks = - allTasks.stream().filter(e -> StringUtils.containsIgnoreCase(e.getName(), "BackFill")) - .collect(Collectors.toList()); + allTasks.stream().filter(e -> StringUtils.containsIgnoreCase(e.getName(), "BackFill")) + .collect(Collectors.toList()); List backFillAggInfoList = - backfillManager.queryBackFillAggInfoById(allBackFillTasks.stream().map(e ->
e.taskId).collect(Collectors.toList())); + backfillManager.queryBackFillAggInfoById( + allBackFillTasks.stream().map(e -> e.taskId).collect(Collectors.toList())); long totalRows = 0L; long totalSize = 0L; @@ -110,18 +108,18 @@ public static Cursor buildRebalanceBackFillView(ArrayResultCursor cursor){ for (GsiBackfillManager.BackFillAggInfo backFillAggInfo : backFillAggInfoList) { ThrottleInfo throttleInfo = throttleInfoMap.get(backFillAggInfo.getBackFillId()); - long duration = backFillAggInfo.getDuration()==0 ? 1L: backFillAggInfo.getDuration(); + long duration = backFillAggInfo.getDuration() == 0 ? 1L : backFillAggInfo.getDuration(); addRow( - cursor, - jobId, - backFillAggInfo.getBackFillId(), - backFillAggInfo.getTableSchema(), - backFillAggInfo.getStartTime(), - GsiBackfillManager.BackfillStatus.display(backFillAggInfo.getStatus()), - throttleInfo==null? "0" : throttleInfo.getSpeed(), - backFillAggInfo.getSuccessRowCount()/duration, - backFillAggInfo.getSuccessRowCount(), - totalRows + cursor, + jobId, + backFillAggInfo.getBackFillId(), + backFillAggInfo.getTableSchema(), + backFillAggInfo.getStartTime(), + GsiBackfillManager.BackfillStatus.display(backFillAggInfo.getStatus()), + throttleInfo == null ? "0" : throttleInfo.getSpeed(), + backFillAggInfo.getSuccessRowCount() / duration, + backFillAggInfo.getSuccessRowCount(), + totalRows ); } } @@ -129,34 +127,36 @@ public static Cursor buildRebalanceBackFillView(ArrayResultCursor cursor){ return cursor; } - private static Map collectThrottleInfoMap(){ + private static Map collectThrottleInfoMap() { Map throttleInfoMap = new HashMap<>(); for (ThrottleInfo throttleInfo : Throttle.getThrottleInfoList()) { throttleInfoMap.put(throttleInfo.getBackFillId(), throttleInfo); } try { List>> result = SyncManagerHelper.sync( - new DdlBackFillSpeedSyncAction(), SystemDbHelper.DEFAULT_DB_NAME, SyncScope.MASTER_ONLY); + new DdlBackFillSpeedSyncAction(), SystemDbHelper.DEFAULT_DB_NAME, SyncScope.MASTER_ONLY); for (List> list : GeneralUtil.emptyIfNull(result)) { for (Map map : GeneralUtil.emptyIfNull(list)) { throttleInfoMap.put(Long.parseLong(String.valueOf(map.get("BACKFILL_ID"))), - new ThrottleInfo( - Long.parseLong(String.valueOf(map.get("BACKFILL_ID"))), - Double.parseDouble(String.valueOf(map.get("SPEED"))), - Long.parseLong(String.valueOf(map.get("TOTAL_ROWS"))) - )); + new ThrottleInfo( + Long.parseLong(String.valueOf(map.get("BACKFILL_ID"))), + Double.parseDouble(String.valueOf(map.get("SPEED"))), + Long.parseLong(String.valueOf(map.get("TOTAL_ROWS"))) + )); } } - }catch (Exception e){ + } catch (Exception e) { LOGGER.error("collect ThrottleInfo from remote nodes error", e); } return throttleInfoMap; } private static void addRow(ArrayResultCursor cursor, long jobId, long taskId, String schemaName, - String startTime, - String state, Object currentSpeed, Object averageSpeed, long finishedRows, long totalRows) { - cursor.addRow(new Object[]{jobId, taskId, schemaName, startTime, state, currentSpeed, averageSpeed, finishedRows, totalRows}); + String startTime, + String state, Object currentSpeed, Object averageSpeed, long finishedRows, + long totalRows) { + cursor.addRow(new Object[] { + jobId, taskId, schemaName, startTime, state, currentSpeed, averageSpeed, finishedRows, totalRows}); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaRebalanceProgressHandler.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaRebalanceProgressHandler.java
new file mode 100644
index 000000000..957cd942d
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaRebalanceProgressHandler.java
@@ -0,0 +1,445 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.handler.subhandler;
+
+import com.alibaba.polardbx.common.ddl.newengine.DdlState;
+import com.alibaba.polardbx.common.ddl.newengine.DdlTaskState;
+import com.alibaba.polardbx.common.ddl.newengine.DdlType;
+import com.alibaba.polardbx.common.utils.GeneralUtil;
+import com.alibaba.polardbx.executor.cursor.Cursor;
+import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor;
+import com.alibaba.polardbx.executor.ddl.job.task.CostEstimableDdlTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.CloneTableDataFileTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.DdlBackfillCostRecordTask;
+import com.alibaba.polardbx.executor.ddl.job.task.basic.PhysicalBackfillTask;
+import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlJobManager;
+import com.alibaba.polardbx.executor.ddl.newengine.utils.TaskHelper;
+import com.alibaba.polardbx.executor.handler.VirtualViewHandler;
+import com.alibaba.polardbx.executor.physicalbackfill.PhysicalBackfillManager;
+import com.alibaba.polardbx.gms.metadb.misc.DdlEngineRecord;
+import com.alibaba.polardbx.gms.metadb.misc.DdlEngineTaskRecord;
+import com.alibaba.polardbx.gms.tablegroup.ComplexTaskOutlineRecord;
+import com.alibaba.polardbx.gms.topology.SystemDbHelper;
+import com.alibaba.polardbx.optimizer.config.table.ComplexTaskMetaManager;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.view.InformationSchemaRebalanceProgress;
+import com.alibaba.polardbx.optimizer.view.VirtualView;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.lang3.StringUtils;
+
+import java.sql.Timestamp;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+/**
+ * Created by luoyanxin.
+ * + * @author luoyanxin + */ +public class InformationSchemaRebalanceProgressHandler extends BaseVirtualViewSubClassHandler { + public InformationSchemaRebalanceProgressHandler(VirtualViewHandler virtualViewHandler) { + super(virtualViewHandler); + } + + @Override + public boolean isSupport(VirtualView virtualView) { + return virtualView instanceof InformationSchemaRebalanceProgress; + } + + @Override + public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { + return buildRebalanceBackFillView(cursor); + } + + public static Cursor buildRebalanceBackFillView(ArrayResultCursor cursor) { + + DdlJobManager ddlJobManager = new DdlJobManager(); + List ddlRecordList = ddlJobManager.fetchRecords(DdlState.ALL_STATES); + ddlRecordList = ddlRecordList.stream().filter(e -> !e.isSubJob()).collect(Collectors.toList()); + List mayMovePartitionList = ddlRecordList.stream().filter( + e -> DdlType.valueOf(e.ddlType) == DdlType.ALTER_TABLEGROUP + || DdlType.valueOf(e.ddlType) == DdlType.ALTER_TABLE).collect(Collectors.toList()); + + //filter out REBALANCE JOBs + List rebalanceList = ddlRecordList.stream().filter( + e -> DdlType.valueOf(e.ddlType) == DdlType.REBALANCE || DdlType.valueOf(e.ddlType) == DdlType.MOVE_DATABASE) + .collect(Collectors.toList()); + + for (DdlEngineRecord ddlRecord : mayMovePartitionList) { + List complexTaskOutlineRecords = + ComplexTaskMetaManager.getMovePartitionTasksBySchJob(ddlRecord.schemaName, ddlRecord.jobId); + if (GeneralUtil.isNotEmpty(complexTaskOutlineRecords)) { + rebalanceList.add(ddlRecord); + } + } + + if (CollectionUtils.isEmpty(rebalanceList)) { + return cursor; + } + + for (DdlEngineRecord record : rebalanceList) { + Long logicalTableCount = 0L; + final Long jobId = record.jobId; + List allTasks = ddlJobManager.fetchAllSuccessiveTaskByJobId(jobId); + List allRootTasks = + allTasks.stream().filter(e -> e.getJobId() == jobId).collect(Collectors.toList()); + for (DdlEngineTaskRecord taskRecord : allRootTasks) { + if (StringUtils.isEmpty(taskRecord.getCost())) { + continue; + } + CostEstimableDdlTask.CostInfo costInfo = TaskHelper.decodeCostInfo(taskRecord.getCost()); + logicalTableCount += costInfo.tableCount; + } + + List allImportTableSpaceTasks = + allTasks.stream().filter(e -> StringUtils.containsIgnoreCase(e.getName(), "importTableSpaceDdlTask")) + .collect(Collectors.toList()); + if (GeneralUtil.isEmpty(allImportTableSpaceTasks)) { + continue; + } + addBackfillRow(cursor, record, allTasks, logicalTableCount); + addImportTableSpaceRow(cursor, record, allTasks, logicalTableCount); + addDataValidationRow(cursor, record, allTasks, logicalTableCount); + } + + return cursor; + } + + private static void addBackfillRow(ArrayResultCursor cursor, DdlEngineRecord ddlRecord, + List allTasks, Long logicalTableCount) { + Long jobId = ddlRecord.jobId; + PhysicalBackfillManager backfillManager = new PhysicalBackfillManager(SystemDbHelper.DEFAULT_DB_NAME); + List allRootTasks = + allTasks.stream().filter(e -> e.getJobId() == jobId).collect(Collectors.toList()); + //all submitted BackFill tasks, and yet there may be some BackFill tasks haven't been submitted + List allBackFillTasks = + allTasks.stream().filter(e -> (StringUtils.containsIgnoreCase(e.getName(), "PhysicalBackfillTask"))) + .collect(Collectors.toList()); + + List allCloneTableDataFileTasks = + allTasks.stream().filter(e -> (StringUtils.containsIgnoreCase(e.getName(), "CloneTableDataFileTask"))) + .collect(Collectors.toList()); + + if 
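buildRebalanceBackFillView above selects which DDL jobs count as rebalance work: top-level REBALANCE and MOVE_DATABASE jobs qualify directly, while ALTER_TABLEGROUP / ALTER_TABLE jobs qualify only when they actually move partitions. A simplified sketch of that selection, using a hypothetical record in place of DdlEngineRecord:

```java
import java.util.List;
import java.util.stream.Collectors;

public class RebalanceJobFilter {

    enum DdlType { REBALANCE, MOVE_DATABASE, ALTER_TABLEGROUP, ALTER_TABLE, OTHER }

    // Hypothetical stand-in for DdlEngineRecord; movesPartitions() approximates
    // the ComplexTaskMetaManager.getMovePartitionTasksBySchJob(...) lookup.
    record DdlRecord(long jobId, DdlType type, boolean subJob, boolean movesPartitions) {}

    static List<DdlRecord> selectRebalanceJobs(List<DdlRecord> all) {
        List<DdlRecord> topLevel = all.stream()
            .filter(r -> !r.subJob())
            .collect(Collectors.toList());
        // Explicit rebalance / move-database jobs always qualify.
        List<DdlRecord> selected = topLevel.stream()
            .filter(r -> r.type() == DdlType.REBALANCE || r.type() == DdlType.MOVE_DATABASE)
            .collect(Collectors.toList());
        // ALTER TABLEGROUP / ALTER TABLE qualify only when they move partitions.
        topLevel.stream()
            .filter(r -> (r.type() == DdlType.ALTER_TABLEGROUP || r.type() == DdlType.ALTER_TABLE)
                && r.movesPartitions())
            .forEach(selected::add);
        return selected;
    }

    public static void main(String[] args) {
        List<DdlRecord> jobs = List.of(
            new DdlRecord(1, DdlType.REBALANCE, false, false),
            new DdlRecord(2, DdlType.ALTER_TABLE, false, true),
            new DdlRecord(3, DdlType.ALTER_TABLE, false, false));
        System.out.println(selectRebalanceJobs(jobs).size()); // 2
    }
}
```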
(GeneralUtil.isEmpty(allCloneTableDataFileTasks)) { + //ignore backfill? + } else { + //the physical_backfill_object use cloneTask's taskId as it's jobId + List backFillAggInfoList = + backfillManager.queryBackFillAggInfoById( + allCloneTableDataFileTasks.stream().map(e -> e.taskId).collect(Collectors.toList())); + + long totalSize = 0L; + for (DdlEngineTaskRecord taskRecord : allRootTasks) { + if (StringUtils.isEmpty(taskRecord.getCost())) { + continue; + } + CostEstimableDdlTask.CostInfo costInfo = TaskHelper.decodeCostInfo(taskRecord.getCost()); + totalSize += costInfo.dataSize; + } + if (totalSize == 0L) { + //this is trigger by alter tablegroup move partition or move database command + //not trigger by rebalance database/cluster + List costTasks = allTasks.stream() + .filter(e -> StringUtils.containsIgnoreCase(e.getName(), DdlBackfillCostRecordTask.getTaskName())) + .collect(Collectors.toList()); + for (DdlEngineTaskRecord costRecordTask : costTasks) { + if (StringUtils.isEmpty(costRecordTask.getCost())) { + continue; + } + CostEstimableDdlTask.CostInfo costInfo = TaskHelper.decodeCostInfo(costRecordTask.getCost()); + totalSize += costInfo.dataSize; + } + } + + Timestamp startTime = null; + Timestamp endTime = null; + long successBufferSize = 0l; + boolean allBackFillFinished = true; + boolean backFillFailed = false; + int successCount = 0; + int failureCount = 0; + int runningCount = 0; + int initialCount = 0; + for (PhysicalBackfillManager.BackFillAggInfo backFillAggInfo : backFillAggInfoList) { + if (startTime == null) { + startTime = backFillAggInfo.getStartTime(); + endTime = backFillAggInfo.getEndTime(); + } else { + if (backFillAggInfo.getStartTime().before(startTime)) { + startTime = backFillAggInfo.getStartTime(); + } + if (backFillAggInfo.getEndTime().after(endTime)) { + endTime = backFillAggInfo.getEndTime(); + } + } + successBufferSize += backFillAggInfo.getSuccessBufferSize(); + if (backFillAggInfo.getStatus() != PhysicalBackfillManager.BackfillStatus.SUCCESS.getValue()) { + allBackFillFinished = false; + } + if (backFillAggInfo.getStatus() == PhysicalBackfillManager.BackfillStatus.FAILED.getValue()) { + backFillFailed = true; + } + if (backFillAggInfo.getStatus() == PhysicalBackfillManager.BackfillStatus.SUCCESS.getValue()) { + successCount++; + } + } + + logicalTableCount = + logicalTableCount < allBackFillTasks.size() ? 
allBackFillTasks.size() : logicalTableCount; + + successCount = 0; + for (DdlEngineTaskRecord backfillTask : allBackFillTasks) { + DdlTaskState state = DdlTaskState.valueOf(backfillTask.state); + switch (state) { + case READY: + initialCount++; + break; + case DIRTY: + runningCount++; + break; + case SUCCESS: + successCount++; + break; + case ROLLBACK_SUCCESS: + failureCount++; + break; + } + } + + allBackFillFinished = + !backFillFailed && allBackFillFinished && backFillAggInfoList.size() == logicalTableCount.intValue() + && ( + successCount == logicalTableCount.intValue()); + double avgSpeed = 0; + String status; + if (GeneralUtil.isEmpty(backFillAggInfoList)) { + //not start yet + status = PhysicalBackfillManager.BackfillStatus.INIT.name(); + } else { + long duration = Math.max(1, (endTime.getTime() - startTime.getTime()) / 1000); + avgSpeed = (double) successBufferSize / 1024 / 1024 / duration; + avgSpeed = Double.valueOf(String.format("%.2f", avgSpeed)).doubleValue(); + if (allBackFillFinished) { + status = PhysicalBackfillManager.BackfillStatus.SUCCESS.name(); + } else if (backFillFailed) { + status = PhysicalBackfillManager.BackfillStatus.FAILED.name(); + } else if (successBufferSize == 0) { + status = PhysicalBackfillManager.BackfillStatus.INIT.name(); + } else { + status = PhysicalBackfillManager.BackfillStatus.RUNNING.name(); + } + } + + initialCount = Math.max(logicalTableCount.intValue() - successCount - failureCount - runningCount, 0); + String info = + String.format( + "estimate size:%s MB, finish:%s MB, speed:%s MB/s", + totalSize / 1024 / 1024, successBufferSize / 1024 / 1024, + avgSpeed); + double progress = Math.min(1.0 * successBufferSize / Math.max(totalSize, 1.00), 1.0) * 100; + progress = Double.valueOf(String.format("%.2f", progress)).doubleValue(); + addRow(cursor, ddlRecord.jobId, ddlRecord.schemaName, RebalanceStage.DATA_COPY.name(), status, progress, + logicalTableCount.intValue(), successCount, runningCount, initialCount, failureCount, info, startTime, + endTime, + ddlRecord.ddlStmt); + } + } + + private static void addImportTableSpaceRow(ArrayResultCursor cursor, DdlEngineRecord ddlRecord, + List allTasks, Long logicalTableCount) { + //all submitted BackFill tasks, and yet there may be some BackFill tasks haven't been submitted + List allImportTasks = + allTasks.stream().filter(e -> StringUtils.containsIgnoreCase(e.getName(), "importTableSpaceDdlTask")) + .collect(Collectors.toList()); + int successCount = 0; + int failureCount = 0; + int runningCount = 0; + int initialCount = 0; + if (GeneralUtil.isEmpty(allImportTasks)) { + //ignore importtablespace + assert false; + } else { + for (DdlEngineTaskRecord importTask : allImportTasks) { + DdlTaskState state = DdlTaskState.valueOf(importTask.state); + switch (state) { + case READY: + initialCount++; + break; + case DIRTY: + runningCount++; + break; + case SUCCESS: + successCount++; + break; + case ROLLBACK_SUCCESS: + failureCount++; + break; + } + } + } + logicalTableCount = + logicalTableCount < allImportTasks.size() ? 
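The DATA_COPY row above derives its two headline numbers from raw byte counts: average speed is copied bytes converted to MB over a wall-clock duration floored at one second, and progress is the copied/estimated ratio clamped to 100% (the size estimate can undershoot the real volume). A self-contained sketch of both formulas; Locale.ROOT is added here so the %.2f round-trip parses reliably, which the original omits:

```java
import java.util.Locale;

public class CopyProgress {

    // Average speed in MB/s over a wall-clock interval; the duration is floored
    // at one second exactly as in the handler above.
    static double avgSpeedMbPerSec(long copiedBytes, long startMillis, long endMillis) {
        long durationSec = Math.max(1, (endMillis - startMillis) / 1000);
        double mbPerSec = (double) copiedBytes / 1024 / 1024 / durationSec;
        return Double.parseDouble(String.format(Locale.ROOT, "%.2f", mbPerSec));
    }

    // Percentage in [0, 100]; clamped because the estimate may undershoot.
    static double progressPercent(long copiedBytes, long estimatedTotalBytes) {
        double ratio = Math.min(1.0 * copiedBytes / Math.max(estimatedTotalBytes, 1L), 1.0);
        return Double.parseDouble(String.format(Locale.ROOT, "%.2f", ratio * 100));
    }

    public static void main(String[] args) {
        System.out.println(avgSpeedMbPerSec(512L << 20, 0L, 8_000L)); // 64.0
        System.out.println(progressPercent(512L << 20, 2048L << 20)); // 25.0
    }
}
```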
allImportTasks.size() : logicalTableCount; + String status = ""; + if (successCount == logicalTableCount.intValue()) { + status = "SUCCESS"; + } else if (failureCount > 0) { + status = "FAILED"; + } else if (runningCount > 0) { + status = "RUNNING"; + } else { + if (successCount == 0) { + status = "INIT"; + } else { + status = "RUNNING"; + } + } + initialCount = Math.max(logicalTableCount.intValue() - successCount - failureCount - runningCount, 0); + String info = + String.format("total tasks:%s, finish:%s, running:%s, not start:%s, failed:%s", logicalTableCount, + successCount, runningCount, initialCount, failureCount); + double progress = Math.min(1.0 * successCount / Math.max(logicalTableCount, 1), 1.0) * 100; + + progress = Double.valueOf(String.format("%.2f", progress)).doubleValue(); + addRow(cursor, ddlRecord.jobId, ddlRecord.schemaName, RebalanceStage.DATA_IMPORT.name(), status, progress, + logicalTableCount.intValue(), successCount, runningCount, initialCount, failureCount, info, + null, null, + ""); + } + + private static void addDataValidationRow(ArrayResultCursor cursor, DdlEngineRecord ddlRecord, + List allTasks, Long logicalTableCount) { + //all submitted BackFill tasks, and yet there may be some BackFill tasks haven't been submitted + //AlterTableGroupMovePartitionsCheckTask MoveTableCheckTask + List allDataValidationTasks = + allTasks.stream().filter( + e -> StringUtils.containsIgnoreCase(e.getName(), "AlterTableGroupMovePartitionsCheckTask") + || StringUtils.containsIgnoreCase(e.getName(), "MoveTableCheckTask")) + .collect(Collectors.toList()); + int successCount = 0; + int failureCount = 0; + int runningCount = 0; + int initialCount = 0; + if (GeneralUtil.isEmpty(allDataValidationTasks)) { + //ignore checker + } else { + for (DdlEngineTaskRecord importTask : allDataValidationTasks) { + DdlTaskState state = DdlTaskState.valueOf(importTask.state); + switch (state) { + case READY: + initialCount++; + break; + case DIRTY: + runningCount++; + break; + case SUCCESS: + successCount++; + break; + case ROLLBACK_SUCCESS: + failureCount++; + break; + } + } + } + logicalTableCount = + logicalTableCount < allDataValidationTasks.size() ? 
allDataValidationTasks.size() : logicalTableCount; + String status = ""; + if (successCount == logicalTableCount.intValue()) { + status = "SUCCESS"; + } else if (failureCount > 0) { + status = "FAILED"; + } else if (runningCount > 0) { + status = "RUNNING"; + } else { + if (successCount == 0) { + status = "INIT"; + } else { + status = "RUNNING"; + } + } + initialCount = Math.max(logicalTableCount.intValue() - successCount - failureCount - runningCount, 0); + String info = + String.format("total tasks:%s, finish:%s, running:%s, not start:%s, failed:%s", + logicalTableCount, + successCount, runningCount, initialCount, failureCount); + double progress = Math.min(1.0 * successCount / Math.max(logicalTableCount, 1), 1.0) * 100; + progress = Double.valueOf(String.format("%.2f", progress)).doubleValue(); + addRow(cursor, ddlRecord.jobId, ddlRecord.schemaName, RebalanceStage.DATA_VALIDATION.name(), status, progress, + logicalTableCount.intValue(), successCount, runningCount, initialCount, failureCount, + info, + null, null, + ""); + } + + private static void addRow(ArrayResultCursor cursor, long jobId, String schemaName, + String stage, + String state, + double progress, + int totalTableCount, + int finishedTableCount, + int runningTableCount, + int notStartedTableCount, + int failedTableCount, + String info, + Timestamp startTime, + Timestamp endTime, + String ddlStmt) { + cursor.addRow(new Object[] { + jobId, schemaName, stage, state, progress, totalTableCount, finishedTableCount, runningTableCount, + notStartedTableCount, failedTableCount, info, startTime, endTime, ddlStmt}); + } + + public enum RebalanceStage { + DATA_COPY(0), DATA_IMPORT(1), DATA_VALIDATION(2); + private long value; + + RebalanceStage(long value) { + this.value = value; + } + + public long getValue() { + return value; + } + + public static RebalanceStage of(long value) { + switch ((int) value) { + case 0: + return DATA_COPY; + case 1: + return DATA_IMPORT; + case 2: + return DATA_VALIDATION; + default: + throw new IllegalArgumentException("Unsupported RebalanceStage value " + value); + } + } + + public static String display(long value) { + switch ((int) value) { + case 0: + return DATA_COPY.name(); + case 1: + return DATA_IMPORT.name(); + case 2: + return DATA_VALIDATION.name(); + default: + return "UNKNOWN"; + } + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaSPMHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaSPMHandler.java index feaa39a29..959977ad3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaSPMHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaSPMHandler.java @@ -21,7 +21,7 @@ import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.sync.FetchSPMSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; -import com.alibaba.polardbx.gms.topology.SystemDbHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; @@ -52,12 +52,14 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, Set schemaNames = OptimizerContext.getActiveSchemaNames(); for (String schemaName : schemaNames) { List>> 
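All three stage methods above (backfill, import tablespace, data validation) classify DDL task states with the same switch: READY counts as not started, DIRTY as running, SUCCESS as finished, and ROLLBACK_SUCCESS as failed. A hypothetical shared helper showing the tally each of them repeats (the enum lists only the four states the switch inspects):

```java
import java.util.List;

public class TaskStateTally {

    enum DdlTaskState { READY, DIRTY, SUCCESS, ROLLBACK_SUCCESS }

    static final class Counts {
        int notStarted, running, finished, failed;

        @Override
        public String toString() {
            return String.format("notStarted=%d running=%d finished=%d failed=%d",
                notStarted, running, finished, failed);
        }
    }

    // The same switch appears in addBackfillRow, addImportTableSpaceRow and
    // addDataValidationRow above; this hypothetical helper centralizes it.
    static Counts tally(List<DdlTaskState> states) {
        Counts c = new Counts();
        for (DdlTaskState s : states) {
            switch (s) {
            case READY:            c.notStarted++; break;
            case DIRTY:            c.running++;    break;
            case SUCCESS:          c.finished++;   break;
            case ROLLBACK_SUCCESS: c.failed++;     break;
            }
        }
        return c;
    }

    public static void main(String[] args) {
        System.out.println(tally(List.of(DdlTaskState.READY, DdlTaskState.SUCCESS,
            DdlTaskState.SUCCESS, DdlTaskState.DIRTY)));
        // notStarted=1 running=1 finished=2 failed=0
    }
}
```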
results = SyncManagerHelper.sync(new FetchSPMSyncAction(schemaName), - schemaName); + schemaName, SyncScope.CURRENT_ONLY); for (List> nodeRows : results) { if (nodeRows == null) { continue; } for (Map row : nodeRows) { + final String host = DataTypes.StringType.convertFrom(row.get("HOST")); + final String instId = DataTypes.StringType.convertFrom(row.get("INST_ID")); final String baselineId = DataTypes.StringType.convertFrom(row.get("BASELINE_ID")); final String planId = DataTypes.StringType.convertFrom(row.get("PLAN_ID")); final Integer fixed = DataTypes.BooleanType.convertFrom(row.get("FIXED")); @@ -77,6 +79,8 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, final String usePostPlanner = DataTypes.StringType.convertFrom(row.get("USE_POST_PLANNER")); cursor.addRow(new Object[] { + host, + instId, baselineId, schemaName, planId, diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaScheduleJobsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaScheduleJobsHandler.java index 9e60289c2..2963cfa2e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaScheduleJobsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaScheduleJobsHandler.java @@ -17,22 +17,19 @@ package com.alibaba.polardbx.executor.handler.subhandler; import com.alibaba.polardbx.common.utils.timezone.TimeZoneUtils; -import com.alibaba.polardbx.gms.scheduler.ScheduledJobExecutorType; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.scheduler.ScheduledJobsManager; import com.alibaba.polardbx.gms.scheduler.ExecutableScheduledJob; +import com.alibaba.polardbx.gms.scheduler.ScheduledJobExecutorType; import com.alibaba.polardbx.gms.scheduler.ScheduledJobsRecord; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.view.InformationSchemaScheduleJobs; import com.alibaba.polardbx.optimizer.view.VirtualView; import java.sql.Timestamp; -import java.time.Instant; -import java.time.ZoneId; import java.util.Comparator; -import java.util.Date; import java.util.List; import java.util.TimeZone; @@ -54,8 +51,13 @@ public boolean isSupport(VirtualView virtualView) { public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { List jobs = ScheduledJobsManager.queryScheduledJobsRecord(); for (ScheduledJobsRecord scheduledJob : jobs) { - ScheduledJobExecutorType executorType = - ScheduledJobExecutorType.valueOf(scheduledJob.getExecutorType()); + ScheduledJobExecutorType executorType; + try { + executorType = ScheduledJobExecutorType.valueOf(scheduledJob.getExecutorType()); + } catch (IllegalArgumentException e) { + continue; + } + List eJobList = ScheduledJobsManager.getScheduledJobResult(scheduledJob.getScheduleId()); ExecutableScheduledJob lastEJob = findLastJob(eJobList); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaSessionPerfHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaSessionPerfHandler.java index ee2115c35..30f6f306b 100644 --- 
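The SCHEDULE_JOBS hunk above swaps a bare ScheduledJobExecutorType.valueOf for a guarded parse: a record written by a node that knows newer executor types no longer breaks the whole view; its row is simply skipped. A sketch of the pattern with an illustrative enum subset (the real enum lives in com.alibaba.polardbx.gms.scheduler):

```java
import java.util.Optional;

public class SafeEnumParse {

    // Illustrative subset only; not the full set of executor types.
    enum ScheduledJobExecutorType { LOCAL_PARTITION, REFRESH_MATERIALIZED_VIEW }

    // Unknown strings (e.g. written by a newer server version) become empty
    // instead of an IllegalArgumentException that would abort the whole view.
    static Optional<ScheduledJobExecutorType> parse(String stored) {
        try {
            return Optional.of(ScheduledJobExecutorType.valueOf(stored));
        } catch (IllegalArgumentException e) {
            return Optional.empty();
        }
    }

    public static void main(String[] args) {
        System.out.println(parse("LOCAL_PARTITION")); // Optional[LOCAL_PARTITION]
        System.out.println(parse("SOME_FUTURE_JOB")); // Optional.empty
    }
}
```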
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaSessionPerfHandler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaSessionPerfHandler.java
@@ -23,6 +23,7 @@
 import com.alibaba.polardbx.executor.handler.VirtualViewHandler;
 import com.alibaba.polardbx.executor.sync.ISyncAction;
 import com.alibaba.polardbx.executor.sync.SyncManagerHelper;
+import com.alibaba.polardbx.gms.sync.SyncScope;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.datatype.DataTypes;
 import com.alibaba.polardbx.optimizer.view.InformationSchemaSessionPerf;
@@ -64,7 +65,7 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext,
         } catch (Exception e) {
             throw new TddlRuntimeException(ErrorCode.ERR_CONFIG, e, e.getMessage());
         }
-        List<List<Map<String, Object>>> results = SyncManagerHelper.sync(syncAction);
+        List<List<Map<String, Object>>> results = SyncManagerHelper.sync(syncAction, SyncScope.CURRENT_ONLY);

         for (List<Map<String, Object>> rs : results) {
             if (rs == null) {
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaShowHelpHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaShowHelpHandler.java
new file mode 100644
index 000000000..5051fed5b
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaShowHelpHandler.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.alibaba.polardbx.executor.handler.subhandler; + +import com.alibaba.polardbx.executor.ExecutorHelper; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.handler.VirtualViewHandler; +import com.alibaba.polardbx.optimizer.config.server.DefaultServerConfigManager; +import com.alibaba.polardbx.optimizer.config.server.IServerConfigManager; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.view.InformationSchemaShowHelp; +import com.alibaba.polardbx.optimizer.view.VirtualView; +import org.apache.calcite.sql.SqlKind; + +import java.lang.reflect.Field; +import java.lang.reflect.Modifier; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +/** + * @author pangzhaoxing + */ +public class InformationSchemaShowHelpHandler extends BaseVirtualViewSubClassHandler { + public InformationSchemaShowHelpHandler(VirtualViewHandler virtualViewHandler) { + super(virtualViewHandler); + } + + @Override + public boolean isSupport(VirtualView virtualView) { + return virtualView instanceof InformationSchemaShowHelp; + } + + @Override + public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { + for (String show : getShowSQL()) { + cursor.addRow(new Object[] {show}); + } + return cursor; + } + + private List getShowSQL() { + List shows = new ArrayList<>(); + try { + Class clazz = Class.forName("com.alibaba.polardbx.server.parser.ServerParseShow"); + Field[] fields = clazz.getDeclaredFields(); + for (Field field : fields) { + field.setAccessible(true); + if (field.getType() == int.class && Modifier.isStatic(field.getModifiers())) { + String showContent = field.getName().toLowerCase().replace("_", " "); + if ("other".equals(showContent)) { + continue; + } + shows.add("show " + showContent); + } + } + } catch (ClassNotFoundException e) { + //ignore + } + + for (SqlKind kind : SqlKind.values()) { + if (kind.lowerName.contains("show") && !"show".equals(kind.lowerName)) { + String show = kind.lowerName.replace("_", " "); + shows.add(show); + } + } + return shows; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaStatementSummaryHistoryHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaStatementSummaryHistoryHandler.java index 7bb46bbb4..a363eb69f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaStatementSummaryHistoryHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaStatementSummaryHistoryHandler.java @@ -26,6 +26,7 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.view.InformationSchemaStatementSummaryHistory; import com.alibaba.polardbx.optimizer.view.VirtualView; + import static com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaStatementSummaryHandler.buildFinalResultFromSync; /** diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaStatisticsDataHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaStatisticsDataHandler.java new file mode 100644 index 000000000..6b93126e2 --- /dev/null +++ 
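The SHOW HELP handler above builds its list by reflection: every static int constant on com.alibaba.polardbx.server.parser.ServerParseShow is taken to name a recognized SHOW statement, lower-cased with underscores turned into spaces, and the OTHER sentinel is skipped. A runnable sketch of that field scan against a local stand-in class (the handler loads the real class by name to avoid a module dependency):

```java
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.List;

public class ShowEnumeration {

    // Local stand-in for com.alibaba.polardbx.server.parser.ServerParseShow,
    // which exposes one static int constant per recognized SHOW statement.
    static class ParseShowStandIn {
        public static final int DATABASES = 1;
        public static final int TABLE_STATUS = 2;
        public static final int OTHER = -1; // sentinel, skipped below
    }

    static List<String> showStatements(Class<?> parseClass) {
        List<String> shows = new ArrayList<>();
        for (Field field : parseClass.getDeclaredFields()) {
            if (field.getType() == int.class && Modifier.isStatic(field.getModifiers())) {
                String name = field.getName().toLowerCase().replace("_", " ");
                if (!"other".equals(name)) {
                    shows.add("show " + name);
                }
            }
        }
        return shows;
    }

    public static void main(String[] args) {
        System.out.println(showStatements(ParseShowStandIn.class));
        // [show databases, show table status]
    }
}
```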
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaStatisticsDataHandler.java @@ -0,0 +1,147 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.handler.subhandler; + +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.gms.util.StatisticUtils; +import com.alibaba.polardbx.executor.handler.VirtualViewHandler; +import com.alibaba.polardbx.executor.sync.StatisticQuerySyncAction; +import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.config.impl.InstConfUtil; +import com.alibaba.polardbx.gms.sync.SyncScope; +import com.alibaba.polardbx.gms.topology.SystemDbHelper; +import com.alibaba.polardbx.optimizer.config.table.statistic.Histogram; +import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticManager; +import com.alibaba.polardbx.optimizer.config.table.statistic.inf.StatisticResultSource; +import com.alibaba.polardbx.optimizer.config.table.statistic.inf.SystemTableColumnStatistic; +import com.alibaba.polardbx.optimizer.config.table.statistic.inf.SystemTableTableStatistic; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.parse.util.Pair; +import com.alibaba.polardbx.optimizer.view.InformationSchemaStatisticsData; +import com.alibaba.polardbx.optimizer.view.VirtualView; +import com.google.common.collect.Maps; +import org.apache.calcite.sql.type.SqlTypeName; + +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import static com.alibaba.polardbx.optimizer.config.table.statistic.StatisticUtils.buildSketchKey; + +/** + * @author fangwu + */ +public class InformationSchemaStatisticsDataHandler extends BaseVirtualViewSubClassHandler { + + public InformationSchemaStatisticsDataHandler(VirtualViewHandler virtualViewHandler) { + super(virtualViewHandler); + } + + @Override + public boolean isSupport(VirtualView virtualView) { + return virtualView instanceof InformationSchemaStatisticsData; + } + + @Override + public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { + List>> results = + SyncManagerHelper.sync(new StatisticQuerySyncAction(), SystemDbHelper.DEFAULT_DB_NAME, SyncScope.ALL); + for (List> nodeRows : results) { + if (nodeRows == null) { + continue; + } + for (Map row : nodeRows) { + Object[] rowObj = new Object[InformationSchemaStatisticsData.meta.size()]; + for (int i = 0; i < rowObj.length; i++) { + Pair pair = InformationSchemaStatisticsData.meta.get(i); + String colName = pair.getKey(); + SqlTypeName colType = pair.getValue(); + DataType dataType = InformationSchemaStatisticsData.transform(colType); + rowObj[i] = 
dataType.convertFrom(row.get(colName)); + } + cursor.addRow(rowObj); + } + } + + // handle statistic data from meta + Collection tableRowList = + StatisticManager.getInstance().getSds().loadAllTableStatistic(0L); + Collection columnRowList = + StatisticManager.getInstance().getSds().loadAllColumnStatistic(0L); + Map cardinalitySketch = Maps.newHashMap(); + cardinalitySketch.putAll(StatisticManager.getInstance().getSds().loadAllCardinality()); + Map tableRowMap = Maps.newHashMap(); + for (SystemTableTableStatistic.Row tableRow : tableRowList) { + String schema = tableRow.getSchema(); + String table = tableRow.getTableName(); + tableRowMap.put(schema + ":" + table, tableRow.getRowCount()); + } + Object[] rowObj; + for (SystemTableColumnStatistic.Row colRow : columnRowList) { + String schema = colRow.getSchema(); + String table = colRow.getTableName(); + String column = colRow.getColumnName(); + + // skip oss table cause sample process would do the same + if (StatisticUtils.isFileStore(schema, table)) { + continue; + } + + Long ndv = cardinalitySketch.get(buildSketchKey(schema, table, column)); + String ndvSource = StatisticResultSource.HLL_SKETCH.name(); + if (ndv == null) { + ndv = colRow.getCardinality(); + ndvSource = StatisticResultSource.CACHE_LINE.name(); + } + String topN = colRow.getTopN() == null ? "" : colRow.getTopN().manualReading(); + + Histogram histogram = colRow.getHistogram(); + rowObj = new Object[] { + "metadb", + schema, + table, + column, + tableRowMap.get(schema + ":" + table), + ndv, + ndvSource, + topN == null ? "" : topN, + histogram == null || histogram.getBuckets().size() == 0 ? "" : histogram.manualReading(), + colRow.getSampleRate() + }; + cursor.addRow(rowObj); + } + boolean statisticInconsistentTest = InstConfUtil.getBool(ConnectionParams.ALERT_STATISTIC_INCONSISTENT); + if (statisticInconsistentTest) { + cursor.addRow(new Object[] { + "metadb", + "mock", + "mock", + "mock", + 0, + 0, + "mock", + "", + "", + 1.0 + }); + } + return cursor; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaStatisticsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaStatisticsHandler.java index e69de29bb..f7416427c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaStatisticsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaStatisticsHandler.java @@ -0,0 +1,149 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
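The metadb branch of the STATISTICS_DATA handler above resolves NDV with a two-level lookup: prefer the HLL sketch estimate keyed per column, otherwise fall back to the cached cardinality, and record which source won so the view can expose it. A compact sketch of that fallback (the schema:table:column key format is an assumption based on buildSketchKey's arguments):

```java
import java.util.Map;

public class NdvLookup {

    record Ndv(long value, String source) {}

    // Prefer the HLL sketch estimate; otherwise fall back to the cached
    // cardinality, tagging the row with whichever source produced the value.
    static Ndv resolveNdv(Map<String, Long> sketchNdv, String sketchKey, long cachedCardinality) {
        Long fromSketch = sketchNdv.get(sketchKey);
        if (fromSketch != null) {
            return new Ndv(fromSketch, "HLL_SKETCH");
        }
        return new Ndv(cachedCardinality, "CACHE_LINE");
    }

    public static void main(String[] args) {
        Map<String, Long> sketches = Map.of("mydb:orders:buyer_id", 120_000L);
        System.out.println(resolveNdv(sketches, "mydb:orders:buyer_id", 90_000L)); // HLL_SKETCH
        System.out.println(resolveNdv(sketches, "mydb:orders:status", 4L));        // CACHE_LINE
    }
}
```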
+ */ + +package com.alibaba.polardbx.executor.handler.subhandler; + +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.handler.VirtualViewHandler; +import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.config.schema.InformationSchema; +import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; +import com.alibaba.polardbx.optimizer.config.table.IndexMeta; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticManager; +import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticResult; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.row.Row; +import com.alibaba.polardbx.optimizer.view.InformationSchemaStatistics; +import com.alibaba.polardbx.optimizer.view.InformationSchemaTables; +import com.alibaba.polardbx.optimizer.view.VirtualView; + +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +import static com.alibaba.polardbx.common.TddlConstants.IMPLICIT_COL_NAME; + +/** + * @author shengyu + */ +public class InformationSchemaStatisticsHandler extends BaseVirtualViewSubClassHandler { + private static final Logger logger = LoggerFactory.getLogger(InformationSchemaStatisticsHandler.class); + + public InformationSchemaStatisticsHandler(VirtualViewHandler virtualViewHandler) { + super(virtualViewHandler); + } + + @Override + public boolean isSupport(VirtualView virtualView) { + return virtualView instanceof InformationSchemaStatistics; + } + + @Override + public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { + InformationSchemaStatistics informationSchemaStatistics = (InformationSchemaStatistics) virtualView; + InformationSchemaTables informationSchemaTables = + new InformationSchemaTables(informationSchemaStatistics.getCluster(), + informationSchemaStatistics.getTraitSet()); + + informationSchemaTables.copyFilters(informationSchemaStatistics); + + Cursor tablesCursor = null; + + try { + tablesCursor = virtualViewHandler.handle(informationSchemaTables, executionContext); + + Row row; + while ((row = tablesCursor.next()) != null) { + String tableSchema = row.getString(1); + String tableName = row.getString(2); + if (InformationSchema.NAME.equalsIgnoreCase(tableSchema)) { + continue; + } + + try { + TableMeta tableMeta = + OptimizerContext.getContext(tableSchema).getLatestSchemaManager().getTable(tableName); + for (ColumnMeta columnMeta : tableMeta.getAllColumns()) { + String tableCatalog = "def"; + int nonUnique = 0; + String indexSchema = tableSchema; + String indexName = null; + int seqInIndex = 0; + String columnName = columnMeta.getName(); + if (columnName.toLowerCase().equals(IMPLICIT_COL_NAME)) { + continue; + } + String collation = "A"; + StatisticResult statisticResult = + StatisticManager.getInstance() + .getCardinality(tableSchema, tableName, columnName, false, false); + Long cardinality = statisticResult.getLongValue(); + String subPart = null; + String packed = null; + String nullable = null; + String indexType = null; + String comment = null; + String indexComment = null; + + List indexMetaList = + tableMeta.getIndexes().stream() + .filter(x -> 
x.getKeyColumns().indexOf(tableMeta.getColumn(columnName)) != -1) + .collect( + Collectors.toList()); + + if (indexMetaList != null) { + for (IndexMeta indexMeta : indexMetaList) { + indexName = indexMeta.getPhysicalIndexName(); + seqInIndex = 1 + indexMeta.getKeyColumns().indexOf(tableMeta.getColumn(columnName)); + indexType = indexMeta.getIndexType().name(); + nonUnique = indexMeta.isUniqueIndex() ? 0 : 1; + cursor.addRow(new Object[] { + tableCatalog, + tableSchema, + tableName, + nonUnique, + indexSchema, + indexName, + seqInIndex, + columnName, + collation, + cardinality, + subPart, + packed, + nullable, + indexType, + comment, + indexComment}); + } + } + } + + } catch (Throwable t) { + logger.error(t); + } + } + } finally { + if (tablesCursor != null) { + tablesCursor.close(new ArrayList<>()); + } + } + return cursor; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaStoragePropertiesHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaStoragePropertiesHandler.java index 8683e3cf4..3dad7dd55 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaStoragePropertiesHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaStoragePropertiesHandler.java @@ -21,6 +21,7 @@ import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.sync.StoragePropertiesSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.view.InformationSchemaStorageProperties; import com.alibaba.polardbx.optimizer.view.VirtualView; @@ -51,7 +52,8 @@ public boolean isSupport(VirtualView virtualView) { public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { final String schema = executionContext.getSchemaName(); - List>> results = SyncManagerHelper.sync(new StoragePropertiesSyncAction(), schema); + List>> results = SyncManagerHelper.sync(new StoragePropertiesSyncAction(), schema, + SyncScope.CURRENT_ONLY); Map functionStatus = new HashMap<>(); for (List> rs : results) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaStorageStatusHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaStorageStatusHandler.java index e7b999056..29f1d9cfd 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaStorageStatusHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaStorageStatusHandler.java @@ -22,7 +22,6 @@ import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.handler.VirtualViewHandler; -import com.alibaba.polardbx.executor.utils.ExecUtils; import com.alibaba.polardbx.gms.ha.impl.StorageHaManager; import com.alibaba.polardbx.gms.ha.impl.StorageInstHaContext; import com.alibaba.polardbx.gms.topology.DbTopologyManager; @@ -30,8 +29,6 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.function.calc.scalar.filter.Like; import 
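The STATISTICS handler above emits one row per (column, index) pair, computing SEQ_IN_INDEX as the 1-based position of the column among the index's key columns and NON_UNIQUE as the inverse of the uniqueness flag. A small sketch of that positioning with a simplified index record:

```java
import java.util.List;

public class IndexPositions {

    record IndexMeta(String name, List<String> keyColumns, boolean unique) {}

    // SEQ_IN_INDEX is 1-based, so indexOf's -1 ("not in this index") maps to 0.
    static int seqInIndex(IndexMeta index, String column) {
        return 1 + index.keyColumns().indexOf(column);
    }

    public static void main(String[] args) {
        IndexMeta idx = new IndexMeta("idx_buyer_status", List.of("buyer_id", "status"), false);
        System.out.println(seqInIndex(idx, "status"));  // 2
        System.out.println(idx.unique() ? 0 : 1);       // NON_UNIQUE column value: 1
    }
}
```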
com.alibaba.polardbx.optimizer.view.*; -import org.apache.calcite.rex.RexDynamicParam; -import org.apache.calcite.rex.RexLiteral; import java.sql.Connection; import java.sql.PreparedStatement; @@ -67,87 +64,26 @@ public boolean isSupport(VirtualView virtualView) { @Override public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { - - InformationSchemaStorageStatus informationSchemaStorageStatus = (InformationSchemaStorageStatus) virtualView; - HashMap storageStatus = new HashMap(); - List tableStorageInstIdIndexValue = - virtualView.getIndex().get(informationSchemaStorageStatus.getTableStorageInstIdIndex()); - - Object tableStorageInstLikeValue = - virtualView.getLike().get(informationSchemaStorageStatus.getTableStorageInstIdIndex()); - - List tableInstRoleIndexValue = - virtualView.getIndex().get(informationSchemaStorageStatus.getTableInstRoleIndex()); - - Object tableInstRoleLikeValue = - virtualView.getLike().get(informationSchemaStorageStatus.getTableInstRoleIndex()); - - List tableInstKindIndexValue = - virtualView.getIndex().get(informationSchemaStorageStatus.getTableInstKindIndex()); - - Object tableInstKindLikeValue = - virtualView.getLike().get(informationSchemaStorageStatus.getTableInstKindIndex()); + final int instIdIndex = InformationSchemaStorageStatus.getTableStorageInstIdIndex(); + final int instRoleIndex = InformationSchemaStorageStatus.getTableInstRoleIndex(); + final int instKindIndex = InformationSchemaStorageStatus.getTableInstKindIndex(); Map params = executionContext.getParams().getCurrentParameter(); // StorageInstIdIndex - Set indexStorageInstId = new HashSet<>(); - if (tableStorageInstIdIndexValue != null && !tableStorageInstIdIndexValue.isEmpty()) { - for (Object obj : tableStorageInstIdIndexValue) { - ExecUtils.handleTableNameParams(obj, params, indexStorageInstId); - } - } - + Set indexStorageInstId = virtualView.getEqualsFilterValues(instIdIndex, params); // StorageInstIdLike - String storageInstIdLike = null; - if (tableStorageInstLikeValue != null) { - if (tableStorageInstLikeValue instanceof RexDynamicParam) { - storageInstIdLike = - String.valueOf(params.get(((RexDynamicParam) tableStorageInstLikeValue).getIndex() + 1).getValue()); - } else if (tableStorageInstLikeValue instanceof RexLiteral) { - storageInstIdLike = ((RexLiteral) tableStorageInstLikeValue).getValueAs(String.class); - } - } - + String storageInstIdLike = virtualView.getLikeString(instIdIndex, params); // InstRoleIndex - Set indexInstRole = new HashSet<>(); - if (tableInstRoleIndexValue != null && !tableInstRoleIndexValue.isEmpty()) { - for (Object obj : tableInstRoleIndexValue) { - ExecUtils.handleTableNameParams(obj, params, indexInstRole); - } - } - + Set indexInstRole = virtualView.getEqualsFilterValues(instRoleIndex, params); // InstRoleLike - String instRoleLike = null; - if (tableInstRoleLikeValue != null) { - if (tableInstRoleLikeValue instanceof RexDynamicParam) { - instRoleLike = - String.valueOf(params.get(((RexDynamicParam) tableInstRoleLikeValue).getIndex() + 1).getValue()); - } else if (tableInstRoleLikeValue instanceof RexLiteral) { - instRoleLike = ((RexLiteral) tableInstRoleLikeValue).getValueAs(String.class); - } - } - + String instRoleLike = virtualView.getLikeString(instRoleIndex, params); // InstKindIndex - Set indexInstKind = new HashSet<>(); - if (tableInstKindIndexValue != null && !tableInstKindIndexValue.isEmpty()) { - for (Object obj : tableInstKindIndexValue) { - ExecUtils.handleTableNameParams(obj, params, 
indexInstKind); - } - } - + Set indexInstKind = virtualView.getEqualsFilterValues(instKindIndex, params); // InstKindLike - String instKindLike = null; - if (tableInstKindIndexValue != null) { - if (tableInstKindLikeValue instanceof RexDynamicParam) { - instKindLike = - String.valueOf(params.get(((RexDynamicParam) tableInstKindLikeValue).getIndex() + 1).getValue()); - } else if (tableInstKindLikeValue instanceof RexLiteral) { - instKindLike = ((RexLiteral) tableInstKindLikeValue).getValueAs(String.class); - } - } + String instKindLike = virtualView.getLikeString(instKindIndex, params); Map storageStatusMap = StorageHaManager.getInstance().getStorageHaCtxCache(); @@ -162,12 +98,14 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, String instanceRole = (instanceId.equals(masterInstanceId) ? "leader" : "learner"); String instanceKind = StorageInfoRecord.getInstKind(ctx.getStorageKind()); - if ((!indexStorageInstId.isEmpty() && !indexStorageInstId.contains(instanceId) || (!indexInstRole.isEmpty() - && !indexInstRole.contains(instanceRole)) || (!indexInstKind.isEmpty() && !indexInstKind.contains( - instanceKind)) - || (storageInstIdLike != null && !new Like().like(instanceId, - storageInstIdLike)) || (instRoleLike != null && !new Like().like(instanceRole, instRoleLike)) - || (instKindLike != null && !new Like().like(instanceKind, instKindLike)))) { + if ((!indexStorageInstId.isEmpty() && !indexStorageInstId.contains(instanceId.toLowerCase())) || + (!indexInstRole.isEmpty() && !indexInstRole.contains(instanceRole.toLowerCase())) || + (!indexInstKind.isEmpty() && !indexInstKind.contains(instanceKind.toLowerCase()))) { + continue; + } + if ((storageInstIdLike != null && !new Like().like(instanceId, storageInstIdLike)) || + (instRoleLike != null && !new Like().like(instanceRole, instRoleLike)) || + (instKindLike != null && !new Like().like(instanceKind, instKindLike))) { continue; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTableAccessHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTableAccessHandler.java index 5583af8e3..24ef2b6b7 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTableAccessHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTableAccessHandler.java @@ -20,18 +20,18 @@ import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.CaseInsensitive; -import com.alibaba.polardbx.druid.util.StringUtils; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.sync.ISyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.statis.PlanAccessStat; import com.alibaba.polardbx.optimizer.view.InformationSchemaTableAccess; import com.alibaba.polardbx.optimizer.view.VirtualView; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -97,10 +97,10 @@ public Cursor 
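The STORAGE_STATUS rewrite above splits filtering into two independent checks: equals-filters collected by getEqualsFilterValues are matched case-insensitively (hence the added toLowerCase() calls), and LIKE patterns are applied separately. A sketch of the combined predicate; the LIKE-to-regex translation is a simplification of the real Like function and does not escape regex metacharacters:

```java
import java.util.Set;

public class InstanceFilter {

    // Equals-filter values are stored lower-cased, hence the toLowerCase() in the
    // hunk above; an empty set or a null pattern means "no filter of that kind".
    static boolean matches(String value, Set<String> equalsFilter, String likePattern) {
        if (!equalsFilter.isEmpty() && !equalsFilter.contains(value.toLowerCase())) {
            return false;
        }
        if (likePattern != null) {
            // Simplified SQL LIKE: % matches any run, _ matches one character.
            String regex = likePattern.toLowerCase().replace("%", ".*").replace("_", ".");
            return value.toLowerCase().matches(regex);
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(matches("polardbx-storage-0", Set.of(), "polardbx-%")); // true
        System.out.println(matches("learner", Set.of("leader"), null));            // false
    }
}
```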
handle(VirtualView virtualView, ExecutionContext executionContext, } List>> results = - SyncManagerHelper.sync(showTableAccessAction, executionContext.getSchemaName()); + SyncManagerHelper.sync(showTableAccessAction, executionContext.getSchemaName(), SyncScope.ALL); List>> joinClosureResults = - SyncManagerHelper.sync(showTableJoinClosureAction, executionContext.getSchemaName()); + SyncManagerHelper.sync(showTableJoinClosureAction, executionContext.getSchemaName(), SyncScope.ALL); List joinClosureStatInfos = PlanAccessStat.collectTableJoinClosureStat(joinClosureResults); @@ -119,7 +119,16 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, // result.addColumn("OTHER_TABLE_NAME", DataTypes.StringType); // result.addColumn("ACCESS_COUNT", DataTypes.LongType); // result.addColumn("TEMPLATE_ID_SET", DataTypes.StringType); + for (List> rs : results) { + if (rs == null) { + /** + * some cn maybe is not init, + * so some results from SyncManagerHelper.sync(xxx) maybe null , + * so here filer the null result + */ + continue; + } for (int i = 0; i < rs.size(); i++) { Map accessItem = rs.get(i); String relKey = (String) accessItem.get("RELATION_KEY"); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTableConstraintsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTableConstraintsHandler.java index e69de29bb..c4c2194f7 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTableConstraintsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTableConstraintsHandler.java @@ -0,0 +1,124 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
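The TABLE_ACCESS hunk above hardens result handling: a CN that has not finished initializing returns null instead of a row list from SyncManagerHelper.sync, so the loop now skips null entries rather than throwing. A minimal sketch of that null-safe iteration:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class NullSafeSyncResults {

    // Per-node result lists may be null when a CN has not finished initializing;
    // skip them instead of letting the whole view fail, as the hunk above does.
    static int countRows(List<List<Map<String, Object>>> perNodeResults) {
        int rows = 0;
        for (List<Map<String, Object>> nodeRows : perNodeResults) {
            if (nodeRows == null) {
                continue;
            }
            rows += nodeRows.size();
        }
        return rows;
    }

    public static void main(String[] args) {
        List<List<Map<String, Object>>> results = new ArrayList<>();
        results.add(null); // uninitialized CN
        results.add(List.of(Map.of("RELATION_KEY", (Object) "db.t1,db.t2")));
        System.out.println(countRows(results)); // 1
    }
}
```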
+ */ + +package com.alibaba.polardbx.executor.handler.subhandler; + +import com.alibaba.polardbx.common.ddl.foreignkey.ForeignKeyData; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.handler.VirtualViewHandler; +import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.config.schema.InformationSchema; +import com.alibaba.polardbx.optimizer.config.table.IndexMeta; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.row.Row; +import com.alibaba.polardbx.optimizer.view.InformationSchemaTableConstraints; +import com.alibaba.polardbx.optimizer.view.InformationSchemaTables; +import com.alibaba.polardbx.optimizer.view.VirtualView; + +import java.util.ArrayList; +import java.util.Map; +import java.util.Objects; + +/** + * @author shengyu + */ +public class InformationSchemaTableConstraintsHandler extends BaseVirtualViewSubClassHandler { + private static final Logger logger = LoggerFactory.getLogger(InformationSchemaTableConstraintsHandler.class); + + public InformationSchemaTableConstraintsHandler(VirtualViewHandler virtualViewHandler) { + super(virtualViewHandler); + } + + @Override + public boolean isSupport(VirtualView virtualView) { + return virtualView instanceof InformationSchemaTableConstraints; + } + + @Override + public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { + InformationSchemaTableConstraints informationSchemaTableConstraints = + (InformationSchemaTableConstraints) virtualView; + InformationSchemaTables informationSchemaTables = + new InformationSchemaTables(informationSchemaTableConstraints.getCluster(), + informationSchemaTableConstraints.getTraitSet()); + + informationSchemaTables.copyFilters(informationSchemaTableConstraints); + + Cursor tablesCursor = null; + try { + tablesCursor = virtualViewHandler.handle(informationSchemaTables, executionContext); + + Row row; + while ((row = tablesCursor.next()) != null) { + String tableSchema = row.getString(1); + String tableName = row.getString(2); + if (InformationSchema.NAME.equalsIgnoreCase(tableSchema)) { + continue; + } + + try { + TableMeta tableMeta = + Objects.requireNonNull(OptimizerContext.getContext(tableSchema)).getLatestSchemaManager() + .getTable(tableName); + for (IndexMeta indexMeta : tableMeta.getIndexes()) { + //for primary key and unique key, they must be in the same table + if (indexMeta.isPrimaryKeyIndex()) { + cursor.addRow(new Object[] { + "def", + tableSchema, + indexMeta.getPhysicalIndexName(), + tableSchema, + tableName, + "PRIMARY KEY", + "YES"}); + } else if (indexMeta.isUniqueIndex()) { + cursor.addRow(new Object[] { + "def", + tableSchema, + indexMeta.getPhysicalIndexName(), + tableSchema, + tableName, + "UNIQUE", + "YES"}); + } + } + for (Map.Entry entry : tableMeta.getForeignKeys().entrySet()) { + cursor.addRow(new Object[] { + "def", + tableSchema, + entry.getValue().constraint, + tableSchema, + tableName, + "FOREIGN KEY", + "YES"}); + } + } catch (Throwable t) { + logger.error(t); + } + } + } finally { + if (tablesCursor != null) { + tablesCursor.close(new ArrayList<>()); + } + } + + return cursor; + } +} diff --git 
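The TABLE_CONSTRAINTS handler above maps index metadata to constraint rows with a fixed precedence: a primary-key index yields PRIMARY KEY, any other unique index yields UNIQUE, and foreign keys are emitted from their own metadata map. A tiny sketch of the classification rule:

```java
public class ConstraintType {

    // Precedence mirrors the handler above: primary-key indexes win over unique
    // ones; plain secondary indexes produce no TABLE_CONSTRAINTS row at all.
    static String constraintType(boolean isPrimaryKeyIndex, boolean isUniqueIndex) {
        if (isPrimaryKeyIndex) {
            return "PRIMARY KEY";
        }
        if (isUniqueIndex) {
            return "UNIQUE";
        }
        return null;
    }

    public static void main(String[] args) {
        System.out.println(constraintType(true, true));   // PRIMARY KEY
        System.out.println(constraintType(false, true));  // UNIQUE
        System.out.println(constraintType(false, false)); // null
    }
}
```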
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTableDetailHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTableDetailHandler.java index 31551f3cb..0674c59c6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTableDetailHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTableDetailHandler.java @@ -80,72 +80,20 @@ public boolean isSupport(VirtualView virtualView) { @Override public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, ArrayResultCursor cursor) { - InformationSchemaTableDetail informationSchemaTableDetail = (InformationSchemaTableDetail) virtualView; - // only new partitioning db Set schemaNames = new TreeSet<>(String::compareToIgnoreCase); schemaNames.addAll(StatsUtils.getDistinctSchemaNames()); - List tableSchemaIndexValue = - virtualView.getIndex().get(informationSchemaTableDetail.getTableSchemaIndex()); - - Object tableSchemaLikeValue = - virtualView.getLike().get(informationSchemaTableDetail.getTableSchemaIndex()); - - List tableNameIndexValue = - virtualView.getIndex().get(informationSchemaTableDetail.getTableNameIndex()); - - Object tableNameLikeValue = - virtualView.getLike().get(informationSchemaTableDetail.getTableNameIndex()); - + final int schemaIndex = InformationSchemaTableDetail.getTableSchemaIndex(); + final int tableIndex = InformationSchemaTableDetail.getTableNameIndex(); Map params = executionContext.getParams().getCurrentParameter(); - // schemaIndex - Set indexSchemaNames = new HashSet<>(); - if (tableSchemaIndexValue != null && !tableSchemaIndexValue.isEmpty()) { - for (Object obj : tableSchemaIndexValue) { - ExecUtils.handleTableNameParams(obj, params, indexSchemaNames); - } - schemaNames = schemaNames.stream() - .filter(schemaName -> indexSchemaNames.contains(schemaName.toLowerCase())) - .collect(Collectors.toSet()); - } - - // schemaLike - String schemaLike = null; - if (tableSchemaLikeValue != null) { - if (tableSchemaLikeValue instanceof RexDynamicParam) { - schemaLike = - String.valueOf(params.get(((RexDynamicParam) tableSchemaLikeValue).getIndex() + 1).getValue()); - } else if (tableSchemaLikeValue instanceof RexLiteral) { - schemaLike = ((RexLiteral) tableSchemaLikeValue).getValueAs(String.class); - } - if (schemaLike != null) { - final String likeArg = schemaLike; - schemaNames = schemaNames.stream().filter(schemaName -> new Like(null, null).like( - schemaName, likeArg)).collect( - Collectors.toSet()); - } - } + schemaNames = virtualView.applyFilters(schemaIndex, params, schemaNames); // tableIndex - Set indexTableNames = new HashSet<>(); - if (tableNameIndexValue != null && !tableNameIndexValue.isEmpty()) { - for (Object obj : tableNameIndexValue) { - ExecUtils.handleTableNameParams(obj, params, indexSchemaNames); - } - } - + Set indexTableNames = virtualView.getEqualsFilterValues(tableIndex, params); // tableLike - String tableLike = null; - if (tableNameLikeValue != null) { - if (tableNameLikeValue instanceof RexDynamicParam) { - tableLike = - String.valueOf(params.get(((RexDynamicParam) tableNameLikeValue).getIndex() + 1).getValue()); - } else if (tableNameLikeValue instanceof RexLiteral) { - tableLike = ((RexLiteral) tableNameLikeValue).getValueAs(String.class); - } - } + String tableLike = virtualView.getLikeString(tableIndex, params); List allTableGroupConfigs = 
StatsUtils.getTableGroupConfigs(schemaNames); @@ -199,8 +147,8 @@ private void queryStats(String tableLike, partitionPyhDbMap.putAll(partitionGroupRecords.stream().collect(Collectors.toMap( PartitionGroupRecord::getPartition_name, PartitionGroupRecord::getPhy_db))); } - for (TablePartRecordInfoContext context : tableGroupConfig.getAllTables()) { - String logicalTableName = context.getTableName().toLowerCase(); + for (String tableName : tableGroupConfig.getAllTables()) { + String logicalTableName = tableName.toLowerCase(); TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(logicalTableName); String indexName = StringUtils.EMPTY; if (tableMeta.isGsi()) { @@ -208,14 +156,14 @@ private void queryStats(String tableLike, continue; } logicalTableName = tableMeta.getGsiTableMetaBean().gsiMetaBean.tableName; - indexName = TddlSqlToRelConverter.unwrapGsiName(context.getTableName().toLowerCase()); + indexName = TddlSqlToRelConverter.unwrapGsiName(tableName.toLowerCase()); } if (!StatsUtils.isFilterTable(logicalTableNames, tableLike, logicalTableName) && isPrimaryTable) { continue; } Map> phyTblStatInfoOfOneLogTb = - tableGroupStatInfo.get(context.getLogTbRec().tableName.toLowerCase()); + tableGroupStatInfo.get(tableName.toLowerCase()); if (phyTblStatInfoOfOneLogTb == null) { continue; @@ -226,7 +174,8 @@ private void queryStats(String tableLike, } Objects.requireNonNull(phyTblStatInfoOfOneLogTb, - String.format("table meta corrupted: %s.%s", schemaName, context.getTableName())); + String.format("table meta corrupted: %s.%s", schemaName, tableName)); + Long totalRows = 0L; for (Map.Entry> phyEntry : phyTblStatInfoOfOneLogTb.entrySet()) { totalRows += DataTypes.LongType.convertFrom(phyEntry.getValue().get("physicalTableRows")); @@ -235,21 +184,20 @@ private void queryStats(String tableLike, /** * Fetch all the phyPartRecords of metadb */ - List tablePartitionRecords = - context.fetchAllPhysicalPartitionRecList().stream().filter( - o -> (o.partLevel != TablePartitionRecord.PARTITION_LEVEL_LOGICAL_TABLE)).collect( - Collectors.toList()); - for (int i = 0; i < tablePartitionRecords.size(); i++) { + List partitionSpecs = + tableMeta.getPartitionInfo().getPartitionBy().getPhysicalPartitions(); + for (int i = 0; i < partitionSpecs.size(); i++) { /** * record is a record of phySpec */ - TablePartitionRecord record = tablePartitionRecords.get(i); + PartitionSpec record = partitionSpecs.get(i); - Map tableStatRow = phyTblStatInfoOfOneLogTb.get(record.phyTable.toLowerCase()); + Map tableStatRow = + phyTblStatInfoOfOneLogTb.get(record.getLocation().getPhyTableName().toLowerCase()); Objects.requireNonNull(tableStatRow, String.format("physical table meta corrupted: %s.%s.%s", - schemaName, record.tableName, record.phyTable)); + schemaName, tableMeta.getTableName(), record.getLocation().getPhyTableName())); String partName = DataTypes.StringType.convertFrom(tableStatRow.get("partName")); String subpartName = DataTypes.StringType.convertFrom(tableStatRow.get("subpartName")); @@ -287,12 +235,12 @@ private void queryStats(String tableLike, /** * fetch phyDb by the phyPartName of phySpec */ - String phyDb = partitionPyhDbMap.get(record.partName); + String phyDb = partitionPyhDbMap.get(record.getName()); Pair pair = storageInstIdGroupNames.get(phyDb); String storageInstId = pair.getKey(); String groupName = pair.getValue(); - String phyTblName = record.phyTable; + String phyTblName = record.getLocation().getPhyTableName(); Object[] row = new Object[21]; cursor.addRow(row); diff --git 
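The rewrite above replaces per-handler parsing of RexDynamicParam and RexLiteral predicates with shared VirtualView helpers (applyFilters, getEqualsFilterValues, getLikeString). As a hedged sketch of what that consolidation does, the snippet below resolves equality and LIKE filters against bound parameters in one place, using plain stand-in types instead of Calcite rex nodes; resolve, applyEquals, and like are illustrative names under stated assumptions, not the real signatures.

// Sketch of the filter-helper consolidation. A "filter" here is either a
// literal value or an Integer index into the bound-parameter map; the real
// logic distinguishes RexLiteral from RexDynamicParam instead.
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;

public class FilterHelperSketch {

    // resolve a pushed-down filter value against the bound parameters
    static String resolve(Object filter, Map<Integer, Object> params) {
        if (filter instanceof Integer) {              // dynamic param: look up by index
            return String.valueOf(params.get(filter));
        }
        return String.valueOf(filter);                // literal: use as-is
    }

    // mirrors applyFilters: keep only names matching the equality set
    static Set<String> applyEquals(Set<String> names, List<Object> eqFilters,
                                   Map<Integer, Object> params) {
        if (eqFilters == null || eqFilters.isEmpty()) {
            return names;
        }
        Set<String> wanted = new HashSet<>();
        for (Object f : eqFilters) {
            wanted.add(resolve(f, params).toLowerCase());
        }
        Set<String> out = new HashSet<>();
        for (String n : names) {
            if (wanted.contains(n.toLowerCase())) {
                out.add(n);
            }
        }
        return out;
    }

    // mirrors getLikeString plus Like#like: translate a SQL LIKE pattern to a regex
    static boolean like(String value, String pattern) {
        String regex = Pattern.quote(pattern).replace("%", "\\E.*\\Q").replace("_", "\\E.\\Q");
        return value.matches(regex);
    }

    public static void main(String[] args) {
        Map<Integer, Object> params = new HashMap<>();
        params.put(1, "test_db");
        Set<String> schemas = new HashSet<>(Arrays.asList("test_db", "other_db"));
        System.out.println(applyEquals(schemas, Arrays.asList((Object) 1), params)); // [test_db]
        System.out.println(like("test_db", "test%"));                               // true
    }
}
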
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTableGroupHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTableGroupHandler.java index fe708f551..d9d82a7df 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTableGroupHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTableGroupHandler.java @@ -21,7 +21,6 @@ import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.handler.VirtualViewHandler; -import com.alibaba.polardbx.gms.partition.TablePartRecordInfoContext; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; import com.alibaba.polardbx.gms.tablegroup.TableGroupRecord; import com.alibaba.polardbx.gms.topology.DbInfoManager; @@ -99,9 +98,8 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, boolean isSingleTbOrBroadcastTb = false; if (tableCount > 0) { - List tbInfoList = tableGroupConfig.getTables(); - for (int i = 0; i < tbInfoList.size(); i++) { - String logTblName = tableGroupConfig.getTables().get(i).getLogTbRec().tableName; + for (int i = 0; i < tableGroupConfig.getTables().size(); i++) { + String logTblName = tableGroupConfig.getTables().get(i); TableMeta tableMeta = schemaManager.getTable(logTblName); PartitionInfo partInfo = tableMeta.getPartitionInfo(); PartitionTableType tableType = partInfo.getTableType(); @@ -113,7 +111,7 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, logTblCnt++; } } - String firstTblName = tableGroupConfig.getTables().get(0).getLogTbRec().tableName; + String firstTblName = tableGroupConfig.getTables().get(0); TableMeta tableMeta = schemaManager.getTable(firstTblName); PartitionInfo firstPartInfo = tableMeta.getPartitionInfo(); isSingleTbOrBroadcastTb = firstPartInfo.getTableType() == PartitionTableType.SINGLE_TABLE @@ -183,9 +181,7 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, } else { if (tableGroupConfig.getTableCount() > 0) { int tableCount = 0; - for (TablePartRecordInfoContext context : tableGroupConfig - .getAllTables()) { - String tableName = context.getLogTbRec().tableName; + for (String tableName : tableGroupConfig.getAllTables()) { TableMeta tableMeta = schemaManager.getTable(tableName); if (tableCount == 0) { try { @@ -202,7 +198,7 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, } else { sb.append(","); } - if (tableMeta.isGsi()) { + if (tableMeta.isGsi() || tableMeta.isColumnar()) { String primaryTable = tableMeta.getGsiTableMetaBean().gsiMetaBean.tableName; String unwrapGsiName = TddlSqlToRelConverter.unwrapGsiName(tableName); tableName = String.format(LOGICAL_GSI_NAME, primaryTable, unwrapGsiName); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTableJoinClosureHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTableJoinClosureHandler.java index 3484403af..9e2fe2195 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTableJoinClosureHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTableJoinClosureHandler.java 
@@ -24,6 +24,7 @@ import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.sync.ISyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.statis.PlanAccessStat; import com.alibaba.polardbx.optimizer.view.InformationSchemaTableJoinClosure; @@ -73,7 +74,7 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, } List>> results = - SyncManagerHelper.sync(showTableJoinClosureAction, executionContext.getSchemaName()); + SyncManagerHelper.sync(showTableJoinClosureAction, executionContext.getSchemaName(), SyncScope.ALL); List statInfos = PlanAccessStat.collectTableJoinClosureStat(results); for (int i = 0; i < statInfos.size(); i++) { PlanAccessStat.PlanJoinClosureStatInfo statInfo = statInfos.get(i); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTablesHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTablesHandler.java index c133afbd8..f0e780c57 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTablesHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTablesHandler.java @@ -38,13 +38,13 @@ import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticManager; import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticResult; import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.core.datatype.BigIntegerType; import com.alibaba.polardbx.optimizer.core.function.calc.scalar.CanAccessTable; import com.alibaba.polardbx.optimizer.core.function.calc.scalar.filter.Like; import com.alibaba.polardbx.optimizer.view.InformationSchemaTables; import com.alibaba.polardbx.optimizer.view.VirtualView; import org.apache.calcite.rex.RexDynamicParam; import org.apache.calcite.rex.RexLiteral; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.StringUtils; import java.math.BigInteger; @@ -79,82 +79,20 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, return cursor; } InformationSchemaTables informationSchemaTables = (InformationSchemaTables) virtualView; - Set schemaNames = OptimizerContext.getActiveSchemaNames(); - - List tableSchemaIndexValue = - virtualView.getIndex().get(informationSchemaTables.getTableSchemaIndex()); - - Object tableSchemaLikeValue = - virtualView.getLike().get(informationSchemaTables.getTableSchemaIndex()); - - List tableNameIndexValue = - virtualView.getIndex().get(informationSchemaTables.getTableNameIndex()); - - Object tableNameLikeValue = - virtualView.getLike().get(informationSchemaTables.getTableNameIndex()); + final int schemaIndex = InformationSchemaTables.getTableSchemaIndex(); + final int tableIndex = InformationSchemaTables.getTableNameIndex(); Map params = executionContext.getParams().getCurrentParameter(); - // schemaIndex - Set indexSchemaNames = new HashSet<>(); - if (tableSchemaIndexValue != null && !tableSchemaIndexValue.isEmpty()) { - for (Object obj : tableSchemaIndexValue) { - if (obj instanceof RexDynamicParam) { - String schemaName = String.valueOf(params.get(((RexDynamicParam) obj).getIndex() + 1).getValue()); - 
indexSchemaNames.add(schemaName.toLowerCase()); - } else if (obj instanceof RexLiteral) { - String schemaName = ((RexLiteral) obj).getValueAs(String.class); - indexSchemaNames.add(schemaName.toLowerCase()); - } - } - schemaNames = schemaNames.stream() - .filter(schemaName -> indexSchemaNames.contains(schemaName.toLowerCase())) - .collect(Collectors.toSet()); - } - - // schemaLike - String schemaLike = null; - if (tableSchemaLikeValue != null) { - if (tableSchemaLikeValue instanceof RexDynamicParam) { - schemaLike = - String.valueOf(params.get(((RexDynamicParam) tableSchemaLikeValue).getIndex() + 1).getValue()); - } else if (tableSchemaLikeValue instanceof RexLiteral) { - schemaLike = ((RexLiteral) tableSchemaLikeValue).getValueAs(String.class); - } - if (schemaLike != null) { - final String likeArg = schemaLike; - schemaNames = - schemaNames.stream().filter(schemaName -> new Like(null, null).like(schemaName, likeArg)).collect( - Collectors.toSet()); - } - } + Set schemaNames = + virtualView.applyFilters(schemaIndex, params, OptimizerContext.getActiveSchemaNames()); // tableIndex - Set indexTableNames = new HashSet<>(); - if (tableNameIndexValue != null && !tableNameIndexValue.isEmpty()) { - for (Object obj : tableNameIndexValue) { - ExecUtils.handleTableNameParams(obj, params, indexSchemaNames); - } - } - + Set indexTableNames = virtualView.getEqualsFilterValues(tableIndex, params); // tableLike - String tableLike = null; - if (tableNameLikeValue != null) { - if (tableNameLikeValue instanceof RexDynamicParam) { - tableLike = - String.valueOf(params.get(((RexDynamicParam) tableNameLikeValue).getIndex() + 1).getValue()); - } else if (tableNameLikeValue instanceof RexLiteral) { - tableLike = ((RexLiteral) tableNameLikeValue).getValueAs(String.class); - } - } - - BigIntegerType bigIntegerType = new BigIntegerType(); + String tableLike = virtualView.getLikeString(tableIndex, params); boolean once = true; - - boolean enableLowerCase = - executionContext.getParamManager().getBoolean(ConnectionParams.ENABLE_LOWER_CASE_TABLE_NAMES); - for (String schemaName : schemaNames) { SchemaManager schemaManager = OptimizerContext.getContext(schemaName).getLatestSchemaManager(); @@ -280,8 +218,8 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, cursor.addRow(new Object[] { rs.getObject("TABLE_CATALOG"), - enableLowerCase ? StringUtils.lowerCase(tableSchema) : tableSchema, - enableLowerCase ? 
StringUtils.lowerCase(logicalTableName) : logicalTableName, + StringUtils.lowerCase(tableSchema), + StringUtils.lowerCase(logicalTableName), rs.getObject("TABLE_TYPE"), rs.getObject("ENGINE"), rs.getObject("VERSION"), diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTcpPerfHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTcpPerfHandler.java index 2f0e36439..353d2a62c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTcpPerfHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaTcpPerfHandler.java @@ -23,6 +23,7 @@ import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.sync.ISyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.view.InformationSchemaTcpPerf; @@ -64,7 +65,7 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, } catch (Exception e) { throw new TddlRuntimeException(ErrorCode.ERR_CONFIG, e, e.getMessage()); } - List>> results = SyncManagerHelper.sync(syncAction); + List>> results = SyncManagerHelper.sync(syncAction, SyncScope.CURRENT_ONLY); for (List> rs : results) { if (rs == null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaVariablesHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaVariablesHandler.java index 49df2d42f..f7520166f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaVariablesHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaVariablesHandler.java @@ -25,6 +25,7 @@ import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.sync.ISyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.planner.ExecutionPlan; import com.alibaba.polardbx.optimizer.core.planner.Planner; @@ -91,7 +92,8 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, } catch (Exception e) { throw new TddlRuntimeException(ErrorCode.ERR_CONFIG, e, e.getMessage()); } - final List>> allTaskValues = SyncManagerHelper.sync(fetchTimerTaskInfoSyncAction); + final List>> allTaskValues = SyncManagerHelper.sync(fetchTimerTaskInfoSyncAction, + SyncScope.MASTER_ONLY); if (allTaskValues == null) { return resultCursor; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaWorkloadHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaWorkloadHandler.java index 88b7f56b7..4e8360ac3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaWorkloadHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/InformationSchemaWorkloadHandler.java @@ -24,7 +24,7 @@ 
import com.alibaba.polardbx.executor.handler.VirtualViewHandler; import com.alibaba.polardbx.executor.sync.ISyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; -import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.view.InformationSchemaWorkload; @@ -32,7 +32,6 @@ import java.util.List; import java.util.Map; -import java.util.Set; /** * @author dylan @@ -74,7 +73,8 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, throw new TddlRuntimeException(ErrorCode.ERR_CONFIG, e, e.getMessage()); } - List>> results = SyncManagerHelper.sync(showProcesslistSyncAction); + List>> results = SyncManagerHelper.sync(showProcesslistSyncAction, + SyncScope.CURRENT_ONLY); for (List> nodeRows : results) { if (nodeRows == null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/VirtualStatisticHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/VirtualStatisticHandler.java index b75eb0b19..b0f468211 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/VirtualStatisticHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/handler/subhandler/VirtualStatisticHandler.java @@ -16,8 +16,8 @@ package com.alibaba.polardbx.executor.handler.subhandler; +import com.alibaba.polardbx.common.utils.LoggerUtil; import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.handler.VirtualViewHandler; @@ -34,6 +34,7 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.view.VirtualStatistic; import com.alibaba.polardbx.optimizer.view.VirtualView; +import com.google.common.collect.ImmutableList; import java.util.Locale; import java.util.Map; @@ -45,7 +46,7 @@ */ public class VirtualStatisticHandler extends BaseVirtualViewSubClassHandler { - private static final Logger logger = LoggerFactory.getLogger("STATISTICS"); + private static final Logger logger = LoggerUtil.statisticsLogger; public VirtualStatisticHandler(VirtualViewHandler virtualViewHandler) { super(virtualViewHandler); @@ -93,7 +94,7 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, for (ColumnMeta columnMeta : tableMeta.getAllColumns()) { String columnName = columnMeta.getOriginColumnName(); columnName = columnName.toLowerCase(Locale.ROOT); - Object[] objects = new Object[11]; + Object[] objects = new Object[12]; objects[0] = schema; objects[1] = tableName; objects[2] = cacheLine.getRowCount(); @@ -113,6 +114,9 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, objects[8] = cacheLine.getSampleRate(); objects[9] = cacheLine.getLastModifyTime(); objects[10] = cacheLine.getLastAccessTime(); + objects[11] = StatisticManager.getInstance() + .hotColumns(schema, tableName, ImmutableList.of(columnName), false) + .getBooleanValue(); cursor.addRow(objects); } @@ -124,7 +128,7 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, continue; } String columnsName = StatisticUtils.buildColumnsName(cols); - 
Object[] objects = new Object[11]; + Object[] objects = new Object[12]; objects[0] = schema; objects[1] = tableName; objects[2] = cacheLine.getRowCount(); @@ -143,6 +147,9 @@ public Cursor handle(VirtualView virtualView, ExecutionContext executionContext, objects[8] = cacheLine.getSampleRate(); objects[9] = cacheLine.getLastModifyTime(); objects[10] = cacheLine.getLastAccessTime(); + objects[11] = + StatisticManager.getInstance().hotColumns(schema, tableName, cols, false) + .getBooleanValue(); cursor.addRow(objects); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/MdlContext.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/MdlContext.java index 1bc519e41..96a20dffe 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/MdlContext.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/MdlContext.java @@ -16,9 +16,8 @@ package com.alibaba.polardbx.executor.mdl; -import com.google.common.base.Preconditions; - import com.alibaba.polardbx.executor.mpp.metadata.NotNull; +import com.google.common.base.Preconditions; import java.util.Iterator; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/MdlKey.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/MdlKey.java index 6cb1e81c1..addfb7f56 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/MdlKey.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/MdlKey.java @@ -16,9 +16,9 @@ package com.alibaba.polardbx.executor.mdl; +import com.alibaba.polardbx.executor.mpp.metadata.NotNull; import com.google.common.base.Preconditions; -import com.alibaba.polardbx.executor.mpp.metadata.NotNull; import java.util.Objects; /** diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/MdlLock.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/MdlLock.java index cb15cbe0b..5777079be 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/MdlLock.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/MdlLock.java @@ -17,7 +17,6 @@ package com.alibaba.polardbx.executor.mdl; import com.alibaba.polardbx.common.utils.Assert; - import com.alibaba.polardbx.executor.mpp.metadata.NotNull; import java.util.concurrent.locks.StampedLock; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/MdlManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/MdlManager.java index efe65b4b6..af08e9484 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/MdlManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/MdlManager.java @@ -20,15 +20,13 @@ import com.alibaba.polardbx.executor.mdl.context.MdlContextStamped; import com.alibaba.polardbx.executor.mdl.context.PreemptiveMdlContextStamped; import com.alibaba.polardbx.executor.mdl.manager.MdlManagerStamped; -import com.google.common.base.Preconditions; - import com.alibaba.polardbx.executor.mpp.metadata.NotNull; +import com.google.common.base.Preconditions; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; -import java.util.function.Function; /** * @author chenmo.cm diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/MdlRequest.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/MdlRequest.java 
index b285ecad5..584771df1 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/MdlRequest.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/MdlRequest.java @@ -16,8 +16,6 @@ package com.alibaba.polardbx.executor.mdl; -import com.alibaba.polardbx.executor.mpp.metadata.NotNull; -import com.alibaba.polardbx.common.jdbc.BytesSql; import com.alibaba.polardbx.druid.sql.parser.ByteString; import com.alibaba.polardbx.executor.mpp.metadata.NotNull; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/context/MdlContextStamped.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/context/MdlContextStamped.java index b082c3bcd..75f14858f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/context/MdlContextStamped.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/context/MdlContextStamped.java @@ -16,7 +16,6 @@ package com.alibaba.polardbx.executor.mdl.context; -import com.alibaba.polardbx.common.jdbc.BytesSql; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.druid.sql.parser.ByteString; @@ -25,7 +24,6 @@ import com.alibaba.polardbx.executor.mdl.MdlManager; import com.alibaba.polardbx.executor.mdl.MdlRequest; import com.alibaba.polardbx.executor.mdl.MdlTicket; - import com.alibaba.polardbx.executor.mpp.metadata.NotNull; import java.util.Iterator; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/context/PreemptiveMdlContextStamped.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/context/PreemptiveMdlContextStamped.java index 53e3ecadf..3da514f99 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/context/PreemptiveMdlContextStamped.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/context/PreemptiveMdlContextStamped.java @@ -27,13 +27,13 @@ import com.alibaba.polardbx.executor.mdl.MdlRequest; import com.alibaba.polardbx.executor.mdl.MdlTicket; import com.alibaba.polardbx.executor.mdl.MdlType; +import com.alibaba.polardbx.executor.mpp.metadata.NotNull; import com.alibaba.polardbx.executor.sync.ISyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.google.common.primitives.Longs; -import com.alibaba.polardbx.executor.mpp.metadata.NotNull; - import java.util.List; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ThreadPoolExecutor; @@ -146,6 +146,6 @@ private void killByFrontendConnId(long frontendConnId) { } catch (Exception e) { throw new TddlRuntimeException(ErrorCode.ERR_CONFIG, e, e.getMessage()); } - SyncManagerHelper.sync(killSyncAction, schemaName); + SyncManagerHelper.sync(killSyncAction, schemaName, SyncScope.ALL); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/lock/MdlLockStamped.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/lock/MdlLockStamped.java index 3f9fefd6a..550eb2449 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/lock/MdlLockStamped.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/lock/MdlLockStamped.java @@ -20,7 +20,6 @@ import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.executor.mdl.MdlKey; import 
com.alibaba.polardbx.executor.mdl.MdlLock; - import com.alibaba.polardbx.executor.mpp.metadata.NotNull; import java.util.concurrent.locks.StampedLock; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/manager/MdlManagerStamped.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/manager/MdlManagerStamped.java index 58352c23c..df35bdf1a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/manager/MdlManagerStamped.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mdl/manager/MdlManagerStamped.java @@ -30,7 +30,6 @@ import com.alibaba.polardbx.executor.mdl.MdlTicket; import com.alibaba.polardbx.executor.mdl.MdlType; import com.alibaba.polardbx.executor.mdl.lock.MdlLockStamped; - import com.alibaba.polardbx.executor.mpp.metadata.NotNull; import java.util.ArrayList; @@ -63,9 +62,13 @@ public class MdlManagerStamped extends MdlManager { private final ScheduledExecutorService scheduler; - private final int cleanInterval = 60 * 60; + private final static int cleanInterval = 60 * 60; public MdlManagerStamped(String schema) { + this(schema, cleanInterval); + } + + public MdlManagerStamped(String schema, int cleanIntervalInSec) { super(schema); scheduler = ExecutorUtil.createScheduler(1, @@ -82,8 +85,10 @@ public MdlManagerStamped(String schema) { mdlKeys.forEach(k -> mdlMap.computeIfPresent(k, (key, lock) -> { if (lock.latchWrite()) { try { - // remove unused lock - return null; + if (!lock.isLocked()) { + // remove unused lock + return null; + } } finally { lock.unlatchWrite(); } @@ -95,7 +100,7 @@ public MdlManagerStamped(String schema) { } catch (Exception e) { logger.error(e); } - }, cleanInterval, cleanInterval, TimeUnit.SECONDS); + }, cleanIntervalInSec, cleanIntervalInSec, TimeUnit.SECONDS); } // For show metadata lock. diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/OutputBuffers.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/OutputBuffers.java index e50528419..aff39fb53 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/OutputBuffers.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/OutputBuffers.java @@ -29,11 +29,11 @@ */ package com.alibaba.polardbx.executor.mpp; +import com.alibaba.polardbx.util.MoreObjects; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonValue; import com.google.common.collect.ImmutableMap; -import com.alibaba.polardbx.util.MoreObjects; import java.util.HashMap; import java.util.Map; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/Session.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/Session.java index ebdfae7ba..23580bd33 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/Session.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/Session.java @@ -137,8 +137,10 @@ public void generateTsoInfo() throws SQLException { this.omitTso = storageInfoManager.supportCtsTransaction() || this.lizard1PC; } if (!omitTso) { - this.tsoTime = ((IMppTsoTransaction) clientContext.getTransaction()).nextTimestamp(t -> { - }); + long externalTso = clientContext.getSnapshotTs(); + this.tsoTime = externalTso > 0 ? 
externalTso : + ((IMppTsoTransaction) clientContext.getTransaction()).nextTimestamp(t -> { + }); } for (Map.Entry group : groups.entrySet()) { @@ -230,9 +232,11 @@ public SessionRepresentation toSessionRepresentation() { clientContext.getConnection().getLastInsertId(), clientContext.getTimeZone(), tsoTime, + clientContext.getFinalPlan().isUseColumnar(), dnLsns, omitTso, lizard1PC, + clientContext.getColumnarTracer(), clientContext.getWorkloadType(), extraServerVariables); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/AbstractStatementClient.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/AbstractStatementClient.java index 675267939..2ed4265e9 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/AbstractStatementClient.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/AbstractStatementClient.java @@ -19,8 +19,8 @@ import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; -import com.alibaba.polardbx.util.MoreObjects; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.util.MoreObjects; import io.airlift.http.client.FullJsonResponseHandler; import io.airlift.http.client.HttpStatus; import io.airlift.http.client.Request; @@ -31,9 +31,9 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; +import static com.alibaba.polardbx.common.exception.code.ErrorCode.ERR_DATA_OUTPUT; import static com.google.common.base.Preconditions.checkState; import static com.google.common.net.HttpHeaders.USER_AGENT; -import static com.alibaba.polardbx.common.exception.code.ErrorCode.ERR_DATA_OUTPUT; import static io.airlift.http.client.Request.Builder.prepareGet; import static java.lang.String.format; import static java.util.Objects.requireNonNull; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/DriverResultCursor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/DriverResultCursor.java index 0485a6ac3..d5ebf2224 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/DriverResultCursor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/DriverResultCursor.java @@ -16,7 +16,6 @@ package com.alibaba.polardbx.executor.mpp.client; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.executor.chunk.Chunk; @@ -34,6 +33,7 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.CursorMeta; import com.alibaba.polardbx.optimizer.core.row.Row; +import com.google.common.util.concurrent.ListenableFuture; import java.util.ArrayList; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/FailureInfo.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/FailureInfo.java index c63b48cb7..3e5e7ff6a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/FailureInfo.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/FailureInfo.java @@ -29,14 +29,14 @@ */ package com.alibaba.polardbx.executor.mpp.client; 
+import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; +import com.alibaba.polardbx.executor.mpp.metadata.NotNull; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.collect.ImmutableList; -import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; import javax.annotation.Nullable; import javax.annotation.concurrent.Immutable; -import com.alibaba.polardbx.executor.mpp.metadata.NotNull; import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/QueryError.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/QueryError.java index dd59d4c70..5818ff828 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/QueryError.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/QueryError.java @@ -29,15 +29,15 @@ */ package com.alibaba.polardbx.executor.mpp.client; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.executor.mpp.metadata.NotNull; import com.alibaba.polardbx.util.MoreObjects; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; import javax.annotation.Nullable; import javax.annotation.concurrent.Immutable; -import com.alibaba.polardbx.executor.mpp.metadata.NotNull; @Immutable public class QueryError { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/QueryResults.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/QueryResults.java index 309339c82..ef421929f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/QueryResults.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/QueryResults.java @@ -29,13 +29,13 @@ */ package com.alibaba.polardbx.executor.mpp.client; +import com.alibaba.polardbx.executor.mpp.metadata.NotNull; +import com.alibaba.polardbx.util.MoreObjects; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -import com.alibaba.polardbx.util.MoreObjects; import javax.annotation.Nullable; import javax.annotation.concurrent.Immutable; -import com.alibaba.polardbx.executor.mpp.metadata.NotNull; import java.net.URI; import static com.google.common.collect.Iterables.unmodifiableIterable; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/SmpResultCursor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/SmpResultCursor.java index d84a7fb06..77dfb18df 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/SmpResultCursor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/client/SmpResultCursor.java @@ -16,7 +16,6 @@ package com.alibaba.polardbx.executor.mpp.client; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; @@ -33,6 +32,7 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import 
com.alibaba.polardbx.optimizer.core.CursorMeta; import com.alibaba.polardbx.optimizer.core.row.Row; +import com.google.common.util.concurrent.ListenableFuture; import java.util.ArrayList; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/BaseModule.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/BaseModule.java index 6bc5ede77..13a913db3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/BaseModule.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/BaseModule.java @@ -16,12 +16,12 @@ package com.alibaba.polardbx.executor.mpp.deploy; -import com.google.inject.Provides; import com.alibaba.polardbx.common.properties.MppConfig; import com.alibaba.polardbx.executor.mpp.Threads; import com.alibaba.polardbx.executor.mpp.execution.ForDriverYieldExecutor; import com.alibaba.polardbx.executor.mpp.execution.ForQueryExecution; import com.alibaba.polardbx.executor.mpp.execution.ForTaskNotificationExecutor; +import com.google.inject.Provides; import io.airlift.configuration.AbstractConfigurationAwareModule; import javax.inject.Singleton; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/BootstrapConfig.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/BootstrapConfig.java index 29015bc2b..abe57371a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/BootstrapConfig.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/BootstrapConfig.java @@ -24,6 +24,7 @@ public class BootstrapConfig { public static final String CONFIG_KEY_NODE_ID = "node.id"; public static final String CONFIG_KEY_HTTP_PORT = "http-server.http.port"; public static final String CONFIG_KEY_HTTP_SERVER_LOG_ENABLED = "http-server.log.enabled"; + public static final String CONFIG_KEY_HTTP_SERVER_ADMIN_ENABLED = "http-server.admin.enabled"; public static final String CONFIG_KEY_HTTP_SERVER_MAX_THREADS = "http-server.threads.max"; //200 public static final String CONFIG_KEY_HTTP_SERVER_MIN_THREADS = "http-server.threads.min"; //2 diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/DiscoveryModule.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/DiscoveryModule.java index 66ad364b0..f6f80c952 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/DiscoveryModule.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/DiscoveryModule.java @@ -16,13 +16,13 @@ package com.alibaba.polardbx.executor.mpp.deploy; +import com.alibaba.polardbx.executor.mpp.Threads; import com.google.common.base.Preconditions; import com.google.inject.Binder; import com.google.inject.Module; import com.google.inject.Provides; import com.google.inject.Scopes; import com.google.inject.multibindings.Multibinder; -import com.alibaba.polardbx.executor.mpp.Threads; import io.airlift.configuration.ConfigBinder; import io.airlift.discovery.client.Announcement; import io.airlift.discovery.client.Announcer; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/LocalModule.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/LocalModule.java index f0844aa5a..1ea8f2e78 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/LocalModule.java +++ 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/LocalModule.java @@ -16,9 +16,6 @@ package com.alibaba.polardbx.executor.mpp.deploy; -import com.google.inject.Binder; -import com.google.inject.Provides; -import com.google.inject.Scopes; import com.alibaba.polardbx.common.utils.version.Version; import com.alibaba.polardbx.executor.mpp.Threads; import com.alibaba.polardbx.executor.mpp.discover.LocalNodeManager; @@ -60,6 +57,9 @@ import com.alibaba.polardbx.gms.node.InternalNode; import com.alibaba.polardbx.gms.node.InternalNodeManager; import com.alibaba.polardbx.gms.node.NodeVersion; +import com.google.inject.Binder; +import com.google.inject.Provides; +import com.google.inject.Scopes; import io.airlift.units.Duration; import javax.inject.Singleton; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/LocalServer.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/LocalServer.java index efcd19087..cc402f8f6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/LocalServer.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/LocalServer.java @@ -60,6 +60,7 @@ public LocalServer(int id, String serverHost, int mppHttpPort) { bootstrapProperties.put(BootstrapConfig.CONFIG_KEY_HTTP_PORT, String.valueOf(this.mppPort)); bootstrapProperties.put(BootstrapConfig.CONFIG_KEY_NODE_ENV, MPP_POLARDBX); bootstrapProperties.put(BootstrapConfig.CONFIG_KEY_HTTP_SERVER_LOG_ENABLED, "false"); + bootstrapProperties.put(BootstrapConfig.CONFIG_KEY_HTTP_SERVER_ADMIN_ENABLED, "false"); bootstrapProperties.put(BootstrapConfig.CONFIG_KEY_HTTP_SERVER_MAX_THREADS, String.valueOf(MppConfig.getInstance().getHttpServerMaxThreads())); bootstrapProperties.put(BootstrapConfig.CONFIG_KEY_HTTP_SERVER_MIN_THREADS, diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/MainModule.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/MainModule.java index 2e2ed2fe2..a253a2785 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/MainModule.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/MainModule.java @@ -16,10 +16,6 @@ package com.alibaba.polardbx.executor.mpp.deploy; -import com.google.inject.Binder; -import com.google.inject.Provides; -import com.google.inject.Scopes; -import com.google.inject.TypeLiteral; import com.alibaba.polardbx.common.properties.MppConfig; import com.alibaba.polardbx.common.utils.version.Version; import com.alibaba.polardbx.executor.mpp.Threads; @@ -55,6 +51,10 @@ import com.alibaba.polardbx.gms.node.InternalNode; import com.alibaba.polardbx.gms.node.InternalNodeManager; import com.alibaba.polardbx.gms.node.NodeVersion; +import com.google.inject.Binder; +import com.google.inject.Provides; +import com.google.inject.Scopes; +import com.google.inject.TypeLiteral; import io.airlift.concurrent.BoundedExecutor; import io.airlift.units.DataSize; import io.airlift.units.Duration; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/MppServer.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/MppServer.java index 2ea995007..00cb96022 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/MppServer.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/MppServer.java @@ -78,6 +78,7 @@ public MppServer(int id, boolean 
isMppServer, boolean isMppWorker, String server bootstrapProperties.put(BootstrapConfig.CONFIG_KEY_HTTP_PORT, String.valueOf(this.mppPort)); bootstrapProperties.put(BootstrapConfig.CONFIG_KEY_NODE_ENV, MPP_POLARDBX); bootstrapProperties.put(BootstrapConfig.CONFIG_KEY_HTTP_SERVER_LOG_ENABLED, "false"); + bootstrapProperties.put(BootstrapConfig.CONFIG_KEY_HTTP_SERVER_ADMIN_ENABLED, "false"); bootstrapProperties.put(BootstrapConfig.CONFIG_KEY_HTTP_SERVER_MAX_THREADS, String.valueOf(MppConfig.getInstance().getHttpServerMaxThreads())); bootstrapProperties.put(BootstrapConfig.CONFIG_KEY_HTTP_SERVER_MIN_THREADS, diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/Server.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/Server.java index 9889bf716..595f2ffbc 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/Server.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/Server.java @@ -21,16 +21,12 @@ import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.mpp.Threads; import com.alibaba.polardbx.executor.mpp.discover.PolarDBXNodeStatusManager; -import com.alibaba.polardbx.executor.mpp.execution.AdaptiveMemoryHandlerImpl; -import com.alibaba.polardbx.executor.mpp.execution.MemoryKiller; import com.alibaba.polardbx.executor.mpp.execution.MemoryRevokingScheduler; import com.alibaba.polardbx.executor.mpp.execution.QueryManager; import com.alibaba.polardbx.executor.mpp.execution.TaskExecutor; import com.alibaba.polardbx.executor.operator.spill.SpillerFactory; import com.alibaba.polardbx.gms.node.InternalNode; import com.alibaba.polardbx.gms.node.InternalNodeManager; -import com.alibaba.polardbx.optimizer.memory.AdaptiveMemoryHandler; -import com.alibaba.polardbx.optimizer.memory.MemoryManager; import com.alibaba.polardbx.optimizer.memory.MemorySetting; import java.util.concurrent.ScheduledExecutorService; @@ -92,10 +88,6 @@ public void start() { memoryRevokingScheduler = new MemoryRevokingScheduler(memoryManagementExecutor); memoryRevokingScheduler.start(); } - - AdaptiveMemoryHandler adaptiveMemoryHandlerImpl = new AdaptiveMemoryHandlerImpl(new MemoryKiller()); - MemoryManager.getInstance().getApMemoryPool().setAdaptiveMemoryHandler(adaptiveMemoryHandlerImpl); - MemoryManager.getInstance().getTpMemoryPool().setAdaptiveMemoryHandler(adaptiveMemoryHandlerImpl); } public void stop() { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/ServerModule.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/ServerModule.java index c154f6021..65f929e36 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/ServerModule.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/ServerModule.java @@ -16,8 +16,6 @@ package com.alibaba.polardbx.executor.mpp.deploy; -import com.google.inject.Binder; -import com.google.inject.Scopes; import com.alibaba.polardbx.executor.mpp.execution.NodeTaskMap; import com.alibaba.polardbx.executor.mpp.execution.QueryManager; import com.alibaba.polardbx.executor.mpp.execution.RemoteTaskFactory; @@ -34,6 +32,8 @@ import com.alibaba.polardbx.executor.mpp.web.ForQueryInfo; import com.alibaba.polardbx.executor.mpp.web.QueryResource; import com.alibaba.polardbx.executor.mpp.web.StageResource; +import com.google.inject.Binder; +import com.google.inject.Scopes; import 
io.airlift.configuration.AbstractConfigurationAwareModule; import static io.airlift.http.client.HttpClientBinder.httpClientBinder; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/WorkerModule.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/WorkerModule.java index 5e9ba86c4..14ec613ef 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/WorkerModule.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/deploy/WorkerModule.java @@ -16,9 +16,6 @@ package com.alibaba.polardbx.executor.mpp.deploy; -import com.google.inject.Binder; -import com.google.inject.Key; -import com.google.inject.Scopes; import com.alibaba.polardbx.executor.mpp.execution.SqlTaskManager; import com.alibaba.polardbx.executor.mpp.execution.TaskManagementExecutor; import com.alibaba.polardbx.executor.mpp.execution.TaskManager; @@ -27,6 +24,9 @@ import com.alibaba.polardbx.executor.mpp.web.StatusResource; import com.alibaba.polardbx.executor.mpp.web.ThreadResource; import com.alibaba.polardbx.executor.mpp.web.WorkerResource; +import com.google.inject.Binder; +import com.google.inject.Key; +import com.google.inject.Scopes; import io.airlift.configuration.AbstractConfigurationAwareModule; import static io.airlift.http.client.HttpClientBinder.httpClientBinder; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/discover/DiscoveryManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/discover/DiscoveryManager.java new file mode 100644 index 000000000..ff7c588a0 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/discover/DiscoveryManager.java @@ -0,0 +1,193 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.executor.mpp.discover;
+
+import com.alibaba.polardbx.common.utils.logger.Logger;
+import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
+import com.alibaba.polardbx.executor.mpp.Threads;
+import com.alibaba.polardbx.executor.mpp.deploy.MppServer;
+import com.alibaba.polardbx.executor.mpp.deploy.Server;
+import com.alibaba.polardbx.executor.mpp.deploy.ServiceProvider;
+import com.alibaba.polardbx.gms.node.AllNodes;
+import com.alibaba.polardbx.gms.node.InternalNode;
+import com.alibaba.polardbx.gms.node.NodeState;
+import com.google.common.net.HttpHeaders;
+import com.google.common.net.MediaType;
+import io.airlift.http.client.HttpUriBuilder;
+import io.airlift.http.client.JsonBodyGenerator;
+import io.airlift.http.client.Request;
+
+import java.net.URI;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import static io.airlift.http.client.HttpUriBuilder.uriBuilderFrom;
+import static io.airlift.http.client.JsonBodyGenerator.jsonBodyGenerator;
+import static io.airlift.http.client.JsonResponseHandler.createJsonResponseHandler;
+import static io.airlift.http.client.Request.Builder.prepareDelete;
+import static io.airlift.http.client.Request.Builder.preparePost;
+import static java.util.concurrent.Executors.newSingleThreadScheduledExecutor;
+
+public class DiscoveryManager {
+
+    private static final Logger logger = LoggerFactory.getLogger(DiscoveryManager.class);
+    private static volatile DiscoveryManager instance;
+    private ScheduledExecutorService nodeStateUpdateExecutor;
+    private Map<String, NodeDiscoveryStatus> nodeMap = new ConcurrentHashMap<>();
+    private Server server;
+
+    public static DiscoveryManager getInstance() {
+        if (instance == null) {
+            synchronized (DiscoveryManager.class) {
+                if (instance == null) {
+                    instance = new DiscoveryManager();
+                }
+            }
+        }
+        return instance;
+    }
+
+    private DiscoveryManager() {
+        if (ServiceProvider.getInstance().clusterMode()) {
+            this.nodeStateUpdateExecutor = newSingleThreadScheduledExecutor(
+                Threads.threadsNamed("discovery-node-state"));
+            this.server = ServiceProvider.getInstance().getServer();
+            nodeStateUpdateExecutor.scheduleWithFixedDelay(() -> {
+                if (server != null && server.getLocalNode().isLeader()) {
+                    try {
+                        boolean changed = false;
+                        if (nodeMap.size() > 0) {
+                            // no heartbeat within 30s: consider the node unavailable
+                            long timeInactive = System.currentTimeMillis() - 30000L;
+                            for (String nodeId : nodeMap.keySet()) {
+                                NodeDiscoveryStatus status = nodeMap.get(nodeId);
+                                if (status != null && status.nodestate == NodeState.ACTIVE
+                                    && status.modifyTime < timeInactive) {
+                                    status.nodestate = NodeState.INACTIVE;
+                                    changed = true;
+                                    logger.warn("inactive node:" + status.node);
+                                }
+                            }
+                        }
+                        if (changed) {
+                            updateNodes();
+                        }
+                    } catch (Exception e) {
+                        logger.error(e);
+                    }
+                }
+            }, 5, 5, TimeUnit.SECONDS);
+        }
+    }
+
+    public void updateNodes() {
+        if (!ServiceProvider.getInstance().clusterMode()) {
+            return;
+        }
+        Set<InternalNode> activeNodes = new HashSet<>();
+        Set<InternalNode> inactiveNodes = new HashSet<>();
+        Set<InternalNode> shuttingDownNodes = new HashSet<>();
+        for (String nodeId : nodeMap.keySet()) {
+            NodeDiscoveryStatus status = nodeMap.get(nodeId);
+            switch (status.nodestate) {
+            case ACTIVE:
+                if (!status.node.isInBlacklist()) {
+                    activeNodes.add(status.node);
+                }
+                break;
+            case SHUTTING_DOWN:
+                shuttingDownNodes.add(status.node);
+                break;
+            case INACTIVE:
+            case TEMP_INACTIVE:
+                inactiveNodes.add(status.node);
+                break;
+            default:
+                break;
+            }
+        }
+        ((MppServer) server).getNodeManager().updateNodes(activeNodes, null, inactiveNodes, shuttingDownNodes);
+    }
+
+    public boolean notifyNode(InternalNode node) {
+        NodeDiscoveryStatus dnode = nodeMap.get(node.getNodeIdentifier());
+        if (dnode != null && dnode.nodestate == NodeState.ACTIVE) {
+            dnode.modifyTime = System.currentTimeMillis();
+            if (!dnode.node.toString().equals(node.toString())) {
+                logger.warn("modify node:" + node);
+                dnode.node = node;
+                updateNodes();
+                return true;
+            }
+        } else if (dnode != null && dnode.nodestate == NodeState.TEMP_INACTIVE) {
+            if (logger.isDebugEnabled()) {
+                logger.debug("check temp inactive node:" + node);
+            }
+            if (System.currentTimeMillis() - dnode.modifyTime > 60000L) {
+                logger.warn("reactive node:" + node);
+                nodeMap.put(node.getNodeIdentifier(), new NodeDiscoveryStatus(node));
+                updateNodes();
+                return true;
+            }
+        } else {
+            logger.warn("input node:" + node);
+            nodeMap.put(node.getNodeIdentifier(), new NodeDiscoveryStatus(node));
+            updateNodes();
+            return true;
+        }
+        return false;
+    }
+
+    public boolean removeNode(InternalNode node, String type) {
+        logger.warn("removeNode:" + node.getNodeIdentifier() + ",type=" + type);
+        NodeDiscoveryStatus dnode = nodeMap.get(node.getNodeIdentifier());
+        if (dnode != null) {
+            if (type.equalsIgnoreCase("temp") && dnode.nodestate == NodeState.ACTIVE) {
+                dnode.modifyTime = System.currentTimeMillis();
+                dnode.nodestate = NodeState.TEMP_INACTIVE;
+                updateNodes();
+                return true;
+            } else if (type.equalsIgnoreCase("black")) {
+                dnode.node.setInBlacklist(true);
+                updateNodes();
+                return true;
+            } else if (type.equalsIgnoreCase("unblack")) {
+                dnode.node.setInBlacklist(false);
+                updateNodes();
+                return true;
+            }
+        }
+        return false;
+    }
+
+    class NodeDiscoveryStatus {
+        InternalNode node;
+        long modifyTime;
+        NodeState nodestate;
+
+        public NodeDiscoveryStatus(InternalNode node) {
+            this.node = node;
+            this.modifyTime = System.currentTimeMillis();
+            this.nodestate = NodeState.ACTIVE;
+        }
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/AdaptiveMemoryHandlerImpl.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/AdaptiveMemoryHandlerImpl.java
deleted file mode 100644
index 717064cb3..000000000
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/AdaptiveMemoryHandlerImpl.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright [2013-2021], Alibaba Group Holding Limited
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
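The new DiscoveryManager above implements leader-driven node discovery: followers report in via notifyNode, and the leader's 5-second sweep marks any node that has been silent for more than 30 seconds INACTIVE before republishing the cluster view through updateNodes. The following standalone sketch isolates just that timeout sweep; Status, heartbeat, and sweep are simplified stand-ins for NodeDiscoveryStatus, notifyNode, and the scheduled task, not the real API.

// Standalone sketch of the heartbeat-timeout sweep in DiscoveryManager above.
// The 30s threshold mirrors the diff; everything else is a simplification.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class HeartbeatSweepSketch {

    enum State { ACTIVE, INACTIVE }

    static class Status {
        final String nodeId;
        volatile State state = State.ACTIVE;
        volatile long lastHeartbeat = System.currentTimeMillis();

        Status(String nodeId) {
            this.nodeId = nodeId;
        }
    }

    static final long TIMEOUT_MS = 30000L;   // same 30s threshold as the diff
    static final Map<String, Status> NODES = new ConcurrentHashMap<>();

    // called on every heartbeat, like notifyNode(...)
    static void heartbeat(String nodeId) {
        NODES.computeIfAbsent(nodeId, Status::new).lastHeartbeat = System.currentTimeMillis();
    }

    // the periodic sweep the leader runs every 5s
    static boolean sweep() {
        boolean changed = false;
        long deadline = System.currentTimeMillis() - TIMEOUT_MS;
        for (Status s : NODES.values()) {
            if (s.state == State.ACTIVE && s.lastHeartbeat < deadline) {
                s.state = State.INACTIVE;    // no heartbeat within 30s: node is unavailable
                changed = true;
            }
        }
        return changed;                      // caller republishes the node set if true
    }

    public static void main(String[] args) {
        heartbeat("node-1");
        System.out.println("changed after fresh heartbeat: " + sweep()); // false
    }
}
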
- */ - -package com.alibaba.polardbx.executor.mpp.execution; - -import com.alibaba.polardbx.optimizer.memory.AdaptiveMemoryHandler; -import com.alibaba.polardbx.optimizer.memory.AdaptiveMemoryPool; -import com.alibaba.polardbx.optimizer.memory.MemoryManager; -import com.alibaba.polardbx.optimizer.memory.MemorySetting; - -public class AdaptiveMemoryHandlerImpl implements AdaptiveMemoryHandler { - - protected MemoryKiller memoryKiller; - - public AdaptiveMemoryHandlerImpl(MemoryKiller memoryKiller) { - this.memoryKiller = memoryKiller; - } - - @Override - public void revokeReleaseMemory() { - -// MemoryRevokingScheduler memoryRevokingScheduler = -// ServiceProvider.getInstance().getServer().getMemoryRevokingScheduler(); -// if (memoryRevokingScheduler != null) { -// AdaptiveMemoryPool apMemoryPool = MemoryManager.getInstance().getApMemoryPool(); -// long currentUsage = apMemoryPool.getMemoryUsage(); -// if (currentUsage > apMemoryPool.getMinLimit()) { -// long releaseMemory = (currentUsage - apMemoryPool.getMinLimit()) / 2; -// memoryRevokingScheduler.releaseMemory(releaseMemory); -// } -// } - } - - @Override - public void killApQuery() { - if (memoryKiller != null && MemorySetting.ENABLE_KILL) { - AdaptiveMemoryPool apMemoryPool = MemoryManager.getInstance().getApMemoryPool(); - long currentUsage = apMemoryPool.getMemoryUsage(); - if (currentUsage > apMemoryPool.getMinLimit()) { - long releaseMemory = (currentUsage - apMemoryPool.getMinLimit()) / 2; - memoryKiller.killMemory(releaseMemory); - } - } - } - - @Override - public void limitTpRate() { - //TODO - } - - @Override - public void limitApRate() { - //TODO -// MemoryManager.getInstance().getApMemoryPool().initApTokens(); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/DriverSplitRunner.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/DriverSplitRunner.java index 8c21dfa9a..5901eb93e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/DriverSplitRunner.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/DriverSplitRunner.java @@ -16,14 +16,17 @@ package com.alibaba.polardbx.executor.mpp.execution; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.DefaultSchema; import com.alibaba.polardbx.common.properties.MppConfig; import com.alibaba.polardbx.common.utils.logger.MDC; import com.alibaba.polardbx.executor.mpp.operator.Driver; +import com.alibaba.polardbx.executor.mpp.operator.DriverContext; +import com.google.common.util.concurrent.ListenableFuture; import javax.annotation.concurrent.GuardedBy; +import java.util.function.Supplier; + import static java.util.Objects.requireNonNull; public class DriverSplitRunner implements SplitRunner { @@ -108,4 +111,10 @@ public boolean moveLowPrioritizedQuery(long executeTime) { } return false; } + + @Override + public void runtimeStatsSupplier(Supplier supplier) { + DriverContext driverContext = driver.getDriverContext(); + driverContext.setDriverRuntimeStatisticsSupplier(supplier); + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/ExecutionFailureInfo.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/ExecutionFailureInfo.java index 2fe0d5064..74a06e098 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/ExecutionFailureInfo.java +++ 
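`DriverSplitRunner` implements the new `SplitRunner#runtimeStatsSupplier` hook by handing the supplier straight to its `DriverContext`, so runtime statistics are produced lazily when a reader asks for them rather than eagerly at registration. A minimal sketch of that supplier-injection pattern, where the generic payload type `T` stands in for the concrete driver-statistics type:

```java
import java.util.function.Supplier;

class StatsHolder<T> {
    // volatile: the supplier is registered by the execution thread and
    // read by monitoring threads.
    private volatile Supplier<T> statsSupplier;

    void setStatsSupplier(Supplier<T> supplier) {
        this.statsSupplier = supplier;
    }

    // Statistics are computed at read time, so an idle driver pays nothing.
    T snapshot() {
        Supplier<T> current = statsSupplier;
        return current == null ? null : current.get();
    }
}
```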
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/ExecutionFailureInfo.java @@ -29,18 +29,18 @@ */ package com.alibaba.polardbx.executor.mpp.execution; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.google.common.collect.ImmutableList; import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.executor.mpp.client.FailureInfo; +import com.alibaba.polardbx.executor.mpp.metadata.NotNull; import com.alibaba.polardbx.executor.mpp.util.ImmutableCollectors; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.collect.ImmutableList; import javax.annotation.Nullable; import javax.annotation.concurrent.Immutable; -import com.alibaba.polardbx.executor.mpp.metadata.NotNull; import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/FutureStateChange.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/FutureStateChange.java index aee848fbf..abf8474df 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/FutureStateChange.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/FutureStateChange.java @@ -29,10 +29,10 @@ */ package com.alibaba.polardbx.executor.mpp.execution; +import com.alibaba.polardbx.executor.mpp.util.MoreExecutors; import com.google.common.collect.ImmutableSet; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.SettableFuture; -import com.alibaba.polardbx.executor.mpp.util.MoreExecutors; import javax.annotation.concurrent.GuardedBy; import javax.annotation.concurrent.ThreadSafe; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/MemoryKiller.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/MemoryKiller.java deleted file mode 100644 index bb78b2a77..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/MemoryKiller.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.mpp.execution; - -import com.google.common.collect.Ordering; -import com.alibaba.polardbx.common.exception.TddlRuntimeException; -import com.alibaba.polardbx.common.exception.code.ErrorCode; -import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.common.utils.logger.LoggerFactory; -import com.alibaba.polardbx.executor.mpp.deploy.MppServer; -import com.alibaba.polardbx.executor.mpp.deploy.ServiceProvider; -import com.alibaba.polardbx.executor.sync.ISyncAction; -import com.alibaba.polardbx.executor.sync.SyncManagerHelper; -import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.memory.MemoryManager; -import com.alibaba.polardbx.optimizer.memory.MemoryPool; -import com.alibaba.polardbx.optimizer.memory.GlobalMemoryPool; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.atomic.AtomicLong; - -/** - * - **/ -public class MemoryKiller { - - private static final Logger log = LoggerFactory.getLogger(MemoryKiller.class); - - private static final Ordering TASK_ORDER_BY_REVOCABLE_MEMORY_SIZE = - Ordering.natural().onResultOf(taskContext -> taskContext.getContext().getMemoryPool().getRevocableBytes()); - - private GlobalMemoryPool memoryPool; - private TaskManager sqlTaskManager; - private QueryManager queryManager; - - public MemoryKiller() { - this.memoryPool = MemoryManager.getInstance().getGlobalMemoryPool(); - this.queryManager = ServiceProvider.getInstance().getServer().getQueryManager(); - - if (ServiceProvider.getInstance().getServer() instanceof MppServer) { - this.sqlTaskManager = ((MppServer) ServiceProvider.getInstance().getServer()).getTaskManager(); - } - } - - public synchronized void killMemory(long remainingBytesToRevoke) { - if (memoryPool.getRevocableBytes() > 0) { - List revocableTaskContexts = new ArrayList<>(); - if (sqlTaskManager != null) { - List sqlTasks = sqlTaskManager.getAllTasks(); - for (SqlTask task : sqlTasks) { - MemoryPool memoryPool = task.getTaskMemoryPool(); - if (memoryPool != null && memoryPool.getRevocableBytes() > 0) { - revocableTaskContexts.add(task.getTaskExecution().getTaskContext()); - } - } - } - if (queryManager != null) { - revocableTaskContexts.addAll(queryManager.getAllLocalQueryContext()); - } - requestKilling(revocableTaskContexts, remainingBytesToRevoke); - } - } - - private void requestKilling(List revocableTaskContexts, long remainingBytesToRevoke) { - AtomicLong remainingBytesToRevokeAtomic = new AtomicLong(remainingBytesToRevoke); - revocableTaskContexts.stream().sorted(TASK_ORDER_BY_REVOCABLE_MEMORY_SIZE) - .forEach(taskContext -> { - if (remainingBytesToRevokeAtomic.get() > 0) { - ExecutionContext executionContext = taskContext.getContext(); - log.warn("try kill:" + executionContext.getTraceId() + "," + executionContext.getOriginSql()); - try { - ISyncAction action = - (ISyncAction) Class.forName("com.alibaba.polardbx.server.response.KillSyncAction") - .getConstructor(String.class, long.class, boolean.class) - .newInstance(executionContext.getSchemaName(), executionContext.getConnId(), true); - SyncManagerHelper.sync(action, executionContext.getSchemaName()); - remainingBytesToRevokeAtomic.addAndGet(-executionContext.getMemoryPool().getMemoryUsage()); - } catch (Exception e) { - throw new TddlRuntimeException(ErrorCode.ERR_CONFIG, e, e.getMessage()); - } - } - }); - } - -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/MemoryRevokingScheduler.java 
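The deleted `MemoryKiller` is a standard memory back-pressure loop: sort the revocable consumers by their revocable bytes in ascending order, then kill until the requested byte budget is covered, decrementing the budget by each victim's usage. A self-contained restatement of that selection loop (the `Victim` interface is illustrative, not the PolarDB-X API):

```java
import java.util.Comparator;
import java.util.List;

class KillByMemoryBudget {
    interface Victim {
        long revocableBytes();

        void kill();
    }

    // Kill the smallest consumers first (natural ordering on revocable
    // bytes), stopping once the budget has been reclaimed, as the removed
    // MemoryKiller did.
    static void killUntilFreed(List<Victim> victims, long bytesToFree) {
        victims.sort(Comparator.comparingLong(Victim::revocableBytes));
        long remaining = bytesToFree;
        for (Victim v : victims) {
            if (remaining <= 0) {
                break;
            }
            v.kill();
            remaining -= v.revocableBytes();
        }
    }
}
```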
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/MemoryRevokingScheduler.java index 9c5eb845e..5de91267f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/MemoryRevokingScheduler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/MemoryRevokingScheduler.java @@ -16,7 +16,6 @@ package com.alibaba.polardbx.executor.mpp.execution; -import com.google.common.collect.Ordering; import com.alibaba.polardbx.common.properties.MppConfig; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; @@ -31,6 +30,7 @@ import com.alibaba.polardbx.optimizer.memory.MemoryPoolListener; import com.alibaba.polardbx.optimizer.memory.OperatorMemoryAllocatorCtx; import com.alibaba.polardbx.optimizer.memory.QueryMemoryPool; +import com.google.common.collect.Ordering; import javax.annotation.Nullable; import java.util.ArrayList; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/NodeTaskMap.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/NodeTaskMap.java index 3899ecdb2..4fbcf6f15 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/NodeTaskMap.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/NodeTaskMap.java @@ -29,12 +29,12 @@ */ package com.alibaba.polardbx.executor.mpp.execution; -import com.google.common.collect.Sets; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; -import com.alibaba.polardbx.gms.node.Node; import com.alibaba.polardbx.executor.mpp.util.FinalizerService; +import com.alibaba.polardbx.gms.node.Node; import com.alibaba.polardbx.util.MoreObjects; +import com.google.common.collect.Sets; import javax.annotation.concurrent.ThreadSafe; import javax.inject.Inject; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/PipelineContext.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/PipelineContext.java index b77716bd0..4af691888 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/PipelineContext.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/PipelineContext.java @@ -76,15 +76,15 @@ public class PipelineContext { private final List drivers = new CopyOnWriteArrayList<>(); private final Object driverLock = new Object(); - private AtomicLong totalScheduledTime = new AtomicLong(0); - private AtomicLong totalCpuTime = new AtomicLong(0); - private AtomicLong totalUserTime = new AtomicLong(0); - private AtomicLong totalBlockedTime = new AtomicLong(0); + private final AtomicLong totalScheduledTime = new AtomicLong(0); + private final AtomicLong totalCpuTime = new AtomicLong(0); + private final AtomicLong totalUserTime = new AtomicLong(0); + private final AtomicLong totalBlockedTime = new AtomicLong(0); @Nullable private PipelineProperties pipelineProperties; - private DateTime startTime = DateTime.now(); + private final DateTime startTime = DateTime.now(); public PipelineContext(int pipelineId, TaskContext taskContext) { this.pipelineId = pipelineId; @@ -252,7 +252,7 @@ public StageInfo buildLocalModeStageInfo( runningTasks++; } - TaskStats taskStats = taskInfo.getStats(); + TaskStats taskStats = taskInfo.getTaskStats(); if (taskStats != null) { totalPipelineExecs += 
taskStats.getTotalPipelineExecs(); @@ -263,10 +263,10 @@ public StageInfo buildLocalModeStageInfo( cumulativeMemory += taskStats.getCumulativeMemory(); totalMemoryReservation += taskStats.getMemoryReservation(); - totalScheduledTime += taskStats.getTotalScheduledTime(); - totalCpuTime += taskStats.getTotalCpuTime(); - totalUserTime += taskStats.getTotalUserTime(); - totalBlockedTime += taskStats.getTotalBlockedTime(); + totalScheduledTime += taskStats.getTotalScheduledTimeNanos(); + totalCpuTime += taskStats.getTotalCpuTimeNanos(); + totalUserTime += taskStats.getTotalUserTimeNanos(); + totalBlockedTime += taskStats.getTotalBlockedTimeNanos(); if (!taskState.isDone()) { fullyBlocked &= taskStats.isFullyBlocked(); blockedReasons.addAll(taskStats.getBlockedReasons()); @@ -284,9 +284,10 @@ public StageInfo buildLocalModeStageInfo( OperatorStats operator = taskStats.getOperatorStats().get(i); operators.add(new OperatorStats(Optional.of(stageId), operator.getPipelineId(), operator.getOperatorType(), - operator.getOperatorId(), operator.getOutputRowCount(), operator.getOutputBytes(), - operator.getStartupDuration(), operator.getDuration(), operator.getMemory(), - operator.getInstances(), operator.getSpillCnt())); + operator.getOperatorId(), operator.getOutputRowCount(), + operator.getRuntimeFilteredCount(), + operator.getOutputBytes(), operator.getStartupDuration(), operator.getDuration(), + operator.getMemory(), operator.getInstances(), operator.getSpillCnt())); } } else { if (taskStats.getOperatorStats() != null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/PlanInfo.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/PlanInfo.java index 4e97d8bc1..cdf437d7d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/PlanInfo.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/PlanInfo.java @@ -29,12 +29,12 @@ */ package com.alibaba.polardbx.executor.mpp.execution; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.optimizer.planmanager.PlanManagerUtil; import com.alibaba.polardbx.util.MoreObjects; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; import org.apache.calcite.rel.RelNode; import static java.util.Objects.requireNonNull; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryBloomFilter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryBloomFilter.java index 86b270fbe..677030bda 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryBloomFilter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryBloomFilter.java @@ -16,11 +16,11 @@ package com.alibaba.polardbx.executor.mpp.execution; -import com.google.common.util.concurrent.SettableFuture; +import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.mpp.planner.PlanFragment; -import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; +import com.google.common.util.concurrent.SettableFuture; import 
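The `PipelineContext` hunk above switches the aggregation to the `getTotal*TimeNanos()` getters, and the matching `QueryStateMachine` hunk later in this patch builds the reported `Duration` with `NANOSECONDS` instead of `MILLISECONDS`, so the accumulation unit and the conversion unit now agree. A minimal sketch of the pattern, using the same airlift `Duration` API the patch uses:

```java
import io.airlift.units.Duration;

import java.util.concurrent.TimeUnit;

class TimeAggregation {
    public static void main(String[] args) {
        // Accumulate in the source unit (nanoseconds) ...
        long totalScheduledNanos = 0;
        totalScheduledNanos += 1_200_000_000L; // task 1: 1.2s
        totalScheduledNanos += 350_000_000L;   // task 2: 350ms

        // ... and convert once, at the reporting boundary, naming the unit explicitly.
        Duration scheduled = new Duration(totalScheduledNanos, TimeUnit.NANOSECONDS)
            .convertToMostSuccinctTimeUnit();
        System.out.println(scheduled); // ~1.55s
    }
}
```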
java.util.Collection; import java.util.HashMap; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryContext.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryContext.java index 706e16ba6..1c3ea5a31 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryContext.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryContext.java @@ -29,7 +29,6 @@ */ package com.alibaba.polardbx.executor.mpp.execution; -import com.google.common.base.Preconditions; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.mpp.Session; @@ -39,6 +38,7 @@ import com.alibaba.polardbx.optimizer.memory.QueryMemoryPool; import com.alibaba.polardbx.optimizer.spill.QuerySpillSpaceMonitor; import com.alibaba.polardbx.optimizer.workload.WorkloadUtil; +import com.google.common.base.Preconditions; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryExecution.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryExecution.java index 9d152883b..6dff046f5 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryExecution.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryExecution.java @@ -29,14 +29,13 @@ */ package com.alibaba.polardbx.executor.mpp.execution; -import com.alibaba.polardbx.executor.mpp.Session; import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; +import com.alibaba.polardbx.executor.mpp.Session; import io.airlift.units.Duration; import org.apache.calcite.rel.RelNode; import org.joda.time.DateTime; import java.util.List; - import java.util.concurrent.Future; import static java.util.Objects.requireNonNull; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryInfo.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryInfo.java index dcc25a220..22692af80 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryInfo.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryInfo.java @@ -29,12 +29,12 @@ */ package com.alibaba.polardbx.executor.mpp.execution; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.exception.code.ErrorType; import com.alibaba.polardbx.executor.mpp.client.FailureInfo; import com.alibaba.polardbx.util.MoreObjects; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; import javax.annotation.Nullable; import javax.annotation.concurrent.Immutable; @@ -62,27 +62,27 @@ public class QueryInfo { @JsonCreator public QueryInfo( @JsonProperty("queryId") - String queryId, + String queryId, @JsonProperty("session") - SessionInfo session, + SessionInfo session, @JsonProperty("state") - QueryState state, + QueryState state, @JsonProperty("scheduled") - boolean scheduled, + boolean scheduled, @JsonProperty("self") - URI self, + URI self, @JsonProperty("query") - String query, + String query, @JsonProperty("queryStats") - QueryStats queryStats, + QueryStats queryStats, 
@JsonProperty("outputStage") - Optional outputStage, + Optional outputStage, @JsonProperty("failureInfo") - FailureInfo failureInfo, + FailureInfo failureInfo, @JsonProperty("errorCode") - ErrorCode errorCode, + ErrorCode errorCode, @JsonProperty("completeInfo") - boolean completeInfo + boolean completeInfo ) { requireNonNull(queryId, "queryId is null"); requireNonNull(session, "session is null"); @@ -195,7 +195,7 @@ public QueryInfo summary() { return this; } - if (this.outputStage.get().getTasks().get(0).getStats() == null) { + if (this.outputStage.get().getTasks().get(0).getTaskStats() == null) { return this; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QuerySplitStats.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QuerySplitStats.java new file mode 100644 index 000000000..984b24e3e --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QuerySplitStats.java @@ -0,0 +1,133 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.alibaba.polardbx.executor.mpp.execution; + +import com.alibaba.polardbx.executor.mpp.operator.DriverStats; +import com.alibaba.polardbx.util.MoreObjects; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; + +import javax.annotation.concurrent.Immutable; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; + +import static java.util.Objects.requireNonNull; + +@Immutable +public class QuerySplitStats { + private final String queryId; + private final SessionInfo session; + private final QueryState state; + private final String query; + private final List driverStats; + + @JsonCreator + public QuerySplitStats( + @JsonProperty("queryId") + String queryId, + @JsonProperty("session") + SessionInfo session, + @JsonProperty("state") + QueryState state, + @JsonProperty("query") + String query, + @JsonProperty("driverStats") + List driverStats + ) { + requireNonNull(queryId, "queryId is null"); + requireNonNull(session, "session is null"); + requireNonNull(state, "state is null"); + requireNonNull(query, "query is null"); + this.queryId = queryId; + this.session = session; + this.state = state; + this.query = query; + this.driverStats = driverStats; + } + + @JsonProperty + public String getQueryId() { + return queryId; + } + + @JsonProperty + public SessionInfo getSession() { + return session; + } + + @JsonProperty + public QueryState getState() { + return state; + } + + @JsonProperty + public String getQuery() { + return query; + } + + @JsonProperty + public List getDriverStats() { + return driverStats; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("queryId", queryId).add("state", state).toString(); + } + + public static QuerySplitStats from(QueryStatsInfo queryInfo) { + List driverStats = new ArrayList<>(); + QueryStatsInfo.StageStatsInfo outputStage = queryInfo.getOutputStage(); + addStageSplits(outputStage, driverStats); + + driverStats.sort(Comparator.comparing(DriverStats::getDriverId)); + return new QuerySplitStats(queryInfo.getQueryId(), queryInfo.getSession(), queryInfo.getState(), + queryInfo.getQuery(), + driverStats + ); + } + + private static void addStageSplits(QueryStatsInfo.StageStatsInfo outputStage, + List driverStats) { + if (outputStage.getTaskStats() != null) { + for (QueryStatsInfo.TaskStatsInfo taskStat : outputStage.getTaskStats()) { + driverStats.addAll(taskStat.getDetailedStats().getDriverStats()); + } + } + + if (outputStage.getSubStages() != null) { + for (QueryStatsInfo.StageStatsInfo subStage : outputStage.getSubStages()) { + addStageSplits(subStage, driverStats); + } + } + + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryStateMachine.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryStateMachine.java index f5a9d9c01..8eddc690c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryStateMachine.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryStateMachine.java @@ -29,9 +29,6 @@ */ package com.alibaba.polardbx.executor.mpp.execution; -import com.google.common.base.Ticker; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.SettableFuture; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import 
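The new `QuerySplitStats` follows the DTO convention used throughout this package: an immutable class whose single constructor carries `@JsonCreator`/`@JsonProperty`, letting Jackson rebuild it without setters or a default constructor. A self-contained round trip with a simplified DTO (not the real class):

```java
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;

public class ImmutableDtoRoundTrip {
    public static class SplitSummary {
        private final String queryId;
        private final int driverCount;

        @JsonCreator
        public SplitSummary(@JsonProperty("queryId") String queryId,
                            @JsonProperty("driverCount") int driverCount) {
            this.queryId = queryId;
            this.driverCount = driverCount;
        }

        @JsonProperty
        public String getQueryId() {
            return queryId;
        }

        @JsonProperty
        public int getDriverCount() {
            return driverCount;
        }
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        String json = mapper.writeValueAsString(new SplitSummary("q-1", 16));
        SplitSummary back = mapper.readValue(json, SplitSummary.class);
        System.out.println(json + " -> " + back.getQueryId());
        // {"queryId":"q-1","driverCount":16} -> q-1
    }
}
```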
com.alibaba.polardbx.common.utils.GeneralUtil; @@ -42,6 +39,9 @@ import com.alibaba.polardbx.executor.mpp.operator.BlockedReason; import com.alibaba.polardbx.executor.mpp.operator.OperatorStats; import com.alibaba.polardbx.executor.mpp.util.Failures; +import com.google.common.base.Ticker; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.SettableFuture; import io.airlift.units.Duration; import org.apache.commons.lang.exception.ExceptionUtils; import org.joda.time.DateTime; @@ -69,6 +69,7 @@ import static io.airlift.units.DataSize.succinctBytes; import static java.util.Objects.requireNonNull; import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.NANOSECONDS; @ThreadSafe public class QueryStateMachine implements StateMachineBase { @@ -243,10 +244,10 @@ public QueryInfo getQueryInfo(Optional rootStage, URI querySelf) { totalMemoryReservation += stageStats.getTotalMemoryReservation().toBytes(); peakMemoryReservation = getPeakMemoryInBytes(); - totalScheduledTime += stageStats.getTotalScheduledTime(); - totalCpuTime += stageStats.getTotalCpuTime(); - totalUserTime += stageStats.getTotalUserTime(); - totalBlockedTime += stageStats.getTotalBlockedTime(); + totalScheduledTime += stageStats.getTotalScheduledTimeNanos(); + totalCpuTime += stageStats.getTotalCpuTimeNanos(); + totalUserTime += stageStats.getTotalUserTimeNanos(); + totalBlockedTime += stageStats.getTotalBlockedTimeNanos(); if (!stageInfo.getState().isDone()) { fullyBlocked &= stageStats.isFullyBlocked(); blockedReasons.addAll(stageStats.getBlockedReasons()); @@ -291,10 +292,10 @@ public QueryInfo getQueryInfo(Optional rootStage, URI querySelf) { cumulativeMemory, totalMemoryReservation > 0 ? succinctBytes(totalMemoryReservation) : succinctBytes(0), peakMemoryReservation > 0 ? succinctBytes(peakMemoryReservation) : succinctBytes(0), - new Duration(totalScheduledTime, MILLISECONDS).convertToMostSuccinctTimeUnit(), - new Duration(totalCpuTime, MILLISECONDS).convertToMostSuccinctTimeUnit(), - new Duration(totalUserTime, MILLISECONDS).convertToMostSuccinctTimeUnit(), - new Duration(totalBlockedTime, MILLISECONDS).convertToMostSuccinctTimeUnit(), + new Duration(totalScheduledTime, NANOSECONDS).convertToMostSuccinctTimeUnit(), + new Duration(totalCpuTime, NANOSECONDS).convertToMostSuccinctTimeUnit(), + new Duration(totalUserTime, NANOSECONDS).convertToMostSuccinctTimeUnit(), + new Duration(totalBlockedTime, NANOSECONDS).convertToMostSuccinctTimeUnit(), fullyBlocked, blockedReasons, processedInputDataSize > 0 ? 
succinctBytes(processedInputDataSize) : succinctBytes(0), diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryStats.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryStats.java index e58106760..e4218db0d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryStats.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryStats.java @@ -29,13 +29,14 @@ */ package com.alibaba.polardbx.executor.mpp.execution; +import com.alibaba.polardbx.executor.mpp.operator.BlockedReason; +import com.alibaba.polardbx.executor.mpp.operator.OperatorStats; +import com.alibaba.polardbx.util.MoreObjects; import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonFormat; import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; -import com.alibaba.polardbx.executor.mpp.operator.BlockedReason; -import com.alibaba.polardbx.executor.mpp.operator.OperatorStats; -import com.alibaba.polardbx.util.MoreObjects; import io.airlift.units.DataSize; import io.airlift.units.Duration; import org.joda.time.DateTime; @@ -176,21 +177,25 @@ public QueryStats( } @JsonProperty + @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS", timezone = "GMT+8") public DateTime getCreateTime() { return createTime; } @JsonProperty + @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS", timezone = "GMT+8") public DateTime getExecutionStartTime() { return executionStartTime; } @JsonProperty + @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS", timezone = "GMT+8") public DateTime getLastHeartbeat() { return lastHeartbeat; } @JsonProperty + @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS", timezone = "GMT+8") public DateTime getEndTime() { return endTime; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryStatsInfo.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryStatsInfo.java new file mode 100644 index 000000000..ed7a98bbd --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/QueryStatsInfo.java @@ -0,0 +1,323 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
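The four `@JsonFormat` annotations added to the `QueryStats` getters pin the wire format of the Joda `DateTime` fields to a readable GMT+8 timestamp instead of the default epoch-millis number. A sketch of the effect, assuming `jackson-datatype-joda` is on the classpath and registered (`QueryStats` returns `org.joda.time.DateTime`):

```java
import com.fasterxml.jackson.annotation.JsonFormat;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.datatype.joda.JodaModule;
import org.joda.time.DateTime;

public class DateTimeJsonFormat {
    public static class Stats {
        private final DateTime createTime = DateTime.now();

        // Without @JsonFormat, Joda DateTime serializes as a numeric timestamp.
        @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS", timezone = "GMT+8")
        public DateTime getCreateTime() {
            return createTime;
        }
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper().registerModule(new JodaModule());
        System.out.println(mapper.writeValueAsString(new Stats()));
        // e.g. {"createTime":"2024-01-01 12:00:00.000"}
    }
}
```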
+ */ +package com.alibaba.polardbx.executor.mpp.execution; + +import com.alibaba.polardbx.executor.mpp.operator.TaskStats; +import com.alibaba.polardbx.util.MoreObjects; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; + +import javax.annotation.concurrent.Immutable; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; + +import static java.util.Objects.requireNonNull; + +@Immutable +public class QueryStatsInfo { + private final String queryId; + private final SessionInfo session; + private final QueryState state; + private final String query; + private final QueryStats queryStats; + private final StageStatsInfo outputStage; + + @JsonCreator + public QueryStatsInfo( + @JsonProperty("queryId") + String queryId, + @JsonProperty("session") + SessionInfo session, + @JsonProperty("state") + QueryState state, + @JsonProperty("query") + String query, + @JsonProperty("queryStats") + QueryStats queryStats, + @JsonProperty("outputStage") + StageStatsInfo outputStage + ) { + requireNonNull(queryId, "queryId is null"); + requireNonNull(session, "session is null"); + requireNonNull(state, "state is null"); + requireNonNull(query, "query is null"); + requireNonNull(outputStage, "outputStage is null"); + this.queryId = queryId; + this.session = session; + this.state = state; + this.query = query; + this.queryStats = queryStats; + this.outputStage = outputStage; + } + + @JsonProperty + public String getQueryId() { + return queryId; + } + + @JsonProperty + public SessionInfo getSession() { + return session; + } + + @JsonProperty + public QueryState getState() { + return state; + } + + @JsonProperty + public String getQuery() { + return query; + } + + @JsonProperty + public QueryStats getQueryStats() { + return queryStats; + } + + @JsonProperty + public StageStatsInfo getOutputStage() { + return outputStage; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("queryId", queryId).add("state", state).toString(); + } + + public static QueryStatsInfo from(QueryInfo queryInfo) { + return new QueryStatsInfo(queryInfo.getQueryId(), queryInfo.getSession(), queryInfo.getState(), + queryInfo.getQuery(), + queryInfo.getQueryStats(), + StageStatsInfo.from(queryInfo.getOutputStage()) + ); + } + + public static class StageStatsInfo { + + private final StageId stageId; + private final StageState state; + private final PlanInfo plan; + private final StageStats stageStats; + private final List taskStats; + private final List subStages; + + public static StageStatsInfo from(Optional stage) { + if (!stage.isPresent()) { + return null; + } + StageInfo stageInfo = stage.get(); + List tasks = Optional.ofNullable(stageInfo.getTasks()) + .map(taskInfos -> + taskInfos.stream().map(TaskStatsInfo::from) + .collect(Collectors.toList())) + .orElse(null); + List subStages = Optional.ofNullable(stageInfo.getSubStages()) + .map(subStage -> + subStage.stream() + .map(subStageInfo -> StageStatsInfo.from(Optional.of(subStageInfo))) + .collect(Collectors.toList())) + .orElse(null); + return new StageStatsInfo(stageInfo.getStageId(), stageInfo.getState(), stageInfo.getPlan(), + stageInfo.getStageStats(), tasks, subStages); + } + + @JsonCreator + public StageStatsInfo( + @JsonProperty("stageId") + StageId stageId, + @JsonProperty("state") + StageState state, + @JsonProperty("plan") + PlanInfo plan, + @JsonProperty("stageStats") + StageStats stageStats, + @JsonProperty("tasks") + List taskStats, + 
@JsonProperty("subStages") + List subStages) { + this.stageId = stageId; + this.state = state; + this.plan = plan; + this.stageStats = stageStats; + this.taskStats = taskStats; + this.subStages = subStages; + } + + @JsonProperty + public StageId getStageId() { + return stageId; + } + + @JsonProperty + public StageState getState() { + return state; + } + + @JsonProperty + public PlanInfo getPlan() { + return plan; + } + + @JsonProperty + public StageStats getStageStats() { + return stageStats; + } + + @JsonProperty + public List getTaskStats() { + return taskStats; + } + + @JsonProperty + public List getSubStages() { + return subStages; + } + } + + public static class TaskStatsInfo { + + private final TaskStats detailedStats; + private final TaskStatus taskStatus; + private final boolean complete; + + private final int completedPipelineExecs; + private final int totalPipelineExecs; + private final long elapsedTimeMillis; + private final long totalCpuTime; + private final long processTimeMillis; + private final long processWall; + private final long pullDataTimeMillis; + private final long deliveryTimeMillis; + + @JsonCreator + public TaskStatsInfo( + @JsonProperty("detailedStats") + TaskStats detailedStats, + @JsonProperty("taskStatus") + TaskStatus taskStatus, + @JsonProperty("complete") + boolean complete, + @JsonProperty("completedPipelineExecs") + int completedPipelineExecs, + @JsonProperty("totalPipelineExecs") + int totalPipelineExecs, + @JsonProperty("elapsedTime") + long elapsedTimeMillis, + @JsonProperty("totalCpuTime") + long totalCpuTime, + @JsonProperty("processTime") + long processTimeMillis, + @JsonProperty("processWall") + long processWall, + @JsonProperty("pullDataTimeMillis") + long pullDataTimeMillis, + @JsonProperty("deliveryTimeMillis") + long deliveryTimeMillis) { + this.detailedStats = detailedStats; + this.taskStatus = taskStatus; + this.complete = complete; + this.completedPipelineExecs = completedPipelineExecs; + this.totalPipelineExecs = totalPipelineExecs; + this.elapsedTimeMillis = elapsedTimeMillis; + this.totalCpuTime = totalCpuTime; + this.processTimeMillis = processTimeMillis; + this.processWall = processWall; + this.pullDataTimeMillis = pullDataTimeMillis; + this.deliveryTimeMillis = deliveryTimeMillis; + } + + public static TaskStatsInfo from(TaskInfo taskInfo) { + if (taskInfo == null) { + return null; + } + return new TaskStatsInfo(taskInfo.getTaskStats(), taskInfo.getTaskStatus(), taskInfo.isComplete(), + taskInfo.getCompletedPipelineExecs(), taskInfo.getTotalPipelineExecs(), + taskInfo.getElapsedTimeMillis(), taskInfo.getTotalCpuTime(), taskInfo.getProcessTimeMillis(), + taskInfo.getProcessWall(), taskInfo.getPullDataTimeMillis(), taskInfo.getDeliveryTimeMillis()); + } + + @JsonProperty + public TaskStats getDetailedStats() { + return detailedStats; + } + + @JsonProperty + public TaskStatus getTaskStatus() { + return taskStatus; + } + + @JsonProperty + public boolean isComplete() { + return complete; + } + + @JsonProperty + public int getCompletedPipelineExecs() { + return completedPipelineExecs; + } + + @JsonProperty + public int getTotalPipelineExecs() { + return totalPipelineExecs; + } + + @JsonProperty + public long getElapsedTimeMillis() { + return elapsedTimeMillis; + } + + @JsonProperty + public long getTotalCpuTime() { + return totalCpuTime; + } + + @JsonProperty + public long getProcessTimeMillis() { + return processTimeMillis; + } + + @JsonProperty + public long getProcessWall() { + return processWall; + } + + @JsonProperty + public long 
getPullDataTimeMillis() { + return pullDataTimeMillis; + } + + @JsonProperty + public long getDeliveryTimeMillis() { + return deliveryTimeMillis; + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/RemoteTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/RemoteTask.java index 1e214d5a4..4ebfae42b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/RemoteTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/RemoteTask.java @@ -29,10 +29,10 @@ */ package com.alibaba.polardbx.executor.mpp.execution; -import com.google.common.collect.Multimap; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.mpp.OutputBuffers; import com.alibaba.polardbx.executor.mpp.metadata.Split; +import com.google.common.collect.Multimap; +import com.google.common.util.concurrent.ListenableFuture; public interface RemoteTask { TaskId getTaskId(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/RemoteTaskFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/RemoteTaskFactory.java index 8aae902a7..618e12787 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/RemoteTaskFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/RemoteTaskFactory.java @@ -29,13 +29,13 @@ */ package com.alibaba.polardbx.executor.mpp.execution; -import com.google.common.collect.Multimap; import com.alibaba.polardbx.executor.mpp.OutputBuffers; import com.alibaba.polardbx.executor.mpp.Session; import com.alibaba.polardbx.executor.mpp.metadata.Split; import com.alibaba.polardbx.executor.mpp.metadata.TaskLocation; import com.alibaba.polardbx.executor.mpp.planner.PlanFragment; import com.alibaba.polardbx.gms.node.Node; +import com.google.common.collect.Multimap; import java.util.concurrent.ExecutorService; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/ScheduledSplit.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/ScheduledSplit.java index eea2a1eee..2a8faa30d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/ScheduledSplit.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/ScheduledSplit.java @@ -29,10 +29,10 @@ */ package com.alibaba.polardbx.executor.mpp.execution; +import com.alibaba.polardbx.executor.mpp.metadata.Split; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.primitives.Longs; -import com.alibaba.polardbx.executor.mpp.metadata.Split; import static com.google.common.base.MoreObjects.toStringHelper; import static java.util.Objects.requireNonNull; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SessionInfo.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SessionInfo.java index 7bf554466..262866a54 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SessionInfo.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SessionInfo.java @@ -16,9 +16,9 @@ package com.alibaba.polardbx.executor.mpp.execution; +import com.alibaba.polardbx.util.MoreObjects; import com.fasterxml.jackson.annotation.JsonCreator; import 
com.fasterxml.jackson.annotation.JsonProperty;
-import com.alibaba.polardbx.util.MoreObjects;
 
 import java.util.Map;
 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SessionRepresentation.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SessionRepresentation.java
index 864c172c2..509feb54c 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SessionRepresentation.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SessionRepresentation.java
@@ -23,12 +23,15 @@
 import com.alibaba.polardbx.executor.common.ExecutorContext;
 import com.alibaba.polardbx.executor.mpp.Session;
 import com.alibaba.polardbx.executor.mpp.server.TaskResource;
+import com.alibaba.polardbx.executor.spi.ITransactionManager;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.memory.MemoryType;
 import com.alibaba.polardbx.optimizer.memory.QueryMemoryPool;
 import com.alibaba.polardbx.optimizer.parse.privilege.PrivilegeContext;
 import com.alibaba.polardbx.optimizer.spill.QuerySpillSpaceMonitor;
+import com.alibaba.polardbx.optimizer.statis.ColumnarTracer;
 import com.alibaba.polardbx.optimizer.statis.SQLTracer;
+import com.alibaba.polardbx.optimizer.utils.IColumnarTransaction;
 import com.alibaba.polardbx.optimizer.utils.IMppReadOnlyTransaction;
 import com.alibaba.polardbx.optimizer.utils.ITransaction;
 import com.alibaba.polardbx.optimizer.workload.WorkloadType;
@@ -65,10 +68,12 @@ public class SessionRepresentation {
     private long lastInsertId;
     private InternalTimeZone logicalTimeZone;
     private long tsoTimeStamp;
+    private boolean useColumnar;
     private Map dnLsnMap = new HashMap<>();
     private WorkloadType workloadType;
     private boolean omitTso;
     private boolean lizard1PC;
+    private ColumnarTracer columnarTracer;
 
     /**
      * For now only the polardbx_server_id variable is passed through, to avoid a large size increase; extend later if needed
      */
@@ -100,9 +105,11 @@ public SessionRepresentation(
         @JsonProperty("lastInsertId") long lastInsertId,
         @JsonProperty("logicalTimeZone") InternalTimeZone logicalTimeZone,
         @JsonProperty("tsoTimeStamp") long tsoTimeStamp,
+        @JsonProperty("useColumnar") boolean useColumnar,
         @JsonProperty("dnLsnMap") Map dnLsnMap,
         @JsonProperty("omitTso") boolean omitTso,
         @JsonProperty("lizard1PC") boolean lizard1PC,
+        @JsonProperty("columnarTracer") ColumnarTracer columnarTracer,
         @JsonProperty("workloadType") WorkloadType workloadType,
         @JsonProperty("extraServerVariables") Map extraServerVariables) {
         this.traceId = traceId;
@@ -128,9 +135,11 @@ public SessionRepresentation(
         this.lastInsertId = lastInsertId;
         this.logicalTimeZone = logicalTimeZone;
         this.tsoTimeStamp = tsoTimeStamp;
+        this.useColumnar = useColumnar;
         this.dnLsnMap = dnLsnMap;
         this.omitTso = omitTso;
         this.lizard1PC = lizard1PC;
+        this.columnarTracer = columnarTracer;
         this.workloadType = workloadType;
         this.extraServerVariables = extraServerVariables;
     }
@@ -159,9 +168,11 @@ public SessionRepresentation(
         long lastInsertId,
         InternalTimeZone logicalTimeZone,
         long tsoTimeStamp,
+        boolean useColumnar,
         Map dnLsnMap,
         boolean omitTso,
         boolean lizard1PC,
+        ColumnarTracer columnarTracer,
         WorkloadType workloadType,
         Map extraServerVariables) {
         this.traceId = traceId;
@@ -187,10 +198,12 @@ public SessionRepresentation(
         this.lastInsertId = lastInsertId;
         this.logicalTimeZone = logicalTimeZone;
         this.tsoTimeStamp = tsoTimeStamp;
+        this.useColumnar = useColumnar;
         this.dnLsnMap = dnLsnMap;
         this.workloadType = workloadType;
         this.omitTso = omitTso;
         this.lizard1PC =
lizard1PC; + this.columnarTracer = columnarTracer; this.extraServerVariables = extraServerVariables; } @@ -309,6 +322,11 @@ public long getTsoTimeStamp() { return tsoTimeStamp; } + @JsonProperty + public boolean isUseColumnar() { + return useColumnar; + } + @JsonProperty public Map getDnLsnMap() { return dnLsnMap; @@ -352,13 +370,22 @@ public Session toSession(TaskId taskId, QueryContext queryContext, long trxId) { ExecutionContext ec = TaskResource.getDrdsContextHandler().makeExecutionContext(schema, hintCmds, txIsolation); ec.setTxId(trxId); if (tsoTimeStamp > 0 || omitTso) { - IMppReadOnlyTransaction transaction = - (IMppReadOnlyTransaction) ExecutorContext.getContext(schema).getTransactionManager().createTransaction( + ITransactionManager tm = ExecutorContext.getContext(schema).getTransactionManager(); + + if (useColumnar) { + IColumnarTransaction transaction = (IColumnarTransaction) tm.createTransaction( + ITransactionPolicy.TransactionClass.COLUMNAR_READ_ONLY_TRANSACTION, ec); + transaction.setTsoTimestamp(tsoTimeStamp); + ec.setTransaction(transaction); + } else { + IMppReadOnlyTransaction transaction = (IMppReadOnlyTransaction) tm.createTransaction( ITransactionPolicy.TransactionClass.MPP_READ_ONLY_TRANSACTION, ec); - transaction.setDnLsnMap(dnLsnMap); - transaction.setTsoTimestamp(tsoTimeStamp); - transaction.enableOmitTso(omitTso, lizard1PC); - ec.setTransaction(transaction); + transaction.setSnapshotTimestamp(tsoTimeStamp); + transaction.setDnLsnMap(dnLsnMap); + transaction.enableOmitTso(omitTso, lizard1PC); + ec.setTransaction(transaction); + } + ec.setAutoCommit(true); } else { ITransaction transaction = ExecutorContext.getContext(schema).getTransactionManager().createTransaction( @@ -380,6 +407,7 @@ public Session toSession(TaskId taskId, QueryContext queryContext, long trxId) { ec.setEnableTrace(enableTrace); if (enableTrace) { ec.setTracer(new SQLTracer()); + ec.setColumnarTracer(columnarTracer); } ec.setServerVariables(serverVariables); ec.setUserDefVariables(userDefVariables); @@ -432,4 +460,12 @@ public Session toSession(TaskId taskId, QueryContext queryContext, long trxId) { return new Session(taskId.getStageId(), ec); } + + public ColumnarTracer getColumnarTracer() { + return columnarTracer; + } + + public void setColumnarTracer(ColumnarTracer columnarTracer) { + this.columnarTracer = columnarTracer; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SplitRunner.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SplitRunner.java index be2c8f2a1..9f4249787 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SplitRunner.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SplitRunner.java @@ -16,9 +16,11 @@ package com.alibaba.polardbx.executor.mpp.execution; +import com.alibaba.polardbx.executor.mpp.operator.DriverContext; import com.google.common.util.concurrent.ListenableFuture; import java.io.Closeable; +import java.util.function.Supplier; public interface SplitRunner extends Closeable { boolean isFinished(); @@ -40,4 +42,6 @@ public interface SplitRunner extends Closeable { * @return true - 需要作为低优先级执行 */ boolean moveLowPrioritizedQuery(long executeTime); + + void runtimeStatsSupplier(Supplier supplier); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlQueryExecution.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlQueryExecution.java index 
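In `toSession` above, the serialized `useColumnar` flag decides on the worker side whether a columnar or an MPP read-only transaction is created, and each branch applies only the settings its transaction flavor understands (`setTsoTimestamp` versus `setSnapshotTimestamp` plus the LSN map and omit-TSO switches). A reduced sketch of that dispatch, with stand-in types rather than the PolarDB-X SPI:

```java
interface ReadOnlyTxn {
}

class ColumnarTxn implements ReadOnlyTxn {
    long tsoTimestamp;
}

class MppTxn implements ReadOnlyTxn {
    long snapshotTimestamp;
    boolean omitTso;
}

class TxnFactory {
    // One flag shipped inside the serialized session picks the transaction
    // flavor on the remote worker; each branch sets only what it understands.
    static ReadOnlyTxn create(boolean useColumnar, long tso, boolean omitTso) {
        if (useColumnar) {
            ColumnarTxn txn = new ColumnarTxn();
            txn.tsoTimestamp = tso;
            return txn;
        }
        MppTxn txn = new MppTxn();
        txn.snapshotTimestamp = tso;
        txn.omitTso = omitTso;
        return txn;
    }

    public static void main(String[] args) {
        System.out.println(create(true, 100L, false).getClass().getSimpleName());  // ColumnarTxn
        System.out.println(create(false, 100L, true).getClass().getSimpleName());  // MppTxn
    }
}
```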
8ad9e99a6..a049a4d56 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlQueryExecution.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlQueryExecution.java @@ -30,15 +30,16 @@ package com.alibaba.polardbx.executor.mpp.execution; -import com.google.common.base.Throwables; -import com.google.inject.Inject; import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.properties.ConnectionProperties; import com.alibaba.polardbx.common.properties.ParamManager; import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.mpp.OutputBuffers; import com.alibaba.polardbx.executor.mpp.Session; +import com.alibaba.polardbx.executor.mpp.execution.scheduler.ColumnarNodeSelector; import com.alibaba.polardbx.executor.mpp.execution.scheduler.NodeScheduler; import com.alibaba.polardbx.executor.mpp.execution.scheduler.NodeSelector; import com.alibaba.polardbx.executor.mpp.execution.scheduler.SqlQueryScheduler; @@ -48,15 +49,30 @@ import com.alibaba.polardbx.executor.mpp.planner.StageExecutionPlan; import com.alibaba.polardbx.executor.mpp.planner.SubPlan; import com.alibaba.polardbx.executor.utils.ExecUtils; -import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.optimizer.core.planner.rule.util.CBOUtil; +import com.alibaba.polardbx.optimizer.core.rel.OSSTableScan; +import com.alibaba.polardbx.optimizer.utils.TableTopologyUtil; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.google.common.base.Throwables; +import com.google.inject.Inject; import io.airlift.units.Duration; import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.RelShuttleImpl; +import org.apache.calcite.rel.core.TableScan; +import org.apache.calcite.rel.logical.LogicalFilter; +import org.apache.calcite.rel.logical.LogicalProject; import org.apache.calcite.rel.type.RelDataTypeField; +import org.apache.calcite.rex.RexNode; +import org.apache.calcite.rex.RexSubQuery; +import org.apache.calcite.rex.RexUtil; import javax.annotation.concurrent.ThreadSafe; import java.net.URI; +import java.text.MessageFormat; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.concurrent.ExecutorService; import java.util.concurrent.ThreadPoolExecutor; @@ -133,9 +149,18 @@ public void start() { Pair subPlan = PlanFragmenter.buildRootFragment(physicalPlan, session); int polarXParallelism = ExecUtils.getPolarDBXCores( session.getClientContext().getParamManager(), !existMppOnlyInstanceNode()); - int limitNode = subPlan.getValue() % polarXParallelism > 0 ? subPlan.getValue() / polarXParallelism + 1 : - subPlan.getValue() / polarXParallelism; - NodeSelector nodeSelector = nodeScheduler.createNodeSelector(session, limitNode); + int limitNode = session.getClientContext().getParamManager().getInt(ConnectionParams.MPP_NODE_SIZE); + if (limitNode <= 0) { + limitNode = subPlan.getValue() % polarXParallelism > 0 ? 
subPlan.getValue() / polarXParallelism + 1 : + subPlan.getValue() / polarXParallelism; + } + boolean randomNode = + session.getClientContext().getParamManager().getBoolean(ConnectionParams.MPP_NODE_RANDOM); + + NodeSelector nodeSelector = nodeScheduler.createNodeSelector(session, limitNode, randomNode); + if (nodeSelector instanceof ColumnarNodeSelector) { + optimizeScheduleUnderColumnar((ColumnarNodeSelector) nodeSelector); + } planDistribution(subPlan.getKey(), nodeSelector); stateMachine.recordDistributedPlanningTime(distributedPlanningStart); // transition to starting @@ -157,13 +182,75 @@ public void start() { } } + private void optimizeScheduleUnderColumnar(ColumnarNodeSelector nodeSelector) { + if (ExecUtils.needPutIfAbsent(session.getClientContext(), ConnectionProperties.SCHEDULE_BY_PARTITION)) { + PartScheduleChecker checker = new PartScheduleChecker(nodeSelector.getOrderedNode().size()); + physicalPlan.accept(checker); + boolean scheduleByPartition = checker.canScheduleByPart(); + session.getClientContext() + .putIntoHintCmds(ConnectionProperties.SCHEDULE_BY_PARTITION, scheduleByPartition); + nodeSelector.setScheduleByPartition(scheduleByPartition); + logger.info(MessageFormat.format("Trace id is: {0}, schedule by partition is {1}", + session.getClientContext().getTraceId(), checker.canScheduleByPart())); + } + } + + public static class PartScheduleChecker extends RelShuttleImpl { + private final int nodeSize; + + private boolean schedulerByPart = true; + + public PartScheduleChecker(int nodeSize) { + this.nodeSize = nodeSize; + } + + public boolean canScheduleByPart() { + return schedulerByPart; + } + + @Override + public RelNode visit(LogicalFilter filter) { + RexUtil.RexSubqueryListFinder finder = new RexUtil.RexSubqueryListFinder(); + filter.getCondition().accept(finder); + for (RexSubQuery subQuery : finder.getSubQueries()) { + subQuery.rel.accept(this); + } + return visitChild(filter, 0, filter.getInput()); + } + + @Override + public RelNode visit(LogicalProject project) { + RexUtil.RexSubqueryListFinder finder = new RexUtil.RexSubqueryListFinder(); + for (RexNode node : project.getProjects()) { + node.accept(finder); + } + for (RexSubQuery subQuery : finder.getSubQueries()) { + subQuery.rel.accept(this); + } + return visitChild(project, 0, project.getInput()); + } + + @Override + public RelNode visit(TableScan scan) { + if (scan instanceof OSSTableScan) { + TableMeta tm = CBOUtil.getTableMeta(scan.getTable()); + int shard = TableTopologyUtil.isShard(tm) ? + tm.getPartitionInfo().getPartitionBy().getPartitions().size() + : -1; + // or shard num is 1 or shard num is an integer multiple of node size. 
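Two pieces of arithmetic in this hunk deserve a restatement. The default `limitNode` is the ceiling of `subPlan.getValue() / polarXParallelism`, written as a remainder test, and `PartScheduleChecker` keeps partition-wise scheduling only when every node receives the same number of partitions, i.e. the shard count is 1 or a positive multiple of the node count:

```java
class ScheduleMath {
    // Ceiling division without floating point: equivalent to the
    // remainder-check form used in SqlQueryExecution.start().
    static int ceilDiv(int totalTasks, int perNodeParallelism) {
        return (totalTasks + perNodeParallelism - 1) / perNodeParallelism;
    }

    // Partition-wise scheduling is well-formed only when partitions divide
    // evenly across nodes: shard == 1, or shard a positive multiple of nodeSize.
    static boolean canScheduleByPartition(int shard, int nodeSize) {
        return shard == 1 || (shard > 0 && shard % nodeSize == 0);
    }

    public static void main(String[] args) {
        System.out.println(ceilDiv(10, 4));                // 3 nodes
        System.out.println(canScheduleByPartition(16, 4)); // true
        System.out.println(canScheduleByPartition(10, 4)); // false
    }
}
```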
+ schedulerByPart &= (shard == 1) || ((shard > 0) && (shard % nodeSize == 0)); + } + return scan; + } + } + public StageExecutionPlan getStagePlan(SubPlan plan, List planFragmentList) { List subStages = new ArrayList<>(); planFragmentList.add(plan.getFragment()); for (SubPlan subPlan : plan.getChildren()) { subStages.add(getStagePlan(subPlan, planFragmentList)); } - return new StageExecutionPlan(plan.getFragment(), plan.getLogicalViewInfo(), plan.getExpandSplitInfos(), + return new StageExecutionPlan(plan.getFragment(), plan.getLogicalViewInfos(), plan.getExpandSplitInfos(), subStages); } @@ -227,6 +314,17 @@ private QueryInfo buildQueryInfo(SqlQueryScheduler scheduler) { stageInfo = Optional.ofNullable(scheduler.getStageInfo()); } + ExecutionContext executionContext = session.getClientContext(); + if (executionContext.getDriverStatistics() != null + && stageInfo.isPresent() && stageInfo.get().isCompleteInfo()) { + // Check if this tree-structure StageInfo is completed and collect driver statistics. + Map> driverStatistics = executionContext.getDriverStatistics(); + + StageInfo rootStage = stageInfo.get(); + + StageInfo.collectStats(rootStage, driverStatistics); + } + QueryInfo queryInfo = stateMachine.updateQueryInfo(stageInfo, querySelf); if (queryInfo.isFinalQueryInfo()) { // capture the final query state and drop reference to the scheduler diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlQueryLocalExecution.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlQueryLocalExecution.java index 8d6ef7dc8..6f932a1fc 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlQueryLocalExecution.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlQueryLocalExecution.java @@ -16,15 +16,10 @@ package com.alibaba.polardbx.executor.mpp.execution; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.inject.Inject; import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.properties.ParamManager; +import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.mpp.Session; @@ -39,6 +34,7 @@ import com.alibaba.polardbx.executor.mpp.operator.factory.LocalBufferExecutorFactory; import com.alibaba.polardbx.executor.mpp.operator.factory.PipelineFactory; import com.alibaba.polardbx.executor.mpp.split.SplitInfo; +import com.alibaba.polardbx.executor.mpp.split.SplitManagerImpl; import com.alibaba.polardbx.executor.operator.spill.SpillerFactory; import com.alibaba.polardbx.executor.utils.ExecUtils; import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; @@ -46,7 +42,13 @@ import com.alibaba.polardbx.optimizer.core.rel.LogicalView; import com.alibaba.polardbx.optimizer.memory.MemoryEstimator; import com.alibaba.polardbx.optimizer.utils.CalciteUtils; -import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; 
+import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.inject.Inject; import io.airlift.units.Duration; import org.apache.calcite.rel.RelNode; @@ -62,9 +64,9 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; -import static com.google.common.base.Preconditions.checkState; import static com.alibaba.polardbx.common.properties.ConnectionParams.INSERT_SELECT_LIMIT; import static com.alibaba.polardbx.common.properties.ConnectionParams.UPDATE_DELETE_SELECT_LIMIT; +import static com.google.common.base.Preconditions.checkState; import static java.util.Objects.requireNonNull; import static java.util.concurrent.TimeUnit.SECONDS; @@ -142,7 +144,8 @@ public void start() { LocalExecutionPlanner planner = new LocalExecutionPlanner(context, null, parallelism, parallelism, 1, context.getParamManager().getInt(ConnectionParams.PREFETCH_SHARDS), notificationExecutor, - taskContext.isSpillable() ? spillerFactory : null, null, null, false); + taskContext.isSpillable() ? spillerFactory : null, null, null, false, + -1, -1, ImmutableMap.of(), new SplitManagerImpl()); returnColumns = CalciteUtils.buildColumnMeta(physicalPlan, "Last"); boolean syncMode = stateMachine.getSession().isLocalResultIsSync(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlQueryManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlQueryManager.java index 024029ef8..14e799cc0 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlQueryManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlQueryManager.java @@ -48,8 +48,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; -import static com.google.common.base.Preconditions.checkArgument; import static com.alibaba.polardbx.executor.mpp.execution.QueryState.FAILED; +import static com.google.common.base.Preconditions.checkArgument; import static java.util.Objects.requireNonNull; public class SqlQueryManager implements QueryManager { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlStageExecution.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlStageExecution.java index 8702f91c7..f7d72a4b4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlStageExecution.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlStageExecution.java @@ -16,17 +16,9 @@ package com.alibaba.polardbx.executor.mpp.execution; -import com.google.common.base.Predicate; -import com.google.common.collect.HashMultimap; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMultimap; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Multimap; -import com.google.common.collect.Sets; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import 
com.alibaba.polardbx.executor.mpp.OutputBuffers; @@ -36,10 +28,18 @@ import com.alibaba.polardbx.executor.mpp.planner.PlanFragment; import com.alibaba.polardbx.executor.mpp.planner.RemoteSourceNode; import com.alibaba.polardbx.executor.mpp.server.remotetask.HttpRemoteTask; -import com.alibaba.polardbx.gms.node.Node; import com.alibaba.polardbx.executor.mpp.split.RemoteSplit; import com.alibaba.polardbx.executor.mpp.util.ImmutableCollectors; -import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; +import com.alibaba.polardbx.gms.node.Node; +import com.google.common.base.Predicate; +import com.google.common.collect.HashMultimap; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMultimap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Multimap; +import com.google.common.collect.Sets; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; import io.airlift.units.Duration; import javax.annotation.concurrent.GuardedBy; @@ -92,6 +92,8 @@ public class SqlStageExecution { private final AtomicBoolean splitsScheduled = new AtomicBoolean(); private final ListenableFuture> waitBloomFuture; + private final List<String> orderedNodes; + public SqlStageExecution( StageId stageId, URI location, @@ -102,7 +104,8 @@ public SqlStageExecution( NodeTaskMap nodeTaskMap, ExecutorService executor, Predicate needStageIdPredicate, - QueryBloomFilter bloomFilterManager) { + QueryBloomFilter bloomFilterManager, + List<String> orderedNodes) { this.remoteTaskFactory = requireNonNull(remoteTaskFactory, "remoteTaskFactory is null"); this.stageId = stageId; this.summarizeTaskInfo = summarizeTaskInfo; @@ -131,6 +134,7 @@ public SqlStageExecution( } else { this.waitBloomFuture = null; } + this.orderedNodes = orderedNodes; } public void beginScheduling() { @@ -201,6 +205,12 @@ private static Split createRemoteSplitFor(TaskId taskId, TaskLocation taskLocati return new Split(true, new RemoteSplit(location)); } + private static Split createRemoteSplitForPairWise(TaskLocation taskLocation, int outputBufferId) { + TaskLocation location = + new TaskLocation(taskLocation.getNodeServer(), taskLocation.getTaskId(), outputBufferId); + return new Split(true, new RemoteSplit(location)); + } + private synchronized RemoteTask scheduleTask(Node node, TaskId taskId, Multimap sourceSplits, boolean noMoreSplits, boolean startImmediately) { ensureNeedSetStageId(); @@ -208,7 +218,18 @@ private synchronized RemoteTask scheduleTask(Node node, TaskId taskId, Multimap< ImmutableMultimap.Builder initialSplits = ImmutableMultimap.builder(); initialSplits.putAll(sourceSplits); for (Map.Entry entry : exchangeLocations.entries()) { - initialSplits.put(entry.getKey(), createRemoteSplitFor(taskId, entry.getValue(), needStageId)); + if (getFragment().isRemotePairWise()) { + int bufferId = orderedNodes.indexOf(node.getNodeIdentifier()); + checkArgument(bufferId != -1, + "Unknown node id under partition wise join, node id is %s, while all nodes are %s", + node.getNodeIdentifier(), + String.join(",", orderedNodes)); + bufferId = OutputBuffers.OutputBufferId + .formatOutputBufferId(needStageId ?
taskId.getStageId().getId() : 0, bufferId); + initialSplits.put(entry.getKey(), createRemoteSplitForPairWise(entry.getValue(), bufferId)); + } else { + initialSplits.put(entry.getKey(), createRemoteSplitFor(taskId, entry.getValue(), needStageId)); + } } RemoteTask task = remoteTaskFactory.createRemoteTask( @@ -357,8 +378,19 @@ public synchronized void addExchangeLocations(Integer fragmentId, Set newSplits = ImmutableMultimap.builder(); for (TaskLocation exchangeLocation : exchangeLocations) { - newSplits.put(remoteSource.getRelatedId(), - createRemoteSplitFor(task.getTaskId(), exchangeLocation, needStageId)); + if (getFragment().isRemotePairWise()) { + int bufferId = orderedNodes.indexOf(task.getNodeId()); + checkArgument(bufferId != -1, + "Unknown node id under partition wise join, node id is %s, while all nodes are %s", + task.getNodeId(), String.join(",", orderedNodes)); + bufferId = OutputBuffers.OutputBufferId + .formatOutputBufferId(needStageId ? task.getTaskId().getStageId().getId() : 0, bufferId); + newSplits.put(remoteSource.getRelatedId(), + createRemoteSplitForPairWise(exchangeLocation, bufferId)); + } else { + newSplits.put(remoteSource.getRelatedId(), + createRemoteSplitFor(task.getTaskId(), exchangeLocation, needStageId)); + } } task.addSplits(newSplits.build(), isNoMoreSplits); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlTask.java index 78555856e..f6085a94f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlTask.java @@ -16,13 +16,10 @@ package com.alibaba.polardbx.executor.mpp.execution; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.TrxIdGenerator; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.properties.MetricLevel; +import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.common.ExecutorContext; @@ -35,6 +32,7 @@ import com.alibaba.polardbx.executor.mpp.operator.DriverContext; import com.alibaba.polardbx.executor.mpp.operator.DriverStats; import com.alibaba.polardbx.executor.mpp.operator.OperatorStats; +import com.alibaba.polardbx.executor.mpp.operator.PipelineDepTree; import com.alibaba.polardbx.executor.mpp.operator.TaskStats; import com.alibaba.polardbx.executor.mpp.planner.PlanFragment; import com.alibaba.polardbx.executor.mpp.util.Failures; @@ -47,7 +45,10 @@ import com.alibaba.polardbx.statistics.ExecuteSQLOperation; import com.alibaba.polardbx.statistics.RuntimeStatHelper; import com.alibaba.polardbx.statistics.RuntimeStatistics; -import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; import io.airlift.units.DataSize; import org.apache.calcite.rel.RelNode; import org.apache.calcite.util.trace.RuntimeStatisticsSketch; @@ -68,12 +69,13 @@ import
java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import static com.alibaba.polardbx.common.properties.MetricLevel.isSQLMetricEnabled; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkState; import static com.google.common.util.concurrent.Futures.immediateFuture; import static com.google.common.util.concurrent.MoreExecutors.directExecutor; -import static com.alibaba.polardbx.common.properties.MetricLevel.isSQLMetricEnabled; import static java.util.Objects.requireNonNull; public class SqlTask { @@ -323,8 +325,8 @@ public MemoryPool getTaskMemoryPool() { private TaskStats getTaskStats(TaskHolder taskHolder) { TaskInfo finalTaskInfo = taskHolder.getFinalTaskInfo(); if (finalTaskInfo != null) { - if (finalTaskInfo.getStats() != null) { - return finalTaskInfo.getStats(); + if (finalTaskInfo.getTaskStats() != null) { + return finalTaskInfo.getTaskStats(); } } SqlTaskExecution taskExecution = taskHolder.getTaskExecution(); @@ -345,7 +347,7 @@ public DateTime getTaskEndTime() { if (taskHolder != null) { TaskInfo finalTaskInfo = taskHolder.getFinalTaskInfo(); if (finalTaskInfo != null) { - return finalTaskInfo.getStats().getEndTime(); + return finalTaskInfo.getTaskStats().getEndTime(); } SqlTaskExecution taskExecution = taskHolder.getTaskExecution(); if (taskExecution != null) { @@ -369,8 +371,10 @@ private static Set getNoMoreSplits(TaskHolder taskHolder) { private TaskInfo createInitialTaskInfo(TaskHolder taskHolder) { TaskStats taskStats = getTaskStats(taskHolder); + if (isMPPMetricEnabled) { TaskStatus taskStatus = createInitialTaskStatus(Optional.of(taskStats), nodeId); + return new TaskInfo( taskStatus, lastHeartbeat.get(), @@ -385,11 +389,10 @@ private TaskInfo createInitialTaskInfo(TaskHolder taskHolder) { taskStats.getMemoryReservation(), System.currentTimeMillis() - createTime, 0, - taskStats.getElapsedTime(), - taskStats.getTotalScheduledTime(), + taskStats.getElapsedTimeMillis(), + taskStats.getTotalScheduledTimeNanos(), taskStateMachine.getPullDataTime(), - taskStats.getDeliveryTime() - ); + taskStats.getDeliveryTimeMillis()); } else { TaskStatus taskStatus = createInitialTaskStatus(Optional.empty(), nodeId); @@ -407,11 +410,10 @@ private TaskInfo createInitialTaskInfo(TaskHolder taskHolder) { taskStats.getMemoryReservation(), System.currentTimeMillis() - createTime, taskStateMachine.getDriverEndTime(), - taskStats.getElapsedTime(), - taskStats.getTotalScheduledTime(), + taskStats.getElapsedTimeMillis(), + taskStats.getTotalScheduledTimeNanos(), taskStateMachine.getPullDataTime(), - taskStats.getDeliveryTime() - ); + taskStats.getDeliveryTimeMillis()); } } @@ -435,11 +437,10 @@ private TaskInfo createTaskInfo(TaskHolder taskHolder) { taskStats.getMemoryReservation(), System.currentTimeMillis() - createTime, 0, - taskStats.getElapsedTime(), - taskStats.getTotalScheduledTime(), + taskStats.getElapsedTimeMillis(), + taskStats.getTotalScheduledTimeNanos(), taskStateMachine.getPullDataTime(), - taskStats.getDeliveryTime() - ); + taskStats.getDeliveryTimeMillis()); } else { return new TaskInfo( taskStatus, @@ -455,17 +456,16 @@ private TaskInfo createTaskInfo(TaskHolder taskHolder) { taskStats.getMemoryReservation(), System.currentTimeMillis() - createTime, taskStateMachine.getDriverEndTime(), - taskStats.getElapsedTime(), - taskStats.getTotalScheduledTime(), + 
taskStats.getElapsedTimeMillis(), + taskStats.getTotalScheduledTimeNanos(), taskStateMachine.getPullDataTime(), - taskStats.getDeliveryTime()); + taskStats.getDeliveryTimeMillis()); } } private TaskInfo createTaskInfoWithTaskStats(TaskHolder taskHolder) { TaskStats taskStats = getTaskStats(taskHolder); Set noMoreSplits = getNoMoreSplits(taskHolder); - TaskStatus taskStatus = createTaskStatus(taskHolder, Optional.empty()); TaskInfo taskInfo = new TaskInfo( taskStatus, @@ -481,11 +481,10 @@ private TaskInfo createTaskInfoWithTaskStats(TaskHolder taskHolder) { taskStats.getMemoryReservation(), System.currentTimeMillis() - createTime, taskStateMachine.getDriverEndTime(), - taskStats.getElapsedTime(), - taskStats.getTotalScheduledTime(), + taskStats.getElapsedTimeMillis(), + taskStats.getTotalScheduledTimeNanos(), taskStateMachine.getPullDataTime(), - taskStats.getDeliveryTime() - ); + taskStats.getDeliveryTimeMillis()); return taskInfo; } @@ -704,27 +703,44 @@ private TaskStats getTaskStats(TaskContext context) { List operatorStatsList = new ArrayList<>(); List driverContexts = new ArrayList<>(); + + Map idToName = new HashMap<>(); + Map idToStatisticsSketch = new HashMap<>(); + + if (context.getContext().getRuntimeStatistics() != null) { + Map statisticsSketchMap = + ((RuntimeStatistics) context.getContext().getRuntimeStatistics()).toSketch(); + + for (Map.Entry entry : statisticsSketchMap.entrySet()) { + idToStatisticsSketch.put(entry.getKey().getRelatedId(), entry.getValue()); + idToName.put(entry.getKey().getRelatedId(), entry.getKey().getClass().getSimpleName()); + } + for (RuntimeStatisticsSketch statistics : idToStatisticsSketch.values()) { + cumulativeMemory += statistics.getMemory(); + } + + } for (PipelineContext pipelineContext : context.getPipelineContexts()) { for (DriverContext driverContext : pipelineContext.getDriverContexts()) { driverContexts.add(driverContext); + for (Integer operatorId : driverContext.getDriverInputs()) { + RuntimeStatisticsSketch ret = idToStatisticsSketch.get(operatorId); + if (ret != null) { + OperatorStats operatorStats = + new OperatorStats(Optional.empty(), driverContext.getPipelineContext().getPipelineId(), + Optional.of(idToName.get(operatorId)), operatorId, ret.getRowCount(), + ret.getRuntimeFilteredRowCount(), + ret.getOutputBytes(), ret.getStartupDuration(), + ret.getDuration(), ret.getMemory(), ret.getInstances(), ret.getSpillCnt()); + operatorStatsList.add(operatorStats); + } + } } } if (taskStateMachine.getState().isDone()) { context.end(); if (context.getContext().getRuntimeStatistics() != null) { - Map statisticsSketchMap = - ((RuntimeStatistics) context.getContext().getRuntimeStatistics()).toSketch(); - Map idToStatisticsSketch = new HashMap<>(); - Map idToName = new HashMap<>(); - for (Map.Entry entry : statisticsSketchMap.entrySet()) { - idToStatisticsSketch.put(entry.getKey().getRelatedId(), entry.getValue()); - idToName.put(entry.getKey().getRelatedId(), entry.getKey().getClass().getSimpleName()); - } - for (RuntimeStatisticsSketch statistics : idToStatisticsSketch.values()) { - cumulativeMemory += statistics.getMemory(); - } - Set finishedStatics = new HashSet<>(); for (DriverContext driverContext : driverContexts) { @@ -734,8 +750,9 @@ private TaskStats getTaskStats(TaskContext context) { OperatorStats operatorStats = new OperatorStats(Optional.empty(), driverContext.getPipelineContext().getPipelineId(), Optional.of(idToName.get(operatorId)), operatorId, ret.getRowCount(), - ret.getOutputBytes(), ret.getStartupDuration(), 
ret.getDuration(), - ret.getMemory(), ret.getInstances(), ret.getSpillCnt()); + ret.getRuntimeFilteredRowCount(), + ret.getOutputBytes(), ret.getStartupDuration(), + ret.getDuration(), ret.getMemory(), ret.getInstances(), ret.getSpillCnt()); operatorStatsList.add(operatorStats); finishedStatics.add(operatorId); } @@ -770,6 +787,8 @@ private TaskStats getTaskStats(TaskContext context) { } } + List driverStatsList = new ArrayList<>(driverContexts.size()); + for (int i = 0; i < driverContexts.size(); i++) { DriverContext driverContext = driverContexts.get(i); @@ -789,6 +808,7 @@ private TaskStats getTaskStats(TaskContext context) { processedInputPositions += driverStats.getInputPositions(); outputDataSize += driverStats.getOutputDataSize(); outputPositions += driverStats.getOutputPositions(); + driverStatsList.add(driverStats); } long startMillis = context.getStartMillis(); @@ -818,14 +838,29 @@ private TaskStats getTaskStats(TaskContext context) { long peakMemory = taskMemoryPool.getMaxMemoryUsage(); long memoryReservation = taskMemoryPool.getMemoryUsage(); + Map> pipelineDeps = buildPipelineDeps(context.getPipelineDepTree()); return new TaskStats(taskStateMachine.getCreatedTime(), context.getStartTime(), context.getEndTime(), elapsedTime, queuedTime, deliveryTime, totalPipeExecs, queuedPipeExecs, runningPipeExecs, completePipeExecs, cumulativeMemory, memoryReservation, peakMemory, totalScheduledTime, totalCpuTime, totalUserTime, totalBlockedTime, (runningPipeExecs > 0), ImmutableSet.of(), processedInputDataSize, processedInputPositions, - outputDataSize, outputPositions, operatorStatsList - ); + outputDataSize, outputPositions, operatorStatsList, + driverStatsList, pipelineDeps); + } + + private Map> buildPipelineDeps(PipelineDepTree pipelineDepTree) { + if (pipelineDepTree == null) { + return null; + } + Map> deps = new HashMap<>(pipelineDepTree.size()); + for (int i = 0; i < pipelineDepTree.size(); i++) { + PipelineDepTree.TreeNode node = pipelineDepTree.getNode(i); + List depIds = node.getDependChildren().stream() + .map(PipelineDepTree.TreeNode::getId).collect(Collectors.toList()); + deps.put(node.getId(), depIds); + } + return deps; } public void clean() { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlTaskExecution.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlTaskExecution.java index 5b6be4280..3ecaca175 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlTaskExecution.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlTaskExecution.java @@ -16,15 +16,9 @@ package com.alibaba.polardbx.executor.mpp.execution; -import com.alibaba.polardbx.optimizer.config.meta.DrdsRelMetadataProvider; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Sets; -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.DefaultSchema; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; import com.alibaba.polardbx.executor.mpp.Session; import com.alibaba.polardbx.executor.mpp.Threads; import com.alibaba.polardbx.executor.mpp.execution.buffer.OutputBuffer; @@ -35,8 +29,18 @@ import 
com.alibaba.polardbx.executor.mpp.operator.PipelineDepTree; import com.alibaba.polardbx.executor.mpp.operator.factory.PipelineFactory; import com.alibaba.polardbx.executor.mpp.planner.PlanFragment; +import com.alibaba.polardbx.executor.mpp.split.JdbcSplit; +import com.alibaba.polardbx.executor.mpp.split.OssSplit; import com.alibaba.polardbx.executor.operator.util.bloomfilter.BloomFilterExpression; -import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; +import com.alibaba.polardbx.executor.utils.ExecUtils; +import com.alibaba.polardbx.optimizer.config.meta.DrdsRelMetadataProvider; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Sets; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; import io.airlift.concurrent.SetThreadName; import org.apache.calcite.rel.metadata.JaninoRelMetadataProvider; import org.apache.calcite.rel.metadata.RelMetadataQuery; @@ -143,7 +147,6 @@ private SqlTaskExecution( this.notificationExecutor = requireNonNull(notificationExecutor, "notificationExecutor is null"); this.bloomFilterExpression = bloomFilterExpressionMap; - RelMetadataQuery.THREAD_PROVIDERS.set(JaninoRelMetadataProvider.of(DrdsRelMetadataProvider.INSTANCE)); List<PipelineFactory> pipelineFactories = planner.plan( fragment, outputBuffer, session); taskContext.setPipelineDepTree(new PipelineDepTree(pipelineFactories)); @@ -354,6 +357,7 @@ private void schedulePartitionedSource(TaskSource source) { } if (newSplits.size() > 0) { + // BKA join if (source.isExpand()) { int start = ThreadLocalRandom.current().nextInt(updateTaskSources.size()); List<ScheduledSplit> shuffleLists = zigzagSplitsByMysqlInst(new ArrayList<>(newSplits)); for (ScheduledSplit scheduledSplit : shuffleLists) { updateTaskSources.get(start++).addSplit(scheduledSplit); if (start >= updateTaskSources.size()) { start = 0; } } } } else { - int start = ThreadLocalRandom.current().nextInt(updateTaskSources.size()); - // the sort ensures that splits belonging to the same MySQL instance are scattered - List<ScheduledSplit> shuffleLists = zigzagSplitsByMysqlInst(new ArrayList<>(newSplits)); - for (ScheduledSplit scheduledSplit : shuffleLists) { - updateTaskSources.get(start++).addSplit(scheduledSplit); - if (start >= updateTaskSources.size()) { - start = 0; + boolean containsJdbcSplit = containsJdbcSplit(newSplits); + if (containsJdbcSplit) { + // normal case, such as InnoDB, or InnoDB mixed with OSS (not columnar) + int start = ThreadLocalRandom.current().nextInt(updateTaskSources.size()); + // the sort ensures that splits belonging to the same MySQL instance are scattered + List<ScheduledSplit> shuffleLists = zigzagSplitsByMysqlInst(new ArrayList<>(newSplits)); + for (ScheduledSplit scheduledSplit : shuffleLists) { + updateTaskSources.get(start++).addSplit(scheduledSplit); + if (start >= updateTaskSources.size()) { + start = 0; + } + } + } else { + int start = ThreadLocalRandom.current().nextInt(updateTaskSources.size()); + Map<Integer, Integer> partCounter = new HashMap<>(); + for (ScheduledSplit scheduledSplit : newSplits) { + if (scheduledSplit.getSplit().getConnectorSplit() instanceof OssSplit + && ((OssSplit) scheduledSplit.getSplit().getConnectorSplit()).isLocalPairWise() + && !taskContext.getContext().getParamManager() + .getBoolean(ConnectionParams.LOCAL_PAIRWISE_PROBE_SEPARATE)) { + OssSplit ossSplit = (OssSplit) scheduledSplit.getSplit().getConnectorSplit(); + int partIndex = ossSplit.getPartIndex(); + int count = partCounter.getOrDefault(partIndex, 0); + partCounter.put(partIndex, count + 1); + int selectSeq = ExecUtils.assignPartitionToExecutor(count,
+ ossSplit.getNodePartCount(), ossSplit.getPartIndex(), updateTaskSources.size()); + updateTaskSources.get(selectSeq).addSplit(scheduledSplit); + } else { + updateTaskSources.get(start++).addSplit(scheduledSplit); + if (start >= updateTaskSources.size()) { + start = 0; + } + } } } } @@ -384,6 +414,15 @@ private void schedulePartitionedSource(TaskSource source) { } } + private boolean containsJdbcSplit(Set newSplits) { + for (ScheduledSplit scheduledSplit : newSplits) { + if (scheduledSplit.getSplit().getConnectorSplit() instanceof JdbcSplit) { + return true; + } + } + return false; + } + private List zigzagSplitsByMysqlInst(List splits) { List newSplits = new ArrayList(splits.size()); int instCount = 0; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlTaskExecutionFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlTaskExecutionFactory.java index da74c9edf..b6e750e9a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlTaskExecutionFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlTaskExecutionFactory.java @@ -16,16 +16,21 @@ package com.alibaba.polardbx.executor.mpp.execution; +import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.executor.mpp.Session; import com.alibaba.polardbx.executor.mpp.execution.buffer.OutputBuffer; import com.alibaba.polardbx.executor.mpp.operator.ExchangeClientSupplier; import com.alibaba.polardbx.executor.mpp.operator.LocalExecutionPlanner; import com.alibaba.polardbx.executor.mpp.planner.PlanFragment; +import com.alibaba.polardbx.executor.mpp.split.OssSplit; +import com.alibaba.polardbx.executor.mpp.split.SplitManagerImpl; import com.alibaba.polardbx.executor.operator.spill.SpillerFactory; import io.airlift.http.client.HttpClient; import java.net.URI; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.concurrent.Executor; import static java.util.Objects.requireNonNull; @@ -70,7 +75,11 @@ public SqlTaskExecution create(Session session, QueryContext queryContext, TaskS fragment.getPrefetch(), taskNotificationExecutor, taskContext.isSpillable() ? 
spillerFactory : null, - httpClient, uri, true); + httpClient, uri, true, + fragment.getLocalPartitionCount(), + fragment.getTotalPartitionCount(), + fragment.getSplitCountMap(), + new SplitManagerImpl()); return SqlTaskExecution.createSqlTaskExecution( taskStateMachine, diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlTaskManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlTaskManager.java index 3dd04b24a..66970298e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlTaskManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/SqlTaskManager.java @@ -16,17 +16,10 @@ package com.alibaba.polardbx.executor.mpp.execution; -import com.google.common.base.Preconditions; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.MppConfig; +import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.common.utils.logger.MDC; @@ -36,7 +29,14 @@ import com.alibaba.polardbx.executor.mpp.planner.PlanFragment; import com.alibaba.polardbx.executor.mpp.web.ForWorkerInfo; import com.alibaba.polardbx.executor.operator.spill.SpillerFactory; -import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; +import com.google.common.base.Preconditions; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import com.google.common.cache.RemovalListener; +import com.google.common.cache.RemovalNotification; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListenableFuture; import io.airlift.http.client.HttpClient; import io.airlift.node.NodeInfo; import io.airlift.units.DataSize; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/StageInfo.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/StageInfo.java index cbd300fa0..242ad6127 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/StageInfo.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/StageInfo.java @@ -29,17 +29,22 @@ */ package com.alibaba.polardbx.executor.mpp.execution; +import com.alibaba.polardbx.executor.mpp.operator.DriverContext; +import com.alibaba.polardbx.executor.mpp.operator.DriverStats; +import com.alibaba.polardbx.executor.mpp.operator.TaskStats; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.util.MoreObjects; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.collect.ImmutableList; -import com.alibaba.polardbx.util.MoreObjects; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import org.apache.commons.lang3.StringUtils; import javax.annotation.Nullable; import 
javax.annotation.concurrent.Immutable; import java.net.URI; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.Optional; import static java.util.Objects.requireNonNull; @@ -200,6 +205,62 @@ private static StageInfo summaryInternal(StageInfo parentStage) { return newStage; } + public static void collectStats(StageInfo rootStage, Map<String, List<Object[]>> driverStatistics) { + String stageId = rootStage.getStageId().toString(); + if (rootStage.isCompleteInfo() && !driverStatistics.containsKey(stageId)) { + List<Object[]> driverInfoResult = new ArrayList<>(); + driverStatistics.put(stageId, driverInfoResult); + + // Collect driver statistics for each driver in each task. + List<TaskInfo> taskInfoList = rootStage.getTasks(); + for (TaskInfo taskInfo : taskInfoList) { + TaskStats taskStats = taskInfo.getTaskStats(); + + for (DriverStats driverStats : taskStats.getDriverStats()) { + DriverContext.DriverRuntimeStatistics driverRuntimeStatistics = + driverStats.getDriverRuntimeStatistics(); + + // Convert + // [traceId].[stageId].[nodeId].[pipelineId].[threadId] + // into + // | traceId | stageId-pipelineId | nodeId | threadId | + String[] splitDriverId = StringUtils.split(driverStats.getDriverId(), "."); + final String traceId = splitDriverId[0]; + final String stageAndPipeline = splitDriverId[1] + '-' + splitDriverId[3]; + final String nodeId = splitDriverId[2]; + final String threadId = splitDriverId[4]; + + if (driverRuntimeStatistics == null) { + // Use -1 as the default value when there is no dump in TaskExecution. + driverInfoResult.add(new Object[] { + traceId, stageAndPipeline, nodeId, threadId, + -1, -1, -1, -1, -1, -1, -1, -1}); + } else { + driverInfoResult.add(new Object[] { + traceId, stageAndPipeline, nodeId, threadId, + driverRuntimeStatistics.getRunningCost(), + driverRuntimeStatistics.getPendingCost(), + driverRuntimeStatistics.getBlockedCost(), + driverRuntimeStatistics.getOpenCost(), + driverRuntimeStatistics.getTotalCost(), + driverRuntimeStatistics.getRunningCount(), + driverRuntimeStatistics.getPendingCount(), + driverRuntimeStatistics.getBlockedCount() + }); + } + + } + } + } + + // Traverse the child stages.
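+ // Depth-first recursion over the stage tree; the containsKey guard above keeps each stage's statistics from being collected twice.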
+ if (rootStage.getSubStages() != null && !rootStage.getSubStages().isEmpty()) { + for (int i = 0; i < rootStage.getSubStages().size(); i++) { + collectStats(rootStage.getSubStages().get(i), driverStatistics); + } + } + } + public StageInfo summary() { return summaryInternal(this); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/StageStateMachine.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/StageStateMachine.java index 658bc2992..c4e1a5657 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/StageStateMachine.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/StageStateMachine.java @@ -29,7 +29,6 @@ */ package com.alibaba.polardbx.executor.mpp.execution; -import com.google.common.collect.ImmutableList; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.mpp.Session; @@ -39,6 +38,7 @@ import com.alibaba.polardbx.executor.mpp.operator.TaskStats; import com.alibaba.polardbx.executor.mpp.planner.PlanFragment; import com.alibaba.polardbx.executor.mpp.util.Failures; +import com.google.common.collect.ImmutableList; import org.joda.time.DateTime; import javax.annotation.concurrent.ThreadSafe; @@ -53,7 +53,6 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; -import static com.google.common.base.MoreObjects.toStringHelper; import static com.alibaba.polardbx.executor.mpp.execution.StageState.ABORTED; import static com.alibaba.polardbx.executor.mpp.execution.StageState.CANCELED; import static com.alibaba.polardbx.executor.mpp.execution.StageState.FAILED; @@ -65,6 +64,7 @@ import static com.alibaba.polardbx.executor.mpp.execution.StageState.SCHEDULING; import static com.alibaba.polardbx.executor.mpp.execution.StageState.SCHEDULING_SPLITS; import static com.alibaba.polardbx.executor.mpp.execution.StageState.TERMINAL_STAGE_STATES; +import static com.google.common.base.MoreObjects.toStringHelper; import static io.airlift.units.DataSize.succinctBytes; import static java.util.Objects.requireNonNull; @@ -239,7 +239,7 @@ public StageInfo getStageInfo(Supplier> taskInfosSupplier, runningTasks++; } - TaskStats taskStats = taskInfo.getStats(); + TaskStats taskStats = taskInfo.getTaskStats(); if (taskStats != null) { totalPipelineExecs += taskStats.getTotalPipelineExecs(); @@ -250,10 +250,10 @@ public StageInfo getStageInfo(Supplier> taskInfosSupplier, cumulativeMemory += taskStats.getCumulativeMemory(); totalMemoryReservation += taskStats.getMemoryReservation(); - totalScheduledTime += taskStats.getTotalScheduledTime(); - totalCpuTime += taskStats.getTotalCpuTime(); - totalUserTime += taskStats.getTotalUserTime(); - totalBlockedTime += taskStats.getTotalBlockedTime(); + totalScheduledTime += taskStats.getTotalScheduledTimeNanos(); + totalCpuTime += taskStats.getTotalCpuTimeNanos(); + totalUserTime += taskStats.getTotalUserTimeNanos(); + totalBlockedTime += taskStats.getTotalBlockedTimeNanos(); if (!taskState.isDone()) { fullyBlocked &= taskStats.isFullyBlocked(); blockedReasons.addAll(taskStats.getBlockedReasons()); @@ -271,9 +271,10 @@ public StageInfo getStageInfo(Supplier> taskInfosSupplier, OperatorStats operator = taskStats.getOperatorStats().get(i); operators.add(new OperatorStats(Optional.of(stageId), operator.getPipelineId(), operator.getOperatorType(), - operator.getOperatorId(), operator.getOutputRowCount(), 
operator.getOutputBytes(), - operator.getStartupDuration(), operator.getDuration(), operator.getMemory(), - operator.getInstances(), operator.getSpillCnt())); + operator.getOperatorId(), operator.getOutputRowCount(), + operator.getRuntimeFilteredCount(), + operator.getOutputBytes(), operator.getStartupDuration(), operator.getDuration(), + operator.getMemory(), operator.getInstances(), operator.getSpillCnt())); } } else { if (taskStats.getOperatorStats() != null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/StageStats.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/StageStats.java index 7382b4f56..8d38bb42a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/StageStats.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/StageStats.java @@ -29,11 +29,12 @@ */ package com.alibaba.polardbx.executor.mpp.execution; +import com.alibaba.polardbx.executor.mpp.operator.BlockedReason; +import com.alibaba.polardbx.executor.mpp.operator.OperatorStats; import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonFormat; import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.collect.ImmutableSet; -import com.alibaba.polardbx.executor.mpp.operator.BlockedReason; -import com.alibaba.polardbx.executor.mpp.operator.OperatorStats; import io.airlift.units.DataSize; import org.joda.time.DateTime; @@ -46,6 +47,7 @@ @Immutable public class StageStats { + @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS", timezone = "GMT+8") private final DateTime schedulingComplete; private final int totalTasks; @@ -61,10 +63,10 @@ public class StageStats { private final DataSize totalMemoryReservation; private final DataSize peakMemoryReservation; - private final long totalScheduledTime; - private final long totalCpuTime; - private final long totalUserTime; - private final long totalBlockedTime; + private final long totalScheduledTimeNanos; + private final long totalCpuTimeNanos; + private final long totalUserTimeNanos; + private final long totalBlockedTimeNanos; private final boolean fullyBlocked; private final Set blockedReasons; @@ -92,10 +94,10 @@ public StageStats( @JsonProperty("totalMemoryReservation") DataSize totalMemoryReservation, @JsonProperty("peakMemoryReservation") DataSize peakMemoryReservation, - @JsonProperty("totalScheduledTime") long totalScheduledTime, - @JsonProperty("totalCpuTime") long totalCpuTime, - @JsonProperty("totalUserTime") long totalUserTime, - @JsonProperty("totalBlockedTime") long totalBlockedTime, + @JsonProperty("totalScheduledTimeNanos") long totalScheduledTimeNanos, + @JsonProperty("totalCpuTimeNanos") long totalCpuTimeNanos, + @JsonProperty("totalUserTimeNanos") long totalUserTimeNanos, + @JsonProperty("totalBlockedTimeNanos") long totalBlockedTimeNanos, @JsonProperty("fullyBlocked") boolean fullyBlocked, @JsonProperty("blockedReasons") Set blockedReasons, @@ -127,10 +129,10 @@ public StageStats( this.totalMemoryReservation = requireNonNull(totalMemoryReservation, "totalMemoryReservation is null"); this.peakMemoryReservation = requireNonNull(peakMemoryReservation, "peakMemoryReservation is null"); - this.totalScheduledTime = requireNonNull(totalScheduledTime, "totalScheduledTime is null"); - this.totalCpuTime = requireNonNull(totalCpuTime, "totalCpuTime is null"); - this.totalUserTime = requireNonNull(totalUserTime, "totalUserTime is null"); - this.totalBlockedTime = 
requireNonNull(totalBlockedTime, "totalBlockedTime is null"); + this.totalScheduledTimeNanos = requireNonNull(totalScheduledTimeNanos, "totalScheduledTime is null"); + this.totalCpuTimeNanos = requireNonNull(totalCpuTimeNanos, "totalCpuTime is null"); + this.totalUserTimeNanos = requireNonNull(totalUserTimeNanos, "totalUserTime is null"); + this.totalBlockedTimeNanos = requireNonNull(totalBlockedTimeNanos, "totalBlockedTime is null"); this.fullyBlocked = fullyBlocked; this.blockedReasons = ImmutableSet.copyOf(requireNonNull(blockedReasons, "blockedReasons is null")); @@ -145,6 +147,7 @@ public StageStats( } @JsonProperty + @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS", timezone = "GMT+8") public DateTime getSchedulingComplete() { return schedulingComplete; } @@ -200,23 +203,23 @@ public DataSize getPeakMemoryReservation() { } @JsonProperty - public long getTotalScheduledTime() { - return totalScheduledTime; + public long getTotalScheduledTimeNanos() { + return totalScheduledTimeNanos; } @JsonProperty - public long getTotalCpuTime() { - return totalCpuTime; + public long getTotalCpuTimeNanos() { + return totalCpuTimeNanos; } @JsonProperty - public long getTotalUserTime() { - return totalUserTime; + public long getTotalUserTimeNanos() { + return totalUserTimeNanos; } @JsonProperty - public long getTotalBlockedTime() { - return totalBlockedTime; + public long getTotalBlockedTimeNanos() { + return totalBlockedTimeNanos; } @JsonProperty diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/StateMachine.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/StateMachine.java index 1f6ea9b5f..1adaa6494 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/StateMachine.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/StateMachine.java @@ -29,15 +29,15 @@ */ package com.alibaba.polardbx.executor.mpp.execution; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.mpp.Threads; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.util.concurrent.ListenableFuture; import io.airlift.units.Duration; import javax.annotation.Nonnull; @@ -52,9 +52,9 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Predicate; +import static com.alibaba.polardbx.executor.mpp.Threads.ENABLE_WISP; import static com.google.common.base.Preconditions.checkState; import static com.google.common.util.concurrent.Futures.immediateFuture; -import static com.alibaba.polardbx.executor.mpp.Threads.ENABLE_WISP; import static java.util.Objects.requireNonNull; import static java.util.concurrent.TimeUnit.NANOSECONDS; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/TaskExecutor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/TaskExecutor.java index 90a258116..7996790d5 100644 --- 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/TaskExecutor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/TaskExecutor.java @@ -24,20 +24,25 @@ import com.alibaba.polardbx.common.utils.thread.NamedThreadFactory; import com.alibaba.polardbx.common.utils.thread.ThreadCpuStatUtil; import com.alibaba.polardbx.executor.mpp.Threads; +import com.alibaba.polardbx.executor.mpp.deploy.ServiceProvider; +import com.alibaba.polardbx.executor.mpp.operator.DriverContext; +import com.alibaba.polardbx.executor.mpp.operator.LocalExecutionPlanner; import com.alibaba.polardbx.optimizer.config.meta.DrdsRelMetadataProvider; import com.alibaba.polardbx.optimizer.memory.ApMemoryPool; import com.alibaba.polardbx.optimizer.memory.MemoryManager; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.SettableFuture; import io.airlift.concurrent.SetThreadName; import org.apache.calcite.rel.metadata.JaninoRelMetadataProvider; import org.apache.calcite.rel.metadata.RelMetadataQuery; + import javax.annotation.PostConstruct; import javax.annotation.PreDestroy; import javax.annotation.concurrent.GuardedBy; import javax.annotation.concurrent.ThreadSafe; import javax.inject.Inject; +import java.text.MessageFormat; import java.util.ArrayList; import java.util.Iterator; import java.util.List; @@ -63,6 +70,7 @@ public class TaskExecutor { private static final Logger log = LoggerFactory.getLogger(TaskExecutor.class); + private static final Logger PIPELINE_LOG = LoggerFactory.getLogger(LocalExecutionPlanner.class); private static final AtomicLong NEXT_LOW_RUNNER_ID = new AtomicLong(); private static final AtomicLong NEXT_HIGH_RUNNER_ID = new AtomicLong(); @@ -211,10 +219,15 @@ public List> enqueueSplits(TaskHandle taskHandle, boolean hi private void splitFinished(PrioritizedSplitRunner split) { split.destroy(); + // time cost statistics + split.markFinishTimestamp(); } private void startSplit(PrioritizedSplitRunner split, boolean highPriority) { try { + // time cost statistics + split.markStartTimestamp(); + split.startPending(); if (highPriority) { highPendingSplits.put(split); } else { @@ -345,6 +358,87 @@ private PrioritizedSplitRunner(TaskHandle taskHandle, SplitRunner split) { this.split = split; this.workerId = NEXT_WORKER_ID.getAndIncrement(); this.splitRunQuanta = MppConfig.getInstance().getSplitRunQuanta(); + this.split.runtimeStatsSupplier(() -> dump()); + } + + private long startNanoTimestamp = 0L; + private long finishNanoTimestamp = 0L; + private long blockedNanoTimestamp = 0L; + private long pendingNanoTimestamp = 0L; + private long runningNanoTimestamp = 0L; + + private long openCost = 0L; + private long blockedCost = 0L; + private long pendingCost = 0L; + private long runningCost = 0L; + + private long blockedCount = 0L; + private long pendingCount = 0L; + private long runningCount = 0L; + + public void markStartTimestamp() { + startNanoTimestamp = System.nanoTime(); + } + + public void markFinishTimestamp() { + if (finishNanoTimestamp == 0L) { + finishNanoTimestamp = System.nanoTime(); + if (PIPELINE_LOG.isDebugEnabled()) { + String info = MessageFormat.format( + "taskId={0}, splitId={1}, splitInfo={2}", + taskHandle.getTaskId(), + splitId, split.getInfo()); + + PIPELINE_LOG.debug(MessageFormat.format( + "finish split: {0}, runningCost={1}, pendingCost={2}, blockedCost={3},
totalCost={4}, " + + " runningCount={5}," + + " pendingCount={6}," + + " blockedCount={7}," + + "startTs={8}, endTs={9}", + info, runningCost, pendingCost, blockedCost, (finishNanoTimestamp - startNanoTimestamp), + runningCount, pendingCount, blockedCount, + startNanoTimestamp, finishNanoTimestamp)); + } + } + } + + public DriverContext.DriverRuntimeStatistics dump() { + return new DriverContext.DriverRuntimeStatistics( + runningCost, pendingCost, blockedCost, openCost, + (System.nanoTime() - startNanoTimestamp), + (int) runningCount, (int) pendingCount, (int) blockedCount); + } + + public void startPending() { + pendingCount++; + pendingNanoTimestamp = System.nanoTime(); + } + + public void startBlocked() { + blockedCount++; + blockedNanoTimestamp = System.nanoTime(); + } + + public void startRunning() { + runningCount++; + runningNanoTimestamp = System.nanoTime(); + } + + public void finishPending() { + pendingCost += System.nanoTime() - pendingNanoTimestamp; + } + + public void finishBlocked() { + if (blockedCount == 1) { + // The first blocked status is recognized as open cost. + openCost += System.nanoTime() - blockedNanoTimestamp; + } else { + blockedCost += System.nanoTime() - blockedNanoTimestamp; + } + } + + public void finishRunning() { + runningCost += System.nanoTime() - runningNanoTimestamp; } public long getSplitCost() { @@ -475,6 +569,7 @@ public void run() { logPriorityExecutorInfo(highPriorityExecutorInfo, lastHighRunnerProcessCount); lastHighRunnerProcessCount = highPriorityExecutorInfo.getRunnerProcessCount(); ApMemoryPool apMemoryPool = MemoryManager.getInstance().getApMemoryPool(); + log.info("Global Memory Pool: used " + MemoryManager.getInstance().getGlobalMemoryPool().getMemoryUsage() + " total " + MemoryManager.getInstance().getGlobalMemoryPool().getMaxLimit()); @@ -508,9 +603,6 @@ private class ApRunner implements Runnable { @Override public void run() { - //现在MPP在执行过程中都有可能使用DrdsRelMetadataProvider,所以这里在各个运行线程统一设置吧。 - RelMetadataQuery.THREAD_PROVIDERS.set(JaninoRelMetadataProvider.of(DrdsRelMetadataProvider.INSTANCE)); - runningCounted = true; runningLowSplits.incrementAndGet(); try (SetThreadName runnerName = new SetThreadName("ApSplitRunner-%s", runnerId)) { @@ -521,6 +613,8 @@ public void run() { runningCounted = false; runningLowSplits.decrementAndGet(); split = lowPendingSplits.take(); + // time cost statistics + split.finishPending(); split.buildMDC(); runningCounted = true; runningLowSplits.incrementAndGet(); @@ -543,7 +637,10 @@ public void run() { if (log.isDebugEnabled()) { log.debug(String.format("%s is started", split.getInfo())); } + // time cost statistics + split.startRunning(); blocked = split.process(start); + split.finishRunning(); long cost = System.currentTimeMillis() - start; try { split.spiltCostAdd(cost); @@ -561,11 +658,15 @@ public void run() { splitFinished(split); } else { if (blocked.isDone()) { + // time cost statistics + split.startPending(); lowPendingSplits.put(split); } else { if (log.isDebugEnabled()) { log.debug(String.format("%s is bloked", split.getInfo())); } + // time cost statistics + split.startBlocked(); lowBlockedSplitNum.getAndIncrement(); blockedSplits.put(split, blocked); split.recordBlocked(); @@ -574,9 +675,13 @@ public void run() { log.debug(String.format("%s is pending", split.getInfo())); } split.recordBlockedFinished(); + // time cost statistics + split.finishBlocked(); lowBlockedSplitNum.getAndDecrement(); blockedSplits.remove(split); try { + // time cost statistics + split.startPending(); 
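+                            // the split goes back to the pending queue; pending time accrues until a runner thread takes it again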
lowPendingSplits.put(split); } catch (Exception e) { log.error("error", e); @@ -625,9 +730,6 @@ private class TpRunner implements Runnable { @Override public void run() { - //现在MPP在执行过程中都有可能使用DrdsRelMetadataProvider,所以这里在各个运行线程统一设置吧。 - RelMetadataQuery.THREAD_PROVIDERS.set(JaninoRelMetadataProvider.of(DrdsRelMetadataProvider.INSTANCE)); - runningCounted = true; runningHighSplits.incrementAndGet(); try (SetThreadName runnerName = new SetThreadName("TpSplitRunner-%s", runnerId)) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/TaskInfo.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/TaskInfo.java index d4e2c1600..aaa38fed8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/TaskInfo.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/TaskInfo.java @@ -61,8 +61,8 @@ public class TaskInfo { getEmptyTaskStats().getTotalPipelineExecs(), getEmptyTaskStats().getCumulativeMemory(), getEmptyTaskStats().getMemoryReservation(), - getEmptyTaskStats().getElapsedTime(), - getEmptyTaskStats().getTotalCpuTime(), + getEmptyTaskStats().getElapsedTimeMillis(), + getEmptyTaskStats().getTotalCpuTimeNanos(), 0, 0, 0, @@ -76,7 +76,7 @@ public static TaskInfo getEmptyTaskInfo() { private final DateTime lastHeartbeat; private final OutputBufferInfo outputBuffers; private final Set noMoreSplits; - private final TaskStats stats; + private final TaskStats taskStats; private final boolean needsPlan; private final boolean complete; @@ -85,36 +85,36 @@ public static TaskInfo getEmptyTaskInfo() { private final int totalPipelineExecs; private final double cumulativeMemory; private final long memoryReservation; - private final long elapsedTime; + private final long elapsedTimeMillis; private final long totalCpuTime; - private final long processTime; + private final long processTimeMillis; private final long processWall; - private final long pullDataTime; - private final long deliveryTime; + private final long pullDataTimeMillis; + private final long deliveryTimeMillis; @JsonCreator public TaskInfo(@JsonProperty("taskStatus") TaskStatus taskStatus, @JsonProperty("lastHeartbeat") DateTime lastHeartbeat, @JsonProperty("outputBuffers") OutputBufferInfo outputBuffers, @JsonProperty("noMoreSplits") Set noMoreSplits, - @JsonProperty("stats") TaskStats stats, + @JsonProperty("taskStats") TaskStats taskStats, @JsonProperty("needsPlan") boolean needsPlan, @JsonProperty("complete") boolean complete, @JsonProperty("completedPipelineExecs") int completedPipelineExecs, @JsonProperty("totalPipelineExecs") int totalPipelineExecs, @JsonProperty("cumulativeMemory") double cumulativeMemory, @JsonProperty("memoryReservation") long memoryReservation, - @JsonProperty("elapsedTime") long elapsedTime, + @JsonProperty("elapsedTimeMillis") long elapsedTimeMillis, @JsonProperty("totalCpuTime") long totalCpuTime, - @JsonProperty("processTime") long processTime, + @JsonProperty("processTimeMillis") long processTimeMillis, @JsonProperty("processWall") long processWall, - @JsonProperty("pullDataTime") long pullDataTime, - @JsonProperty("deliveryTime") long deliveryTime) { + @JsonProperty("pullDataTimeMillis") long pullDataTimeMillis, + @JsonProperty("deliveryTimeMillis") long deliveryTimeMillis) { this.taskStatus = requireNonNull(taskStatus, "taskStatus is null"); this.lastHeartbeat = requireNonNull(lastHeartbeat, "lastHeartbeat is null"); this.outputBuffers = requireNonNull(outputBuffers, "outputBuffers is 
null"); this.noMoreSplits = requireNonNull(noMoreSplits, "noMoreSplits is null"); - this.stats = stats; + this.taskStats = taskStats; this.needsPlan = needsPlan; this.complete = complete; @@ -125,13 +125,13 @@ public TaskInfo(@JsonProperty("taskStatus") TaskStatus taskStatus, this.cumulativeMemory = cumulativeMemory; this.memoryReservation = memoryReservation; - this.elapsedTime = elapsedTime; + this.elapsedTimeMillis = elapsedTimeMillis; this.totalCpuTime = totalCpuTime; - this.processTime = processTime; + this.processTimeMillis = processTimeMillis; this.processWall = processWall; - this.pullDataTime = pullDataTime; - this.deliveryTime = deliveryTime; + this.pullDataTimeMillis = pullDataTimeMillis; + this.deliveryTimeMillis = deliveryTimeMillis; } @JsonProperty @@ -155,8 +155,8 @@ public Set getNoMoreSplits() { } @JsonProperty - public TaskStats getStats() { - return stats; + public TaskStats getTaskStats() { + return taskStats; } @JsonProperty @@ -190,8 +190,8 @@ public long getMemoryReservation() { } @JsonProperty - public long getElapsedTime() { - return elapsedTime; + public long getElapsedTimeMillis() { + return elapsedTimeMillis; } @JsonProperty @@ -200,8 +200,8 @@ public long getTotalCpuTime() { } @JsonProperty - public long getProcessTime() { - return processTime; + public long getProcessTimeMillis() { + return processTimeMillis; } @JsonProperty @@ -210,13 +210,13 @@ public long getProcessWall() { } @JsonProperty - public long getPullDataTime() { - return pullDataTime; + public long getPullDataTimeMillis() { + return pullDataTimeMillis; } @JsonProperty - public long getDeliveryTime() { - return deliveryTime; + public long getDeliveryTimeMillis() { + return deliveryTimeMillis; } @Override @@ -241,8 +241,8 @@ public static TaskInfo createInitialTask( taskStats.getTotalPipelineExecs(), taskStats.getCumulativeMemory(), taskStats.getMemoryReservation(), - taskStats.getElapsedTime(), - taskStats.getTotalCpuTime(), + taskStats.getElapsedTimeMillis(), + taskStats.getTotalCpuTimeNanos(), 0, 0, 0, @@ -255,27 +255,27 @@ public TaskInfo withTaskStatus(TaskStatus newTaskStatus) { lastHeartbeat, outputBuffers, noMoreSplits, - stats, + taskStats, needsPlan, complete, completedPipelineExecs, totalPipelineExecs, cumulativeMemory, memoryReservation, - elapsedTime, + elapsedTimeMillis, totalCpuTime, - processTime, + processTimeMillis, processWall, - pullDataTime, - deliveryTime); + pullDataTimeMillis, + deliveryTimeMillis); } public String toTaskString() { MoreObjects.ToStringHelper toString = MoreObjects.toStringHelper(this); toString.add("task", getTaskStatus().getTaskId()); - toString.add("elapsedTime", elapsedTime); - toString.add("processTime", processTime); + toString.add("elapsedTimeMillis", elapsedTimeMillis); + toString.add("processTimeMillis", processTimeMillis); toString.add("processWall", processWall); - toString.add("pullDataTime", pullDataTime); - toString.add("deliveryTime", deliveryTime); + toString.add("pullDataTime", pullDataTimeMillis); + toString.add("deliveryTime", deliveryTimeMillis); TaskLocation taskLocation = getTaskStatus().getSelf(); toString.add("host", taskLocation.getNodeServer().getHost() + ":" + taskLocation.getNodeServer().getHttpPort()); return toString.toString(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/TaskManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/TaskManager.java index e14780792..453b64f89 100644 --- 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/TaskManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/TaskManager.java @@ -29,11 +29,11 @@ */ package com.alibaba.polardbx.executor.mpp.execution; -import com.google.common.util.concurrent.ListenableFuture; +import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; import com.alibaba.polardbx.executor.mpp.OutputBuffers; import com.alibaba.polardbx.executor.mpp.execution.buffer.BufferResult; import com.alibaba.polardbx.executor.mpp.planner.PlanFragment; -import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; +import com.google.common.util.concurrent.ListenableFuture; import io.airlift.units.DataSize; import java.net.URI; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/TaskStateMachine.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/TaskStateMachine.java index 38c09851c..4d39d0834 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/TaskStateMachine.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/TaskStateMachine.java @@ -29,9 +29,9 @@ */ package com.alibaba.polardbx.executor.mpp.execution; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.google.common.util.concurrent.ListenableFuture; import org.joda.time.DateTime; import javax.annotation.concurrent.ThreadSafe; @@ -39,10 +39,10 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.atomic.AtomicLong; +import static com.alibaba.polardbx.executor.mpp.execution.TaskState.TERMINAL_TASK_STATES; import static com.google.common.base.MoreObjects.toStringHelper; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.util.concurrent.Futures.immediateFuture; -import static com.alibaba.polardbx.executor.mpp.execution.TaskState.TERMINAL_TASK_STATES; import static java.util.Objects.requireNonNull; @ThreadSafe diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/TaskStatus.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/TaskStatus.java index c07ee7132..f3e3fa9a4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/TaskStatus.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/TaskStatus.java @@ -29,22 +29,22 @@ */ package com.alibaba.polardbx.executor.mpp.execution; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.google.common.collect.ImmutableList; import com.alibaba.polardbx.executor.mpp.metadata.TaskLocation; -import com.alibaba.polardbx.util.MoreObjects; import com.alibaba.polardbx.optimizer.statis.TaskMemoryStatisticsGroup; import com.alibaba.polardbx.statistics.ExecuteSQLOperation; import com.alibaba.polardbx.statistics.RuntimeStatistics; +import com.alibaba.polardbx.util.MoreObjects; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.collect.ImmutableList; import java.util.List; import java.util.Map; -import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkState; import static 
com.alibaba.polardbx.executor.mpp.execution.TaskId.getEmptyTask; import static com.alibaba.polardbx.executor.mpp.metadata.TaskLocation.getEmptyTaskLocation; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkState; import static java.util.Objects.requireNonNull; public class TaskStatus { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/BroadcastOutputBuffer.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/BroadcastOutputBuffer.java index 4eb33768a..0e4ff3abf 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/BroadcastOutputBuffer.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/BroadcastOutputBuffer.java @@ -16,12 +16,12 @@ package com.alibaba.polardbx.executor.mpp.execution.buffer; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Sets; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.mpp.OutputBuffers; import com.alibaba.polardbx.executor.mpp.execution.StateMachine; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Sets; +import com.google.common.util.concurrent.ListenableFuture; import io.airlift.units.DataSize; import javax.annotation.concurrent.GuardedBy; @@ -32,10 +32,10 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; +import static com.alibaba.polardbx.executor.mpp.Threads.ENABLE_WISP; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkState; import static com.google.common.util.concurrent.Futures.immediateFuture; -import static com.alibaba.polardbx.executor.mpp.Threads.ENABLE_WISP; import static java.util.Objects.requireNonNull; public class BroadcastOutputBuffer diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/BufferInfo.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/BufferInfo.java index e5e7d23d5..221ff2282 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/BufferInfo.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/BufferInfo.java @@ -29,10 +29,10 @@ */ package com.alibaba.polardbx.executor.mpp.execution.buffer; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; import com.alibaba.polardbx.executor.mpp.OutputBuffers; import com.alibaba.polardbx.util.MoreObjects; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; import java.util.Objects; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/ClientBuffer.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/ClientBuffer.java index 443d5a2cb..200a1d42c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/ClientBuffer.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/ClientBuffer.java @@ -16,13 +16,13 @@ package com.alibaba.polardbx.executor.mpp.execution.buffer; -import com.google.common.collect.ImmutableList; -import 
com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.mpp.OutputBuffers; import com.alibaba.polardbx.executor.mpp.Threads; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; import io.airlift.units.DataSize; import javax.annotation.concurrent.GuardedBy; @@ -36,12 +36,12 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLongFieldUpdater; +import static com.alibaba.polardbx.executor.mpp.execution.buffer.BufferResult.emptyResults; import static com.google.common.base.MoreObjects.toStringHelper; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkState; import static com.google.common.base.Verify.verify; import static com.google.common.util.concurrent.Futures.immediateFuture; -import static com.alibaba.polardbx.executor.mpp.execution.buffer.BufferResult.emptyResults; import static java.lang.Math.toIntExact; import static java.util.Objects.requireNonNull; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/LazyOutputBuffer.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/LazyOutputBuffer.java index 1f3e0aaaf..f63955972 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/LazyOutputBuffer.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/LazyOutputBuffer.java @@ -16,9 +16,6 @@ package com.alibaba.polardbx.executor.mpp.execution.buffer; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.executor.mpp.OutputBuffers; import com.alibaba.polardbx.executor.mpp.execution.RecordMemSystemListener; @@ -28,6 +25,9 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; import com.alibaba.polardbx.optimizer.memory.MemoryPool; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.util.concurrent.ListenableFuture; import io.airlift.concurrent.ExtendedSettableFuture; import io.airlift.units.DataSize; @@ -38,13 +38,13 @@ import java.util.Set; import java.util.concurrent.Executor; -import static com.google.common.base.Preconditions.checkState; -import static com.google.common.util.concurrent.Futures.immediateFuture; import static com.alibaba.polardbx.executor.mpp.execution.buffer.BufferResult.emptyResults; import static com.alibaba.polardbx.executor.mpp.execution.buffer.BufferState.FAILED; import static com.alibaba.polardbx.executor.mpp.execution.buffer.BufferState.FINISHED; import static com.alibaba.polardbx.executor.mpp.execution.buffer.BufferState.OPEN; import static com.alibaba.polardbx.executor.mpp.execution.buffer.BufferState.TERMINAL_BUFFER_STATES; +import static com.google.common.base.Preconditions.checkState; +import static com.google.common.util.concurrent.Futures.immediateFuture; import static java.util.Objects.requireNonNull; public class LazyOutputBuffer 
implements OutputBuffer { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/OutputBuffer.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/OutputBuffer.java index 43a3f1b13..1c735660a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/OutputBuffer.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/OutputBuffer.java @@ -16,10 +16,10 @@ package com.alibaba.polardbx.executor.mpp.execution.buffer; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.mpp.OutputBuffers; import com.alibaba.polardbx.executor.mpp.execution.StateMachine; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.google.common.util.concurrent.ListenableFuture; import io.airlift.units.DataSize; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/OutputBufferInfo.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/OutputBufferInfo.java index 72b0cbab0..72dad8634 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/OutputBufferInfo.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/OutputBufferInfo.java @@ -29,9 +29,9 @@ */ package com.alibaba.polardbx.executor.mpp.execution.buffer; +import com.alibaba.polardbx.util.MoreObjects; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -import com.alibaba.polardbx.util.MoreObjects; import java.util.Objects; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/OutputBufferMemoryManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/OutputBufferMemoryManager.java index 79a87268f..f50fd7139 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/OutputBufferMemoryManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/OutputBufferMemoryManager.java @@ -16,10 +16,10 @@ package com.alibaba.polardbx.executor.mpp.execution.buffer; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; import com.alibaba.polardbx.common.exception.MemoryNotEnoughException; import com.alibaba.polardbx.executor.mpp.execution.SystemMemoryUsageListener; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; import javax.annotation.concurrent.GuardedBy; import javax.annotation.concurrent.ThreadSafe; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/PageBufferInfo.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/PageBufferInfo.java index 5136e5054..f32f401bf 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/PageBufferInfo.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/PageBufferInfo.java @@ -29,9 +29,9 @@ */ package com.alibaba.polardbx.executor.mpp.execution.buffer; +import com.alibaba.polardbx.util.MoreObjects; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -import com.alibaba.polardbx.util.MoreObjects; 
import java.util.Objects; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/PagesSerde.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/PagesSerde.java index a4f24e020..459cfeb1b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/PagesSerde.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/PagesSerde.java @@ -19,6 +19,7 @@ import com.alibaba.polardbx.executor.chunk.BlockEncoding; import com.alibaba.polardbx.executor.chunk.BlockEncodingBuilders; import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import io.airlift.compress.Compressor; import io.airlift.compress.Decompressor; @@ -49,7 +50,18 @@ public PagesSerde(Optional compressor, List types) { this.compressor = requireNonNull(compressor, "compressor is null"); this.decompressor = requireNonNull(decompressor, "decompressor is null"); - this.blockEncodings = BlockEncodingBuilders.create(types); + this.blockEncodings = BlockEncodingBuilders.create(types, null); + checkArgument(compressor.isPresent() == decompressor.isPresent(), + "compressor and decompressor must both be present or both be absent"); + } + + public PagesSerde(Optional compressor, + Optional decompressor, + List types, + ExecutionContext context) { + this.compressor = requireNonNull(compressor, "compressor is null"); + this.decompressor = requireNonNull(decompressor, "decompressor is null"); + this.blockEncodings = BlockEncodingBuilders.create(types, context); checkArgument(compressor.isPresent() == decompressor.isPresent(), "compressor and decompressor must both be present or both be absent"); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/PagesSerdeFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/PagesSerdeFactory.java index 714ad815e..efc5b46ae 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/PagesSerdeFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/PagesSerdeFactory.java @@ -16,6 +16,7 @@ package com.alibaba.polardbx.executor.mpp.execution.buffer; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import io.airlift.compress.lz4.Lz4Compressor; import io.airlift.compress.lz4.Lz4Decompressor; @@ -38,4 +39,12 @@ public PagesSerde createPagesSerde(List types) { return new PagesSerde(Optional.empty(), Optional.empty(), types); } } + + public PagesSerde createPagesSerde(List types, ExecutionContext context) { + if (compressionEnabled) { + return new PagesSerde(Optional.of(new Lz4Compressor()), Optional.of(new Lz4Decompressor()), types, context); + } else { + return new PagesSerde(Optional.empty(), Optional.empty(), types, context); + } + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/PartitionedOutputBuffer.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/PartitionedOutputBuffer.java index ff7cb0095..1eb0c85fb 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/PartitionedOutputBuffer.java +++ 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/PartitionedOutputBuffer.java @@ -16,21 +16,21 @@ package com.alibaba.polardbx.executor.mpp.execution.buffer; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.mpp.OutputBuffers; import com.alibaba.polardbx.executor.mpp.execution.StateMachine; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListenableFuture; import io.airlift.units.DataSize; import java.util.ArrayList; import java.util.List; import java.util.concurrent.atomic.AtomicLongFieldUpdater; +import static com.alibaba.polardbx.common.properties.MetricLevel.isSQLMetricEnabled; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkState; import static com.google.common.util.concurrent.Futures.immediateFuture; -import static com.alibaba.polardbx.common.properties.MetricLevel.isSQLMetricEnabled; import static java.util.Objects.requireNonNull; public class PartitionedOutputBuffer diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/SpilledOutputBufferMemoryManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/SpilledOutputBufferMemoryManager.java index 3aaddd846..1a8faebd8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/SpilledOutputBufferMemoryManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/buffer/SpilledOutputBufferMemoryManager.java @@ -16,9 +16,9 @@ package com.alibaba.polardbx.executor.mpp.execution.buffer; -import com.google.common.util.concurrent.SettableFuture; import com.alibaba.polardbx.common.exception.MemoryNotEnoughException; import com.alibaba.polardbx.executor.mpp.execution.SystemMemoryUsageListener; +import com.google.common.util.concurrent.SettableFuture; import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicLong; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/BroadcastOutputBufferManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/BroadcastOutputBufferManager.java index 78e8d20e9..4516440a4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/BroadcastOutputBufferManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/BroadcastOutputBufferManager.java @@ -47,14 +47,22 @@ class BroadcastOutputBufferManager private final Consumer outputBufferTarget; private final Map outputNoMoreBuffers; + /** + * required child output size under remote partition wise join + * otherwise, zero + */ + private final int bufferCount; + @GuardedBy("this") private OutputBuffers outputBuffers = OutputBuffers.createInitialEmptyOutputBuffers(OutputBuffers.BufferType.BROADCAST); public BroadcastOutputBufferManager(Map outputNoMoreBuffers, + int bufferCount, Consumer outputBufferTarget) { this.outputBufferTarget = requireNonNull(outputBufferTarget, "outputBufferTarget is null"); this.outputNoMoreBuffers = requireNonNull(outputNoMoreBuffers, "outputNoMoreBuffers is null"); + this.bufferCount = bufferCount; outputBufferTarget.accept(outputBuffers); } @@ -102,4 +110,8 @@ public void addOutputBuffers(StageId 
stageId, List
         }
         outputBufferTarget.accept(newOutputBuffers);
     }
+
+    public int getBufferCount() {
+        return bufferCount;
+    }
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/ColumnarNodeSelector.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/ColumnarNodeSelector.java
new file mode 100644
index 000000000..5c7d7653f
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/ColumnarNodeSelector.java
@@ -0,0 +1,237 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.mpp.execution.scheduler;
+
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.common.partition.MurmurHashUtils;
+import com.alibaba.polardbx.common.utils.Pair;
+import com.alibaba.polardbx.common.utils.logger.Logger;
+import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
+import com.alibaba.polardbx.executor.mpp.execution.NodeTaskMap;
+import com.alibaba.polardbx.executor.mpp.metadata.Split;
+import com.alibaba.polardbx.executor.mpp.split.OssSplit;
+import com.alibaba.polardbx.gms.node.InternalNode;
+import com.alibaba.polardbx.gms.node.InternalNodeManager;
+import com.alibaba.polardbx.gms.node.Node;
+import com.google.common.collect.Multimap;
+import io.airlift.slice.XxHash64;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+public class ColumnarNodeSelector extends SimpleNodeSelector {
+    private static final Logger log = LoggerFactory.getLogger(ColumnarNodeSelector.class);
+
+    private final boolean enableTwoChoiceSchedule;
+
+    private Set nodeUsedByPairWise = new HashSet<>();
+
+    private boolean scheduleByPartition = false;
+
+    public ColumnarNodeSelector(InternalNodeManager nodeManager, NodeTaskMap nodeTaskMap, Set nodes,
+                                int limitCandidates, int maxSplitsPerNode, boolean enableOssRoundRobin,
+                                boolean randomNode, boolean enableTwoChoiceSchedule, boolean preferLocal) {
+        super(nodeManager, nodeTaskMap, nodes, limitCandidates, maxSplitsPerNode, enableOssRoundRobin, randomNode,
+            preferLocal);
+        this.enableTwoChoiceSchedule = enableTwoChoiceSchedule;
+    }
+
+    @Override
+    public Multimap scheduleOssSplit(List splits, List candidateNodes,
+                                     NodeAssignmentStats assignmentStats,
+                                     Multimap assignment) {
+        if (scheduleByPartition) {
+            return scheduleColumnarByPartition(splits, candidateNodes, assignmentStats, assignment);
+        }
+        Pair, List> splitsPair = splitByDefinedPartNum(splits);
+        // assign non-partitioned splits first, and handle scheduling skew if it happens
+        assignNonPartition(splitsPair.getValue(), candidateNodes, assignmentStats, assignment);
+        assignByPartition(splitsPair.getKey(), candidateNodes, assignmentStats, assignment, false);
+        return assignment;
+    }
+
+    private Multimap scheduleColumnarByPartition(List splits, List candidateNodes,
+                                                 NodeAssignmentStats assignmentStats,
+                                                 Multimap assignment) {
+        assignByPartition(splits, candidateNodes, assignmentStats, assignment, true);
+        return assignment;
+    }
+
+    private void assignNonPartition(List splits, List candidateNodes,
+                                    NodeAssignmentStats assignmentStats, Multimap assignment) {
+        super.scheduleOssSplit(splits, candidateNodes, assignmentStats, assignment);
+    }
+
+    @Override
+    public void assignToNode(List candidateNodes, NodeAssignmentStats assignmentStats,
+                             Multimap assignment, Map> randomAssign) {
+        int maxSplitCount = randomAssign.values().stream().mapToInt(List::size).max().getAsInt();
+        int minSplitCount = randomAssign.values().stream().mapToInt(List::size).min().getAsInt();
+
+        if (maxSplitCount > 2 * minSplitCount) {
+            if (log.isDebugEnabled()) {
+                logScheduleSkewMessage(assignment, maxSplitCount, minSplitCount);
+            }
+            if (enableTwoChoiceSchedule) {
+                scheduleByTwoChoice(candidateNodes, assignmentStats, assignment, randomAssign);
+                return;
+            }
+        }
+        super.assignToNode(candidateNodes, assignmentStats, assignment, randomAssign);
+    }
+
+    private void scheduleByTwoChoice(List candidateNodes, NodeAssignmentStats assignmentStats,
+                                     Multimap assignment, Map> randomAssign) {
+        Map balancedAssign = new HashMap<>();
+
+        for (List splits : randomAssign.values()) {
+            for (Split split : splits) {
+                int bucket = chooseBucketByTwoChoice(((OssSplit) split.getConnectorSplit()).getDesignatedFile(),
+                    candidateNodes.size(), balancedAssign);
+                doAssign(candidateNodes, assignmentStats, assignment, split, bucket);
+            }
+        }
+        if (log.isDebugEnabled()) {
+            log.debug("schedule by two-choice. detail schedule result is " + assignment.entries().stream()
+                .map(entry -> entry.getKey().getHostPort() + " : " + String.join(",",
+                    ((OssSplit) entry.getValue().getConnectorSplit()).getDesignatedFile()))
+                .collect(Collectors.joining("; ")));
+        }
+    }
+
+    /**
+     * Return the chosen bucket, and update the balanced assignment result.
+     */
+    public static int chooseBucketByTwoChoice(List files, int allNodes, Map balancedAssign) {
+        long fileNameCode = files.hashCode();
+        long hashCode1 = MurmurHashUtils.murmurHash128WithZeroSeed(fileNameCode);
+        long hashCode2 = XxHash64.hash(fileNameCode);
+
+        // calculate the two candidate buckets
+        int bucket1 = (int) ((hashCode1 & Long.MAX_VALUE) % allNodes);
+        int bucket2 = (int) ((hashCode2 & Long.MAX_VALUE) % allNodes);
+
+        // pick the bucket with the smaller assigned count
+        int count1 = balancedAssign.getOrDefault(bucket1, Integer.MIN_VALUE),
+            count2 = balancedAssign.getOrDefault(bucket2, Integer.MIN_VALUE);
+
+        int bucket = count1 < count2 ? bucket1 : bucket2;
+        balancedAssign.compute(bucket, (key, count) -> (count == null) ? 1 : count + 1);
+        return bucket;
+    }
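// ------------------------------------------------------------------------------------------------
// Editor's note: chooseBucketByTwoChoice above is the classic "power of two choices" policy: hash
// each split's file list with two independent hash functions and place it on the less-loaded of
// the two candidate buckets, which keeps the maximum load very close to uniform. A standalone,
// illustrative sketch; the two mixers below stand in for murmurHash128WithZeroSeed and XxHash64
// and are assumptions of this sketch, not the real functions:

import java.util.HashMap;
import java.util.Map;

class TwoChoiceSketch {
    static long mix1(long x) { x *= 0x9E3779B97F4A7C15L; return x ^ (x >>> 32); }
    static long mix2(long x) { x *= 0xC2B2AE3D27D4EB4FL; return x ^ (x >>> 29); }

    public static void main(String[] args) {
        int nodes = 4;
        Map<Integer, Integer> load = new HashMap<>();
        for (long file = 0; file < 1000; file++) {
            int b1 = (int) ((mix1(file) & Long.MAX_VALUE) % nodes);
            int b2 = (int) ((mix2(file) & Long.MAX_VALUE) % nodes);
            // pick the currently less-loaded candidate, as chooseBucketByTwoChoice does
            int bucket = load.getOrDefault(b1, 0) <= load.getOrDefault(b2, 0) ? b1 : b2;
            load.merge(bucket, 1, Integer::sum);
        }
        System.out.println(load); // prints a near-uniform load map, roughly 250 per node
    }
}
// ------------------------------------------------------------------------------------------------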
+
+    private void logScheduleSkewMessage(Multimap assignment, int maxSplitCount, int minSplitCount) {
+        log.debug(String.format(
+            "schedule split skewed under non pair-wise, max split count is %s, while min split count is %s",
+            maxSplitCount, minSplitCount));
+
+        log.debug("schedule by file. detail schedule result is " + assignment.entries().stream()
+            .map(entry -> entry.getKey().getHostPort() + " : " + String.join(",",
+                ((OssSplit) entry.getValue().getConnectorSplit()).getDesignatedFile()))
+            .collect(Collectors.joining("; ")));
+    }
+
+    private Pair, List> splitByDefinedPartNum(List splits) {
+        List splitsHasPartNum = new ArrayList<>();
+        List restSplits = new ArrayList<>();
+        for (Split split : splits) {
+            if (((OssSplit) split.getConnectorSplit()).getPartIndex() > -1) {
+                splitsHasPartNum.add(split);
+            } else {
+                restSplits.add(split);
+            }
+        }
+        return Pair.of(splitsHasPartNum, restSplits);
+    }
+
+    private void assignByPartition(List splits, List candidateNodes,
+                                   NodeAssignmentStats assignmentStats,
+                                   Multimap assignment, boolean forceGenPart) {
+        if (splits.isEmpty()) {
+            return;
+        }
+        // TODO: could be replaced with consistent hashing for better locality
+        candidateNodes.sort(NODE_COMPARATOR);
+
+        if (log.isDebugEnabled()) {
+            log.debug("start distribute split under partition wise join\n");
+            log.debug(
+                "selected nodes: " + candidateNodes.stream().map(Node::getHostPort).collect(Collectors.joining(","))
+                    + "\n");
+        }
+
+        Map> nodeIdToPartitions = new HashMap<>();
+        Map partToNodeId = new HashMap<>();
+        for (Split split : splits) {
+            OssSplit ossSplit = (OssSplit) (split.getConnectorSplit());
+            int partNum = ossSplit.getPartIndex();
+            if (partNum < 0 && forceGenPart) {
+                partNum = OssSplit.calcPartition(ossSplit.getLogicalSchema(), ossSplit.getLogicalTableName(),
+                    ossSplit.getPhysicalSchema(), ossSplit.getPhyTableNameList().get(0));
+            }
+            int nodeId = partNum % candidateNodes.size();
+            if (log.isDebugEnabled()) {
+                log.debug("oss split: " + ossSplit + " part number: " + partNum + " node id: " + nodeId + "\n");
+            }
+            partToNodeId.put(partNum, nodeId);
+            boolean alreadyAdd = nodeIdToPartitions.computeIfAbsent(nodeId, k -> new ArrayList<>()).contains(partNum);
+            if (!alreadyAdd) {
+                nodeIdToPartitions.get(nodeId).add(partNum);
+            }
+            doAssign(candidateNodes, assignmentStats, assignment, split, nodeId);
+        }
+        if (log.isDebugEnabled()) {
+            log.debug("finish distribute split under partition wise join\n");
+        }
+
+        nodeUsedByPairWise.addAll(nodeIdToPartitions.keySet());
+        // sort the partitions
+        nodeIdToPartitions.values().forEach(Collections::sort);
+        updateOssSplitPartInfo(splits, nodeIdToPartitions, partToNodeId);
+    }
+
+    /**
+     * Change partNum in ossSplit from global scope to node scope.
+     */
+    private void updateOssSplitPartInfo(List splits, Map> nodeIdToPartitions,
+                                        Map partToNodeId) {
+        for (Split split : splits) {
+            OssSplit ossSplit = (OssSplit) (split.getConnectorSplit());
+            // reset partition info to default if not local partition wise
+            if (!ossSplit.isLocalPairWise()) {
+                ossSplit.setNodePartCount(-1);
+                ossSplit.setPartIndex(-1);
+            } else {
+                int partNum = ossSplit.getPartIndex();
+                Integer nodeId = partToNodeId.get(partNum);
+                List nodeParts = nodeIdToPartitions.get(nodeId);
+                ossSplit.setPartIndex(nodeParts.indexOf(partNum));
+            }
+        }
+    }
+
+    public void setScheduleByPartition(boolean scheduleByPartition) {
+        this.scheduleByPartition = scheduleByPartition;
+    }
+}
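Editor's note on assignByPartition and updateOssSplitPartInfo in the new file above: splits are
first pinned to nodes by "partNum % nodeCount", then each split's partition index is rewritten
from its global value to that partition's position inside its node's sorted partition list. The
following standalone sketch walks through the remapping; the 6-partition/3-node numbers are
illustrative assumptions, not values taken from the patch.

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class PartIndexRemapSketch {
    public static void main(String[] args) {
        int partitions = 6, nodeCount = 3;
        Map<Integer, List<Integer>> nodeIdToPartitions = new HashMap<>();
        for (int partNum = 0; partNum < partitions; partNum++) {
            int nodeId = partNum % nodeCount;                    // same rule as assignByPartition
            nodeIdToPartitions.computeIfAbsent(nodeId, k -> new ArrayList<>()).add(partNum);
        }
        nodeIdToPartitions.values().forEach(Collections::sort);  // sort, as the scheduler does
        for (int partNum = 0; partNum < partitions; partNum++) {
            int nodeId = partNum % nodeCount;
            int nodeScopeIndex = nodeIdToPartitions.get(nodeId).indexOf(partNum);
            System.out.printf("global partition %d -> node %d, node-scope index %d%n",
                partNum, nodeId, nodeScopeIndex);
        }
        // e.g. global partition 4 lands on node 1, whose sorted partitions are [1, 4],
        // so its node-scope index becomes 1.
    }
}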
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/DynamicSplitPlacementPolicy.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/DynamicSplitPlacementPolicy.java
index 95abc1c7b..532f9e0ab 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/DynamicSplitPlacementPolicy.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/DynamicSplitPlacementPolicy.java
@@ -29,11 +29,11 @@
  */
 package com.alibaba.polardbx.executor.mpp.execution.scheduler;
 
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.Multimap;
 import com.alibaba.polardbx.executor.mpp.execution.RemoteTask;
 import com.alibaba.polardbx.executor.mpp.metadata.Split;
 import com.alibaba.polardbx.gms.node.Node;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Multimap;
 
 import java.util.List;
 import java.util.function.Supplier;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/FixedCountScheduler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/FixedCountScheduler.java
index a73961cac..394a5425e 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/FixedCountScheduler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/FixedCountScheduler.java
@@ -56,6 +56,11 @@ public int getTaskNum() {
         return partitionToNode.size();
     }
 
+    @Override
+    public int requireChildOutputNum() {
+        return getTaskNum();
+    }
+
     @Override
     public int getDriverParallelism() {
         return driverParallelism;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/FixedExpandSourceScheduler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/FixedExpandSourceScheduler.java
index 3967686b3..f3d8839f0 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/FixedExpandSourceScheduler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/FixedExpandSourceScheduler.java
@@ -16,9 +16,6 @@
 
 package com.alibaba.polardbx.executor.mpp.execution.scheduler;
 
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Multimap;
 import com.alibaba.polardbx.common.properties.ConnectionParams;
 import com.alibaba.polardbx.executor.mpp.execution.RemoteTask;
 import com.alibaba.polardbx.executor.mpp.execution.SqlStageExecution;
@@ -28,6 +25,9 @@
 import com.alibaba.polardbx.gms.node.Node;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.utils.QueryConcurrencyPolicy;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Multimap;
 
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -107,6 +107,11 @@ public int getTaskNum() {
         return splitAssignments.size();
     }
 
+    @Override
+    public int requireChildOutputNum() {
+        return getTaskNum();
+    }
+
     @Override
     public int getDriverParallelism() {
         return driverParallelism;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/NodeScheduler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/NodeScheduler.java
index 0082bdf2f..03ba9c5a3 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/NodeScheduler.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/NodeScheduler.java
@@ -38,17 +38,34 @@ public NodeScheduler(InternalNodeManager nodeManager, NodeTaskMap nodeTaskMap) {
         this.nodeTaskMap = nodeTaskMap;
     }
 
-    public NodeSelector createNodeSelector(Session session, int limit) {
+    public NodeSelector createNodeSelector(Session session, int limit, boolean randomNode) {
         int maxSplitsPerNode = session.getClientContext().getParamManager().getInt(ConnectionParams.MPP_SCHEDULE_MAX_SPLITS_PER_NODE);
         boolean slaveFirst = session.getClientContext().getParamManager().getBoolean(ConnectionParams.POLARDBX_SLAVE_INSTANCE_FIRST);
         boolean enableOSSRoundRobin =
-            session.getClientContext().getParamManager().getBoolean(ConnectionParams.ENABLE_OSS_FILE_CONCURRENT_SPLIT_ROUND_ROBIN);
+            session.getClientContext().getParamManager()
+                .getBoolean(ConnectionParams.ENABLE_OSS_FILE_CONCURRENT_SPLIT_ROUND_ROBIN);
         Set nodes = nodeManager.getNodes(NodeState.ACTIVE, ConfigDataMode.isMasterMode() && slaveFirst);
-        return new SimpleNodeSelector(nodeManager, nodeTaskMap, nodes, limit, maxSplitsPerNode, enableOSSRoundRobin);
+        boolean columnarMode = session.getClientContext().getParamManager()
+            .getBoolean(ConnectionParams.ENABLE_COLUMNAR_SCHEDULE);
+
+        boolean preferLocal =
+            session.getClientContext().getParamManager().getBoolean(ConnectionParams.MPP_PREFER_LOCAL_NODE);
+
+        if (columnarMode) {
+            boolean enableTwoChoiceSchedule = session.getClientContext().getParamManager()
+                .getBoolean(ConnectionParams.ENABLE_TWO_CHOICE_SCHEDULE);
+
+            return new ColumnarNodeSelector(nodeManager, nodeTaskMap, nodes, limit, maxSplitsPerNode,
+                enableOSSRoundRobin,
+                randomNode, enableTwoChoiceSchedule, preferLocal);
+        } else {
+            return new SimpleNodeSelector(nodeManager, nodeTaskMap, nodes, limit, maxSplitsPerNode, enableOSSRoundRobin,
+                randomNode, preferLocal);
+        }
     }
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/NodeSelector.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/NodeSelector.java
index fb6a488fe..40d704c3d 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/NodeSelector.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/NodeSelector.java
@@ -29,10 +29,10 @@
  */
 package com.alibaba.polardbx.executor.mpp.execution.scheduler;
 
-import com.google.common.collect.Multimap;
 import com.alibaba.polardbx.executor.mpp.execution.RemoteTask;
 import com.alibaba.polardbx.executor.mpp.metadata.Split;
 import com.alibaba.polardbx.gms.node.Node;
+import com.google.common.collect.Multimap;
 
 import java.util.List;
@@ -56,4 +56,9 @@ public interface NodeSelector {
      * Used to compute the nodes on which source splits execute
      */
     Multimap computeAssignments(List splits, List existingTasks);
+
+    /**
+     * Get the ordered nodes for remote shuffle under partition-wise join.
+     */
+    List getOrderedNode();
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/PartitionedOutputBufferManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/PartitionedOutputBufferManager.java
index 00d56a8cf..bf2e34dd4 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/PartitionedOutputBufferManager.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/PartitionedOutputBufferManager.java
@@ -29,9 +29,9 @@
  */
 package com.alibaba.polardbx.executor.mpp.execution.scheduler;
 
-import com.google.common.collect.ImmutableMap;
 import
com.alibaba.polardbx.executor.mpp.OutputBuffers; import com.alibaba.polardbx.executor.mpp.execution.StageId; +import com.google.common.collect.ImmutableMap; import javax.annotation.concurrent.ThreadSafe; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/PhasedExecutionSchedule.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/PhasedExecutionSchedule.java index 9acbd1978..032e279ba 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/PhasedExecutionSchedule.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/PhasedExecutionSchedule.java @@ -29,9 +29,6 @@ */ package com.alibaba.polardbx.executor.mpp.execution.scheduler; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.mpp.execution.SqlStageExecution; @@ -41,6 +38,9 @@ import com.alibaba.polardbx.executor.mpp.util.ImmutableCollectors; import com.alibaba.polardbx.optimizer.core.rel.BKAJoin; import com.alibaba.polardbx.optimizer.core.rel.SemiBKAJoin; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.core.Join; import org.apache.calcite.rel.logical.LogicalCorrelate; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/ScheduleResult.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/ScheduleResult.java index 696ce4e9b..8457d0fbb 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/ScheduleResult.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/ScheduleResult.java @@ -29,9 +29,9 @@ */ package com.alibaba.polardbx.executor.mpp.execution.scheduler; -import com.google.common.collect.ImmutableSet; import com.alibaba.polardbx.executor.mpp.execution.RemoteTask; import com.alibaba.polardbx.util.MoreObjects; +import com.google.common.collect.ImmutableSet; import java.util.Optional; import java.util.Set; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/SimpleNodeSelector.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/SimpleNodeSelector.java index e3cff30c6..8d44396e3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/SimpleNodeSelector.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/SimpleNodeSelector.java @@ -16,26 +16,33 @@ package com.alibaba.polardbx.executor.mpp.execution.scheduler; -import com.alibaba.polardbx.common.partition.MurmurHashUtils; -import com.alibaba.polardbx.executor.mpp.split.OssSplit; -import com.google.common.collect.HashMultimap; -import com.google.common.collect.Multimap; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.partition.MurmurHashUtils; +import com.alibaba.polardbx.common.utils.Pair; import 
com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.mpp.execution.NodeTaskMap; import com.alibaba.polardbx.executor.mpp.execution.RemoteTask; import com.alibaba.polardbx.executor.mpp.metadata.Split; +import com.alibaba.polardbx.executor.mpp.split.OssSplit; import com.alibaba.polardbx.gms.node.InternalNode; import com.alibaba.polardbx.gms.node.InternalNodeManager; import com.alibaba.polardbx.gms.node.Node; +import com.google.common.collect.HashMultimap; +import com.google.common.collect.Multimap; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; import static com.google.common.base.Preconditions.checkArgument; import static java.util.Objects.requireNonNull; @@ -44,33 +51,55 @@ public class SimpleNodeSelector implements NodeSelector { private static final Logger log = LoggerFactory.getLogger(SimpleNodeSelector.class); - private final InternalNodeManager nodeManager; - private final NodeTaskMap nodeTaskMap; - private final int limitCandidates; - private final int maxSplitsPerNode; + protected static final Comparator NODE_COMPARATOR = Comparator.comparing(Node::getHostPort); + + protected final InternalNodeManager nodeManager; + protected final NodeTaskMap nodeTaskMap; + protected final int limitCandidates; + protected final int maxSplitsPerNode; private final boolean enableOssRoundRobin; + private final List workerNodes; + private final boolean preferLocal; + public SimpleNodeSelector(InternalNodeManager nodeManager, NodeTaskMap nodeTaskMap, Set nodes, - int limitCandidates, int maxSplitsPerNode, boolean enableOssRoundRobin) { + int limitCandidates, int maxSplitsPerNode, boolean enableOssRoundRobin, + boolean randomNode, boolean preferLocal) { this.nodeManager = requireNonNull(nodeManager, "nodeManager is null"); this.nodeTaskMap = requireNonNull(nodeTaskMap, "nodeTaskMap is null"); this.limitCandidates = limitCandidates; this.maxSplitsPerNode = maxSplitsPerNode; - this.workerNodes = selectSuitableNodes(limitCandidates, nodes); + this.workerNodes = selectSuitableNodes(limitCandidates, nodes, randomNode); this.enableOssRoundRobin = enableOssRoundRobin; + this.preferLocal = preferLocal; } - private List selectSuitableNodes(int limit, Collection internalNodes) { + private List selectSuitableNodes(int limit, Collection internalNodes, + boolean randomNode) { checkArgument(limit > 0, "limit must be at least 1"); - Iterator candidates = new ResettableRandomizedIterator(internalNodes); List selected = new ArrayList<>(limit); + + if (preferLocal && limit == 1) { + Node current = selectCurrentNode(); + if (current.isWorker() && internalNodes.contains(current)) { + selected.add(current); + return selected; + } + } + + Iterator candidates = randomNode ? 
new ResettableRandomizedIterator(internalNodes) : + internalNodes.stream().sorted(NODE_COMPARATOR).iterator(); while (selected.size() < limit && candidates.hasNext()) { Node node = candidates.next(); if (node.isWorker()) { selected.add(node); } } + if (!randomNode) { + log.debug("selected nodes under non random mode: " + selected.stream().map(Node::getHostPort).collect( + Collectors.joining(",")) + "\n"); + } return selected; } @@ -81,7 +110,7 @@ public Node selectCurrentNode() { @Override public List selectRandomNodes(int limit) { - return selectSuitableNodes(limit, workerNodes); + return selectSuitableNodes(limit, workerNodes, true); } private List selectNodes(int limit, Iterator candidates, NodeAssignmentStats assignmentStats) { @@ -108,10 +137,11 @@ public Multimap computeAssignments(List splits, List x.getConnectorSplit() instanceof OssSplit)) { - // split affinity - return OssSplitAffinityAssigment(splits, candidateNodes, assignmentStats); + Multimap assignment = HashMultimap.create(); + return scheduleOssSplit(splits, candidateNodes, assignmentStats, assignment); } + // normal schedule. e.g. for innodb Multimap assignment = HashMultimap.create(); ResettableRandomizedIterator nodeIterator = new ResettableRandomizedIterator(candidateNodes); for (Split split : splits) { @@ -129,33 +159,39 @@ public Multimap computeAssignments(List splits, List OssSplitAffinityAssigment(List splits, List candidateNodes, - NodeAssignmentStats assignmentStats) { - Multimap assignment = HashMultimap.create(); + protected Multimap scheduleOssSplit(List splits, List candidateNodes, + NodeAssignmentStats assignmentStats, + Multimap assignment) { + if (splits.isEmpty()) { + return assignment; + } candidateNodes.sort((a, b) -> a.getNodeIdentifier().compareTo(b.getNodeIdentifier())); + log.debug("distribute simple oss split, selected nodes: " + candidateNodes.stream().map(Node::getHostPort) + .collect(Collectors.joining(",")) + "\n"); + final boolean allSplitFileCurrent = splits .stream() .allMatch(split -> ((OssSplit) split.getConnectorSplit()).getDesignatedFile() != null); - if (enableOssRoundRobin && allSplitFileCurrent) { - // use round robin for oss query - int currentId = 0; - for (Split split : splits) { - int position = (currentId++) % candidateNodes.size(); - doAssign(candidateNodes, assignmentStats, assignment, split, position); + + // should be normal case under oss or columnar + if (allSplitFileCurrent) { + if (enableOssRoundRobin) { + return assignRoundRobin(splits, candidateNodes, assignmentStats, assignment); + } else { + return assignAllByFileName(splits, candidateNodes, assignmentStats, assignment); } - return assignment; } for (Split split : splits) { long hashCode; if (((OssSplit) split.getConnectorSplit()).getDesignatedFile() != null) { hashCode = ((OssSplit) split.getConnectorSplit()).getDesignatedFile().hashCode(); - hashCode = MurmurHashUtils.murmurHashWithZeroSeed(hashCode); + hashCode = MurmurHashUtils.murmurHash128WithZeroSeed(hashCode); } else { List phyTableNameList = ((OssSplit) split.getConnectorSplit()).getPhyTableNameList(); hashCode = phyTableNameList.stream().map(x -> x.hashCode()).reduce(31, (a, b) -> a + b).longValue(); - hashCode = MurmurHashUtils.murmurHashWithZeroSeed(hashCode); + hashCode = MurmurHashUtils.murmurHash128WithZeroSeed(hashCode); } int position = (int) hashCode % candidateNodes.size(); @@ -167,8 +203,51 @@ private Multimap OssSplitAffinityAssigment(List splits, List return assignment; } - private void doAssign(List candidateNodes, NodeAssignmentStats 
assignmentStats, - Multimap assignment, Split split, int position) { + protected Multimap assignRoundRobin(List splits, List candidateNodes, + NodeAssignmentStats assignmentStats, + Multimap assignment) { + // use round robin for oss query + int currentId = 0; + for (Split split : splits) { + int position = (currentId++) % candidateNodes.size(); + doAssign(candidateNodes, assignmentStats, assignment, split, position); + } + return assignment; + } + + protected Multimap assignAllByFileName(List splits, List candidateNodes, + NodeAssignmentStats assignmentStats, + Multimap assignment) { + Map> randomAssign = new HashMap<>(); + for (Split split : splits) { + long hashCode = ((OssSplit) split.getConnectorSplit()).getDesignatedFile().hashCode(); + hashCode = MurmurHashUtils.murmurHash128WithZeroSeed(hashCode); + + int position = (int) hashCode % candidateNodes.size(); + if (position < 0) { + position += candidateNodes.size(); + } + randomAssign.putIfAbsent(position, new ArrayList<>()); + randomAssign.get(position).add(split); + } + + assignToNode(candidateNodes, assignmentStats, assignment, randomAssign); + + return assignment; + } + + protected void assignToNode(List candidateNodes, NodeAssignmentStats assignmentStats, + Multimap assignment, Map> randomAssign) { + for (Map.Entry> entry : randomAssign.entrySet()) { + Integer pos = entry.getKey(); + for (Split split : entry.getValue()) { + doAssign(candidateNodes, assignmentStats, assignment, split, pos); + } + } + } + + protected void doAssign(List candidateNodes, NodeAssignmentStats assignmentStats, + Multimap assignment, Split split, int position) { Node chosenNode = candidateNodes.get(position); if (chosenNode != null) { assignment.put(chosenNode, split); @@ -177,4 +256,9 @@ private void doAssign(List candidateNodes, NodeAssignmentStats assignmentS throw new TddlRuntimeException(ErrorCode.ERR_NO_NODES_AVAILABLE, "No nodes available to run query"); } } + + @Override + public List getOrderedNode() { + return workerNodes.stream().sorted(NODE_COMPARATOR).collect(Collectors.toList()); + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/SourcePartitionedScheduler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/SourcePartitionedScheduler.java index 19792df12..357afe416 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/SourcePartitionedScheduler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/SourcePartitionedScheduler.java @@ -16,25 +16,33 @@ package com.alibaba.polardbx.executor.mpp.execution.scheduler; -import com.google.common.collect.HashMultimap; -import com.google.common.collect.Iterables; -import com.google.common.collect.Multimap; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.mpp.execution.RemoteTask; import com.alibaba.polardbx.executor.mpp.execution.SqlStageExecution; import com.alibaba.polardbx.executor.mpp.metadata.Split; +import com.alibaba.polardbx.executor.mpp.split.OssSplit; import com.alibaba.polardbx.executor.mpp.split.SplitInfo; import com.alibaba.polardbx.executor.utils.ExecUtils; import com.alibaba.polardbx.gms.node.Node; +import 
com.alibaba.polardbx.optimizer.OptimizerContext;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.utils.QueryConcurrencyPolicy;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Multimap;
 
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
 
 public class SourcePartitionedScheduler implements StageScheduler {
@@ -46,34 +54,106 @@ public class SourcePartitionedScheduler implements StageScheduler {
 
     private final int driverParallelism;
 
+    private final int requireChildOutput;
+
+    private HashSet prunePartitions = new HashSet<>();
+
     public SourcePartitionedScheduler(
         SqlStageExecution stageExecution,
         SplitPlacementPolicy splitPlacementPolicy,
-        SplitInfo splitInfo, List expandSplitInfos, ExecutionContext context) {
+        List splitInfos, List expandSplitInfos, ExecutionContext context,
+        List nodes) {
+
+        int columnarMaxShard = context.getColumnarMaxShard();
+        // if we have 4 partitions but 6 nodes, we can only schedule onto 4 nodes, so the child should output 4 partitions;
+        // we should avoid this situation, as the remaining two nodes would sit idle
+        this.requireChildOutput =
+            columnarMaxShard > 0 ? Math.min(columnarMaxShard, nodes.size()) : nodes.size();
+
         this.stage = stageExecution;
         this.splitAssignments = new HashMap<>();
-        List splits = Iterables.getOnlyElement(splitInfo.getSplits());
-        Multimap splitAssignment = splitPlacementPolicy.computeAssignments(splits);
-        for (Map.Entry entry : splitAssignment.entries()) {
-            if (!splitAssignments.containsKey(entry.getKey())) {
-                Multimap multimap = HashMultimap.create();
-                splitAssignments.put(entry.getKey(), multimap);
+        // example of partition pruning, with a fragment as follows:
+        //           Join
+        //          |    |
+        //      Join      Join
+        //      |  |      |  |
+        //      t1 ex     t2 t3
+        // splitInfos is {t1:1, t2:1,3, t3:0,1}
+        // the partition counts of t1, t2 and t3 must be equal; suppose the partition count is 6,
+        // then the pruning result is {0,2,3,4,5}
+        for (SplitInfo splitInfo : splitInfos) {
+            List splits = Iterables.getOnlyElement(splitInfo.getSplits());
+
+            if (stage.getFragment().isRemotePairWise() || stage.getFragment().isLocalPairWise()) {
+
+                if (!splits.isEmpty()) {
+
+                    int partitions = getTablePartitions((OssSplit) splits.get(0).getConnectorSplit());
+
+                    // if the splits do not contain all partitions, we should not use local partition-wise join;
+                    // optimizing that situation is of little value
+                    if (stage.getFragment().isLocalPairWise()) {
+                        updateLocalPairWiseInfo(partitions, splits, stageExecution);
+                        // just reset all partitions here;
+                        // after scheduling, we will set the real local partition count for local pair-wise
+                        stage.getFragment().setLocalPartitionCount(partitions);
+
+                        // when the pipeline is in local partition mode, record its total partition count across all nodes.
+                        stage.getFragment().setTotalPartitionCount(partitions);
+                    }
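// ------------------------------------------------------------------------------------------------
// Editor's note: accumulating every table's unvisited partitions, as updatePrunePartitionInfo
// (defined further down, called just below) does, prunes exactly the complement of the
// intersection of the visited sets: a partition survives only if every join input visits it.
// A standalone, illustrative sketch using the numbers from the comment example above
// (t1:{1}, t2:{1,3}, t3:{0,1}, 6 partitions):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

class PrunePartitionSketch {
    public static void main(String[] args) {
        int allPartition = 6;
        List<Set<Integer>> visitedPerTable = Arrays.asList(
            new HashSet<>(Arrays.asList(1)),       // t1
            new HashSet<>(Arrays.asList(1, 3)),    // t2
            new HashSet<>(Arrays.asList(0, 1)));   // t3
        Set<Integer> prunePartitions = new HashSet<>();
        for (Set<Integer> visited : visitedPerTable) {
            // the per-table complement that updatePrunePartitionInfo adds to prunePartitions
            prunePartitions.addAll(IntStream.range(0, allPartition)
                .filter(i -> !visited.contains(i)).boxed()
                .collect(Collectors.toSet()));
        }
        System.out.println(prunePartitions); // [0, 2, 3, 4, 5]; only partition 1 survives
    }
}
// ------------------------------------------------------------------------------------------------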
+
+                    // intersect visited partitions
+                    if (context.getParamManager().getBoolean(ConnectionParams.ENABLE_PRUNE_EXCHANGE_PARTITION)
+                        && stage.getFragment().isRemotePairWise() && stage.getFragment().isPruneExchangePartition()) {
+                        updatePrunePartitionInfo(partitions, splits);
+                    }
+
+                } else {
+                    // if the splits are empty, do not use local partition-wise
+                    stage.getFragment().setLocalPairWise(Boolean.FALSE);
+                }
+            }
+
+            // Notice: after assignment, the part index of an oss split is node-scoped rather than global
+            Multimap splitAssignment = splitPlacementPolicy.computeAssignments(splits);
+
+            // e.g. for outer joins under partition-wise
+            if (stage.getFragment().isRemotePairWise() && !stage.getFragment().isPruneExchangePartition()) {
+                List shouldScheduleNodes = nodes.subList(0, requireChildOutput);
+                for (Node node : shouldScheduleNodes) {
+                    if (!splitAssignment.containsKey(node)) {
+                        splitAssignment.put(node, Split.EMPTY_SPLIT);
+                    }
+                }
+            }
+
+            for (Map.Entry entry : splitAssignment.entries()) {
+                if (!splitAssignments.containsKey(entry.getKey())) {
+                    Multimap multimap = HashMultimap.create();
+                    splitAssignments.put(entry.getKey(), multimap);
+                }
+                splitAssignments.get(entry.getKey()).put(splitInfo.getSourceId(), entry.getValue());
             }
-            splitAssignments.get(entry.getKey()).put(splitInfo.getSourceId(), entry.getValue());
         }
 
         if (expandSplitInfos != null && expandSplitInfos.size() > 0) {
+            if (stage.getFragment().isLocalPairWise()) {
+                throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR,
+                    "partition wise join does not support BKA yet");
+            }
             for (SplitInfo info : expandSplitInfos) {
                 List expandSplits = Iterables.getOnlyElement(info.getSplits());
-                for (Map.Entry entry : splitAssignment.entries()) {
-                    splitAssignments.get(entry.getKey()).putAll(info.getSourceId(), expandSplits);
+                for (Node node : splitAssignments.keySet()) {
+                    splitAssignments.get(node).putAll(info.getSourceId(), expandSplits);
+                }
             }
         }
     }
 
         int totalParallelism = stageExecution.getFragment().getPartitioning().getPartitionCount();
         int taskNum = splitAssignments.keySet().size();
-        if (splitInfo.isUnderSort()) {
+
+        // multiple splits should only occur in partition-wise join, and there should be no sort trait in that situation
+        if (splitInfos.size() == 1 && splitInfos.get(0).isUnderSort()) {
             this.driverParallelism = 1;
         } else {
             this.driverParallelism = totalParallelism % taskNum > 0 ? totalParallelism / taskNum + 1 :
@@ -81,12 +161,16 @@ public SourcePartitionedScheduler(
         }
 
         int totalPrefetch = 0;
-        if (splitInfo.getConcurrencyPolicy() == QueryConcurrencyPolicy.SEQUENTIAL) {
-            totalPrefetch = 1;
+        if (splitInfos.size() > 0 && splitInfos.stream()
+            .allMatch(splitInfo -> splitInfo.getConcurrencyPolicy() == QueryConcurrencyPolicy.SEQUENTIAL)) {
+            totalPrefetch = splitInfos.size();
         } else {
             totalPrefetch = context.getParamManager().getInt(ConnectionParams.PREFETCH_SHARDS);
             if (totalPrefetch < 0) {
-                totalPrefetch = ExecUtils.getMppPrefetchNumForLogicalView(splitInfo.getSplitParallelism());
+                for (SplitInfo splitInfo : splitInfos) {
+                    totalPrefetch = Math.max(
+                        totalPrefetch, ExecUtils.getMppPrefetchNumForLogicalView(splitInfo.getSplitParallelism()));
+                }
                 if (expandSplitInfos != null && expandSplitInfos.size() > 0) {
                     for (SplitInfo info : expandSplitInfos) {
                         totalPrefetch = Math.max(
@@ -101,8 +185,11 @@ public SourcePartitionedScheduler(
 
         int prefetch = totalPrefetch % taskNum > 0 ? totalPrefetch / taskNum + 1 : totalPrefetch / taskNum;
@@ -101,8 +185,11 @@ public SourcePartitionedScheduler( int prefetch = totalPrefetch % taskNum > 0 ? totalPrefetch / taskNum + 1 : totalPrefetch / taskNum; - log.debug("sourceId: " + splitInfo.getSourceId() + "splits num: " + splitInfo.getSplitCount() + ", prefetch: " - + prefetch); + for (SplitInfo splitInfo : splitInfos) { + log.debug( + "sourceId: " + splitInfo.getSourceId() + ", splits num: " + splitInfo.getSplitCount() + ", prefetch: " + + prefetch); + } int bkaJoinParallelism = stage.getFragment().getBkaJoinParallelism() % taskNum > 0 ? stage.getFragment().getBkaJoinParallelism() / taskNum + 1 : @@ -110,6 +197,90 @@ public SourcePartitionedScheduler( stageExecution.getFragment().setPrefetch(prefetch); stageExecution.getFragment().setBkaJoinParallelism(bkaJoinParallelism); + + int realPartitionCount = updateLocalPartitionCount(taskNum, context); + + updateOssInfoAfterSchedule(splitInfos, realPartitionCount); + } + + private int getTablePartitions(OssSplit split) { + String logicalSchema = split.getLogicalSchema(); + String logicalTable = split.getLogicalTableName(); + int allPartition = OptimizerContext.getContext(logicalSchema).getPartitionInfoManager() + .getPartitionInfo(logicalTable).getPartitionBy().getPartitions().size(); + return allPartition; + } + + private void updateLocalPairWiseInfo(int allPartition, List splits, SqlStageExecution stageExecution) { + stageExecution.getFragment() + .setLocalPairWise( + stageExecution.getFragment().isLocalPairWise() & ossSplitContainsAllPart(splits, allPartition)); + } + + private boolean ossSplitContainsAllPart(List splits, int allPartitions) { + Set realPartition = splits.stream().map(split -> ((OssSplit) split.getConnectorSplit()).getPartIndex()) + .collect(Collectors.toSet()); + return realPartition.size() == allPartitions; + } + + private void updatePrunePartitionInfo(int allPartition, List splits) { + Set realPartition = + splits.stream().map(split -> ((OssSplit) split.getConnectorSplit()).getPartIndex()) + .collect(Collectors.toSet()); + + Set notVisitPartition = + IntStream.range(0, allPartition).filter(i -> !realPartition.contains(i)).boxed() + .collect(Collectors.toSet()); + prunePartitions.addAll(notVisitPartition); + } + + /** + * Update the local partition count for local partition wise mode. + * Returns -1 if the update failed, in which case local pairwise will not be used. + */ + private int updateLocalPartitionCount(int taskNum, ExecutionContext context) { + if (stage.getFragment().isLocalPairWise()) { + int partitions = stage.getFragment().getLocalPartitionCount(); + if (partitions > 0) { + // when the pipeline is in local partition mode, record its total partition count across all nodes. + stage.getFragment().setTotalPartitionCount(partitions); + + int localPartitionCount = partitions % taskNum == 0 ? 
partitions / taskNum : partitions / taskNum + 1; + stage.getFragment().setLocalPartitionCount(localPartitionCount); + return localPartitionCount; + } else { + // weird, we should never reach here + log.warn(String.format("unable to use local partition wise, trace id is %s, should check this", + context.getTraceId())); + stage.getFragment().setLocalPartitionCount(-1); + stage.getFragment().setLocalPairWise(Boolean.FALSE); + } + } else { + // set the local partition count to its default; this value is not used outside local pairwise mode + stage.getFragment().setLocalPartitionCount(-1); + } + return -1; + } + + private void updateOssInfoAfterSchedule(List splitInfos, int realPartitionCount) { + if (realPartitionCount > 0) { + for (SplitInfo splitInfo : splitInfos) { + List splits = Iterables.getOnlyElement(splitInfo.getSplits()); + for (Split split : splits) { + ((OssSplit) split.getConnectorSplit()).setNodePartCount(realPartitionCount); + } + } + } else { + // disable local partition wise on all oss splits + for (SplitInfo splitInfo : splitInfos) { + List splits = Iterables.getOnlyElement(splitInfo.getSplits()); + for (Split split : splits) { + if (split.getConnectorSplit() instanceof OssSplit) { + ((OssSplit) split.getConnectorSplit()).setLocalPairWise(Boolean.FALSE); + } + } + } + } } @Override @@ -128,8 +299,18 @@ public int getTaskNum() { return splitAssignments.keySet().size(); } + @Override + public int requireChildOutputNum() { + return stage.getFragment().isRemotePairWise() ? requireChildOutput : getTaskNum(); + } + @Override public int getDriverParallelism() { return driverParallelism; } + + @Override + public List getPrunePartitions() { + return new ArrayList<>(prunePartitions); + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/SplitPlacementPolicy.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/SplitPlacementPolicy.java index 9b60ec514..894f95b8e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/SplitPlacementPolicy.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/SplitPlacementPolicy.java @@ -29,9 +29,9 @@ */ package com.alibaba.polardbx.executor.mpp.execution.scheduler; -import com.google.common.collect.Multimap; import com.alibaba.polardbx.executor.mpp.metadata.Split; import com.alibaba.polardbx.gms.node.Node; +import com.google.common.collect.Multimap; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/SqlQueryScheduler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/SqlQueryScheduler.java index 4d4f0df73..dc9c58ed9 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/SqlQueryScheduler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/SqlQueryScheduler.java @@ -29,15 +29,6 @@ */ package com.alibaba.polardbx.executor.mpp.execution.scheduler; -import com.google.common.base.Preconditions; -import com.google.common.base.Throwables; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableMultimap; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.LinkedHashMultimap; -import com.google.common.collect.Maps; -import com.google.common.collect.Multimap; import 
com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionParams; @@ -60,10 +51,20 @@ import com.alibaba.polardbx.executor.mpp.planner.NodePartitioningManager; import com.alibaba.polardbx.executor.mpp.planner.PartitionHandle; import com.alibaba.polardbx.executor.mpp.planner.PartitionShuffleHandle; +import com.alibaba.polardbx.executor.mpp.planner.PlanFragment; import com.alibaba.polardbx.executor.mpp.planner.StageExecutionPlan; import com.alibaba.polardbx.executor.mpp.util.Failures; import com.alibaba.polardbx.executor.mpp.util.ImmutableCollectors; import com.alibaba.polardbx.gms.node.Node; +import com.google.common.base.Preconditions; +import com.google.common.base.Throwables; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableMultimap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.LinkedHashMultimap; +import com.google.common.collect.Maps; +import com.google.common.collect.Multimap; import io.airlift.units.Duration; import java.util.ArrayList; @@ -77,8 +78,9 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; -import static com.google.common.base.Preconditions.checkArgument; import static com.alibaba.polardbx.executor.mpp.execution.StageState.ABORTED; import static com.alibaba.polardbx.executor.mpp.execution.StageState.CANCELED; import static com.alibaba.polardbx.executor.mpp.execution.StageState.FAILED; @@ -86,6 +88,7 @@ import static com.alibaba.polardbx.executor.mpp.execution.StageState.FLUSHING; import static com.alibaba.polardbx.executor.mpp.execution.StageState.RUNNING; import static com.alibaba.polardbx.executor.mpp.execution.StageState.SCHEDULED; +import static com.google.common.base.Preconditions.checkArgument; import static io.airlift.concurrent.MoreFutures.firstCompletedFuture; import static io.airlift.concurrent.MoreFutures.tryGetFutureValue; import static java.lang.String.format; @@ -237,14 +240,14 @@ private List createStages(Optional parent, if (initedStages.containsKey(stageId)) { StageLinkage linkage = - new StageLinkage(plan.getFragment().getId(), parent, stage2childStages.get(stageId), c2pLinkages + new StageLinkage(plan.getFragment(), parent, stage2childStages.get(stageId), c2pLinkages ); stageLinkages.put(stageId, linkage); stages.add(initedStages.get(stageId)); return stages.build(); } if (log.isDebugEnabled()) { - log.debug("create stage: " + stageId + " for flagment: " + plan.getFragment().getId()); + log.debug("create stage: " + stageId + " for fragment: " + plan.getFragment().getId()); } SqlStageExecution stage = new SqlStageExecution(stageId, locationFactory.createStageLocation(stageId), plan.getFragment(), @@ -259,7 +262,10 @@ private List createStages(Optional parent, } } return false; - }, bloomFilterManager + }, bloomFilterManager, + plan.getFragment().isRemotePairWise() ? 
+ nodeSelector.getOrderedNode().stream().map(Node::getNodeIdentifier).collect( + Collectors.toList()) : null ); stages.add(stage); @@ -276,7 +282,8 @@ private List createStages(Optional parent, // only contain logicalView SplitPlacementPolicy placementPolicy = new DynamicSplitPlacementPolicy(nodeSelector, stage::getAllTasks); scheduler = new SourcePartitionedScheduler( - stage, placementPolicy, plan.getSplitInfo(), null, session.getClientContext()); + stage, placementPolicy, plan.getSplitInfos(), null, session.getClientContext(), + nodeSelector.getOrderedNode()); } else { if (plan.getFragment().getPartitionedSources().size() == plan.getFragment().getExpandSources().size()) { Map partitionToNode = partitioningCache.apply(plan.getFragment().getPartitioning()); @@ -284,11 +291,16 @@ private List createStages(Optional parent, stage, partitionToNode, plan.getExpandInfo(), session.getClientContext()); } else { + if (plan.getFragment().isRemotePairWise()) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + "BKA join is not supported under partition wise yet"); + } //contain logicalView&&expandView SplitPlacementPolicy placementPolicy = new DynamicSplitPlacementPolicy(nodeSelector, stage::getAllTasks); scheduler = new SourcePartitionedScheduler( - stage, placementPolicy, plan.getSplitInfo(), plan.getExpandInfo(), session.getClientContext()); + stage, placementPolicy, plan.getSplitInfos(), plan.getExpandInfo(), session.getClientContext(), + nodeSelector.getOrderedNode()); } } @@ -300,12 +312,17 @@ private List createStages(Optional parent, plan.getFragment().getPartitioning().setPartitionCount(taskNum); bloomFilterManager.updateTaskParallelism(stageId.getId(), taskNum); - Preconditions.checkArgument(taskNum > 0, "childPartitionCount must be greater than 0!"); + Preconditions.checkArgument(taskNum > 0, "node count of this fragment must be greater than 0!"); Set childStageIds = new HashSet<>(); + + int requireChildOutputNum = scheduler.requireChildOutputNum(); + Preconditions.checkArgument(requireChildOutputNum > 0, "node count of child fragment must be greater than 0!"); + ImmutableSet.Builder childStagesBuilder = ImmutableSet.builder(); for (StageExecutionPlan subStagePlan : plan.getSubStages()) { - subStagePlan.getFragment().getPartitioningScheme().setPartitionCount(taskNum); + subStagePlan.getFragment().getPartitioningScheme().setPartitionCount(requireChildOutputNum); + subStagePlan.getFragment().getPartitioningScheme().setPrunePartitions(scheduler.getPrunePartitions()); List subTree = createStages(Optional.of(stage), stageIdMapping, initedStages, locationFactory, subStagePlan, partitioningCache, remoteTaskFactory, session, @@ -338,7 +355,7 @@ private List createStages(Optional parent, }); StageLinkage linkage = - new StageLinkage(plan.getFragment().getId(), parent, childStages, c2pLinkages); + new StageLinkage(plan.getFragment(), parent, childStages, c2pLinkages); stageLinkages.put(stageId, linkage); stage2childStages.put(stageId, childStages); initedStages.put(stageId, stage); @@ -508,9 +525,9 @@ private class StageLinkage { private final Set childStageIds; private final boolean childStageHasMultiParent; - public StageLinkage(Integer fragmentId, Optional parent, Set children, + public StageLinkage(PlanFragment fragment, Optional parent, Set children, Multimap linkages) { - this.currentStageFragmentId = fragmentId; + this.currentStageFragmentId = fragment.getId(); this.parent = parent; this.childStageHasMultiParent = children.stream().filter(childStage -> 
linkages.get(childStage.getStageId()).size() > 1).findAny() @@ -528,8 +545,11 @@ public StageLinkage(Integer fragmentId, Optional parent, Set< ImmutableMap parentStageIds = childParents.stream() .map(stageId -> stageId.getId()).collect( ImmutableCollectors.toImmutableMap(stageId -> stageId, stageId -> false)); + // scheduling under remote pairwise should rely on the partition count, not the task count + int bufferCount = fragment.isRemotePairWise() ? partitioningHandle.getPartitionCount() : -1; outputBufferManager = - new BroadcastOutputBufferManager(new HashMap<>(parentStageIds), childStage::setOutputBuffers); + new BroadcastOutputBufferManager(new HashMap<>(parentStageIds), bufferCount, + childStage::setOutputBuffers); } else { int partitionCount = partitioningHandle.getPartitionCount(); outputBufferManager = @@ -596,10 +616,29 @@ public void processScheduleResults(StageId stageId, StageState newState, Set new OutputBuffers.OutputBufferId(task.getTaskId().getId())) .collect(ImmutableCollectors.toImmutableList()); } + for (OutputBufferManager child : childOutputBufferManagers) { - child.addOutputBuffers(stageId, newOutputBuffers, noMoreTasks); + List tempBuffers = newOutputBuffers; + // NOTE: the buffer ids here should share the same stage id + if (child instanceof BroadcastOutputBufferManager + && ((BroadcastOutputBufferManager) child).getBufferCount() > 0 + && !newOutputBuffers.isEmpty()) { + tempBuffers = getFullBuffer(newOutputBuffers.get(0).getStageId(), + ((BroadcastOutputBufferManager) child).getBufferCount()); + } + child.addOutputBuffers(stageId, tempBuffers, noMoreTasks); } } } + + /** + * @param stageId stage id used to create the output buffers + * @param bufferCount number of output buffers to create + */ + private List getFullBuffer(int stageId, + int bufferCount) { + return IntStream.range(0, bufferCount).boxed().map(idx -> new OutputBuffers.OutputBufferId(stageId, idx)) + .collect(Collectors.toList()); + } } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/StageScheduler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/StageScheduler.java index 87b367ebd..1731d9228 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/StageScheduler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/execution/scheduler/StageScheduler.java @@ -30,6 +30,7 @@ package com.alibaba.polardbx.executor.mpp.execution.scheduler; import java.io.Closeable; +import java.util.List; public interface StageScheduler extends Closeable { /** @@ -43,9 +44,15 @@ public interface StageScheduler extends Closeable { int getTaskNum(); + int requireChildOutputNum(); + int getDriverParallelism(); @Override default void close() { } + + default List getPrunePartitions() { + return null; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/metadata/Split.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/metadata/Split.java index 28dc748b1..8491d5206 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/metadata/Split.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/metadata/Split.java @@ -29,9 +29,9 @@ */ package com.alibaba.polardbx.executor.mpp.metadata; +import com.alibaba.polardbx.executor.mpp.spi.ConnectorSplit; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -import com.alibaba.polardbx.executor.mpp.spi.ConnectorSplit; 
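The getFullBuffer helper above materializes one broadcast buffer id per output partition, so that a remote pairwise child writes to a fixed set of buffers regardless of how many tasks have been discovered so far. A toy sketch of that expansion, using strings in place of the real OutputBuffers.OutputBufferId type:

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

class FullBufferSketch {
    // produce buffer ids "stageId-0" .. "stageId-(bufferCount-1)"
    static List<String> fullBuffers(int stageId, int bufferCount) {
        return IntStream.range(0, bufferCount)
            .mapToObj(idx -> stageId + "-" + idx)
            .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        System.out.println(fullBuffers(3, 4)); // [3-0, 3-1, 3-2, 3-3]
    }
}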
import static com.google.common.base.MoreObjects.toStringHelper; import static java.util.Objects.requireNonNull; @@ -39,8 +39,8 @@ public final class Split { public static final Split EMPTY_SPLIT = new Split(true, null); - private final boolean remoteSplit; - private final ConnectorSplit connectorSplit; + protected final boolean remoteSplit; + protected final ConnectorSplit connectorSplit; @JsonCreator public Split( diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/metadata/TaskLocation.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/metadata/TaskLocation.java index 11cae7cf0..0cea858f9 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/metadata/TaskLocation.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/metadata/TaskLocation.java @@ -16,13 +16,13 @@ package com.alibaba.polardbx.executor.mpp.metadata; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.google.common.annotations.VisibleForTesting; import com.alibaba.polardbx.executor.mpp.deploy.ServiceProvider; import com.alibaba.polardbx.executor.mpp.execution.TaskId; import com.alibaba.polardbx.gms.node.NodeServer; import com.alibaba.polardbx.util.MoreObjects; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.annotations.VisibleForTesting; import java.io.Serializable; import java.net.URI; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/DirectExchanger.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/DirectExchanger.java index 54149808e..1bac021a3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/DirectExchanger.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/DirectExchanger.java @@ -16,10 +16,10 @@ package com.alibaba.polardbx.executor.mpp.operator; -import com.google.common.collect.ImmutableList; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.mpp.execution.buffer.OutputBufferMemoryManager; import com.alibaba.polardbx.executor.operator.ConsumerExecutor; +import com.google.common.collect.ImmutableList; public class DirectExchanger extends LocalExchanger { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/Driver.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/Driver.java index 7bd191525..3668f0e2f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/Driver.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/Driver.java @@ -16,6 +16,8 @@ package com.alibaba.polardbx.executor.mpp.operator; +import com.alibaba.polardbx.executor.mpp.execution.TaskExecutor; +import com.alibaba.polardbx.executor.mpp.split.OssSplit; import com.google.common.base.Preconditions; import com.google.common.base.Throwables; import com.google.common.collect.Sets; @@ -37,14 +39,22 @@ import com.alibaba.polardbx.executor.operator.SourceExec; import com.alibaba.polardbx.executor.operator.spill.MemoryRevoker; import com.alibaba.polardbx.executor.utils.ExecUtils; +import com.google.common.base.Preconditions; +import com.google.common.base.Throwables; +import com.google.common.collect.Sets; +import com.google.common.util.concurrent.Futures; +import 
com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; import javax.annotation.concurrent.GuardedBy; import java.io.Closeable; +import java.text.MessageFormat; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Random; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -53,12 +63,14 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.stream.Collectors; +import static com.alibaba.polardbx.executor.operator.ConsumerExecutor.NOT_BLOCKED; import static com.google.common.base.Preconditions.checkState; import static com.google.common.util.concurrent.MoreExecutors.directExecutor; import static com.alibaba.polardbx.executor.operator.ConsumerExecutor.NOT_BLOCKED; public class Driver implements Closeable { + private static final Logger EXECUTOR_LOG = LoggerFactory.getLogger(TaskExecutor.class); private static final Logger log = LoggerFactory.getLogger(Driver.class); private final ConcurrentMap newSources = new ConcurrentHashMap<>(); @@ -114,6 +126,10 @@ public Driver(DriverContext driverContext, DriverExec driverExec) { } } + public DriverExec getDriverExec() { + return driverExec; + } + @Override public void close() { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/DriverContext.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/DriverContext.java index 3b7fcf6b9..dfde59bd7 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/DriverContext.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/DriverContext.java @@ -29,7 +29,6 @@ */ package com.alibaba.polardbx.executor.mpp.operator; -import com.google.common.collect.ImmutableSet; import com.alibaba.polardbx.executor.mpp.deploy.ServiceProvider; import com.alibaba.polardbx.executor.mpp.execution.PipelineContext; import com.alibaba.polardbx.executor.mpp.execution.TaskId; @@ -40,6 +39,9 @@ import com.alibaba.polardbx.executor.mpp.execution.buffer.OutputBufferInfo; import com.alibaba.polardbx.executor.mpp.metadata.TaskLocation; import com.alibaba.polardbx.executor.operator.SourceExec; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.collect.ImmutableSet; import org.joda.time.DateTime; import org.joda.time.chrono.ISOChronology; import org.weakref.jmx.internal.guava.collect.ImmutableList; @@ -52,6 +54,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLongFieldUpdater; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; import static com.alibaba.polardbx.common.properties.MetricLevel.isSQLMetricEnabled; import static java.util.Objects.requireNonNull; @@ -63,10 +66,10 @@ public class DriverContext { private static final ThreadMXBean THREAD_MX_BEAN = ManagementFactory.getThreadMXBean(); // Atomic Updaters - private static final AtomicLongFieldUpdater startNanosUpdater = - AtomicLongFieldUpdater.newUpdater(DriverContext.class, "startNanosLong"); - private static final AtomicLongFieldUpdater endNanosUpdater = - AtomicLongFieldUpdater.newUpdater(DriverContext.class, "endNanosLong"); + private static final AtomicLongFieldUpdater startMillisUpdater = + AtomicLongFieldUpdater.newUpdater(DriverContext.class, "startMillis"); + private 
static final AtomicLongFieldUpdater endMillisUpdater = + AtomicLongFieldUpdater.newUpdater(DriverContext.class, "endMillis"); private static final AtomicLongFieldUpdater intervalWallStartUpdater = AtomicLongFieldUpdater.newUpdater(DriverContext.class, "intervalWallStartLong"); @@ -82,18 +85,18 @@ public class DriverContext { private static final AtomicLongFieldUpdater processUserNanosUpdater = AtomicLongFieldUpdater.newUpdater(DriverContext.class, "processUserNanosLong"); - private static final AtomicLongFieldUpdater blockedWallMillsUpdater = - AtomicLongFieldUpdater.newUpdater(DriverContext.class, "blockedWallMillsLong"); - private static final AtomicLongFieldUpdater blockedStartTimestampUpdater = - AtomicLongFieldUpdater.newUpdater(DriverContext.class, "blockedStartTimestampLong"); + private static final AtomicLongFieldUpdater blockedWallNanoUpdater = + AtomicLongFieldUpdater.newUpdater(DriverContext.class, "blockedWallNanosLong"); + private static final AtomicLongFieldUpdater blockedWallStartUpdater = + AtomicLongFieldUpdater.newUpdater(DriverContext.class, "blockedWallStartLong"); private final long createMillis = System.currentTimeMillis(); private long driverOutputPosition = 0; // volatile members - private volatile long startNanosLong = 0L; - private volatile long endNanosLong = 0L; + private volatile long startMillis = 0L; + private volatile long endMillis = 0L; private volatile long intervalWallStartLong = 0L; private volatile long intervalCpuStartLong = 0L; @@ -103,8 +106,8 @@ public class DriverContext { private volatile long processCpuNanosLong = 0L; private volatile long processUserNanosLong = 0L; - private volatile long blockedWallMillsLong = 0L; - private volatile long blockedStartTimestampLong = 0L; + private volatile long blockedWallNanosLong = 0L; + private volatile long blockedWallStartLong = 0L; private final AtomicBoolean isBlocked = new AtomicBoolean(false); @@ -123,6 +126,9 @@ public class DriverContext { private final AtomicReference driverStats = new AtomicReference<>(); private List driverInputs = new ArrayList<>(); + // Use Supplier to proactively dump the current statistics from TaskExecutor. 
+ private Supplier driverRuntimeStatisticsSupplier; + public DriverContext(PipelineContext pipelineContext, boolean partitioned, int driverId) { this.pipelineContext = requireNonNull(pipelineContext, "pipelineContext is null"); this.partitioned = partitioned; @@ -145,11 +151,12 @@ public PipelineContext getPipelineContext() { } public void startProcessTimer() { - long now = System.currentTimeMillis(); - if (startNanosUpdater.compareAndSet(this, 0, now)) { + long nowMillis = System.currentTimeMillis(); + if (startMillisUpdater.compareAndSet(this, 0, nowMillis)) { pipelineContext.start(); } - intervalWallStartUpdater.set(this, now); + long nowNano = System.nanoTime(); + intervalWallStartUpdater.set(this, nowNano); if (isSQLMetricEnabled(metricLevel)) { intervalCpuStartUpdater.set(this, currentThreadCpuTime()); intervalUserStartUpdater.set(this, currentThreadUserTime()); @@ -157,7 +164,11 @@ public void startProcessTimer() { } public void recordProcessed() { - processWallNanosUpdater.getAndAdd(this, System.currentTimeMillis() - intervalWallStartUpdater.get(this)); + if (finished.get() && driverStats.get() != null) { + return; + } + long addTime = System.nanoTime() - intervalWallStartUpdater.get(this); + processWallNanosUpdater.getAndAdd(this, addTime); if (isSQLMetricEnabled(metricLevel)) { processCpuNanosUpdater .getAndAdd(this, nanosBetween(intervalCpuStartUpdater.get(this), currentThreadCpuTime())); @@ -175,7 +186,7 @@ public long getTotalScheduledTime() { } public long getTotalBlockedTime() { - return blockedWallMillsUpdater.get(this); + return blockedWallNanoUpdater.get(this); } public long getTotalUserTime() { @@ -185,7 +196,7 @@ public long getTotalUserTime() { public void recordBlocked() { if (isSQLMetricEnabled(metricLevel)) { if (isBlocked.compareAndSet(false, true)) { - blockedStartTimestampUpdater.set(this, System.currentTimeMillis()); + blockedWallStartUpdater.set(this, System.nanoTime()); } } } @@ -193,9 +204,9 @@ public void recordBlocked() { public void recordBlockedFinished() { if (isSQLMetricEnabled(metricLevel)) { if (isBlocked.compareAndSet(true, false)) { - long oldTime = blockedStartTimestampUpdater.getAndSet(this, 0); + long oldTime = blockedWallStartUpdater.getAndSet(this, 0); if (oldTime > 0) { - blockedWallMillsUpdater.getAndAdd(this, System.currentTimeMillis() - oldTime); + blockedWallNanoUpdater.getAndAdd(this, System.nanoTime() - oldTime); } } } @@ -223,7 +234,8 @@ public void close(boolean isException) { public void finished() { if (finished.compareAndSet(false, true)) { - endNanosUpdater.set(this, System.currentTimeMillis()); + endMillisUpdater.set(this, System.currentTimeMillis()); + recordProcessed(); pipelineContext.driverFinished(this); this.driverStats.set(this.getDriverStats()); this.driverExecRef.set(null); @@ -232,6 +244,7 @@ public void finished() { public void failed(Throwable cause) { pipelineContext.failed(cause); + recordProcessed(); finished.set(true); this.driverStats.set(this.getDriverStats()); this.driverExecRef.set(null); @@ -246,8 +259,8 @@ public boolean isCpuTimerEnabled() { } private long getBlockedTime() { - if (isBlocked.get() && blockedStartTimestampUpdater.get(this) > 0) { - return System.currentTimeMillis() - blockedStartTimestampUpdater.get(this); + if (isBlocked.get() && blockedWallStartUpdater.get(this) > 0) { + return System.nanoTime() - blockedWallStartUpdater.get(this); } return 0; } @@ -280,11 +293,8 @@ public int getDriverId() { public String getUniqueId() { if (uniqueId == null) { - StringBuilder sb = new StringBuilder(); - 
sb.append("tid:").append(pipelineContext.getTaskId()) - .append(" pid:").append(pipelineContext.getPipelineId()) - .append(" did:").append(driverId); - this.uniqueId = sb.toString(); + this.uniqueId = String.format("%s.%d.%d", pipelineContext.getTaskId(), + pipelineContext.getPipelineId(), driverId); } return uniqueId; } @@ -298,7 +308,7 @@ public AtomicBoolean getIsBlocked() { } public boolean isStart() { - return startNanosLong > 0; + return startMillis > 0; } @Nullable @@ -310,6 +320,10 @@ public List getDriverInputs() { return driverInputs; } + public void setDriverRuntimeStatisticsSupplier(Supplier supplier) { + this.driverRuntimeStatisticsSupplier = supplier; + } + public DriverStats getDriverStats() { DriverExec driverExec = this.driverExecRef.get(); if (driverExec != null) { @@ -329,14 +343,20 @@ public DriverStats getDriverStats() { } else { outputPositions += driverOutputPosition; } - return new DriverStats(inputDataSize, inputPositions, outputDataSize, outputPositions); + long runningTime = processWallNanosUpdater.get(this); + return new DriverStats(getUniqueId(), inputDataSize, inputPositions, outputDataSize, outputPositions, + startMillis, endMillis, runningTime, blockedWallNanosLong, + driverRuntimeStatisticsSupplier == null ? null : driverRuntimeStatisticsSupplier.get()); } else if (driverStats.get() != null) { return driverStats.get(); } - return new DriverStats(0, 0, 0, 0); + long runningTime = processWallNanosUpdater.get(this); + return new DriverStats(getUniqueId(), 0, 0, 0, 0, + startMillis, endMillis, runningTime, blockedWallNanosLong, + driverRuntimeStatisticsSupplier == null ? null : driverRuntimeStatisticsSupplier.get()); } - private TaskStats getSecondTaskStats() { + private TaskStats getTaskStatsBySecond() { int queuedPipeExecs = 0; int runningPipeExecs = 0; @@ -369,21 +389,21 @@ private TaskStats getSecondTaskStats() { long memoryReservation = 0; long cumulativeMemory = 0L; - DateTime start = new DateTime(startNanosLong, ISOChronology.getInstance()); - long endTime = endNanosLong == 0 ? System.currentTimeMillis() : endNanosLong; - DateTime end = new DateTime(endTime, ISOChronology.getInstance()); + DateTime start = new DateTime(startMillis, ISOChronology.getInstance()); + long endTimeMillis = endMillis == 0 ? 
System.currentTimeMillis() : endMillis; + DateTime end = new DateTime(endTimeMillis, ISOChronology.getInstance()); return new TaskStats(start, - start, end, endTime - startNanosLong, startNanosLong - createMillis, 0, 1, + start, end, endTimeMillis - startMillis, startMillis - createMillis, 0, 1, queuedPipeExecs, runningPipeExecs, completePipeExecs, cumulativeMemory, memoryReservation, peakMemory, totalScheduledTime, totalCpuTime, totalUserTime, totalBlockedTime, (runningPipeExecs > 0), ImmutableSet.of(), driverStats.getInputDataSize(), driverStats.getInputPositions(), - driverStats.getOutputDataSize(), driverStats.getOutputPositions(), ImmutableList.of() - ); + driverStats.getOutputDataSize(), driverStats.getOutputPositions(), ImmutableList.of(), + ImmutableList.of(driverStats), null); } public TaskInfo buildLocalModeTaskInfo(String queryId) { TaskId taskId = new TaskId(queryId, pipelineContext.getPipelineId(), driverId); - TaskStats taskStats = getSecondTaskStats(); + TaskStats taskStats = getTaskStatsBySecond(); OutputBufferInfo outputBufferInfo = new OutputBufferInfo(BufferState.OPEN, 0); @@ -418,17 +438,16 @@ public TaskInfo buildLocalModeTaskInfo(String queryId) { taskStats.getTotalPipelineExecs(), taskStats.getCumulativeMemory(), taskStats.getMemoryReservation(), - taskStats.getElapsedTime(), + taskStats.getElapsedTimeMillis(), 0, - taskStats.getElapsedTime(), - taskStats.getTotalScheduledTime(), + taskStats.getElapsedTimeMillis(), + taskStats.getTotalScheduledTimeNanos(), 0, - taskStats.getDeliveryTime() - ); + taskStats.getDeliveryTimeMillis()); } private TaskState getState() { - if (startNanosLong > 0) { + if (startMillis > 0) { if (finished.get()) { return TaskState.FINISHED; } else { @@ -442,4 +461,78 @@ private TaskState getState() { public void addOutputSize(long chunkSize) { this.driverOutputPosition += chunkSize; } + + /** + * Record the runtime stats of Driver in AP-RUNNER Executor. 
+ */ + public static class DriverRuntimeStatistics { + private final long runningCost; + private final long pendingCost; + private final long blockedCost; + private final long openCost; + private final long totalCost; + private final int runningCount; + private final int pendingCount; + private final int blockedCount; + + @JsonCreator + public DriverRuntimeStatistics( + @JsonProperty("runningCost") long runningCost, + @JsonProperty("pendingCost") long pendingCost, + @JsonProperty("blockedCost") long blockedCost, + @JsonProperty("openCost") long openCost, + @JsonProperty("totalCost") long totalCost, + @JsonProperty("runningCount") int runningCount, + @JsonProperty("pendingCount") int pendingCount, + @JsonProperty("blockedCount") int blockedCount) { + this.runningCost = runningCost; + this.pendingCost = pendingCost; + this.blockedCost = blockedCost; + this.openCost = openCost; + this.totalCost = totalCost; + this.runningCount = runningCount; + this.pendingCount = pendingCount; + this.blockedCount = blockedCount; + } + + @JsonProperty + public long getRunningCost() { + return runningCost; + } + + @JsonProperty + public long getPendingCost() { + return pendingCost; + } + + @JsonProperty + public long getBlockedCost() { + return blockedCost; + } + + @JsonProperty + public long getOpenCost() { + return openCost; + } + + @JsonProperty + public long getTotalCost() { + return totalCost; + } + + @JsonProperty + public int getRunningCount() { + return runningCount; + } + + @JsonProperty + public int getPendingCount() { + return pendingCount; + } + + @JsonProperty + public int getBlockedCount() { + return blockedCount; + } + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/DriverExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/DriverExec.java index 117480890..42b2156b5 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/DriverExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/DriverExec.java @@ -16,6 +16,7 @@ package com.alibaba.polardbx.executor.mpp.operator; +import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.operator.ConsumerExecutor; @@ -25,6 +26,7 @@ import com.alibaba.polardbx.executor.operator.SortMergeExchangeExec; import com.alibaba.polardbx.executor.operator.SourceExec; import com.alibaba.polardbx.executor.operator.spill.MemoryRevoker; +import com.alibaba.polardbx.executor.operator.util.DriverObjectPool; import java.util.ArrayList; import java.util.HashMap; @@ -165,12 +167,14 @@ public synchronized void close() { consumer.buildConsume(); } catch (Throwable e) { log.warn("buildConsume consumer:" + consumer, e); + throw GeneralUtil.nestedException(e); } } else if (consumerIsBuffer) { try { consumer.buildConsume(); } catch (Throwable e) { log.warn("buildConsume consumer:" + consumer, e); + throw GeneralUtil.nestedException(e); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/DriverStats.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/DriverStats.java index 58bb0fb2a..a4d7879e3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/DriverStats.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/DriverStats.java @@ -16,24 +16,73 @@ package 
com.alibaba.polardbx.executor.mpp.operator; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; + public class DriverStats { + private final String driverId; private final long inputDataSize; private final long inputPositions; private final long outputDataSize; private final long outputPositions; + private final long startMillis; + private final long endMillis; + private final long processNanos; + + private final long blockedNanos; + private final DriverContext.DriverRuntimeStatistics driverRuntimeStatistics; - public DriverStats(long inputDataSize, long inputPositions, long outputDataSize, long outputPositions) { + @JsonCreator + public DriverStats( + @JsonProperty("driverId") + String driverId, + @JsonProperty("inputDataSize") + long inputDataSize, + @JsonProperty("inputPositions") + long inputPositions, + @JsonProperty("outputDataSize") + long outputDataSize, + @JsonProperty("outputPositions") + long outputPositions, + @JsonProperty("startMillis") + long startMillis, + @JsonProperty("endMillis") + long endMillis, + @JsonProperty("processNanos") + long processNanos, + @JsonProperty("blockedNanos") + long blockedNanos, + @JsonProperty("driverRuntimeStatistics") + DriverContext.DriverRuntimeStatistics driverRuntimeStatistics + ) { + this.driverId = driverId; this.inputDataSize = inputDataSize; this.inputPositions = inputPositions; this.outputDataSize = outputDataSize; this.outputPositions = outputPositions; + this.startMillis = startMillis; + this.endMillis = endMillis; + this.processNanos = processNanos; + this.blockedNanos = blockedNanos; + this.driverRuntimeStatistics = driverRuntimeStatistics; + } + + @JsonProperty + public String getDriverId() { + return driverId; } public long getInputDataSize() { return inputDataSize; } + @JsonProperty + public DriverContext.DriverRuntimeStatistics getDriverRuntimeStatistics() { + return driverRuntimeStatistics; + } + + @JsonProperty public long getInputPositions() { return inputPositions; } @@ -42,7 +91,43 @@ public long getOutputDataSize() { return outputDataSize; } + @JsonProperty public long getOutputPositions() { return outputPositions; } + + @JsonProperty + public long getStartMillis() { + return startMillis; + } + + @JsonProperty + public long getEndMillis() { + return endMillis; + } + + @JsonProperty + public long getProcessNanos() { + return processNanos; + } + + @JsonProperty + public long getBlockedNanos() { + return blockedNanos; + } + + @Override + public String toString() { + return "DriverStats{" + + "driverId='" + driverId + '\'' + + ", inputDataSize=" + inputDataSize + + ", inputPositions=" + inputPositions + + ", outputDataSize=" + outputDataSize + + ", outputPositions=" + outputPositions + + ", startMillis=" + startMillis + + ", endMillis=" + endMillis + + ", processNanos=" + processNanos + + ", blockedNanos=" + blockedNanos + + '}'; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/EmptyExecutor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/EmptyExecutor.java index 2130e5bfa..10b7f1d53 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/EmptyExecutor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/EmptyExecutor.java @@ -16,12 +16,12 @@ package com.alibaba.polardbx.executor.mpp.operator; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.ListenableFuture; import 
com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.operator.Executor; import com.alibaba.polardbx.executor.operator.ProducerExecutor; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListenableFuture; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/ExchangeClient.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/ExchangeClient.java index 2630b30c9..42292414f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/ExchangeClient.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/ExchangeClient.java @@ -16,10 +16,6 @@ package com.alibaba.polardbx.executor.mpp.operator; -import com.google.common.base.Throwables; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; import com.alibaba.polardbx.common.exception.MemoryNotEnoughException; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.utils.logger.Logger; @@ -30,6 +26,10 @@ import com.alibaba.polardbx.executor.mpp.metadata.TaskLocation; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.memory.MemorySetting; +import com.google.common.base.Throwables; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; import io.airlift.http.client.HttpClient; import io.airlift.units.DataSize; import io.airlift.units.Duration; @@ -52,9 +52,9 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; +import static com.alibaba.polardbx.executor.mpp.Threads.ENABLE_WISP; import static com.google.common.base.Preconditions.checkState; import static com.google.common.collect.Sets.newConcurrentHashSet; -import static com.alibaba.polardbx.executor.mpp.Threads.ENABLE_WISP; import static io.airlift.slice.Slices.EMPTY_SLICE; import static java.util.Objects.requireNonNull; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/HttpPageBufferClient.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/HttpPageBufferClient.java index 4f9ddb479..f30980062 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/HttpPageBufferClient.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/HttpPageBufferClient.java @@ -16,15 +16,6 @@ package com.alibaba.polardbx.executor.mpp.operator; -import com.google.common.base.Throwables; -import com.google.common.base.Ticker; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableListMultimap; -import com.google.common.net.MediaType; -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.JdkFutureAdapters; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.logger.Logger; @@ -37,12 +28,21 @@ import com.alibaba.polardbx.executor.mpp.execution.TaskManager; 
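Relatedly, the DriverContext changes earlier in this diff move blocked-time accounting from millisecond timestamps to System.nanoTime(), guarded by a compareAndSet so that only the block/unblock transitions touch the counters. A condensed sketch of that pattern (using AtomicLong instead of the AtomicLongFieldUpdater fields in the actual class):

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

class BlockedTimeSketch {
    private final AtomicBoolean blocked = new AtomicBoolean(false);
    private final AtomicLong blockedStartNanos = new AtomicLong();
    private final AtomicLong blockedWallNanos = new AtomicLong();

    void recordBlocked() {
        // only the unblocked -> blocked transition stamps a start time
        if (blocked.compareAndSet(false, true)) {
            blockedStartNanos.set(System.nanoTime());
        }
    }

    void recordBlockedFinished() {
        // only the blocked -> unblocked transition accumulates the interval
        if (blocked.compareAndSet(true, false)) {
            long start = blockedStartNanos.getAndSet(0);
            if (start > 0) {
                blockedWallNanos.addAndGet(System.nanoTime() - start);
            }
        }
    }

    long totalBlockedNanos() {
        return blockedWallNanos.get();
    }
}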
import com.alibaba.polardbx.executor.mpp.execution.buffer.BufferResult; import com.alibaba.polardbx.executor.mpp.execution.buffer.SerializedChunk; -import com.alibaba.polardbx.gms.node.HostAddressCache; import com.alibaba.polardbx.executor.mpp.metadata.TaskLocation; import com.alibaba.polardbx.executor.mpp.server.TaskResource; import com.alibaba.polardbx.executor.mpp.server.remotetask.Backoff; import com.alibaba.polardbx.executor.mpp.util.Failures; import com.alibaba.polardbx.executor.mpp.util.MillTicker; +import com.alibaba.polardbx.gms.node.HostAddressCache; +import com.google.common.base.Throwables; +import com.google.common.base.Ticker; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableListMultimap; +import com.google.common.net.MediaType; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.JdkFutureAdapters; +import com.google.common.util.concurrent.ListenableFuture; import io.airlift.http.client.HttpClient; import io.airlift.http.client.HttpStatus; import io.airlift.http.client.HttpUriBuilder; @@ -74,10 +74,6 @@ import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; import java.util.concurrent.atomic.AtomicLongFieldUpdater; -import static com.google.common.base.MoreObjects.toStringHelper; -import static com.google.common.base.Strings.isNullOrEmpty; -import static com.google.common.net.HttpHeaders.CONTENT_TYPE; -import static com.google.common.util.concurrent.MoreExecutors.directExecutor; import static com.alibaba.polardbx.common.exception.code.ErrorCode.ERR_REMOTE_BUFFER; import static com.alibaba.polardbx.executor.mpp.client.MppMediaTypes.MPP_BUFFER_COMPLETE; import static com.alibaba.polardbx.executor.mpp.client.MppMediaTypes.MPP_MAX_SIZE; @@ -87,6 +83,10 @@ import static com.alibaba.polardbx.executor.mpp.execution.buffer.PagesSerdeUtil.readSerializedChunks; import static com.alibaba.polardbx.executor.mpp.operator.HttpPageBufferClient.PagesResponse.createEmptyPagesResponse; import static com.alibaba.polardbx.executor.mpp.operator.HttpPageBufferClient.PagesResponse.createPagesResponse; +import static com.google.common.base.MoreObjects.toStringHelper; +import static com.google.common.base.Strings.isNullOrEmpty; +import static com.google.common.net.HttpHeaders.CONTENT_TYPE; +import static com.google.common.util.concurrent.MoreExecutors.directExecutor; import static io.airlift.concurrent.MoreFutures.addTimeout; import static io.airlift.http.client.Request.Builder.prepareDelete; import static io.airlift.http.client.Request.Builder.prepareGet; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/IExchangeClient.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/IExchangeClient.java index 57cea669f..349cfba10 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/IExchangeClient.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/IExchangeClient.java @@ -16,9 +16,9 @@ package com.alibaba.polardbx.executor.mpp.operator; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.mpp.execution.buffer.SerializedChunk; import com.alibaba.polardbx.executor.mpp.metadata.TaskLocation; +import com.google.common.util.concurrent.ListenableFuture; import io.airlift.units.Duration; import java.io.Closeable; diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/LocalAllBufferExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/LocalAllBufferExec.java index 42c0433f2..0c7c5ae6a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/LocalAllBufferExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/LocalAllBufferExec.java @@ -16,9 +16,6 @@ package com.alibaba.polardbx.executor.mpp.operator; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.utils.logger.Logger; @@ -30,13 +27,16 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.spill.SpillMonitor; +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; import java.util.Iterator; import java.util.List; import static com.alibaba.polardbx.executor.utils.ExecUtils.checkException; -import static com.google.common.util.concurrent.MoreExecutors.directExecutor; import static com.alibaba.polardbx.executor.utils.ExecUtils.tryAndCheckException; +import static com.google.common.util.concurrent.MoreExecutors.directExecutor; public class LocalAllBufferExec extends LocalBufferExec { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/LocalBucketPartitionFunction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/LocalBucketPartitionFunction.java index c2dbb04ec..d8d81e0f4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/LocalBucketPartitionFunction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/LocalBucketPartitionFunction.java @@ -29,6 +29,7 @@ */ package com.alibaba.polardbx.executor.mpp.operator; +import com.alibaba.polardbx.common.utils.MathUtils; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.utils.ExecUtils; @@ -54,12 +55,7 @@ public LocalBucketPartitionFunction(int bucketNum, int partCount, int partId) { for (int bucket = 0; bucket < bucketNum; bucket++) { bucketToPartition[bucket * partCount + partId] = bucket; } - this.isPowerOfTwo = ExecUtils.isPowerOfTwo(totalBucketNum); - } - - @Override - public int getPartitionCount() { - return bucketNum; + this.isPowerOfTwo = MathUtils.isPowerOfTwo(totalBucketNum); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/LocalExchanger.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/LocalExchanger.java index 1ffad85e3..b0bc279f6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/LocalExchanger.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/LocalExchanger.java @@ -16,9 +16,9 @@ package com.alibaba.polardbx.executor.mpp.operator; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.mpp.execution.buffer.OutputBufferMemoryManager; import com.alibaba.polardbx.executor.operator.ConsumerExecutor; +import 
com.google.common.util.concurrent.ListenableFuture; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; @@ -118,6 +118,14 @@ public void buildConsume() { } } + protected void forceBuildSynchronize() { + int buildCount = status.getBuildCount(); + // all exchangers have reached the build point + if (buildCount == status.getCurrentParallelism()) { + this.executors.stream().forEach(consumerExecutor -> consumerExecutor.buildConsume()); + } + } + public List getExecutors() { return executors; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/LocalExecutionPlanner.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/LocalExecutionPlanner.java index f56508cca..570cec737 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/LocalExecutionPlanner.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/LocalExecutionPlanner.java @@ -56,7 +56,6 @@ import com.alibaba.polardbx.executor.mpp.operator.factory.MaterializedJoinExecFactory; import com.alibaba.polardbx.executor.mpp.operator.factory.NonBlockGeneralExecFactory; import com.alibaba.polardbx.executor.mpp.operator.factory.OutputExecutorFactory; -import com.alibaba.polardbx.executor.mpp.operator.factory.OverWindowFramesExecFactory; import com.alibaba.polardbx.executor.mpp.operator.factory.ParallelHashJoinExecutorFactory; import com.alibaba.polardbx.executor.mpp.operator.factory.PipelineFactory; import com.alibaba.polardbx.executor.mpp.operator.factory.ProjectExecFactory; @@ -64,14 +63,20 @@ import com.alibaba.polardbx.executor.mpp.operator.factory.SortAggExecFactory; import com.alibaba.polardbx.executor.mpp.operator.factory.SortExecutorFactory; import com.alibaba.polardbx.executor.mpp.operator.factory.SortMergeJoinFactory; +import com.alibaba.polardbx.executor.mpp.operator.factory.SortWindowFramesExecFactory; import com.alibaba.polardbx.executor.mpp.operator.factory.SubPipelineFactory; import com.alibaba.polardbx.executor.mpp.operator.factory.TopNExecutorFactory; import com.alibaba.polardbx.executor.mpp.operator.factory.UnionExecFactory; import com.alibaba.polardbx.executor.mpp.operator.factory.ValueExecutorFactory; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItem; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItemImpl; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItemKey; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFManager; import com.alibaba.polardbx.executor.mpp.planner.LocalExchange; import com.alibaba.polardbx.executor.mpp.planner.PipelineFragment; import com.alibaba.polardbx.executor.mpp.planner.PlanFragment; import com.alibaba.polardbx.executor.mpp.planner.RemoteSourceNode; +import com.alibaba.polardbx.executor.mpp.planner.SimpleFragmentRFManager; import com.alibaba.polardbx.executor.mpp.planner.WrapPipelineFragment; import com.alibaba.polardbx.executor.mpp.split.SplitInfo; import com.alibaba.polardbx.executor.mpp.split.SplitManager; @@ -101,6 +106,7 @@ import com.alibaba.polardbx.optimizer.core.rel.MemSort; import com.alibaba.polardbx.optimizer.core.rel.MergeSort; import com.alibaba.polardbx.optimizer.core.rel.NLJoin; +import com.alibaba.polardbx.optimizer.core.rel.OSSTableScan; import com.alibaba.polardbx.optimizer.core.rel.PhysicalFilter; import com.alibaba.polardbx.optimizer.core.rel.PhysicalProject; import com.alibaba.polardbx.optimizer.core.rel.SemiBKAJoin; @@ -111,6 +117,7 @@ import 
com.alibaba.polardbx.optimizer.core.rel.SortMergeJoin; import com.alibaba.polardbx.optimizer.core.rel.SortWindow; import com.alibaba.polardbx.optimizer.core.rel.TopN; +import com.alibaba.polardbx.optimizer.core.rel.mpp.ColumnarExchange; import com.alibaba.polardbx.optimizer.core.rel.mpp.MppExchange; import com.alibaba.polardbx.optimizer.memory.MemoryPool; import com.alibaba.polardbx.optimizer.memory.MemoryType; @@ -161,6 +168,7 @@ import org.apache.calcite.util.Pair; import java.net.URI; +import java.text.MessageFormat; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -172,8 +180,15 @@ public class LocalExecutionPlanner { + public static final Set> SUPPORT_ONE_SIDE_CACHE_NODES = ImmutableSet.of( + HashJoin.class, SemiHashJoin.class, NLJoin.class, SemiNLJoin.class, HashGroupJoin.class); + public static final Set> SUPPORT_ALL_CACHE_NODES = ImmutableSet.of( + HashAgg.class, MemSort.class, TopN.class, LogicalSort.class, HashWindow.class); private static final Logger log = LoggerFactory.getLogger(LocalExecutionPlanner.class); + // logger for new runtime filter. + private static final Logger LOGGER_FRAGMENT_MANAGER = LoggerFactory.getLogger(FragmentRFManager.class); + private static final Set> SUPPORT_NODES = ImmutableSet.of( LogicalView.class, HashJoin.class, SemiHashJoin.class, SortMergeJoin.class, SemiSortMergeJoin.class, BKAJoin.class, SemiBKAJoin.class, NLJoin.class, SemiNLJoin.class, MaterializedSemiJoin.class, HashAgg.class, @@ -184,67 +199,50 @@ public class LocalExecutionPlanner { LogicalOutFile.class, PhysicalProject.class, PhysicalFilter.class, LogicalInsert.class, RecursiveCTE.class, RecursiveCTEAnchor.class ); - - public static final Set> SUPPORT_ONE_SIDE_CACHE_NODES = ImmutableSet.of( - HashJoin.class, SemiHashJoin.class, NLJoin.class, SemiNLJoin.class, HashGroupJoin.class); - - public static final Set> SUPPORT_ALL_CACHE_NODES = ImmutableSet.of( - HashAgg.class, MemSort.class, TopN.class, LogicalSort.class, HashWindow.class); - - public static final boolean isAssignableFrom(Class ret) { - return isAssignableFrom(ret, SUPPORT_NODES); - } - - public static final boolean isAssignableFrom(Class ret, Set> relNodes) { - for (Class classNode : relNodes) { - if (classNode.isAssignableFrom(ret)) { - return true; - } - } - return false; - } - private final ExchangeClientSupplier exchangeClientSupplier; private final PagesSerdeFactory pagesSerdeFactory; - private ExecutionContext context; - private int taskNumber; - private int pipelineIdGen; - private List pipelineFactorys; private final boolean supportLocalBuffer; private final long localBufferSize; private final Executor notificationExecutor; private final SpillerFactory spillerFactory; - private final boolean isCluster; - + private final Map bloomFilterExpressionMap = new HashMap<>(); + // Used to skip runtime filter in AP_LOCAL executor mode. + private final boolean enableRuntimeFilter; + private final int localPartitionCount; + private final int totalPartitionCount; + private ExecutionContext context; + private int taskNumber; + private int pipelineIdGen; + private List pipelineFactorys; private int defaultParallelism; - //FIXME this parameter no longer takes effect in MPP mode private int bkaJoinParallelism; - //after the final Sort, pipeline parallelism must stay at 1 private boolean holdCollation; - private HttpClient httpClient; private URI runtimeFilterUpdateUri; - private final Map bloomFilterExpressionMap = new HashMap<>();
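One more scheduling-related note: the forceBuildSynchronize method added to LocalExchanger above fires buildConsume on every downstream consumer only when the last parallel producer reaches its build point. A rough standalone sketch of that last-arriver-triggers pattern (illustrative names, not the actual PolarDB-X classes):

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

class BuildBarrierSketch {
    interface Consumer {
        void buildConsume();
    }

    private final List<Consumer> consumers;
    private final int parallelism;
    private final AtomicInteger buildCount = new AtomicInteger();

    BuildBarrierSketch(List<Consumer> consumers, int parallelism) {
        this.consumers = consumers;
        this.parallelism = parallelism;
    }

    // each producer calls this once it has finished feeding the exchanger
    void producerFinished() {
        // the last producer to arrive triggers the downstream build exactly once
        if (buildCount.incrementAndGet() == parallelism) {
            consumers.forEach(Consumer::buildConsume);
        }
    }
}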
- private final boolean enableRuntimeFilter; - //only used for logicalView private int totalPrefetch; - private boolean forbidMultipleReadConn; - private boolean expandView; + private boolean localBloomFilter = false; + + // logical table name and its split count + private Map splitCountMap; + + private SplitManager splitManager; public LocalExecutionPlanner(ExecutionContext context, ExchangeClientSupplier exchangeClientSupplier, int defaultParallelism, int bkaJoinParallelism, int taskNumber, int prefetch, Executor notificationExecutor, SpillerFactory spillerFactory, HttpClient httpClient, URI runtimeFilterUpdateUri, - boolean enableRuntimeFilter) { + boolean enableRuntimeFilter, + int localPartitionCount, int totalPartitionCount, + Map splitCountMap, SplitManager splitManager) { this.exchangeClientSupplier = exchangeClientSupplier; + this.totalPartitionCount = totalPartitionCount; this.pagesSerdeFactory = new PagesSerdeFactory(false); this.context = context; this.defaultParallelism = defaultParallelism; @@ -262,6 +260,26 @@ public LocalExecutionPlanner(ExecutionContext context, ExchangeClientSupplier ex this.httpClient = httpClient; this.runtimeFilterUpdateUri = runtimeFilterUpdateUri; this.enableRuntimeFilter = enableRuntimeFilter; + this.localPartitionCount = localPartitionCount; + this.splitCountMap = splitCountMap; + this.splitManager = splitManager; + } + + public void setForbidMultipleReadConn(boolean forbidMultipleReadConn) { + this.forbidMultipleReadConn = forbidMultipleReadConn; + } + + public static final boolean isAssignableFrom(Class ret) { + return isAssignableFrom(ret, SUPPORT_NODES); + } + + public static final boolean isAssignableFrom(Class ret, Set> relNodes) { + for (Class classNode : relNodes) { + if (classNode.isAssignableFrom(ret)) { + return true; + } + } + return false; } public OutputBufferMemoryManager createLocalMemoryManager() { @@ -285,10 +303,7 @@ public List plan(PlanFragment fragment, OutputBuffer outputBuff DefaultSchema.setSchemaName(session.getSchema()); RelNode relNode = fragment.getSerRootNode( context.getSchemaName(), PlannerContext.fromExecutionContext(context)); - //The executor constructors created by PipelineFactory may reference the RelMetadataProvider, - //so here we must guarantee that it is the DrdsRelMetadataProvider - RelMetadataQuery.THREAD_PROVIDERS.set(JaninoRelMetadataProvider.of( - relNode.getCluster().getMetadataProvider())); + localBloomFilter = fragment.isLocalBloomFilter(); PipelineFragment pipelineFragment = new PipelineFragment(defaultParallelism, relNode); ExecutorFactory factory = visit(null, relNode, pipelineFragment); if (pipelineFragment.getParallelism() > 1 @@ -329,10 +344,6 @@ public List plan(RelNode relNode, LocalBufferExecutorFactory re OutputBufferMemoryManager manager, String queryId) { try { DefaultSchema.setSchemaName(context.getSchemaName()); - //The executor constructors created by PipelineFactory may reference the RelMetadataProvider, - //so here we must guarantee that it is the DrdsRelMetadataProvider - RelMetadataQuery.THREAD_PROVIDERS.set(JaninoRelMetadataProvider.of( - relNode.getCluster().getMetadataProvider())); PipelineFragment pipelineFragment = new PipelineFragment(defaultParallelism, relNode); ExecutorFactory factory = visit(null, relNode, pipelineFragment); @@ -440,7 +451,7 @@ private ExecutorFactory visitRelNode(RelNode parent, RelNode current, PipelineFr return visitView(parent, (LogicalView) current, pipelineFragment); } else if (current instanceof HashJoin) { return visitHashJoin((Join) current, pipelineFragment, ((HashJoin) current).getOtherCondition(), - ((HashJoin) current).getEqualCondition(), false, null + 
((HashJoin) current).getEqualCondition(), false, null, ((HashJoin) current).isKeepPartition() ); } else if (current instanceof SemiHashJoin) { return visitSemiJoin((SemiHashJoin) current, pipelineFragment); @@ -519,9 +530,13 @@ private ExecutorFactory visitRelNode(RelNode parent, RelNode current, PipelineFr og.hasInputOperator = false; } } - return visit(parent, ((Gather) current).getInput(), pipelineFragment); + ExecutorFactory ret = visit(parent, ((Gather) current).getInput(), pipelineFragment); + ret.setExchange((Gather) current); + return ret; } else if (current instanceof MppExchange) { return visitMppExchange(parent, (MppExchange) current, pipelineFragment); + } else if (current instanceof ColumnarExchange) { + return visitColumnarExchange(parent, (ColumnarExchange) current, pipelineFragment); } else if (current instanceof MergeSort) { return visitMergeSort(parent, (MergeSort) current, pipelineFragment); } else if (current instanceof LogicalUnion) { @@ -531,7 +546,7 @@ private ExecutorFactory visitRelNode(RelNode parent, RelNode current, PipelineFr } else if (current instanceof LogicalExpand) { return visitExpand((LogicalExpand) current, pipelineFragment); } else if (current instanceof SortWindow) { - return visitOverWindow((SortWindow) current, pipelineFragment); + return visitSortWindow((SortWindow) current, pipelineFragment); } else if (current instanceof HashWindow) { return visitHashWindow((HashWindow) current, pipelineFragment); } else if (current instanceof LogicalCorrelate) { @@ -587,33 +602,44 @@ private ExecutorFactory visitLogicalCorrelate(LogicalCorrelate current, Pipeline } } - private ExecutorFactory visitOverWindow(SortWindow overWindow, PipelineFragment pipelineFragment) { - ExecutorFactory overWindowFactory = createOverWindowFactory(overWindow, pipelineFragment); - // Note: in principle the overWindow here contains only one group, but since other code may not obey that rule, we take a conservative approach: - // whenever there is an ungrouped group, i.e. the partition-by list of the over clause is empty, the parallelism of this windowExec must be 1 - if (overWindow.groups.stream().anyMatch(g -> g.keys.size() == 0)) { - pipelineFragment.setParallelism(1); - } - return overWindowFactory; - } + private ExecutorFactory visitSortWindow(SortWindow sortWindow, PipelineFragment pipelineFragment) { + PipelineFragment childFragment = new PipelineFragment(defaultParallelism, sortWindow.getInput()); + childFragment.setContainLimit(pipelineFragment.isContainLimit()); + ExecutorFactory childExecutorFactory = visit(sortWindow, sortWindow.getInput(), childFragment); + boolean noGroup = sortWindow.groups.stream().anyMatch(g -> g.keys.size() == 0); + if (childFragment.getParallelism() > 1 && noGroup) { + pipelineFragment.holdSingleTonParallelism(); + List columns = CalciteUtils.getTypes(sortWindow.getInput().getRowType()); + LocalBufferNode localBufferNode = LocalBufferNode.create(childFragment.getRoot()); + OutputBufferMemoryManager localBufferManager = createLocalMemoryManager(); + LocalBufferExecutorFactory localBufferExecutorFactory = new LocalBufferExecutorFactory( + localBufferManager, columns, pipelineFragment.getParallelism()); + LocalExchange localBufferExchange = new LocalExchange( + CalciteUtils.getTypes( + sortWindow.getInput().getRowType()), ImmutableList.of(), LocalExchange.LocalExchangeMode.SINGLE, + true); - private ExecutorFactory visitHashWindow(HashWindow overWindow, PipelineFragment pipelineFragment) { - ExecutorFactory overWindowFactory = createHashWindowFactory(overWindow, pipelineFragment); - // Note: in principle the overWindow here contains only one group, but since other code may not obey that rule, we take a conservative approach: - // whenever there is an ungrouped group, i.e. the partition-by list of the over clause is empty, the parallelism of this windowExec must be 1 - if (overWindow.groups.stream().anyMatch(g -> g.keys.size() == 0)) { - pipelineFragment.setParallelism(1); - } - return overWindowFactory; - } + LocalExchangeConsumerFactory consumerFactory = new LocalExchangeConsumerFactory( + localBufferExecutorFactory, localBufferManager, localBufferExchange); - private ExecutorFactory createOverWindowFactory(SortWindow overWindow, PipelineFragment pipelineFragment) { - RelNode input = overWindow.getInput(); - ExecutorFactory childExecutorFactory = visit(overWindow, input, pipelineFragment); - return new OverWindowFramesExecFactory(overWindow, childExecutorFactory); + PipelineFactory bufferPipelineFactory = + new PipelineFactory(childExecutorFactory, consumerFactory, + childFragment.setPipelineId(pipelineIdGen++)); + + pipelineFactorys.add(bufferPipelineFactory); + + pipelineFragment.getDependency().addAll(childFragment.getDependency()); + pipelineFragment.addBufferNodeChild(localBufferNode.getInput().getRelatedId(), childFragment); + + return new SortWindowFramesExecFactory(sortWindow, localBufferExecutorFactory, + pipelineFragment.getParallelism()); + } else { + pipelineFragment.inherit(childFragment); + return new SortWindowFramesExecFactory(sortWindow, childExecutorFactory, pipelineFragment.getParallelism()); + } } - private ExecutorFactory createHashWindowFactory(HashWindow overWindow, PipelineFragment pipelineFragment) { + private ExecutorFactory visitHashWindow(HashWindow overWindow, PipelineFragment pipelineFragment) { List columnTypes = CalciteUtils.getTypes(overWindow.getInput().getRowType()); PipelineFragment childFragment = new PipelineFragment(defaultParallelism, overWindow.getInput()); ExecutorFactory childExecutorFactory = visit(overWindow, overWindow.getInput(), childFragment); @@ -656,7 +682,7 @@ private ExecutorFactory visitBaseTable(RelNode parent, BaseTableOperation other, logicalView.setRelatedId(other.getRelatedId()); pipelineFragment.addLogicalView(logicalView); pipelineFragment.holdSingleTonParallelism(); - SplitInfo splitInfo = new SplitManager().getSingleSplit(logicalView, context); + SplitInfo splitInfo = splitManager.getSingleSplit(logicalView, context); pipelineFragment.putSource(logicalView.getRelatedId(), splitInfo); return createViewFactory( parent, logicalView, pipelineFragment, spillerFactory, 1, false); @@ -749,7 +775,7 @@ private ExecutorFactory visitSortMergeJoin(Join join, List leftColumns, private ExecutorFactory visitSemiJoin(SemiHashJoin current, PipelineFragment fragment) { boolean maxOneRow = current.getJoinType() == JoinRelType.LEFT || current.getJoinType() == JoinRelType.INNER; return visitHashJoin(current, fragment, current.getOtherCondition(), current.getEqualCondition(), - maxOneRow, current.getOperands() + maxOneRow, current.getOperands(), current.isKeepPartition() ); } @@ -915,7 +941,89 @@ private ExecutorFactory visitGroupJoin(RelNode parent, HashGroupJoin current, Pi private ExecutorFactory visitHashJoin(Join current, PipelineFragment fragment, RexNode otherCond, RexNode equalCond, boolean maxOneRow, - List oprands) { + List oprands, boolean keepPartition) { + Join join = current; + RelNode buildNode = join.getInner(); + RelNode probeNode = join.getOuter(); + + // Parse the item keys from the RelNode of the join. 
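+ // (Judging from the getters used just below, each item key presumably pairs one equi-join key column on the build side with its probe-side counterpart; see buildColumnName / probeColumnName.)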
+ List rfItemKeys = FragmentRFItemKey.buildItemKeys(join); + + if (LOGGER_FRAGMENT_MANAGER.isDebugEnabled()) { + boolean isOuterBuild = (current instanceof HashJoin && ((HashJoin) current).isOuterBuild()) + || (current instanceof SemiHashJoin && ((SemiHashJoin) current).isOuterBuild()); + for (FragmentRFItemKey itemKey : rfItemKeys) { + LOGGER_FRAGMENT_MANAGER.debug("itemKey = " + itemKey + " isReversed = " + isOuterBuild); + } + } + + boolean useRF = context.getParamManager().getBoolean(ConnectionParams.ENABLE_NEW_RF); + final boolean useXXHashRFinBuild = + context.getParamManager().getBoolean(ConnectionParams.ENABLE_XXHASH_RF_IN_BUILD); + final boolean useXXHashRFinFilter = + context.getParamManager().getBoolean(ConnectionParams.ENABLE_XXHASH_RF_IN_FILTER); + final float defaultFloatFpp = context.getParamManager().getFloat(ConnectionParams.RUNTIME_FILTER_FPP); + final long rowUpperBound = context.getParamManager().getLong(ConnectionParams.GLOBAL_RF_ROWS_UPPER_BOUND); + final long rowLowerBound = context.getParamManager().getLong(ConnectionParams.GLOBAL_RF_ROWS_LOWER_BOUND); + final int rfSampleCount = context.getParamManager().getInt(ConnectionParams.NEW_RF_SAMPLE_COUNT); + final float floatFilterRatio = + context.getParamManager().getFloat(ConnectionParams.NEW_RF_FILTER_RATIO_THRESHOLD); + + final double defaultDoubleFpp = Double.parseDouble(Float.toString(defaultFloatFpp)); + final double doubleFilterRatio = Double.parseDouble(Float.toString(floatFilterRatio)); + + // pre-check the local partition count and total partition count + if (Integer.signum(totalPartitionCount) != Integer.signum(localPartitionCount)) { + useRF = false; + LOGGER_FRAGMENT_MANAGER.error(MessageFormat.format( + "The values of totalPartitionCount {0} and localPartitionCount {1} are incorrect.", + totalPartitionCount, localPartitionCount + )); + } + + // Initialize the runtime filter manager in this fragment. + if (fragment.getFragmentRFManager() == null && useRF) { + FragmentRFManager fragmentRFManager = new SimpleFragmentRFManager( + totalPartitionCount, localPartitionCount, + defaultDoubleFpp, rowUpperBound, rowLowerBound, + doubleFilterRatio, rfSampleCount + ); + + fragment.setFragmentRFManager(fragmentRFManager); + } + + // Build runtime filter item. + if (fragment.getFragmentRFManager() != null && useRF) { + for (int itemIndex = 0; itemIndex < rfItemKeys.size(); itemIndex++) { + FragmentRFItemKey itemKey = rfItemKeys.get(itemIndex); + + String buildColumnName = itemKey.getBuildColumnName(); + String probeColumnName = itemKey.getProbeColumnName(); + + FragmentRFManager manager = fragment.getFragmentRFManager(); + + boolean localPairWise = localPartitionCount > 0 && context.getParamManager() + .getBoolean(ConnectionParams.ENABLE_LOCAL_PARTITION_WISE_JOIN) + && current.getTraitSet().getPartitionWise().isLocalPartition(); + + // Use local mode for local partition-wise join because the hash table is not shared across all partitions. + // When using local mode the totalPartitionCount must be greater than 0. + FragmentRFManager.RFType rfType = localPairWise && totalPartitionCount > 0 + ? 
FragmentRFManager.RFType.LOCAL : FragmentRFManager.RFType.BROADCAST; + + // localPartitionCount * taskNumber; + + // To build a fragment RF item + FragmentRFItem rfItem = new FragmentRFItemImpl( + manager, buildColumnName, probeColumnName, + useXXHashRFinBuild, useXXHashRFinFilter, rfType + ); + + // add rf item into manager + manager.addItem(itemKey, rfItem); + } + } + boolean hybrid = false; if (spillerFactory != null && context.getParamManager().getInt( @@ -923,8 +1031,7 @@ private ExecutorFactory visitHashJoin(Join current, PipelineFragment fragment, hybrid = true; } boolean driverBuilder = false; - RelNode probeNode = current.getOuter(); - RelNode buildNode = current.getInner(); + if (current instanceof HashJoin) { driverBuilder = ((HashJoin) current).isOuterBuild(); if (driverBuilder) { @@ -936,7 +1043,34 @@ private ExecutorFactory visitHashJoin(Join current, PipelineFragment fragment, probeNode = current.getInner(); buildNode = current.getOuter(); } + boolean localPairWise = + context.getParamManager().getBoolean(ConnectionParams.ENABLE_LOCAL_PARTITION_WISE_JOIN) + && current.getTraitSet().getPartitionWise().isLocalPartition(); + if (localPairWise) { + //make sure that it is not hybrid + if (hybrid) { + hybrid = false; + log.warn("Hybrid-Hash-Join doesn't support local pairwise mode!"); + } + } + } else if (current instanceof SemiHashJoin) { + driverBuilder = ((SemiHashJoin) current).isOuterBuild(); + JoinRelType type = current.getJoinType(); + // TODO support null safe under reverse anti join + if (type == JoinRelType.ANTI) { + driverBuilder &= !ParallelHashJoinExecutorFactory.containAntiJoinOperands(oprands, current); + } + if (driverBuilder) { + //make sure that it is not hybrid + if (hybrid) { + hybrid = false; + log.warn("Hybrid-Hash-Join doesn't support driverBuilder mode!"); + } + probeNode = current.getInner(); + buildNode = current.getOuter(); + } } + //the build child side List builderColumns = CalciteUtils.getTypes(buildNode.getRowType()); ExecutorFactory emptybuildFactory = new EmptyExecutorFactory(builderColumns); @@ -1001,40 +1135,141 @@ } return joinExecutorFactory; } else { - OutputBufferMemoryManager localBufferManager = createLocalMemoryManager(); - ExecutorFactory probeExecutorFactory = visit(current, probeNode, fragment); - //generate current executorFactory - int numPartitions = buildFramgent.getParallelism() > fragment.getParallelism() ? - fragment.getParallelism() : buildFramgent.getParallelism(); - ParallelHashJoinExecutorFactory joinExecutorFactory = - new ParallelHashJoinExecutorFactory(current, otherCond, equalCond, maxOneRow, oprands, - emptybuildFactory, - probeExecutorFactory, fragment.getParallelism(), numPartitions, driverBuilder); + boolean localPairWise = localPartitionCount > 0 && context.getParamManager() + .getBoolean(ConnectionParams.ENABLE_LOCAL_PARTITION_WISE_JOIN) + && current.getTraitSet().getPartitionWise().isLocalPartition(); + boolean separateProbe = + context.getParamManager().getBoolean(ConnectionParams.LOCAL_PAIRWISE_PROBE_SEPARATE); + if (localPairWise && separateProbe) { + PipelineFragment probeChildFragment = new PipelineFragment( + defaultParallelism, probeNode); + + if (fragment.getFragmentRFManager() != null) { + // Set fragment manager for children pipeline when using partition exchanger. 
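+ // (Presumably shared so that runtime filters registered on the build side are also visible to this probe-side child pipeline.)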
+ probeChildFragment.setFragmentRFManager(fragment.getFragmentRFManager()); + } - //generate child's pipelineFactory - LocalExchange localExchange = - new LocalExchange(CalciteUtils.getTypes(buildNode.getRowType()), ImmutableList.of(), - LocalExchange.LocalExchangeMode.RANDOM, true); - LocalExchangeConsumerFactory consumerFactory = - new LocalExchangeConsumerFactory(joinExecutorFactory, localBufferManager, localExchange); + ExecutorFactory probeChildExecutorFactory = visit(current, probeNode, probeChildFragment); + + OutputBufferMemoryManager localProbeBufferManager = createLocalMemoryManager(); + List outerColumns = CalciteUtils.getTypes(probeNode.getRowType()); + List joinKeys = EquiJoinUtils + .buildEquiJoinKeys(current, current.getOuter(), current.getInner(), (RexCall) equalCond, + current.getJoinType()); + List keyTypes = + joinKeys.stream().map(EquiJoinKey::getUnifiedType).collect(Collectors.toList()); + List keyIndexes = + joinKeys.stream().map(EquiJoinKey::getOuterIndex).collect(Collectors.toList()); + LocalExchange localProbeExchange = + new LocalExchange(CalciteUtils.getTypes(probeNode.getRowType()), keyIndexes, + keyTypes, LocalExchange.LocalExchangeMode.CHUNK_PARTITION, true); - PipelineFactory buildPipelineFactory = - new PipelineFactory(childFactory, consumerFactory, buildFramgent.setPipelineId(pipelineIdGen++)); - buildFramgent.setBuildDepOnAllConsumers(true); - pipelineFactorys.add(buildPipelineFactory); - fragment.addDependency(buildPipelineFactory.getPipelineId()); - if (forbidMultipleReadConn) { - fragment.getProperties().addDependencyForChildren(buildPipelineFactory.getPipelineId()); - } - fragment.addChild(buildFramgent); + LocalBufferExecutorFactory localBufferExecutorFactory = new LocalBufferExecutorFactory( + localProbeBufferManager, outerColumns, probeChildFragment.getParallelism()); + + //generate local-buffer consume pipelineFactory + LocalExchangeConsumerFactory consumerFactory = new LocalExchangeConsumerFactory( + localBufferExecutorFactory, localProbeBufferManager, localProbeExchange); + PipelineFactory consumerPipeFactory = new PipelineFactory(probeChildExecutorFactory, consumerFactory, + probeChildFragment.setPipelineId(pipelineIdGen++)); + pipelineFactorys.add(consumerPipeFactory); + fragment.addChild(probeChildFragment); + + //generate current executorFactory + int numPartitions = Math.min(buildFramgent.getParallelism(), fragment.getParallelism()); + ParallelHashJoinExecutorFactory joinExecutorFactory = + new ParallelHashJoinExecutorFactory(fragment, current, otherCond, + equalCond, maxOneRow, oprands, + emptybuildFactory, + localBufferExecutorFactory, fragment.getParallelism(), numPartitions, driverBuilder, + localPartitionCount, keepPartition); + + //generate child's pipelineFactory + LocalExchange localExchange = null; + // create partition exchange under partition wise join + keyTypes = joinKeys.stream().map(EquiJoinKey::getUnifiedType).collect(Collectors.toList()); + List keyInnerIndexes = + joinKeys.stream().map(EquiJoinKey::getInnerIndex).collect(Collectors.toList()); + + //generate build pipelineFactory + localExchange = + new LocalExchange(CalciteUtils.getTypes(buildNode.getRowType()), keyInnerIndexes, + keyTypes, LocalExchange.LocalExchangeMode.CHUNK_PARTITION, true); - if (fragment.isContainLimit() && probeExecutorFactory instanceof LogicalViewExecutorFactory && - context.getParamManager().getBoolean(ConnectionParams.ENABLE_DRIVING_STREAM_SCAN)) { - ((LogicalViewExecutorFactory) probeExecutorFactory).enableDrivingResumeSource(); - 
joinExecutorFactory.enableStreamJoin(true); - } + OutputBufferMemoryManager localBufferManager = createLocalMemoryManager(); + LocalExchangeConsumerFactory buildConsumerFactory = + new LocalExchangeConsumerFactory(joinExecutorFactory, localBufferManager, localExchange); + + PipelineFactory buildPipelineFactory = + new PipelineFactory(childFactory, buildConsumerFactory, + buildFramgent.setPipelineId(pipelineIdGen++)); + buildFramgent.setBuildDepOnAllConsumers(true); + pipelineFactorys.add(buildPipelineFactory); + fragment.addDependency(buildPipelineFactory.getPipelineId()); + if (forbidMultipleReadConn) { + log.error("forbid multiple read connection is true, check this"); + fragment.getProperties().addDependencyForChildren(buildPipelineFactory.getPipelineId()); + probeChildFragment.getProperties().addDependencyForChildren(buildPipelineFactory.getPipelineId()); + } + fragment.addChild(buildFramgent); - return joinExecutorFactory; + return joinExecutorFactory; + } else { + OutputBufferMemoryManager localBufferManager = createLocalMemoryManager(); + ExecutorFactory probeExecutorFactory = visit(current, probeNode, fragment); + //generate current executorFactory + int numPartitions = buildFramgent.getParallelism() > fragment.getParallelism() ? + fragment.getParallelism() : buildFramgent.getParallelism(); + ParallelHashJoinExecutorFactory joinExecutorFactory = + new ParallelHashJoinExecutorFactory(fragment, current, otherCond, + equalCond, maxOneRow, oprands, + emptybuildFactory, + probeExecutorFactory, fragment.getParallelism(), numPartitions, driverBuilder, + localPairWise ? localPartitionCount : -1, keepPartition); + + //generate child's pipelineFactory + LocalExchange localExchange = null; + if (localPairWise) { + // create partition exchange under partition wise join + List joinKeys = EquiJoinUtils + .buildEquiJoinKeys(current, current.getOuter(), current.getInner(), (RexCall) equalCond, + current.getJoinType()); + List keyTypes = + joinKeys.stream().map(t -> t.getUnifiedType()).collect(Collectors.toList()); + List keyInnerIndexes = + joinKeys.stream().map(t -> t.getInnerIndex()).collect(Collectors.toList()); + + //generate build pipelineFactory + localExchange = + new LocalExchange(CalciteUtils.getTypes(current.getInner().getRowType()), keyInnerIndexes, + keyTypes, LocalExchange.LocalExchangeMode.CHUNK_PARTITION, true); + } else { + // create random exchange under normal hash join + localExchange = new LocalExchange(CalciteUtils.getTypes(buildNode.getRowType()), ImmutableList.of(), + LocalExchange.LocalExchangeMode.RANDOM, true); + } + + LocalExchangeConsumerFactory consumerFactory = + new LocalExchangeConsumerFactory(joinExecutorFactory, localBufferManager, localExchange); + + PipelineFactory buildPipelineFactory = + new PipelineFactory(childFactory, consumerFactory, buildFramgent.setPipelineId(pipelineIdGen++)); + buildFramgent.setBuildDepOnAllConsumers(true); + pipelineFactorys.add(buildPipelineFactory); + fragment.addDependency(buildPipelineFactory.getPipelineId()); + if (forbidMultipleReadConn) { + fragment.getProperties().addDependencyForChildren(buildPipelineFactory.getPipelineId()); + } + fragment.addChild(buildFramgent); + + if (fragment.isContainLimit() && probeExecutorFactory instanceof LogicalViewExecutorFactory && + context.getParamManager().getBoolean(ConnectionParams.ENABLE_DRIVING_STREAM_SCAN)) { + ((LogicalViewExecutorFactory) probeExecutorFactory).enableDrivingResumeSource(); + joinExecutorFactory.enableStreamJoin(true); + } + + return joinExecutorFactory; + } } } @@ 
-1219,12 +1454,40 @@ private ExecutorFactory visitMergeSort(RelNode parent, MergeSort mergeSort, Pipe } } + private ExecutorFactory visitColumnarExchange( + RelNode parent, ColumnarExchange exchange, PipelineFragment pipelineFragment) { + if (exchange.isMergeSortExchange()) { + MergeSort mergeSort = MergeSort.create(exchange.getInput(), exchange.getCollation(), null, null); + mergeSort.setRelatedId(exchange.getRelatedId()); + ExecutorFactory ret = visitMergeSort(parent, mergeSort, pipelineFragment); + ret.setExchange(exchange); + return ret; + } else { + //Special handling for Gather && MppExchange: Gather currently does not record CPU time, + // so when computing the CPU time of Gather && MppExchange, do not subtract the input operator's time, otherwise the result would be negative + RuntimeStatistics statistics = (RuntimeStatistics) context.getRuntimeStatistics(); + if (statistics != null) { + RuntimeStatistics.OperatorStatisticsGroup og = + statistics.getRelationToStatistics().get(exchange.getRelatedId()); + if (og != null) { + og.hasInputOperator = false; + } + } + + ExecutorFactory ret = visit(parent, exchange.getInput(), pipelineFragment); + ret.setExchange(exchange); + return ret; + } + } + private ExecutorFactory visitMppExchange( RelNode parent, MppExchange exchange, PipelineFragment pipelineFragment) { if (exchange.isMergeSortExchange()) { MergeSort mergeSort = MergeSort.create(exchange.getInput(), exchange.getCollation(), null, null); mergeSort.setRelatedId(exchange.getRelatedId()); - return visitMergeSort(parent, mergeSort, pipelineFragment); + ExecutorFactory ret = visitMergeSort(parent, mergeSort, pipelineFragment); + ret.setExchange(exchange); + return ret; } else { //Special handling for Gather && MppExchange: Gather currently does not record CPU time, // so when computing the CPU time of Gather && MppExchange, do not subtract the input operator's time, otherwise the result would be negative @@ -1237,7 +1500,9 @@ private ExecutorFactory visitMppExchange( } } - return visit(parent, exchange.getInput(), pipelineFragment); + ExecutorFactory ret = visit(parent, exchange.getInput(), pipelineFragment); + ret.setExchange(exchange); + return ret; } } @@ -1383,9 +1648,9 @@ private ExecutorFactory visitView( SplitInfo splitInfo; if (logicalView.fromTableOperation() != null) { - splitInfo = new SplitManager().getSingleSplit(logicalView, context); + splitInfo = splitManager.getSingleSplit(logicalView, context); } else { - splitInfo = new SplitManager().getSplits(logicalView, context, false); + splitInfo = splitManager.getSplits(logicalView, context, false); } if (logicalView.pushedRelNodeIsSort()) { @@ -1404,38 +1669,15 @@ } else { if (defaultParallelism > 1 && !holdCollation) { if (!pipelineFragment.isHoldParallelism()) { - int parallelism = isUnderMergeSort ? 1 : - context.getParamManager().getInt(ConnectionParams.PARALLELISM); - if (parallelism < 0) { - int shards; - switch (splitInfo.getConcurrencyPolicy()) { - case GROUP_CONCURRENT_BLOCK: - shards = splitInfo.getSplitParallelism(); - break; - case RELAXED_GROUP_CONCURRENT: - shards = splitInfo.getSplitParallelism(); - break; - case CONCURRENT: - shards = splitInfo.getSplitCount(); - break; - default: - // disable parallel query - shards = 1; - break; - } - parallelism = ExecUtils.getParallelismForLogicalView(shards); - } - - if (parallelism >= splitInfo.getSplitCount()) { - parallelism = splitInfo.getSplitCount(); - prefetch = 1 * parallelism; + boolean columnarIndex = + logicalView instanceof OSSTableScan && ((OSSTableScan) logicalView).isColumnarIndex(); + int parallelism = + !columnarIndex ? 
getParallelismForInnodbScan(isUnderMergeSort, splitInfo) : + getParallelismForColumnarScan(isUnderMergeSort, defaultParallelism, parent == null, + splitInfo.getSplitCount()); + if (!columnarIndex && parallelism >= splitInfo.getSplitCount()) { + prefetch = parallelism; } - - if (parallelism == 0) { - // Parallel query is disabled but we have a parallel plan... Very strange... - parallelism = 1; - } - pipelineFragment.setParallelism(parallelism); } } else { @@ -1456,7 +1698,113 @@ } } - return createViewFactory(parent, logicalView, pipelineFragment, spillerFactory, prefetch, isUnderMergeSort); + LogicalViewExecutorFactory logicalViewExecutorFactory = + createViewFactory(parent, logicalView, pipelineFragment, spillerFactory, prefetch, isUnderMergeSort); + + boolean isProbeSideOfJoin = + parent != null && parent instanceof HashJoin && ((HashJoin) parent).getOuter() == logicalView; + boolean useFileConcurrency = + ExecUtils.getQueryConcurrencyPolicy(context, logicalView) == QueryConcurrencyPolicy.FILE_CONCURRENT; + boolean enableScanRandomShuffle = + context.getParamManager().getBoolean(ConnectionParams.ENABLE_SCAN_RANDOM_SHUFFLE); + + boolean localPairWise = + parent != null && parent instanceof HashJoin + && parent.getTraitSet().getPartitionWise().isLocalPartition() + && context.getParamManager().getBoolean(ConnectionParams.ENABLE_LOCAL_PARTITION_WISE_JOIN); + + boolean joinKeepPartition = + parent != null && parent instanceof HashJoin && ((HashJoin) parent).isKeepPartition(); + + // Don't use scan random shuffle when using fragment RF. + if (isProbeSideOfJoin && useFileConcurrency && enableScanRandomShuffle + && splitCountMap.containsKey(logicalView.getLogicalTableName()) + && !localPairWise && pipelineFragment.getFragmentRFManager() == null && !joinKeepPartition) { + + // NOTE: the task number equals the count of workers executing this query. + final int totalFileCount = splitCountMap.get(logicalView.getLogicalTableName()); + final int targetParallelism = pipelineFragment.getParallelism(); + + int shuffleThreshold = context.getParamManager().getInt(ConnectionParams.SCAN_RANDOM_SHUFFLE_THRESHOLD); + + if (totalFileCount >= targetParallelism * taskNumber * shuffleThreshold) { + return logicalViewExecutorFactory; + } + + final List scanOutputTypes = CalciteUtils.getTypes(logicalView.getRowType()); + + // Create local buffer exec. + LocalBufferExecutorFactory bufferExecFactory = + new LocalBufferExecutorFactory(createLocalMemoryManager(), scanOutputTypes, targetParallelism); + + // Create local exchanger exec. 
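+ // (RANDOM mode presumably re-balances scan output evenly across consumers, so a skewed file-split assignment does not skew the join's probe side.)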
+ LocalExchange localExchange = new LocalExchange(scanOutputTypes, ImmutableList.of(), + LocalExchange.LocalExchangeMode.RANDOM, true); + + LocalExchangeConsumerFactory consumerFactory = + new LocalExchangeConsumerFactory(bufferExecFactory, createLocalMemoryManager(), localExchange); + + // Create a pipeline with structure of (producer: SCAN) -> (consumer: EXCHANGER -> CACHE) + PipelineFragment newPipelineFragment = new PipelineFragment(targetParallelism, logicalView); + PipelineFactory newPipelineFactory = new PipelineFactory( + logicalViewExecutorFactory, consumerFactory, newPipelineFragment.setPipelineId(pipelineIdGen++)); + + // Handle dependency between pipelines + pipelineFragment.addDependency(newPipelineFactory.getPipelineId()); + pipelineFragment.addChild(newPipelineFragment); + pipelineFactorys.add(newPipelineFactory); + + return bufferExecFactory; + } + + return logicalViewExecutorFactory; + } + + private int getParallelismForInnodbScan(boolean isUnderMergeSort, SplitInfo splitInfo) { + int parallelism = isUnderMergeSort ? 1 : + context.getParamManager().getInt(ConnectionParams.PARALLELISM); + if (parallelism < 0) { + int shards; + switch (splitInfo.getConcurrencyPolicy()) { + case GROUP_CONCURRENT_BLOCK: + shards = splitInfo.getSplitParallelism(); + break; + case RELAXED_GROUP_CONCURRENT: + shards = splitInfo.getSplitParallelism(); + break; + case CONCURRENT: + shards = splitInfo.getSplitCount(); + break; + default: + // disable parallel query + shards = 1; + break; + } + parallelism = ExecUtils.getParallelismForLogicalView(shards); + } + + if (parallelism >= splitInfo.getSplitCount()) { + parallelism = splitInfo.getSplitCount(); + } + + if (parallelism == 0) { + // Parallel query is disabled but we have a parallel plan... Very strange... + parallelism = 1; + } + + return parallelism; + } + + private int getParallelismForColumnarScan(boolean isUnderMergeSort, int defaultParallelism, boolean noParent, + int fileCount) { + int parallelism = isUnderMergeSort ? 1 : context.getParamManager().getInt(ConnectionParams.PARALLELISM); + if (parallelism < 0) { + parallelism = defaultParallelism > 0 ? 
defaultParallelism : ExecUtils.getParallelismForLocal(context); + } + if (noParent && fileCount > 0) { + parallelism = Math.min(parallelism, fileCount); + } + return parallelism; } private LogicalViewExecutorFactory createViewFactory( @@ -1503,8 +1851,9 @@ private LogicalViewExecutorFactory createViewFactory( break; } } - return new LogicalViewExecutorFactory(logicalView, prefetch, pipelineFragment.getParallelism(), - maxRowCount, bSort, fetched, offset, spillerFactory, bloomFilterExpressionMap, enableRuntimeFilter, + return new LogicalViewExecutorFactory(pipelineFragment, logicalView, prefetch, + pipelineFragment.getParallelism(), + maxRowCount, bSort, sort, fetched, offset, spillerFactory, bloomFilterExpressionMap, enableRuntimeFilter, randomSplits); } @@ -1598,7 +1947,7 @@ private ExecutorFactory visitRuntimeFilterBuild(RuntimeFilterBuilder filterBuild PipelineFragment pipelineFragment) { ExecutorFactory childExecutorFactory = visit(filterBuilder, filterBuilder.getInput(), pipelineFragment); return new RuntimeFilterBuilderExecFactory(filterBuilder, childExecutorFactory, httpClient, - runtimeFilterUpdateUri); + runtimeFilterUpdateUri, localBloomFilter); } private ExecutorFactory visitHashAgg(HashAgg agg, PipelineFragment pipelineFragment) { @@ -1615,8 +1964,9 @@ private ExecutorFactory visitHashAgg(HashAgg agg, PipelineFragment pipelineFragm LocalExchange localExchange = null; if (agg.isPartial()) { if (supportBuffer) { - LocalExchange.LocalExchangeMode mode = pipelineFragment.getParallelism() == 1 ? - LocalExchange.LocalExchangeMode.SINGLE : LocalExchange.LocalExchangeMode.PARTITION; + LocalExchange.LocalExchangeMode mode = + childFragment.getParallelism() == pipelineFragment.getParallelism() ? + LocalExchange.LocalExchangeMode.DIRECT : LocalExchange.LocalExchangeMode.RANDOM; localExchange = new LocalExchange(columns, ImmutableList.of(), mode, true); } else { LocalExchange.LocalExchangeMode mode = pipelineFragment.getParallelism() == 1 ? @@ -1630,8 +1980,22 @@ private ExecutorFactory visitHashAgg(HashAgg agg, PipelineFragment pipelineFragm asyncConsume); pipelineFragment.holdSingleTonParallelism(); } else { - LocalExchange.LocalExchangeMode mode = pipelineFragment.getParallelism() == 1 ? 
- LocalExchange.LocalExchangeMode.SINGLE : LocalExchange.LocalExchangeMode.PARTITION; + int parallelism = pipelineFragment.getParallelism(); + + boolean localPairWise = localPartitionCount > 0 && context.getParamManager() + .getBoolean(ConnectionParams.ENABLE_LOCAL_PARTITION_WISE_JOIN) + && agg.getTraitSet().getPartitionWise().isLocalPartition(); + + LocalExchange.LocalExchangeMode mode; + if (localPairWise && localPartitionCount == parallelism) { + // if parts_per_worker = parallelism * N, N = 1 + mode = LocalExchange.LocalExchangeMode.DIRECT; + } else if (parallelism == 1) { + mode = LocalExchange.LocalExchangeMode.SINGLE; + } else { + mode = LocalExchange.LocalExchangeMode.PARTITION; + } + localExchange = new LocalExchange(columns, agg.getGroupSet().toList(), mode, asyncConsume); } } @@ -1695,10 +2059,10 @@ private PipelineFactory createConsumeSideExchangeFactory( new PipelineFactory( localBufferExecutorFactory, localConsumerFactory, produceFragment.setPipelineId(pipelineIdGen++)); - pipelineFactorys.add(producePipelineFactory); + pipelineFactorys.add(producePipelineFactory); // pipelineFragment.addDependency(consumerPipeFactory.getPipelineId()); - pipelineFragment.addDependency(producePipelineFactory.getPipelineId()); - pipelineFragment.addBufferNodeChild(localBufferNode.getInput().getRelatedId(), produceFragment); + pipelineFragment.addDependency(producePipelineFactory.getPipelineId()); // + pipelineFragment.addBufferNodeChild(localBufferNode.getInput().getRelatedId(), produceFragment); // return producePipelineFactory; } else { //generate child's pipelineFactory diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/LocalHashBucketFunction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/LocalHashBucketFunction.java new file mode 100644 index 000000000..905b48f94 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/LocalHashBucketFunction.java @@ -0,0 +1,40 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.mpp.operator; + +import com.alibaba.polardbx.common.utils.MathUtils; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.utils.ExecUtils; + +import static com.google.common.base.Preconditions.checkState; + +public class LocalHashBucketFunction implements PartitionFunction { + protected final int partitionCount; + protected final boolean isPowerOfTwo; + + public LocalHashBucketFunction(int partitionCount) { + this.partitionCount = partitionCount; + this.isPowerOfTwo = MathUtils.isPowerOfTwo(partitionCount); + } + + @Override + public int getPartition(Chunk page, int position) { + int partition = ExecUtils.partition(page.hashCode(position), partitionCount, isPowerOfTwo); + checkState(partition >= 0 && partition < partitionCount); + return partition; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/OperatorStats.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/OperatorStats.java index 09948f859..7cb49710d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/OperatorStats.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/OperatorStats.java @@ -16,10 +16,10 @@ package com.alibaba.polardbx.executor.mpp.operator; +import com.alibaba.polardbx.executor.mpp.execution.StageId; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.collect.ImmutableList; -import com.alibaba.polardbx.executor.mpp.execution.StageId; import org.apache.calcite.util.trace.RuntimeStatisticsSketch; import javax.annotation.concurrent.Immutable; @@ -32,6 +32,7 @@ public class OperatorStats { private final Optional operatorType; private final int operatorId; private final long outputRowCount; + private final long runtimeFilteredCount; private final long outputBytes; private final double startupDuration; private final double duration; @@ -42,32 +43,35 @@ public class OperatorStats { @JsonCreator public OperatorStats( @JsonProperty("stageId") - Optional stageId, + Optional stageId, @JsonProperty("pipelineId") - int pipelineId, + int pipelineId, @JsonProperty("operatorType") - Optional operatorType, + Optional operatorType, @JsonProperty("operatorId") - int operatorId, + int operatorId, @JsonProperty("outputRowCount") - long outputRowCount, + long outputRowCount, + @JsonProperty("runtimeFilteredCount") + long runtimeFilteredCount, @JsonProperty("outputBytes") - long outputBytes, + long outputBytes, @JsonProperty("startupDuration") - double startupDuration, + double startupDuration, @JsonProperty("duration") - double duration, + double duration, @JsonProperty("memory") - long memory, + long memory, @JsonProperty("instances") - int instances, + int instances, @JsonProperty("spillCnt") - int spillCnt) { + int spillCnt) { this.operatorType = operatorType; this.stageId = stageId; this.pipelineId = pipelineId; this.operatorId = operatorId; this.outputRowCount = outputRowCount; + this.runtimeFilteredCount = runtimeFilteredCount; this.outputBytes = outputBytes; this.startupDuration = startupDuration; this.duration = duration; @@ -91,6 +95,11 @@ public long getOutputRowCount() { return outputRowCount; } + @JsonProperty + public long getRuntimeFilteredCount() { + return runtimeFilteredCount; + } + @JsonProperty public long getOutputBytes() { return outputBytes; @@ -137,6 +146,7 @@ public OperatorStats add(OperatorStats... 
operators) { public OperatorStats add(Iterable operators) { long outputRowCount = this.outputRowCount; + long runtimeFilteredCount = this.runtimeFilteredCount; long outputBytes = this.outputBytes; double startupDuration = this.startupDuration; double duration = this.duration; @@ -145,6 +155,7 @@ public OperatorStats add(Iterable operators) { int spillCnt = 0; for (OperatorStats operator : operators) { outputRowCount += operator.outputRowCount; + runtimeFilteredCount += operator.runtimeFilteredCount; outputBytes += operator.outputBytes; startupDuration += operator.startupDuration; duration += operator.duration; @@ -152,12 +163,12 @@ public OperatorStats add(Iterable operators) { instances += operator.instances; spillCnt += operator.spillCnt; } - return new OperatorStats(stageId, pipelineId, operatorType, operatorId, outputRowCount, outputBytes, - startupDuration, duration, memory, instances, spillCnt); + return new OperatorStats(stageId, pipelineId, operatorType, operatorId, outputRowCount, runtimeFilteredCount, + outputBytes, startupDuration, duration, memory, instances, spillCnt); } public RuntimeStatisticsSketch toSketch() { - return new RuntimeStatisticsSketch(startupDuration, duration, 0, outputRowCount, outputBytes, - memory, instances, spillCnt); + return new RuntimeStatisticsSketch(startupDuration, duration, 0, outputRowCount, runtimeFilteredCount, + outputBytes, memory, instances, spillCnt); } } \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PageBufferClientStatus.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PageBufferClientStatus.java index edf00fecd..e046be122 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PageBufferClientStatus.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PageBufferClientStatus.java @@ -16,9 +16,9 @@ package com.alibaba.polardbx.executor.mpp.operator; +import com.alibaba.polardbx.executor.mpp.metadata.TaskLocation; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -import com.alibaba.polardbx.executor.mpp.metadata.TaskLocation; import org.joda.time.DateTime; import java.util.OptionalInt; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PartitionFunction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PartitionFunction.java index 6d4017cef..e8383b73d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PartitionFunction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PartitionFunction.java @@ -19,7 +19,6 @@ import com.alibaba.polardbx.executor.chunk.Chunk; public interface PartitionFunction { - int getPartitionCount(); int getPartition(Chunk page, int position); } \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PartitionedOutputCollector.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PartitionedOutputCollector.java index c2f2b08ae..175f3cd78 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PartitionedOutputCollector.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PartitionedOutputCollector.java @@ -16,6 +16,8 @@ package com.alibaba.polardbx.executor.mpp.operator; +import 
com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.MathUtils; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import com.google.common.util.concurrent.Futures; @@ -34,8 +36,11 @@ import com.alibaba.polardbx.optimizer.core.datatype.DataType; import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; import java.util.List; import java.util.Random; +import java.util.Set; import static com.google.common.base.Preconditions.checkState; import static java.util.Objects.requireNonNull; @@ -48,7 +53,10 @@ public class PartitionedOutputCollector extends OutputCollector { public PartitionedOutputCollector( int partitionCount, + List prunePartitions, + int fullPartCount, List sourceTypes, + boolean remotePairWise, List outputType, List partitionChannels, OutputBuffer outputBuffer, @@ -56,10 +64,13 @@ public PartitionedOutputCollector( int chunkLimit, ExecutionContext context) { this.partitionPartitioner = new PagePartitioner( partitionCount, + prunePartitions, + fullPartCount, partitionChannels, outputBuffer, serdeFactory, sourceTypes, + remotePairWise, outputType, chunkLimit, context); @@ -109,7 +120,7 @@ public boolean needsInput() { private static class PagePartitioner { protected final OutputBuffer outputBuffer; protected final List outputType; - protected final PartitionFunction partitionFunction; + protected final RemotePartitionFunction partitionFunction; protected final List partitionChannels; //indexes of the shuffle columns protected final PagesSerde serde; @@ -123,31 +134,63 @@ private static class PagePartitioner { private List[] chunkArraylist; protected List pageBuilders; + /** + * There are int arrays with size = chunk_limit, one for each degree of parallelism. + */ + private final int[][] partitionSelections; + private final int[] selSizes; + private final int[] partitionPositions; + public PagePartitioner( int partitionCount, + List prunePartitions, + int fullPartCount, List partitionChannels, OutputBuffer outputBuffer, PagesSerdeFactory serdeFactory, List sourceTypes, + boolean remotePairWise, List outputType, int chunkLimit, ExecutionContext context) { this.converter = Converters.createChunkConverter(sourceTypes, outputType, context); if (partitionCount == 1) { - this.partitionFunction = new SingleBucketFunction(); + this.partitionFunction = new SinglePartitionFunction(); } else if (partitionChannels.size() > 0) { - this.partitionFunction = new HashPartitionFunction(partitionCount, partitionChannels); + if (remotePairWise) { + if (prunePartitions == null || prunePartitions.isEmpty()) { + boolean enableCompatible = + context.getParamManager().getBoolean(ConnectionParams.ENABLE_PAIRWISE_SHUFFLE_COMPATIBLE); + this.partitionFunction = + new PairWisePartitionFunction(partitionCount, fullPartCount, partitionChannels, + enableCompatible); + } else { + boolean enableCompatible = + context.getParamManager().getBoolean(ConnectionParams.ENABLE_PAIRWISE_SHUFFLE_COMPATIBLE); + this.partitionFunction = + new PrunedPairWisePartitionFunction(partitionCount, fullPartCount, + partitionChannels, new HashSet<>(prunePartitions), enableCompatible); + } + } else { + this.partitionFunction = + new HashPartitionFunction(partitionCount, partitionChannels); + } } else { - this.partitionFunction = new RandomBucketFunction(partitionCount); + this.partitionFunction = new RandomPartitionFunction(partitionCount); } this.partitionChannels = requireNonNull(partitionChannels, "partitionChannels is null"); this.outputBuffer = 
requireNonNull(outputBuffer, "outputBuffer is null"); this.outputType = requireNonNull(outputType, "sourceTypes is null"); - this.serde = requireNonNull(serdeFactory, "serdeFactory is null").createPagesSerde(outputType); + this.serde = requireNonNull(serdeFactory, "serdeFactory is null") + .createPagesSerde(outputType, context); this.chunkLimit = chunkLimit; this.context = context; this.partitionCount = partitionCount; + + this.partitionSelections = new int[partitionCount][chunkLimit]; + this.selSizes = new int[partitionCount]; + this.partitionPositions = new int[chunkLimit]; } public void init() { @@ -168,18 +211,66 @@ public void init() { } public ListenableFuture partitionPage(Chunk chunk) { - requireNonNull(chunk, "chunk is null"); + requireNonNull(chunk); boolean sendChunkArraylist = false; Chunk convertPage = converter.apply(chunk); + + // clear selSize + Arrays.fill(selSizes, 0); + + // collect partition -> selSize -> positions[] for (int position = 0; position < convertPage.getPositionCount(); position++) { int partition = partitionFunction.getPartition(chunk, position); + // partition == -1 means the partition function is a pruned pairwise partition function + // and this record can be discarded + if (partition == -1) { + continue; + } + partitionSelections[partition][selSizes[partition]] = position; + selSizes[partition] = selSizes[partition] + 1; + } + + for (int partition = 0; partition < selSizes.length; partition++) { + final int partitionSize = selSizes[partition]; + int[] partitionSelection = partitionSelections[partition]; ChunkBuilder pageBuilder = pageBuilders.get(partition); - pageBuilder.declarePosition(); - for (int channel = 0; channel < outputType.size(); channel++) { - pageBuilder.appendTo(convertPage.getBlock(channel), channel, position); + final int writablePositions = chunkLimit - pageBuilder.getDeclarePosition(); + if (partitionSize > writablePositions) { + // declarePosition -> chunkLimit -> declarePosition + partitionSize + + pageBuilder.updateDeclarePosition(writablePositions); + for (int channel = 0; channel < outputType.size(); channel++) { + pageBuilder.appendTo(convertPage.getBlock(channel), channel, partitionSelection, 0, + writablePositions); + } + + // check full + if (pageBuilder.isFull()) { + sendChunkArraylist = true; + Chunk pageBucket = pageBuilder.build(); + this.chunkArraylist[partition].add(pageBucket); + pageBuilder.reset(); + } + + pageBuilder.updateDeclarePosition(partitionSize - writablePositions); + for (int channel = 0; channel < outputType.size(); channel++) { + pageBuilder.appendTo(convertPage.getBlock(channel), channel, partitionSelection, + writablePositions, partitionSize - writablePositions); + } + + } else { + + // declarePosition -> declarePosition + partitionSize -> chunkLimit + pageBuilder.updateDeclarePosition(partitionSize); + for (int channel = 0; channel < outputType.size(); channel++) { + pageBuilder.appendTo(convertPage.getBlock(channel), channel, partitionSelection, 0, + partitionSize); + } } + + // check full if (pageBuilder.isFull()) { sendChunkArraylist = true; Chunk pageBucket = pageBuilder.build(); @@ -187,6 +278,7 @@ public ListenableFuture partitionPage(Chunk chunk) { pageBuilder.reset(); } } + if (sendChunkArraylist) { return flush(chunkArraylist); } else { @@ -250,15 +342,20 @@ public ListenableFuture flush(boolean force) { } } - public static class HashPartitionFunction implements PartitionFunction { - private final int partitionCount; - private final List partitionChannels; - private final boolean isPowerOfTwo; + public 
static class HashPartitionFunction implements RemotePartitionFunction { + + protected final int partitionCount; + protected final int[] partitionChannelArray; + protected final boolean isPowerOfTwo; public HashPartitionFunction(int partitionCount, List partitionChannels) { this.partitionCount = partitionCount; - this.partitionChannels = partitionChannels; - this.isPowerOfTwo = ExecUtils.isPowerOfTwo(partitionCount); + this.partitionChannelArray = new int[partitionChannels.size()]; + for (int i = 0; i < partitionChannels.size(); i++) { + partitionChannelArray[i] = partitionChannels.get(i); + } + + this.isPowerOfTwo = MathUtils.isPowerOfTwo(partitionCount); } @Override @@ -268,40 +365,75 @@ public int getPartitionCount() { @Override public int getPartition(Chunk page, int position) { - int hashCode = 0; - for (int i = 0; i < partitionChannels.size(); i++) { - hashCode = hashCode * 31 + page.getBlock(partitionChannels.get(i)).hashCode(position); + long hashCode = 0; + for (int i = 0; i < partitionChannelArray.length; i++) { + hashCode = hashCode * 31 + page.getBlock(partitionChannelArray[i]).hashCodeUseXxhash(position); } - int partition = ExecUtils.partition(hashCode, partitionCount, isPowerOfTwo); + int partition = ExecUtils.directPartition(hashCode, partitionCount, isPowerOfTwo); checkState(partition >= 0 && partition < partitionCount); return partition; } } - public static class HashBucketFunction implements PartitionFunction { - private final int partitionCount; - private final boolean isPowerOfTwo; + public static class PairWisePartitionFunction extends HashPartitionFunction { - public HashBucketFunction(int partitionCount) { - this.partitionCount = partitionCount; - this.isPowerOfTwo = ExecUtils.isPowerOfTwo(partitionCount); - } + protected final int fullPartCount; - @Override - public int getPartitionCount() { - return partitionCount; + protected final boolean isFullPartPowerOfTwo; + + protected final boolean enableCompatible; + + public PairWisePartitionFunction(int partitionCount, int fullPartCount, List partitionChannels, + boolean enableCompatible) { + super(partitionCount, partitionChannels); + this.fullPartCount = fullPartCount; + this.isFullPartPowerOfTwo = MathUtils.isPowerOfTwo(fullPartCount); + this.enableCompatible = enableCompatible; } @Override public int getPartition(Chunk page, int position) { - int partition = ExecUtils.partition(page.hashCode(position), partitionCount, isPowerOfTwo); + long hashCode = 0; + for (int i = 0; i < partitionChannelArray.length; i++) { + hashCode = hashCode * 31 + page.getBlock(partitionChannelArray[i]) + .hashCodeUnderPairWise(position, enableCompatible); + } + + int partition = + ExecUtils.partitionUnderPairWise(hashCode, partitionCount, fullPartCount, isFullPartPowerOfTwo); checkState(partition >= 0 && partition < partitionCount); return partition; } } - private static class SingleBucketFunction implements PartitionFunction { + public static class PrunedPairWisePartitionFunction extends PairWisePartitionFunction { + private final Set prunePartitions; + + public PrunedPairWisePartitionFunction(int partitionCount, int fullPartCount, List partitionChannels, + Set prunePartitions, boolean enableCompatible) { + super(partitionCount, fullPartCount, partitionChannels, enableCompatible); + this.prunePartitions = prunePartitions; + } + + @Override + public int getPartition(Chunk page, int position) { + long hashCode = 0; + for (int i = 0; i < partitionChannelArray.length; i++) { + hashCode = hashCode * 31 + page.getBlock(partitionChannelArray[i]) 
+ .hashCodeUnderPairWise(position, enableCompatible); + } + + int storagePartNum = ExecUtils.calcStoragePartNum(hashCode, fullPartCount, isFullPartPowerOfTwo); + if (prunePartitions.contains(storagePartNum)) { + return -1; + } else { + return storagePartNum % partitionCount; + } + } + } + + private static class SinglePartitionFunction implements RemotePartitionFunction { @Override public int getPartitionCount() { @@ -314,11 +446,11 @@ public int getPartition(Chunk page, int position) { } } - private static class RandomBucketFunction implements PartitionFunction { + private static class RandomPartitionFunction implements RemotePartitionFunction { private final int partitionCount; private final Random random; - public RandomBucketFunction(int partitionCount) { + public RandomPartitionFunction(int partitionCount) { this.random = new Random(); this.partitionCount = partitionCount; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PartitioningBucketExchanger.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PartitioningBucketExchanger.java index 1d7fa9054..eacbbacaa 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PartitioningBucketExchanger.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PartitioningBucketExchanger.java @@ -16,17 +16,17 @@ package com.alibaba.polardbx.executor.mpp.operator; -import com.google.common.collect.ImmutableList; +import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.executor.chunk.Block; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.chunk.ChunkBuilder; import com.alibaba.polardbx.executor.chunk.ChunkConverter; import com.alibaba.polardbx.executor.chunk.Converters; import com.alibaba.polardbx.executor.mpp.execution.buffer.OutputBufferMemoryManager; -import com.alibaba.polardbx.executor.mpp.operator.PartitionedOutputCollector.HashBucketFunction; import com.alibaba.polardbx.executor.operator.ConsumerExecutor; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.google.common.collect.ImmutableList; import it.unimi.dsi.fastutil.ints.IntArrayList; import it.unimi.dsi.fastutil.ints.IntList; @@ -35,7 +35,7 @@ import java.util.concurrent.atomic.AtomicBoolean; public class PartitioningBucketExchanger extends LocalExchanger { - private final HashBucketFunction bucketGenerator; + private final LocalHashBucketFunction bucketGenerator; private final List partitionChannels; private final List consumings; private final ChunkConverter keyConverter; @@ -70,7 +70,7 @@ public PartitioningBucketExchanger(OutputBufferMemoryManager bufferMemoryManager this.keyConverter = Converters.createChunkConverter(columnIndex, types, keyTypes, context); } this.totalBucketNum = executors.size() * bucketNum; - this.bucketGenerator = new HashBucketFunction(totalBucketNum); + this.bucketGenerator = new LocalHashBucketFunction(totalBucketNum); this.context = context; this.chunkLimit = chunkLimit; this.bucketNum = bucketNum; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PartitioningExchanger.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PartitioningExchanger.java index 274c6f88f..12dc77d15 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PartitioningExchanger.java +++ 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PartitioningExchanger.java @@ -23,37 +23,81 @@ import com.alibaba.polardbx.executor.chunk.ChunkConverter; import com.alibaba.polardbx.executor.chunk.Converters; import com.alibaba.polardbx.executor.mpp.execution.buffer.OutputBufferMemoryManager; -import com.alibaba.polardbx.executor.mpp.operator.PartitionedOutputCollector.HashBucketFunction; import com.alibaba.polardbx.executor.operator.ConsumerExecutor; +import com.alibaba.polardbx.executor.operator.util.ObjectPools; +import com.alibaba.polardbx.executor.utils.ExecUtils; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import it.unimi.dsi.fastutil.ints.IntArrayList; -import it.unimi.dsi.fastutil.ints.IntList; +import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Random; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; +import java.util.stream.IntStream; public class PartitioningExchanger extends LocalExchanger { - private final HashBucketFunction partitionGenerator; + private final LocalHashBucketFunction partitionGenerator; private final List<Integer> partitionChannels; private List<DataType> types; private final List<AtomicBoolean> consumings; private ChunkConverter keyConverter; private ExecutionContext context; + private List<ChunkBuilder> chunkBuildersPrimary; + + /** + * Used to stash overflow records when batching is enabled. + */ + private List<ChunkBuilder> chunkBuildersBackUp; + + private final boolean enableBatch; + + private final int chunkLimit; + + /** + * There is one int array of size chunkLimit for each degree of parallelism. + */ + private final int[][] partitionSelections; + private final int[] selSizes; + private final boolean optimizePartition; + + private final ObjectPools objectPools; + private final boolean shouldRecycle; + + // for random order + private final List<Integer> randomOrderList; + + private boolean chunkExchange; + + /** + * Used by partition-wise mode. + */ + private Map<Integer, Integer> partCounter = new HashMap<>(); + public PartitioningExchanger(OutputBufferMemoryManager bufferMemoryManager, List<ConsumerExecutor> executors, LocalExchangersStatus status, boolean asyncConsume, List<DataType> types, List<Integer> partitionChannels, List<DataType> keyTargetTypes, - ExecutionContext context) { + ExecutionContext context, + boolean chunkExchange) { super(bufferMemoryManager, executors, status, asyncConsume); + + // for random order.
+ this.randomOrderList = new ArrayList<>(); + for (int i = 0; i < executors.size(); i++) { + randomOrderList.add(i); + } + this.types = types; this.context = context; - this.partitionGenerator = new HashBucketFunction(executors.size()); + this.partitionGenerator = new LocalHashBucketFunction(executors.size()); this.partitionChannels = partitionChannels; this.consumings = status.getConsumings(); if (keyTargetTypes.isEmpty()) { @@ -65,11 +109,158 @@ public PartitioningExchanger(OutputBufferMemoryManager bufferMemoryManager, List } this.keyConverter = Converters.createChunkConverter(columnIndex, types, keyTargetTypes, context); } + + this.chunkLimit = context.getParamManager().getInt(ConnectionParams.CHUNK_SIZE); + + // chunk exchange mode does not need to batch local exchange results + this.enableBatch = + context.getParamManager().getBoolean(ConnectionParams.ENABLE_LOCAL_EXCHANGE_BATCH) && !chunkExchange; + + this.partitionSelections = new int[executors.size()][chunkLimit]; + this.selSizes = new int[executors.size()]; + this.optimizePartition = + context.getParamManager().getBoolean(ConnectionParams.ENABLE_EXCHANGE_PARTITION_OPTIMIZATION); + + this.shouldRecycle = context.getParamManager().getBoolean(ConnectionParams.ENABLE_DRIVER_OBJECT_POOL); + this.objectPools = ObjectPools.create(); + this.chunkBuildersPrimary = IntStream.range(0, executors.size()).boxed() + .map(i -> new ChunkBuilder(types, chunkLimit, context, objectPools)).collect(Collectors.toList()); + this.chunkBuildersBackUp = IntStream.range(0, executors.size()).boxed() + .map(i -> new ChunkBuilder(types, chunkLimit, context, objectPools)).collect(Collectors.toList()); + this.chunkExchange = chunkExchange; } @Override public void consumeChunk(Chunk chunk) { - IntList[] partitionAssignments = new IntList[executors.size()]; + // build a page for each partition + Map<Integer, Chunk> partitionChunks; + if (chunkExchange && chunk.getPartIndex() > -1 && chunk.getPartCount() > 0) { + partitionChunks = chunkExchange(chunk); + // don't recycle because the chunk is cached. + + } else if (optimizePartition) { + partitionChunks = tupleExchangeWithoutAllocation(chunk); + + // should recycle + if (shouldRecycle) { + chunk.recycle(); + } + + } else { + partitionChunks = tupleExchange(chunk); + + // should recycle + if (shouldRecycle) { + chunk.recycle(); + } + } + + consumePartitionChunk(partitionChunks); + } + + @Override + public void closeConsume(boolean force) { + if (objectPools != null) { + objectPools.clear(); + } + super.closeConsume(force); + } + + private void consumePartitionChunk(Map<Integer, Chunk> partitionChunks) { + if (partitionChunks == null) { + return; + } + + // random order to avoid lock races.
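+ // Illustration (hypothetical sizes): with four consumers, one producer thread may
+ // visit them in the order [2, 0, 3, 1] while another visits [1, 3, 0, 2]; since the
+ // synchronous branch below guards each consumer with a compareAndSet flag, disjoint
+ // visit orders keep concurrent producers from spinning on the same AtomicBoolean.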
+ Collections.shuffle(randomOrderList); + + if (asyncConsume) { + for (int i = 0; i < randomOrderList.size(); i++) { + int partition = randomOrderList.get(i); + Chunk result; + if ((result = partitionChunks.get(partition)) != null) { + executors.get(partition).consumeChunk(result); + } + } + } else { + for (int i = 0; i < randomOrderList.size(); i++) { + int partition = randomOrderList.get(i); + Chunk result; + if ((result = partitionChunks.get(partition)) != null) { + AtomicBoolean consuming = consumings.get(partition); + while (true) { + if (consuming.compareAndSet(false, true)) { + try { + executors.get(partition).consumeChunk(result); + } finally { + consuming.set(false); + } + break; + } + } + } + } + } + } + + protected Map chunkExchange(Chunk chunk) { + Map partitionChunks = new HashMap<>(); + int partitionNum = chunk.getPartIndex(); + int consumeNum = executors.size(); + int counter = partCounter.getOrDefault(partitionNum, 0); + partCounter.put(partitionNum, counter + 1); + int executorSeq = + ExecUtils.assignPartitionToExecutor(counter, chunk.getPartCount(), partitionNum, consumeNum); + partitionChunks.put(executorSeq, chunk); + return partitionChunks; + } + + protected Map tupleExchangeWithoutAllocation(Chunk chunk) { + for (int partition = 0; partition < executors.size(); partition++) { + selSizes[partition] = 0; + } + + // assign each row to a partition + Chunk keyChunk; + if (keyConverter == null) { + keyChunk = getPartitionFunctionArguments(chunk); + } else { + keyChunk = keyConverter.apply(chunk); + } + + for (int position = 0; position < keyChunk.getPositionCount(); position++) { + int partition = partitionGenerator.getPartition(keyChunk, position); + partitionSelections[partition][selSizes[partition]] = position; + selSizes[partition] = selSizes[partition] + 1; + } + + if (!enableBatch) { + Map partitionChunks = new HashMap<>(); + for (int partition = 0; partition < executors.size(); partition++) { + final int[] positions = partitionSelections[partition]; + final int selSize = selSizes[partition]; + if (selSize > 0) { + ChunkBuilder builder = new ChunkBuilder(types, selSize, context, objectPools); + writeToChunkBuilder(builder, positions, selSize, chunk); + Chunk partitionedChunk = builder.build(); + partitionChunks.put(partition, partitionedChunk); + } + } + return partitionChunks; + } else { + boolean chunkIsFull = writeToChunkBuilder(executors.size(), partitionSelections, selSizes, chunk); + if (chunkIsFull) { + Map partitionChunks = buildPartitionChunk(true); + swapPrimaryAndBackUp(); + return partitionChunks; + } else { + return null; + } + } + } + + protected Map tupleExchange(Chunk chunk) { + IntArrayList[] partitionAssignments = new IntArrayList[executors.size()]; for (int i = 0; i < partitionAssignments.length; i++) { partitionAssignments[i] = new IntArrayList(); } @@ -86,51 +277,138 @@ public void consumeChunk(Chunk chunk) { partitionAssignments[partition].add(position); } - final boolean enableDelay = - context.getParamManager().getBoolean(ConnectionParams.ENABLE_OSS_DELAY_MATERIALIZATION_ON_EXCHANGE); - - // build a page for each partition - Map partitionChunks = new HashMap<>(); - for (int partition = 0; partition < executors.size(); partition++) { - List positions = partitionAssignments[partition]; - if (!positions.isEmpty()) { - ChunkBuilder builder = new ChunkBuilder(types, positions.size(), context); - Chunk partitionedChunk; - if (enableDelay) { - partitionedChunk = builder.fromPartition(positions, chunk); - } else { - for (Integer pos : positions) { - 
builder.declarePosition(); - for (int i = 0; i < chunk.getBlockCount(); i++) { - builder.appendTo(chunk.getBlock(i), i, pos); - } - } - partitionedChunk = builder.build(); + if (!enableBatch) { + Map<Integer, Chunk> partitionChunks = new HashMap<>(); + for (int partition = 0; partition < executors.size(); partition++) { + IntArrayList positions = partitionAssignments[partition]; + if (!positions.isEmpty()) { + ChunkBuilder builder = new ChunkBuilder(types, positions.size(), context, objectPools); + writeToChunkBuilder(builder, positions, chunk); + Chunk partitionedChunk = builder.build(); + partitionChunks.put(partition, partitionedChunk); } - partitionChunks.put(partition, partitionedChunk); + } + return partitionChunks; + } else { + boolean chunkIsFull = false; + for (int partition = 0; partition < executors.size(); partition++) { + IntArrayList positions = partitionAssignments[partition]; + if (!positions.isEmpty()) { + chunkIsFull |= writeToChunkBuilder(partition, positions, chunk); + } + } + if (chunkIsFull) { + Map<Integer, Chunk> partitionChunks = buildPartitionChunk(true); + swapPrimaryAndBackUp(); + return partitionChunks; + } else { + return null; } } - if (asyncConsume) { - for (Map.Entry<Integer, Chunk> entry : partitionChunks.entrySet()) { - int partition = entry.getKey(); - executors.get(partition).consumeChunk(entry.getValue()); + } + + private void writeToChunkBuilder(ChunkBuilder builder, IntArrayList positions, Chunk chunk) { + for (int i = 0; i < chunk.getBlockCount(); i++) { + builder.appendTo(chunk.getBlock(i), i, positions.elements(), 0, positions.size()); + } + builder.updateDeclarePosition(positions.size()); + } + + private boolean writeToChunkBuilder(Integer partition, IntArrayList positions, Chunk chunk) { + ChunkBuilder builder = chunkBuildersPrimary.get(partition); + int resetCount = chunkLimit - builder.getDeclarePosition(); + int arrayListIndex = 0; + int primaryLimit = Math.min(resetCount, positions.size()); + // write to primary chunk builder + for (; arrayListIndex < primaryLimit; arrayListIndex++) { + int pos = positions.getInt(arrayListIndex); + builder.declarePosition(); + for (int i = 0; i < chunk.getBlockCount(); i++) { + builder.appendTo(chunk.getBlock(i), i, pos); } - } else { - for (Map.Entry<Integer, Chunk> entry : partitionChunks.entrySet()) { - int partition = entry.getKey(); - AtomicBoolean consuming = consumings.get(partition); - while (true) { - if (consuming.compareAndSet(false, true)) { - try { - executors.get(partition).consumeChunk(entry.getValue()); - } finally { - consuming.set(false); - } - break; + } + + // if the primary chunk builder is full, write the rest to the backup builder + builder = chunkBuildersBackUp.get(partition); + for (; arrayListIndex < positions.size(); arrayListIndex++) { + int pos = positions.getInt(arrayListIndex); + builder.declarePosition(); + for (int i = 0; i < chunk.getBlockCount(); i++) { + builder.appendTo(chunk.getBlock(i), i, pos); + } + } + + return resetCount <= positions.size(); + } + + private void swapPrimaryAndBackUp() { + List<ChunkBuilder> tmp = chunkBuildersPrimary; + chunkBuildersPrimary = chunkBuildersBackUp; + chunkBuildersBackUp = tmp; + } + + private void writeToChunkBuilder(ChunkBuilder builder, int[] positions, int selSize, Chunk chunk) { + for (int i = 0; i < chunk.getBlockCount(); i++) { + builder.appendTo(chunk.getBlock(i), i, positions, 0, selSize); + } + builder.updateDeclarePosition(selSize); + } + + private boolean writeToChunkBuilder(Integer partitions, int[][] partitionAssignments, int[] selSizes, Chunk chunk) { + boolean chunkIsFull = false; + + // partition-first
loop: fill each partition's primary builder, then spill the rest to its backup + for (int partition = 0; partition < partitions; partition++) { + + final int[] positions = partitionAssignments[partition]; + final int selSize = selSizes[partition]; + + if (selSize > 0) { + ChunkBuilder builder = chunkBuildersPrimary.get(partition); + final int resetCount = chunkLimit - builder.getDeclarePosition(); + final int primaryLimit = Math.min(resetCount, selSize); + + // update chunk builder position + builder.updateDeclarePosition(primaryLimit); + + // write to primary chunk builder + for (int blockIndex = 0; blockIndex < chunk.getBlockCount(); blockIndex++) { + builder.appendTo(chunk.getBlock(blockIndex), blockIndex, positions, 0, primaryLimit); + } + + // if the primary chunk builder is full, write the rest to the backup builder + if (primaryLimit < selSize) { + builder = chunkBuildersBackUp.get(partition); + + // update chunk builder position + builder.updateDeclarePosition(selSize - primaryLimit); + + for (int blockIndex = 0; blockIndex < chunk.getBlockCount(); blockIndex++) { + builder.appendTo(chunk.getBlock(blockIndex), blockIndex, positions, primaryLimit, + selSize - primaryLimit); } + } + + chunkIsFull |= (primaryLimit < selSize); } + } + + return chunkIsFull; + } + + private Map<Integer, Chunk> buildPartitionChunk(boolean reset) { + Map<Integer, Chunk> partitionChunks = new HashMap<>(); + for (int idx = 0; idx < chunkBuildersPrimary.size(); ++idx) { + if (!chunkBuildersPrimary.get(idx).isEmpty()) { + partitionChunks.put(idx, chunkBuildersPrimary.get(idx).build()); + } + } + if (reset) { + chunkBuildersPrimary.forEach(ChunkBuilder::reset); + } + return partitionChunks; } private Chunk getPartitionFunctionArguments(Chunk page) { @@ -140,4 +418,15 @@ private Chunk getPartitionFunctionArguments(Chunk page) { } return new Chunk(page.getPositionCount(), blocks); } + + @Override + public void buildConsume() { + if (enableBatch) { + Map<Integer, Chunk> partitionChunks = buildPartitionChunk(false); + consumePartitionChunk(partitionChunks); + forceBuildSynchronize(); + } else { + super.buildConsume(); + } + } } \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PipelineDepTree.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PipelineDepTree.java index 99dbe23a4..60c2d12c1 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PipelineDepTree.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/PipelineDepTree.java @@ -16,6 +16,7 @@ package com.alibaba.polardbx.executor.mpp.operator; +import com.alibaba.polardbx.optimizer.planmanager.PlanManagerUtil; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.SettableFuture; @@ -61,6 +62,7 @@ public PipelineDepTree(List pipelineFactories) { TreeNode treeNode = getOrCreateTreeNode(pipeline.getPipelineId()); treeNode.setParallelism(pipeline.getParallelism()); treeNode.setBuildDepOnAllConsumers(pipeline.getFragment().isBuildDepOnAllConsumers()); + for (Integer dependChild : pipeline.getDependency()) { TreeNode dependChildNode = getOrCreateTreeNode(dependChild); treeNode.addDependChild(dependChildNode); @@ -127,7 +129,11 @@ public synchronized void pipelineFinish(int id) { } - protected static class TreeNode { + public int size() { + return nodeIndex.size(); + } + + public static class TreeNode { private TreeNode parent; private Set<TreeNode> dependChildren = new HashSet<>(); private Set<TreeNode> children = new HashSet<>(); @@ -217,6 +223,10 @@ public void
setChildrenFuture(ListenableFuture childrenFuture) { this.childrenFuture = childrenFuture; } + public int getId() { + return id; + } + @Override public boolean equals(Object o) { if (this == o) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/RandomExchanger.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/RandomExchanger.java index 62c8e0c2b..57fe8a49b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/RandomExchanger.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/RandomExchanger.java @@ -20,24 +20,41 @@ import com.alibaba.polardbx.executor.mpp.execution.buffer.OutputBufferMemoryManager; import com.alibaba.polardbx.executor.operator.ConsumerExecutor; +import java.util.Collections; import java.util.List; import java.util.Random; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; +import java.util.stream.IntStream; public class RandomExchanger extends LocalExchanger { private final List consumings; private final Random random; + private int nextIndex; + + private final boolean useRoundRobin; + + private final List randomOrderList; + public RandomExchanger(OutputBufferMemoryManager bufferMemoryManager, List executors, - LocalExchangersStatus status, boolean asyncConsume) { + LocalExchangersStatus status, boolean asyncConsume, int index, boolean roundRobin) { super(bufferMemoryManager, executors, status, asyncConsume); this.consumings = status.getConsumings(); this.random = new Random(executors.size()); + this.randomOrderList = IntStream.range(0, executors.size()).boxed().collect(Collectors.toList()); + Collections.shuffle(randomOrderList); + this.nextIndex = index; + this.useRoundRobin = roundRobin; } @Override public void consumeChunk(Chunk chunk) { - int randomIndex = executors.size() > 1 ? random.nextInt(executors.size()) : 0; + int randomIndex = 0; + if (executors.size() > 1) { + randomIndex = + useRoundRobin ? randomOrderList.get(nextIndex++ % executors.size()) : random.nextInt(executors.size()); + } if (asyncConsume) { executors.get(randomIndex).consumeChunk(chunk); } else { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/RemotePartitionFunction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/RemotePartitionFunction.java new file mode 100644 index 000000000..134ba201b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/RemotePartitionFunction.java @@ -0,0 +1,21 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.mpp.operator; + +public interface RemotePartitionFunction extends PartitionFunction { + int getPartitionCount(); +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/TaskOutputCollector.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/TaskOutputCollector.java index d9dd3a7f7..b07016606 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/TaskOutputCollector.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/TaskOutputCollector.java @@ -16,8 +16,6 @@ package com.alibaba.polardbx.executor.mpp.operator; -import com.google.common.collect.Lists; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.chunk.ChunkConverter; import com.alibaba.polardbx.executor.chunk.Converters; @@ -26,6 +24,8 @@ import com.alibaba.polardbx.executor.mpp.execution.buffer.PagesSerde; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.ListenableFuture; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/TaskStats.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/TaskStats.java index 73f59f779..eda82d1c0 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/TaskStats.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/TaskStats.java @@ -30,6 +30,7 @@ package com.alibaba.polardbx.executor.mpp.operator; import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonFormat; import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; @@ -37,6 +38,7 @@ import javax.annotation.Nullable; import java.util.List; +import java.util.Map; import java.util.Set; import static com.google.common.base.Preconditions.checkArgument; @@ -53,9 +55,9 @@ public static TaskStats getEmptyTaskStats() { private final DateTime firstStartTime; private final DateTime endTime; - private final long elapsedTime; - private final long queuedTime; - private final long deliveryTime; + private final long elapsedTimeMillis; + private final long queuedTimeMillis; + private final long deliveryTimeMillis; private final int totalPipelineExecs; private final int queuedPipelineExecs; @@ -66,10 +68,10 @@ public static TaskStats getEmptyTaskStats() { private final long memoryReservation; private final long peakMemory; - private final long totalScheduledTime; - private final long totalCpuTime; - private final long totalUserTime; - private final long totalBlockedTime; + private final long totalScheduledTimeNanos; + private final long totalCpuTimeNanos; + private final long totalUserTimeNanos; + private final long totalBlockedTimeNanos; private final boolean fullyBlocked; private final Set blockedReasons; @@ -80,6 +82,8 @@ public static TaskStats getEmptyTaskStats() { private final long outputPositions; private final List operatorStats; + private final List driverStats; + private final Map> pipelineDeps; public TaskStats(DateTime createTime, DateTime endTime, long size) { this(createTime, @@ -105,7 +109,9 @@ public TaskStats(DateTime createTime, DateTime endTime, 
long size) { 0, size, 0, - ImmutableList.of()); + ImmutableList.of(), + ImmutableList.of(), + null); } public TaskStats(DateTime createTime, DateTime endTime) { @@ -132,65 +138,71 @@ public TaskStats(DateTime createTime, DateTime endTime) { 0, 0, 0, - ImmutableList.of()); + ImmutableList.of(), + ImmutableList.of(), + null); } @JsonCreator public TaskStats( @JsonProperty("createTime") - DateTime createTime, + DateTime createTime, @JsonProperty("firstStartTime") - DateTime firstStartTime, + DateTime firstStartTime, @JsonProperty("endTime") - DateTime endTime, - @JsonProperty("elapsedTime") - long elapsedTime, + DateTime endTime, + @JsonProperty("elapsedTimeMillis") + long elapsedTimeMillis, - @JsonProperty("queuedTime") - long queuedTime, + @JsonProperty("queuedTimeMillis") + long queuedTimeMillis, - @JsonProperty("deliveryTime") - long deliveryTime, + @JsonProperty("deliveryTimeMillis") + long deliveryTimeMillis, @JsonProperty("totalPipelineExecs") - int totalPipelineExecs, + int totalPipelineExecs, @JsonProperty("queuedPipelineExecs") - int queuedPipelineExecs, + int queuedPipelineExecs, @JsonProperty("runningPipelineExecs") - int runningPipelineExecs, + int runningPipelineExecs, @JsonProperty("completedPipelineExecs") - int completedPipelineExecs, + int completedPipelineExecs, @JsonProperty("cumulativeMemory") - double cumulativeMemory, + double cumulativeMemory, @JsonProperty("memoryReservation") - long memoryReservation, + long memoryReservation, @JsonProperty("peakMemory") - long peakMemory, - @JsonProperty("totalScheduledTime") - long totalScheduledTime, - @JsonProperty("totalCpuTime") - long totalCpuTime, - @JsonProperty("totalUserTime") - long totalUserTime, - @JsonProperty("totalBlockedTime") - long totalBlockedTime, + long peakMemory, + @JsonProperty("totalScheduledTimeNanos") + long totalScheduledTimeNanos, + @JsonProperty("totalCpuTimeNanos") + long totalCpuTimeNanos, + @JsonProperty("totalUserTimeNanos") + long totalUserTimeNanos, + @JsonProperty("totalBlockedTimeNanos") + long totalBlockedTimeNanos, @JsonProperty("fullyBlocked") - boolean fullyBlocked, + boolean fullyBlocked, @JsonProperty("blockedReasons") - Set blockedReasons, + Set blockedReasons, @JsonProperty("processedInputDataSize") - long processedInputDataSize, + long processedInputDataSize, @JsonProperty("processedInputPositions") - long processedInputPositions, + long processedInputPositions, @JsonProperty("outputDataSize") - long outputDataSize, + long outputDataSize, @JsonProperty("outputPositions") - long outputPositions, + long outputPositions, @JsonProperty("operatorStats") - List operatorStats) { + List operatorStats, + @JsonProperty("driverStats") + List driverStats, + @JsonProperty("pipelineDeps") + Map> pipelineDeps) { this.createTime = requireNonNull(createTime, "createTime is null"); this.firstStartTime = firstStartTime; this.endTime = endTime; - this.elapsedTime = elapsedTime; - this.queuedTime = queuedTime; - this.deliveryTime = deliveryTime; + this.elapsedTimeMillis = elapsedTimeMillis; + this.queuedTimeMillis = queuedTimeMillis; + this.deliveryTimeMillis = deliveryTimeMillis; checkArgument(totalPipelineExecs >= 0, "totalPipelineExecs is negative"); this.totalPipelineExecs = totalPipelineExecs; @@ -207,10 +219,10 @@ public TaskStats( this.memoryReservation = memoryReservation; this.peakMemory = peakMemory; - this.totalScheduledTime = totalScheduledTime; - this.totalCpuTime = totalCpuTime; - this.totalUserTime = totalUserTime; - this.totalBlockedTime = totalBlockedTime; + this.totalScheduledTimeNanos = totalScheduledTimeNanos; + this.totalCpuTimeNanos = totalCpuTimeNanos; + 
this.totalUserTimeNanos = totalUserTimeNanos; + this.totalBlockedTimeNanos = totalBlockedTimeNanos; this.fullyBlocked = fullyBlocked; this.blockedReasons = requireNonNull(blockedReasons, "blockedReasons is null"); @@ -222,38 +234,43 @@ public TaskStats( checkArgument(outputPositions >= 0, "outputPositions is negative"); this.outputPositions = outputPositions; this.operatorStats = operatorStats; + this.driverStats = driverStats; + this.pipelineDeps = pipelineDeps; } @JsonProperty + @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS", timezone = "GMT+8") public DateTime getCreateTime() { return createTime; } @Nullable @JsonProperty + @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS", timezone = "GMT+8") public DateTime getFirstStartTime() { return firstStartTime; } @Nullable @JsonProperty + @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss.SSS", timezone = "GMT+8") public DateTime getEndTime() { return endTime; } @JsonProperty - public long getElapsedTime() { - return elapsedTime; + public long getElapsedTimeMillis() { + return elapsedTimeMillis; } @JsonProperty - public long getQueuedTime() { - return queuedTime; + public long getQueuedTimeMillis() { + return queuedTimeMillis; } @JsonProperty - public long getDeliveryTime() { - return deliveryTime; + public long getDeliveryTimeMillis() { + return deliveryTimeMillis; } @JsonProperty @@ -292,23 +309,23 @@ public long getPeakMemory() { } @JsonProperty - public long getTotalScheduledTime() { - return totalScheduledTime; + public long getTotalScheduledTimeNanos() { + return totalScheduledTimeNanos; } @JsonProperty - public long getTotalCpuTime() { - return totalCpuTime; + public long getTotalCpuTimeNanos() { + return totalCpuTimeNanos; } @JsonProperty - public long getTotalUserTime() { - return totalUserTime; + public long getTotalUserTimeNanos() { + return totalUserTimeNanos; } @JsonProperty - public long getTotalBlockedTime() { - return totalBlockedTime; + public long getTotalBlockedTimeNanos() { + return totalBlockedTimeNanos; } @JsonProperty @@ -345,4 +362,14 @@ public long getOutputPositions() { public List getOperatorStats() { return operatorStats; } + + @JsonProperty + public List getDriverStats() { + return driverStats; + } + + @JsonProperty + public Map> getPipelineDeps() { + return pipelineDeps; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/WorkProcessorExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/WorkProcessorExec.java index 8c0b91e5a..3122110bb 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/WorkProcessorExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/WorkProcessorExec.java @@ -16,12 +16,12 @@ package com.alibaba.polardbx.executor.mpp.operator; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.operator.Executor; import com.alibaba.polardbx.executor.operator.ProducerExecutor; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListenableFuture; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/WorkProcessorUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/WorkProcessorUtils.java index 74eae5537..e47cfc062 100644 --- 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/WorkProcessorUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/WorkProcessorUtils.java @@ -16,11 +16,11 @@ package com.alibaba.polardbx.executor.mpp.operator; -import com.google.common.collect.AbstractIterator; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.mpp.operator.WorkProcessor.ProcessState; import com.alibaba.polardbx.executor.mpp.operator.WorkProcessor.Transformation; import com.alibaba.polardbx.executor.mpp.operator.WorkProcessor.TransformationState; +import com.google.common.collect.AbstractIterator; +import com.google.common.util.concurrent.ListenableFuture; import javax.annotation.Nullable; import java.util.Comparator; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/CteAnchorExecFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/CteAnchorExecFactory.java index 64f4bf55f..741a6e1a6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/CteAnchorExecFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/CteAnchorExecFactory.java @@ -18,10 +18,8 @@ import com.alibaba.polardbx.executor.operator.Executor; import com.alibaba.polardbx.executor.operator.RecursiveCTEAnchorExec; -import com.alibaba.polardbx.executor.operator.RecursiveCTEExec; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import org.apache.calcite.rel.core.RecursiveCTE; import org.apache.calcite.rel.core.RecursiveCTEAnchor; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/CteExecFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/CteExecFactory.java index 1a3d57488..0d667d097 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/CteExecFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/CteExecFactory.java @@ -59,10 +59,14 @@ public Executor createExecutor(ExecutionContext context, int index) { skip = getRexParam(cte.getOffset(), params); } } + long fetchSize = skip + fetch; + if (skip > 0 && fetch > 0 && fetchSize < 0) { + fetchSize = Long.MAX_VALUE; + } Executor anchorExecutor = anchorExecutorFactory.createExecutor(context, index); RecursiveCTEExec recursiveCTEExec = - new RecursiveCTEExec(cte.getCteName(), anchorExecutor, recursiveExecutorFactory, fetch + skip, context); + new RecursiveCTEExec(cte.getCteName(), anchorExecutor, recursiveExecutorFactory, fetchSize, context); recursiveCTEExec.setId(cte.getRelatedId()); if (context.getRuntimeStatistics() != null) { RuntimeStatHelper.registerStatForExec(cte, recursiveCTEExec, context); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/DynamicValuesExecutorFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/DynamicValuesExecutorFactory.java index 43de8d4ea..56f0bc503 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/DynamicValuesExecutorFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/DynamicValuesExecutorFactory.java @@ -48,10 +48,8 @@ public Executor 
createExecutor(ExecutionContext context, int index) { e -> RexUtils.buildRexNode(e, context, dynamicExpressions) ).collect(Collectors.toList())).collect(Collectors.toList()); Executor exec = new DynamicValueExec(expressions, outputColumns, context); - exec.setId(dynamicValues.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(dynamicValues, exec, context); - } + registerRuntimeStat(exec, dynamicValues, context); return exec; } + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/EmptyExecutorFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/EmptyExecutorFactory.java index b544931ad..3d6d23364 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/EmptyExecutorFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/EmptyExecutorFactory.java @@ -36,4 +36,5 @@ public Executor createExecutor(ExecutionContext context, int index) { EmptyExecutor exec = new EmptyExecutor(columnMetaList); return exec; } + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ExchangeExecFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ExchangeExecFactory.java index d7da23ba7..eb8b03374 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ExchangeExecFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ExchangeExecFactory.java @@ -71,7 +71,7 @@ public Executor createExecutor(ExecutionContext context, int index) { List orderBys = ExecUtils.convertFrom(sortList); ret = new SortMergeExchangeExec(context, sourceNode.getRelatedId(), supplier, - pagesSerdeFactory.createPagesSerde(types), orderBys, types + pagesSerdeFactory.createPagesSerde(types, context), orderBys, types ); } else { if (exchangeClient == null) { @@ -80,15 +80,13 @@ public Executor createExecutor(ExecutionContext context, int index) { exchangeClient = supplier.get(new RecordMemSystemListener(memoryPool.getMemoryAllocatorCtx()), context); } ret = new ExchangeExec(context, sourceNode.getRelatedId(), exchangeClient, memoryPool, - pagesSerdeFactory.createPagesSerde(types), + pagesSerdeFactory.createPagesSerde(types, context), types ); } - ret.setId(sourceNode.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(sourceNode, ret, context); - } + registerRuntimeStat(ret, sourceNode, context); return ret; } + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ExecutorFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ExecutorFactory.java index 0797dffaa..16896b9cf 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ExecutorFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ExecutorFactory.java @@ -18,14 +18,22 @@ import com.alibaba.polardbx.executor.operator.Executor; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.statistics.RuntimeStatHelper; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.core.Exchange; import java.util.ArrayList; import java.util.List; public abstract class ExecutorFactory { + protected Exchange exchange = null; protected 
List childs = new ArrayList<>(); + public void setExchange(Exchange exchange) { + this.exchange = exchange; + } + public List getInputs() { return childs; } @@ -51,4 +59,13 @@ public void explain(StringBuilder output) { output.append(")"); } + protected void registerRuntimeStat(Executor exec, RelNode relNode, ExecutionContext context) { + exec.setId(relNode.getRelatedId()); + if (context.getRuntimeStatistics() != null) { + if (exchange != null) { + RuntimeStatHelper.registerStatForExec(exchange, exec, context); + } + RuntimeStatHelper.registerStatForExec(relNode, exec, context); + } + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ExpandExecFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ExpandExecFactory.java index 19ac6ae02..b15d90696 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ExpandExecFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ExpandExecFactory.java @@ -55,10 +55,8 @@ public Executor createExecutor(ExecutionContext context, int index) { Executor input = getInputs().get(0).createExecutor(context, index); Executor exec = new ExpandExec(input, exprsList, outputColumns, context); - exec.setId(expand.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(expand, exec, context); - } + registerRuntimeStat(exec, expand, context); return exec; } + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/FilterExecFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/FilterExecFactory.java index e918d7831..2819d17cf 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/FilterExecFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/FilterExecFactory.java @@ -140,11 +140,7 @@ private Executor createRowBaseFilter(ExecutionContext context, int index) { } exec = new FilterExec(input, expression, bloomFilterExpression, context); - - exec.setId(filter.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(filter, exec, context); - } + registerRuntimeStat(exec, filter, context); return exec; } @@ -160,10 +156,7 @@ private Executor createVectorizedFilter(ExecutionContext context, int index) { VectorizedExpressionBuilder.buildVectorizedExpression(inputTypes, filterCondition, context, true); Executor exec = new VectorizedFilterExec(inputExec, result.getKey(), result.getValue(), context); - exec.setId(filter.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(filter, exec, context); - } + registerRuntimeStat(exec, filter, context); return exec; } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/HashAggExecutorFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/HashAggExecutorFactory.java index 55c886b2c..21fac66ba 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/HashAggExecutorFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/HashAggExecutorFactory.java @@ -22,7 +22,7 @@ import com.alibaba.polardbx.executor.operator.util.AggregateUtils; import 
com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; import com.alibaba.polardbx.optimizer.core.rel.HashAgg; import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; import com.alibaba.polardbx.optimizer.utils.CalciteUtils; @@ -76,22 +76,25 @@ private synchronized List createAllExecutors(ExecutionContext context) for (int j = 0; j < parallelism; j++) { MemoryAllocatorCtx memoryAllocator = context.getMemoryPool().getMemoryAllocatorCtx(); - List outputDataTypes = CalciteUtils.getTypes(hashAgg.getRowType()); List aggregators = - AggregateUtils.convertAggregators(inputDataTypes, - outputDataTypes.subList(groups.length, groups.length + hashAgg.getAggCallList().size()), - hashAgg.getAggCallList(), context, memoryAllocator); + AggregateUtils.convertAggregators(hashAgg.getAggCallList(), context, memoryAllocator); HashAggExec exec = new HashAggExec(inputDataTypes, groups, aggregators, CalciteUtils.getTypes(hashAgg.getRowType()), estimateHashTableSize, spillerFactory, context); - exec.setId(hashAgg.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(hashAgg, exec, context); - } + registerRuntimeStat(exec, hashAgg, context); executors.add(exec); } } return executors; } + + public static int[] convertFrom(ImmutableBitSet gp) { + List list = gp.asList(); + int[] groups = new int[list.size()]; + for (int i = 0, n = list.size(); i < n; i++) { + groups[i] = list.get(i); + } + return groups; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/HashGroupJoinExecutorFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/HashGroupJoinExecutorFactory.java index f8b9288b6..5160ab980 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/HashGroupJoinExecutorFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/HashGroupJoinExecutorFactory.java @@ -21,7 +21,7 @@ import com.alibaba.polardbx.executor.operator.util.AggregateUtils; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; import com.alibaba.polardbx.optimizer.core.expression.calc.IExpression; import com.alibaba.polardbx.optimizer.core.join.EquiJoinKey; import com.alibaba.polardbx.optimizer.core.join.EquiJoinUtils; @@ -30,15 +30,12 @@ import com.alibaba.polardbx.optimizer.utils.CalciteUtils; import com.alibaba.polardbx.optimizer.utils.RexUtils; import com.alibaba.polardbx.statistics.RuntimeStatHelper; -import org.apache.calcite.rel.core.AggregateCall; -import org.apache.calcite.rel.core.JoinRelType; import org.apache.calcite.rex.RexCall; import org.apache.calcite.rex.RexNode; import org.apache.calcite.util.ImmutableBitSet; import java.util.ArrayList; import java.util.List; -import java.util.stream.Collectors; public class HashGroupJoinExecutorFactory extends ExecutorFactory { @@ -97,30 +94,13 @@ private synchronized List createAllExecutor(ExecutionContext context) final Executor innerInput = getInputs().get(1).createExecutor(context, i); IExpression otherCondition = convertExpression(otherCond, context); - List aggCalls = new 
ArrayList<>(hashAggJoin.getAggCallList()); - if (hashAggJoin.getJoinType() != JoinRelType.RIGHT) { - int offset = outerInput.getDataTypes().size(); - for (int j = 0; j < aggCalls.size(); ++j) { - List aggIndexInProbeChunk = - aggCalls.get(j).getArgList().stream().map(t -> t - offset).collect(Collectors.toList()); - aggCalls.set(j, aggCalls.get(j).copy(aggIndexInProbeChunk)); - } - } - List joinKeys = EquiJoinUtils .buildEquiJoinKeys(hashAggJoin, hashAggJoin.getOuter(), hashAggJoin.getInner(), (RexCall) equalCond, hashAggJoin.getJoinType()); MemoryAllocatorCtx memoryAllocator = context.getMemoryPool().getMemoryAllocatorCtx(); - List dataTypes = new ArrayList() { - { - addAll(innerInput.getDataTypes()); - } - }; List aggregators = - AggregateUtils.convertAggregators(dataTypes, - outputDataTypes.subList(groups.length, groups.length + aggCalls.size()), - aggCalls, context, memoryAllocator); + AggregateUtils.convertAggregators(hashAggJoin.getAggCallList(), context, memoryAllocator); Executor exec = new HashGroupJoinExec(outerInput, innerInput, hashAggJoin.getJoinType(), @@ -131,10 +111,7 @@ private synchronized List createAllExecutor(ExecutionContext context) context, estimateHashTableSize ); - exec.setId(hashAggJoin.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(hashAggJoin, exec, context); - } + registerRuntimeStat(exec, hashAggJoin, context); executors.add(exec); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/HashWindowExecFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/HashWindowExecFactory.java index cdf4a63c8..1e70b0fa9 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/HashWindowExecFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/HashWindowExecFactory.java @@ -16,15 +16,14 @@ package com.alibaba.polardbx.executor.mpp.operator.factory; -import com.alibaba.polardbx.executor.calc.Aggregator; +import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.executor.operator.Executor; import com.alibaba.polardbx.executor.operator.HashWindowExec; import com.alibaba.polardbx.executor.operator.spill.SpillerFactory; import com.alibaba.polardbx.executor.operator.util.AggregateUtils; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; import com.alibaba.polardbx.optimizer.core.rel.HashWindow; import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; import com.alibaba.polardbx.optimizer.utils.CalciteUtils; @@ -33,7 +32,6 @@ import java.util.ArrayList; import java.util.List; -import java.util.stream.Collectors; public class HashWindowExecFactory extends ExecutorFactory { @@ -75,23 +73,17 @@ private synchronized List createAllExecutors(ExecutionContext context) int estimateHashTableSize = AggregateUtils.estimateHashTableSize(expectedOutputRowCount, context); for (int j = 0; j < parallelism; j++) { MemoryAllocatorCtx memoryAllocator = context.getMemoryPool().getMemoryAllocatorCtx(); - List outputDataTypes = overWindow.groups.get(0).getAggregateCalls(overWindow).stream() - .map(call -> DataTypeUtil.calciteToDrdsType(call.getType())) - 
.collect(Collectors.toList()); + // notice: filter args in window function is always -1 List aggregators = - AggregateUtils.convertAggregators(inputDataTypes, outputDataTypes, - overWindow.groups.get(0).getAggregateCalls(overWindow), context, + AggregateUtils.convertAggregators(overWindow.groups.get(0).getAggregateCalls(overWindow), context, memoryAllocator); HashWindowExec exec = new HashWindowExec(inputDataTypes, groups, aggregators, CalciteUtils.getTypes(overWindow.getRowType()), estimateHashTableSize, spillerFactory, context); - exec.setId(overWindow.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(overWindow, exec, context); - } + registerRuntimeStat(exec, overWindow, context); executors.add(exec); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/HybridHashJoinExecutorFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/HybridHashJoinExecutorFactory.java index 9197439b5..bc9f8fd8c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/HybridHashJoinExecutorFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/HybridHashJoinExecutorFactory.java @@ -91,10 +91,7 @@ private synchronized List createAllExecutor(ExecutionContext context) new HybridHashJoinExec(outerInput, inner, join.getJoinType(), maxOneRow, joinKeys, otherCondition, antiJoinOperands, context, parallelism, i, bucketNum, spillerFactory); - exec.setId(join.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(join, exec, context); - } + registerRuntimeStat(exec, join, context); executors.add(exec); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/InsertSelectExecFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/InsertSelectExecFactory.java index ee0859165..af5cab18a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/InsertSelectExecFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/InsertSelectExecFactory.java @@ -38,10 +38,8 @@ public Executor createExecutor(ExecutionContext context, int index) { Executor inputExec = getInputs().get(0).createExecutor(context, index); Executor exec = new InsertSelectExec(relNode, inputExec, context); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(relNode, exec, context); - } - exec.setId(relNode.getRelatedId()); + registerRuntimeStat(exec, relNode, context); return exec; } + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LimitExecFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LimitExecFactory.java index 5d053333b..3d9228981 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LimitExecFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LimitExecFactory.java @@ -56,10 +56,8 @@ public Executor createExecutor(ExecutionContext context, int index) { } } Executor exec = new LimitExec(inputs, skip, fetch, context); - exec.setId(limit.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(limit, exec, context); - } + registerRuntimeStat(exec, 
limit, context); return exec; } + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LocalExchangeConsumerFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LocalExchangeConsumerFactory.java index 789ef178f..607a02187 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LocalExchangeConsumerFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LocalExchangeConsumerFactory.java @@ -70,7 +70,8 @@ public ConsumerExecutor createExecutor(ExecutionContext context, int index) { break; case RANDOM: localExchanger = new RandomExchanger(outputBufferMemoryManager, consumerExecutors, - this.status, localExchange.isAsyncConsume()); + this.status, localExchange.isAsyncConsume(), index, + context.getParamManager().getBoolean(ConnectionParams.ENABLE_OPTIMIZE_RANDOM_EXCHANGE)); break; case BORADCAST: localExchanger = new BroadcastExchanger(outputBufferMemoryManager, consumerExecutors, @@ -91,9 +92,16 @@ public ConsumerExecutor createExecutor(ExecutionContext context, int index) { this.status, localExchange.isAsyncConsume(), localExchange.getTypes(), localExchange.getPartitionChannels(), - localExchange.getKeyTypes(), context); + localExchange.getKeyTypes(), context, false); } break; + case CHUNK_PARTITION: + localExchanger = new PartitioningExchanger(outputBufferMemoryManager, consumerExecutors, + this.status, + localExchange.isAsyncConsume(), localExchange.getTypes(), + localExchange.getPartitionChannels(), + localExchange.getKeyTypes(), context, true); + break; case DIRECT: localExchanger = new DirectExchanger( outputBufferMemoryManager, consumerExecutors.get(index), this.status); @@ -104,4 +112,9 @@ public ConsumerExecutor createExecutor(ExecutionContext context, int index) { status.incrementParallelism(); return localExchanger; } + + // for test + public List getConsumerExecutors() { + return consumerExecutors; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LocalMergeSortExecutorFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LocalMergeSortExecutorFactory.java index 4da5c18be..9c502b423 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LocalMergeSortExecutorFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LocalMergeSortExecutorFactory.java @@ -73,10 +73,7 @@ private synchronized List createAllExecutors(ExecutionContext context) } } MergeSortExec sortExec = new MergeSortExec(inputs, orderBys, offset, limit, context); - sortExec.setId(sort.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(sort, sortExec, context); - } + registerRuntimeStat(sortExec, sort, context); executors.add(sortExec); } return executors; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LogicalCorrelateFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LogicalCorrelateFactory.java index 79f664bbe..182d6695a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LogicalCorrelateFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LogicalCorrelateFactory.java @@ -40,12 +40,14 @@ public Executor 
createExecutor(ExecutionContext context, int index) { DataType dateType = correlate.getJoinType() == SemiJoinType.LEFT ? CalciteUtils.getType(correlate.getRowType().getFieldList().get(fieldCount)) : DataTypes.LongType; - return new CorrelateExec(inner, correlate.getRight(), dateType, correlate.getLeft().getRowType(), + CorrelateExec exec = new CorrelateExec(inner, correlate.getRight(), dateType, correlate.getLeft().getRowType(), correlate.getCorrelationId(), correlate.getLeftConditions(), correlate.getOpKind(), correlate.getJoinType(), context); + registerRuntimeStat(exec, correlate, context); + return exec; } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LogicalViewExecutorFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LogicalViewExecutorFactory.java index 78e2d93ad..001bf3f64 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LogicalViewExecutorFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LogicalViewExecutorFactory.java @@ -28,8 +28,17 @@ import com.alibaba.polardbx.optimizer.core.rel.OrcTableScan; import com.google.common.base.Preconditions; import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.chunk.MutableChunk; +import com.alibaba.polardbx.executor.mpp.operator.LocalExecutionPlanner; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItem; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItemKey; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFManager; +import com.alibaba.polardbx.executor.mpp.planner.PipelineFragment; import com.alibaba.polardbx.executor.operator.AbstractOSSTableScanExec; +import com.alibaba.polardbx.executor.operator.ColumnarDeletedScanExec; +import com.alibaba.polardbx.executor.operator.ColumnarScanExec; import com.alibaba.polardbx.executor.operator.DrivingStreamTableScanExec; import com.alibaba.polardbx.executor.operator.DrivingStreamTableScanSortExec; import com.alibaba.polardbx.executor.operator.Executor; @@ -64,11 +73,9 @@ import com.alibaba.polardbx.optimizer.utils.CalciteUtils; import com.alibaba.polardbx.statistics.RuntimeStatHelper; import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; +import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.core.Join; -import org.apache.calcite.rel.metadata.JaninoRelMetadataProvider; -import org.apache.calcite.rel.metadata.RelColumnOrigin; -import org.apache.calcite.rel.metadata.RelMetadataQuery; +import org.apache.calcite.rel.core.Sort; import org.apache.calcite.rex.RexCall; import org.apache.calcite.rex.RexNode; import org.jetbrains.annotations.NotNull; @@ -79,19 +86,22 @@ import java.util.concurrent.atomic.AtomicInteger; public class LogicalViewExecutorFactory extends ExecutorFactory { + private static final Logger MPP_LOGGER = LoggerFactory.getLogger(LocalExecutionPlanner.class); + private final PipelineFragment fragment; private final int totalPrefetch; private final CursorMeta meta; private final AtomicInteger counter = new AtomicInteger(0); private final int parallelism; private final long maxRowCount; - private LogicalView logicalView; + private final LogicalView logicalView; private TableScanClient scanClient; - private SpillerFactory spillerFactory; + private final 
SpillerFactory spillerFactory; - private boolean bSort; - private long fetch; - private long skip; + private final boolean bSort; + private final Sort sort; + private final long fetch; + private final long skip; private BloomFilterExpression filterExpression; @@ -105,15 +115,18 @@ public class LogicalViewExecutorFactory extends ExecutorFactory { private boolean randomSplits; public LogicalViewExecutorFactory( - LogicalView logicalView, int totalPrefetch, int parallelism, long maxRowCount, boolean bSort, + PipelineFragment fragment, LogicalView logicalView, + int totalPrefetch, int parallelism, long maxRowCount, boolean bSort, Sort sort, long fetch, long skip, SpillerFactory spillerFactory, Map bloomFilters, boolean enableRuntimeFilter, boolean randomSplits) { + this.fragment = fragment; this.logicalView = logicalView; this.totalPrefetch = totalPrefetch; this.meta = CursorMeta.build(CalciteUtils.buildColumnMeta(logicalView, "TableScanColumns")); this.parallelism = parallelism; this.maxRowCount = maxRowCount; this.bSort = bSort; + this.sort = sort; this.fetch = fetch; this.skip = skip; this.spillerFactory = spillerFactory; @@ -143,9 +156,6 @@ public LogicalViewExecutorFactory( @Override public Executor createExecutor(ExecutionContext context, int index) { - RelMetadataQuery.THREAD_PROVIDERS - .set(JaninoRelMetadataProvider.of(logicalView.getCluster().getMetadataProvider())); - if (logicalView instanceof OSSTableScan) { return buildOSSTableScanExec(context); } else { @@ -200,16 +210,75 @@ private Executor buildTableScanExec(ExecutionContext context) { scanExec.setRandomSplits(randomSplits); } } - scanExec.setId(logicalView.getRelatedId()); + registerRuntimeStat(scanExec, logicalView, context); + + return scanExec; + } + + @Override + protected void registerRuntimeStat(Executor scanExec, RelNode relNode, ExecutionContext context) { + super.registerRuntimeStat(scanExec, relNode, context); if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(logicalView, scanExec, context); + if (bSort && scanExec instanceof TableScanSortExec) { + RuntimeStatHelper.registerStatForExec(sort, scanExec, context); + } } - return scanExec; } private Executor buildOSSTableScanExec(ExecutionContext context) { OSSTableScan ossTableScan = (OSSTableScan) logicalView; + if (context.isEnableOrcDeletedScan()) { + // Special path for check cci consistency. + // Normal oss read should not get here. + Executor exec = new ColumnarDeletedScanExec(ossTableScan, context, dataTypeList); + registerRuntimeStat(exec, logicalView, context); + return exec; + } + + // Use columnar table scan exec. + if (context.getParamManager().getBoolean(ConnectionParams.ENABLE_COLUMNAR_SCAN_EXEC)) { + ColumnarScanExec exec = new ColumnarScanExec(ossTableScan, context, dataTypeList); + registerRuntimeStat(exec, logicalView, context); + + if (fragment.getFragmentRFManager() != null) { + FragmentRFManager fragmentRFManager = fragment.getFragmentRFManager(); + + Map allItems = fragmentRFManager.getAllItems(); + + for (FragmentRFItemKey itemKey : allItems.keySet()) { + FragmentRFItem item = allItems.get(itemKey); + + String probeColumnName = item.getProbeColumnName(); + + // inspect the filter channel according to registered RF columns. 
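+ // (Descriptive note on the lookup below.) The probe column name is resolved against the scan's output row type: + // a miss means this scan does not produce the RF probe column, so the item is skipped with a debug log; + // a hit is mapped through the ORC in-projects to the physical column ref in the file, and only then is the + // scan exec registered as a source for this RF item.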
+ List fieldNames = logicalView.getRowType().getFieldNames(); + final int outProjectIndex = fieldNames.indexOf(probeColumnName); + + if (outProjectIndex == -1) { + if (MPP_LOGGER.isDebugEnabled()) { + MPP_LOGGER.debug( + "Cannot find the filter channel according to registered RF columns " + + ", fragmentRFItemKey = " + itemKey + + ", for scan: " + logicalView); + } + + } else { + // Mapping to input index in file. + final int inProjectIndex = ossTableScan.getOrcNode().getInProjects().get(outProjectIndex); + item.setSourceRefInFile(inProjectIndex); + item.setSourceFilterChannel(outProjectIndex); + + // register column scan exec in all threads into fragment RF manager. + item.registerSource(exec); + } + } + + } + + return exec; + } + AbstractOSSTableScanExec exec = AbstractOSSTableScanExec.create(ossTableScan, context, dataTypeList); OrcTableScan orcTableScan = ossTableScan.getOrcNode(); @@ -248,11 +317,8 @@ private Executor buildOSSTableScanExec(ExecutionContext context) { outProject[i] = orcTableScan.getOutProjects().get(i); } exec.setOutProject(outProject); - exec.setId(logicalView.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(logicalView, exec, context); - } } + registerRuntimeStat(exec, logicalView, context); if (filterExpression != null) { exec.initWaitFuture(filterExpression.getWaitBloomFuture()); @@ -326,10 +392,7 @@ public TableScanExec createLookupScanExec(ExecutionContext context, boolean canS TableScanExec scanExec = new LookupTableScanExec(logicalView, context, scanClient.incrementSourceExec(), canShard, spillerFactory, predicate, allJoinKeys, dataTypeList); - scanExec.setId(logicalView.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(logicalView, scanExec, context); - } + registerRuntimeStat(scanExec, logicalView, context); return scanExec; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LookupJoinExecFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LookupJoinExecFactory.java index f303a77a5..404e823ba 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LookupJoinExecFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LookupJoinExecFactory.java @@ -96,10 +96,7 @@ public Executor createExecutor(ExecutionContext context, int index) { otherCondition, context, shardCount, parallelism, allowMultiReadConn); } - ret.setId(join.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(join, ret, context); - } + registerRuntimeStat(ret, join, context); return ret; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LoopJoinExecutorFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LoopJoinExecutorFactory.java index b300e0fe2..78bd342a7 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LoopJoinExecutorFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/LoopJoinExecutorFactory.java @@ -82,10 +82,7 @@ maxOneRow, otherCondition, antiJoinOperands, convertExpression(antiCondition, co synchronizer ); exec.setStreamJoin(streamJoin); - exec.setId(join.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(join, 
exec, context); - } + registerRuntimeStat(exec, join, context); executors.add(exec); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/MaterializedJoinExecFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/MaterializedJoinExecFactory.java index 1b984d199..207650bff 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/MaterializedJoinExecFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/MaterializedJoinExecFactory.java @@ -69,10 +69,7 @@ private synchronized List createAllExecutor(ExecutionContext context) IExpression condition = convertExpression(join.getCondition(), context); Executor exec = new MaterializedSemiJoinExec( outer, inner, join.isDistinctInput(), joinKeys, join.getJoinType(), condition, context); - exec.setId(join.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(join, exec, context); - } + registerRuntimeStat(exec, join, context); executors.add(exec); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/NonBlockGeneralExecFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/NonBlockGeneralExecFactory.java index 119bcaae5..35c3c2ddb 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/NonBlockGeneralExecFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/NonBlockGeneralExecFactory.java @@ -16,10 +16,10 @@ package com.alibaba.polardbx.executor.mpp.operator.factory; -import com.google.common.base.Preconditions; import com.alibaba.polardbx.executor.operator.Executor; import com.alibaba.polardbx.executor.operator.NonBlockGeneralSourceExec; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.google.common.base.Preconditions; import org.apache.calcite.rel.RelNode; public class NonBlockGeneralExecFactory extends ExecutorFactory { @@ -35,4 +35,5 @@ public Executor createExecutor(ExecutionContext context, int index) { Preconditions.checkArgument(index < 1, "NonBlockGeneralSourceExec's parallism must be 1!"); return new NonBlockGeneralSourceExec(relNode, context); } + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/OutputExecutorFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/OutputExecutorFactory.java index b03a679bd..133df635a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/OutputExecutorFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/OutputExecutorFactory.java @@ -53,12 +53,14 @@ public ConsumerExecutor createExecutor(ExecutionContext context, int index) { .equals(PartitionShuffleHandle.PartitionShuffleMode.BROADCAST) || partitioningScheme.getPartitionCount() == 1) { return new TaskOutputCollector(inputType, outputType, outputBuffer, - this.pagesSerdeFactory.createPagesSerde(outputType), context); + this.pagesSerdeFactory.createPagesSerde(outputType, context), context); } else { int chunkLimit = context.getParamManager().getInt(ConnectionParams.CHUNK_SIZE); - return new PartitionedOutputCollector(partitioningScheme.getPartitionCount(), inputType, - outputType, partitioningScheme.getPartChannels(), outputBuffer, this.pagesSerdeFactory, 
chunkLimit, - context); + return new PartitionedOutputCollector(partitioningScheme.getPartitionCount(), + partitioningScheme.getPrunePartitions(), partitioningScheme.getFullPartCount(), + inputType, partitioningScheme.isRemotePairWise(), outputType, partitioningScheme.getPartChannels(), + outputBuffer, this.pagesSerdeFactory, + chunkLimit, context); } } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/OverWindowFramesExecFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/OverWindowFramesExecFactory.java deleted file mode 100644 index 2391f6133..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/OverWindowFramesExecFactory.java +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.mpp.operator.factory; - -import com.alibaba.druid.util.StringUtils; -import com.google.common.collect.ImmutableList; -import com.alibaba.polardbx.executor.operator.Executor; -import com.alibaba.polardbx.executor.operator.NonFrameOverWindowExec; -import com.alibaba.polardbx.executor.operator.OverWindowFramesExec; -import com.alibaba.polardbx.executor.operator.frame.OverWindowFrame; -import com.alibaba.polardbx.executor.operator.frame.RangeSlidingOverFrame; -import com.alibaba.polardbx.executor.operator.frame.RangeUnboundedFollowingOverFrame; -import com.alibaba.polardbx.executor.operator.frame.RangeUnboundedPrecedingOverFrame; -import com.alibaba.polardbx.executor.operator.frame.RowSlidingOverFrame; -import com.alibaba.polardbx.executor.operator.frame.RowUnboundedFollowingOverFrame; -import com.alibaba.polardbx.executor.operator.frame.RowUnboundedPrecedingOverFrame; -import com.alibaba.polardbx.executor.operator.frame.UnboundedOverFrame; -import com.alibaba.polardbx.executor.operator.util.AggregateUtils; -import com.alibaba.polardbx.executor.utils.OrderByOption; -import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; -import com.alibaba.polardbx.executor.calc.Aggregator; -import com.alibaba.polardbx.optimizer.core.rel.SortWindow; -import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; -import com.alibaba.polardbx.optimizer.utils.CalciteUtils; -import com.alibaba.polardbx.statistics.RuntimeStatHelper; -import org.apache.calcite.rel.RelFieldCollation; -import org.apache.calcite.rel.core.AggregateCall; -import org.apache.calcite.rel.core.Window; -import org.apache.calcite.rex.RexInputRef; -import org.apache.calcite.rex.RexLiteral; -import org.apache.calcite.rex.RexNode; -import org.apache.calcite.rex.RexWindowBound; -import org.apache.calcite.sql.SqlOperator; -import org.apache.calcite.sql.fun.SqlStdOperatorTable; -import org.apache.calcite.util.ImmutableBitSet; - -import java.util.ArrayList; 
-import java.util.Arrays; -import java.util.List; -import java.util.stream.Collectors; - -/** - * @author hongxi.chx - */ -public class OverWindowFramesExecFactory extends ExecutorFactory { - - private final SortWindow overWindow; - private final List columnMetas; - - public OverWindowFramesExecFactory(SortWindow overWindow, ExecutorFactory executorFactory) { - this.overWindow = overWindow; - this.columnMetas = CalciteUtils.getTypes(overWindow.getRowType()); - addInput(executorFactory); - } - - @Override - public Executor createExecutor(ExecutionContext context, int index) { - return createFrameWindowExec(context, index); - } - - private Executor createFrameWindowExec(ExecutionContext context, int index) { - ImmutableList aggCalls = overWindow.groups.get(0).aggCalls; - SqlOperator operator = aggCalls.get(0).getOperator(); - if (isNoFrameWindowFunction(operator)) { - Executor nonFrameWindowExec = createNonFrameWindowExec(overWindow, context, index); - nonFrameWindowExec.setId(overWindow.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(overWindow, nonFrameWindowExec, context); - } - return nonFrameWindowExec; - } - Executor input = getInputs().get(0).createExecutor(context, index); - ImmutableBitSet gp = overWindow.groups.get(0).keys; - List list = gp.asList(); - int[] groups = new int[list.size()]; - for (int i = 0, n = list.size(); i < n; i++) { - groups[i] = list.get(i); - } - MemoryAllocatorCtx memoryAllocator = context.getMemoryPool().getMemoryAllocatorCtx(); - - OverWindowFrame[] overWindowFrames = new OverWindowFrame[overWindow.groups.size()]; - List outputDataTypes = CalciteUtils.getTypes(overWindow.getRowType()); - int aggColumnIndex = input.getDataTypes().size(); - for (int i = 0; i < overWindow.groups.size(); i++) { - Window.Group group = overWindow.groups.get(i); - List aggregateCalls = group.getAggregateCalls(overWindow); - List aggregators = AggregateUtils - .convertAggregators(input.getDataTypes(), - outputDataTypes.subList(aggColumnIndex, aggColumnIndex + aggregateCalls.size()), aggregateCalls, - context, memoryAllocator); - if (group.aggCalls.stream().anyMatch(t -> isUnboundedFrameWindowFunction(t.getOperator()))) { - overWindowFrames[i] = new UnboundedOverFrame(aggregators.toArray(new Aggregator[0])); - } else if (group.lowerBound.isUnbounded() && group.upperBound.isUnbounded()) { - overWindowFrames[i] = new UnboundedOverFrame(aggregators.toArray(new Aggregator[0])); - } else if (group.isRows) { - overWindowFrames[i] = createRowFrame(overWindow, group, aggregators); - } else { - overWindowFrames[i] = createRangeFrame(overWindow, group, aggregators); - } - aggColumnIndex += aggregateCalls.size(); - } - OverWindowFramesExec overWindowFramesExec = - new OverWindowFramesExec(input, context, overWindowFrames, list, columnMetas); - overWindowFramesExec.setId(overWindow.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(overWindow, overWindowFramesExec, context); - } - return overWindowFramesExec; - } - - private boolean isNoFrameWindowFunction(SqlOperator method) { - return method == SqlStdOperatorTable.RANK || method == SqlStdOperatorTable.DENSE_RANK - || method == SqlStdOperatorTable.ROW_NUMBER; - } - - private boolean isUnboundedFrameWindowFunction(SqlOperator method) { - return method == SqlStdOperatorTable.CUME_DIST || method == SqlStdOperatorTable.PERCENT_RANK; - } - - private com.alibaba.polardbx.executor.operator.Executor createNonFrameWindowExec(SortWindow 
overWindow, - ExecutionContext context, int index) { - Executor input = getInputs().get(0).createExecutor(context, index); - MemoryAllocatorCtx memoryAllocator = context.getMemoryPool().getMemoryAllocatorCtx(); - - List dataTypes = CalciteUtils.getTypes(overWindow.getRowType()); - List aggregateCalls = overWindow.groups.get(0).getAggregateCalls(overWindow); - ImmutableBitSet gp = overWindow.groups.get(0).keys; - List list = gp.asList(); - int[] groups = list.stream().mapToInt(Integer::intValue).toArray(); - - List aggregators = AggregateUtils - .convertAggregators(input.getDataTypes(), - dataTypes.subList(groups.length, groups.length + aggregateCalls.size()), aggregateCalls, context, - memoryAllocator); - - boolean[] peer = new boolean[aggregators.size()]; - for (int i = 0; i < aggregators.size(); i++) { - Window.Group group = overWindow.groups.get(0); - if (group.lowerBound.isCurrentRow() && group.upperBound.isCurrentRow()) { - peer[i] = true; - } else { - peer[i] = false; - } - } - - return new NonFrameOverWindowExec(input, context, aggregators, - Arrays.stream(groups).boxed().collect(Collectors.toList()), peer, dataTypes); - } - - private OverWindowFrame createRowFrame(SortWindow overWindow, SortWindow.Group group, - List aggregators) { - if (group.lowerBound.isUnbounded()) { - RexWindowBound upperBound = group.upperBound; - int offset = - getConstant(upperBound, overWindow.constants, overWindow.getInput().getRowType().getFieldCount()); - return new RowUnboundedPrecedingOverFrame(aggregators, offset); - } else if (overWindow.groups.get(0).upperBound.isUnbounded()) { - RexWindowBound lowerBound = overWindow.groups.get(0).lowerBound; - int offset = - getConstant(lowerBound, overWindow.constants, overWindow.getInput().getRowType().getFieldCount()); - return new RowUnboundedFollowingOverFrame(aggregators, offset); - } else { - RexWindowBound lowerBound = overWindow.groups.get(0).lowerBound; - RexWindowBound upperBound = overWindow.groups.get(0).upperBound; - int lowerOffset = - getConstant(lowerBound, overWindow.constants, overWindow.getInput().getRowType().getFieldCount()); - int upperOffset = - getConstant(upperBound, overWindow.constants, overWindow.getInput().getRowType().getFieldCount()); - return new RowSlidingOverFrame(aggregators, lowerOffset, upperOffset); - } - } - - private int getConstant(RexWindowBound bound, ImmutableList constants, int offset) { - if (bound.isCurrentRow()) { - return 0; - } - RexNode boundOffset = bound.getOffset(); - assert boundOffset instanceof RexInputRef; - if (!(boundOffset instanceof RexInputRef)) { - throw new IllegalArgumentException( - "bound offset must be a instance of RexInputRef, but was " + boundOffset.getClass().getName() + "."); - } - RexLiteral rexLiteral = constants - .get(((RexInputRef) boundOffset).getIndex() - offset); - Object value2 = rexLiteral.getValue2(); - if (!StringUtils.isNumber(String.valueOf(value2))) { - throw new IllegalArgumentException("bound index must be a digit, but was " + value2.toString() + "."); - } - return StringUtils.stringToInteger( - String.valueOf(value2)); - } - - private OverWindowFrame createRangeFrame(SortWindow overWindow, SortWindow.Group group, - List aggregators) { - List sortList = group.orderKeys.getFieldCollations(); - List orderBys = new ArrayList<>(sortList.size()); - if (sortList != null) { - for (int i = 0, n = sortList.size(); i < n; i++) { - RelFieldCollation field = sortList.get(i); - orderBys.add(new OrderByOption(field.getFieldIndex(), field.direction, field.nullDirection)); - } - } - if 
(orderBys.size() != 1) { - throw new IllegalArgumentException( - "RANGE N PRECEDING/FOLLOWING frame requires exactly one ORDER BY expression"); - } - OrderByOption orderByOption = orderBys.get(0); - int index = orderByOption.getIndex(); - DataType dataType = - DataTypeUtil.calciteToDrdsType(overWindow.getInput().getRowType().getFieldList().get(index).getType()); - if (group.lowerBound.isUnbounded()) { - RexWindowBound upperBound = group.upperBound; - int offset = - getConstant(upperBound, overWindow.constants, overWindow.getInput().getRowType().getFieldCount()); - return new RangeUnboundedPrecedingOverFrame(aggregators, offset, - index, orderByOption.isAsc(), - dataType); - } else if (overWindow.groups.get(0).upperBound.isUnbounded()) { - RexWindowBound lowerBound = overWindow.groups.get(0).lowerBound; - int offset = - getConstant(lowerBound, overWindow.constants, overWindow.getInput().getRowType().getFieldCount()); - return new RangeUnboundedFollowingOverFrame(aggregators, offset, index, orderByOption.isAsc(), dataType); - } else { - RexWindowBound lowerBound = overWindow.groups.get(0).lowerBound; - RexWindowBound upperBound = overWindow.groups.get(0).upperBound; - int lowerOffset = - getConstant(lowerBound, overWindow.constants, overWindow.getInput().getRowType().getFieldCount()); - int upperOffset = - getConstant(upperBound, overWindow.constants, overWindow.getInput().getRowType().getFieldCount()); - return new RangeSlidingOverFrame(aggregators, lowerOffset, upperOffset, index, orderByOption.isAsc(), - dataType); - } - } - -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ParallelHashJoinExecutorFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ParallelHashJoinExecutorFactory.java index b4fd21c80..7a017e237 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ParallelHashJoinExecutorFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ParallelHashJoinExecutorFactory.java @@ -17,27 +17,36 @@ package com.alibaba.polardbx.executor.mpp.operator.factory; import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItem; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItemKey; +import com.alibaba.polardbx.executor.mpp.planner.PipelineFragment; import com.alibaba.polardbx.executor.operator.Executor; import com.alibaba.polardbx.executor.operator.ParallelHashJoinExec; +import com.alibaba.polardbx.executor.operator.Synchronizer; +import com.alibaba.polardbx.executor.utils.ExecUtils; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.expression.calc.IExpression; import com.alibaba.polardbx.optimizer.core.join.EquiJoinKey; import com.alibaba.polardbx.optimizer.core.join.EquiJoinUtils; import com.alibaba.polardbx.optimizer.core.rel.HashJoin; import com.alibaba.polardbx.optimizer.utils.RexUtils; -import com.alibaba.polardbx.statistics.RuntimeStatHelper; import org.apache.calcite.rel.core.Join; import org.apache.calcite.rel.core.JoinRelType; import org.apache.calcite.rex.RexCall; import org.apache.calcite.rex.RexNode; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; +import java.util.stream.IntStream; public class ParallelHashJoinExecutorFactory extends ExecutorFactory { - private boolean 
driverBuilder; + + private PipelineFragment pipelineFragment; + private Join join; private List executors = new ArrayList<>(); private RexNode otherCond; @@ -48,9 +57,17 @@ public class ParallelHashJoinExecutorFactory extends ExecutorFactory { private int numPartitions; private boolean streamJoin; - public ParallelHashJoinExecutorFactory(Join join, RexNode otherCond, RexNode equalCond, boolean maxOneRow, + int localPartitionCount; + + boolean keepPartition; + + public ParallelHashJoinExecutorFactory(PipelineFragment pipelineFragment, + Join join, RexNode otherCond, RexNode equalCond, boolean maxOneRow, List operands, ExecutorFactory build, ExecutorFactory probe, - int probeParallelism, int numPartitions, boolean driverBuilder) { + int probeParallelism, int numPartitions, boolean driverBuilder, + int localPartitionCount, boolean keepPartition) { + this.pipelineFragment = pipelineFragment; + this.join = join; this.otherCond = otherCond; this.equalCond = equalCond; @@ -61,6 +78,8 @@ public ParallelHashJoinExecutorFactory(Join join, RexNode otherCond, RexNode equ addInput(build); addInput(probe); this.driverBuilder = driverBuilder; + this.localPartitionCount = localPartitionCount; + this.keepPartition = keepPartition; } @Override @@ -76,14 +95,129 @@ public List getAllExecutors(ExecutionContext context) { private synchronized List createAllExecutor(ExecutionContext context) { if (executors.isEmpty()) { - boolean alreadyUseRuntimeFilter = false; - if (join instanceof HashJoin) { - alreadyUseRuntimeFilter = ((HashJoin) join).isRuntimeFilterPushedDown(); + + // Parse the item keys from the RelNode of the join. + List rfItemKeys = FragmentRFItemKey.buildItemKeys(join); + + boolean alreadyUseRuntimeFilter = join instanceof HashJoin && ((HashJoin) join).isRuntimeFilterPushedDown(); + List synchronizers = new ArrayList<>(); + List assignResult = null; + + List joinKeys = EquiJoinUtils + .buildEquiJoinKeys(join, join.getOuter(), join.getInner(), (RexCall) equalCond, join.getJoinType()); + IExpression otherCondition = convertExpression(otherCond, context); + + boolean useBloomFilter = + context.getParamManager().getBoolean(ConnectionParams.ENABLE_HASH_TABLE_BLOOM_FILTER); + int chunkLimit = context.getParamManager().getInt(ConnectionParams.CHUNK_SIZE); + + if (localPartitionCount > 0) { + Map partitionParallelism = new HashMap<>(); + // local partition wise mode + assignResult = ExecUtils.assignPartitionToExecutor(localPartitionCount, probeParallelism); + for (Integer i : assignResult) { + partitionParallelism.merge(i, 1, Integer::sum); + } + int hashTableNum = Math.min(localPartitionCount, probeParallelism); + + int[] partitionsOfEachBucket = ExecUtils.partitionsOfEachBucket(localPartitionCount, probeParallelism); + for (int hashTableIndex = 0; hashTableIndex < hashTableNum; hashTableIndex++) { + + // how many degrees of parallelism in this synchronizer instance. + int numberOfExec = localPartitionCount >= probeParallelism + ? 1 : partitionParallelism.get(hashTableIndex); + + // how many data partitions in this synchronizer instance. + int numberOfPartition = partitionsOfEachBucket[hashTableIndex]; + + Synchronizer synchronizer = + new Synchronizer(join.getJoinType(), driverBuilder, numberOfExec, alreadyUseRuntimeFilter, + useBloomFilter, chunkLimit, numberOfExec, numberOfPartition, false); + + synchronizers.add(synchronizer); + } + + // If using fragment-level runtime filter in this join operator, register the + // synchronizer into RF-manager. 
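+ // Each FragmentRFItemKey parsed from the join is matched against the equi-join keys below; whether the + // inner or outer index is compared depends on driverBuilder, i.e. whether the join inputs are reversed. + // On a match, the build-side channel is recorded on the item and the per-bucket synchronizers share a + // single RF merger via registerBuilder.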
+ if (pipelineFragment.getFragmentRFManager() != null) { + + for (int itemKeyIndex = 0; itemKeyIndex < rfItemKeys.size(); itemKeyIndex++) { + FragmentRFItemKey itemKey = rfItemKeys.get(itemKeyIndex); + + // Find the ordinal in the build side. + int ordinal; + for (ordinal = 0; ordinal < joinKeys.size(); ordinal++) { + // Consider two cases: + // 1. join input side is not reversed + // 2. join input side is reversed + if ((!driverBuilder && joinKeys.get(ordinal).getInnerIndex() == itemKey.getBuildIndex()) + || (driverBuilder + && joinKeys.get(ordinal).getOuterIndex() == itemKey.getBuildIndex())) { + + // Find the items from the pipeline fragment that match the given item key. + Map<FragmentRFItemKey, FragmentRFItem> allItems = + pipelineFragment.getFragmentRFManager().getAllItems(); + FragmentRFItem rfItem; + if ((rfItem = allItems.get(itemKey)) != null) { + // The channel for the build side in the join operator is the ordinal of the item key. + rfItem.setBuildSideChannel(ordinal); + + // register multiple synchronizers and share the rf item. + rfItem.registerBuilder( + ordinal, probeParallelism, synchronizers.toArray(new Synchronizer[0])); + } + } + } + + } + } + + } else { + Synchronizer synchronizer = + new Synchronizer(join.getJoinType(), driverBuilder, numPartitions, + alreadyUseRuntimeFilter, + useBloomFilter, chunkLimit, probeParallelism, -1, true); + + synchronizers.add(synchronizer); + + // If using fragment-level runtime filter in this join operator, register the + // synchronizer into RF-manager. + if (pipelineFragment.getFragmentRFManager() != null) { + + for (int itemKeyIndex = 0; itemKeyIndex < rfItemKeys.size(); itemKeyIndex++) { + FragmentRFItemKey itemKey = rfItemKeys.get(itemKeyIndex); + + // Find the ordinal in the build side. + int ordinal; + for (ordinal = 0; ordinal < joinKeys.size(); ordinal++) { + + // Consider two cases: + // 1. join input side is not reversed + // 2. join input side is reversed + if ((!driverBuilder && joinKeys.get(ordinal).getInnerIndex() == itemKey.getBuildIndex()) + || (driverBuilder + && joinKeys.get(ordinal).getOuterIndex() == itemKey.getBuildIndex())) { + + // Find the items from the pipeline fragment that match the given item key. + Map<FragmentRFItemKey, FragmentRFItem> allItems = + pipelineFragment.getFragmentRFManager().getAllItems(); + FragmentRFItem rfItem; + if ((rfItem = allItems.get(itemKey)) != null) { + // The channel for the build side in the join operator is the ordinal of the item key. + rfItem.setBuildSideChannel(ordinal); + + // register multiple synchronizers and share the rf item.
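+ // In this non-partitioned path a single Synchronizer is shared by all probeParallelism join + // operators, so one RF merger spans the full build-side DOP.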
+ rfItem.registerBuilder(ordinal, probeParallelism, synchronizer); + } + } + } + } + + } + } - ParallelHashJoinExec.Synchronizer synchronizer = - new ParallelHashJoinExec.Synchronizer(numPartitions, alreadyUseRuntimeFilter, - context.getParamManager().getBoolean( - ConnectionParams.ENABLE_HASH_TABLE_BLOOM_FILTER)); + + Map partitionOperatorIdx = new HashMap<>(); for (int i = 0; i < probeParallelism; i++) { Executor inner; Executor outerInput; @@ -94,23 +228,28 @@ private synchronized List createAllExecutor(ExecutionContext context) inner = getInputs().get(0).createExecutor(context, i); outerInput = getInputs().get(1).createExecutor(context, i); } - IExpression otherCondition = convertExpression(otherCond, context); - List joinKeys = EquiJoinUtils - .buildEquiJoinKeys(join, join.getOuter(), join.getInner(), (RexCall) equalCond, join.getJoinType()); List antiJoinOperands = null; - if (operands != null && join.getJoinType() == JoinRelType.ANTI && !operands.isEmpty()) { + if (containAntiJoinOperands(operands, join)) { antiJoinOperands = operands.stream().map(ele -> convertExpression(ele, context)).collect(Collectors.toList()); } + + int operatorIdx = i; + if (localPartitionCount > 0) { + int partition = assignResult.get(i); + operatorIdx = partitionOperatorIdx.getOrDefault(partition, 0); + partitionOperatorIdx.put(partition, operatorIdx + 1); + } + ParallelHashJoinExec exec = - new ParallelHashJoinExec(synchronizer, outerInput, inner, join.getJoinType(), maxOneRow, - joinKeys, otherCondition, antiJoinOperands, driverBuilder, context, i); + new ParallelHashJoinExec( + localPartitionCount > 0 ? synchronizers.get(assignResult.get(i)) : synchronizers.get(0), + outerInput, inner, join.getJoinType(), maxOneRow, + joinKeys, otherCondition, antiJoinOperands, driverBuilder, context, operatorIdx, + probeParallelism, keepPartition); exec.setStreamJoin(streamJoin); - exec.setId(join.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(join, exec, context); - } + registerRuntimeStat(exec, join, context); executors.add(exec); } } @@ -124,4 +263,8 @@ private IExpression convertExpression(RexNode rexNode, ExecutionContext context) public void enableStreamJoin(boolean streamJoin) { this.streamJoin = streamJoin; } + + public static boolean containAntiJoinOperands(List operands, Join join) { + return operands != null && join.getJoinType() == JoinRelType.ANTI && !operands.isEmpty(); + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ProjectExecFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ProjectExecFactory.java index 7e9b26edd..2c16c0f81 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ProjectExecFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ProjectExecFactory.java @@ -71,10 +71,7 @@ private Executor createRowBasedProjectionExecutor(ExecutionContext context, int Executor input = getInputs().get(0).createExecutor(context, index); Executor exec = new ProjectExec(input, expressions, outputColumns, context); - exec.setId(project.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(project, exec, context); - } + registerRuntimeStat(exec, project, context); return exec; } @@ -97,10 +94,7 @@ private Executor buildVectorizedProjectionExecutor(ExecutionContext context, int }); Executor exec = new 
VectorizedProjectExec(inputExec, expressions, preAllocatedChunks, outputColumns, context); - exec.setId(project.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(project, exec, context); - } + registerRuntimeStat(exec, project, context); return exec; } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/RuntimeFilterBuilderExecFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/RuntimeFilterBuilderExecFactory.java index f5254948b..5d3131f4a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/RuntimeFilterBuilderExecFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/RuntimeFilterBuilderExecFactory.java @@ -17,8 +17,8 @@ package com.alibaba.polardbx.executor.mpp.operator.factory; import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilter; import com.alibaba.polardbx.common.utils.hash.HashMethodInfo; -import com.alibaba.polardbx.executor.common.ExecutorContext; import com.alibaba.polardbx.executor.operator.Executor; import com.alibaba.polardbx.executor.operator.RuntimeFilterBuilderExec; import com.alibaba.polardbx.executor.operator.util.BloomFilterProduce; @@ -27,7 +27,6 @@ import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; import com.alibaba.polardbx.optimizer.core.planner.rule.mpp.runtimefilter.RuntimeFilterUtil; import com.alibaba.polardbx.statistics.RuntimeStatHelper; -import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilter; import io.airlift.http.client.HttpClient; import org.apache.calcite.plan.RelOptUtil; import org.apache.calcite.rel.logical.RuntimeFilterBuilder; @@ -42,6 +41,7 @@ public class RuntimeFilterBuilderExecFactory extends ExecutorFactory { + boolean localBloomFilter; private RuntimeFilterBuilder filterBuilder; private HttpClient client; private URI uri; @@ -49,11 +49,12 @@ public class RuntimeFilterBuilderExecFactory extends ExecutorFactory { public RuntimeFilterBuilderExecFactory(RuntimeFilterBuilder filterBuilder, ExecutorFactory executorFactory, HttpClient httpClient, - URI uri) { + URI uri, boolean localBloomFilter) { this.filterBuilder = filterBuilder; addInput(executorFactory); this.client = httpClient; this.uri = uri; + this.localBloomFilter = localBloomFilter; } @Override @@ -100,16 +101,15 @@ public Executor createExecutor(ExecutionContext context, int idx) { minMaxFilters.add(minMaxFilterList); } bloomFilterProduce = BloomFilterProduce.create( - bloomfilterId, keyHash, bloomFilters, minMaxFilters, client, uri, context.getTraceId()); + bloomfilterId, keyHash, bloomFilters, minMaxFilters, !localBloomFilter ? client : null, + !localBloomFilter ? 
uri : null, context.getTraceId()); } bloomFilterProduce.addCounter(); Executor input = getInputs().get(0).createExecutor(context, idx); Executor exec = new RuntimeFilterBuilderExec(input, bloomFilterProduce, context, idx); - exec.setId(filterBuilder.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(filterBuilder, exec, context); - } + registerRuntimeStat(exec, filterBuilder, context); return exec; } + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/SortAggExecFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/SortAggExecFactory.java index 94b9ea535..b2b59344e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/SortAggExecFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/SortAggExecFactory.java @@ -20,8 +20,7 @@ import com.alibaba.polardbx.executor.operator.SortAggExec; import com.alibaba.polardbx.executor.operator.util.AggregateUtils; import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; import com.alibaba.polardbx.optimizer.core.rel.SortAgg; import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; import com.alibaba.polardbx.optimizer.utils.CalciteUtils; @@ -55,25 +54,20 @@ public synchronized List getAllExecutors(ExecutionContext context) { if (executors.isEmpty()) { for (int k = 0; k < parallelism; k++) { MemoryAllocatorCtx memoryAllocator = context.getMemoryPool().getMemoryAllocatorCtx(); - Executor input = getInputs().get(0).createExecutor(context, k); - List outputDataTypes = CalciteUtils.getTypes(sortAgg.getRowType()); List aggregators = - AggregateUtils.convertAggregators(input.getDataTypes(), outputDataTypes - .subList(sortAgg.getGroupCount(), sortAgg.getGroupCount() + sortAgg.getAggCallList().size()), - sortAgg.getAggCallList(), context, memoryAllocator); + AggregateUtils.convertAggregators(sortAgg.getAggCallList(), context, memoryAllocator); + Executor input = getInputs().get(0).createExecutor(context, k); ImmutableBitSet gp = sortAgg.getGroupSet(); int[] groups = AggregateUtils.convertBitSet(gp); Executor exec = new SortAggExec( input, groups, aggregators, CalciteUtils.getTypes(sortAgg.getRowType()), context); - exec.setId(sortAgg.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(sortAgg, exec, context); - } + registerRuntimeStat(exec, sortAgg, context); executors.add(exec); } } return executors; } + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/SortExecutorFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/SortExecutorFactory.java index cb61eedec..b6a8fe50a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/SortExecutorFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/SortExecutorFactory.java @@ -64,10 +64,7 @@ private synchronized List createAllExecutors(ExecutionContext context) List orderBys = ExecUtils.convertFrom(sortList); SortExec sortExec = new SortExec(dataTypeList, orderBys, context, this.spillerFactory); - sortExec.setId(sort.getRelatedId()); - if 
(context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(sort, sortExec, context); - } + registerRuntimeStat(sortExec, sort, context); executors.add(sortExec); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/SortMergeJoinFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/SortMergeJoinFactory.java index 8f8bd4dc2..c47c2881e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/SortMergeJoinFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/SortMergeJoinFactory.java @@ -77,10 +77,7 @@ public Executor createExecutor(ExecutionContext context, int index) { new SortMergeJoinExec(outer, inner, join.getJoinType(), maxOneRow, joinKeys, columnIsAscending, otherCondition, antiJoinOperands, context); - ret.setId(join.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(join, ret, context); - } + registerRuntimeStat(ret, join, context); return ret; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/SortWindowFramesExecFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/SortWindowFramesExecFactory.java new file mode 100644 index 000000000..f429907eb --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/SortWindowFramesExecFactory.java @@ -0,0 +1,256 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.mpp.operator.factory; + +import com.alibaba.druid.util.StringUtils; +import com.alibaba.polardbx.executor.operator.Executor; +import com.alibaba.polardbx.executor.operator.NonFrameOverWindowExec; +import com.alibaba.polardbx.executor.operator.OverWindowFramesExec; +import com.alibaba.polardbx.executor.operator.frame.OverWindowFrame; +import com.alibaba.polardbx.executor.operator.frame.RangeSlidingOverFrame; +import com.alibaba.polardbx.executor.operator.frame.RangeUnboundedFollowingOverFrame; +import com.alibaba.polardbx.executor.operator.frame.RangeUnboundedPrecedingOverFrame; +import com.alibaba.polardbx.executor.operator.frame.RowSlidingOverFrame; +import com.alibaba.polardbx.executor.operator.frame.RowUnboundedFollowingOverFrame; +import com.alibaba.polardbx.executor.operator.frame.RowUnboundedPrecedingOverFrame; +import com.alibaba.polardbx.executor.operator.frame.UnboundedOverFrame; +import com.alibaba.polardbx.executor.operator.util.AggregateUtils; +import com.alibaba.polardbx.executor.utils.OrderByOption; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.rel.SortWindow; +import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; +import com.alibaba.polardbx.optimizer.utils.CalciteUtils; +import com.alibaba.polardbx.statistics.RuntimeStatHelper; +import com.google.common.collect.ImmutableList; +import org.apache.calcite.rel.RelFieldCollation; +import org.apache.calcite.rel.core.Window; +import org.apache.calcite.rex.RexInputRef; +import org.apache.calcite.rex.RexLiteral; +import org.apache.calcite.rex.RexNode; +import org.apache.calcite.rex.RexWindowBound; +import org.apache.calcite.sql.SqlOperator; +import org.apache.calcite.sql.fun.SqlStdOperatorTable; +import org.apache.calcite.util.ImmutableBitSet; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +/** + * @author hongxi.chx + */ +public class SortWindowFramesExecFactory extends ExecutorFactory { + + private final SortWindow overWindow; + private final List columnMetas; + + private final int parallelism; + + private List executors = new ArrayList<>(); + + public SortWindowFramesExecFactory(SortWindow overWindow, ExecutorFactory executorFactory, int parallelism) { + this.overWindow = overWindow; + this.columnMetas = CalciteUtils.getTypes(overWindow.getRowType()); + this.parallelism = parallelism; + addInput(executorFactory); + } + + @Override + public Executor createExecutor(ExecutionContext context, int index) { + createAllExecutors(context); + return executors.get(index); + } + + @Override + public List getAllExecutors(ExecutionContext context) { + return createAllExecutors(context); + } + + public synchronized List createAllExecutors(ExecutionContext context) { + + if (executors.isEmpty()) { + for (int k = 0; k < parallelism; k++) { + Executor exec = createOverWindowExec(context, k); + registerRuntimeStat(exec, overWindow, context); + executors.add(exec); + } + } + return executors; + } + + private Executor createOverWindowExec(ExecutionContext context, int index) { + ImmutableList aggCalls = overWindow.groups.get(0).aggCalls; + SqlOperator operator = aggCalls.get(0).getOperator(); + if (isNoFrameWindowFunction(operator)) { + return 
createNonFrameWindowExec(overWindow, context, index); + } + Executor input = getInputs().get(0).createExecutor(context, index); + ImmutableBitSet gp = overWindow.groups.get(0).keys; + List list = gp.asList(); + int[] groups = new int[list.size()]; + for (int i = 0, n = list.size(); i < n; i++) { + groups[i] = list.get(i); + } + MemoryAllocatorCtx memoryAllocator = context.getMemoryPool().getMemoryAllocatorCtx(); + + OverWindowFrame[] overWindowFrames = new OverWindowFrame[overWindow.groups.size()]; + for (int i = 0; i < overWindow.groups.size(); i++) { + Window.Group group = overWindow.groups.get(i); + List aggregators = AggregateUtils + .convertAggregators(group.getAggregateCalls(overWindow), context, memoryAllocator); + if (group.aggCalls.stream().anyMatch(t -> isUnboundedFrameWindowFunction(t.getOperator()))) { + overWindowFrames[i] = new UnboundedOverFrame(aggregators.toArray(new Aggregator[0])); + } else if (group.lowerBound.isUnbounded() && group.upperBound.isUnbounded()) { + overWindowFrames[i] = new UnboundedOverFrame(aggregators.toArray(new Aggregator[0])); + } else if (group.isRows) { + overWindowFrames[i] = createRowFrame(overWindow, group, aggregators); + } else { + overWindowFrames[i] = createRangeFrame(overWindow, group, aggregators); + } + } + OverWindowFramesExec overWindowFramesExec = + new OverWindowFramesExec(input, context, overWindowFrames, list, columnMetas); + registerRuntimeStat(overWindowFramesExec, overWindow, context); + return overWindowFramesExec; + } + + private boolean isNoFrameWindowFunction(SqlOperator method) { + return method == SqlStdOperatorTable.RANK || method == SqlStdOperatorTable.DENSE_RANK + || method == SqlStdOperatorTable.ROW_NUMBER; + } + + private boolean isUnboundedFrameWindowFunction(SqlOperator method) { + return method == SqlStdOperatorTable.CUME_DIST || method == SqlStdOperatorTable.PERCENT_RANK; + } + + private com.alibaba.polardbx.executor.operator.Executor createNonFrameWindowExec(SortWindow overWindow, + ExecutionContext context, + int index) { + Executor input = getInputs().get(0).createExecutor(context, index); + MemoryAllocatorCtx memoryAllocator = context.getMemoryPool().getMemoryAllocatorCtx(); + + List aggregators = AggregateUtils + .convertAggregators(overWindow.groups.get(0).getAggregateCalls(overWindow), context, memoryAllocator); + + ImmutableBitSet gp = overWindow.groups.get(0).keys; + List list = gp.asList(); + int[] groups = list.stream().mapToInt(Integer::intValue).toArray(); + + boolean[] peer = new boolean[aggregators.size()]; + for (int i = 0; i < aggregators.size(); i++) { + Window.Group group = overWindow.groups.get(0); + if (group.lowerBound.isCurrentRow() && group.upperBound.isCurrentRow()) { + peer[i] = true; + } else { + peer[i] = false; + } + } + List dataTypes = CalciteUtils.getTypes(overWindow.getRowType()); + return new NonFrameOverWindowExec(input, context, aggregators, + Arrays.stream(groups).boxed().collect(Collectors.toList()), peer, dataTypes); + } + + private OverWindowFrame createRowFrame(SortWindow overWindow, SortWindow.Group group, + List aggregators) { + if (group.lowerBound.isUnbounded()) { + RexWindowBound upperBound = group.upperBound; + int offset = + getConstant(upperBound, overWindow.constants, overWindow.getInput().getRowType().getFieldCount()); + return new RowUnboundedPrecedingOverFrame(aggregators, offset); + } else if (overWindow.groups.get(0).upperBound.isUnbounded()) { + RexWindowBound lowerBound = overWindow.groups.get(0).lowerBound; + int offset = + getConstant(lowerBound, 
overWindow.constants, overWindow.getInput().getRowType().getFieldCount()); + return new RowUnboundedFollowingOverFrame(aggregators, offset); + } else { + RexWindowBound lowerBound = overWindow.groups.get(0).lowerBound; + RexWindowBound upperBound = overWindow.groups.get(0).upperBound; + int lowerOffset = + getConstant(lowerBound, overWindow.constants, overWindow.getInput().getRowType().getFieldCount()); + int upperOffset = + getConstant(upperBound, overWindow.constants, overWindow.getInput().getRowType().getFieldCount()); + return new RowSlidingOverFrame(aggregators, lowerOffset, upperOffset); + } + } + + private int getConstant(RexWindowBound bound, ImmutableList<RexLiteral> constants, int offset) { + if (bound.isCurrentRow()) { + return 0; + } + RexNode boundOffset = bound.getOffset(); + assert boundOffset instanceof RexInputRef; + if (!(boundOffset instanceof RexInputRef)) { + throw new IllegalArgumentException( + "bound offset must be an instance of RexInputRef, but was " + boundOffset.getClass().getName() + "."); + } + RexLiteral rexLiteral = constants + .get(((RexInputRef) boundOffset).getIndex() - offset); + Object value2 = rexLiteral.getValue2(); + if (!StringUtils.isNumber(String.valueOf(value2))) { + throw new IllegalArgumentException("bound index must be a digit, but was " + value2.toString() + "."); + } + return StringUtils.stringToInteger( + String.valueOf(value2)); + } + + private OverWindowFrame createRangeFrame(SortWindow overWindow, SortWindow.Group group, + List<Aggregator> aggregators) { + List<RelFieldCollation> sortList = group.orderKeys.getFieldCollations(); + List<OrderByOption> orderBys = new ArrayList<>(sortList.size()); + if (sortList != null) { + for (int i = 0, n = sortList.size(); i < n; i++) { + RelFieldCollation field = sortList.get(i); + orderBys.add(new OrderByOption(field.getFieldIndex(), field.direction, field.nullDirection)); + } + } + if (orderBys.size() != 1) { + throw new IllegalArgumentException( + "RANGE N PRECEDING/FOLLOWING frame requires exactly one ORDER BY expression"); + } + OrderByOption orderByOption = orderBys.get(0); + int index = orderByOption.getIndex(); + DataType dataType = + DataTypeUtil.calciteToDrdsType(overWindow.getInput().getRowType().getFieldList().get(index).getType()); + if (group.lowerBound.isUnbounded()) { + RexWindowBound upperBound = group.upperBound; + int offset = + getConstant(upperBound, overWindow.constants, overWindow.getInput().getRowType().getFieldCount()); + return new RangeUnboundedPrecedingOverFrame(aggregators, offset, + index, orderByOption.isAsc(), + dataType); + } else if (overWindow.groups.get(0).upperBound.isUnbounded()) { + RexWindowBound lowerBound = overWindow.groups.get(0).lowerBound; + int offset = + getConstant(lowerBound, overWindow.constants, overWindow.getInput().getRowType().getFieldCount()); + return new RangeUnboundedFollowingOverFrame(aggregators, offset, index, orderByOption.isAsc(), dataType); + } else { + RexWindowBound lowerBound = overWindow.groups.get(0).lowerBound; + RexWindowBound upperBound = overWindow.groups.get(0).upperBound; + int lowerOffset = + getConstant(lowerBound, overWindow.constants, overWindow.getInput().getRowType().getFieldCount()); + int upperOffset = + getConstant(upperBound, overWindow.constants, overWindow.getInput().getRowType().getFieldCount()); + return new RangeSlidingOverFrame(aggregators, lowerOffset, upperOffset, index, orderByOption.isAsc(), + dataType); + } + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/TopNExecutorFactory.java
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/TopNExecutorFactory.java index 98f7dc9bb..f74896546 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/TopNExecutorFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/TopNExecutorFactory.java @@ -71,11 +71,16 @@ private synchronized List createAllExecutors(ExecutionContext context) skip = getRexParam(topN.offset, params); } } + long topSize = skip + fetch; + if (skip > 0 && fetch > 0 && topSize < 0) { + topSize = Long.MAX_VALUE; + } + for (int j = 0; j < parallelism; j++) { List sortList = topN.getCollation().getFieldCollations(); List orderBys = ExecUtils.convertFrom(sortList); - Executor exec = new SpilledTopNExec(dataTypeList, orderBys, skip + fetch, context, spillerFactory); + Executor exec = new SpilledTopNExec(dataTypeList, orderBys, topSize, context, spillerFactory); exec.setId(topN.getRelatedId()); if (context.getRuntimeStatistics() != null) { RuntimeStatHelper.registerStatForExec(topN, exec, context); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ValueExecutorFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ValueExecutorFactory.java index 829e53c20..bbc151c6b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ValueExecutorFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/operator/factory/ValueExecutorFactory.java @@ -36,10 +36,8 @@ public ValueExecutorFactory(LogicalValues logicalValues) { public Executor createExecutor(ExecutionContext context, int index) { int count = ExecUtils.getTupleCount(logicalValues, context); Executor exec = new ValueExec(count, CalciteUtils.getTypes(logicalValues.getRowType()), context); - exec.setId(logicalValues.getRelatedId()); - if (context.getRuntimeStatistics() != null) { - RuntimeStatHelper.registerStatForExec(logicalValues, exec, context); - } + registerRuntimeStat(exec, logicalValues, context); return exec; } + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/FragmentRFItem.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/FragmentRFItem.java new file mode 100644 index 000000000..e62f1e228 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/FragmentRFItem.java @@ -0,0 +1,66 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.mpp.planner; + +import com.alibaba.polardbx.common.utils.bloomfilter.RFBloomFilter; +import com.alibaba.polardbx.executor.operator.ColumnarScanExec; +import com.alibaba.polardbx.executor.operator.Synchronizer; + +import java.util.List; + +public interface FragmentRFItem { + FragmentRFManager.RFType getRFType(); + + FragmentRFManager getManager(); + + boolean useXXHashInBuild(); + + boolean useXXHashInFilter(); + + String getBuildColumnName(); + + String getProbeColumnName(); + + void setBuildSideChannel(int buildSideChannel); + + void setSourceFilterChannel(int sourceFilterChannel); + + void setSourceRefInFile(int sourceRefInFile); + + int getBuildSideChannel(); + + int getSourceFilterChannel(); + + int getSourceRefInFile(); + + /** + * Register this item with several Synchronizer objects so that they can share the Runtime Filter Merger. + * + * @param ordinal the ordinal of this item in the build side channels. + * @param buildSideParallelism the total DOP on the build side for the shared Runtime Filter Merger. + * @param synchronizerList the Synchronizer objects that share the Runtime Filter Merger. + */ + void registerBuilder(int ordinal, int buildSideParallelism, Synchronizer... synchronizerList); + + void registerSource(ColumnarScanExec columnarScanExec); + + List<ColumnarScanExec> getRegisteredSource(); + + void assignRF(RFBloomFilter[] bloomFilters); + + RFBloomFilter[] getRF(); +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/FragmentRFItemImpl.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/FragmentRFItemImpl.java new file mode 100644 index 000000000..e15b6ea90 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/FragmentRFItemImpl.java @@ -0,0 +1,174 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.alibaba.polardbx.executor.mpp.planner; + +import com.alibaba.polardbx.common.utils.bloomfilter.RFBloomFilter; +import com.alibaba.polardbx.executor.operator.ColumnarScanExec; +import com.alibaba.polardbx.executor.operator.Synchronizer; +import com.alibaba.polardbx.executor.operator.SynchronizerRFMerger; +import com.google.common.base.Preconditions; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class FragmentRFItemImpl implements FragmentRFItem { + private FragmentRFManager manager; + + private final String buildColumnName; + private final String probeColumnName; + + private final boolean useXXHashInBuild; + private final boolean useXXHashInFilter; + private final FragmentRFManager.RFType rfType; + + // The block channel of build side + private int buildSideChannel; + private int sourceFilterChannel; + private int sourceRefInFile; + + private volatile RFBloomFilter[] bloomFilters; + private volatile Synchronizer[] synchronizerList; + private List sourceList; + + public FragmentRFItemImpl(FragmentRFManager manager, String buildColumnName, String probeColumnName, + boolean useXXHashInBuild, boolean useXXHashInFilter, + FragmentRFManager.RFType rfType) { + this.manager = manager; + + this.buildColumnName = buildColumnName; + this.probeColumnName = probeColumnName; + this.useXXHashInBuild = useXXHashInBuild; + this.useXXHashInFilter = useXXHashInFilter; + this.rfType = rfType; + + this.sourceList = new ArrayList<>(); + + this.buildSideChannel = -1; + this.sourceFilterChannel = -1; + } + + @Override + public FragmentRFManager.RFType getRFType() { + return rfType; + } + + @Override + public FragmentRFManager getManager() { + return manager; + } + + @Override + public boolean useXXHashInBuild() { + return useXXHashInBuild; + } + + @Override + public boolean useXXHashInFilter() { + return useXXHashInFilter; + } + + @Override + public String getBuildColumnName() { + return buildColumnName; + } + + @Override + public String getProbeColumnName() { + return probeColumnName; + } + + @Override + public void setBuildSideChannel(int buildSideChannel) { + this.buildSideChannel = buildSideChannel; + } + + @Override + public void setSourceFilterChannel(int sourceFilterChannel) { + this.sourceFilterChannel = sourceFilterChannel; + } + + @Override + public void setSourceRefInFile(int sourceRefInFile) { + this.sourceRefInFile = sourceRefInFile; + } + + @Override + public int getBuildSideChannel() { + return this.buildSideChannel; + } + + @Override + public int getSourceFilterChannel() { + return this.sourceFilterChannel; + } + + @Override + public int getSourceRefInFile() { + return sourceRefInFile; + } + + @Override + public void registerBuilder(int ordinal, int buildSideParallelism, Synchronizer... 
synchronizerList) { + SynchronizerRFMerger merger = new SynchronizerRFMerger( + this.getManager(), this, buildSideParallelism, ordinal); + + for (Synchronizer synchronizer : synchronizerList) { + synchronizer.putSynchronizerRFMerger(ordinal, merger); + } + + this.synchronizerList = synchronizerList; + } + + @Override + public void registerSource(ColumnarScanExec columnarScanExec) { + sourceList.add(columnarScanExec); + columnarScanExec.setFragmentRFManager(this.getManager()); + } + + @Override + public List getRegisteredSource() { + return sourceList; + } + + @Override + public void assignRF(RFBloomFilter[] bloomFilters) { + Preconditions.checkArgument(this.bloomFilters == null); + this.bloomFilters = bloomFilters; + } + + @Override + public RFBloomFilter[] getRF() { + return bloomFilters; + } + + @Override + public String toString() { + return "FragmentRFItemImpl{" + + " buildColumnName='" + buildColumnName + '\'' + + ", probeColumnName='" + probeColumnName + '\'' + + ", useXXHashInBuild=" + useXXHashInBuild + + ", useXXHashInFilter=" + useXXHashInFilter + + ", rfType=" + rfType + + ", buildSideChannel=" + buildSideChannel + + ", sourceFilterChannel=" + sourceFilterChannel + + ", sourceRefInFile=" + sourceRefInFile + + ", synchronizerList=" + Arrays.toString(synchronizerList) + + ", sourceList=" + sourceList + + '}'; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/FragmentRFItemKey.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/FragmentRFItemKey.java new file mode 100644 index 000000000..4391f4f7f --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/FragmentRFItemKey.java @@ -0,0 +1,182 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
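Note: together, `FragmentRFItem` and `FragmentRFItemImpl` define the per-column runtime-filter contract: the planner fixes the build/probe channels, the build side registers a shared `SynchronizerRFMerger`, and columnar scans subscribe as filter consumers. The sketch below shows how the pieces might be wired; it follows the constructor and method signatures added in this patch, but every literal value and column name is made up, and `SimpleFragmentRFManager` is the implementation introduced later in this same change.

```java
import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItem;
import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItemImpl;
import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItemKey;
import com.alibaba.polardbx.executor.mpp.planner.FragmentRFManager;
import com.alibaba.polardbx.executor.mpp.planner.SimpleFragmentRFManager;

public class FragmentRFWiringSketch {
    public static void main(String[] args) {
        // Fragment-wide thresholds; all values are illustrative.
        FragmentRFManager manager = new SimpleFragmentRFManager(
            16,           // totalPartitionCount: partitions of the join-key table
            4,            // partitionsOfNode: partitions assigned to this worker
            0.03,         // defaultFpp
            10_000_000L,  // rowUpperBound
            1_000L,       // rowLowerBound
            0.25,         // filterRatioThreshold
            8);           // rfSampleCount

        // One item per eligible equi-join column pair ("order_id"/"o_id" are invented names).
        FragmentRFItem item = new FragmentRFItemImpl(
            manager, "order_id", "o_id",
            true,  // useXXHashInBuild
            true,  // useXXHashInFilter
            FragmentRFManager.RFType.LOCAL);
        item.setBuildSideChannel(0);     // block channel of the build-side key
        item.setSourceFilterChannel(2);  // channel to be filtered on the probe/scan side

        manager.addItem(new FragmentRFItemKey("order_id", "o_id", 0, 2), item);

        // At execution time the build side calls item.registerBuilder(ordinal, dop, synchronizers),
        // each probe-side ColumnarScanExec calls item.registerSource(scan), and once the bloom
        // filters are built, assignRF(...) publishes them to every registered source.
    }
}
```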
+ */
+
+package com.alibaba.polardbx.executor.mpp.planner;
+
+import com.alibaba.polardbx.optimizer.core.rel.HashJoin;
+import com.alibaba.polardbx.optimizer.core.rel.SemiHashJoin;
+import com.google.common.base.Preconditions;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Join;
+import org.apache.calcite.rel.core.JoinInfo;
+import org.apache.calcite.rel.core.JoinRelType;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.sql.type.SqlTypeUtil;
+import org.apache.calcite.util.ImmutableIntList;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+public class FragmentRFItemKey {
+    private final String buildColumnName;
+    private final String probeColumnName;
+    private final int buildIndex;
+    private final int probeIndex;
+
+    public FragmentRFItemKey(String buildColumnName, String probeColumnName, int buildIndex, int probeIndex) {
+        this.buildColumnName = buildColumnName;
+        this.probeColumnName = probeColumnName;
+        this.buildIndex = buildIndex;
+        this.probeIndex = probeIndex;
+    }
+
+    /**
+     * Get the list of FragmentRFItemKey objects sorted by the buildIndex field.
+     *
+     * @param join the RelNode of the join.
+     */
+    public static List buildItemKeys(Join join) {
+        boolean isOuterBuild = (join instanceof HashJoin && ((HashJoin) join).isOuterBuild())
+            || (join instanceof SemiHashJoin && ((SemiHashJoin) join).isOuterBuild());
+
+        // Runtime filter is not supported for anti join without reverse.
+        if (join.getJoinType() == JoinRelType.ANTI && !isOuterBuild) {
+            // empty set.
+            return new ArrayList<>();
+        }
+
+        RelNode buildNode = join.getInner();
+        RelNode probeNode = join.getOuter();
+
+        RexNode equalCondition = null;
+        if (join instanceof HashJoin) {
+            equalCondition = ((HashJoin) join).getEqualCondition();
+        } else if (join instanceof SemiHashJoin) {
+            equalCondition = ((SemiHashJoin) join).getEqualCondition();
+        }
+        JoinInfo joinInfo = JoinInfo.of(join.getLeft(), join.getRight(), equalCondition);
+
+        ImmutableIntList buildKeys, probeKeys;
+        if (buildNode == join.getLeft()) {
+            buildKeys = joinInfo.leftKeys;
+            probeKeys = joinInfo.rightKeys;
+        } else {
+            buildKeys = joinInfo.rightKeys;
+            probeKeys = joinInfo.leftKeys;
+        }
+
+        Preconditions.checkArgument(buildKeys.size() == probeKeys.size());
+
+        RelDataType buildType = buildNode.getRowType();
+        RelDataType probeType = probeNode.getRowType();
+
+        List buildColumnNames =
+            buildKeys.stream()
+                .map(k -> buildType.getFieldNames().get(k))
+                .collect(Collectors.toList());
+
+        List probeColumnNames =
+            probeKeys.stream()
+                .map(k -> probeType.getFieldNames().get(k))
+                .collect(Collectors.toList());
+
+        List itemKeys = new ArrayList<>();
+
+        for (int i = 0; i < buildKeys.size(); i++) {
+            // Check whether the data types of the build and probe keys are integer types and equal.
+            RelDataType buildDataType = buildType.getFieldList().get(buildKeys.get(i)).getType();
+            RelDataType probeDataType = probeType.getFieldList().get(probeKeys.get(i)).getType();
+
+            if (SqlTypeUtil.isIntType(buildDataType) && SqlTypeUtil.isIntType(probeDataType)
+                && buildDataType.getSqlTypeName() == probeDataType.getSqlTypeName()) {
+
+                if (!isOuterBuild) {
+                    // For the non-reversed join.
+                    itemKeys.add(new FragmentRFItemKey(
+                        buildColumnNames.get(i),
+                        probeColumnNames.get(i),
+                        buildKeys.get(i),
+                        probeKeys.get(i)
+                    ));
+
+                } else {
+                    // For reversed join, swap the build/probe sides.
+ itemKeys.add(new FragmentRFItemKey( + probeColumnNames.get(i), + buildColumnNames.get(i), + probeKeys.get(i), + buildKeys.get(i) + )); + + } + + } + } + + Collections.sort(itemKeys, Comparator.comparing(FragmentRFItemKey::getBuildIndex)); + + return itemKeys; + } + + public String getBuildColumnName() { + return buildColumnName; + } + + public String getProbeColumnName() { + return probeColumnName; + } + + public int getBuildIndex() { + return buildIndex; + } + + public int getProbeIndex() { + return probeIndex; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + FragmentRFItemKey itemKey = (FragmentRFItemKey) o; + return buildIndex == itemKey.buildIndex && probeIndex == itemKey.probeIndex && Objects.equals( + buildColumnName, itemKey.buildColumnName) && Objects.equals(probeColumnName, itemKey.probeColumnName); + } + + @Override + public int hashCode() { + return Objects.hash(buildColumnName, probeColumnName, buildIndex, probeIndex); + } + + @Override + public String toString() { + return "FragmentRFItemKey{" + + "buildColumnName='" + buildColumnName + '\'' + + ", probeColumnName='" + probeColumnName + '\'' + + ", buildIndex=" + buildIndex + + ", probeIndex=" + probeIndex + + '}'; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/FragmentRFManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/FragmentRFManager.java new file mode 100644 index 000000000..096659899 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/FragmentRFManager.java @@ -0,0 +1,47 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
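Note: because `equals`/`hashCode` cover all four fields, `FragmentRFItemKey` is safe to use as the key of the manager's item map, and `buildItemKeys` returns its keys ordered by `buildIndex`. A small sketch of both properties (all values invented):

```java
import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItemKey;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ItemKeySketch {
    public static void main(String[] args) {
        // Structural equality: a freshly constructed key finds the entry stored under an equal key.
        Map<FragmentRFItemKey, String> items = new HashMap<>();
        items.put(new FragmentRFItemKey("order_id", "o_id", 1, 3), "rf-item-1");
        System.out.println(items.get(new FragmentRFItemKey("order_id", "o_id", 1, 3))); // rf-item-1

        // The same ordering buildItemKeys applies before returning its result.
        List<FragmentRFItemKey> keys = new ArrayList<>(Arrays.asList(
            new FragmentRFItemKey("b2", "p2", 5, 4),
            new FragmentRFItemKey("b1", "p1", 2, 0)));
        keys.sort(Comparator.comparing(FragmentRFItemKey::getBuildIndex));
        System.out.println(keys.get(0).getBuildIndex()); // 2
    }
}
```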
+ */ + +package com.alibaba.polardbx.executor.mpp.planner; + +import java.util.Map; + +/** + * PlanFragment-level runtime filter manager + */ +public interface FragmentRFManager { + + enum RFType { + LOCAL, BROADCAST + } + + Map getAllItems(); + + void addItem(FragmentRFItemKey itemKey, FragmentRFItem rfItem); + + double getDefaultFpp(); + + int getTotalPartitionCount(); + + int getPartitionsOfNode(); + + long getUpperBound(); + + long getLowerBound(); + + int getSampleCount(); + + double getFilterRatioThreshold(); +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/LocalExchange.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/LocalExchange.java index 086d87ebe..d8172a8ba 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/LocalExchange.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/LocalExchange.java @@ -16,8 +16,8 @@ package com.alibaba.polardbx.executor.mpp.planner; -import com.google.common.collect.ImmutableList; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.google.common.collect.ImmutableList; import java.util.List; @@ -26,6 +26,7 @@ public class LocalExchange { public enum LocalExchangeMode { RANDOM, PARTITION, + CHUNK_PARTITION, SINGLE, DIRECT, BORADCAST diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/NodePartitionMap.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/NodePartitionMap.java index 4520d197d..ed9f50d1d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/NodePartitionMap.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/NodePartitionMap.java @@ -16,9 +16,9 @@ package com.alibaba.polardbx.executor.mpp.planner; -import com.google.common.collect.ImmutableMap; import com.alibaba.polardbx.executor.mpp.metadata.Split; import com.alibaba.polardbx.gms.node.Node; +import com.google.common.collect.ImmutableMap; import java.util.Map; import java.util.function.ToIntFunction; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/NodePartitioningManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/NodePartitioningManager.java index cb5363ff5..ce5ef03c9 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/NodePartitioningManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/NodePartitioningManager.java @@ -16,14 +16,14 @@ package com.alibaba.polardbx.executor.mpp.planner; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.mpp.execution.scheduler.NodeSelector; -import com.alibaba.polardbx.gms.node.Node; import com.alibaba.polardbx.executor.mpp.util.Failures; +import com.alibaba.polardbx.gms.node.Node; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import javax.inject.Inject; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PartitionShuffleHandle.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PartitionShuffleHandle.java index 
67e0c10fd..b3f1098fa 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PartitionShuffleHandle.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PartitionShuffleHandle.java @@ -19,21 +19,20 @@ import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.List; import java.util.Objects; import static com.google.common.base.MoreObjects.toStringHelper; public class PartitionShuffleHandle { - private int partitionCount; private final PartitionShuffleMode partitionShuffleMode; private final boolean mergeSort; + private int fullPartCount = -1; + private int partitionCount; + private List prunePartitions; - public enum PartitionShuffleMode { - SINGLE, - FIXED, - BROADCAST - } + private boolean remotePairWise = false; public PartitionShuffleHandle( PartitionShuffleMode partitionShuffleMode, @@ -45,13 +44,19 @@ public PartitionShuffleHandle( @JsonCreator public PartitionShuffleHandle( @JsonProperty("partitionShuffleMode") - PartitionShuffleMode partitionShuffleMode, + PartitionShuffleMode partitionShuffleMode, @JsonProperty("mergeSort") - boolean mergeSort, - @JsonProperty("partitionCount") int partitionCount) { + boolean mergeSort, + @JsonProperty("remotePairWise") boolean remotePairWise, + @JsonProperty("partitionCount") int partitionCount, + @JsonProperty("fullPartCount") int fullPartCount, + @JsonProperty("prunePartitions") List prunePartitions) { this.partitionCount = partitionCount; this.mergeSort = mergeSort; + this.remotePairWise = remotePairWise; this.partitionShuffleMode = partitionShuffleMode; + this.fullPartCount = fullPartCount; + this.prunePartitions = prunePartitions; } @JsonProperty @@ -63,6 +68,33 @@ public void setPartitionCount(int partitionCount) { this.partitionCount = partitionCount; } + @JsonProperty + public int getFullPartCount() { + return fullPartCount; + } + + public void setFullPartCount(int fullPartCount) { + this.fullPartCount = fullPartCount; + } + + @JsonProperty + public boolean isRemotePairWise() { + return remotePairWise; + } + + public void setRemotePairWise(boolean remotePairWise) { + this.remotePairWise = remotePairWise; + } + + @JsonProperty + public List getPrunePartitions() { + return prunePartitions; + } + + public void setPrunePartitions(List prunePartitions) { + this.prunePartitions = prunePartitions; + } + @JsonProperty public PartitionShuffleMode getPartitionShuffleMode() { return partitionShuffleMode; @@ -86,14 +118,15 @@ public boolean equals(Object o) { return false; } PartitionShuffleHandle that = (PartitionShuffleHandle) o; - return partitionCount == that.partitionCount && + return fullPartCount == that.fullPartCount && + partitionCount == that.partitionCount && mergeSort == that.mergeSort && partitionShuffleMode == that.partitionShuffleMode; } @Override public int hashCode() { - return Objects.hash(partitionCount, partitionShuffleMode, mergeSort); + return Objects.hash(fullPartCount, partitionCount, partitionShuffleMode, mergeSort); } @Override @@ -102,6 +135,14 @@ public String toString() { .add("partitionShuffleMode", partitionShuffleMode) .add("mergeSort", mergeSort) .add("partitionCount", partitionCount) + .add("fullPartCount", fullPartCount) + .add("remotePairWise", remotePairWise) .toString(); } + + public enum PartitionShuffleMode { + SINGLE, + FIXED, + BROADCAST + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PartitioningScheme.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PartitioningScheme.java index 0ff753394..2ad18b07d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PartitioningScheme.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PartitioningScheme.java @@ -16,9 +16,9 @@ package com.alibaba.polardbx.executor.mpp.planner; +import com.alibaba.polardbx.executor.utils.OrderByOption; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -import com.alibaba.polardbx.executor.utils.OrderByOption; import java.util.List; @@ -35,7 +35,7 @@ public PartitioningScheme( @JsonProperty("partChannels") List partChannels, @JsonProperty("orderByOptions") List orderByOptions, @JsonProperty("shuffleHandle") - PartitionShuffleHandle shuffleHandle) { + PartitionShuffleHandle shuffleHandle) { this.partChannels = partChannels; this.orderByOptions = orderByOptions; this.shuffleHandle = shuffleHandle; @@ -64,6 +64,22 @@ public void setPartitionCount(int partitionCount) { shuffleHandle.setPartitionCount(partitionCount); } + public boolean isRemotePairWise() { + return shuffleHandle.isRemotePairWise(); + } + + public List getPrunePartitions() { + return shuffleHandle.getPrunePartitions(); + } + + public void setPrunePartitions(List prunePartitions) { + shuffleHandle.setPrunePartitions(prunePartitions); + } + + public int getFullPartCount() { + return shuffleHandle.getFullPartCount(); + } + public PartitionShuffleHandle.PartitionShuffleMode getPartitionMode() { return shuffleHandle.getPartitionShuffleMode(); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PipelineFragment.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PipelineFragment.java index 49b7634e0..9562b3e7c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PipelineFragment.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PipelineFragment.java @@ -16,11 +16,12 @@ package com.alibaba.polardbx.executor.mpp.planner; -import com.google.common.base.Preconditions; import com.alibaba.polardbx.executor.mpp.split.SplitInfo; +import com.alibaba.polardbx.executor.operator.ParallelHashJoinExec; import com.alibaba.polardbx.optimizer.core.rel.LogicalView; import com.alibaba.polardbx.optimizer.utils.RelUtils; import com.alibaba.polardbx.util.MoreObjects; +import com.google.common.base.Preconditions; import org.apache.calcite.rel.RelNode; import java.util.ArrayList; @@ -31,6 +32,8 @@ public class PipelineFragment { + protected FragmentRFManager fragmentRFManager; + protected int parallelism; protected List logicalViews = new ArrayList<>(); protected boolean buildDepOnAllConsumers = false; @@ -58,6 +61,14 @@ public PipelineFragment(int parallelism, RelNode root, Set dependency) this.properties.setRoot(root); } + public FragmentRFManager getFragmentRFManager() { + return fragmentRFManager; + } + + public void setFragmentRFManager(FragmentRFManager fragmentRFManager) { + this.fragmentRFManager = fragmentRFManager; + } + public void addChild(PipelineFragment child) { properties.addChild(child.properties.getRoot().getRelatedId(), child.getProperties()); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PlanFragment.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PlanFragment.java index c6b1556c2..da18d76eb 100644 --- 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PlanFragment.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PlanFragment.java @@ -29,25 +29,26 @@ */ package com.alibaba.polardbx.executor.mpp.planner; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.google.common.collect.ImmutableList; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; -import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.util.MoreObjects; import com.alibaba.polardbx.optimizer.PlannerContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.planner.SqlConverter; import com.alibaba.polardbx.optimizer.planmanager.PlanManagerUtil; import com.alibaba.polardbx.optimizer.utils.CalciteUtils; import com.alibaba.polardbx.optimizer.utils.RelUtils; +import com.alibaba.polardbx.util.MoreObjects; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.collect.ImmutableList; import org.apache.calcite.rel.RelNode; import java.util.ArrayList; import java.util.List; +import java.util.Map; import static java.util.Objects.requireNonNull; @@ -79,28 +80,50 @@ public class PlanFragment { private Integer bkaJoinParallelism = -1; + private Boolean localBloomFilter = false; + + private Boolean localPairWise; + + private final Boolean remotePairWise; + + private Integer localPartitionCount = -1; + private Integer totalPartitionCount = -1; + + // collect the logical table name and its split count. 
+ private Map splitCountMap; + + private final boolean pruneExchangePartition; + public PlanFragment( Integer id, RelNode root, List outputTypes, PartitionHandle partitioning, + Boolean remotePairWise, List partitionSources, List expandSources, PartitioningScheme partitioningScheme, Integer bkaJoinParallelism, List consumeFilterIds, - List produceFilterIds) { + List produceFilterIds, + Boolean localPairWise, + Map splitCountMap, + boolean pruneExchangePartition) { this.id = id; this.root = root; this.partitioning = partitioning; this.outputTypes = outputTypes; this.partitionSources = partitionSources; this.partitioningScheme = partitioningScheme; + this.remotePairWise = remotePairWise; this.rootId = root.getRelatedId(); this.expandSources.addAll(expandSources); this.consumeFilterIds.addAll(consumeFilterIds); this.produceFilterIds.addAll(produceFilterIds); this.bkaJoinParallelism = bkaJoinParallelism; + this.localPairWise = localPairWise; + this.splitCountMap = splitCountMap; + this.pruneExchangePartition = pruneExchangePartition; } @JsonCreator @@ -111,24 +134,47 @@ public PlanFragment( @JsonProperty("partitionedSources") List partitionSources, @JsonProperty("partitioningScheme") PartitioningScheme partitioningScheme, @JsonProperty("partitioning") PartitionHandle partitioning, + @JsonProperty("remotePairWise") Boolean remotePairWise, @JsonProperty("rootId") Integer rootId, @JsonProperty("driverParallelism") Integer driverParallelism, @JsonProperty("prefetch") Integer prefetch, @JsonProperty("bkaJoinParallelism") Integer bkaJoinParallelism, @JsonProperty("consumeFilterIds") List consumeFilterIds, - @JsonProperty("produceFilterIds") List produceFilterIds) { + @JsonProperty("produceFilterIds") List produceFilterIds, + @JsonProperty("localBloomFilter") Boolean localBloomFilter, + @JsonProperty("localPairWise") Boolean localPairWise, + @JsonProperty("localPartitionCount") Integer localPartitionCount, + @JsonProperty("totalPartitionCount") Integer totalPartitionCount, + @JsonProperty("splitCountMap") Map splitCountMap, + @JsonProperty("pruneExchangePartition") boolean pruneExchangePartition) { this.id = requireNonNull(id, "id is null"); this.relNodeJson = requireNonNull(relNodeJson, "relNodeJson is null"); this.outputTypes = outputTypes; this.partitionSources = partitionSources; this.partitioning = partitioning; this.partitioningScheme = partitioningScheme; + this.remotePairWise = remotePairWise; this.rootId = rootId; this.driverParallelism = driverParallelism; this.prefetch = prefetch; this.bkaJoinParallelism = bkaJoinParallelism; this.consumeFilterIds = consumeFilterIds; this.produceFilterIds = produceFilterIds; + this.localBloomFilter = localBloomFilter; + this.localPairWise = localPairWise; + this.localPartitionCount = localPartitionCount; + this.totalPartitionCount = totalPartitionCount; + this.splitCountMap = splitCountMap; + this.pruneExchangePartition = pruneExchangePartition; + } + + @JsonProperty + public Map getSplitCountMap() { + return splitCountMap; + } + + public void setSplitCountMap(Map splitCountMap) { + this.splitCountMap = splitCountMap; } @JsonProperty @@ -168,6 +214,52 @@ public void setBkaJoinParallelism(Integer bkaJoinParallelism) { this.bkaJoinParallelism = bkaJoinParallelism; } + @JsonProperty + public Boolean isLocalBloomFilter() { + return localBloomFilter; + } + + public void setLocalBloomFilter(boolean localBloomFilter) { + this.localBloomFilter = localBloomFilter; + } + + @JsonProperty + public Boolean isLocalPairWise() { + return localPairWise; + } + + public void 
setLocalPairWise(Boolean localPairWise) { + this.localPairWise = localPairWise; + } + + @JsonProperty + public Integer getLocalPartitionCount() { + return localPartitionCount; + } + + public void setLocalPartitionCount(Integer localPartitionCount) { + this.localPartitionCount = localPartitionCount; + } + + @JsonProperty + public Integer getTotalPartitionCount() { + return totalPartitionCount; + } + + public void setTotalPartitionCount(Integer totalPartitionCount) { + this.totalPartitionCount = totalPartitionCount; + } + + @JsonProperty + public Boolean isRemotePairWise() { + return remotePairWise; + } + + @JsonProperty + public boolean isPruneExchangePartition() { + return pruneExchangePartition; + } + @JsonProperty public Integer getId() { return id; @@ -279,6 +371,7 @@ public String toString() { .add("id", id) .add("plan", relNodeJson) .add("partitioningScheme", partitioningScheme) + .add("pruneExchangePartition", pruneExchangePartition) .toString(); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PlanFragmenter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PlanFragmenter.java index 361b17179..dbb838795 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PlanFragmenter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PlanFragmenter.java @@ -22,18 +22,25 @@ import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.config.ConfigDataMode; import com.alibaba.polardbx.executor.mpp.Session; +import com.alibaba.polardbx.executor.mpp.deploy.ServiceProvider; import com.alibaba.polardbx.executor.mpp.operator.LocalExecutionPlanner; +import com.alibaba.polardbx.executor.mpp.split.OssSplit; import com.alibaba.polardbx.executor.mpp.split.SplitInfo; import com.alibaba.polardbx.executor.mpp.split.SplitManager; +import com.alibaba.polardbx.executor.mpp.split.SplitManagerImpl; import com.alibaba.polardbx.executor.utils.ExecUtils; import com.alibaba.polardbx.executor.utils.OrderByOption; +import com.alibaba.polardbx.gms.config.impl.InstConfUtil; import com.alibaba.polardbx.optimizer.core.rel.BKAJoin; import com.alibaba.polardbx.optimizer.core.rel.BaseTableOperation; import com.alibaba.polardbx.optimizer.core.rel.Limit; import com.alibaba.polardbx.optimizer.core.rel.LogicalView; import com.alibaba.polardbx.optimizer.core.rel.MergeSort; +import com.alibaba.polardbx.optimizer.core.rel.OSSTableScan; import com.alibaba.polardbx.optimizer.core.rel.SemiBKAJoin; +import com.alibaba.polardbx.optimizer.core.rel.mpp.ColumnarExchange; import com.alibaba.polardbx.optimizer.core.rel.mpp.MppExchange; import com.alibaba.polardbx.optimizer.workload.WorkloadUtil; import com.google.common.base.Preconditions; @@ -51,6 +58,7 @@ import org.apache.calcite.rel.core.Exchange; import org.apache.calcite.rel.core.Filter; import org.apache.calcite.rel.core.Join; +import org.apache.calcite.rel.core.JoinRelType; import org.apache.calcite.rel.logical.LogicalCorrelate; import org.apache.calcite.rel.logical.LogicalUnion; import org.apache.calcite.rel.logical.LogicalValues; @@ -62,8 +70,13 @@ import org.apache.calcite.sql.fun.SqlRuntimeFilterFunction; import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; 
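Note: `PlanFragment`, like `PartitionShuffleHandle` above, relies on the Jackson creator pattern: the `@JsonCreator` constructor is the single deserialization entry point and the `@JsonProperty`-annotated getters supply the field values, which is how the new flags (`remotePairWise`, `localPartitionCount`, `splitCountMap`, `pruneExchangePartition`, ...) travel with the fragment to the workers. A self-contained sketch of the pattern on a trimmed-down stand-in class, not the real handle:

```java
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;

public class ShuffleHandleSketch {
    private final boolean remotePairWise;
    private final int fullPartCount;

    @JsonCreator
    public ShuffleHandleSketch(
        @JsonProperty("remotePairWise") boolean remotePairWise,
        @JsonProperty("fullPartCount") int fullPartCount) {
        this.remotePairWise = remotePairWise;
        this.fullPartCount = fullPartCount;
    }

    @JsonProperty
    public boolean isRemotePairWise() {
        return remotePairWise;
    }

    @JsonProperty
    public int getFullPartCount() {
        return fullPartCount;
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        String json = mapper.writeValueAsString(new ShuffleHandleSketch(true, 16));
        System.out.println(json); // {"remotePairWise":true,"fullPartCount":16}
        ShuffleHandleSketch back = mapper.readValue(json, ShuffleHandleSketch.class);
        System.out.println(back.getFullPartCount()); // 16
    }
}
```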
import static com.alibaba.polardbx.executor.mpp.operator.LocalExecutionPlanner.SUPPORT_ALL_CACHE_NODES;
import static com.alibaba.polardbx.executor.mpp.operator.LocalExecutionPlanner.SUPPORT_ONE_SIDE_CACHE_NODES;
@@ -105,6 +118,8 @@ private static class Fragmenter {
        private boolean onlyUseReadInstance;
        private boolean lowConcurrencyQuery = false;

+        private SplitManager splitManager;
+
        public Fragmenter(Session session, RelNode root) {
            this.session = session;
            if (!WorkloadUtil.isApWorkload(session.getClientContext().getWorkloadType())) {
@@ -114,6 +129,7 @@ public Fragmenter(Session session, RelNode root) {
            }
            this.onlyUseReadInstance = existMppOnlyInstanceNode();
+            this.splitManager = new SplitManagerImpl();
        }

        public int getMaxConcurrentParallelism() {
@@ -171,6 +187,8 @@ protected RelNode visitChild(RelNode parent, int i, RelNode relNode, FragmentPro
        protected RelNode visitChildren(RelNode rel, FragmentProperties properties) {
            if (rel instanceof BKAJoin || rel instanceof SemiBKAJoin) {
+                // TODO: consider this when partition-wise join supports BKA join
+                properties.setPruneExchangePartition(false);
                // For BKAJoin, traverse the outer side first and simply skip traversing the inner side.
                Join join = (Join) rel;
                int outerIndex = join.getOuter() == join.getInput(0) ? 0 : 1;
@@ -193,6 +211,9 @@ public void visit(RelNode node, int ordinal, RelNode parent) {
            } else if (rel.getInputs().size() == 2) {
                Join join = (Join) rel;
+                if ((join.getJoinType() != JoinRelType.INNER) && (join.getJoinType() != JoinRelType.SEMI)) {
+                    properties.setPruneExchangePartition(false);
+                }
                int innerIndex = join.getInner() == join.getInput(0) ? 0 : 1;
                int outerIndex = join.getOuter() == join.getInput(0) ? 0 : 1;
                boolean convertBuildSide = convertBuildSide(join);
@@ -233,7 +254,22 @@ public RelNode visit(RelNode parent, RelNode other, FragmentProperties parentPro
                parentProperties.setSourceNode();
                parentProperties.updateRootCount(ExecUtils.calcRowCount((LogicalView) other));
                parentProperties.updateRootIo(ExecUtils.calcIo((LogicalView) other));
-                parentProperties.setLogicalView((LogicalView) other);
+                parentProperties.addLogicalView((LogicalView) other);
+                if (other instanceof OSSTableScan) {
+                    parentProperties.setSimpleOssScan(parent instanceof ColumnarExchange || parent == null);
+                    parentProperties.updatePairWiseInfo((OSSTableScan) other, session.getClientContext().
+                        getParamManager().getBoolean(ConnectionParams.ENABLE_LOCAL_PARTITION_WISE_JOIN));
+                    // handle case like this:
+                    //    JOIN
+                    //    |  |
+                    //   Ex  T1
+                    // When we reach the exchange, we cannot yet know that this Exchange is under a pairwise join,
+                    // but once we reach table t1 we can go back and set all the children.
+                    if (parentProperties.isRemotePairWise()) {
+                        parentProperties.getChildren().forEach(subPlan -> subPlan.getFragment().
+                            getPartitioningScheme().getShuffleHandle().setRemotePairWise(true));
+                    }
+                }
                return other;
            } else if (other instanceof LogicalValues || other instanceof DynamicValues) {
                parentProperties.setSingleTonNode();
@@ -288,7 +324,8 @@ private RelNode visitMergeSort(RelNode parent, MergeSort mergeSort, FragmentProp
        private RelNode visitExchange(RelNode parent, Exchange exchange, FragmentProperties parentProperties) {
            PartitionShuffleHandle shuffleHandle;
            boolean mergeSort =
-                exchange instanceof MppExchange ? ((MppExchange) exchange).isMergeSortExchange() : false;
+                exchange instanceof MppExchange ? ((MppExchange) exchange).isMergeSortExchange() :
+                    (exchange instanceof ColumnarExchange && ((ColumnarExchange) exchange).isMergeSortExchange());
            if (exchange.getDistribution() == RelDistributions.SINGLETON) {
                if (mergeSort) {
                    shuffleHandle =
@@ -313,13 +350,26 @@ private RelNode visitExchange(RelNode parent, Exchange exchange, FragmentPropert
                shuffleHandle =
                    new PartitionShuffleHandle(
                        PartitionShuffleHandle.PartitionShuffleMode.FIXED,
                        mergeSort);
+                shuffleHandle.setFullPartCount(exchange.getTraitSet().getDistribution().getShardCnt());
            }

            List orderBys = new ArrayList<>();
            if (mergeSort) {
-                RelCollation ret = ((MppExchange) exchange).getCollation();
+                RelCollation ret = exchange instanceof MppExchange ? ((MppExchange) exchange).getCollation() :
+                    ((ColumnarExchange) exchange).getCollation();
                List sortList = ret.getFieldCollations();
                orderBys = ExecUtils.convertFrom(sortList);
            }
+
+            // handle case like this:
+            //    JOIN
+            //    |  |
+            //   T1  Ex
+            // When we reach the exchange, we already know that this Exchange is under a pairwise join.
+            if (parentProperties.isRemotePairWise()) {
+                shuffleHandle.setRemotePairWise(true);
+            }
+
+            // TODO(yuehan): check union under partition-wise join
            if (exchange.getInput() instanceof LogicalUnion) {
                LogicalUnion union = (LogicalUnion) exchange.getInput();
                RelNode remoteSourceNode = visitUnion(
@@ -378,9 +428,18 @@ private SubPlan generateFragment(
            runtimeFilterIdCollector.collect(root, session);
            List outputTypes = SerializeDataType.convertToSerilizeType(
                parent.getRowType().getFieldList());
-            return generateSubPlan(root, currentProperties, outputTypes, partitioningScheme,
+            Set commonIds =
+                runtimeFilterIdCollector.filterIds.stream().filter(runtimeFilterIdCollector.produceFilterIds::contains)
+                    .collect(Collectors.toSet());
+            SubPlan subPlan = generateSubPlan(root, currentProperties, outputTypes, partitioningScheme,
                runtimeFilterIdCollector.filterIds, runtimeFilterIdCollector.produceFilterIds);
+            if (InstConfUtil.getBool(ConnectionParams.ENABLE_LOCAL_RUNTIME_FILTER) && commonIds.equals(
+                new HashSet<>(runtimeFilterIdCollector.filterIds)) &&
+                commonIds.equals(new HashSet<>(runtimeFilterIdCollector.produceFilterIds))) {
+                subPlan.getFragment().setLocalBloomFilter(true);
+            }
+            return subPlan;
        }

        private SubPlan generateSubPlan(
@@ -388,27 +447,27 @@ private SubPlan generateSubPlan(
            PartitioningScheme partitioningScheme, List filterIds, List produceFilterIds) {
            Pair, List> pairs = getPartitionSourceIds(currentProperties);
-            SplitInfo splitInfo = null;
-            if (currentProperties.getCurrentLogicalView() != null && !session.isIgnoreSplitInfo()) {
-                LogicalView logicalView = currentProperties.getCurrentLogicalView();
-                if (logicalView.fromTableOperation() != null) {
-                    splitInfo = new SplitManager().getSingleSplit(logicalView, session.getClientContext());
-                } else {
-                    splitInfo = new SplitManager().getSplits(
-                        logicalView, session.getClientContext(), !lowConcurrencyQuery);
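Note: `generateSubPlan` now gathers one `SplitInfo` per `LogicalView` in the fragment, and `getOuterParallelism` (in the hunk that follows) picks the scan parallelism: 1 for singleton fragments, a cost-based value when the fragment has no scan, the widest per-scan parallelism for InnoDB scans, and a configurable fallback when every columnar table turns out to be empty. A condensed model of that decision, under the assumption that the functional parameter stands in for `calcScanParallelism`/`calcColumnarScanParallelism`:

```java
import java.util.Arrays;
import java.util.List;
import java.util.function.ToIntFunction;

public class OuterParallelismSketch {
    // splitCounts: one entry per scanned table; emptyTableParallelism <= 0 disables the fallback.
    static int outerParallelism(boolean singleton, List<Integer> splitCounts,
                                int emptyTableParallelism, int noScanParallelism,
                                ToIntFunction<Integer> perScanParallelism) {
        if (singleton) {
            return 1;                     // singleton fragments never scale out
        }
        if (splitCounts.isEmpty()) {
            return noScanParallelism;     // no scan in this fragment: use the cost-based value
        }
        if (emptyTableParallelism > 0 && splitCounts.stream().allMatch(c -> c == 0)) {
            return emptyTableParallelism; // corner case: every scanned table is empty
        }
        int parallelism = 1;
        for (int count : splitCounts) {
            parallelism = Math.max(parallelism, perScanParallelism.applyAsInt(count)); // widest scan wins
        }
        return parallelism;
    }

    public static void main(String[] args) {
        // Example policy: each scan's parallelism is capped by its split count, at least 1.
        System.out.println(outerParallelism(
            false, Arrays.asList(8, 2), 4, 16, c -> Math.max(1, c))); // 8
    }
}
```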
+            // collect the logical table name and its split count.
+            Map splitCountMap = new HashMap<>();
+
+            List splitInfos = new ArrayList<>();
+            if (currentProperties.getLogicalViews().size() > 0 && !session.isIgnoreSplitInfo()) {
+                for (LogicalView logicalView : currentProperties.getLogicalViews()) {
+                    SplitInfo splitInfo = null;
+                    if (logicalView.fromTableOperation() != null) {
+                        splitInfo = splitManager.getSingleSplit(logicalView, session.getClientContext());
+                    } else {
+                        splitInfo = splitManager.getSplits(
+                            logicalView, session.getClientContext(), !lowConcurrencyQuery);
+                    }
+                    splitCountMap.put(logicalView.getLogicalTableName(), splitInfo.getSplitCount());
+                    session.getGroups().putAll(splitInfo.getGroups());
+                    splitInfos.add(splitInfo);
                }
-                session.getGroups().putAll(splitInfo.getGroups());
            }

            PlanFragment planFragment = null;
-            final int outerParallelism;
-            if (currentProperties.getPartitionHandle().isSingleTon()) {
-                outerParallelism = 1;
-            } else if (splitInfo != null) {
-                outerParallelism = calcScanParallelism(currentProperties.rootIo, splitInfo);
-            } else {
-                outerParallelism = calcParallelism(currentProperties);
-            }
+            int outerParallelism = getOuterParallelism(splitInfos, currentProperties);

            List expandSplitInfos = new ArrayList<>();
            Integer bkaJoinParallelism = -1;
@@ -416,15 +475,13 @@ private SubPlan generateSubPlan(
                for (LogicalView logicalView : currentProperties.getExpandView()) {
                    SplitInfo info;
                    if (logicalView.fromTableOperation() != null) {
-                        info = new SplitManager().getSingleSplit(logicalView, session.getClientContext());
+                        info = splitManager.getSingleSplit(logicalView, session.getClientContext());
                    } else {
-                        info = new SplitManager().getSplits(
+                        info = splitManager.getSplits(
                            logicalView, session.getClientContext(), !lowConcurrencyQuery);
                    }
+                    splitCountMap.put(logicalView.getLogicalTableName(), info.getSplitCount());

-                    if (splitInfo != null) {
-                        session.getGroups().putAll(splitInfo.getGroups());
-                    }
                    session.getGroups().putAll(info.getGroups());
                    expandSplitInfos.add(info);

@@ -440,8 +497,10 @@ private SubPlan generateSubPlan(
            }

            planFragment = new PlanFragment(nextFragmentId++, root, outputTypes,
-                currentProperties.getPartitionHandle().setPartitionCount(outerParallelism), pairs.getKey(),
-                pairs.getValue(), partitioningScheme, bkaJoinParallelism, filterIds, produceFilterIds);
+                currentProperties.getPartitionHandle().setPartitionCount(outerParallelism),
+                currentProperties.isRemotePairWise(), pairs.getKey(),
+                pairs.getValue(), partitioningScheme, bkaJoinParallelism, filterIds, produceFilterIds,
+                currentProperties.isLocalPairWise(), splitCountMap, currentProperties.isPruneExchangePartition());

            int currentParallelism = currentProperties.getPartitionHandle().getPartitionCount();
            int singleChildParallelism = currentProperties.getSingleChildParallelism();
@@ -460,7 +519,46 @@ private SubPlan generateSubPlan(
            } else {
                currentConcurrentParallelism += currentParallelism;
            }
-            return new SubPlan(planFragment, splitInfo, expandSplitInfos, currentProperties.getChildren());
+            return new SubPlan(planFragment, splitInfos, expandSplitInfos, currentProperties.getChildren());
+        }
+
+        private int getOuterParallelism(List splitInfos, FragmentProperties currentProperties) {
+            if (currentProperties.getPartitionHandle().isSingleTon()) {
+                return 1;
+            }
+
+            // fragment does not contain a scan
+            if (splitInfos.size() == 0) {
+                return calcParallelism(currentProperties);
+            }
+
+            boolean containsOss = splitInfos.stream().anyMatch(this::containsOssSplit);
+            // for InnoDB
+            if (!containsOss) {
+                int outerParallelism = 1;
+                for (SplitInfo splitInfo : splitInfos) {
+                    outerParallelism =
+                        Math.max(outerParallelism, calcScanParallelism(currentProperties.rootIo, splitInfo));
+                }
+                return outerParallelism;
+            }
+
+            // corner case: all tables in this fragment are empty
+            boolean allEmpty = splitInfos.stream().allMatch(splitInfo -> splitInfo.getSplits().isEmpty());
+            if (allEmpty) {
+                int parallelism =
+                    session.getClientContext().getParamManager().getInt(ConnectionParams.PARALLELISM_FOR_EMPTY_TABLE);
+                if (parallelism > 0) {
+                    return parallelism;
+                }
+            }
+
+            int outerParallelism = 1;
+            for (SplitInfo splitInfo : splitInfos) {
+                outerParallelism = Math.max(outerParallelism,
+                    calcColumnarScanParallelism(currentProperties.isSimpleOssScan(), splitInfo));
+            }
+            return outerParallelism;
        }

        public int calcParallelism(FragmentProperties properties) {
@@ -503,6 +601,7 @@ public int calcScanParallelism(double io, SplitInfo splitInfo) {
            if (lowConcurrencyQuery) {
                return 1;
            }
+
            ParamManager paramManager = session.getClientContext().getParamManager();
            int parallelsim = -1;
@@ -525,12 +624,45 @@ public int calcScanParallelism(double io, SplitInfo splitInfo) {
                    ExecUtils.getMppMinParallelism(paramManager));
                }
            }
+
            int dbParallelism = ExecUtils.getPolarDbCores(paramManager, !onlyUseReadInstance);
            parallelsim = Math.max(Math.min(Math.min(
                splitInfo.getSplitCount(), dbParallelism * splitInfo.getInsCount()), parallelsim), 1);
+
            return parallelsim;
        }

+        private boolean containsOssSplit(SplitInfo splitInfo) {
+            return splitInfo != null && splitInfo.getSplits().stream()
+                .anyMatch(splits -> splits.stream().anyMatch(split -> split.getConnectorSplit() instanceof OssSplit));
+        }
+
+        private int calcColumnarScanParallelism(boolean simpleOssScan, SplitInfo splitInfo) {
+            if (lowConcurrencyQuery) {
+                return 1;
+            }
+
+            ParamManager paramManager = session.getClientContext().getParamManager();
+
+            int parallelism = paramManager.getInt(ConnectionParams.MPP_PARALLELISM);
+
+            if (parallelism < 0) {
+                int mppNodeSize = paramManager.getInt(ConnectionParams.MPP_NODE_SIZE);
+                if (mppNodeSize <= 0) {
+                    mppNodeSize = (ServiceProvider.getInstance().getServer()).getNodeManager()
+                        .getAllWorkers(ConfigDataMode.isMasterMode() && paramManager
+                            .getBoolean(ConnectionParams.POLARDBX_SLAVE_INSTANCE_FIRST)).size();
+                }
+                // default parallelism is the total core count of all compute nodes
+                parallelism = mppNodeSize * ExecUtils.getPolarDBXCores(paramManager, !onlyUseReadInstance);
+            }
+
+            if (simpleOssScan && splitInfo.getSplitCount() > 0) {
+                parallelism = Math.min(parallelism, splitInfo.getSplitCount());
+            }
+            return parallelism;
+        }
+
        public boolean isMergeSortSourceNode(RelNode node) {
            if (node instanceof RemoteSourceNode
                && node.getTraitSet().getTrait(RelCollationTraitDef.INSTANCE).getFieldCollations().size() > 0) {
@@ -557,8 +689,8 @@ public Double estimateRowCount(T relNode) {
        public Pair, List> getPartitionSourceIds(FragmentProperties properties) {
            List partitionSources = new ArrayList<>();
            List expandSources = new ArrayList<>();
-            if (properties.getCurrentLogicalView() != null) {
-                partitionSources.add(properties.getCurrentLogicalView().getRelatedId());
+            for (LogicalView logicalView : properties.getLogicalViews()) {
+                partitionSources.add(logicalView.getRelatedId());
            }
            if (properties.getExpandView().size() > 0) {
                for (LogicalView logicalView : properties.getExpandView()) {
@@ -572,7 +704,7 @@ public Pair, List> getPartitionSourceIds(FragmentProperti
        private static class FragmentProperties {
            private final List children = new ArrayList<>();
-            private LogicalView currentLogicalView;
+            private List logicalViews = new
ArrayList<>(); private List expandViews = new ArrayList<>(); private double rootRowCnt; private double rootIo; @@ -583,6 +715,14 @@ private static class FragmentProperties { private int singleChildParallelism = 0; + private boolean localPairWise = false; + + private boolean remotePairWise = false; + + private boolean simpleOssScan = false; + + private boolean pruneExchangePartition = true; + public int getInnerChildParallelism() { return innerChildParallelism; } @@ -599,13 +739,43 @@ public void setSingleChildParallelism(int singleChildParallelism) { this.singleChildParallelism = singleChildParallelism; } - public LogicalView getCurrentLogicalView() { - return currentLogicalView; + public boolean isSimpleOssScan() { + return simpleOssScan; + } + + public void setSimpleOssScan(boolean simpleOssScan) { + this.simpleOssScan = simpleOssScan; + } + + public boolean isPruneExchangePartition() { + return pruneExchangePartition; + } + + public void setPruneExchangePartition(boolean pruneExchangePartition) { + this.pruneExchangePartition = pruneExchangePartition; + } + + public List getLogicalViews() { + return logicalViews; } - public void setLogicalView(LogicalView logicalView) { - Preconditions.checkState(this.currentLogicalView == null, "currentLogicalView is already exist!"); - this.currentLogicalView = logicalView; + public void addLogicalView(LogicalView logicalView) { + this.logicalViews.add(logicalView); + } + + public void updatePairWiseInfo(OSSTableScan ossTableScan, boolean enableLocalPairWise) { + if (enableLocalPairWise) { + this.localPairWise = localPairWise | ossTableScan.getTraitSet().getPartitionWise().isLocalPartition(); + } + this.remotePairWise = remotePairWise | ossTableScan.getTraitSet().getPartitionWise().isRemotePartition(); + } + + public boolean isLocalPairWise() { + return localPairWise; + } + + public boolean isRemotePairWise() { + return remotePairWise; } public List getExpandView() { @@ -716,7 +886,10 @@ public void collect(RelNode rel, Session session) { return; } visit(rel); - verify(); + if (session.getClientContext().getParamManager() + .getBoolean(ConnectionParams.CHECK_RUNTIME_FILTER_SAME_FRAGMENT)) { + verify(); + } } private void verify() { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PlanUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PlanUtils.java index 4587b2efd..c666228a3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PlanUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/PlanUtils.java @@ -26,6 +26,7 @@ import com.alibaba.polardbx.executor.mpp.operator.LocalExecutionPlanner; import com.alibaba.polardbx.executor.mpp.operator.factory.LocalBufferExecutorFactory; import com.alibaba.polardbx.executor.mpp.operator.factory.PipelineFactory; +import com.alibaba.polardbx.executor.mpp.split.SplitManagerImpl; import com.alibaba.polardbx.executor.mpp.util.MoreExecutors; import com.alibaba.polardbx.executor.operator.spill.MemorySpillerFactory; import com.alibaba.polardbx.executor.utils.ExecUtils; @@ -38,6 +39,7 @@ import com.alibaba.polardbx.optimizer.workload.WorkloadUtil; import com.google.common.base.Joiner; import com.google.common.base.Strings; +import com.google.common.collect.ImmutableMap; import org.apache.calcite.rel.RelNode; import java.util.List; @@ -61,32 +63,38 @@ public static String textLocalPlan(ExecutionContext context, RelNode relNode, Ex int parallelism = 
ExecUtils.getParallelismForLocal(context); + StringBuilder builder = new StringBuilder(); + builder.append("ExecutorMode: ").append(type).append(" ").append("\n"); + outputLocalFragment(context, parallelism, relNode, builder); + return builder.toString(); + } + + private static void outputLocalFragment(ExecutionContext context, int parallelism, RelNode relNode, + StringBuilder builder) { boolean isSpill = MemorySetting.ENABLE_SPILL && context.getParamManager().getBoolean(ConnectionParams.ENABLE_SPILL); LocalExecutionPlanner planner = new LocalExecutionPlanner(context, null, parallelism, parallelism, 1, context.getParamManager().getInt(ConnectionParams.PREFETCH_SHARDS), MoreExecutors.directExecutor(), - isSpill ? new MemorySpillerFactory() : null, null, null, type == ExecutorMode.MPP); + isSpill ? new MemorySpillerFactory() : null, null, null, false, + -1, -1, ImmutableMap.of(), new SplitManagerImpl()); List columns = CalciteUtils.getTypes(relNode.getRowType()); OutputBufferMemoryManager localBufferManager = planner.createLocalMemoryManager(); LocalBufferExecutorFactory factory = new LocalBufferExecutorFactory(localBufferManager, columns, 1); List pipelineFactories = planner.plan(relNode, factory, localBufferManager, context.getTraceId()); - StringBuilder builder = new StringBuilder(); - builder.append("ExecutorMode: ").append(type).append(" ").append("\n"); for (PipelineFactory pipelineFactory : pipelineFactories) { builder.append(formatPipelineFragment(context, pipelineFactory, context.getParams().getCurrentParameter())); } - return builder.toString(); } - private static String formatPipelineFragment(ExecutionContext executionContext, - PipelineFactory pipelineFactory, - Map params) { + public static String formatPipelineFragment(ExecutionContext executionContext, + PipelineFactory pipelineFactory, + Map params) { PipelineFragment fragment = pipelineFactory.getFragment(); StringBuilder builder = new StringBuilder(); - builder.append(format("Fragment %s dependency: [%s] parallelism: %s", fragment.getPipelineId(), + builder.append(format("Pipeline %s dependency: [%s] parallelism: %s", fragment.getPipelineId(), Joiner.on(", ").join(fragment.getDependency()), fragment.getParallelism())); if (fragment.getPrefetchLists().size() > 0) { builder.append(format(" prefetch: %s \n ", fragment.getPrefetchLists())); @@ -107,6 +115,14 @@ public static String textPlan(ExecutionContext executionContext, Session session for (PlanFragment fragment : plan.getKey().getAllFragments()) { builder.append(formatFragment( executionContext, fragment, session.getClientContext().getParams().getCurrentParameter())); + if (executionContext.getParamManager().getBoolean(ConnectionParams.SHOW_PIPELINE_INFO_UNDER_MPP)) { + int mppNodeSize = executionContext.getParamManager().getInt(ConnectionParams.MPP_NODE_SIZE); + if (mppNodeSize <= 0) { + mppNodeSize = ExecUtils.getActiveNodeCount(); + } + int parallelism = fragment.getPartitioning().getPartitionCount() / mppNodeSize; + outputLocalFragment(executionContext, Math.max(1, parallelism), fragment.getRootNode(), builder); + } } return builder.toString(); } @@ -117,13 +133,6 @@ private static String formatFragment(ExecutionContext executionContext, PlanFrag builder.append(format("Fragment %s \n", fragment.getId())); PartitioningScheme partitioningScheme = fragment.getPartitioningScheme(); - builder.append(indentString(1)) - .append(format("Shuffle Output layout: [%s]", Joiner.on(", ").join( - SerializeDataType.convertToDataType(fragment.getOutputTypes()).stream().map( - c -> 
c.getStringSqlType()).toArray()
-            ))).append(format(" Output layout: [%s]\n", Joiner.on(", ").join(
-                fragment.getTypes().stream().map(c -> c.getStringSqlType()).toArray()
-            )));
        builder.append(indentString(1));
        builder.append(format("Output partitioning: %s [%s] ",
            partitioningScheme.getPartitionMode(),
            Joiner.on(", ").join(partitioningScheme.getPartChannels())));
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/SerializeDataType.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/SerializeDataType.java
index 8ca9711a4..71e280920 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/SerializeDataType.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/SerializeDataType.java
@@ -16,8 +16,6 @@
 package com.alibaba.polardbx.executor.mpp.planner;

-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonProperty;
 import com.alibaba.polardbx.common.charset.CharsetName;
 import com.alibaba.polardbx.common.charset.CollationName;
 import com.alibaba.polardbx.common.utils.time.MySQLTimeTypeUtil;
@@ -29,6 +27,8 @@
 import com.alibaba.polardbx.optimizer.core.datatype.TimeType;
 import com.alibaba.polardbx.optimizer.core.datatype.TimestampType;
 import com.alibaba.polardbx.optimizer.core.datatype.VarcharType;
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.sql.SqlCollation;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/SimpleFragmentRFManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/SimpleFragmentRFManager.java
new file mode 100644
index 000000000..6338a7360
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/SimpleFragmentRFManager.java
@@ -0,0 +1,113 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.mpp.planner;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class SimpleFragmentRFManager implements FragmentRFManager {
+    /**
+     * The total partition count of the table on the join key.
+     * It is used for route calculation.
+     */
+    private final int totalPartitionCount;
+
+    /**
+     * The number of partitions allocated to this worker.
+ */ + private final int partitionsOfNode; + + private final double defaultFpp; + private final long rowUpperBound; + private final long rowLowerBound; + + private final double filterRatioThreshold; + private final int rfSampleCount; + + private final Map items; + + public SimpleFragmentRFManager(int totalPartitionCount, int partitionsOfNode, + double defaultFpp, + long rowUpperBound, long rowLowerBound, double filterRatioThreshold, + int rfSampleCount) { + this.totalPartitionCount = totalPartitionCount; + this.partitionsOfNode = partitionsOfNode; + + this.defaultFpp = defaultFpp; + this.rowUpperBound = rowUpperBound; + this.rowLowerBound = rowLowerBound; + this.filterRatioThreshold = filterRatioThreshold; + this.rfSampleCount = rfSampleCount; + + this.items = new HashMap<>(); + } + + @Override + public Map getAllItems() { + return items; + } + + @Override + public void addItem(FragmentRFItemKey itemKey, FragmentRFItem rfItem) { + items.put(itemKey, rfItem); + } + + @Override + public double getDefaultFpp() { + return defaultFpp; + } + + @Override + public int getTotalPartitionCount() { + return totalPartitionCount; + } + + @Override + public int getPartitionsOfNode() { + return partitionsOfNode; + } + + @Override + public long getUpperBound() { + return rowUpperBound; + } + + @Override + public long getLowerBound() { + return rowLowerBound; + } + + @Override + public int getSampleCount() { + return rfSampleCount; + } + + @Override + public double getFilterRatioThreshold() { + return filterRatioThreshold; + } + + @Override + public String toString() { + return "SimpleFragmentRFManager{" + + "totalPartitionCount=" + totalPartitionCount + + ", defaultFpp=" + defaultFpp + + ", rowThreshold=" + rowUpperBound + + ", items=" + items + + '}'; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/StageExecutionPlan.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/StageExecutionPlan.java index 4ff310b65..6db7a802b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/StageExecutionPlan.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/StageExecutionPlan.java @@ -29,9 +29,9 @@ */ package com.alibaba.polardbx.executor.mpp.planner; -import com.google.common.collect.ImmutableList; import com.alibaba.polardbx.executor.mpp.split.SplitInfo; import com.alibaba.polardbx.util.MoreObjects; +import com.google.common.collect.ImmutableList; import java.util.List; @@ -39,17 +39,17 @@ public class StageExecutionPlan { private final PlanFragment fragment; - private final SplitInfo splitInfo; + private final List splitInfos; private final List expandSplitInfos; private final List subStages; public StageExecutionPlan( PlanFragment fragment, - SplitInfo splitInfo, + List splitInfos, List expandSplitInfos, List subStages) { this.fragment = requireNonNull(fragment, "fragment is null"); - this.splitInfo = splitInfo; + this.splitInfos = splitInfos; this.expandSplitInfos = expandSplitInfos; this.subStages = ImmutableList.copyOf(requireNonNull(subStages, "dependencies is null")); } @@ -58,8 +58,8 @@ public PlanFragment getFragment() { return fragment; } - public SplitInfo getSplitInfo() { - return splitInfo; + public List getSplitInfos() { + return splitInfos; } public List getExpandInfo() { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/SubPlan.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/SubPlan.java index 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/SubPlan.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/SubPlan.java index a25e0aa01..d5aa2840b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/SubPlan.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/planner/SubPlan.java @@ -29,8 +29,8 @@ */ package com.alibaba.polardbx.executor.mpp.planner; -import com.google.common.collect.ImmutableList; import com.alibaba.polardbx.executor.mpp.split.SplitInfo; +import com.google.common.collect.ImmutableList; import javax.annotation.concurrent.Immutable; import java.util.List; @@ -40,20 +40,20 @@ @Immutable public class SubPlan { private final PlanFragment fragment; - private final SplitInfo logicalViewInfo; + private final List<SplitInfo> logicalViewInfos; private final List<SplitInfo> expandViewInfos; private final List<SubPlan> children; public SubPlan( PlanFragment fragment, - SplitInfo logicalViewInfo, + List<SplitInfo> logicalViewInfos, List<SplitInfo> expandViewInfos, List<SubPlan> children) { requireNonNull(fragment, "fragment is null"); requireNonNull(children, "children is null"); this.fragment = fragment; - this.logicalViewInfo = logicalViewInfo; + this.logicalViewInfos = logicalViewInfos; this.expandViewInfos = expandViewInfos; this.children = ImmutableList.copyOf(children); } @@ -66,8 +66,8 @@ public List<SubPlan> getChildren() { return children; } - public SplitInfo getLogicalViewInfo() { - return logicalViewInfo; + public List<SplitInfo> getLogicalViewInfos() { + return logicalViewInfos; } public List<SplitInfo> getExpandSplitInfos() { @@ -81,8 +81,8 @@ public List<PlanFragment> getAllFragments() { ImmutableList.Builder<PlanFragment> fragments = ImmutableList.builder(); fragments.add(getFragment()); - if (logicalViewInfo != null) { - getFragment().setAllSplitNums(logicalViewInfo.getSplitCount()); + if (logicalViewInfos != null) { + getFragment().setAllSplitNums(logicalViewInfos.stream().mapToInt(SplitInfo::getSplitCount).sum()); } for (SubPlan child : getChildren()) { fragments.addAll(child.getAllFragments()); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/BasicQueryInfo.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/BasicQueryInfo.java index 5b0f4d507..5d4cfbdec 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/BasicQueryInfo.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/BasicQueryInfo.java @@ -29,11 +29,11 @@ */ package com.alibaba.polardbx.executor.mpp.server; -import com.fasterxml.jackson.annotation.JsonProperty; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.exception.code.ErrorType; import com.alibaba.polardbx.executor.mpp.execution.QueryInfo; import com.alibaba.polardbx.executor.mpp.execution.QueryState; +import com.fasterxml.jackson.annotation.JsonProperty; import javax.annotation.Nullable; import javax.annotation.concurrent.Immutable; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/BasicQueryStats.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/BasicQueryStats.java index 56786c299..043fb2ded 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/BasicQueryStats.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/BasicQueryStats.java @@ -30,10 +30,10 @@ package com.alibaba.polardbx.executor.mpp.server; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.google.common.collect.ImmutableSet; import com.alibaba.polardbx.executor.mpp.execution.QueryStats; import com.alibaba.polardbx.executor.mpp.operator.BlockedReason;
+import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.collect.ImmutableSet; import io.airlift.units.DataSize; import io.airlift.units.Duration; import org.joda.time.DateTime; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/PagesResponseWriter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/PagesResponseWriter.java index a365f8db7..546ebe972 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/PagesResponseWriter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/PagesResponseWriter.java @@ -29,10 +29,10 @@ */ package com.alibaba.polardbx.executor.mpp.server; -import com.google.common.base.Throwables; -import com.google.common.reflect.TypeToken; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.mpp.execution.buffer.SerializedChunk; +import com.google.common.base.Throwables; +import com.google.common.reflect.TypeToken; import io.airlift.slice.OutputStreamSliceOutput; import io.airlift.slice.RuntimeIOException; import io.airlift.slice.SliceOutput; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/StatementResource.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/StatementResource.java index afd1ea856..555bedb1b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/StatementResource.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/StatementResource.java @@ -29,9 +29,6 @@ */ package com.alibaba.polardbx.executor.mpp.server; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Lists; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.exception.code.ErrorType; import com.alibaba.polardbx.common.utils.GeneralUtil; @@ -64,6 +61,9 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; import com.alibaba.polardbx.optimizer.utils.CalciteUtils; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; import io.airlift.jaxrs.testing.MockUriInfo; import io.airlift.units.DataSize; import io.airlift.units.Duration; @@ -139,7 +139,8 @@ public LocalResultResponse createQuery(ExecutionContext clientContext, RelNode p clientContext.getOriginSql(), queryManager, pagesSerdeFactory, - physicalPlan); + physicalPlan, + clientContext); queries.put(query.getQueryId(), query); query.init(); @@ -256,12 +257,13 @@ public Query(ExchangeClientSupplier exchangeClientSupplier, String query, QueryManager queryManager, PagesSerdeFactory pagesSerdeFactory, - RelNode physicalPlan) { + RelNode physicalPlan, + ExecutionContext context) { requireNonNull(query, "query is null"); requireNonNull(queryManager, "queryManager is null"); this.session = requireNonNull(session, "sessionSupplier is null"); this.queryId = requireNonNull(session.getQueryId(), "queryId is null"); - this.serde = pagesSerdeFactory.createPagesSerde(CalciteUtils.getTypes(physicalPlan.getRowType())); + this.serde = pagesSerdeFactory.createPagesSerde(CalciteUtils.getTypes(physicalPlan.getRowType()), context); this.exchangeClientSupplier = exchangeClientSupplier; this.queryManager = queryManager; this.query = query; diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/TaskResource.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/TaskResource.java index 991bddd71..8773b107b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/TaskResource.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/TaskResource.java @@ -16,9 +16,6 @@ package com.alibaba.polardbx.executor.mpp.server; -import com.google.common.reflect.TypeToken; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.chunk.Chunk; @@ -32,6 +29,9 @@ import com.alibaba.polardbx.executor.mpp.execution.TaskStatus; import com.alibaba.polardbx.executor.mpp.execution.buffer.BufferResult; import com.alibaba.polardbx.executor.mpp.execution.buffer.SerializedChunk; +import com.google.common.reflect.TypeToken; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; import io.airlift.concurrent.BoundedExecutor; import io.airlift.units.DataSize; import io.airlift.units.Duration; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/TaskUpdateRequest.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/TaskUpdateRequest.java index c57ab3c84..8f239798b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/TaskUpdateRequest.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/TaskUpdateRequest.java @@ -16,14 +16,14 @@ package com.alibaba.polardbx.executor.mpp.server; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.google.common.collect.ImmutableList; +import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; import com.alibaba.polardbx.executor.mpp.OutputBuffers; import com.alibaba.polardbx.executor.mpp.execution.SessionRepresentation; import com.alibaba.polardbx.executor.mpp.execution.TaskSource; import com.alibaba.polardbx.executor.mpp.planner.PlanFragment; -import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.collect.ImmutableList; import java.net.URI; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/Backoff.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/Backoff.java index 4d0ccdbe2..bce374006 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/Backoff.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/Backoff.java @@ -29,8 +29,8 @@ */ package com.alibaba.polardbx.executor.mpp.server.remotetask; -import com.google.common.base.Ticker; import com.alibaba.polardbx.executor.mpp.util.MillTicker; +import com.google.common.base.Ticker; import io.airlift.units.Duration; import javax.annotation.concurrent.ThreadSafe; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/ContinuousTaskStatusFetcher.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/ContinuousTaskStatusFetcher.java index 79cfbddd0..1cd901a10 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/ContinuousTaskStatusFetcher.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/ContinuousTaskStatusFetcher.java @@ -29,8 +29,6 @@ */ package com.alibaba.polardbx.executor.mpp.server.remotetask; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.logger.Logger; @@ -44,10 +42,12 @@ import com.alibaba.polardbx.executor.mpp.execution.TaskId; import com.alibaba.polardbx.executor.mpp.execution.TaskManager; import com.alibaba.polardbx.executor.mpp.execution.TaskStatus; -import com.alibaba.polardbx.gms.node.HostAddressCache; import com.alibaba.polardbx.executor.mpp.server.TaskResource; import com.alibaba.polardbx.executor.mpp.util.Failures; +import com.alibaba.polardbx.gms.node.HostAddressCache; import com.alibaba.polardbx.statistics.RuntimeStatistics; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; import io.airlift.http.client.FullJsonResponseHandler; import io.airlift.http.client.HttpClient; import io.airlift.http.client.Request; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/HttpRemoteTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/HttpRemoteTask.java index e881688d2..dfbd405b7 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/HttpRemoteTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/HttpRemoteTask.java @@ -18,6 +18,7 @@ import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.mpp.OutputBuffers; @@ -42,7 +43,6 @@ import com.alibaba.polardbx.executor.mpp.server.StatementResource; import com.alibaba.polardbx.executor.mpp.server.TaskUpdateRequest; import com.alibaba.polardbx.executor.mpp.util.Failures; -import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.HashMultimap; import com.google.common.collect.ImmutableList; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/HttpRemoteTaskFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/HttpRemoteTaskFactory.java index aacf9a910..d6c07367d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/HttpRemoteTaskFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/HttpRemoteTaskFactory.java @@ -16,11 +16,10 @@ package com.alibaba.polardbx.executor.mpp.server.remotetask; -import com.google.common.collect.Multimap; import com.alibaba.polardbx.common.properties.MppConfig; -import com.alibaba.polardbx.executor.mpp.Threads; 
import com.alibaba.polardbx.executor.mpp.OutputBuffers; import com.alibaba.polardbx.executor.mpp.Session; +import com.alibaba.polardbx.executor.mpp.Threads; import com.alibaba.polardbx.executor.mpp.execution.LocationFactory; import com.alibaba.polardbx.executor.mpp.execution.NodeTaskMap; import com.alibaba.polardbx.executor.mpp.execution.RemoteTask; @@ -36,6 +35,7 @@ import com.alibaba.polardbx.executor.mpp.server.ForAsyncHttp; import com.alibaba.polardbx.executor.mpp.server.TaskUpdateRequest; import com.alibaba.polardbx.gms.node.Node; +import com.google.common.collect.Multimap; import io.airlift.http.client.HttpClient; import io.airlift.json.JsonCodec; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/RequestErrorTracker.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/RequestErrorTracker.java index 04f97d229..1bfca5309 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/RequestErrorTracker.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/RequestErrorTracker.java @@ -29,10 +29,6 @@ */ package com.alibaba.polardbx.executor.mpp.server.remotetask; -import com.google.common.collect.ObjectArrays; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ListenableFutureTask; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.logger.Logger; @@ -40,6 +36,10 @@ import com.alibaba.polardbx.executor.mpp.execution.TaskId; import com.alibaba.polardbx.executor.mpp.metadata.TaskLocation; import com.alibaba.polardbx.executor.mpp.util.Failures; +import com.google.common.collect.ObjectArrays; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListenableFutureTask; import io.airlift.event.client.ServiceUnavailableException; import javax.annotation.concurrent.ThreadSafe; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/SimpleHttpResponseHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/SimpleHttpResponseHandler.java index 6efe5d9cc..c6670cc8c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/SimpleHttpResponseHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/SimpleHttpResponseHandler.java @@ -29,8 +29,8 @@ */ package com.alibaba.polardbx.executor.mpp.server.remotetask; -import com.google.common.util.concurrent.FutureCallback; import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.google.common.util.concurrent.FutureCallback; import io.airlift.http.client.FullJsonResponseHandler; import io.airlift.http.client.HttpStatus; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/TaskInfoFetcher.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/TaskInfoFetcher.java index a84e5bd9d..30596b743 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/TaskInfoFetcher.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/server/remotetask/TaskInfoFetcher.java @@ -29,8 
+29,6 @@ */ package com.alibaba.polardbx.executor.mpp.server.remotetask; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.mpp.Session; @@ -43,6 +41,8 @@ import com.alibaba.polardbx.executor.mpp.execution.TaskStatus; import com.alibaba.polardbx.executor.mpp.server.StatementResource; import com.alibaba.polardbx.statistics.RuntimeStatistics; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; import io.airlift.http.client.FullJsonResponseHandler; import io.airlift.http.client.HttpClient; import io.airlift.http.client.HttpUriBuilder; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/split/JdbcSplit.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/split/JdbcSplit.java index 2d3430019..c5b5b25da 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/split/JdbcSplit.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/split/JdbcSplit.java @@ -24,7 +24,6 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.alibaba.polardbx.common.jdbc.ParameterContext; import com.alibaba.polardbx.common.properties.ConnectionParams; -import com.alibaba.polardbx.common.utils.SerializeUtils; import com.alibaba.polardbx.executor.mpp.spi.ConnectorSplit; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.PhyTableScanBuilder; @@ -34,9 +33,7 @@ import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; -import java.io.Serializable; import java.util.ArrayList; -import java.util.Base64; import java.util.List; import static com.google.common.base.MoreObjects.toStringHelper; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/split/OssSplit.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/split/OssSplit.java index dd71453ae..5a17a07e8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/split/OssSplit.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/split/OssSplit.java @@ -20,6 +20,8 @@ import com.alibaba.polardbx.common.jdbc.ParameterContext; import com.alibaba.polardbx.common.jdbc.Parameters; import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.common.utils.SerializeUtils; import com.alibaba.polardbx.common.utils.TStringUtil; import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; @@ -35,7 +37,10 @@ import com.alibaba.polardbx.executor.archive.schemaevolution.ColumnMetaWithTs; import com.alibaba.polardbx.executor.archive.schemaevolution.OrcColumnManager; import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.OSSTaskUtils; +import com.alibaba.polardbx.executor.gms.ColumnarManager; +import com.alibaba.polardbx.executor.gms.ColumnarStoreUtils; import com.alibaba.polardbx.executor.mpp.spi.ConnectorSplit; +import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; import com.alibaba.polardbx.optimizer.config.table.FileMeta; import com.alibaba.polardbx.optimizer.config.table.OSSOrcFileMeta; @@ -48,6 +53,8 @@ import 
com.alibaba.polardbx.optimizer.core.rel.OSSTableScan; import com.alibaba.polardbx.optimizer.core.rel.PhyTableOperation; import com.alibaba.polardbx.optimizer.core.rel.PhyTableScanBuilder; +import com.alibaba.polardbx.optimizer.partition.PartSpecBase; +import com.alibaba.polardbx.optimizer.partition.PartitionInfo; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; @@ -55,12 +62,14 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.RelPartitionWise; import org.apache.calcite.rel.logical.LogicalAggregate; import org.apache.calcite.rel.metadata.RelColumnOrigin; import org.apache.calcite.rex.RexBuilder; import org.apache.calcite.rex.RexDynamicParam; import org.apache.calcite.rex.RexNode; import org.apache.calcite.sql.SqlKind; +import org.apache.commons.collections.CollectionUtils; import org.apache.orc.ColumnStatistics; import org.apache.orc.IntegerColumnStatistics; import org.apache.orc.TypeDescription; @@ -73,8 +82,10 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.TimeZone; import java.util.regex.Pattern; @@ -85,6 +96,8 @@ import static com.google.common.base.MoreObjects.toStringHelper; public class OssSplit implements ConnectorSplit { + public static final Integer NO_PARTITION_INFO = -1; + private List readOptions; private String logicalSchema; @@ -112,17 +125,35 @@ public class OssSplit implements ConnectorSplit { private boolean isInit = false; + // for delta of columnar store (nullable) + private DeltaReadOption deltaReadOption; + private Long checkpointTso; + + private int partIndex = -1; + + private int nodePartCount = -1; + + private Boolean localPairWise; + public OssSplit(String logicalSchema, String physicalSchema, Map params, String logicalTableName, List phyTableNameList, - List designatedFile) { + List designatedFile, + DeltaReadOption deltaReadOption, + Long checkpointTso, + int partIndex, + Boolean localPairWise) { this.logicalSchema = logicalSchema; this.physicalSchema = physicalSchema; this.params = params; this.logicalTableName = logicalTableName; this.phyTableNameList = phyTableNameList; this.designatedFile = designatedFile; + this.deltaReadOption = deltaReadOption; + this.checkpointTso = checkpointTso; + this.partIndex = partIndex; + this.localPairWise = localPairWise; } @JsonCreator @@ -132,13 +163,33 @@ public OssSplit( @JsonProperty("paramsBytes") byte[] paramsBytes, @JsonProperty("logicalTableName") String logicalTableName, @JsonProperty("phyTableNameList") List phyTableNameList, - @JsonProperty("designatedFile") List designatedFile) { + @JsonProperty("designatedFile") List designatedFile, + @JsonProperty("deltaReadOption") DeltaReadOption deltaReadOption, + @JsonProperty("checkpointTso") Long checkpointTso, + @JsonProperty("partIndex") int partIndex, + @JsonProperty("nodePartCount") int nodePartCount, + @JsonProperty("localPairWise") boolean localPairWise) { this.logicalSchema = logicalSchema; this.physicalSchema = physicalSchema; this.paramsBytes = paramsBytes; this.logicalTableName = logicalTableName; this.phyTableNameList = phyTableNameList; this.designatedFile = designatedFile; + this.partIndex = partIndex; + this.nodePartCount = nodePartCount; + this.deltaReadOption = 
deltaReadOption; + this.checkpointTso = checkpointTso; + this.localPairWise = localPairWise; + } + + @JsonProperty + public DeltaReadOption getDeltaReadOption() { + return deltaReadOption; + } + + @JsonProperty + public Long getCheckpointTso() { + return checkpointTso; } @Override @@ -153,58 +204,134 @@ public Object getInfo() { return null; } + @JsonProperty + public int getPartIndex() { + return partIndex; + } + + public void setPartIndex(int partIndex) { + this.partIndex = partIndex; + } + /** * group file meta by version * * @param relNode the physical operation * @return list of OssSplit, each split has the same version of meta */ - public static List<OssSplit> getTableConcurrencySplit(RelNode relNode, - ExecutionContext executionContext) { + public static List<OssSplit> getTableConcurrencySplit(OSSTableScan ossTableScan, RelNode relNode, + ExecutionContext executionContext, + Long tso) { Preconditions.checkArgument(relNode instanceof PhyTableOperation); PhyTableOperation phyTableOperation = (PhyTableOperation) relNode; String logicalSchema = phyTableOperation.getSchemaName(); String physicalSchema = phyTableOperation.getDbIndex(); - String logicalTableName = phyTableOperation.getLogicalTableNames().get(0); + String logicalTable = phyTableOperation.getLogicalTableNames().get(0); + List<String> phyTableList = phyTableOperation.getTableNames().get(0); PhyTableScanBuilder phyOperationBuilder = (PhyTableScanBuilder) phyTableOperation.getPhyOperationBuilder(); - TableMeta tableMeta = executionContext.getSchemaManager(logicalSchema).getTable(logicalTableName); + TableMeta tableMeta = executionContext.getSchemaManager(logicalSchema).getTable(logicalTable); Map<String, List<FileMeta>> flatFileMetas = FileMeta.getFlatFileMetas(tableMeta); - List<OssSplit> splits = new ArrayList<>(); - // for each physical table - for (String phyTable : phyTableOperation.getTableNames().get(0)) { - List<FileMeta> fileMetas = flatFileMetas.get(phyTable); - if (fileMetas.isEmpty()) { - continue; + if (ossTableScan.isColumnarIndex()) { + DeltaReadOption deltaReadOption = null; + + Map<Integer, ParameterContext> params = phyOperationBuilder.buildSplitParamMap(phyTableList); + final ColumnarManager columnarManager = ColumnarManager.getInstance(); + + // build delta read option + deltaReadOption = new DeltaReadOption(tso); + final PartitionInfo partitionInfo = tableMeta.getPartitionInfo(); + + Map<String, List<String>> allCsvFiles = new HashMap<>(); + List<String> allOrcFiles = new ArrayList<>(); + if (executionContext.isReadOrcOnly()) { + // Special hint, only read specified orc files. + // Normal columnar read should not get here. + allOrcFiles.addAll(executionContext.getReadOrcFiles()); + } else { + for (String physicalTable : phyTableList) { + // Find part name from physical schema + physical table + final String partName = partitionInfo.getPartitionNameByPhyLocation(physicalSchema, physicalTable); + + // Find csv files from tso + part name + // TODO(siyun): divide files with different schema_ts into different splits + Pair<List<String>, List<String>> files = + columnarManager.findFileNames(tso, logicalSchema, logicalTable, partName); + List<String> orcFiles = files.getKey(); + List<String> csvFiles = files.getValue(); + + allOrcFiles.addAll(orcFiles); + + if (GeneralUtil.isNotEmpty(csvFiles)) { + allCsvFiles.put( + physicalTable, + csvFiles + ); + } + } + } + + if (executionContext.isReadCsvOnly()) { + // Special hint, only read csv files, so we clear orc files here. + // Normal columnar read should not get here. + allOrcFiles.clear(); }
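In the non-columnar branch that follows, the original version-grouping logic is kept (re-indented below): file metas are bucketed by `commitTs` so that each split only ever reads files of one version. A compact sketch of that grouping, assuming `commitTs` is a `Long` (as the `computeIfAbsent(fileMeta.getCommitTs(), aLong -> ...)` key lambda suggests):

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class VersionGrouping {
    // Hypothetical slice of FileMeta: just the two fields the grouping needs.
    static class FileMeta {
        final long commitTs;
        final String fileName;

        FileMeta(long commitTs, String fileName) {
            this.commitTs = commitTs;
            this.fileName = fileName;
        }
    }

    // Mirrors the fileNamesMap construction in getTableConcurrencySplit:
    // one bucket of file names per commit timestamp, i.e. per version,
    // so a split never mixes files from two different versions.
    static Map<Long, List<String>> groupByVersion(List<FileMeta> metas) {
        Map<Long, List<String>> byVersion = new HashMap<>();
        for (FileMeta meta : metas) {
            byVersion.computeIfAbsent(meta.commitTs, ts -> new ArrayList<>())
                .add(meta.fileName);
        }
        return byVersion;
    }
}
```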
- //map version to files - Map<Long, List<String>> fileNamesMap = new HashMap<>(); - for (FileMeta fileMeta : fileMetas) { - List<String> list = fileNamesMap.computeIfAbsent(fileMeta.getCommitTs(), aLong -> new ArrayList<>()); - list.add(fileMeta.getFileName()); + + if (allOrcFiles.isEmpty() && allCsvFiles.isEmpty()) { + return ImmutableList.of(); } - // build single physical table list and params, - // and split for each file group. - List<String> singlePhyTableNameList = ImmutableList.of(phyTable); + // correct project column indexes (skip implicit column) + ImmutableList<Integer> projectList = ossTableScan.getOrcNode().getInProjects(); - Map<Integer, ParameterContext> params = - phyOperationBuilder.buildSplitParamMap(singlePhyTableNameList); + // TODO(siyun): column mapping may differ for different files + List<Integer> correctedProjectList = + columnarManager.getPhysicalColumnIndexes(tso, null, projectList); - for (List<String> names : fileNamesMap.values()) { - OssSplit ossSplit = new OssSplit(logicalSchema, physicalSchema, params, - logicalTableName, singlePhyTableNameList, names); - splits.add(ossSplit); + deltaReadOption.setAllCsvFiles(allCsvFiles); + deltaReadOption.setProjectColumnIndexes(correctedProjectList); + return ImmutableList.of(new OssSplit(logicalSchema, physicalSchema, params, + logicalTable, phyTableList, allOrcFiles, deltaReadOption, + tso, NO_PARTITION_INFO, false)); + } else { + List<OssSplit> splits = new ArrayList<>(); + // for each physical table + for (String phyTable : phyTableList) { + List<FileMeta> fileMetas = flatFileMetas.get(phyTable); + if (fileMetas.isEmpty()) { + continue; + } + //map version to files + Map<Long, List<String>> fileNamesMap = new HashMap<>(); + for (FileMeta fileMeta : fileMetas) { + List<String> list = + fileNamesMap.computeIfAbsent(fileMeta.getCommitTs(), aLong -> new ArrayList<>()); + list.add(fileMeta.getFileName()); + } + + // build single physical table list and params, + // and split for each file group.
+ List<String> singlePhyTableNameList = ImmutableList.of(phyTable); + + Map<Integer, ParameterContext> params = + phyOperationBuilder.buildSplitParamMap(singlePhyTableNameList); + + for (List<String> names : fileNamesMap.values()) { + OssSplit ossSplit = new OssSplit(logicalSchema, physicalSchema, params, + logicalTable, singlePhyTableNameList, names, null, + null, NO_PARTITION_INFO, false); + splits.add(ossSplit); + } } + return splits; } - return splits; } public static List<OssSplit> getFileConcurrencySplit(OSSTableScan ossTableScan, RelNode relNode, - ExecutionContext executionContext) { + ExecutionContext executionContext, Long tso) { Preconditions.checkArgument(relNode instanceof PhyTableOperation); List<OssSplit> splits = new ArrayList<>(); @@ -219,31 +346,151 @@ public static List<OssSplit> getFileConcurrencySplit(OSSTableScan ossTableScan, (PhyTableScanBuilder) phyTableOperation.getPhyOperationBuilder(); TableMeta tableMeta = executionContext.getSchemaManager(logicalSchema).getTable(logicalTableName); - Map<String, List<FileMeta>> flatFileMetas = tableMeta.getFlatFileMetas(); - // for each physical table - for (int i = 0; i < phyTableNameList.size(); i++) { - String phyTable = phyTableNameList.get(i); - List<FileMeta> fileMetas = flatFileMetas.get(phyTable); - if (fileMetas.isEmpty()) { - continue; + if (ossTableScan.isColumnarIndex()) { + final PartitionInfo partitionInfo = tableMeta.getPartitionInfo(); + final ColumnarManager columnarManager = ColumnarManager.getInstance(); + + // for each physical table + Set<String> targetOrcFiles = null; + for (int i = 0; i < phyTableNameList.size(); i++) { + String phyTable = phyTableNameList.get(i); + + // Find part name from physical schema + physical table + final String partName = partitionInfo.getPartitionNameByPhyLocation(physicalSchema, phyTable); + + // Find csv files from tso + part name + Pair<List<String>, List<String>> files = + columnarManager.findFileNames(tso, logicalSchema, logicalTableName, partName); + List<String> orcFiles = files.getKey(); + List<String> csvFiles = files.getValue(); + + if (executionContext.isReadOrcOnly()) { + // Special hint, only read specified files. + // Normal columnar read should not get here. + csvFiles = new ArrayList<>(); + if (null == targetOrcFiles) { + targetOrcFiles = new HashSet<>(executionContext.getReadOrcFiles()); } + List<String> newOrcFiles = new ArrayList<>(); + for (String orcFile : orcFiles) { + if (targetOrcFiles.contains(orcFile)) { + newOrcFiles.add(orcFile); + } + } + orcFiles = newOrcFiles; + } + + if (executionContext.isReadCsvOnly()) { + // Special hint, only read csv files, so we clear orc files here. + orcFiles.clear(); + } + + // get partition number of this split, used for partition wise join + RelPartitionWise partitionWise = ossTableScan.getTraitSet().getPartitionWise(); + boolean needPartition = partitionWise.isRemotePartition() || executionContext.getParamManager() .getBoolean(ConnectionParams.SCHEDULE_BY_PARTITION); + + int partition = needPartition ? + calcPartition(logicalSchema, logicalTableName, physicalSchema, phyTable) : NO_PARTITION_INFO; + boolean localPairWise = + executionContext.getParamManager().getBoolean(ConnectionParams.ENABLE_LOCAL_PARTITION_WISE_JOIN) + && partitionWise.isLocalPartition();
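`calcPartition` (defined at the end of this hunk) maps a split's physical location back to a partition ordinal for partition-wise scheduling: it scans the table's partition specs for a case-insensitive match on group key plus physical table name, takes the 1-based `getPosition()`, and subtracts one to yield a 0-based index. A reduced sketch of that lookup, with a hypothetical `PartLocation` standing in for `PartSpecBase` and its location object:

```java
import java.util.List;

public class PartitionLookup {
    // Hypothetical flattened view of one partition's physical location.
    static class PartLocation {
        final String groupKey;     // physical schema (group key)
        final String phyTableName; // physical table name
        final long position;       // 1-based partition position

        PartLocation(String groupKey, String phyTableName, long position) {
            this.groupKey = groupKey;
            this.phyTableName = phyTableName;
            this.position = position;
        }
    }

    // Mirrors OssSplit.calcPartition: match the physical location
    // case-insensitively, then convert the 1-based position to 0-based
    // (a miss yields orElse(-1) - 1 = -2, distinct from any real index).
    static int calcPartition(List<PartLocation> parts, String physicalSchema, String physicalTable) {
        return (int) (parts.stream()
            .filter(p -> p.groupKey.equalsIgnoreCase(physicalSchema)
                && p.phyTableName.equalsIgnoreCase(physicalTable))
            .findFirst()
            .map(p -> p.position)
            .orElse(-1L) - 1);
    }
}
```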
+ + // build split for orc files. + if (!orcFiles.isEmpty()) { + // build single physical table list and params, + // and split for each file. + List<String> singlePhyTableNameList = ImmutableList.of(phyTable); + + Map<Integer, ParameterContext> params = + phyOperationBuilder.buildSplitParamMap(singlePhyTableNameList); + + for (String orcFile : orcFiles) { + OssSplit ossSplit = new OssSplit( + logicalSchema, physicalSchema, params, + logicalTableName, singlePhyTableNameList, + ImmutableList.of(orcFile), null, + tso, partition, localPairWise); + splits.add(ossSplit); + } + } + + // build split for csv files. + if (!csvFiles.isEmpty()) { + for (String csvFile : csvFiles) { + // build single physical table list and params, + // and split for each file. + List<String> singlePhyTableNameList = ImmutableList.of(phyTable); + + Map<Integer, ParameterContext> params = + phyOperationBuilder.buildSplitParamMap(singlePhyTableNameList); + + // Build delta read option. There is only one physical table and one csv file name. + DeltaReadOption deltaReadOption = new DeltaReadOption(tso); + Map<String, List<String>> allCsvFiles = new HashMap<>(); + allCsvFiles.put(phyTable, ImmutableList.of(csvFile)); + + // correct project column indexes (skip implicit column) + ImmutableList<Integer> projectList = ossTableScan.getOrcNode().getInProjects(); + + // TODO(siyun): column mapping may differ for different files + List<Integer> correctedProjectList = + columnarManager.getPhysicalColumnIndexes(tso, null, projectList); + + deltaReadOption.setAllCsvFiles(allCsvFiles); + deltaReadOption.setProjectColumnIndexes(correctedProjectList); + + OssSplit ossSplit = new OssSplit( + logicalSchema, physicalSchema, params, + logicalTableName, singlePhyTableNameList, + null, deltaReadOption, + tso, partition, localPairWise); + splits.add(ossSplit); + } + } } + return splits; + } else { + Map<String, List<FileMeta>> flatFileMetas = tableMeta.getFlatFileMetas(); - // build single physical table list and params, - // and split for each file. - List<String> singlePhyTableNameList = ImmutableList.of(phyTable); + // for each physical table + for (int i = 0; i < phyTableNameList.size(); i++) { + String phyTable = phyTableNameList.get(i); + List<FileMeta> fileMetas = flatFileMetas.get(phyTable); + if (fileMetas.isEmpty()) { + continue; + } - Map<Integer, ParameterContext> params = - phyOperationBuilder.buildSplitParamMap(singlePhyTableNameList); + // build single physical table list and params, + // and split for each file. + List<String> singlePhyTableNameList = ImmutableList.of(phyTable); - for (FileMeta fileMeta : fileMetas) { - OssSplit ossSplit = new OssSplit(logicalSchema, physicalSchema, params, - logicalTableName, singlePhyTableNameList, ImmutableList.of(fileMeta.getFileName())); - splits.add(ossSplit); + Map<Integer, ParameterContext> params = + phyOperationBuilder.buildSplitParamMap(singlePhyTableNameList); + + for (FileMeta fileMeta : fileMetas) { + OssSplit ossSplit = new OssSplit(logicalSchema, physicalSchema, params, + logicalTableName, singlePhyTableNameList, ImmutableList.of(fileMeta.getFileName()), null, + null, NO_PARTITION_INFO, false); + splits.add(ossSplit); + } } + return splits; } + } - return splits + public static int calcPartition(String logicalSchema, String logicalTableName, String physicalSchema, + String physicalTableName) { + PartitionInfo partInfo = + OptimizerContext.getContext(logicalSchema).getPartitionInfoManager() + .getPartitionInfo(logicalTableName); + int partition = partInfo.getPartitionBy().getPartitions().stream().
+ filter( + t -> t.getLocation().getGroupKey().equalsIgnoreCase(physicalSchema) + && t.getLocation().getPhyTableName().equalsIgnoreCase(physicalTableName)) + .findFirst().map(PartSpecBase::getPosition).map(Long::intValue).orElse(-1); + return partition - 1; } @JsonIgnore @@ -297,6 +544,24 @@ public byte[] getParamsBytes() { return paramsBytes; } + @JsonProperty + public int getNodePartCount() { + return nodePartCount; + } + + public void setNodePartCount(int nodePartCount) { + this.nodePartCount = nodePartCount; + } + + @JsonProperty + public Boolean isLocalPairWise() { + return localPairWise; + } + + public void setLocalPairWise(Boolean localPairWise) { + this.localPairWise = localPairWise; + } + @JsonIgnore public TypeDescription getReadSchema() { return readSchema; @@ -312,6 +577,115 @@ public String[] getColumns() { return columns; } + public OSSColumnTransformer getColumnTransformer(OSSTableScan ossTableScan, ExecutionContext executionContext) { + TableMeta tableMeta = executionContext.getSchemaManager(logicalSchema).getTable(logicalTableName); + Set<String> filterSet = getFilterSet(executionContext); + List timestamps = new ArrayList<>(); + List<ColumnMeta> columnMetas = new ArrayList<>(); + List<ColumnMeta> fileColumnMetas = new ArrayList<>(); + List<ColumnMeta> initColumnMetas = new ArrayList<>(); + List<Integer> locInOrc = new ArrayList<>(); + + boolean isColumnarMode = ossTableScan.isColumnarIndex(); + + if (isColumnarMode) { + String designatedFileName; + if (CollectionUtils.isEmpty(designatedFile)) { + // for csv file + Optional<List<String>> fileNames = deltaReadOption.allCsvFiles.values().stream().findFirst(); + if (fileNames.isPresent()) { + if (fileNames.get().isEmpty()) { + return null; + } else { + designatedFileName = fileNames.get().get(0); + } + } else { + return null; + } + } else { + // for orc file + designatedFileName = designatedFile.get(0); + } + FileMeta fileMeta = ColumnarManager.getInstance().fileMetaOf(designatedFileName); + List<Integer> inProjects = ossTableScan.getOrcNode().getInProjects(); + List<String> inProjectNames = ossTableScan.getOrcNode().getInputProjectName(); + long tableId = Long.parseLong(fileMeta.getLogicalTableName()); + + Map<Long, Integer> columnIndexMap = + ColumnarManager.getInstance().getColumnIndex(fileMeta.getSchemaTs(), tableId); + + for (int i = 0; i < inProjects.size(); i++) { + Integer columnIndex = inProjects.get(i); + String columnName = inProjectNames.get(i); + + columnMetas.add(tableMeta.getColumn(columnName)); + + long fieldId = tableMeta.getColumnarFieldId(columnIndex); + Integer actualColumnIndex = columnIndexMap.get(fieldId); + if (actualColumnIndex != null) { + fileColumnMetas.add(fileMeta.getColumnMetas().get(actualColumnIndex)); + locInOrc.add(actualColumnIndex + 1); + timestamps.add(null); + initColumnMetas.add(null); + } else { + ColumnMetaWithTs metaWithTs = ColumnarManager.getInstance().getInitColumnMeta(tableId, fieldId); + fileColumnMetas.add(null); + locInOrc.add(null); + timestamps.add(metaWithTs.getCreate()); + initColumnMetas.add(metaWithTs.getMeta()); + } + } + } else { + Map<String, List<FileMeta>> flatFileMetas = tableMeta.getFlatFileMetas(); + Optional<FileMeta> baseFileMeta = phyTableNameList.stream() + .map(flatFileMetas::get) + .flatMap(List::stream) + .filter(x -> (filterSet == null || filterSet.contains(x.getFileName()))) + .findFirst(); + + if (!baseFileMeta.isPresent()) { + return null; + } + + OSSOrcFileMeta fileMeta = (OSSOrcFileMeta) baseFileMeta.get(); + + Preconditions.checkArgument(fileMeta.getCommitTs() != null); + for (String column : ossTableScan.getOrcNode().getInputProjectName()) { + String fieldId =
tableMeta.getColumnFieldId(column); + columnMetas.add(tableMeta.getColumn(column)); + if (tableMeta.isOldFileStorage()) { + fileColumnMetas.add(tableMeta.getColumn(fieldId)); + initColumnMetas.add(null); + timestamps.add(null); + locInOrc.add(fileMeta.getColumnNameToIdx(fieldId) + 1); + continue; + } + ColumnMetaWithTs meta = OrcColumnManager.getHistoryWithTs(fieldId, fileMeta.getCommitTs()); + if (meta != null) { + fileColumnMetas.add(meta.getMeta()); + initColumnMetas.add(null); + timestamps.add(meta.getCreate()); + locInOrc.add(fileMeta.getColumnNameToIdx(fieldId) + 1); + } else { + // new column after the file was created, use default value when the column was created + fileColumnMetas.add(null); + ColumnMetaWithTs versionColumnMeta = OrcColumnManager.getFirst(fieldId); + initColumnMetas.add(versionColumnMeta.getMeta()); + timestamps.add(versionColumnMeta.getCreate()); + locInOrc.add(null); + } + } + } + + return new OSSColumnTransformer( + columnMetas, + fileColumnMetas, + initColumnMetas, + timestamps, + locInOrc + ); + } + public void init(OSSTableScan ossTableScan, ExecutionContext executionContext, SessionProperties sessionProperties, Map bloomFilterInfos, RexNode bloomFilterCondition) { @@ -333,8 +707,6 @@ public void init(OSSTableScan ossTableScan, ExecutionContext executionContext, TableMeta tableMeta = executionContext.getSchemaManager(logicalSchema).getTable(logicalTableName); Parameters parameters = executionContext.getParams(); - // physical table name -> file metas - Map> flatFileMetas = tableMeta.getFlatFileMetas(); Engine tableEngine = tableMeta.getEngine(); List columnMetas = new ArrayList<>(); @@ -342,12 +714,30 @@ public void init(OSSTableScan ossTableScan, ExecutionContext executionContext, List initColumnMetas = new ArrayList<>(); List timestamps = new ArrayList<>(); + boolean isColumnarMode = ossTableScan.isColumnarIndex(); // init allFileMetas and readSchema for (int j = 0; j < phyTableNameList.size(); j++) { String phyTable = phyTableNameList.get(j); - List fileMetas = flatFileMetas.get(phyTable).stream() - .filter(x -> (filterSet == null || filterSet.contains(x.getFileName()))).collect( - Collectors.toList()); + List fileMetas; + if (isColumnarMode) { + final PartitionInfo partitionInfo = tableMeta.getPartitionInfo(); + final ColumnarManager columnarManager = ColumnarManager.getInstance(); + final long checkpointTso = this.checkpointTso; + + // Find part name from physical schema + physical table + final String partName = partitionInfo.getPartitionNameByPhyLocation(physicalSchema, phyTable); + + // Find csv files from tso + part name + Pair, List> files = + columnarManager.findFiles(checkpointTso, logicalSchema, logicalTableName, partName); + fileMetas = files.getKey(); + } else { + // physical table name -> file metas + Map> flatFileMetas = tableMeta.getFlatFileMetas(); + fileMetas = flatFileMetas.get(phyTable).stream() + .filter(x -> (filterSet == null || filterSet.contains(x.getFileName()))).collect( + Collectors.toList()); + } if (fileMetas.isEmpty()) { continue; @@ -357,39 +747,58 @@ public void init(OSSTableScan ossTableScan, ExecutionContext executionContext, if (this.readSchema == null) { TypeDescription typeDescription = TypeDescription.createStruct(); OSSOrcFileMeta fileMeta = (OSSOrcFileMeta) fileMetas.get(0); - if (fileMeta.getCommitTs() == null) { - continue; - } - for (String column : ossTableScan.getOrcNode().getInputProjectName()) { - String fieldId = tableMeta.getColumnFieldId(column); - columnMetas.add(tableMeta.getColumn(column)); - if 
(tableMeta.isOldFileStorage()) { - Integer columnIndex = fileMeta.getColumnNameToIdx(fieldId); + + if (isColumnarMode) { + + // for columnar store mode, add implicit column: position + // NOTE: need destruction in read result post handler. + // TODO(siyun): this part is only used by old table scan, support later + typeDescription.addField("position", TypeDescription.createLong()); + for (Integer columnIndex : ossTableScan.getOrcNode().getInProjects()) { + int actualColumnIndex = columnIndex + (ColumnarStoreUtils.POSITION_COLUMN_INDEX + 1); typeDescription.addField( - fileMeta.getTypeDescription().getFieldNames().get(columnIndex), - fileMeta.getTypeDescription().getChildren().get(columnIndex).clone()); - fileColumnMetas.add(tableMeta.getColumn(fieldId)); + fileMeta.getTypeDescription().getFieldNames().get(actualColumnIndex), + fileMeta.getTypeDescription().getChildren().get(actualColumnIndex).clone()); + columnMetas.add(fileMeta.getColumnMetas().get(actualColumnIndex)); + fileColumnMetas.add(fileMeta.getColumnMetas().get(actualColumnIndex)); initColumnMetas.add(null); - timestamps.add(null); + } + } else { + if (fileMeta.getCommitTs() == null) { continue; } - ColumnMetaWithTs meta = OrcColumnManager.getHistoryWithTs(fieldId, fileMeta.getCommitTs()); - if (meta != null) { - Integer columnIndex = fileMeta.getColumnNameToIdx(fieldId); - typeDescription.addField( - fileMeta.getTypeDescription().getFieldNames().get(columnIndex), - fileMeta.getTypeDescription().getChildren().get(columnIndex).clone()); - fileColumnMetas.add(meta.getMeta()); - initColumnMetas.add(null); - timestamps.add(meta.getCreate()); - } else { - // new column after the file was created, use default value when the column was created - fileColumnMetas.add(null); - ColumnMetaWithTs versionColumnMeta = OrcColumnManager.getFirst(fieldId); - initColumnMetas.add(versionColumnMeta.getMeta()); - timestamps.add(versionColumnMeta.getCreate()); + for (String column : ossTableScan.getOrcNode().getInputProjectName()) { + String fieldId = tableMeta.getColumnFieldId(column); + columnMetas.add(tableMeta.getColumn(column)); + if (tableMeta.isOldFileStorage()) { + Integer columnIndex = fileMeta.getColumnNameToIdx(fieldId); + typeDescription.addField( + fileMeta.getTypeDescription().getFieldNames().get(columnIndex), + fileMeta.getTypeDescription().getChildren().get(columnIndex).clone()); + fileColumnMetas.add(tableMeta.getColumn(fieldId)); + initColumnMetas.add(null); + timestamps.add(null); + continue; + } + ColumnMetaWithTs meta = OrcColumnManager.getHistoryWithTs(fieldId, fileMeta.getCommitTs()); + if (meta != null) { + Integer columnIndex = fileMeta.getColumnNameToIdx(fieldId); + typeDescription.addField( + fileMeta.getTypeDescription().getFieldNames().get(columnIndex), + fileMeta.getTypeDescription().getChildren().get(columnIndex).clone()); + fileColumnMetas.add(meta.getMeta()); + initColumnMetas.add(null); + timestamps.add(meta.getCreate()); + } else { + // new column after the file was created, use default value when the column was created + fileColumnMetas.add(null); + ColumnMetaWithTs versionColumnMeta = OrcColumnManager.getFirst(fieldId); + initColumnMetas.add(versionColumnMeta.getMeta()); + timestamps.add(versionColumnMeta.getCreate()); + } } } + this.readSchema = typeDescription; } } @@ -398,7 +807,8 @@ public void init(OSSTableScan ossTableScan, ExecutionContext executionContext, OSSColumnTransformer ossColumnTransformer = new OSSColumnTransformer(columnMetas, fileColumnMetas, initColumnMetas, - timestamps); + timestamps, + null); 
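Both branches above feed the same parallel lists into `OSSColumnTransformer`: for each projected column, either the file's own column meta is present, or, when the column was added after the file was written, a null placeholder plus the column's initial meta and creation time are recorded so the reader can synthesize default values. A schematic of that per-column decision, with hypothetical placeholder types (the creation timestamp is simplified to a `Long` here):

```java
import java.util.List;
import java.util.Optional;

public class ColumnResolution {
    // Hypothetical stand-ins for ColumnMeta / ColumnMetaWithTs in the diff.
    static class ColumnMeta { String name; }
    static class ColumnMetaWithTs { ColumnMeta meta; long createTs; }

    // One projected column resolves to either "read it from the file" or
    // "fill defaults from the meta recorded when the column was created".
    static void resolve(Optional<ColumnMeta> inFile, ColumnMetaWithTs firstVersion,
                        List<ColumnMeta> fileColumnMetas,
                        List<ColumnMeta> initColumnMetas,
                        List<Long> timestamps) {
        if (inFile.isPresent()) {
            fileColumnMetas.add(inFile.get());   // column exists in this file
            initColumnMetas.add(null);
            timestamps.add(null);
        } else {
            fileColumnMetas.add(null);           // column is newer than the file
            initColumnMetas.add(firstVersion.meta);
            timestamps.add(firstVersion.createTs);
        }
    }
}
```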
// init readOptions List phyTableReadOptions = new ArrayList<>(); @@ -443,6 +853,7 @@ public void init(OSSTableScan ossTableScan, ExecutionContext executionContext, Long readTs = null; if (ossTableScan.getFlashback() instanceof RexDynamicParam) { + String timestampString = executionContext.getParams().getCurrentParameter() .get(((RexDynamicParam) ossTableScan.getFlashback()).getIndex() + 1).getValue().toString(); TimeZone fromTimeZone; @@ -537,7 +948,8 @@ public void init(OSSTableScan ossTableScan, ExecutionContext executionContext, tableFileNames, afterPruningFileMetas, pruningResultList, - executionContext.getParamManager().getLong(ConnectionParams.OSS_ORC_MAX_MERGE_DISTANCE) + executionContext.getParamManager().getLong(ConnectionParams.OSS_ORC_MAX_MERGE_DISTANCE), + ossTableScan.isColumnarIndex() ); phyTableReadOptions.add(readOption); @@ -629,7 +1041,7 @@ private void pruneStripe(OSSOrcFileMeta fileMeta, SqlKind kind = agg.getAggCallList().get(i).getAggregation().getKind(); RelColumnOrigin columnOrigin = ossTableScan.getAggColumns().get(i); // any stripe can't use statistics - if (kind == SqlKind.COUNT || kind == SqlKind.CHECK_SUM) { + if (kind == SqlKind.COUNT || kind == SqlKind.CHECK_SUM || kind == SqlKind.CHECK_SUM_V2) { if (!(pruningResult.getNonStatisticsStripeSize() == 0)) { pruningResult.addNotAgg(pruningResult.getStripeMap().keySet()); } @@ -692,7 +1104,7 @@ private void pruneStripe(OSSOrcFileMeta fileMeta, } @Nullable - private Set getFilterSet(ExecutionContext executionContext) { + public Set getFilterSet(ExecutionContext executionContext) { Set filterSet = null; String fileListStr = executionContext.getParamManager().getString(ConnectionParams.FILE_LIST); if (!fileListStr.equalsIgnoreCase("ALL")) { @@ -766,4 +1178,54 @@ public String toString() { .toString(); } + public static class DeltaReadOption implements Serializable { + /** + * checkpoint tso for columnar store. 
+ */ + private final long checkpointTso; + + /** + * map: {physical table} -> {list of csv files} + */ + private Map<String, List<String>> allCsvFiles; + + private List<Integer> projectColumnIndexes; + + public DeltaReadOption(long checkpointTso) { + this.checkpointTso = checkpointTso; + } + + @JsonCreator + public DeltaReadOption( + @JsonProperty("checkpointTso") long checkpointTso, + @JsonProperty("allCsvFiles") Map<String, List<String>> allCsvFiles, + @JsonProperty("projectColumnIndexes") List<Integer> projectColumnIndexes) { + this.checkpointTso = checkpointTso; + this.allCsvFiles = allCsvFiles; + this.projectColumnIndexes = projectColumnIndexes; + } + + @JsonProperty + public long getCheckpointTso() { + return checkpointTso; + } + + @JsonProperty + public Map<String, List<String>> getAllCsvFiles() { + return allCsvFiles; + } + + @JsonProperty + public List<Integer> getProjectColumnIndexes() { + return projectColumnIndexes; + } + + public void setAllCsvFiles(Map<String, List<String>> allCsvFiles) { + this.allCsvFiles = allCsvFiles; + } + + public void setProjectColumnIndexes(List<Integer> projectColumnIndexes) { + this.projectColumnIndexes = projectColumnIndexes; + } + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/split/RemoteSplit.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/split/RemoteSplit.java index 0b0377f77..e54e51140 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/split/RemoteSplit.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/split/RemoteSplit.java @@ -29,10 +29,10 @@ */ package com.alibaba.polardbx.executor.mpp.split; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; import com.alibaba.polardbx.executor.mpp.metadata.TaskLocation; import com.alibaba.polardbx.executor.mpp.spi.ConnectorSplit; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; import static com.google.common.base.MoreObjects.toStringHelper; import static java.util.Objects.requireNonNull; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/split/SplitManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/split/SplitManager.java index c3977e3f2..cfd1c6feb 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/split/SplitManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/split/SplitManager.java @@ -16,524 +16,11 @@ package com.alibaba.polardbx.executor.mpp.split; -import com.alibaba.polardbx.common.exception.TddlRuntimeException; -import com.alibaba.polardbx.common.exception.code.ErrorCode; -import com.alibaba.polardbx.common.jdbc.BytesSql; -import com.alibaba.polardbx.common.jdbc.ParameterContext; -import com.alibaba.polardbx.common.properties.DynamicConfig; -import com.alibaba.polardbx.common.utils.GeneralUtil; -import com.alibaba.polardbx.common.utils.Pair; -import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.common.utils.logger.LoggerFactory; -import com.alibaba.polardbx.config.ConfigDataMode; -import com.alibaba.polardbx.executor.common.ExecutorContext; -import com.alibaba.polardbx.executor.common.TopologyHandler; -import com.alibaba.polardbx.executor.mpp.metadata.Split; -import com.alibaba.polardbx.executor.spi.IGroupExecutor; -import com.alibaba.polardbx.executor.utils.ExecUtils; -import com.alibaba.polardbx.executor.utils.SubqueryUtils; -import com.alibaba.polardbx.group.jdbc.TGroupDataSource; import com.alibaba.polardbx.optimizer.context.ExecutionContext;
-import com.alibaba.polardbx.optimizer.core.rel.BaseQueryOperation; -import com.alibaba.polardbx.optimizer.core.rel.BaseTableOperation; import com.alibaba.polardbx.optimizer.core.rel.LogicalView; -import com.alibaba.polardbx.optimizer.core.rel.OSSTableScan; -import com.alibaba.polardbx.optimizer.core.rel.PhyTableOperation; -import com.alibaba.polardbx.optimizer.core.rel.PhyTableScanBuilder; -import com.alibaba.polardbx.optimizer.utils.GroupConnId; -import com.alibaba.polardbx.optimizer.utils.ITransaction; -import com.alibaba.polardbx.optimizer.utils.PhyTableOperationUtil; -import com.alibaba.polardbx.optimizer.utils.QueryConcurrencyPolicy; -import com.alibaba.polardbx.optimizer.utils.RelUtils; -import com.google.common.collect.ImmutableList; -import com.google.protobuf.ByteString; -import org.apache.calcite.rel.RelNode; -import org.apache.calcite.rex.RexDynamicParam; -import org.apache.calcite.sql.SqlBasicCall; -import org.apache.calcite.sql.SqlLiteral; -import org.apache.calcite.sql.SqlNode; -import org.apache.calcite.sql.SqlOperator; -import org.apache.calcite.sql.SqlSelect; -import org.apache.calcite.sql.fun.SqlStdOperatorTable; -import org.apache.calcite.sql.parser.SqlParserPos; -import org.apache.commons.lang.StringUtils; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; +public interface SplitManager { + SplitInfo getSingleSplit(LogicalView logicalView, ExecutionContext executionContext); -import static com.alibaba.polardbx.group.jdbc.TGroupDataSource.LOCAL_ADDRESS; -import static com.alibaba.polardbx.optimizer.utils.QueryConcurrencyPolicy.CONCURRENT; -import static com.alibaba.polardbx.optimizer.utils.QueryConcurrencyPolicy.FILE_CONCURRENT; -import static com.alibaba.polardbx.optimizer.utils.QueryConcurrencyPolicy.FIRST_THEN_CONCURRENT; -import static com.alibaba.polardbx.optimizer.utils.QueryConcurrencyPolicy.GROUP_CONCURRENT_BLOCK; - -public class SplitManager { - - private static final Logger logger = LoggerFactory.getLogger(SplitManager.class); - - public static SqlNode DYNAMIC_CONDITION_PLACEHOLDER = new SqlBasicCall(SqlStdOperatorTable.EQUALS, - new SqlNode[] { - SqlLiteral.createCharString("bka_magic", SqlParserPos.ZERO), - SqlLiteral.createCharString("bka_magic", SqlParserPos.ZERO)}, SqlParserPos.ZERO); - - public SplitInfo getSingleSplit(LogicalView logicalView, ExecutionContext executionContext) { - if (logicalView instanceof OSSTableScan) { - throw GeneralUtil.nestedException("Impossible code path: oss table scan with single split"); - } else { - return logicalViewSingleSplit(logicalView, executionContext); - } - } - - public SplitInfo logicalViewSingleSplit(LogicalView logicalView, ExecutionContext executionContext) { - List splitList = new ArrayList<>(); - String schemaName = logicalView.getSchemaName(); - if (StringUtils.isEmpty(schemaName)) { - schemaName = executionContext.getSchemaName(); - } - TopologyHandler topology = ExecutorContext.getContext(schemaName).getTopologyHandler(); - - ITransaction.RW rw = ITransaction.RW.READ; - - BaseQueryOperation queryOperation = logicalView.fromTableOperation(); - List> params = null; - String dbIndex = null; - - if (queryOperation instanceof BaseTableOperation && ((BaseTableOperation) queryOperation).isForUpdate()) { - rw = ITransaction.RW.WRITE; - } else if (logicalView.getLockMode() == SqlSelect.LockMode.EXCLUSIVE_LOCK) { - rw = ITransaction.RW.WRITE; - } - - 
List> phyTableNames = new ArrayList<>(); - boolean useParameterDelegate = ExecUtils.useParameterDelegate(executionContext); - if (queryOperation instanceof PhyTableOperation) { - PhyTableScanBuilder phyOperationBuilder = - (PhyTableScanBuilder) ((PhyTableOperation) queryOperation).getPhyOperationBuilder(); - if (phyOperationBuilder != null) { - List> groupTables = ((PhyTableOperation) queryOperation).getTableNames(); - params = new ArrayList<>(groupTables.size()); - for (List tables : groupTables) { - params.add(phyOperationBuilder.buildSplitParams(dbIndex, tables, useParameterDelegate)); - } - dbIndex = queryOperation.getDbIndex(); - for (List tables : groupTables) { - params.add(phyOperationBuilder.buildSplitParams(dbIndex, tables, useParameterDelegate)); - } - - phyTableNames = groupTables; - } - } - if (params == null) { - - Pair> dbIndexAndParam = - queryOperation - .getDbIndexAndParam(executionContext.getParams() == null ? null : executionContext.getParams() - .getCurrentParameter(), phyTableNames, executionContext); - Map p = dbIndexAndParam.getValue(); - - List splitParams = new ArrayList<>(); - params = Collections.singletonList(splitParams); - if (p != null) { - int paramCount = p.keySet().size(); - for (int i = 1; i <= paramCount; i++) { - splitParams.add(i - 1, p.get(i)); - } - } - dbIndex = dbIndexAndParam.getKey(); - - } - - IGroupExecutor groupExecutor = topology.get(dbIndex); - TGroupDataSource ds = (TGroupDataSource) groupExecutor.getDataSource(); - - String address = LOCAL_ADDRESS; - if (!DynamicConfig.getInstance().enableExtremePerformance()) { - address = ds.getOneAtomAddress(ConfigDataMode.isMasterMode()); - } - - byte[] hint = ExecUtils.buildDRDSTraceCommentBytes(executionContext); - BytesSql sqlTemplate = queryOperation.getBytesSql(); - Long intraGroupSortKey = - PhyTableOperationUtil - .fetchBaseOpIntraGroupConnKey(queryOperation, dbIndex, phyTableNames, - executionContext); - final ByteString galaxyDigestBS = logicalView.getGalaxyPrepareDigest(executionContext, sqlTemplate); - final byte[] galaxyDigest = null == galaxyDigestBS ? null : galaxyDigestBS.toByteArray(); - JdbcSplit split = new JdbcSplit(ds.getDbGroupKey(), - schemaName, - dbIndex, - hint, - sqlTemplate, - null, - params, - address, - ImmutableList.of(logicalView.getTableNames()), - rw, - false, - intraGroupSortKey, - galaxyDigest, - galaxyDigest != null && logicalView.isSupportGalaxyPrepare()); - splitList.add(new Split(false, split)); - - HashMap groups = new HashMap<>(); - groups.put(dbIndex, schemaName); - Map grpConnIdSet = new HashMap<>(); - grpConnIdSet.put(new GroupConnId(dbIndex, split.getGrpConnId(executionContext)), schemaName); - return new SplitInfo(logicalView.getRelatedId(), false, QueryConcurrencyPolicy.SEQUENTIAL, - ImmutableList.of(splitList), groups, 1, - 1, false, grpConnIdSet); - } - - public SplitInfo getSplits( - LogicalView logicalView, ExecutionContext executionContext, boolean highConcurrencyQuery) { - if (logicalView instanceof OSSTableScan) { - return ossTableScanSplit((OSSTableScan) logicalView, executionContext, highConcurrencyQuery); - } else { - return logicalViewSplit(logicalView, executionContext, highConcurrencyQuery); - } - } - - public SplitInfo logicalViewSplit(LogicalView logicalView, ExecutionContext executionContext, - boolean highConcurrencyQuery) { - if (logicalView != null) { - ITransaction.RW rw = - logicalView.getLockMode() == SqlSelect.LockMode.UNDEF ? 
-            boolean underSort = logicalView.pushedRelNodeIsSort();
-            QueryConcurrencyPolicy concurrencyPolicy = highConcurrencyQuery ? CONCURRENT :
-                ExecUtils.getQueryConcurrencyPolicy(executionContext, logicalView);
-
-            boolean allowMultipleReadConns = ExecUtils.allowMultipleReadConns(executionContext, logicalView);
-
-            // before scheduling splits, scalar subqueries under the scan are evaluated on the server side
-            List<RexDynamicParam> scalarList = logicalView.getScalarList();
-            if (scalarList.size() > 0) {
-                SubqueryUtils.buildScalarSubqueryValue(scalarList, executionContext); // handle
-            }
-
-            List<RelNode> inputs = ExecUtils.getInputs(
-                logicalView, executionContext, !ExecUtils.isMppMode(executionContext));
-
-            // FIXME once the optimizer can estimate memory usage, call the logic below and
-            // refuse to run the SQL if the estimated memory exceeds the limit
-            //HandlerCommon.checkExecMemCost(ExecutionContext executionContext, List subNodes);
-
-            if (inputs.size() > 1) {
-                /*
-                 * record a whole-table scan (currently: more than one shard accessed), used later for sql.log output
-                 */
-                executionContext.setHasScanWholeTable(true);
-            }
-
-            byte[] hint = ExecUtils.buildDRDSTraceCommentBytes(executionContext);
-
-            String schemaName = logicalView.getSchemaName();
-            if (StringUtils.isEmpty(schemaName)) {
-                schemaName = executionContext.getSchemaName();
-            }
-
-            TopologyHandler topology = ExecutorContext.getContext(logicalView.getSchemaName())
-                .getTopologyHandler();
-
-            HashMap<String, String> shardSet = new HashMap<>();
-            Set<String> instSet = new HashSet<>();
-            Map<GroupConnId, String> grpConnSet = new HashMap<>();
-            int splitCount = 0;
-            switch (concurrencyPolicy) {
-            case SEQUENTIAL:
-            case CONCURRENT:
-                List<RelNode> sortInputs = ExecUtils.zigzagInputsByMysqlInst(inputs, schemaName, executionContext);
-                List<Split> splitList = new ArrayList<>();
-                for (RelNode input : sortInputs) {
-                    JdbcSplit split =
-                        parseRelNode(
-                            logicalView, topology, input, logicalView.getSchemaName(), hint, rw,
-                            executionContext);
-                    if (split != null) {
-                        shardSet.put(split.getDbIndex(), split.getSchemaName());
-                        instSet.add(split.getHostAddress());
-                        grpConnSet.put(new GroupConnId(split.getDbIndex(), split.getGrpConnId(executionContext)),
-                            split.getSchemaName());
-                        splitList.add(new Split(false, split));
-                        splitCount++;
-                    }
-                }
-                return new SplitInfo(logicalView.getRelatedId(), logicalView.isExpandView(), concurrencyPolicy,
-                    ImmutableList.of(splitList),
-                    shardSet, instSet.size(),
-                    splitCount, underSort, grpConnSet);
-            case FIRST_THEN_CONCURRENT: // only used when writing broadcast tables; queries fall back to the default GROUP_CONCURRENT_BLOCK
-            case GROUP_CONCURRENT_BLOCK:
-                if (!allowMultipleReadConns) {
-                    Map<String, List<Split>> splitAssignment = new HashMap<>();
-                    Map<String, List<String>> instDbMap = new LinkedHashMap<>();
-                    List<List<Split>> outList = new ArrayList<>();
-                    int maxInstDbsize = 0;
-
-                    for (RelNode input : inputs) {
-                        JdbcSplit split =
-                            parseRelNode(logicalView, topology, input, logicalView.getSchemaName(), hint, rw,
-                                executionContext);
-                        if (split != null) {
-                            shardSet.put(split.getDbIndex(), split.getSchemaName());
-                            instSet.add(split.getHostAddress());
-                            grpConnSet.put(new GroupConnId(split.getDbIndex(), split.getGrpConnId(executionContext)),
-                                split.getSchemaName());
-                            if (!splitAssignment.containsKey(split.getDbIndex())) {
-                                splitAssignment.put(split.getDbIndex(), new ArrayList<>());
-                                if (!instDbMap.containsKey(split.getHostAddress())) {
-                                    instDbMap.put(split.getHostAddress(), new ArrayList<>());
-                                }
-                                instDbMap.get(split.getHostAddress()).add(split.getDbIndex());
-                                if (instDbMap.get(split.getHostAddress()).size() > maxInstDbsize) {
-                                    maxInstDbsize = instDbMap.get(split.getHostAddress()).size();
-                                }
-                            }
-                            splitAssignment.get(split.getDbIndex()).add(new Split(false, split));
-                            splitCount++;
-                        }
-                    }
-                    for (int i = 0; i < maxInstDbsize; i++) {
-                        for (List<String> dbs : instDbMap.values()) {
-                            if (i < dbs.size()) {
-                                outList.add(splitAssignment.get(dbs.get(i)));
-                            }
-                        }
-                    }
-                    return new SplitInfo(logicalView.getRelatedId(), logicalView.isExpandView(), concurrencyPolicy,
-                        outList.isEmpty() ? ImmutableList.of(new ArrayList<>()) : outList, shardSet,
-                        instSet.size(),
-                        splitCount, underSort, grpConnSet);
-                } else {
-                    List<RelNode> sortInputByInts =
-                        ExecUtils.zigzagInputsByMysqlInst(inputs, schemaName, executionContext);
-                    List<Split> retLists = new ArrayList<>();
-                    for (RelNode input : sortInputByInts) {
-                        JdbcSplit split =
-                            parseRelNode(logicalView, topology, input, logicalView.getSchemaName(), hint, rw,
-                                executionContext);
-                        if (split != null) {
-                            shardSet.put(split.getDbIndex(), split.getSchemaName());
-                            grpConnSet.put(new GroupConnId(split.getDbIndex(), split.getGrpConnId(executionContext)),
-                                split.getSchemaName());
-                            instSet.add(split.getHostAddress());
-                            retLists.add(new Split(false, split));
-                            splitCount++;
-                        }
-                    }
-                    return new SplitInfo(logicalView.getRelatedId(), logicalView.isExpandView(), GROUP_CONCURRENT_BLOCK,
-                        ImmutableList.of(retLists),
-                        shardSet,
-                        instSet.size(), splitCount, underSort, grpConnSet);
-                }
-            case RELAXED_GROUP_CONCURRENT:
-
-                if (!allowMultipleReadConns) {
-                    /**
-                     * the output of grpConnIdSet has been zigzag by mysqlInst
-                     */
-                    List<GroupConnId> grpConnIdSetOutput = new ArrayList<>();
-                    List<List<RelNode>> newInputsGroupedByGrpConnIdOutput = new ArrayList<>();
-                    ExecUtils.zigzagInputsByBothDnInstAndGroupConnId(inputs, schemaName, executionContext,
-                        grpConnIdSetOutput, newInputsGroupedByGrpConnIdOutput);
-                    List<List<Split>> outList = new ArrayList<>();
-                    for (int i = 0; i < newInputsGroupedByGrpConnIdOutput.size(); i++) {
-                        List<RelNode> phyOpListOfOneGrpConn = newInputsGroupedByGrpConnIdOutput.get(i);
-                        List<Split> splits = new ArrayList<>();
-                        for (int j = 0; j < phyOpListOfOneGrpConn.size(); j++) {
-                            JdbcSplit split =
-                                parseRelNode(logicalView, topology, phyOpListOfOneGrpConn.get(j),
-                                    logicalView.getSchemaName(), hint, rw, executionContext);
-                            if (split != null) {
-                                shardSet.put(split.getDbIndex(), split.getSchemaName());
-                                grpConnSet.put(grpConnIdSetOutput.get(i), split.getSchemaName());
-                                instSet.add(split.getHostAddress());
-                                splitCount++;
-                                splits.add(new Split(false, split));
-                            }
-                        }
-                        outList.add(splits);
-                    }
-                    return new SplitInfo(logicalView.getRelatedId(), logicalView.isExpandView(), concurrencyPolicy,
-                        outList.isEmpty() ? ImmutableList.of(new ArrayList<>()) : outList, shardSet,
-                        instSet.size(),
-                        splitCount, underSort, grpConnSet);
-                } else {
-                    /**
-                     * Come here means it is allowed to do table scan by multiple read conns
-                     */
-                    List<RelNode> sortInputByInts =
-                        ExecUtils.zigzagInputsByMysqlInst(inputs, schemaName, executionContext);
-                    List<Split> retLists = new ArrayList<>();
-                    for (RelNode input : sortInputByInts) {
-                        JdbcSplit split =
-                            parseRelNode(logicalView, topology, input, logicalView.getSchemaName(), hint, rw,
-                                executionContext);
-                        if (split != null) {
-                            shardSet.put(split.getDbIndex(), split.getSchemaName());
-                            grpConnSet.put(new GroupConnId(split.getDbIndex(), split.getGrpConnId(executionContext)),
-                                split.getSchemaName());
-                            instSet.add(split.getHostAddress());
-                            retLists.add(new Split(false, split));
-                            splitCount++;
-                        }
-                    }
-                    return new SplitInfo(logicalView.getRelatedId(), logicalView.isExpandView(),
-                        QueryConcurrencyPolicy.RELAXED_GROUP_CONCURRENT,
-                        ImmutableList.of(retLists),
-                        shardSet,
-                        instSet.size(), splitCount, underSort, grpConnSet);
-                }
-
-            case INSTANCE_CONCURRENT:
-                // FIXME there is no configuration for inter-instance parallelism yet
-                break;
-            default:
-                break;
-            }
-            throw new TddlRuntimeException(ErrorCode.ERR_GENERATE_SPLIT, "getSplits error:" + concurrencyPolicy);
-
-        }
-
-        throw new TddlRuntimeException(ErrorCode.ERR_GENERATE_SPLIT, "logicalView is null");
-
-    }
-
-    public SplitInfo ossTableScanSplit(
-        OSSTableScan ossTableScan, ExecutionContext executionContext, boolean highConcurrencyQuery) {
-        if (ossTableScan == null) {
-            throw new TddlRuntimeException(ErrorCode.ERR_GENERATE_SPLIT, "logicalView is null");
-        }
-
-        QueryConcurrencyPolicy concurrencyPolicy =
-            ExecUtils.getQueryConcurrencyPolicy(executionContext, ossTableScan);
-
-        List<RelNode> inputs = ExecUtils.getInputs(
-            ossTableScan, executionContext, !ExecUtils.isMppMode(executionContext));
-
-        String schemaName = ossTableScan.getSchemaName();
-        if (StringUtils.isEmpty(schemaName)) {
-            schemaName = executionContext.getSchemaName();
-        }
-
-        if (inputs.size() > 1) {
-            // record full table scan
-            executionContext.setHasScanWholeTable(true);
-        }
-
-        HashMap<String, String> shardSet = new HashMap<>();
-        int splitCount = 0;
-        List<RelNode> sortInputs = ExecUtils.zigzagInputsByMysqlInst(inputs, schemaName, executionContext);
-        List<Split> splitList = new ArrayList<>();
-        switch (concurrencyPolicy) {
-        case SEQUENTIAL:
-        case CONCURRENT:
-        case FIRST_THEN_CONCURRENT:
-        case GROUP_CONCURRENT_BLOCK:
-            // split according to physical table operations.
-            for (RelNode input : sortInputs) {
-                List<OssSplit> splits = OssSplit.getTableConcurrencySplit(input, executionContext);
-                for (OssSplit split : splits) {
-                    shardSet.put(split.getPhysicalSchema(), split.getLogicalSchema());
-                    splitList.add(new Split(false, split));
-                    splitCount++;
-                }
-            }
-            return new SplitInfo(ossTableScan.getRelatedId(), ossTableScan.isExpandView(),
-                concurrencyPolicy == FIRST_THEN_CONCURRENT ? GROUP_CONCURRENT_BLOCK : concurrencyPolicy,
-                ImmutableList.of(splitList),
-                shardSet, 1,
-                splitCount, false);
-        case FILE_CONCURRENT:
-            // split according to all table files.
-            for (RelNode input : sortInputs) {
-                List<OssSplit> splits = OssSplit.getFileConcurrencySplit(ossTableScan, input, executionContext);
-                if (splits != null) {
-                    for (OssSplit split : splits) {
-                        shardSet.put(split.getPhysicalSchema(), split.getLogicalSchema());
-                        splitList.add(new Split(false, split));
-                        splitCount++;
-                    }
-                }
-            }
-            return new SplitInfo(ossTableScan.getRelatedId(), ossTableScan.isExpandView(),
-                FILE_CONCURRENT,
-                ImmutableList.of(splitList),
-                shardSet, 1,
-                splitCount, false);
-        case INSTANCE_CONCURRENT:
-        default:
-            break;
-        }
-        throw new TddlRuntimeException(ErrorCode.ERR_GENERATE_SPLIT, "getSplits error:" + concurrencyPolicy);
-
-    }
-
-    private JdbcSplit parseRelNode(LogicalView logicalView, TopologyHandler topology, RelNode input,
-                                   String schemaName, byte[] hint, ITransaction.RW rw, ExecutionContext executionContext) {
-        if (input instanceof PhyTableOperation) {
-            PhyTableOperation phyTableOperation = (PhyTableOperation) input;
-
-            if (logicalView.isMGetEnabled()) {
-                phyTableOperation.setBytesSql(logicalView.getLookupSqlTemplateCache(() -> {
-                    SqlSelect nativeSqlForMget = (SqlSelect) phyTableOperation.getNativeSqlNode()
-                        .clone(phyTableOperation.getNativeSqlNode().getParserPosition());
-                    SqlNode filter = nativeSqlForMget.getWhere();
-                    SqlNode customFilter;
-                    if (filter != null) {
-                        SqlOperator operator = SqlStdOperatorTable.AND;
-                        customFilter = new SqlBasicCall(operator,
-                            new SqlNode[] {filter, DYNAMIC_CONDITION_PLACEHOLDER},
-                            SqlParserPos.ZERO);
-                    } else {
-                        customFilter = DYNAMIC_CONDITION_PLACEHOLDER;
-                    }
-                    nativeSqlForMget.setWhere(customFilter);
-                    return RelUtils.toNativeBytesSql(nativeSqlForMget);
-                }));
-            }
-
-            IGroupExecutor groupExecutor = topology.get(phyTableOperation.getDbIndex());
-            TGroupDataSource ds = (TGroupDataSource) groupExecutor.getDataSource();
-            String address = LOCAL_ADDRESS;
-            if (!DynamicConfig.getInstance().enableExtremePerformance()) {
-                address = ds.getOneAtomAddress(ConfigDataMode.isMasterMode());
-            }
-
-            PhyTableScanBuilder phyOperationBuilder =
-                (PhyTableScanBuilder) phyTableOperation.getPhyOperationBuilder();
-            String orderBy = phyOperationBuilder.buildPhysicalOrderByClause();
-            BytesSql sqlTemplate = phyTableOperation.getBytesSql();
-            final ByteString galaxyDigestBS = phyTableOperation.getGalaxyPrepareDigest();
-            final byte[] galaxyDigest = null == galaxyDigestBS ? null : galaxyDigestBS.toByteArray();
-
-            boolean useParameterDelegate = ExecUtils.useParameterDelegate(executionContext);
-            List<List<ParameterContext>> params = new ArrayList<>(phyTableOperation.getTableNames().size());
-            for (List<String> tables : phyTableOperation.getTableNames()) {
-                params.add(phyOperationBuilder.buildSplitParams(
-                    phyTableOperation.getDbIndex(), tables, useParameterDelegate));
-            }
-            Long intraGroupSortKey = PhyTableOperationUtil.fetchPhyOpIntraGroupConnKey(phyTableOperation, executionContext);
-            return new JdbcSplit(ds.getDbGroupKey(),
-                schemaName,
-                phyTableOperation.getDbIndex(),
-                hint,
-                sqlTemplate,
-                orderBy,
-                params,
-                address,
-                phyTableOperation.getTableNames(),
-                rw,
-                phyOperationBuilder.containLimit() || (logicalView.getLockMode() != null
-                    && logicalView.getLockMode() != SqlSelect.LockMode.UNDEF),
-                intraGroupSortKey,
-                galaxyDigest, galaxyDigest != null && phyTableOperation.isSupportGalaxyPrepare());
-        } else {
-            throw new UnsupportedOperationException("Unknown input " + input);
-        }
-    }
+
+    SplitInfo getSplits(LogicalView logicalView, ExecutionContext executionContext, boolean highConcurrencyQuery);
 }
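Reducing SplitManager to an interface creates a seam between planning and split generation: callers can now be exercised against any implementation. A minimal sketch of that seam, assuming only the types visible in this patch (the stub class itself is hypothetical and for illustration only):

import com.alibaba.polardbx.optimizer.context.ExecutionContext;
import com.alibaba.polardbx.optimizer.core.rel.LogicalView;

// A test double for SplitManager; a real test could return a canned SplitInfo
// instead of throwing, to drive a caller through a fixed split layout.
class StubSplitManager implements SplitManager {
    @Override
    public SplitInfo getSingleSplit(LogicalView logicalView, ExecutionContext executionContext) {
        throw new UnsupportedOperationException("stub");
    }

    @Override
    public SplitInfo getSplits(LogicalView logicalView, ExecutionContext executionContext,
                               boolean highConcurrencyQuery) {
        throw new UnsupportedOperationException("stub");
    }
}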
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/split/SplitManagerImpl.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/split/SplitManagerImpl.java
new file mode 100644
index 000000000..73391f9d7
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/split/SplitManagerImpl.java
@@ -0,0 +1,574 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.mpp.split;
+
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.common.jdbc.BytesSql;
+import com.alibaba.polardbx.common.jdbc.ParameterContext;
+import com.alibaba.polardbx.common.properties.DynamicConfig;
+import com.alibaba.polardbx.common.utils.GeneralUtil;
+import com.alibaba.polardbx.common.utils.Pair;
+import com.alibaba.polardbx.common.utils.logger.Logger;
+import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
+import com.alibaba.polardbx.config.ConfigDataMode;
+import com.alibaba.polardbx.executor.common.ExecutorContext;
+import com.alibaba.polardbx.executor.common.TopologyHandler;
+import com.alibaba.polardbx.executor.gms.ColumnarManager;
+import com.alibaba.polardbx.executor.mpp.metadata.Split;
+import com.alibaba.polardbx.executor.spi.IGroupExecutor;
+import com.alibaba.polardbx.executor.utils.ExecUtils;
+import com.alibaba.polardbx.executor.utils.SubqueryUtils;
+import com.alibaba.polardbx.group.jdbc.TGroupDataSource;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.rel.BaseQueryOperation;
+import com.alibaba.polardbx.optimizer.core.rel.BaseTableOperation;
+import com.alibaba.polardbx.optimizer.core.rel.LogicalView;
+import com.alibaba.polardbx.optimizer.core.rel.OSSTableScan;
+import com.alibaba.polardbx.optimizer.core.rel.PhyTableOperation;
+import com.alibaba.polardbx.optimizer.core.rel.PhyTableScanBuilder;
+import com.alibaba.polardbx.optimizer.utils.GroupConnId;
+import com.alibaba.polardbx.optimizer.utils.IColumnarTransaction;
+import com.alibaba.polardbx.optimizer.utils.ITransaction;
+import com.alibaba.polardbx.optimizer.utils.PhyTableOperationUtil;
+import com.alibaba.polardbx.optimizer.utils.QueryConcurrencyPolicy;
+import com.alibaba.polardbx.optimizer.utils.RelUtils;
+import com.alibaba.polardbx.statistics.RuntimeStatistics;
+import com.google.common.collect.ImmutableList;
+import com.google.protobuf.ByteString;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rex.RexDynamicParam;
+import org.apache.calcite.sql.SqlBasicCall;
+import org.apache.calcite.sql.SqlLiteral;
+import org.apache.calcite.sql.SqlNode;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.SqlSelect;
+import org.apache.calcite.sql.fun.SqlStdOperatorTable;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.commons.lang.StringUtils;
+
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static com.alibaba.polardbx.group.jdbc.TGroupDataSource.LOCAL_ADDRESS;
+import static com.alibaba.polardbx.optimizer.utils.QueryConcurrencyPolicy.CONCURRENT;
+import static com.alibaba.polardbx.optimizer.utils.QueryConcurrencyPolicy.FILE_CONCURRENT;
+import static com.alibaba.polardbx.optimizer.utils.QueryConcurrencyPolicy.FIRST_THEN_CONCURRENT;
+import static com.alibaba.polardbx.optimizer.utils.QueryConcurrencyPolicy.GROUP_CONCURRENT_BLOCK;
+
+public class SplitManagerImpl implements SplitManager {
+
+    private static final Logger LOGGER = LoggerFactory.getLogger(SplitManagerImpl.class);
+
+    public static SqlNode DYNAMIC_CONDITION_PLACEHOLDER = new SqlBasicCall(SqlStdOperatorTable.EQUALS,
+        new SqlNode[] {
+            SqlLiteral.createCharString("bka_magic", SqlParserPos.ZERO),
+            SqlLiteral.createCharString("bka_magic", SqlParserPos.ZERO)}, SqlParserPos.ZERO);
+
+    @Override
+    public SplitInfo getSingleSplit(LogicalView logicalView, ExecutionContext executionContext) {
+        if (logicalView instanceof OSSTableScan) {
+            throw GeneralUtil.nestedException("Impossible code path: oss table scan with single split");
+        } else {
+            return logicalViewSingleSplit(logicalView, executionContext);
+        }
+    }
+
+    @Override
+    public SplitInfo getSplits(
+        LogicalView logicalView, ExecutionContext executionContext, boolean highConcurrencyQuery) {
+        if (logicalView instanceof OSSTableScan) {
+            RuntimeStatistics stat = (RuntimeStatistics) executionContext.getRuntimeStatistics();
+            long startTimeNanos = System.nanoTime();
+            SplitInfo splitInfo = ossTableScanSplit((OSSTableScan) logicalView, executionContext, highConcurrencyQuery);
+            if (((OSSTableScan) logicalView).isColumnarIndex() && stat != null) {
+                long timeCost = System.nanoTime() - startTimeNanos;
+                if (LOGGER.isDebugEnabled()) {
+                    LOGGER.debug(MessageFormat.format("timeCost = {0}, traceId = {1}, logicalView = {2}",
+                        timeCost, executionContext.getTraceId(), logicalView));
+                }
+                stat.addColumnarSnapshotTimecost(timeCost);
+            }
+            return splitInfo;
+        } else {
+            return logicalViewSplit(logicalView, executionContext, highConcurrencyQuery);
+        }
+    }
+
+    private SplitInfo logicalViewSingleSplit(LogicalView logicalView, ExecutionContext executionContext) {
+        List<Split> splitList = new ArrayList<>();
+        String schemaName = logicalView.getSchemaName();
+        if (StringUtils.isEmpty(schemaName)) {
+            schemaName = executionContext.getSchemaName();
+        }
+        TopologyHandler topology = ExecutorContext.getContext(schemaName).getTopologyHandler();
+
+        ITransaction.RW rw = ITransaction.RW.READ;
+
+        BaseQueryOperation queryOperation = logicalView.fromTableOperation();
+        List<List<ParameterContext>> params = null;
+        String dbIndex = null;
+
+        if (queryOperation instanceof BaseTableOperation && ((BaseTableOperation) queryOperation).isForUpdate()) {
+            rw = ITransaction.RW.WRITE;
+        } else if (logicalView.getLockMode() == SqlSelect.LockMode.EXCLUSIVE_LOCK) {
+            rw = ITransaction.RW.WRITE;
+        }
+
+        List<List<String>> phyTableNames = new ArrayList<>();
+        boolean useParameterDelegate = ExecUtils.useParameterDelegate(executionContext);
+        if (queryOperation instanceof PhyTableOperation) {
+            PhyTableScanBuilder phyOperationBuilder =
+                (PhyTableScanBuilder) ((PhyTableOperation) queryOperation).getPhyOperationBuilder();
+            if (phyOperationBuilder != null) {
+                List<List<String>> groupTables = ((PhyTableOperation) queryOperation).getTableNames();
+                params = new ArrayList<>(groupTables.size());
+                for (List<String> tables : groupTables) {
+                    params.add(phyOperationBuilder.buildSplitParams(dbIndex, tables, useParameterDelegate));
+                }
+                dbIndex = queryOperation.getDbIndex();
+                phyTableNames = groupTables;
+            }
+        }
+        if (params == null) {
+
+            Pair<String, Map<Integer, ParameterContext>> dbIndexAndParam =
+                queryOperation
+                    .getDbIndexAndParam(executionContext.getParams() == null ? null : executionContext.getParams()
+                        .getCurrentParameter(), phyTableNames, executionContext);
+            Map<Integer, ParameterContext> p = dbIndexAndParam.getValue();
+
+            List<ParameterContext> splitParams = new ArrayList<>();
+            params = Collections.singletonList(splitParams);
+            if (p != null) {
+                int paramCount = p.keySet().size();
+                for (int i = 1; i <= paramCount; i++) {
+                    splitParams.add(i - 1, p.get(i));
+                }
+            }
+            dbIndex = dbIndexAndParam.getKey();
+
+        }
+
+        IGroupExecutor groupExecutor = topology.get(dbIndex);
+        TGroupDataSource ds = (TGroupDataSource) groupExecutor.getDataSource();
+
+        String address = LOCAL_ADDRESS;
+        if (!DynamicConfig.getInstance().enableExtremePerformance()) {
+            address = ds.getOneAtomAddress(ConfigDataMode.isMasterMode());
+        }
+
+        byte[] hint = ExecUtils.buildDRDSTraceCommentBytes(executionContext);
+        BytesSql sqlTemplate = queryOperation.getBytesSql();
+        Long intraGroupSortKey =
+            PhyTableOperationUtil
+                .fetchBaseOpIntraGroupConnKey(queryOperation, dbIndex, phyTableNames,
+                    executionContext);
+        final ByteString galaxyDigestBS = logicalView.getGalaxyPrepareDigest(executionContext, sqlTemplate);
+        final byte[] galaxyDigest = null == galaxyDigestBS ? null : galaxyDigestBS.toByteArray();
+        JdbcSplit split = new JdbcSplit(ds.getDbGroupKey(),
+            schemaName,
+            dbIndex,
+            hint,
+            sqlTemplate,
+            null,
+            params,
+            address,
+            ImmutableList.of(logicalView.getTableNames()),
+            rw,
+            false,
+            intraGroupSortKey,
+            galaxyDigest,
+            galaxyDigest != null && logicalView.isSupportGalaxyPrepare());
+        splitList.add(new Split(false, split));
+
+        HashMap<String, String> groups = new HashMap<>();
+        groups.put(dbIndex, schemaName);
+        Map<GroupConnId, String> grpConnIdSet = new HashMap<>();
+        grpConnIdSet.put(new GroupConnId(dbIndex, split.getGrpConnId(executionContext)), schemaName);
+        return new SplitInfo(logicalView.getRelatedId(), false, QueryConcurrencyPolicy.SEQUENTIAL,
+            ImmutableList.of(splitList), groups, 1,
+            1, false, grpConnIdSet);
+    }
+
+    private SplitInfo logicalViewSplit(LogicalView logicalView, ExecutionContext executionContext,
+                                       boolean highConcurrencyQuery) {
+        if (logicalView != null) {
+            ITransaction.RW rw =
+                logicalView.getLockMode() == SqlSelect.LockMode.UNDEF ? ITransaction.RW.READ : ITransaction.RW.WRITE;
+            boolean underSort = logicalView.pushedRelNodeIsSort();
+            QueryConcurrencyPolicy concurrencyPolicy = highConcurrencyQuery ? CONCURRENT :
+                ExecUtils.getQueryConcurrencyPolicy(executionContext, logicalView);
+
+            boolean allowMultipleReadConns = ExecUtils.allowMultipleReadConns(executionContext, logicalView);
+
+            // before scheduling splits, scalar subqueries under the scan are evaluated on the server side
+            List<RexDynamicParam> scalarList = logicalView.getScalarList();
+            if (scalarList.size() > 0) {
+                SubqueryUtils.buildScalarSubqueryValue(scalarList, executionContext); // handle
+            }
+
+            List<RelNode> inputs = ExecUtils.getInputs(
+                logicalView, executionContext, !ExecUtils.isMppMode(executionContext));
+
+            // FIXME once the optimizer can estimate memory usage, call the logic below and
+            // refuse to run the SQL if the estimated memory exceeds the limit
+            //HandlerCommon.checkExecMemCost(ExecutionContext executionContext, List subNodes);
+
+            if (inputs.size() > 1) {
+                /*
+                 * record a whole-table scan (currently: more than one shard accessed), used later for sql.log output
+                 */
+                executionContext.setHasScanWholeTable(true);
+            }
+
+            byte[] hint = ExecUtils.buildDRDSTraceCommentBytes(executionContext);
+
+            String schemaName = logicalView.getSchemaName();
+            if (StringUtils.isEmpty(schemaName)) {
+                schemaName = executionContext.getSchemaName();
+            }
+
+            TopologyHandler topology = ExecutorContext.getContext(logicalView.getSchemaName())
+                .getTopologyHandler();
+
+            HashMap<String, String> shardSet = new HashMap<>();
+            Set<String> instSet = new HashSet<>();
+            Map<GroupConnId, String> grpConnSet = new HashMap<>();
+            int splitCount = 0;
+            switch (concurrencyPolicy) {
+            case SEQUENTIAL:
+            case CONCURRENT:
+                List<RelNode> sortInputs = ExecUtils.zigzagInputsByMysqlInst(inputs, schemaName, executionContext);
+                List<Split> splitList = new ArrayList<>();
+                for (RelNode input : sortInputs) {
+                    JdbcSplit split =
+                        parseRelNode(
+                            logicalView, topology, input, logicalView.getSchemaName(), hint, rw, executionContext);
+                    if (split != null) {
+                        shardSet.put(split.getDbIndex(), split.getSchemaName());
+                        instSet.add(split.getHostAddress());
+                        grpConnSet.put(new GroupConnId(split.getDbIndex(), split.getGrpConnId(executionContext)),
+                            split.getSchemaName());
+                        splitList.add(new Split(false, split));
+                        splitCount++;
+                    }
+                }
+                return new SplitInfo(logicalView.getRelatedId(), logicalView.isExpandView(), concurrencyPolicy,
+                    ImmutableList.of(splitList),
+                    shardSet, instSet.size(),
+                    splitCount, underSort, grpConnSet);
+            case FIRST_THEN_CONCURRENT: // only used when writing broadcast tables; queries fall back to the default GROUP_CONCURRENT_BLOCK
+            case GROUP_CONCURRENT_BLOCK:
+                if (!allowMultipleReadConns) {
+                    Map<String, List<Split>> splitAssignment = new HashMap<>();
+                    Map<String, List<String>> instDbMap = new LinkedHashMap<>();
+                    List<List<Split>> outList = new ArrayList<>();
+                    int maxInstDbsize = 0;
+
+                    for (RelNode input : inputs) {
+                        JdbcSplit split =
+                            parseRelNode(logicalView, topology, input, logicalView.getSchemaName(), hint, rw,
+                                executionContext);
+                        if (split != null) {
+                            shardSet.put(split.getDbIndex(), split.getSchemaName());
+                            instSet.add(split.getHostAddress());
+                            grpConnSet.put(new GroupConnId(split.getDbIndex(), split.getGrpConnId(executionContext)),
+                                split.getSchemaName());
+                            if (!splitAssignment.containsKey(split.getDbIndex())) {
+                                splitAssignment.put(split.getDbIndex(), new ArrayList<>());
+                                if (!instDbMap.containsKey(split.getHostAddress())) {
+                                    instDbMap.put(split.getHostAddress(), new ArrayList<>());
+                                }
+                                instDbMap.get(split.getHostAddress()).add(split.getDbIndex());
+                                if (instDbMap.get(split.getHostAddress()).size() > maxInstDbsize) {
+                                    maxInstDbsize = instDbMap.get(split.getHostAddress()).size();
+                                }
+                            }
+                            splitAssignment.get(split.getDbIndex()).add(new Split(false, split));
+                            splitCount++;
+                        }
+                    }
+                    for (int i = 0; i < maxInstDbsize; i++) {
+                        for (List<String> dbs : instDbMap.values()) {
+                            if (i < dbs.size()) {
+                                outList.add(splitAssignment.get(dbs.get(i)));
+                            }
+                        }
+                    }
+                    return new SplitInfo(logicalView.getRelatedId(), logicalView.isExpandView(), concurrencyPolicy,
+                        outList.isEmpty() ? ImmutableList.of(new ArrayList<>()) : outList, shardSet,
+                        instSet.size(),
+                        splitCount, underSort, grpConnSet);
+                } else {
+                    List<RelNode> sortInputByInts =
+                        ExecUtils.zigzagInputsByMysqlInst(inputs, schemaName, executionContext);
+                    List<Split> retLists = new ArrayList<>();
+                    for (RelNode input : sortInputByInts) {
+                        JdbcSplit split =
+                            parseRelNode(logicalView, topology, input, logicalView.getSchemaName(), hint, rw,
+                                executionContext);
+                        if (split != null) {
+                            shardSet.put(split.getDbIndex(), split.getSchemaName());
+                            grpConnSet.put(new GroupConnId(split.getDbIndex(), split.getGrpConnId(executionContext)),
+                                split.getSchemaName());
+                            instSet.add(split.getHostAddress());
+                            retLists.add(new Split(false, split));
+                            splitCount++;
+                        }
+                    }
+                    return new SplitInfo(logicalView.getRelatedId(), logicalView.isExpandView(), GROUP_CONCURRENT_BLOCK,
+                        ImmutableList.of(retLists),
+                        shardSet,
+                        instSet.size(), splitCount, underSort, grpConnSet);
+                }
+            case RELAXED_GROUP_CONCURRENT:
+
+                if (!allowMultipleReadConns) {
+                    /**
+                     * the output of grpConnIdSet has been zigzag by mysqlInst
+                     */
+                    List<GroupConnId> grpConnIdSetOutput = new ArrayList<>();
+                    List<List<RelNode>> newInputsGroupedByGrpConnIdOutput = new ArrayList<>();
+                    ExecUtils.zigzagInputsByBothDnInstAndGroupConnId(inputs, schemaName, executionContext,
+                        grpConnIdSetOutput, newInputsGroupedByGrpConnIdOutput);
+                    List<List<Split>> outList = new ArrayList<>();
+                    for (int i = 0; i < newInputsGroupedByGrpConnIdOutput.size(); i++) {
+                        List<RelNode> phyOpListOfOneGrpConn = newInputsGroupedByGrpConnIdOutput.get(i);
+                        List<Split> splits = new ArrayList<>();
+                        for (int j = 0; j < phyOpListOfOneGrpConn.size(); j++) {
+                            JdbcSplit split =
+                                parseRelNode(logicalView, topology, phyOpListOfOneGrpConn.get(j),
+                                    logicalView.getSchemaName(), hint, rw, executionContext);
+                            if (split != null) {
+                                shardSet.put(split.getDbIndex(), split.getSchemaName());
+                                grpConnSet.put(grpConnIdSetOutput.get(i), split.getSchemaName());
+                                instSet.add(split.getHostAddress());
+                                splitCount++;
+                                splits.add(new Split(false, split));
+                            }
+                        }
+                        outList.add(splits);
+                    }
+                    return new SplitInfo(logicalView.getRelatedId(), logicalView.isExpandView(), concurrencyPolicy,
+                        outList.isEmpty() ? ImmutableList.of(new ArrayList<>()) : outList, shardSet,
+                        instSet.size(),
+                        splitCount, underSort, grpConnSet);
+                } else {
+                    /**
+                     * Come here means it is allowed to do table scan by multiple read conns
+                     */
+                    List<RelNode> sortInputByInts =
+                        ExecUtils.zigzagInputsByMysqlInst(inputs, schemaName, executionContext);
+                    List<Split> retLists = new ArrayList<>();
+                    for (RelNode input : sortInputByInts) {
+                        JdbcSplit split =
+                            parseRelNode(logicalView, topology, input, logicalView.getSchemaName(), hint, rw,
+                                executionContext);
+                        if (split != null) {
+                            shardSet.put(split.getDbIndex(), split.getSchemaName());
+                            grpConnSet.put(new GroupConnId(split.getDbIndex(), split.getGrpConnId(executionContext)),
+                                split.getSchemaName());
+                            instSet.add(split.getHostAddress());
+                            retLists.add(new Split(false, split));
+                            splitCount++;
+                        }
+                    }
+                    return new SplitInfo(logicalView.getRelatedId(), logicalView.isExpandView(),
+                        QueryConcurrencyPolicy.RELAXED_GROUP_CONCURRENT,
+                        ImmutableList.of(retLists),
+                        shardSet,
+                        instSet.size(), splitCount, underSort, grpConnSet);
+                }
+
+            case INSTANCE_CONCURRENT:
+                // FIXME there is no configuration for inter-instance parallelism yet
+                break;
+            default:
+                break;
+            }
+            throw new TddlRuntimeException(ErrorCode.ERR_GENERATE_SPLIT, "getSplits error:" + concurrencyPolicy);
+
+        }
+
+        throw new TddlRuntimeException(ErrorCode.ERR_GENERATE_SPLIT, "logicalView is null");
+
+    }
+
+    private SplitInfo ossTableScanSplit(
+        OSSTableScan ossTableScan, ExecutionContext executionContext, boolean highConcurrencyQuery) {
+        if (ossTableScan == null) {
+            throw new TddlRuntimeException(ErrorCode.ERR_GENERATE_SPLIT, "logicalView is null");
+        }
+
+        QueryConcurrencyPolicy concurrencyPolicy =
+            ExecUtils.getQueryConcurrencyPolicy(executionContext, ossTableScan);
+
+        List<RelNode> inputs = ExecUtils.getInputs(
+            ossTableScan, executionContext, !ExecUtils.isMppMode(executionContext));
+
+        String schemaName = ossTableScan.getSchemaName();
+        if (StringUtils.isEmpty(schemaName)) {
+            schemaName = executionContext.getSchemaName();
+        }
+
+        if (inputs.size() > 1) {
+            // record full table scan
+            executionContext.setHasScanWholeTable(true);
+        }
+
+        HashMap<String, String> shardSet = new HashMap<>();
+        int splitCount = 0;
+
+        // if storage is columnar index, zigzag by mysql instance has no meaning
+        List<RelNode> sortInputs = ossTableScan.isColumnarIndex() ? inputs :
+            ExecUtils.zigzagInputsByMysqlInst(inputs, schemaName, executionContext);
+
+        List<Split> splitList = new ArrayList<>();
+        // Before allocating splits, a certain tso must be fetched first
+        Long tso = null;
+        if (ossTableScan.isColumnarIndex()) {
+            ITransaction trans = executionContext.getTransaction();
+            if (trans instanceof IColumnarTransaction) {
+                IColumnarTransaction columnarTrans = (IColumnarTransaction) trans;
+                if (!columnarTrans.snapshotSeqIsEmpty()) {
+                    tso = columnarTrans.getSnapshotSeq();
+                } else {
+                    tso = ColumnarManager.getInstance().latestTso();
+                    columnarTrans.setTsoTimestamp(tso);
+                }
+            } else {
+                LOGGER.warn("Trying to access columnar index out of IMppReadOnlyTransaction, transaction class is: "
+                    + trans.getTransactionClass().name());
+                tso = ColumnarManager.getInstance().latestTso();
+            }
+        }
+
+        switch (concurrencyPolicy) {
+        case SEQUENTIAL:
+        case CONCURRENT:
+        case FIRST_THEN_CONCURRENT:
+        case GROUP_CONCURRENT_BLOCK:
+            // split according to physical table operations.
+            for (RelNode input : sortInputs) {
+                List<OssSplit> splits = OssSplit.getTableConcurrencySplit(ossTableScan, input, executionContext, tso);
+                for (OssSplit split : splits) {
+                    shardSet.put(split.getPhysicalSchema(), split.getLogicalSchema());
+                    splitList.add(new Split(false, split));
+                    splitCount++;
+                }
+            }
+            return new SplitInfo(ossTableScan.getRelatedId(), ossTableScan.isExpandView(),
+                concurrencyPolicy == FIRST_THEN_CONCURRENT ? GROUP_CONCURRENT_BLOCK : concurrencyPolicy,
+                ImmutableList.of(splitList),
+                shardSet, 1,
+                splitCount, false);
+        case FILE_CONCURRENT:
+            // split according to all table files.
+            for (RelNode input : sortInputs) {
+                List<OssSplit> splits = OssSplit.getFileConcurrencySplit(ossTableScan, input, executionContext, tso);
+                if (splits != null) {
+                    for (OssSplit split : splits) {
+                        shardSet.put(split.getPhysicalSchema(), split.getLogicalSchema());
+                        splitList.add(new Split(false, split));
+                        splitCount++;
+                    }
+                }
+            }
+            return new SplitInfo(ossTableScan.getRelatedId(), ossTableScan.isExpandView(),
+                FILE_CONCURRENT,
+                ImmutableList.of(splitList),
+                shardSet, 1,
+                splitCount, false);
+        case INSTANCE_CONCURRENT:
+        default:
+            break;
+        }
+        throw new TddlRuntimeException(ErrorCode.ERR_GENERATE_SPLIT, "getSplits error:" + concurrencyPolicy);
+
+    }
+
+    private JdbcSplit parseRelNode(LogicalView logicalView, TopologyHandler topology, RelNode input,
+                                   String schemaName, byte[] hint, ITransaction.RW rw, ExecutionContext ec) {
+        if (input instanceof PhyTableOperation) {
+            PhyTableOperation phyTableOperation = (PhyTableOperation) input;
+
+            if (logicalView.isMGetEnabled()) {
+                phyTableOperation.setBytesSql(logicalView.getLookupSqlTemplateCache(() -> {
+                    SqlSelect nativeSqlForMget = (SqlSelect) phyTableOperation.getNativeSqlNode()
+                        .clone(phyTableOperation.getNativeSqlNode().getParserPosition());
+                    SqlNode filter = nativeSqlForMget.getWhere();
+                    SqlNode customFilter;
+                    if (filter != null) {
+                        SqlOperator operator = SqlStdOperatorTable.AND;
+                        customFilter = new SqlBasicCall(operator,
+                            new SqlNode[] {filter, DYNAMIC_CONDITION_PLACEHOLDER},
+                            SqlParserPos.ZERO);
+                    } else {
+                        customFilter = DYNAMIC_CONDITION_PLACEHOLDER;
+                    }
+                    nativeSqlForMget.setWhere(customFilter);
+                    return RelUtils.toNativeBytesSql(nativeSqlForMget);
+                }));
+            }
+
+            IGroupExecutor groupExecutor = topology.get(phyTableOperation.getDbIndex());
+            TGroupDataSource ds = (TGroupDataSource) groupExecutor.getDataSource();
+            String address = LOCAL_ADDRESS;
+            if (!DynamicConfig.getInstance().enableExtremePerformance()) {
+                address = ds.getOneAtomAddress(ConfigDataMode.isMasterMode());
+            }
+
+            PhyTableScanBuilder phyOperationBuilder =
+                (PhyTableScanBuilder) phyTableOperation.getPhyOperationBuilder();
+            String orderBy = phyOperationBuilder.buildPhysicalOrderByClause();
+            BytesSql sqlTemplate = phyTableOperation.getBytesSql();
+            final ByteString galaxyDigestBS = phyTableOperation.getGalaxyPrepareDigest();
+            final byte[] galaxyDigest = null == galaxyDigestBS ? null : galaxyDigestBS.toByteArray();
+
+            boolean useParameterDelegate = ExecUtils.useParameterDelegate(ec);
+            List<List<ParameterContext>> params = new ArrayList<>(phyTableOperation.getTableNames().size());
+            for (List<String> tables : phyTableOperation.getTableNames()) {
+                params.add(phyOperationBuilder.buildSplitParams(
+                    phyTableOperation.getDbIndex(), tables, useParameterDelegate));
+            }
+            Long intraGroupSortKey = PhyTableOperationUtil.fetchPhyOpIntraGroupConnKey(phyTableOperation, ec);
+            return new JdbcSplit(ds.getDbGroupKey(),
+                schemaName,
+                phyTableOperation.getDbIndex(),
+                hint,
+                sqlTemplate,
+                orderBy,
+                params,
+                address,
+                phyTableOperation.getTableNames(),
+                rw,
+                phyOperationBuilder.containLimit() || (logicalView.getLockMode() != null
+                    && logicalView.getLockMode() != SqlSelect.LockMode.UNDEF),
+                intraGroupSortKey,
+                galaxyDigest, galaxyDigest != null && phyTableOperation.isSupportGalaxyPrepare());
+        } else {
+            throw new UnsupportedOperationException("Unknown input " + input);
+        }
+    }
+}
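The least obvious part of the class above is the GROUP_CONCURRENT_BLOCK interleave: splits are bucketed per group (dbIndex), buckets are grouped by storage-instance address, and one bucket per instance is drained per round, so consecutive batches never pile onto a single MySQL instance. A self-contained sketch of that interleaving, with made-up host and group names:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class RoundRobinSketch {
    public static void main(String[] args) {
        // host -> groups living on that host (insertion order preserved, as with the LinkedHashMap above)
        Map<String, List<String>> instDbMap = new LinkedHashMap<>();
        instDbMap.put("host-a", Arrays.asList("grp_0", "grp_2"));
        instDbMap.put("host-b", Arrays.asList("grp_1", "grp_3"));

        // group -> its splits (one placeholder split per group here)
        Map<String, List<String>> splitAssignment = new HashMap<>();
        for (List<String> dbs : instDbMap.values()) {
            for (String db : dbs) {
                splitAssignment.put(db, Collections.singletonList("split-of-" + db));
            }
        }

        int maxInstDbSize = 2; // the largest group count on any single host
        List<List<String>> outList = new ArrayList<>();
        for (int i = 0; i < maxInstDbSize; i++) {
            for (List<String> dbs : instDbMap.values()) {
                if (i < dbs.size()) {
                    outList.add(splitAssignment.get(dbs.get(i)));
                }
            }
        }
        // Batches alternate between host-a and host-b:
        // [[split-of-grp_0], [split-of-grp_1], [split-of-grp_2], [split-of-grp_3]]
        System.out.println(outList);
    }
}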
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/util/Failures.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/util/Failures.java
index 8cab0923f..daca11833 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/util/Failures.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/util/Failures.java
@@ -29,11 +29,11 @@
  */
 package com.alibaba.polardbx.executor.mpp.util;

-import com.google.common.collect.Lists;
 import com.alibaba.polardbx.common.exception.TddlRuntimeException;
 import com.alibaba.polardbx.common.exception.code.ErrorCode;
 import com.alibaba.polardbx.executor.mpp.execution.ExecutionFailureInfo;
 import com.alibaba.polardbx.executor.mpp.execution.Failure;
+import com.google.common.collect.Lists;

 import javax.annotation.Nullable;
 import java.net.ConnectException;
@@ -41,9 +41,9 @@
 import java.util.List;
 import java.util.concurrent.RejectedExecutionException;

-import static com.google.common.base.Functions.toStringFunction;
 import static com.alibaba.polardbx.common.exception.code.ErrorCode.ERR_SERVER_SHUTTING_DOWN;
 import static com.alibaba.polardbx.util.MoreObjects.firstNonNull;
+import static com.google.common.base.Functions.toStringFunction;
 import static java.lang.String.format;
 import static java.util.Arrays.asList;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/util/FinalizerService.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/util/FinalizerService.java
index a4240acc7..9165c9f6e 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/util/FinalizerService.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/util/FinalizerService.java
@@ -29,10 +29,10 @@
  */
 package com.alibaba.polardbx.executor.mpp.util;

-import com.google.common.collect.Sets;
 import com.alibaba.polardbx.common.utils.logger.Logger;
 import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
 import com.alibaba.polardbx.executor.mpp.Threads;
+import com.google.common.collect.Sets;

 import javax.annotation.PostConstruct;
 import javax.annotation.PreDestroy;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/ClusterStatsResource.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/ClusterStatsResource.java
index 4d7e1f1a2..f606472eb 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/ClusterStatsResource.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/ClusterStatsResource.java
@@ -16,14 +16,14 @@
 package com.alibaba.polardbx.executor.mpp.web;

-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonProperty;
 import com.alibaba.polardbx.executor.mpp.execution.QueryInfo;
 import com.alibaba.polardbx.executor.mpp.execution.QueryManager;
 import com.alibaba.polardbx.executor.mpp.execution.QueryState;
 import com.alibaba.polardbx.gms.node.InternalNodeManager;
 import com.alibaba.polardbx.gms.node.Node;
 import com.alibaba.polardbx.gms.node.NodeState;
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;

 import javax.inject.Inject;
 import javax.ws.rs.GET;
@@ -37,9 +37,9 @@
 @Path("/v1/cluster")
 public class ClusterStatsResource {
+    private static ClusterStatsResource instance = null;
     private final InternalNodeManager nodeManager;
     private final QueryManager queryManager;
-    private static ClusterStatsResource instance = null;

     @Inject
     public ClusterStatsResource(InternalNodeManager nodeManager, QueryManager queryManager) {
@@ -54,6 +54,50 @@ public static ClusterStatsResource getInstance() {
         return instance;
     }

+    protected static ClusterStats getClusterStatsInternal(List<QueryInfo> queryIfs, long activeNodes,
+                                                          long totalQueries) {
+        double rowInputRate = 0;
+        double byteInputRate = 0;
+        double cpuTimeRate = 0;
+
+        long runningDrivers = 0;
+        double memoryReservation = 0;
+
+        long runningQueries = 0;
+        long blockedQueries = 0;
+        long queuedQueries = 0;
+
+        for (QueryInfo query : queryIfs) {
+            if (query.getState() == QueryState.QUEUED) {
+                queuedQueries++;
+            } else if (query.getState() == QueryState.RUNNING) {
+                if (query.getQueryStats().isFullyBlocked()) {
+                    blockedQueries++;
+                } else {
+                    runningQueries++;
+                }
+            }
+
+            if (!query.getState().isDone()) {
+                double totalExecutionTimeSeconds = query.getQueryStats().getElapsedTime().getValue(TimeUnit.SECONDS);
+                if (totalExecutionTimeSeconds != 0) {
+                    byteInputRate +=
+                        query.getQueryStats().getProcessedInputDataSize().toBytes() / totalExecutionTimeSeconds;
+                    rowInputRate += query.getQueryStats().getProcessedInputPositions() / totalExecutionTimeSeconds;
+                    cpuTimeRate += (query.getQueryStats().getTotalCpuTime().getValue(TimeUnit.SECONDS))
+                        / totalExecutionTimeSeconds;
+                }
+                memoryReservation += query.getQueryStats().getTotalMemoryReservation().toBytes();
+                runningDrivers += query.getQueryStats().getRunningPipelinExecs();
+            }
+        }
+
+        return new ClusterStats(totalQueries, runningQueries, blockedQueries, queuedQueries,
+            activeNodes, runningDrivers, memoryReservation, rowInputRate, byteInputRate,
+            cpuTimeRate
+        );
+    }
+
     @GET
     @Produces(MediaType.APPLICATION_JSON)
     public ClusterStats getClusterStats() {
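Hoisting the aggregation into the protected static getClusterStatsInternal means the per-query rate math can be exercised without a live node manager or query manager. The arithmetic it performs, shown self-contained on made-up numbers (each non-finished query contributes its processed input divided by its elapsed wall-clock seconds, and the contributions are summed across queries):

public class ClusterRateSketch {
    public static void main(String[] args) {
        // Two hypothetical in-flight queries.
        double[] processedBytes = {1_000_000, 250_000};
        long[] processedRows = {10_000, 2_500};
        double[] elapsedSeconds = {2.0, 0.5};

        double byteInputRate = 0;
        double rowInputRate = 0;
        for (int i = 0; i < processedBytes.length; i++) {
            if (elapsedSeconds[i] != 0) { // same zero-elapsed guard as getClusterStatsInternal
                byteInputRate += processedBytes[i] / elapsedSeconds[i];
                rowInputRate += processedRows[i] / elapsedSeconds[i];
            }
        }
        // 1_000_000/2.0 + 250_000/0.5 = 1_000_000 bytes/s; 10_000/2.0 + 2_500/0.5 = 10_000 rows/s
        System.out.println(byteInputRate + " B/s, " + rowInputRate + " rows/s");
    }
}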
@@ -83,25 +127,25 @@ public static class ClusterStats {
         @JsonCreator
         public ClusterStats(
             @JsonProperty("totalQueries")
-                long totalQueries, // total number of queries
+            long totalQueries, // total number of queries
             @JsonProperty("runningQueries")
-                long runningQueries, // number of running queries
+            long runningQueries, // number of running queries
             @JsonProperty("blockedQueries")
-                long blockedQueries, // number of blocked queries
+            long blockedQueries, // number of blocked queries
             @JsonProperty("queuedQueries")
-                long queuedQueries,
+            long queuedQueries,
             @JsonProperty("activeWorkers")
-                long activeWorkers,
+            long activeWorkers,
             @JsonProperty("runningDrivers")
-                long runningDrivers,
+            long runningDrivers,
             @JsonProperty("reservedMemory")
-                double reservedMemory,
+            double reservedMemory,
             @JsonProperty("rowInputRate")
-                double rowInputRate,
+            double rowInputRate,
             @JsonProperty("byteInputRate")
-                double byteInputRate,
+            double byteInputRate,
             @JsonProperty("cpuTimeRate")
-                double cpuTimeRate
+            double cpuTimeRate
         ) {
             this.totalQueries = totalQueries;
             this.runningQueries = runningQueries;
@@ -165,48 +209,4 @@ public double getCpuTimeRate() {
             return cpuTimeRate;
         }
     }
-
-    protected static ClusterStats getClusterStatsInternal(List<QueryInfo> queryIfs, long activeNodes,
-                                                          long totalQueries) {
-        double rowInputRate = 0;
-        double byteInputRate = 0;
-        double cpuTimeRate = 0;
-
-        long runningDrivers = 0;
-        double memoryReservation = 0;
-
-        long runningQueries = 0;
-        long blockedQueries = 0;
-        long queuedQueries = 0;
-
-        for (QueryInfo query : queryIfs) {
-            if (query.getState() == QueryState.QUEUED) {
-                queuedQueries++;
-            } else if (query.getState() == QueryState.RUNNING) {
-                if (query.getQueryStats().isFullyBlocked()) {
-                    blockedQueries++;
-                } else {
-                    runningQueries++;
-                }
-            }
-
-            if (!query.getState().isDone()) {
-                double totalExecutionTimeSeconds = query.getQueryStats().getElapsedTime().getValue(TimeUnit.SECONDS);
-                if (totalExecutionTimeSeconds != 0) {
-                    byteInputRate +=
-                        query.getQueryStats().getProcessedInputDataSize().toBytes() / totalExecutionTimeSeconds;
-                    rowInputRate += query.getQueryStats().getProcessedInputPositions() / totalExecutionTimeSeconds;
-                    cpuTimeRate += (query.getQueryStats().getTotalCpuTime().getValue(TimeUnit.SECONDS))
-                        / totalExecutionTimeSeconds;
-                }
-                memoryReservation += query.getQueryStats().getTotalMemoryReservation().toBytes();
-                runningDrivers += query.getQueryStats().getRunningPipelinExecs();
-            }
-        }
-
-        return new ClusterStats(totalQueries, runningQueries, blockedQueries, queuedQueries,
-            activeNodes, runningDrivers, memoryReservation, rowInputRate, byteInputRate,
-            cpuTimeRate
-        );
-    }
 }
\ No newline at end of file
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/FailureDetectorConfig.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/FailureDetectorConfig.java
index 7cadcf15f..9cd21f787 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/FailureDetectorConfig.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/FailureDetectorConfig.java
@@ -16,6 +16,7 @@
 package com.alibaba.polardbx.executor.mpp.web;

+import com.alibaba.polardbx.executor.mpp.metadata.NotNull;
 import io.airlift.configuration.Config;
 import io.airlift.configuration.ConfigDescription;
 import io.airlift.units.Duration;
@@ -23,7 +24,6 @@
 import javax.validation.constraints.DecimalMax;
 import javax.validation.constraints.DecimalMin;

-import com.alibaba.polardbx.executor.mpp.metadata.NotNull;
 import java.util.concurrent.TimeUnit;

 public class FailureDetectorConfig {
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/HeartbeatFailureDetector.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/HeartbeatFailureDetector.java
index 89de53048..8c75b3aaa 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/HeartbeatFailureDetector.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/HeartbeatFailureDetector.java
@@ -16,15 +16,15 @@
 package com.alibaba.polardbx.executor.mpp.web;

-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.ImmutableMap;
 import com.alibaba.polardbx.common.utils.logger.Logger;
 import
com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.mpp.Threads; import com.alibaba.polardbx.executor.mpp.client.MppMediaTypes; import com.alibaba.polardbx.executor.mpp.util.Failures; import com.alibaba.polardbx.executor.mpp.util.ImmutableCollectors; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableMap; import io.airlift.discovery.client.ServiceDescriptor; import io.airlift.discovery.client.ServiceSelector; import io.airlift.discovery.client.ServiceType; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/NodeStatus.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/NodeStatus.java index 0f19a1e28..c4b63247b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/NodeStatus.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/NodeStatus.java @@ -16,9 +16,9 @@ package com.alibaba.polardbx.executor.mpp.web; +import com.alibaba.polardbx.gms.node.NodeVersion; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -import com.alibaba.polardbx.gms.node.NodeVersion; import io.airlift.units.Duration; import static java.util.Objects.requireNonNull; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/QueryResource.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/QueryResource.java index 9ab0c697b..201024eeb 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/QueryResource.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/QueryResource.java @@ -29,18 +29,20 @@ */ package com.alibaba.polardbx.executor.mpp.web; -import com.google.common.collect.ImmutableList; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.mpp.execution.QueryExecution; import com.alibaba.polardbx.executor.mpp.execution.QueryInfo; import com.alibaba.polardbx.executor.mpp.execution.QueryManager; +import com.alibaba.polardbx.executor.mpp.execution.QuerySplitStats; import com.alibaba.polardbx.executor.mpp.execution.QueryState; +import com.alibaba.polardbx.executor.mpp.execution.QueryStatsInfo; import com.alibaba.polardbx.executor.mpp.execution.StageId; import com.alibaba.polardbx.executor.mpp.server.BasicQueryInfo; -import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; +import com.google.common.collect.ImmutableList; import io.airlift.http.client.HttpClient; import io.airlift.http.client.Request; @@ -116,19 +118,68 @@ public Response getQueryInfo(@PathParam("queryId") String queryId) { requireNonNull(queryId, "queryId is null"); try { - QueryInfo queryInfo = queryManager.getQueryInfo(queryId); - QueryInfo summary = queryInfo; - try { - summary = queryInfo.summary(); - } catch (Exception e) { - // ignore exceptions - } + QueryInfo summary = summarizeQuery(queryId); return Response.ok(summary).build(); } catch (NoSuchElementException e) { return Response.status(Status.GONE).build(); } } + @GET + @Path("stats/{queryId}") + public Response getQueryStatsInfo(@PathParam("queryId") String queryId) { + 
requireNonNull(queryId, "queryId is null");
+
+        try {
+            QueryInfo summary = summarizeQuery(queryId);
+            QueryStatsInfo statsInfo = QueryStatsInfo.from(summary);
+            return Response.ok(statsInfo).build();
+        } catch (NoSuchElementException e) {
+            return Response.status(Status.GONE).build();
+        }
+    }
+
+    @GET
+    @Path("stats/splits/{queryId}")
+    public Response getSplitStatsInfo(@PathParam("queryId") String queryId) {
+        requireNonNull(queryId, "queryId is null");
+
+        try {
+            QueryInfo summary = summarizeQuery(queryId);
+            QueryStatsInfo statsInfo = QueryStatsInfo.from(summary);
+            QuerySplitStats splitStats = QuerySplitStats.from(statsInfo);
+            return Response.ok(splitStats).build();
+        } catch (NoSuchElementException e) {
+            return Response.status(Status.GONE).build();
+        }
+    }
+
+    @GET
+    @Path("stats/pipeline/{queryId}")
+    public Response getPipelineStatsInfo(@PathParam("queryId") String queryId) {
+        requireNonNull(queryId, "queryId is null");
+
+        try {
+            QueryInfo summary = summarizeQuery(queryId);
+            QueryStatsInfo statsInfo = QueryStatsInfo.from(summary);
+            QuerySplitStats splitStats = QuerySplitStats.from(statsInfo);
+            return Response.ok(splitStats).build();
+        } catch (NoSuchElementException e) {
+            return Response.status(Status.GONE).build();
+        }
+    }
+
+    protected QueryInfo summarizeQuery(String queryId) {
+        QueryInfo queryInfo = queryManager.getQueryInfo(queryId);
+        QueryInfo summary = queryInfo;
+        try {
+            summary = queryInfo.summary();
+        } catch (Exception e) {
+            // ignore exceptions
+        }
+        return summary;
+    }
+
 @DELETE
 @Path("{queryId}")
 public void cancelQuery(@PathParam("queryId") String queryId) {
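The three new read-only endpoints expose progressively reduced views of a query's statistics (QueryStatsInfo, then QuerySplitStats derived from it). A hypothetical Java 8 client for the first endpoint; the /v1/query prefix is assumed from QueryResource's existing routes, and the host, port, and query id are placeholders:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class QueryStatsClientSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder coordinator address and query id.
        URL url = new URL("http://127.0.0.1:3406/v1/query/stats/20240101_000000_00000_aaaaa");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        // Expect 200 with the JSON form of QueryStatsInfo, or 410 GONE if the query is unknown.
        System.out.println("HTTP " + conn.getResponseCode());
        try (BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()))) {
            String line;
            while ((line = in.readLine()) != null) {
                System.out.println(line);
            }
        }
    }
}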
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/ServerInfo.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/ServerInfo.java
index 16c94c8da..fd6d1da74 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/ServerInfo.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/ServerInfo.java
@@ -16,9 +16,9 @@
 package com.alibaba.polardbx.executor.mpp.web;

+import com.alibaba.polardbx.gms.node.NodeVersion;
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;
-import com.alibaba.polardbx.gms.node.NodeVersion;
 import io.airlift.units.Duration;

 import javax.annotation.concurrent.Immutable;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/ServerInfoResource.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/ServerInfoResource.java
index d1b9b9939..8b3d449e4 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/ServerInfoResource.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/ServerInfoResource.java
@@ -17,9 +17,9 @@
 package com.alibaba.polardbx.executor.mpp.web;

 import com.alibaba.polardbx.common.TddlNode;
-import com.alibaba.polardbx.gms.node.NodeVersion;
 import com.alibaba.polardbx.executor.mpp.deploy.ServiceProvider;
 import com.alibaba.polardbx.gms.node.NodeState;
+import com.alibaba.polardbx.gms.node.NodeVersion;
 import io.airlift.node.NodeInfo;

 import javax.inject.Inject;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/StatusResource.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/StatusResource.java
index 5aefb4eec..5f0ef5f95 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/StatusResource.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/mpp/web/StatusResource.java
@@ -16,11 +16,11 @@
 package com.alibaba.polardbx.executor.mpp.web;

-import com.sun.management.OperatingSystemMXBean;
 import com.alibaba.polardbx.common.utils.thread.ThreadCpuStatUtil;
 import com.alibaba.polardbx.gms.node.NodeVersion;
 import com.alibaba.polardbx.optimizer.memory.MemoryManager;
 import com.alibaba.polardbx.optimizer.memory.MemoryPool;
+import com.sun.management.OperatingSystemMXBean;
 import io.airlift.node.NodeInfo;

 import javax.inject.Inject;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractBufferedJoinExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractBufferedJoinExec.java
index d48274ec1..dcd8ae895 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractBufferedJoinExec.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractBufferedJoinExec.java
@@ -24,12 +24,16 @@
 import com.alibaba.polardbx.config.ConfigDataMode;
 import com.alibaba.polardbx.executor.chunk.Chunk;
 import com.alibaba.polardbx.executor.chunk.ChunkConverter;
+import com.alibaba.polardbx.executor.operator.util.AntiJoinResultIterator;
 import com.alibaba.polardbx.executor.operator.util.ChunksIndex;
+import com.alibaba.polardbx.executor.operator.util.ConcurrentRawHashTable;
+import com.alibaba.polardbx.executor.operator.util.SyntheticAddress;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import com.alibaba.polardbx.optimizer.core.expression.calc.IExpression;
 import com.alibaba.polardbx.optimizer.core.join.EquiJoinKey;
 import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx;
 import com.alibaba.polardbx.optimizer.memory.MemoryPool;
+import com.clearspring.analytics.util.Preconditions;
 import org.apache.calcite.rel.core.JoinRelType;

 import java.util.List;
@@ -42,6 +46,10 @@
  * @author hongxi.chx
  */
 abstract class AbstractBufferedJoinExec extends AbstractJoinExec {
+    /**
+     * A placeholder to mark there is no more element in this position link
+     */
+    public static final int LIST_END = ConcurrentRawHashTable.NOT_EXISTS;

     private static final Logger logger = LoggerFactory.getLogger(AbstractBufferedJoinExec.class);

@@ -55,32 +63,48 @@ abstract class AbstractBufferedJoinExec extends AbstractJoinExec {

     // Internal states
     protected Chunk probeChunk;
-    protected int[] probeKeyHashCode;
-    private int probePosition;
-    private Chunk probeJoinKeyChunk; // for equi-join
+    protected int probePosition;
+    protected Chunk probeJoinKeyChunk; // for equi-join

     // Special mode only for semi/anti-join
     protected boolean passThrough;
     protected boolean passNothing;
-    private boolean isMatching;
-    private int matchedPosition;
-    private boolean matched;
+    protected boolean isMatching;
+    protected int matchedPosition;
+    protected boolean matched;
     protected boolean streamJoin;

     // TODO all anti join use anticondition instead of antiJoinOperands
-    private boolean useAntiCondition = false;
+    protected boolean useAntiCondition = false;

-    AbstractBufferedJoinExec(Executor outerInput,
-                             Executor innerInput,
-                             JoinRelType joinType,
-                             boolean maxOneRow,
-                             List<EquiJoinKey> joinKeys,
-                             IExpression condition,
-                             List<IExpression> antiJoinOperands,
-                             IExpression antiCondition,
-                             boolean useAntiCondition,
-                             ExecutionContext context) {
+    protected ProbeOperator probeOperator;
+
+    protected boolean buildOuterInput;
+
+    /**
+     * used to switch status from probe to output row under reverse anti join
+     */
+    AntiJoinResultIterator antiJoinResultIterator;
+
+    int resultPartition = -1;
+
+    int partitionCount = -1;
+
+    int currentPartition = -1;
+
+    boolean keepPartition = false;
+
+    protected AbstractBufferedJoinExec(Executor outerInput,
+                                       Executor innerInput,
+                                       JoinRelType joinType,
+                                       boolean maxOneRow,
+                                       List<EquiJoinKey> joinKeys,
+                                       IExpression condition,
+                                       List<IExpression> antiJoinOperands,
+                                       IExpression antiCondition,
+                                       boolean useAntiCondition,
+                                       ExecutionContext context) {
         this(outerInput, innerInput, joinType, maxOneRow, joinKeys, condition, antiJoinOperands, antiCondition,
             context);
         this.useAntiCondition = useAntiCondition;
@@ -99,6 +123,7 @@ abstract class AbstractBufferedJoinExec extends AbstractJoinExec {
             context);
         createBlockBuilders();
         this.isEquiJoin = joinKeys != null;
+        this.probeOperator = new DefaultProbeOperator(true);
     }

     @Override
@@ -124,43 +149,91 @@ Chunk doNextChunk() {
         if (!streamJoin) {
             long start = System.currentTimeMillis();
             while (currentPosition() < chunkLimit) {
-                if (probeChunk == null || probePosition == probeChunk.getPositionCount()) {
+                // if reverse semi join is matching (output matching rows), we cannot update probeChunk
+                if (probeChunk == null || probePosition == probeChunk.getPositionCount()
+                    || reverseSemiJoinNotMatching()) {
                     if (System.currentTimeMillis() - start >= MppConfig.getInstance().getSplitRunQuanta()) {
                         //exceed 1 second
                         probeJoinKeyChunk = null;
-                        probeKeyHashCode = null;
                         probeChunk = null;
                         break;
                     }
+                    Chunk recyclableChunk = probeChunk;
                     probeChunk = nextProbeChunk();
+
+                    if (shouldRecycle && recyclableChunk != null) {
+                        recyclableChunk.recycle();
+                    }
+
                     if (probeChunk == null) {
                         probeJoinKeyChunk = null;
-                        probeKeyHashCode = null;
                         break;
                     } else {
                         if (isEquiJoin) {
                             probeJoinKeyChunk = getProbeKeyChunkGetter().apply(probeChunk);
-                            probeKeyHashCode = probeJoinKeyChunk.hashCodeVector();
                         }
                         probePosition = 0;
+                        // join result keep partition of probe side
+                        int partition = probeChunk.getPartIndex();
+                        if (keepPartition && partition >= 0) {
+                            if (currentPartition == -1) {
+                                // init, no need to break
+                                currentPartition = partition;
+                                resultPartition = partition;
+                                partitionCount = probeChunk.getPartCount();
+                            } else if (partition != currentPartition) {
+                                resultPartition = currentPartition;
+                                currentPartition = partition;
+                                // break loop to output result of last partition
+                                break;
+                            } else {
+                                resultPartition = currentPartition;
+                            }
+                        }
                     }
                 }
                 // Process outer rows in this input chunk
                 nextRows();
             }
         } else {
-            if (probeChunk == null || probePosition == probeChunk.getPositionCount()) {
+            // if reverse semi join is matching (output matching rows), we cannot update probeChunk
+            if (probeChunk == null || probePosition == probeChunk.getPositionCount() || reverseSemiJoinNotMatching()) {
+                Chunk recyclableChunk = probeChunk;
                 probeChunk = nextProbeChunk();
+
+                if (shouldRecycle && recyclableChunk != null) {
+                    recyclableChunk.recycle();
+                }
+
                 if (probeChunk == null) {
                     probeJoinKeyChunk = null;
-                    probeKeyHashCode = null;
                 } else {
                     if (isEquiJoin) {
                         probeJoinKeyChunk = getProbeKeyChunkGetter().apply(probeChunk);
-                        probeKeyHashCode = probeJoinKeyChunk.hashCodeVector();
+                    }
                     probePosition = 0;
-                    nextRows();
+                    // join result keep partition of probe side
+                    int partition = probeChunk.getPartIndex();
+                    boolean continueMatch = true;
+                    if (keepPartition && partition >= 0) {
+                        if (currentPartition == -1) {
+                            // init, no need to break
+                            currentPartition = partition;
+                            resultPartition = partition;
+                            partitionCount = probeChunk.getPartCount();
+                        } else if (partition != currentPartition) {
+                            resultPartition =
currentPartition; + currentPartition = partition; + // not continue match to output result of last partition + continueMatch = false; + } else { + resultPartition = currentPartition; + } + } + if (continueMatch) { + nextRows(); + } } } else { nextRows(); @@ -175,108 +248,56 @@ Chunk doNextChunk() { } } } - if (currentPosition() == 0) { - return null; - } else { - return buildChunkAndReset(); - } - } - private void nextRows() { - final int positionCount = probeChunk.getPositionCount(); - for (; probePosition < positionCount; probePosition++) { - - // reset matched flag unless it's still during matching - if (!isMatching) { - matched = false; - matchedPosition = matchInit(probeJoinKeyChunk, probeKeyHashCode, probePosition); + if (joinType == JoinRelType.ANTI && buildOuterInput) { + // not reach the stage of output not matched records under reverse anti join + if (antiJoinResultIterator == null) { + return null; } else { - // continue from the last processed match - matchedPosition = matchNext(matchedPosition, probeJoinKeyChunk, probePosition); - isMatching = false; - } - - boolean hasAntiNull = false; - for (; matchValid(matchedPosition); - matchedPosition = matchNext(matchedPosition, probeJoinKeyChunk, probePosition)) { - if (hasAntiNull == false) { - hasAntiNull = - checkAntiJoinConditionHasNull(buildChunks, probeChunk, probePosition, matchedPosition); - } - if (!checkJoinCondition(buildChunks, probeChunk, probePosition, matchedPosition)) { - continue; - } - - if (joinType == JoinRelType.INNER || joinType == JoinRelType.LEFT) { - buildJoinRow(buildChunks, probeChunk, probePosition, matchedPosition); - } else if (joinType == JoinRelType.RIGHT) { - buildRightJoinRow(buildChunks, probeChunk, probePosition, matchedPosition); - } - - // checks max1row generated from scalar subquery - if ((!ConfigDataMode.isFastMock()) && singleJoin && matched) { - throw new TddlRuntimeException(ErrorCode.ERR_SCALAR_SUBQUERY_RETURN_MORE_THAN_ONE_ROW); - } - - // set matched flag - matched = true; - - // semi/anti-joins do not care multiple matches - if (semiJoin) { - break; - } - - // check buffered data is full - if (currentPosition() >= chunkLimit) { - isMatching = true; - return; - } + return antiJoinResultIterator.nextChunk(); } + } - // generates a null result while using outer join - if (outerJoin && outputNullRowInTime() && !matched) { - if (joinType != JoinRelType.RIGHT) { - buildLeftNullRow(probeChunk, probePosition); - } else { - buildRightNullRow(probeChunk, probePosition); - } + Chunk result = currentPosition() == 0 ? 
null : buildChunkAndReset(); + if (keepPartition) { + if (result != null) { + result.setPartIndex(resultPartition); + result.setPartCount(partitionCount); } + resultPartition = currentPartition; + } + return result; + } - // generates a semi-row result while using semi or anti-semi join - if (semiJoin) { - if (joinType == JoinRelType.SEMI && matched) { - buildSemiJoinRow(probeChunk, probePosition); - } else if (joinType == JoinRelType.ANTI && !matched) { - if (useAntiCondition) { - if (!hasAntiNull) { - buildSemiJoinRow(probeChunk, probePosition); - } - } else if (checkAntiJoinOperands(probeChunk, probePosition)) { - buildSemiJoinRow(probeChunk, probePosition); - } - } - } + private void nextRows() { + probeOperator.nextRows(); + } - // check buffered data is full - if (currentPosition() >= chunkLimit) { - probePosition++; - return; - } - } + private boolean reverseSemiJoinNotMatching() { + return semiJoin && joinType == JoinRelType.SEMI && buildOuterInput && !isMatching; } protected boolean nextJoinNullRows() { return false; } - private void buildSemiJoinRow(Chunk inputChunk, int position) { + protected void buildSemiJoinRow(Chunk inputChunk, int position) { // outer side only for (int i = 0; i < outerInput.getDataTypes().size(); i++) { inputChunk.getBlock(i).writePositionTo(position, blockBuilders[i]); } } - private boolean checkAntiJoinOperands(Chunk outerChunk, int outerPosition) { + protected void buildReverseSemiJoinRow(ChunksIndex inputChunk, int position) { + // inner side only + long chunkIdAndPos = inputChunk.getAddress(position); + for (int i = 0; i < getBuildInput().getDataTypes().size(); i++) { + inputChunk.getChunk(SyntheticAddress.decodeIndex(chunkIdAndPos)).getBlock(i) + .writePositionTo(SyntheticAddress.decodeOffset(chunkIdAndPos), blockBuilders[i]); + } + } + + protected boolean checkAntiJoinOperands(Chunk outerChunk, int outerPosition) { return checkAntiJoinOperands(outerChunk.rowAt(outerPosition)); } @@ -294,13 +315,18 @@ void doSpecialCheckForSemiJoin() { if (joinType == JoinRelType.SEMI) { passNothing = true; } else if (joinType == JoinRelType.ANTI) { - // Note that even for 'NOT IN' anti-join, we should not check operator anymore - passThrough = true; + // if this is reverse anti join, nothing should be returned + if (buildOuterInput) { + passNothing = true; + } else { + // Note that even for 'NOT IN' anti-join, we should not check operator anymore + passThrough = true; + } } else { throw new AssertionError(); } } else if (joinType == JoinRelType.ANTI && antiJoinOperands != null - && buildChunks.getChunk(0).getBlockCount() == 1) { + && buildChunks.getChunk(0).getBlockCount() == 1 && !buildOuterInput) { // Special case for x NOT IN (... NULL ...) which results in NULL if (checkContainsNull(buildChunks)) { passNothing = true; @@ -331,4 +357,318 @@ protected boolean outputNullRowInTime() { public void setStreamJoin(boolean streamJoin) { this.streamJoin = streamJoin; } + + class DefaultProbeOperator implements ProbeOperator { + protected final boolean useBloomFilter; + + protected DefaultProbeOperator(boolean useBloomFilter) { + this.useBloomFilter = useBloomFilter; + } + + @Override + public void nextRows() { + final int positionCount = probeChunk.getPositionCount(); + + // build hash code vector + int[] probeKeyHashCode = probeJoinKeyChunk == null ? 
null : probeJoinKeyChunk.hashCodeVector(); + + for (; probePosition < positionCount; probePosition++) { + + // reset matched flag unless it's still during matching + if (!isMatching) { + matched = false; + matchedPosition = matchInit(probeJoinKeyChunk, probeKeyHashCode, probePosition); + } else { + // continue from the last processed match + matchedPosition = matchNext(matchedPosition, probeJoinKeyChunk, probePosition); + isMatching = false; + } + + boolean hasAntiNull = false; + + if (joinType == JoinRelType.INNER && condition == null && !semiJoin) { + for (; matchValid(matchedPosition); + matchedPosition = matchNext(matchedPosition, probeJoinKeyChunk, probePosition)) { + + buildJoinRow(buildChunks, probeChunk, probePosition, matchedPosition); + + // set matched flag + matched = true; + + // check buffered data is full + if (currentPosition() >= chunkLimit) { + isMatching = true; + return; + } + } + } else { + for (; matchValid(matchedPosition); + matchedPosition = matchNext(matchedPosition, probeJoinKeyChunk, probePosition)) { + if (hasAntiNull == false) { + hasAntiNull = + checkAntiJoinConditionHasNull(buildChunks, probeChunk, probePosition, matchedPosition); + } + if (!checkJoinCondition(buildChunks, probeChunk, probePosition, matchedPosition)) { + continue; + } + + if (joinType == JoinRelType.INNER || joinType == JoinRelType.LEFT) { + buildJoinRow(buildChunks, probeChunk, probePosition, matchedPosition); + } else if (joinType == JoinRelType.RIGHT) { + buildRightJoinRow(buildChunks, probeChunk, probePosition, matchedPosition); + } + + // checks max1row generated from scalar subquery + if ((!ConfigDataMode.isFastMock()) && singleJoin && matched) { + throw new TddlRuntimeException(ErrorCode.ERR_SCALAR_SUBQUERY_RETURN_MORE_THAN_ONE_ROW); + } + + // set matched flag + matched = true; + + // semi/anti-joins do not care multiple matches + if (semiJoin) { + break; + } + + // check buffered data is full + if (currentPosition() >= chunkLimit) { + isMatching = true; + return; + } + } + } + + // generates a null result while using outer join + if (outerJoin && outputNullRowInTime() && !matched) { + if (joinType != JoinRelType.RIGHT) { + buildLeftNullRow(probeChunk, probePosition); + } else { + buildRightNullRow(probeChunk, probePosition); + } + } + + // generates a semi-row result while using semi or anti-semi join + if (semiJoin) { + if (joinType == JoinRelType.SEMI && matched) { + buildSemiJoinRow(probeChunk, probePosition); + } else if (joinType == JoinRelType.ANTI && !matched) { + if (useAntiCondition) { + if (!hasAntiNull) { + buildSemiJoinRow(probeChunk, probePosition); + } + } else if (checkAntiJoinOperands(probeChunk, probePosition)) { + buildSemiJoinRow(probeChunk, probePosition); + } + } + } + + // check buffered data is full + if (currentPosition() >= chunkLimit) { + probePosition++; + return; + } + } + } + + @Override + public void close() { + + } + + @Override + public int estimateSize() { + // no extra memory usage. + return 0; + } + } + + class SimpleReverseSemiProbeOperator implements ProbeOperator { + protected final Synchronizer synchronizer; + + // for hash code. 
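+    // NOTE (illustrative, not in this patch): these three chunkLimit-sized scratch
+    // arrays back the vectorized probeJoinKeyChunk.hashCodeVector(...) call used
+    // below, which appears to fill one hash code per probe position in a single
+    // pass instead of hashing row by row. Conceptually:
+    //   for (int i = 0; i < positionCount; i++) {
+    //       probeKeyHashCode[i] = hash(probeJoinKeyChunk.rowAt(i));
+    //   }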
+ protected final int[] probeKeyHashCode = new int[chunkLimit]; + protected final int[] intermediates = new int[chunkLimit]; + protected final int[] blockHashCodes = new int[chunkLimit]; + + protected SimpleReverseSemiProbeOperator(Synchronizer synchronizer) { + Preconditions.checkArgument(condition == null, + "simple reverse semi probe operator not support other join condition"); + this.synchronizer = synchronizer; + } + + @Override + public void nextRows() { + final int positionCount = probeChunk.getPositionCount(); + + // build hash code vector + probeJoinKeyChunk.hashCodeVector(probeKeyHashCode, intermediates, blockHashCodes, positionCount); + + for (; probePosition < positionCount; probePosition++) { + + // reset matched flag unless it's still during matching + if (!isMatching) { + matchedPosition = matchInit(probeJoinKeyChunk, probeKeyHashCode, probePosition); + isMatching = true; + } else { + // continue from the last processed match + matchedPosition = matchNext(matchedPosition, probeJoinKeyChunk, probePosition); + } + + // if condition not match or mark failed, just return + if (!matchValid(matchedPosition) || !synchronizer.getMatchedPosition().markAndGet(matchedPosition)) { + isMatching = false; + continue; + } + + for (; matchValid(matchedPosition); + matchedPosition = matchNext(matchedPosition, probeJoinKeyChunk, probePosition)) { + + buildReverseSemiJoinRow(buildChunks, matchedPosition); + + // check buffered data is full + if (currentPosition() >= chunkLimit) { + isMatching = true; + return; + } + } + + isMatching = false; + } + } + + @Override + public void close() { + + } + + @Override + public int estimateSize() { + // no extra memory usage. + return 0; + } + } + + class ReverseSemiProbeOperator implements ProbeOperator { + + protected final Synchronizer synchronizer; + // for hash code. 
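+    // NOTE (illustrative sketch, not in this patch): unlike the Simple variant
+    // above, this operator evaluates checkJoinCondition per candidate, and only
+    // the thread whose markAndGet(matchedPosition) succeeds (presumably an atomic
+    // test-and-set on the shared bitmap) emits the build row, so each inner row
+    // is output at most once under parallel probing:
+    //   if (checkJoinCondition(...) && synchronizer.getMatchedPosition().markAndGet(pos)) {
+    //       buildReverseSemiJoinRow(buildChunks, pos);
+    //   }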
+ protected final int[] probeKeyHashCode = new int[chunkLimit]; + protected final int[] intermediates = new int[chunkLimit]; + protected final int[] blockHashCodes = new int[chunkLimit]; + + protected ReverseSemiProbeOperator(Synchronizer synchronizer) { + this.synchronizer = synchronizer; + } + + @Override + public void nextRows() { + final int positionCount = probeChunk.getPositionCount(); + + // build hash code vector + probeJoinKeyChunk.hashCodeVector(probeKeyHashCode, intermediates, blockHashCodes, positionCount); + + for (; probePosition < positionCount; probePosition++) { + + // reset matched flag unless it's still during matching + if (!isMatching) { + matchedPosition = matchInit(probeJoinKeyChunk, probeKeyHashCode, probePosition); + isMatching = true; + } else { + // continue from the last processed match + matchedPosition = matchNext(matchedPosition, probeJoinKeyChunk, probePosition); + } + + // if condition not match, just return + if (!matchValid(matchedPosition)) { + isMatching = false; + continue; + } + + for (; matchValid(matchedPosition); + matchedPosition = matchNext(matchedPosition, probeJoinKeyChunk, probePosition)) { + + if (!checkJoinCondition(buildChunks, probeChunk, probePosition, matchedPosition)) { + continue; + } + + // if cas failed, another thread has output this record, but cannot stop + if (!synchronizer.getMatchedPosition().markAndGet(matchedPosition)) { + continue; + } + + buildReverseSemiJoinRow(buildChunks, matchedPosition); + + // check buffered data is full + if (currentPosition() >= chunkLimit) { + isMatching = true; + return; + } + } + + isMatching = false; + } + } + + @Override + public void close() { + + } + + @Override + public int estimateSize() { + // no extra memory usage. + return 0; + } + + } + + class SimpleReverseAntiProbeOperator implements ProbeOperator { + + protected final Synchronizer synchronizer; + + // for hash code. + protected final int[] probeKeyHashCode = new int[chunkLimit]; + protected final int[] intermediates = new int[chunkLimit]; + protected final int[] blockHashCodes = new int[chunkLimit]; + + protected SimpleReverseAntiProbeOperator(Synchronizer synchronizer) { + this.synchronizer = synchronizer; + } + + @Override + public void nextRows() { + final int positionCount = probeChunk.getPositionCount(); + + // build hash code vector + probeJoinKeyChunk.hashCodeVector(probeKeyHashCode, intermediates, blockHashCodes, positionCount); + + for (; probePosition < positionCount; probePosition++) { + matchedPosition = matchInit(probeJoinKeyChunk, probeKeyHashCode, probePosition); + // if cas failed, another thread has marked all matched records + if (!matchValid(matchedPosition) || !synchronizer.getMatchedPosition().markAndGet(matchedPosition)) { + continue; + } + + for (; + matchValid(matchedPosition); + matchedPosition = matchNext(matchedPosition, probeJoinKeyChunk, probePosition)) { + synchronizer.getMatchedPosition().rawMark(matchedPosition); + } + } + } + + @Override + public void close() { + + } + + @Override + public int estimateSize() { + // no extra memory usage. 
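+        // NOTE: "no extra memory usage" here means no allocation that grows with
+        // the input; the fixed probeKeyHashCode/intermediates/blockHashCodes
+        // scratch arrays (3 * chunkLimit ints) are treated as negligible, matching
+        // the other reverse probe operators.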
+ return 0; + } + + } + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractChunkBuffer.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractChunkBuffer.java index cffb16324..d927a5089 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractChunkBuffer.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractChunkBuffer.java @@ -18,6 +18,7 @@ import com.google.common.collect.AbstractIterator; import com.alibaba.polardbx.executor.chunk.Chunk; +import com.google.common.collect.AbstractIterator; import java.util.Iterator; import java.util.LinkedList; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractExecutor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractExecutor.java index eeeb95bee..7870d7c1e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractExecutor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractExecutor.java @@ -16,6 +16,7 @@ package com.alibaba.polardbx.executor.operator; +import com.alibaba.polardbx.executor.mpp.operator.DriverContext; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.alibaba.polardbx.common.properties.ConnectionParams; @@ -34,6 +35,8 @@ import com.alibaba.polardbx.optimizer.statis.OperatorStatistics; import com.alibaba.polardbx.statistics.RuntimeStatHelper; import com.alibaba.polardbx.statistics.RuntimeStatistics.OperatorStatisticsGroup; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; import java.util.ArrayList; import java.util.List; @@ -130,7 +133,7 @@ public final Chunk nextChunk() { return ret; } - final void createBlockBuilders() { + protected void createBlockBuilders() { // Create all block builders by default final List columns = getDataTypes(); blockBuilders = new BlockBuilder[columns.size()]; @@ -139,13 +142,13 @@ final void createBlockBuilders() { } } - final void reset() { + protected void reset() { for (int i = 0; i < blockBuilders.length; i++) { blockBuilders[i] = blockBuilders[i].newBlockBuilder(); } } - final Chunk buildChunkAndReset() { + protected Chunk buildChunkAndReset() { Block[] blocks = new Block[blockBuilders.length]; for (int i = 0; i < blockBuilders.length; i++) { blocks[i] = blockBuilders[i].build(); @@ -197,7 +200,7 @@ protected void afterProcess(Chunk result) { } /** - * An operator should records its memory usage (bytes) if it buffers any data + * An operator should record its memory usage (bytes) if it buffers any data */ void collectMemoryUsage(MemoryPool pool) { if (statistics != null && !pool.isDestoryed()) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractHashAggExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractHashAggExec.java index f47ca49f5..181a28282 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractHashAggExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractHashAggExec.java @@ -21,7 +21,7 @@ import com.alibaba.polardbx.executor.operator.util.AggResultIterator; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.Aggregator; +import 
com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; import com.alibaba.polardbx.optimizer.memory.MemoryPool; import com.alibaba.polardbx.optimizer.memory.OperatorMemoryAllocatorCtx; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractHashJoinExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractHashJoinExec.java index df703b79f..11fb3fa4e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractHashJoinExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractHashJoinExec.java @@ -16,16 +16,28 @@ package com.alibaba.polardbx.executor.operator; +import com.alibaba.polardbx.common.utils.bloomfilter.ConcurrentIntBloomFilter; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.chunk.IntegerBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; +import com.alibaba.polardbx.executor.operator.util.BatchBlockWriter; +import com.alibaba.polardbx.executor.operator.util.ChunksIndex; import com.alibaba.polardbx.executor.operator.util.ConcurrentRawHashTable; +import com.alibaba.polardbx.executor.operator.util.TypedListHandle; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.expression.calc.IExpression; +import com.alibaba.polardbx.optimizer.core.expression.calc.InputRefExpression; +import com.alibaba.polardbx.optimizer.core.expression.calc.ScalarFunctionExpression; import com.alibaba.polardbx.optimizer.core.join.EquiJoinKey; -import com.alibaba.polardbx.common.utils.bloomfilter.FastIntBloomFilter; +import com.google.common.base.Preconditions; import org.apache.calcite.rel.core.JoinRelType; +import java.util.BitSet; import java.util.List; /** @@ -34,16 +46,14 @@ */ public abstract class AbstractHashJoinExec extends AbstractBufferedJoinExec implements ConsumerExecutor { - private static final Logger logger = LoggerFactory.getLogger(AbstractHashJoinExec.class); - /** * A placeholder to mark there is no more element in this position link */ public static final int LIST_END = ConcurrentRawHashTable.NOT_EXISTS; - + private static final Logger logger = LoggerFactory.getLogger(AbstractHashJoinExec.class); ConcurrentRawHashTable hashTable; int[] positionLinks; - FastIntBloomFilter bloomFilter; + ConcurrentIntBloomFilter bloomFilter; public AbstractHashJoinExec(Executor outerInput, Executor innerInput, @@ -57,6 +67,20 @@ public AbstractHashJoinExec(Executor outerInput, context); } + @Override + protected void createBlockBuilders() { + if (!useVecJoin || !enableVecBuildJoinRow) { + super.createBlockBuilders(); + return; + } + // Create all block builders by default + final List columns = getDataTypes(); + blockBuilders = new BlockBuilder[columns.size()]; + for (int i = 0; i < columns.size(); i++) { + blockBuilders[i] = BatchBlockWriter.create(columns.get(i), context, chunkLimit); + } + } + @Override public void closeConsume(boolean force) { buildChunks = null; @@ -74,12 +98,16 @@ void doClose() { this.hashTable = null; this.positionLinks = null; + + if (probeOperator != null) { + probeOperator.close(); + } } @Override int matchInit(Chunk keyChunk, int[] hashCodes, int 
position) { int hashCode = hashCodes[position]; - if (bloomFilter != null && !bloomFilter.mightContain(hashCode)) { + if (bloomFilter != null && !bloomFilter.mightContainInt(hashCode)) { return LIST_END; } @@ -109,4 +137,2003 @@ int matchNext(int current, Chunk keyChunk, int position) { boolean matchValid(int current) { return current != LIST_END; } -} + + /** + * get the condition index of BuildChunk for TypedHashTable, + * should check isScalarInputRefCondition before calling this method + */ + protected int getBuildChunkConditionIndex() { + List args = ((ScalarFunctionExpression) condition).getArgs(); + Preconditions.checkArgument(args.size() == 2, "Join condition arg count should be 2"); + + // get build chunk condition index for TypedHashTable + int idx1 = ((InputRefExpression) args.get(0)).getInputRefIndex(); + int idx2 = ((InputRefExpression) args.get(1)).getInputRefIndex(); + + if (buildOuterInput) { + // since this is outer build, build chunk is on the left side and has a smaller index + return Math.min(idx1, idx2); + } else { + int buildIndex = Math.max(idx1, idx2); + // since this is inner build, build chunk is on the left side + return buildIndex - outerInput.getDataTypes().size(); + } + } + + /** + * get the condition index of ProbeChunk for TypedHashTable, + * should check isScalarInputRefCondition before calling this method + */ + protected int getProbeChunkConditionIndex() { + List args = ((ScalarFunctionExpression) condition).getArgs(); + Preconditions.checkArgument(args.size() == 2, "Join condition arg count should be 2"); + + // get build chunk condition index for TypedHashTable + int idx1 = ((InputRefExpression) args.get(0)).getInputRefIndex(); + int idx2 = ((InputRefExpression) args.get(1)).getInputRefIndex(); + + if (buildOuterInput) { + // since this is outer build, the probe index is on the right side + int probeIndex = Math.max(idx1, idx2); + // since this is outer build (reverse), the probe index is on the right side + return probeIndex - outerInput.getDataTypes().size(); + } else { + // since this is inner build, the probe index is on the left side + return Math.min(idx1, idx2); + } + } + + class MultiIntProbeOperator implements ProbeOperator { + protected final boolean enableVecBuildJoinRow; + protected final int keySize; + // for hash code. + protected final int[] probeKeyHashCode = new int[chunkLimit]; + protected final int[] intermediates = new int[chunkLimit]; + protected final int[] blockHashCodes = new int[chunkLimit]; + protected int[][] valueArray; + // for null values of probe keys. + protected boolean hasNull = false; + protected BitSet nullBitmap = new BitSet(chunkLimit); + protected long[] serializedValues; + protected int matchedRows = 0; + protected int[] matchedPositions = new int[chunkLimit]; + protected int[] probePositions = new int[chunkLimit]; + // for chunksIndex address + protected int[] chunkIds = new int[chunkLimit]; + protected int[] positionsInChunk = new int[chunkLimit]; + protected int startProbePosition; + + protected MultiIntProbeOperator(int keySize, boolean enableVecBuildJoinRow) { + this.enableVecBuildJoinRow = enableVecBuildJoinRow; + this.keySize = keySize; + this.valueArray = new int[keySize][chunkLimit]; + // the width for comparison is in (keySize + 1) / 2 * 64 bit. 
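+            // NOTE (illustrative, not in this patch): two int keys are packed into
+            // one long, so a row of keySize ints occupies (keySize + 1) / 2 longs.
+            // For example, keySize = 3 packs as [k0|k1][k2|0] -> 2 longs per row,
+            // and keySize = 4 as [k0|k1][k2|k3] -> also 2 longs per row.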
+ this.serializedValues = new long[chunkLimit * ((keySize + 1) / 2)]; + } + + @Override + public void close() { + valueArray = null; + serializedValues = null; + matchedPositions = null; + probePositions = null; + chunkIds = null; + positionsInChunk = null; + } + + @Override + public int estimateSize() { + return Integer.BYTES * chunkLimit * keySize + Integer.BYTES * chunkLimit * 4 + Long.BYTES * chunkLimit; + } + + @Override + public void nextRows() { + Preconditions.checkArgument(probeJoinKeyChunk.getBlockCount() == keySize); + final int positionCount = probeJoinKeyChunk.getPositionCount(); + + // build hash code vector + probeJoinKeyChunk.hashCodeVector(probeKeyHashCode, intermediates, blockHashCodes, positionCount); + + final int currentPosition = currentPosition(); + startProbePosition = probePosition; + + // copy array from long block, and collect null values. + nullBitmap.clear(); + for (int keyCol = 0; keyCol < keySize; keyCol++) { + Block keyBlock = probeJoinKeyChunk.getBlock(keyCol).cast(Block.class); + + keyBlock.copyToIntArray(startProbePosition, positionCount - startProbePosition, + valueArray[keyCol], 0, null); + + // collect null from all blocks. + hasNull |= keyBlock.mayHaveNull(); + if (keyBlock.mayHaveNull()) { + keyBlock.collectNulls(startProbePosition, positionCount - startProbePosition, nullBitmap, 0); + } + } + + // Build serialized value: + // for example, if keySize = 4, the serialized value array = + // {(array1[0], array2[0]), (array3[0], array4[0]), (array1[1], array2[1]), (array3[1], array4[1]) ... } + int serializedValuesIndex = 0; + for (int i = 0; i < positionCount - startProbePosition; i++) { + if (keySize % 2 == 0) { + // when the count of key columns is even number + for (int keyCol = 0; keyCol < keySize; keyCol++) { + serializedValues[serializedValuesIndex++] = + TypedListHandle.serialize(valueArray[keyCol][i], valueArray[keyCol + 1][i]); + keyCol++; + } + } else { + // when the count of key columns is odd number + for (int keyCol = 0; keyCol < keySize - 1; keyCol++) { + serializedValues[serializedValuesIndex++] = + TypedListHandle.serialize(valueArray[keyCol][i], valueArray[keyCol + 1][i]); + keyCol++; + } + // for the last column + serializedValues[serializedValuesIndex++] = + TypedListHandle.serialize(valueArray[keySize - 1][i], 0); + } + + } + + matchedRows = 0; + for (; probePosition < positionCount; probePosition++) { + + // reset matched flag unless it's still during matching + if (!isMatching) { + matched = false; + matchedPosition = matchInit(probeKeyHashCode, probePosition); + } else { + // continue from the last processed match + matchedPosition = matchNext(matchedPosition, probePosition); + isMatching = false; + } + + for (; matchedPosition != LIST_END; + matchedPosition = matchNext(matchedPosition, probePosition)) { + + // record matched rows of [probed, matched] + matchedPositions[matchedRows] = matchedPosition; + probePositions[matchedRows] = probePosition; + matchedRows++; + + // set matched flag + matched = true; + + // check buffered data is full + if (currentPosition + matchedRows >= chunkLimit) { + buildJoinRowInBatch(buildChunks, probeChunk); + isMatching = true; + return; + } + } + + // check buffered data is full + if (currentPosition + matchedRows >= chunkLimit) { + buildJoinRowInBatch(buildChunks, probeChunk); + probePosition++; + return; + } + + } + buildJoinRowInBatch(buildChunks, probeChunk); + } + + protected void buildJoinRowInBatch(ChunksIndex chunksIndex, Chunk probeInputChunk) { + if (!enableVecBuildJoinRow) { + // first 
outer side, then inner side + int col = 0; + for (int i = 0; i < outerInput.getDataTypes().size(); i++) { + for (int row = 0; row < matchedRows; row++) { + int probePosition = probePositions[row]; + probeInputChunk.getBlock(i).writePositionTo(probePosition, blockBuilders[col]); + } + col++; + } + + final int rightColumns = singleJoin ? 1 : innerInput.getDataTypes().size(); + for (int i = 0; i < rightColumns; i++) { + for (int row = 0; row < matchedRows; row++) { + chunksIndex.writePositionTo(i, matchedPositions[row], blockBuilders[col]); + } + col++; + } + return; + } + + // first outer side, then inner side + int col = 0; + for (int i = 0; i < outerInput.getDataTypes().size(); i++) { + if (blockBuilders[col] instanceof BatchBlockWriter) { + ((BatchBlockWriter) blockBuilders[col]).copyBlock(probeInputChunk.getBlock(i), probePositions, + matchedRows); + } else { + for (int row = 0; row < matchedRows; row++) { + int probePosition = probePositions[row]; + probeInputChunk.getBlock(i).writePositionTo(probePosition, blockBuilders[col]); + } + } + + col++; + } + + final int rightColumns = singleJoin ? 1 : innerInput.getDataTypes().size(); + + boolean initialAddress = false; + for (int i = 0; i < rightColumns; i++) { + if (innerKeyMapping[i] >= 0 && blockBuilders[col] instanceof BatchBlockWriter) { + int keyColumnIndex = innerKeyMapping[i]; + IntegerBlock integerBlock = new IntegerBlock(0, chunkLimit, null, valueArray[keyColumnIndex]); + ((BatchBlockWriter) blockBuilders[col]).copyBlock(integerBlock, probePositions, + -startProbePosition, matchedRows); + } else { + + // get address of matched positions in batch. + if (!initialAddress) { + chunksIndex.getAddress(matchedPositions, chunkIds, positionsInChunk, matchedRows); + initialAddress = true; + } + + for (int row = 0; row < matchedRows; row++) { + chunksIndex.writePositionTo(chunkIds[row], positionsInChunk[row], i, blockBuilders[col]); + } + } + + col++; + } + + assert col == blockBuilders.length; + } + + int matchInit(int[] hashCodes, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + + if (keySize == 1 || keySize == 2) { + // find matched positions for each row. + int matchedPosition = hashTable.get(hashCodes[position]); + while (matchedPosition != LIST_END) { + // for key size = 1 or 2, the number for comparison is in 64bit. + if (serializedValues[position - startProbePosition] == buildKeyChunks.getLong(0, matchedPosition)) { + break; + } + + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } else if (keySize == 3 || keySize == 4) { + // find matched positions for each row. + int matchedPosition = hashTable.get(hashCodes[position]); + while (matchedPosition != LIST_END) { + // for key size = 3 or 4, the number for comparison is in 128bit. + if (serializedValues[(position - startProbePosition) * 2] + == buildKeyChunks.getLong(0, matchedPosition * 2) + && serializedValues[(position - startProbePosition) * 2 + 1] + == buildKeyChunks.getLong(0, matchedPosition * 2 + 1)) { + break; + } + + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } else { + + // find matched positions for each row. + int matchedPosition = hashTable.get(hashCodes[position]); + while (matchedPosition != LIST_END) { + // for key size > 4, the number for comparison is in (keySize + 1) / 2 * 64 bit. 
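+                // NOTE: all (keySize + 1) / 2 words are compared with `matched &= ...`,
+                // i.e. without short-circuiting; for the small word counts involved this
+                // branch-free form is presumably cheaper than breaking out early.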
+ int multiple = (keySize + 1) / 2; + boolean matched = true; + + for (int i = 0; i < multiple; i++) { + matched &= serializedValues[(position - startProbePosition) * multiple + i] + == buildKeyChunks.getLong(0, matchedPosition * multiple + i); + } + + if (matched) { + break; + } + + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + + } + + } + + int matchNext(int current, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + + if (keySize == 1 || keySize == 2) { + int matchedPosition = positionLinks[current]; + while (matchedPosition != LIST_END) { + // for key size = 1 or 2, the number for comparison is in 64bit. + if (serializedValues[position - startProbePosition] == buildKeyChunks.getLong(0, matchedPosition)) { + break; + } + + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } else if (keySize == 3 || keySize == 4) { + int matchedPosition = positionLinks[current]; + while (matchedPosition != LIST_END) { + if (serializedValues[(position - startProbePosition) * 2] + == buildKeyChunks.getLong(0, matchedPosition * 2) + && serializedValues[(position - startProbePosition) * 2 + 1] + == buildKeyChunks.getLong(0, matchedPosition * 2 + 1)) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } else { + + // find matched positions for each row. + int matchedPosition = positionLinks[current]; + while (matchedPosition != LIST_END) { + // for key size > 4, the number for comparison is in (keySize + 1) / 2 * 64 bit. + int multiple = (keySize + 1) / 2; + boolean matched = true; + + for (int i = 0; i < multiple; i++) { + matched &= serializedValues[(position - startProbePosition) * multiple + i] + == buildKeyChunks.getLong(0, matchedPosition * multiple + i); + } + + if (matched) { + break; + } + + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + + } + } + } + + class IntProbeOperator implements ProbeOperator { + protected final boolean enableVecBuildJoinRow; + // for hash code. + protected final int[] probeKeyHashCode = new int[chunkLimit]; + protected final int[] intermediates = new int[chunkLimit]; + protected final int[] blockHashCodes = new int[chunkLimit]; + // for probe keys + protected int[] valueArray = new int[chunkLimit]; + // for null values of probe keys. 
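+        // NOTE (illustrative helper, not in this patch): SQL equi-join keys never
+        // match on NULL, so matchInit/matchNext below bail out with LIST_END for
+        // any probe position whose key is NULL; nullBitmap is indexed relative to
+        // startProbePosition. The check they share is equivalent to:
+        //   private boolean probeKeyIsNull(int position) {
+        //       return hasNull && nullBitmap.get(position - startProbePosition);
+        //   }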
+ protected boolean hasNull = false; + + // protected int[] matchedValues = new int[chunkLimit]; + protected BitSet nullBitmap = new BitSet(chunkLimit); + protected int matchedRows = 0; + protected int[] matchedPositions = new int[chunkLimit]; + protected int[] probePositions = new int[chunkLimit]; + // for chunksIndex address + protected int[] chunkIds = new int[chunkLimit]; + protected int[] positionsInChunk = new int[chunkLimit]; + protected int startProbePosition; + + protected IntProbeOperator(boolean enableVecBuildJoinRow) { + this.enableVecBuildJoinRow = enableVecBuildJoinRow; + } + + @Override + public void close() { + valueArray = null; + matchedPositions = null; + probePositions = null; + chunkIds = null; + positionsInChunk = null; + } + + @Override + public int estimateSize() { + return Integer.BYTES * chunkLimit * 5; + } + + @Override + public void nextRows() { + Preconditions.checkArgument(probeJoinKeyChunk.getBlockCount() == 1 + && probeJoinKeyChunk.getBlock(0).cast(Block.class) instanceof IntegerBlock); + final int positionCount = probeJoinKeyChunk.getPositionCount(); + + // build hash code vector + boolean useHashCodeVector = probeJoinKeyChunk.getBlock(0).cast(IntegerBlock.class).getSelection() != null; + if (useHashCodeVector) { + probeJoinKeyChunk.hashCodeVector(probeKeyHashCode, intermediates, blockHashCodes, positionCount); + } + + final int currentPosition = currentPosition(); + startProbePosition = probePosition; + // copy array from long block + Block keyBlock = probeJoinKeyChunk.getBlock(0).cast(Block.class); + keyBlock.copyToIntArray(startProbePosition, positionCount - startProbePosition, valueArray, 0, null); + + // handle nulls + hasNull = keyBlock.mayHaveNull(); + nullBitmap.clear(); + if (hasNull) { + keyBlock.collectNulls(startProbePosition, positionCount - startProbePosition, nullBitmap, 0); + } + + matchedRows = 0; + for (; probePosition < positionCount; probePosition++) { + + // reset matched flag unless it's still during matching + if (!isMatching) { + matched = false; + matchedPosition = useHashCodeVector + ? matchInit(probeKeyHashCode, probePosition) + : matchInit(probePosition); + } else { + // continue from the last processed match + matchedPosition = matchNext(matchedPosition, probePosition); + isMatching = false; + } + + for (; matchedPosition != LIST_END; + matchedPosition = matchNext(matchedPosition, probePosition)) { + + // record matched rows of [probed, matched] + matchedPositions[matchedRows] = matchedPosition; + probePositions[matchedRows] = probePosition; + matchedRows++; + + // set matched flag + matched = true; + + // check buffered data is full + if (currentPosition + matchedRows >= chunkLimit) { + buildJoinRowInBatch(buildChunks, probeChunk); + isMatching = true; + return; + } + } + + // check buffered data is full + if (currentPosition + matchedRows >= chunkLimit) { + buildJoinRowInBatch(buildChunks, probeChunk); + probePosition++; + return; + } + + } + buildJoinRowInBatch(buildChunks, probeChunk); + } + + protected void buildJoinRowInBatch(ChunksIndex chunksIndex, Chunk probeInputChunk) { + if (!enableVecBuildJoinRow) { + // first outer side, then inner side + int col = 0; + for (int i = 0; i < outerInput.getDataTypes().size(); i++) { + for (int row = 0; row < matchedRows; row++) { + int probePosition = probePositions[row]; + probeInputChunk.getBlock(i).writePositionTo(probePosition, blockBuilders[col]); + } + col++; + } + + final int rightColumns = singleJoin ? 
1 : innerInput.getDataTypes().size(); + for (int i = 0; i < rightColumns; i++) { + for (int row = 0; row < matchedRows; row++) { + chunksIndex.writePositionTo(i, matchedPositions[row], blockBuilders[col]); + } + col++; + } + return; + } + + // first outer side, then inner side + int col = 0; + for (int i = 0; i < outerInput.getDataTypes().size(); i++) { + if (blockBuilders[col] instanceof BatchBlockWriter) { + ((BatchBlockWriter) blockBuilders[col]).copyBlock(probeInputChunk.getBlock(i), probePositions, + matchedRows); + } else { + for (int row = 0; row < matchedRows; row++) { + int probePosition = probePositions[row]; + probeInputChunk.getBlock(i).writePositionTo(probePosition, blockBuilders[col]); + } + } + + col++; + } + // single join only output the first row of right side + final int rightColumns = singleJoin ? 1 : innerInput.getDataTypes().size(); + boolean initialAddress = false; + for (int i = 0; i < rightColumns; i++) { + if (innerKeyMapping[i] >= 0 && blockBuilders[col] instanceof BatchBlockWriter) { + + IntegerBlock integerBlock = new IntegerBlock(0, chunkLimit, null, valueArray); + ((BatchBlockWriter) blockBuilders[col]).copyBlock(integerBlock, probePositions, + -startProbePosition, matchedRows); + + } else { + // get address of matched positions in batch. + if (!initialAddress) { + chunksIndex.getAddress(matchedPositions, chunkIds, positionsInChunk, matchedRows); + initialAddress = true; + } + for (int row = 0; row < matchedRows; row++) { + chunksIndex.writePositionTo(chunkIds[row], positionsInChunk[row], i, blockBuilders[col]); + } + } + + col++; + } + + assert col == blockBuilders.length; + } + + int matchInit(int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + + int value = valueArray[position - startProbePosition]; + // find matched positions for each row. + int matchedPosition = hashTable.get(value); + while (matchedPosition != LIST_END) { + if (buildKeyChunks.getInt(0, matchedPosition) == value) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + int matchInit(int[] hashCodes, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + + // find matched positions for each row. + int matchedPosition = hashTable.get(hashCodes[position]); + while (matchedPosition != LIST_END) { + if (buildKeyChunks.getInt(0, matchedPosition) == valueArray[position - startProbePosition]) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + int matchNext(int current, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + + int matchedPosition = positionLinks[current]; + while (matchedPosition != LIST_END) { + if (buildKeyChunks.getInt(0, matchedPosition) == valueArray[position - startProbePosition]) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + } + + class LongProbeOperator implements ProbeOperator { + protected final boolean enableVecBuildJoinRow; + // for hash code. + protected final int[] probeKeyHashCode = new int[chunkLimit]; + protected final int[] intermediates = new int[chunkLimit]; + protected final int[] blockHashCodes = new int[chunkLimit]; + protected long[] valueArray = new long[chunkLimit]; + // for null values of probe keys. 
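+        // NOTE: like IntProbeOperator, matches are first accumulated as
+        // (probePositions[i], matchedPositions[i]) pairs and only materialized in
+        // buildJoinRowInBatch, so each output column is copied in one batch rather
+        // than row by row; chunkIds/positionsInChunk cache the decoded ChunksIndex
+        // addresses of the matched build rows.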
+ protected boolean hasNull = false; + protected BitSet nullBitmap = new BitSet(chunkLimit); + protected int matchedRows = 0; + protected int[] matchedPositions = new int[chunkLimit]; + protected int[] probePositions = new int[chunkLimit]; + // for chunksIndex address + protected int[] chunkIds = new int[chunkLimit]; + protected int[] positionsInChunk = new int[chunkLimit]; + protected int startProbePosition; + + protected LongProbeOperator(boolean enableVecBuildJoinRow) { + this.enableVecBuildJoinRow = enableVecBuildJoinRow; + } + + @Override + public void close() { + valueArray = null; + matchedPositions = null; + probePositions = null; + chunkIds = null; + positionsInChunk = null; + } + + @Override + public int estimateSize() { + return Integer.BYTES * chunkLimit * 4 + Long.BYTES * chunkLimit; + } + + @Override + public void nextRows() { + Preconditions.checkArgument(probeJoinKeyChunk.getBlockCount() == 1 + && probeJoinKeyChunk.getBlock(0).cast(Block.class) instanceof LongBlock); + final int positionCount = probeJoinKeyChunk.getPositionCount(); + + // build hash code vector + probeJoinKeyChunk.hashCodeVector(probeKeyHashCode, intermediates, blockHashCodes, positionCount); + + final int currentPosition = currentPosition(); + startProbePosition = probePosition; + + // copy array from long block + Block keyBlock = probeJoinKeyChunk.getBlock(0).cast(Block.class); + keyBlock.copyToLongArray(startProbePosition, positionCount - startProbePosition, valueArray, 0); + + // handle nulls + hasNull = keyBlock.mayHaveNull(); + nullBitmap.clear(); + if (hasNull) { + keyBlock.collectNulls(startProbePosition, positionCount - startProbePosition, nullBitmap, 0); + } + + matchedRows = 0; + for (; probePosition < positionCount; probePosition++) { + + // reset matched flag unless it's still during matching + if (!isMatching) { + matched = false; + matchedPosition = matchInit(probeKeyHashCode, probePosition); + } else { + // continue from the last processed match + matchedPosition = matchNext(matchedPosition, probePosition); + isMatching = false; + } + + for (; matchedPosition != LIST_END; + matchedPosition = matchNext(matchedPosition, probePosition)) { + + // record matched rows of [probed, matched] + matchedPositions[matchedRows] = matchedPosition; + probePositions[matchedRows] = probePosition; + matchedRows++; + + // set matched flag + matched = true; + + // check buffered data is full + if (currentPosition + matchedRows >= chunkLimit) { + buildJoinRowInBatch(buildChunks, probeChunk); + isMatching = true; + return; + } + } + + // check buffered data is full + if (currentPosition + matchedRows >= chunkLimit) { + buildJoinRowInBatch(buildChunks, probeChunk); + probePosition++; + return; + } + + } + buildJoinRowInBatch(buildChunks, probeChunk); + } + + protected void buildJoinRowInBatch(ChunksIndex chunksIndex, Chunk probeInputChunk) { + if (!enableVecBuildJoinRow) { + // first outer side, then inner side + int col = 0; + for (int i = 0; i < outerInput.getDataTypes().size(); i++) { + for (int row = 0; row < matchedRows; row++) { + int probePosition = probePositions[row]; + probeInputChunk.getBlock(i).writePositionTo(probePosition, blockBuilders[col]); + } + col++; + } + + final int rightColumns = singleJoin ? 
1 : innerInput.getDataTypes().size(); + for (int i = 0; i < rightColumns; i++) { + for (int row = 0; row < matchedRows; row++) { + chunksIndex.writePositionTo(i, matchedPositions[row], blockBuilders[col]); + } + col++; + } + return; + } + + // first outer side, then inner side + int col = 0; + for (int i = 0; i < outerInput.getDataTypes().size(); i++) { + if (blockBuilders[col] instanceof BatchBlockWriter) { + ((BatchBlockWriter) blockBuilders[col]).copyBlock(probeInputChunk.getBlock(i), probePositions, + matchedRows); + } else { + for (int row = 0; row < matchedRows; row++) { + int probePosition = probePositions[row]; + probeInputChunk.getBlock(i).writePositionTo(probePosition, blockBuilders[col]); + } + } + + col++; + } + // single join only output the first row of right side + + final int rightColumns = singleJoin ? 1 : innerInput.getDataTypes().size(); + boolean initialAddress = false; + for (int i = 0; i < rightColumns; i++) { + if (innerKeyMapping[i] >= 0 && blockBuilders[col] instanceof BatchBlockWriter) { + + LongBlock longBlock = new LongBlock(0, chunkLimit, null, valueArray); + ((BatchBlockWriter) blockBuilders[col]).copyBlock(longBlock, probePositions, + -startProbePosition, matchedRows); + } else { + // get address of matched positions in batch. + if (!initialAddress) { + chunksIndex.getAddress(matchedPositions, chunkIds, positionsInChunk, matchedRows); + initialAddress = true; + } + for (int row = 0; row < matchedRows; row++) { + chunksIndex.writePositionTo(chunkIds[row], positionsInChunk[row], i, blockBuilders[col]); + } + } + + col++; + } + + assert col == blockBuilders.length; + } + + int matchInit(int[] hashCodes, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + + // find matched positions for each row. + int matchedPosition = hashTable.get(hashCodes[position]); + while (matchedPosition != LIST_END) { + if (buildKeyChunks.getLong(0, matchedPosition) == valueArray[position - startProbePosition]) { + break; + } + + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + int matchNext(int current, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + + int matchedPosition = positionLinks[current]; + while (matchedPosition != LIST_END) { + if (buildKeyChunks.getLong(0, matchedPosition) == valueArray[position - startProbePosition]) { + break; + } + + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + } + + class ReverseAntiProbeOperator implements ProbeOperator { + + protected final Synchronizer synchronizer; + + // for hash code. 
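+        // NOTE: in a reverse anti join the build side is the outer input. Probing
+        // only marks matched build positions (rawMark below); nothing is emitted
+        // here. The unmatched build rows are output afterwards through
+        // antiJoinResultIterator (see doNextChunk above).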
+ protected final int[] probeKeyHashCode = new int[chunkLimit]; + protected final int[] intermediates = new int[chunkLimit]; + protected final int[] blockHashCodes = new int[chunkLimit]; + + protected ReverseAntiProbeOperator(Synchronizer synchronizer) { + this.synchronizer = synchronizer; + } + + @Override + public void nextRows() { + final int positionCount = probeChunk.getPositionCount(); + + // build hash code vector + probeJoinKeyChunk.hashCodeVector(probeKeyHashCode, intermediates, blockHashCodes, positionCount); + + for (; probePosition < positionCount; probePosition++) { + matchedPosition = matchInit(probeJoinKeyChunk, probeKeyHashCode, probePosition); + + for (; + matchValid(matchedPosition); + matchedPosition = matchNext(matchedPosition, probeJoinKeyChunk, probePosition)) { + if (!checkJoinCondition(buildChunks, probeChunk, probePosition, matchedPosition)) { + continue; + } + + synchronizer.getMatchedPosition().rawMark(matchedPosition); + } + } + } + + int matchInit(Chunk keyChunk, int[] hashCodes, int position) { + int hashCode = hashCodes[position]; + + int matchedPosition = hashTable.get(hashCode); + while (matchedPosition != LIST_END) { + // visit marked table first + if (!synchronizer.getMatchedPosition().hasSet(matchedPosition) && buildKeyChunks.equals(matchedPosition, + keyChunk, position)) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + int matchNext(int current, Chunk keyChunk, int position) { + int matchedPosition = positionLinks[current]; + while (matchedPosition != LIST_END) { + // visit marked table first + if (!synchronizer.getMatchedPosition().hasSet(matchedPosition) && buildKeyChunks.equals(matchedPosition, + keyChunk, position)) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + @Override + public void close() { + + } + + @Override + public int estimateSize() { + // no extra memory usage. + return 0; + } + } + + class SemiLongProbeOperator implements ProbeOperator { + + protected final boolean enableVecBuildJoinRow; + // for hash code. + protected final int[] probeKeyHashCode = new int[chunkLimit]; + protected final int[] intermediates = new int[chunkLimit]; + protected final int[] blockHashCodes = new int[chunkLimit]; + protected long[] longValueArray; + protected int startProbePosition; + // for null values of probe keys. 
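+        // NOTE: this fast path only applies when the join key is a single long
+        // column and there is no extra condition (the constructor below asserts
+        // condition == null and antiJoinOperands == null); keys are bulk-copied
+        // into longValueArray so the match loops compare primitive longs instead
+        // of going through Block accessors.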
+ protected boolean hasNull = false; + protected BitSet nullBitmap = new BitSet(chunkLimit); + + protected SemiLongProbeOperator(boolean enableVecBuildJoinRow) { + Preconditions.checkArgument(antiJoinOperands == null); + Preconditions.checkArgument(condition == null); + + this.enableVecBuildJoinRow = enableVecBuildJoinRow; + this.longValueArray = new long[chunkLimit]; + } + + @Override + public void nextRows() { + Preconditions.checkArgument(probeJoinKeyChunk.getBlockCount() == 1 + && probeJoinKeyChunk.getBlock(0).cast(Block.class) instanceof LongBlock); + final int positionCount = probeChunk.getPositionCount(); + + // build hash code vector + probeJoinKeyChunk.hashCodeVector(probeKeyHashCode, intermediates, blockHashCodes, positionCount); + + startProbePosition = probePosition; + // copy array from long block + Block keyBlock = probeJoinKeyChunk.getBlock(0).cast(Block.class); + keyBlock.copyToLongArray(startProbePosition, positionCount - startProbePosition, longValueArray, 0); + + // handle nulls + hasNull = keyBlock.mayHaveNull(); + nullBitmap.clear(); + if (hasNull) { + keyBlock.collectNulls(startProbePosition, positionCount - startProbePosition, nullBitmap, 0); + } + + for (; probePosition < positionCount; probePosition++) { + + // reset matched flag unless it's still during matching + if (!isMatching) { + matched = false; + matchedPosition = matchInit(probeKeyHashCode, probePosition); + } else { + // continue from the last processed match + matchedPosition = matchNext(matchedPosition, probePosition); + isMatching = false; + } + + for (; matchValid(matchedPosition); + matchedPosition = matchNext(matchedPosition, probePosition)) { + + // set matched flag + matched = true; + + // semi join does not care about multiple matches + break; + } + + if (matched) { + buildSemiJoinRow(probeChunk, probePosition); + } + + // check buffered data is full + if (currentPosition() >= chunkLimit) { + probePosition++; + return; + } + } + } + + private int matchInit(int[] hashCodes, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + + // find matched positions for each row. + int matchedPosition = hashTable.get(hashCodes[position]); + while (matchedPosition != LIST_END) { + if (buildKeyChunks.getLong(0, matchedPosition) == longValueArray[position - startProbePosition]) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + private int matchNext(int current, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + + int matchedPosition = positionLinks[current]; + while (matchedPosition != LIST_END) { + if (buildKeyChunks.getLong(0, matchedPosition) == longValueArray[position - startProbePosition]) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + @Override + public void close() { + longValueArray = null; + } + + @Override + public int estimateSize() { + return Integer.BYTES * chunkLimit * 3 + Long.BYTES * chunkLimit; + } + } + + /** + * Only applicable to semi/anti joins whose condition is long = long AND int <> int + */ + class SemiLongNotEqIntegerProbeOperator implements ProbeOperator { + + protected final boolean enableVecBuildJoinRow; + protected final boolean isAnti; + // for hash code. 
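+        // NOTE: this operator fuses the residual condition into the probe. A row
+        // matches when the long join keys are equal AND the two int condition
+        // columns differ (matchJoinCondition below); for SEMI a probe row is
+        // emitted when a match is found, for ANTI when none is found (the
+        // `isAnti ^ matched` test).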
+ protected final int[] probeKeyHashCode = new int[chunkLimit]; + protected final int[] intermediates = new int[chunkLimit]; + protected final int[] blockHashCodes = new int[chunkLimit]; + protected long[] longValueArray; + protected int[] intValueArray; + protected int startProbePosition; + protected int conditionProbeColIndex = -1; + // for null values of probe keys. + protected boolean hasNull = false; + protected BitSet nullBitmap = new BitSet(chunkLimit); + + protected SemiLongNotEqIntegerProbeOperator(boolean enableVecBuildJoinRow) { + if (joinType == JoinRelType.SEMI) { + this.isAnti = false; + } else if (joinType == JoinRelType.ANTI) { + this.isAnti = true; + } else { + throw new UnsupportedOperationException("JoinType not supported: " + joinType); + } + Preconditions.checkArgument(antiJoinOperands == null); + this.enableVecBuildJoinRow = enableVecBuildJoinRow; + + this.longValueArray = new long[chunkLimit]; + this.intValueArray = new int[chunkLimit]; + + conditionProbeColIndex = getProbeChunkConditionIndex(); + Preconditions.checkArgument( + conditionProbeColIndex >= 0 && conditionProbeColIndex < outerInput.getDataTypes().size(), + "Illegal Join condition probe index : " + conditionProbeColIndex); + } + + @Override + public void nextRows() { + Preconditions.checkArgument(probeJoinKeyChunk.getBlockCount() == 1 + && probeJoinKeyChunk.getBlock(0).cast(Block.class) instanceof LongBlock); + Preconditions.checkArgument( + probeChunk.getBlock(conditionProbeColIndex).cast(Block.class) instanceof IntegerBlock, + "Probe condition block should be IntegerBlock"); + final int positionCount = probeChunk.getPositionCount(); + + // build hash code vector + probeJoinKeyChunk.hashCodeVector(probeKeyHashCode, intermediates, blockHashCodes, positionCount); + + startProbePosition = probePosition; + // copy array from long block + Block keyBlock = probeJoinKeyChunk.getBlock(0).cast(Block.class); + keyBlock.copyToLongArray(startProbePosition, positionCount - startProbePosition, longValueArray, 0); + + // handle nulls + hasNull = keyBlock.mayHaveNull(); + nullBitmap.clear(); + if (hasNull) { + keyBlock.collectNulls(startProbePosition, positionCount - startProbePosition, nullBitmap, 0); + } + + probeChunk.getBlock(conditionProbeColIndex).cast(Block.class) + .copyToIntArray(startProbePosition, positionCount - startProbePosition, intValueArray, 0, null); + for (; probePosition < positionCount; probePosition++) { + + // reset matched flag unless it's still during matching + if (!isMatching) { + matched = false; + matchedPosition = matchInit(probeKeyHashCode, probePosition); + } else { + // continue from the last processed match + matchedPosition = matchNext(matchedPosition, probePosition); + isMatching = false; + } + + for (; matchValid(matchedPosition); + matchedPosition = matchNext(matchedPosition, probePosition)) { + if (!matchJoinCondition(probePosition, matchedPosition)) { + continue; + } + + // set matched flag + matched = true; + + // semi join does not care multiple matches + break; + } + + // (!isAnti && matched) || (isAnti && !matched) + if (isAnti ^ matched) { + buildSemiJoinRow(probeChunk, probePosition); + } + + // check buffered data is full + if (currentPosition() >= chunkLimit) { + probePosition++; + return; + } + } + } + + private boolean matchJoinCondition(int probePosition, int matchedPosition) { + // NotEqual + return buildChunks.getInt(0, matchedPosition) != intValueArray[probePosition - startProbePosition]; + } + + private int matchInit(int[] hashCodes, int position) { + // check null + 
if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + + // find matched positions for each row. + int matchedPosition = hashTable.get(hashCodes[position]); + while (matchedPosition != LIST_END) { + if (buildKeyChunks.getLong(0, matchedPosition) == longValueArray[position - startProbePosition]) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + private int matchNext(int current, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + + int matchedPosition = positionLinks[current]; + while (matchedPosition != LIST_END) { + if (buildKeyChunks.getLong(0, matchedPosition) == longValueArray[position - startProbePosition]) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + @Override + public void close() { + longValueArray = null; + intValueArray = null; + } + + @Override + public int estimateSize() { + return Integer.BYTES * chunkLimit * 4 + Long.BYTES * chunkLimit; + } + } + + class ReverseSemiLongProbeOperator implements ProbeOperator { + protected final Synchronizer synchronizer; + // for hash code. + protected final int[] probeKeyHashCode = new int[chunkLimit]; + protected final int[] intermediates = new int[chunkLimit]; + protected final int[] blockHashCodes = new int[chunkLimit]; + protected long[] longValueArray; + protected int startProbePosition; + // for null values of probe keys. + protected boolean hasNull = false; + protected BitSet nullBitmap = new BitSet(chunkLimit); + + protected ReverseSemiLongProbeOperator(Synchronizer synchronizer) { + Preconditions.checkArgument(condition == null, + "simple reverse semi probe operator not support other join condition"); + this.synchronizer = synchronizer; + this.longValueArray = new long[chunkLimit]; + } + + @Override + public void nextRows() { + Preconditions.checkArgument(probeJoinKeyChunk.getBlockCount() == 1 + && probeJoinKeyChunk.getBlock(0).cast(Block.class) instanceof LongBlock); + final int positionCount = probeChunk.getPositionCount(); + + // build hash code vector + probeJoinKeyChunk.hashCodeVector(probeKeyHashCode, intermediates, blockHashCodes, positionCount); + + startProbePosition = probePosition; + // copy array from long block + Block keyBlock = probeJoinKeyChunk.getBlock(0).cast(Block.class); + keyBlock.copyToLongArray(startProbePosition, positionCount - startProbePosition, longValueArray, 0); + + // handle nulls + hasNull = keyBlock.mayHaveNull(); + nullBitmap.clear(); + if (hasNull) { + keyBlock.collectNulls(startProbePosition, positionCount - startProbePosition, nullBitmap, 0); + } + + for (; probePosition < positionCount; probePosition++) { + + // reset matched flag unless it's still during matching + if (!isMatching) { + matchedPosition = matchInit(probeKeyHashCode, probePosition); + isMatching = true; + } else { + // continue from the last processed match + matchedPosition = matchNext(matchedPosition, probePosition); + } + + // if condition not match or mark failed, just return + if (!matchValid(matchedPosition) || !synchronizer.getMatchedPosition().markAndGet(matchedPosition)) { + isMatching = false; + continue; + } + + for (; matchValid(matchedPosition); + matchedPosition = matchNext(matchedPosition, probePosition)) { + + buildReverseSemiJoinRow(buildChunks, matchedPosition); + + // check buffered data is full + if (currentPosition() >= chunkLimit) { + isMatching = true; + return; + } + } + + isMatching = false; 
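+                // NOTE: isMatching persists across nextRows() calls; when the
+                // output chunk fills mid-chain, the method returns with
+                // isMatching == true and the next call resumes from matchNext
+                // instead of re-probing the hash table through matchInit.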
+ } + } + + private int matchInit(int[] hashCodes, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + + // find matched positions for each row. + int matchedPosition = hashTable.get(hashCodes[position]); + while (matchedPosition != LIST_END) { + if (buildKeyChunks.getLong(0, matchedPosition) == longValueArray[position - startProbePosition]) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + private int matchNext(int current, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + + int matchedPosition = positionLinks[current]; + while (matchedPosition != LIST_END) { + if (buildKeyChunks.getLong(0, matchedPosition) == longValueArray[position - startProbePosition]) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + @Override + public void close() { + longValueArray = null; + } + + @Override + public int estimateSize() { + return Integer.BYTES * chunkLimit + Long.BYTES * chunkLimit; + } + } + + class ReverseSemiIntProbeOperator implements ProbeOperator { + protected final Synchronizer synchronizer; + // for hash code. + protected final int[] probeKeyHashCode = new int[chunkLimit]; + protected final int[] intermediates = new int[chunkLimit]; + protected final int[] blockHashCodes = new int[chunkLimit]; + protected int[] intValueArray; + protected int startProbePosition; + // for null values of probe keys. + protected boolean hasNull = false; + protected BitSet nullBitmap = new BitSet(chunkLimit); + + protected ReverseSemiIntProbeOperator(Synchronizer synchronizer) { + Preconditions.checkArgument(condition == null, + "simple reverse semi probe operator does not support other join conditions"); + this.synchronizer = synchronizer; + this.intValueArray = new int[chunkLimit]; + } + + @Override + public void nextRows() { + Preconditions.checkArgument(probeJoinKeyChunk.getBlockCount() == 1 + && probeJoinKeyChunk.getBlock(0).cast(Block.class) instanceof IntegerBlock); + final int positionCount = probeChunk.getPositionCount(); + + // build hash code vector + probeJoinKeyChunk.hashCodeVector(probeKeyHashCode, intermediates, blockHashCodes, positionCount); + + startProbePosition = probePosition; + // copy array from integer block + Block keyBlock = probeJoinKeyChunk.getBlock(0).cast(Block.class); + keyBlock.copyToIntArray(startProbePosition, positionCount - startProbePosition, intValueArray, 0, null); + + // handle nulls + hasNull = keyBlock.mayHaveNull(); + nullBitmap.clear(); + if (hasNull) { + keyBlock.collectNulls(startProbePosition, positionCount - startProbePosition, nullBitmap, 0); + } + + for (; probePosition < positionCount; probePosition++) { + + // reset the matched flag unless we are still in the middle of a match chain + if (!isMatching) { + matchedPosition = matchInit(probeKeyHashCode, probePosition); + isMatching = true; + } else { + // continue from the last processed match + matchedPosition = matchNext(matchedPosition, probePosition); + } + + // if no match was found or marking failed, move on to the next probe row + if (!matchValid(matchedPosition) || !synchronizer.getMatchedPosition().markAndGet(matchedPosition)) { + isMatching = false; + continue; + } + + for (; matchValid(matchedPosition); + matchedPosition = matchNext(matchedPosition, probePosition)) { + + buildReverseSemiJoinRow(buildChunks, matchedPosition); + + // check whether the buffered data is full + if (currentPosition() >= chunkLimit) { + 
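+ // output buffer reached chunkLimit: keep isMatching set so the next nextRows() call resumes from this matchedPosition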
isMatching = true; + return; + } + } + + isMatching = false; + } + } + + private int matchInit(int[] hashCodes, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + // find matched positions for each row. + int matchedPosition = hashTable.get(hashCodes[position]); + while (matchedPosition != LIST_END) { + if (buildKeyChunks.getInt(0, matchedPosition) == intValueArray[position - startProbePosition]) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + private int matchNext(int current, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + int matchedPosition = positionLinks[current]; + while (matchedPosition != LIST_END) { + if (buildKeyChunks.getInt(0, matchedPosition) == intValueArray[position - startProbePosition]) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + @Override + public void close() { + intValueArray = null; + } + + @Override + public int estimateSize() { + return Integer.BYTES * chunkLimit + Long.BYTES * chunkLimit; + } + } + + class ReverseSemiLongNotEqIntegerProbeOperator implements ProbeOperator { + + protected final Synchronizer synchronizer; + // for hash code. + protected final int[] probeKeyHashCode = new int[chunkLimit]; + protected final int[] intermediates = new int[chunkLimit]; + protected final int[] blockHashCodes = new int[chunkLimit]; + protected long[] longValueArray; + protected int[] intValueArray; + protected int startProbePosition; + protected int conditionProbeColIndex = -1; + protected int conditionBuildColIndex = -1; + // for null values of probe keys. + protected boolean hasNull = false; + protected BitSet nullBitmap = new BitSet(chunkLimit); + + protected ReverseSemiLongNotEqIntegerProbeOperator(Synchronizer synchronizer) { + this.synchronizer = synchronizer; + Preconditions.checkArgument(antiJoinOperands == null); + + this.longValueArray = new long[chunkLimit]; + this.intValueArray = new int[chunkLimit]; + + conditionProbeColIndex = getProbeChunkConditionIndex(); + Preconditions.checkArgument( + conditionProbeColIndex >= 0 && conditionProbeColIndex < innerInput.getDataTypes().size(), + "Illegal Join condition probe index : " + conditionProbeColIndex); + } + + @Override + public void nextRows() { + Preconditions.checkArgument(probeJoinKeyChunk.getBlockCount() == 1 + && probeJoinKeyChunk.getBlock(0).cast(Block.class) instanceof LongBlock); + Preconditions.checkArgument( + probeChunk.getBlock(conditionProbeColIndex).cast(Block.class) instanceof IntegerBlock, + "Probe condition block should be IntegerBlock"); + final int positionCount = probeChunk.getPositionCount(); + + // build hash code vector + probeJoinKeyChunk.hashCodeVector(probeKeyHashCode, intermediates, blockHashCodes, positionCount); + + startProbePosition = probePosition; + // copy array from long block + Block keyBlock = probeJoinKeyChunk.getBlock(0).cast(Block.class); + keyBlock.copyToLongArray(startProbePosition, positionCount - startProbePosition, longValueArray, 0); + + // handle nulls + hasNull = keyBlock.mayHaveNull(); + nullBitmap.clear(); + if (hasNull) { + keyBlock.collectNulls(startProbePosition, positionCount - startProbePosition, nullBitmap, 0); + } + + probeChunk.getBlock(conditionProbeColIndex).cast(Block.class) + .copyToIntArray(startProbePosition, positionCount - startProbePosition, intValueArray, 0, null); + for (; probePosition < 
positionCount; probePosition++) { + + // reset the matched flag unless we are still in the middle of a match chain + if (!isMatching) { + matchedPosition = matchInit(probeKeyHashCode, probePosition); + isMatching = true; + } else { + // continue from the last processed match + matchedPosition = matchNext(matchedPosition, probePosition); + } + + // if no match was found, move on to the next probe row + if (!matchValid(matchedPosition)) { + isMatching = false; + continue; + } + + for (; matchValid(matchedPosition); + matchedPosition = matchNext(matchedPosition, probePosition)) { + + if (!matchJoinCondition(probePosition, matchedPosition)) { + continue; + } + + // if the CAS failed, another thread has already output this record, but keep scanning the chain + if (!synchronizer.getMatchedPosition().markAndGet(matchedPosition)) { + continue; + } + + buildReverseSemiJoinRow(buildChunks, matchedPosition); + + // check whether the buffered data is full + if (currentPosition() >= chunkLimit) { + isMatching = true; + return; + } + } + + isMatching = false; + } + } + + private boolean matchJoinCondition(int probePosition, int matchedPosition) { + // NotEqual + return buildChunks.getInt(0, matchedPosition) != intValueArray[probePosition - startProbePosition]; + } + + private int matchInit(int[] hashCodes, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + // find matched positions for each row. + int matchedPosition = hashTable.get(hashCodes[position]); + while (matchedPosition != LIST_END) { + if (buildKeyChunks.getLong(0, matchedPosition) == longValueArray[position - startProbePosition]) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + private int matchNext(int current, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + int matchedPosition = positionLinks[current]; + while (matchedPosition != LIST_END) { + if (buildKeyChunks.getLong(0, matchedPosition) == longValueArray[position - startProbePosition]) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + @Override + public void close() { + longValueArray = null; + intValueArray = null; + } + + @Override + public int estimateSize() { + return Integer.BYTES * chunkLimit * 4 + Long.BYTES * chunkLimit; + } + } + + class ReverseSemiIntNotEqIntegerProbeOperator implements ProbeOperator { + + protected final Synchronizer synchronizer; + // for hash code. + protected final int[] probeKeyHashCode = new int[chunkLimit]; + protected final int[] intermediates = new int[chunkLimit]; + protected final int[] blockHashCodes = new int[chunkLimit]; + protected int[] intValueArray2; + protected int[] intValueArray; + protected int startProbePosition; + protected int conditionProbeColIndex = -1; + protected int conditionBuildColIndex = -1; + // for null values of probe keys. 
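+ // hasNull and nullBitmap are refreshed per probe chunk: a NULL join key never matches, so matchInit/matchNext short-circuit to LIST_END for any row set in the bitmap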
+ protected boolean hasNull = false; + protected BitSet nullBitmap = new BitSet(chunkLimit); + + protected ReverseSemiIntNotEqIntegerProbeOperator(Synchronizer synchronizer) { + this.synchronizer = synchronizer; + Preconditions.checkArgument(antiJoinOperands == null); + + this.intValueArray2 = new int[chunkLimit]; + this.intValueArray = new int[chunkLimit]; + + conditionProbeColIndex = getProbeChunkConditionIndex(); + Preconditions.checkArgument( + conditionProbeColIndex >= 0 && conditionProbeColIndex < innerInput.getDataTypes().size(), + "Illegal Join condition probe index : " + conditionProbeColIndex); + } + + @Override + public void nextRows() { + Preconditions.checkArgument(probeJoinKeyChunk.getBlockCount() == 1 + && probeJoinKeyChunk.getBlock(0).cast(Block.class) instanceof IntegerBlock); + Preconditions.checkArgument( + probeChunk.getBlock(conditionProbeColIndex).cast(Block.class) instanceof IntegerBlock, + "Probe condition block should be IntegerBlock"); + final int positionCount = probeChunk.getPositionCount(); + + // build hash code vector + probeJoinKeyChunk.hashCodeVector(probeKeyHashCode, intermediates, blockHashCodes, positionCount); + + startProbePosition = probePosition; + // copy array from integer block + Block keyBlock = probeJoinKeyChunk.getBlock(0).cast(Block.class); + keyBlock.copyToIntArray(startProbePosition, positionCount - startProbePosition, intValueArray2, 0, null); + + // handle nulls + hasNull = keyBlock.mayHaveNull(); + nullBitmap.clear(); + if (hasNull) { + keyBlock.collectNulls(startProbePosition, positionCount - startProbePosition, nullBitmap, 0); + } + + probeChunk.getBlock(conditionProbeColIndex).cast(Block.class) + .copyToIntArray(startProbePosition, positionCount - startProbePosition, intValueArray, 0, null); + for (; probePosition < positionCount; probePosition++) { + + // reset the matched flag unless we are still in the middle of a match chain + if (!isMatching) { + matchedPosition = matchInit(probeKeyHashCode, probePosition); + isMatching = true; + } else { + // continue from the last processed match + matchedPosition = matchNext(matchedPosition, probePosition); + } + + // if no match was found, move on to the next probe row + if (!matchValid(matchedPosition)) { + isMatching = false; + continue; + } + + for (; matchValid(matchedPosition); + matchedPosition = matchNext(matchedPosition, probePosition)) { + + if (!matchJoinCondition(probePosition, matchedPosition)) { + continue; + } + + // if the CAS failed, another thread has already output this record, but keep scanning the chain + if (!synchronizer.getMatchedPosition().markAndGet(matchedPosition)) { + continue; + } + + buildReverseSemiJoinRow(buildChunks, matchedPosition); + + // check whether the buffered data is full + if (currentPosition() >= chunkLimit) { + isMatching = true; + return; + } + } + + isMatching = false; + } + } + + private boolean matchJoinCondition(int probePosition, int matchedPosition) { + // NotEqual + return buildChunks.getInt(0, matchedPosition) != intValueArray[probePosition - startProbePosition]; + } + + private int matchInit(int[] hashCodes, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + // find matched positions for each row. 
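+ // hashTable maps a key hash to the first matching build-side position, and positionLinks chains the positions that share a bucket until LIST_END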
+ int matchedPosition = hashTable.get(hashCodes[position]); + while (matchedPosition != LIST_END) { + if (buildKeyChunks.getInt(0, matchedPosition) == intValueArray2[position - startProbePosition]) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + private int matchNext(int current, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + int matchedPosition = positionLinks[current]; + while (matchedPosition != LIST_END) { + if (buildKeyChunks.getInt(0, matchedPosition) == intValueArray2[position - startProbePosition]) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + @Override + public void close() { + intValueArray2 = null; + intValueArray = null; + } + + @Override + public int estimateSize() { + return Integer.BYTES * chunkLimit * 4 + Long.BYTES * chunkLimit; + } + } + + class ReverseAntiIntegerProbeOperator implements ProbeOperator { + + protected final Synchronizer synchronizer; + // for hash code. + protected final int[] probeKeyHashCode = new int[chunkLimit]; + protected final int[] intermediates = new int[chunkLimit]; + protected final int[] blockHashCodes = new int[chunkLimit]; + protected int[] intValueArray; + protected int startProbePosition; + // for null values of probe keys. + protected boolean hasNull = false; + protected BitSet nullBitmap = new BitSet(chunkLimit); + + protected ReverseAntiIntegerProbeOperator(Synchronizer synchronizer) { + this.synchronizer = synchronizer; + this.intValueArray = new int[chunkLimit]; + } + + @Override + public void nextRows() { + Preconditions.checkArgument(probeJoinKeyChunk.getBlockCount() == 1 + && probeJoinKeyChunk.getBlock(0).cast(Block.class) instanceof IntegerBlock); + final int positionCount = probeChunk.getPositionCount(); + + // build hash code vector + probeJoinKeyChunk.hashCodeVector(probeKeyHashCode, intermediates, blockHashCodes, positionCount); + + startProbePosition = probePosition; + // copy array from integer block + Block keyBlock = probeJoinKeyChunk.getBlock(0).cast(Block.class); + keyBlock.copyToIntArray(startProbePosition, positionCount - startProbePosition, intValueArray, 0, null); + + // handle nulls + hasNull = keyBlock.mayHaveNull(); + nullBitmap.clear(); + if (hasNull) { + keyBlock.collectNulls(startProbePosition, positionCount - startProbePosition, nullBitmap, 0); + } + + for (; probePosition < positionCount; probePosition++) { + matchedPosition = matchInit(probeKeyHashCode, probePosition); + // if the CAS failed, another thread has already marked all matched records + if (!matchValid(matchedPosition) || !synchronizer.getMatchedPosition().markAndGet(matchedPosition)) { + continue; + } + + for (; + matchValid(matchedPosition); + matchedPosition = matchNext(matchedPosition, probePosition)) { + synchronizer.getMatchedPosition().rawMark(matchedPosition); + } + } + } + + private int matchInit(int[] hashCodes, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + // find matched positions for each row. 
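+ // for reverse anti join, the probe phase only marks matched build rows; the unmatched build rows are presumably emitted elsewhere once all probes have finished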
+ int matchedPosition = hashTable.get(hashCodes[position]); + while (matchedPosition != LIST_END) { + if (buildKeyChunks.getInt(0, matchedPosition) == intValueArray[position - startProbePosition]) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + private int matchNext(int current, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + int matchedPosition = positionLinks[current]; + while (matchedPosition != LIST_END) { + if (buildKeyChunks.getInt(0, matchedPosition) == intValueArray[position - startProbePosition]) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + @Override + public void close() { + intValueArray = null; + } + + @Override + public int estimateSize() { + return Integer.BYTES * chunkLimit * 4; + } + + } + + class ReverseAntiLongNotEqIntegerProbeOperator implements ProbeOperator { + + protected final Synchronizer synchronizer; + // for hash code. + protected final int[] probeKeyHashCode = new int[chunkLimit]; + protected final int[] intermediates = new int[chunkLimit]; + protected final int[] blockHashCodes = new int[chunkLimit]; + protected long[] longValueArray; + protected int[] intValueArray; + protected int startProbePosition; + protected int conditionProbeColIndex = -1; + protected int conditionBuildColIndex = -1; + // for null values of probe keys. + protected boolean hasNull = false; + protected BitSet nullBitmap = new BitSet(chunkLimit); + + protected ReverseAntiLongNotEqIntegerProbeOperator(Synchronizer synchronizer) { + this.synchronizer = synchronizer; + Preconditions.checkArgument(antiJoinOperands == null); + + this.longValueArray = new long[chunkLimit]; + this.intValueArray = new int[chunkLimit]; + + conditionProbeColIndex = getProbeChunkConditionIndex(); + Preconditions.checkArgument( + conditionProbeColIndex >= 0 && conditionProbeColIndex < innerInput.getDataTypes().size(), + "Illegal Join condition probe index : " + conditionProbeColIndex); + } + + @Override + public void nextRows() { + Preconditions.checkArgument(probeJoinKeyChunk.getBlockCount() == 1 + && probeJoinKeyChunk.getBlock(0).cast(Block.class) instanceof LongBlock); + Preconditions.checkArgument( + probeChunk.getBlock(conditionProbeColIndex).cast(Block.class) instanceof IntegerBlock, + "Probe condition block should be IntegerBlock"); + final int positionCount = probeChunk.getPositionCount(); + + // build hash code vector + probeJoinKeyChunk.hashCodeVector(probeKeyHashCode, intermediates, blockHashCodes, positionCount); + + startProbePosition = probePosition; + // copy array from long block + Block keyBlock = probeJoinKeyChunk.getBlock(0).cast(Block.class); + keyBlock.copyToLongArray(startProbePosition, positionCount - startProbePosition, longValueArray, 0); + + // handle nulls + hasNull = keyBlock.mayHaveNull(); + nullBitmap.clear(); + if (hasNull) { + keyBlock.collectNulls(startProbePosition, positionCount - startProbePosition, nullBitmap, 0); + } + + probeChunk.getBlock(conditionProbeColIndex).cast(Block.class) + .copyToIntArray(startProbePosition, positionCount - startProbePosition, intValueArray, 0, null); + for (; probePosition < positionCount; probePosition++) { + matchedPosition = matchInit(probeKeyHashCode, probePosition); + + for (; matchValid(matchedPosition); + matchedPosition = matchNext(matchedPosition, probePosition)) { + if (!matchJoinCondition(probePosition, matchedPosition)) { + continue; + } + + 
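+ // rawMark (no CAS) suffices here: anti join only needs the fact that this build row matched, not an exclusive winner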
synchronizer.getMatchedPosition().rawMark(matchedPosition); + } + } + + private boolean matchJoinCondition(int probePosition, int matchedPosition) { + // NotEqual + return buildChunks.getInt(0, matchedPosition) != intValueArray[probePosition - startProbePosition]; + } + + int matchInit(int[] hashCodes, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + int matchedPosition = hashTable.get(hashCodes[position]); + while (matchedPosition != LIST_END) { + // skip build rows already marked as matched + if (buildKeyChunks.getLong(0, matchedPosition) == longValueArray[position - startProbePosition] && + !synchronizer.getMatchedPosition().hasSet(matchedPosition)) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + int matchNext(int current, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + int matchedPosition = positionLinks[current]; + while (matchedPosition != LIST_END) { + // skip build rows already marked as matched + if (buildKeyChunks.getLong(0, matchedPosition) == longValueArray[position - startProbePosition] && + !synchronizer.getMatchedPosition().hasSet(matchedPosition)) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + @Override + public void close() { + longValueArray = null; + intValueArray = null; + } + + @Override + public int estimateSize() { + return Integer.BYTES * chunkLimit * 4 + Long.BYTES * chunkLimit; + } + } + + class ReverseAntiIntNotEqIntegerProbeOperator implements ProbeOperator { + + protected final Synchronizer synchronizer; + // for hash code. + protected final int[] probeKeyHashCode = new int[chunkLimit]; + protected final int[] intermediates = new int[chunkLimit]; + protected final int[] blockHashCodes = new int[chunkLimit]; + protected int[] int2ValueArray; + protected int[] intValueArray; + protected int startProbePosition; + protected int conditionProbeColIndex = -1; + // for null values of probe keys. 
+ protected boolean hasNull = false; + protected BitSet nullBitmap = new BitSet(chunkLimit); + + protected ReverseAntiIntNotEqIntegerProbeOperator(Synchronizer synchronizer) { + this.synchronizer = synchronizer; + Preconditions.checkArgument(antiJoinOperands == null); + + this.int2ValueArray = new int[chunkLimit]; + this.intValueArray = new int[chunkLimit]; + + conditionProbeColIndex = getProbeChunkConditionIndex(); + Preconditions.checkArgument( + conditionProbeColIndex >= 0 && conditionProbeColIndex < innerInput.getDataTypes().size(), + "Illegal Join condition probe index : " + conditionProbeColIndex); + } + + @Override + public void nextRows() { + Preconditions.checkArgument(probeJoinKeyChunk.getBlockCount() == 1 + && probeJoinKeyChunk.getBlock(0).cast(Block.class) instanceof IntegerBlock); + Preconditions.checkArgument( + probeChunk.getBlock(conditionProbeColIndex).cast(Block.class) instanceof IntegerBlock, + "Probe condition block should be IntegerBlock"); + final int positionCount = probeChunk.getPositionCount(); + + // build hash code vector + probeJoinKeyChunk.hashCodeVector(probeKeyHashCode, intermediates, blockHashCodes, positionCount); + + startProbePosition = probePosition; + // copy array from integer block + Block keyBlock = probeJoinKeyChunk.getBlock(0).cast(Block.class); + keyBlock.copyToIntArray(startProbePosition, positionCount - startProbePosition, int2ValueArray, 0, null); + + // handle nulls + hasNull = keyBlock.mayHaveNull(); + nullBitmap.clear(); + if (hasNull) { + keyBlock.collectNulls(startProbePosition, positionCount - startProbePosition, nullBitmap, 0); + } + + probeChunk.getBlock(conditionProbeColIndex).cast(Block.class) + .copyToIntArray(startProbePosition, positionCount - startProbePosition, intValueArray, 0, null); + for (; probePosition < positionCount; probePosition++) { + matchedPosition = matchInit(probeKeyHashCode, probePosition); + + for (; matchValid(matchedPosition); + matchedPosition = matchNext(matchedPosition, probePosition)) { + if (!matchJoinCondition(probePosition, matchedPosition)) { + continue; + } + + synchronizer.getMatchedPosition().rawMark(matchedPosition); + } + } + } + + private boolean matchJoinCondition(int probePosition, int matchedPosition) { + // NotEqual + return buildChunks.getInt(0, matchedPosition) != intValueArray[probePosition - startProbePosition]; + } + + int matchInit(int[] hashCodes, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + + int matchedPosition = hashTable.get(hashCodes[position]); + while (matchedPosition != LIST_END) { + // skip build rows already marked as matched + if (buildKeyChunks.getInt(0, matchedPosition) == int2ValueArray[position - startProbePosition] && + !synchronizer.getMatchedPosition().hasSet(matchedPosition)) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + int matchNext(int current, int position) { + // check null + if (hasNull && nullBitmap.get(position - startProbePosition)) { + return LIST_END; + } + + int matchedPosition = positionLinks[current]; + while (matchedPosition != LIST_END) { + // skip build rows already marked as matched + if (buildKeyChunks.getInt(0, matchedPosition) == int2ValueArray[position - startProbePosition] && + !synchronizer.getMatchedPosition().hasSet(matchedPosition)) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + @Override + public void close() { + int2ValueArray = null; + intValueArray = null; + } + + @Override + public 
int estimateSize() { + return Integer.BYTES * chunkLimit * 4 + Long.BYTES * chunkLimit; + } + + } +} \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractJoinExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractJoinExec.java index 53083ccb0..ef6ac3a84 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractJoinExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractJoinExec.java @@ -17,11 +17,14 @@ package com.alibaba.polardbx.executor.operator; import com.alibaba.polardbx.common.datatype.UInt64; +import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.executor.chunk.Block; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.chunk.ChunkConverter; import com.alibaba.polardbx.executor.chunk.Converters; +import com.alibaba.polardbx.executor.mpp.operator.DriverContext; import com.alibaba.polardbx.executor.operator.util.ChunksIndex; +import com.alibaba.polardbx.executor.operator.util.ObjectPools; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; @@ -33,7 +36,10 @@ import org.apache.calcite.rel.core.JoinRelType; import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; import java.util.List; +import java.util.Map; /** * Abstract Join Executor @@ -64,6 +70,13 @@ abstract class AbstractJoinExec extends AbstractExecutor { final List ignoreNullBlocks = new ArrayList<>(); + protected int[] innerKeyMapping; + + protected boolean useVecJoin; + protected boolean enableVecBuildJoinRow; + + protected boolean shouldRecycle; + AbstractJoinExec(Executor outerInput, Executor innerInput, JoinRelType joinType, @@ -86,11 +99,23 @@ abstract class AbstractJoinExec extends AbstractExecutor { this.outerJoin = joinType == JoinRelType.LEFT || joinType == JoinRelType.RIGHT; this.semiJoin = (joinType == JoinRelType.SEMI || joinType == JoinRelType.ANTI) && !maxOneRow; + this.useVecJoin = context.getParamManager().getBoolean(ConnectionParams.ENABLE_VEC_JOIN); + this.enableVecBuildJoinRow = context.getParamManager().getBoolean(ConnectionParams.ENABLE_VEC_BUILD_JOIN_ROW); + this.shouldRecycle = context.getParamManager().getBoolean(ConnectionParams.ENABLE_DRIVER_OBJECT_POOL); + + // mapping from key columns to its ref index in chunk. 
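+ // innerKeyMapping[columnIndex] holds the position of that column in the join-key list, or -1 when the column is not part of the key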
+ this.innerKeyMapping = new int[this.innerInput.getDataTypes().size()]; + Arrays.fill(innerKeyMapping, -1); + if (joinKeys != null) { DataType[] keyColumnTypes = joinKeys.stream().map(t -> t.getUnifiedType()).toArray(DataType[]::new); int[] outerKeyColumns = joinKeys.stream().mapToInt(t -> t.getOuterIndex()).toArray(); int[] innerKeyColumns = joinKeys.stream().mapToInt(t -> t.getInnerIndex()).toArray(); + for (int i = 0; i < innerKeyColumns.length; i++) { + innerKeyMapping[innerKeyColumns[i]] = i; + } + outerKeyChunkGetter = Converters.createChunkConverter( outerInput.getDataTypes(), outerKeyColumns, keyColumnTypes, context); innerKeyChunkGetter = Converters.createChunkConverter( @@ -256,4 +281,13 @@ protected boolean checkAntiJoinConditionHasNull( ChunksIndex chunksIndex, Chunk outerChunk, int outerPosition, int innerPosition) { return false; } + + interface ProbeOperator { + void nextRows(); + + void close(); + + int estimateSize(); + } + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractOSSTableScanExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractOSSTableScanExec.java index fdc22f24c..518ca97d6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractOSSTableScanExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AbstractOSSTableScanExec.java @@ -33,7 +33,8 @@ public AbstractOSSTableScanExec(ExecutionContext context) { super(context); } - public static AbstractOSSTableScanExec create(OSSTableScan ossTableScan, ExecutionContext context, List dataTypeList) { + public static AbstractOSSTableScanExec create(OSSTableScan ossTableScan, ExecutionContext context, + List dataTypeList) { final boolean useBufferPool = context.getParamManager().getBoolean(ConnectionParams.ENABLE_OSS_BUFFER_POOL); AbstractOSSTableScanExec exec; if (useBufferPool) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AsyncOSSTableScanExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AsyncOSSTableScanExec.java index 811e61e48..69243499c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AsyncOSSTableScanExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/AsyncOSSTableScanExec.java @@ -74,6 +74,7 @@ public AsyncOSSTableScanExec(OSSTableScan ossTableScan, ExecutionContext context this.resultSetHandler = new SimpleOSSPhysicalTableReadResult(inProjectDataTypeList, context, ossTableScan); } + @Override public synchronized void initWaitFuture(ListenableFuture> listListenableFuture) { this.client.initWaitFuture(listListenableFuture); } @@ -102,7 +103,6 @@ void doOpen() { // open prefetch threads client.executePrefetchThread(); - createBlockBuilders(); } @@ -110,7 +110,11 @@ void doOpen() { Chunk doSourceNextChunk() { long fetchStartNano = System.nanoTime(); try { - return fetchChunk(); + Chunk chunk = fetchChunk(); + if (chunk == null || chunk.getPositionCount() == 0) { + return null; + } + return chunk; } finally { fetchTimeCost.addAndGet(System.nanoTime() - fetchStartNano); } @@ -132,10 +136,23 @@ private Chunk fetchChunk() { } // if there are results in a chunk(using statistics), return the result directly - if (resultFromOSS.isChunk()) { + if (resultFromOSS.isChunk() && !resultFromOSS.isDelta()) { return resultFromOSS.getChunk(); } + // read chunk result from delta cache + if (resultFromOSS.isChunk() && resultFromOSS.isDelta()) { + 
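+ // selection[0..selSize) appears to list the row positions that survived delta filtering; doConsumeChunk applies it while copying rows out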
Chunk result = resultFromOSS.getChunk(); + if (result == null) { + // should be impossible. + return null; + } + + int selSize = resultFromOSS.getSelSize(); + int[] selection = resultFromOSS.getSelection(); + return doConsumeChunk(result, selSize, selection); + } + + // fetch the IO results. VectorizedRowBatch batch = resultFromOSS.getBatch(); if (batch == null) { @@ -143,8 +160,10 @@ private Chunk fetchChunk() { // Driver call the is_blocked. return null; } - return doConsumeBatch(batch, - resultFromOSS.getOssColumnTransformer()); + int selSize = resultFromOSS.getSelSize(); + int[] selection = resultFromOSS.getSelection(); + return doConsumeBatch( + batch, resultFromOSS.getOssColumnTransformer(), selSize, selection); } } finally { // restore the buffer to producer. @@ -161,12 +180,33 @@ private Chunk fetchChunk() { while ((resultFromOSS = client.popResult()) != null) { try { // if there are results in a chunk(using statistics), return the result directly - if (resultFromOSS.isChunk()) { + if (resultFromOSS.isChunk() && !resultFromOSS.isDelta()) { return resultFromOSS.getChunk(); } + + // read chunk result from delta cache + if (resultFromOSS.isChunk() && resultFromOSS.isDelta()) { + Chunk result = resultFromOSS.getChunk(); + if (result == null) { + // should be impossible. + return null; + } + + int selSize = resultFromOSS.getSelSize(); + int[] selection = resultFromOSS.getSelection(); + return doConsumeChunk(result, selSize, selection); + } + + // fetch the IO results. - return doConsumeBatch(resultFromOSS.getBatch(), - resultFromOSS.getOssColumnTransformer()); + VectorizedRowBatch batch = resultFromOSS.getBatch(); + if (batch == null) { + // impossible. + return null; + } + int selSize = resultFromOSS.getSelSize(); + int[] selection = resultFromOSS.getSelection(); + return doConsumeBatch( + batch, resultFromOSS.getOssColumnTransformer(), selSize, selection); } finally { // restore the buffer to producer. if (resultFromOSS != null && resultFromOSS.shouldRecycle()) { @@ -187,15 +227,36 @@ private Chunk fetchChunk() { @Nullable private Chunk doConsumeBatch(VectorizedRowBatch batch, - OSSColumnTransformer ossColumnTransformer) { + OSSColumnTransformer ossColumnTransformer, + int selSize, int[] selection) { Chunk chunk; if (condition == null) { // for unconditional table scan - chunk = resultSetHandler.next(batch, ossColumnTransformer, inProjectDataTypeList, blockBuilders, context); + chunk = resultSetHandler.next(batch, ossColumnTransformer, inProjectDataTypeList, blockBuilders, context, + selSize, selection); } else { // for conditional table scan chunk = resultSetHandler.next(batch, ossColumnTransformer, inProjectDataTypeList, blockBuilders, condition, - preAllocatedChunk, filterBitmap, outProject, context); + preAllocatedChunk, filterBitmap, outProject, context, selSize, selection); + } + + if (chunk != null) { + // reset block builders. 
+ reset(); + return chunk; + } + return null; + } + + private Chunk doConsumeChunk(Chunk inputChunk, int selSize, int[] selection) { + Chunk chunk; + if (condition == null) { + // for unconditional table scan + chunk = resultSetHandler.next(inputChunk, blockBuilders, context, selSize, selection); + } else { + // for conditional table scan + chunk = resultSetHandler.next(inputChunk, inProjectDataTypeList, condition, + preAllocatedChunk, filterBitmap, outProject, context, selSize, selection); } if (chunk != null) { @@ -236,6 +297,7 @@ public VectorizedExpression getCondition() { return condition; } + @Override public void setCondition(VectorizedExpression condition) { this.condition = condition; } @@ -244,6 +306,7 @@ public MutableChunk getPreAllocatedChunk() { return preAllocatedChunk; } + @Override public void setPreAllocatedChunk(MutableChunk preAllocatedChunk) { this.preAllocatedChunk = preAllocatedChunk; } @@ -252,6 +315,7 @@ public int[] getFilterBitmap() { return filterBitmap; } + @Override public void setFilterBitmap(int[] filterBitmap) { this.filterBitmap = filterBitmap; } @@ -260,6 +324,7 @@ public int[] getOutProject() { return outProject; } + @Override public void setOutProject(int[] outProject) { this.outProject = outProject; } @@ -268,6 +333,7 @@ public List> getFilterInputTypes() { return filterInputTypes; } + @Override public void setFilterInputTypes(List> filterInputTypes) { this.filterInputTypes = filterInputTypes; } @@ -276,6 +342,7 @@ public List> getFilterOutputTypes() { return filterOutputTypes; } + @Override public void setFilterOutputTypes(List> filterOutputTypes) { this.filterOutputTypes = filterOutputTypes; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/BucketDivideChunkBuffer.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/BucketDivideChunkBuffer.java index 9cdd3b3c6..0f82ae66f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/BucketDivideChunkBuffer.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/BucketDivideChunkBuffer.java @@ -16,7 +16,7 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.util.concurrent.ListenableFuture; +import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.chunk.ChunkBuilder; import com.alibaba.polardbx.executor.chunk.ChunkConverter; @@ -24,6 +24,7 @@ import com.alibaba.polardbx.executor.mpp.operator.PartitionFunction; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.google.common.util.concurrent.ListenableFuture; import it.unimi.dsi.fastutil.ints.IntArrayList; import it.unimi.dsi.fastutil.ints.IntList; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/CacheCursor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/CacheCursor.java index ed5d47e05..547823c60 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/CacheCursor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/CacheCursor.java @@ -16,10 +16,6 @@ package com.alibaba.polardbx.executor.operator; -import com.alibaba.polardbx.optimizer.core.row.ArrayRow; -import com.alibaba.polardbx.optimizer.spill.QuerySpillSpaceMonitor; -import com.google.common.collect.Lists; -import com.google.common.util.concurrent.ListenableFuture; 
import com.alibaba.polardbx.common.exception.MemoryNotEnoughException; import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; import com.alibaba.polardbx.common.properties.ConnectionParams; @@ -38,11 +34,15 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.CursorMeta; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.row.ArrayRow; import com.alibaba.polardbx.optimizer.core.row.Row; import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; import com.alibaba.polardbx.optimizer.memory.MemoryPool; import com.alibaba.polardbx.optimizer.memory.MemoryType; +import com.alibaba.polardbx.optimizer.spill.QuerySpillSpaceMonitor; import com.alibaba.polardbx.optimizer.spill.SpillMonitor; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.ListenableFuture; import java.util.ArrayList; import java.util.Iterator; @@ -65,7 +65,7 @@ public class CacheCursor implements Cursor { protected Spiller spiller; protected int chunkLimit; private BlockBuilder[] blockBuilders; - private Iterator iterator; + protected Iterator iterator; protected Chunk currentChunk; protected int currentPos; protected CursorMeta cursorMeta; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/CacheExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/CacheExec.java index ce2b154cb..3b35eada1 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/CacheExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/CacheExec.java @@ -16,14 +16,14 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.collect.Lists; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; import com.alibaba.polardbx.optimizer.memory.MemoryPool; import com.alibaba.polardbx.optimizer.memory.MemoryPoolUtils; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.ListenableFuture; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ColumnarDeletedScanExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ColumnarDeletedScanExec.java new file mode 100644 index 000000000..e7b19e4b2 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ColumnarDeletedScanExec.java @@ -0,0 +1,73 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.operator; + +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.executor.gms.ColumnarManager; +import com.alibaba.polardbx.executor.mpp.split.OssSplit; +import com.alibaba.polardbx.executor.operator.scan.impl.DefaultScanPreProcessor; +import com.alibaba.polardbx.executor.operator.scan.impl.DeletedScanPreProcessor; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.rel.OSSTableScan; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.jetbrains.annotations.NotNull; + +import java.util.List; + +public class ColumnarDeletedScanExec extends ColumnarScanExec { + + public ColumnarDeletedScanExec(OSSTableScan ossTableScan, + ExecutionContext context, + List outputDataTypes) { + super(ossTableScan, context, outputDataTypes); + // TODO: add a validation process to forbid ossTableScan containing filter get here. + } + + @Override + @NotNull + protected DefaultScanPreProcessor getPreProcessor(OssSplit ossSplit, + String logicalSchema, + String logicalTableName, + TableMeta tableMeta, + FileSystem fileSystem, + Configuration configuration, + ColumnarManager columnarManager) { + return new DefaultScanPreProcessor( + configuration, fileSystem, + + // for pruning + logicalSchema, + logicalTableName, + enableIndexPruning, + context.getParamManager().getBoolean(ConnectionParams.ENABLE_OSS_COMPATIBLE), + tableMeta.getAllColumns(), + ossTableScan.getOrcNode().getOriFilters(), + ossSplit.getParams(), + + // for mock + DEFAULT_GROUPS_RATIO, + DEFAULT_DELETION_RATIO, + + // for columnar mode. + columnarManager, + ossSplit.getCheckpointTso(), + tableMeta.getColumnarFieldIdList()); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ColumnarScanExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ColumnarScanExec.java new file mode 100644 index 000000000..aa1b39778 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ColumnarScanExec.java @@ -0,0 +1,654 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.operator; + +import com.alibaba.polardbx.common.Engine; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.common.utils.thread.NamedThreadFactory; +import com.alibaba.polardbx.executor.archive.reader.OSSColumnTransformer; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.ddl.job.task.basic.oss.OSSTaskUtils; +import com.alibaba.polardbx.executor.gms.ColumnarManager; +import com.alibaba.polardbx.executor.mpp.metadata.Split; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFManager; +import com.alibaba.polardbx.executor.mpp.split.OssSplit; +import com.alibaba.polardbx.executor.operator.scan.BlockCacheManager; +import com.alibaba.polardbx.executor.operator.scan.ColumnarSplit; +import com.alibaba.polardbx.executor.operator.scan.IOStatus; +import com.alibaba.polardbx.executor.operator.scan.LazyEvaluator; +import com.alibaba.polardbx.executor.operator.scan.ScanPreProcessor; +import com.alibaba.polardbx.executor.operator.scan.ScanState; +import com.alibaba.polardbx.executor.operator.scan.ScanWork; +import com.alibaba.polardbx.executor.operator.scan.WorkPool; +import com.alibaba.polardbx.executor.operator.scan.impl.CsvColumnarSplit; +import com.alibaba.polardbx.executor.operator.scan.impl.DefaultLazyEvaluator; +import com.alibaba.polardbx.executor.operator.scan.impl.DefaultScanPreProcessor; +import com.alibaba.polardbx.executor.operator.scan.impl.MorselColumnarSplit; +import com.alibaba.polardbx.executor.operator.scan.impl.SimpleWorkPool; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.alibaba.polardbx.executor.vectorized.build.InputRefTypeChecker; +import com.alibaba.polardbx.gms.engine.FileSystemManager; +import com.alibaba.polardbx.gms.engine.FileSystemUtils; +import com.alibaba.polardbx.optimizer.config.table.FileMeta; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.rel.OSSTableScan; +import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; +import com.alibaba.polardbx.optimizer.memory.MemoryPool; +import com.alibaba.polardbx.optimizer.memory.MemoryPoolUtils; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import org.apache.calcite.rex.RexDynamicParam; +import org.apache.calcite.rex.RexNode; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + +import java.text.MessageFormat; +import java.util.ArrayList; +import java.util.BitSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TimeZone; +import java.util.TreeMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +// columnar table scan exec. 
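+// Lifecycle (as read from the code below): addSplit() expands each OssSplit into ColumnarSplits in the work pool, doOpen() starts the ScanPreProcessor, and doSourceNextChunk() drains Chunks from the current ScanWork's IOStatus, switching works via tryInvokeNext().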
+public class ColumnarScanExec extends SourceExec { + private static final Logger LOGGER = LoggerFactory.getLogger("oss"); + private static final int CPU_CORES = Runtime.getRuntime().availableProcessors(); + + private static final ExecutorService IO_EXECUTOR = + Executors.newFixedThreadPool(CPU_CORES * CPU_CORES, new NamedThreadFactory( + "columnar-io" + )); + + private static final ExecutorService SCAN_EXECUTOR = + Executors.newFixedThreadPool(CPU_CORES * CPU_CORES, new NamedThreadFactory( + "columnar-scan" + )); + private static final AtomicLong SNAPSHOT_FILE_ACCESS_COUNT = new AtomicLong(0); + + public static final double DEFAULT_RATIO = .3D; + public static final double DEFAULT_GROUPS_RATIO = 1D; + public static final double DEFAULT_DELETION_RATIO = 0D; + + protected OSSTableScan ossTableScan; + private List outputDataTypes; + + // Shared by all scan operators of one partition in one server node. + private WorkPool workPool; + private ExecutorService scanExecutor; + + private ScanPreProcessor preProcessor; + private ListenableFuture preProcessorFuture; + private Supplier> blockedSupplier; + + /** + * The running scan work. + */ + private volatile ScanWork currentWork; + private volatile boolean lastWorkNotExecutable; + private volatile boolean noAvailableWork; + private Map> finishedWorks; + private List splitList; + + /** + * NOTE: The current IO status does not necessarily come from the current scan work. + * Before t1: the 1st scan work is running. + * Before t2: the results of the 1st IO status have run out. + * Between t1 and t2, we are already running the 2nd scan work + * but still reading the results from the 1st IO status. + * sequence diagram: + * O-----------------t1--------t2--------------------> t + * ______1st work____|_____________2nd work___________ + * ____ read 1st IO status_____|__ read 2nd IO status__ + */ + private volatile IOStatus currentIOStatus; + private volatile boolean lastStatusRunOut; + + // To manage the session-level variables. + private boolean useVerboseMetricsReport; + private boolean enableMetrics; + protected boolean enableIndexPruning; + private boolean enableDebug; + + // memory management. + private MemoryPool memoryPool; + private MemoryAllocatorCtx memoryAllocator; + + // plan fragment level runtime filter manager. 
+ private volatile FragmentRFManager fragmentRFManager; + + public ColumnarScanExec(OSSTableScan ossTableScan, ExecutionContext context, List outputDataTypes) { + super(context); + this.ossTableScan = ossTableScan; + this.outputDataTypes = outputDataTypes; + + this.scanExecutor = SCAN_EXECUTOR; + + this.workPool = new SimpleWorkPool(); + this.finishedWorks = new TreeMap<>(String::compareTo); + + // status of ScanWork + this.lastWorkNotExecutable = true; + this.noAvailableWork = false; + + // status of IOStatus + this.lastStatusRunOut = true; + + this.useVerboseMetricsReport = + context.getParamManager().getBoolean(ConnectionParams.ENABLE_VERBOSE_METRICS_REPORT); + this.enableMetrics = + context.getParamManager().getBoolean(ConnectionParams.ENABLE_COLUMNAR_METRICS); + this.enableIndexPruning = + context.getParamManager().getBoolean(ConnectionParams.ENABLE_INDEX_PRUNING); + this.enableDebug = LOGGER.isDebugEnabled(); + this.splitList = new ArrayList<>(); + + this.memoryPool = MemoryPoolUtils + .createOperatorTmpTablePool("ColumnarScanExec@" + System.identityHashCode(this), + context.getMemoryPool()); + this.memoryAllocator = memoryPool.getMemoryAllocatorCtx(); + } + + public void setFragmentRFManager(FragmentRFManager fragmentRFManager) { + this.fragmentRFManager = fragmentRFManager; + } + + private List getOrcFiles(OssSplit ossSplit) { + List fileNames = ossSplit.getDesignatedFile(); + + if (fileNames != null && ossTableScan.getFlashback() instanceof RexDynamicParam) { + String timestampString = context.getParams().getCurrentParameter() + .get(((RexDynamicParam) ossTableScan.getFlashback()).getIndex() + 1).getValue().toString(); + TimeZone fromTimeZone; + if (context.getTimeZone() != null) { + fromTimeZone = context.getTimeZone().getTimeZone(); + } else { + fromTimeZone = TimeZone.getDefault(); + } + + long readTs = OSSTaskUtils.getTsFromTimestampWithTimeZone(timestampString, fromTimeZone); + TableMeta tableMeta = context.getSchemaManager(ossSplit.getLogicalSchema()).getTable( + ossSplit.getLogicalTableName()); + Set filterSet = ossSplit.getFilterSet(context); + Map> flatFileMetas = tableMeta.getFlatFileMetas(); + + return ossSplit.getPhyTableNameList().stream() + .map(flatFileMetas::get) + .flatMap(List::stream) + .filter(x -> { + if (filterSet == null || filterSet.contains(x.getFileName())) { + if (readTs < x.getCommitTs()) { + // not committed yet at this ts + return false; + } + // not removed yet at this ts + return x.getRemoveTs() == null || readTs <= x.getRemoveTs(); + } else { + // not designated for this split + return false; + } + }) + .map(FileMeta::getFileName) + .collect(Collectors.toList()); + } else { + return fileNames; + } + } + + @Override + public void addSplit(Split split) { + splitList.add(split); + OssSplit ossSplit = (OssSplit) split.getConnectorSplit(); + List orcFileNames = getOrcFiles(ossSplit); + + // partition info of this split + int partNum = ossSplit.getPartIndex(); + int nodePartCount = ossSplit.getNodePartCount(); + + // base info of table meta. + String logicalSchema = ossSplit.getLogicalSchema(); + String logicalTableName = ossSplit.getLogicalTableName(); + TableMeta tableMeta = context.getSchemaManager(logicalSchema).getTable(logicalTableName); + + // To distinguish from + final boolean isColumnar = tableMeta.isColumnar(); + + // engine and filesystem for files in split. 
+ Engine engine = tableMeta.getEngine(); + FileSystem fileSystem = FileSystemManager.getFileSystemGroup(engine).getMaster(); + + // todo it's not the unique id for each table-scan exec in different work thread. + int sequenceId = getSourceId(); + + // todo It's time consuming because the constructor of configuration will initialize a large parameter list. + Configuration configuration = new Configuration(); + + ColumnarManager columnarManager = ColumnarManager.getInstance(); + OSSColumnTransformer columnTransformer = ossSplit.getColumnTransformer(ossTableScan, context); + + // build pre-processor for splits to contain all time-consuming processing + // like pruning, bitmap loading and metadata preheating. + if (preProcessor == null) { + preProcessor = getPreProcessor( + ossSplit, + logicalSchema, + logicalTableName, + tableMeta, + fileSystem, + configuration, + columnarManager); + } + + // Schema-level cache manager. + BlockCacheManager blockCacheManager = BlockCacheManager.getInstance(); + + // Get the push-down predicate. + // The refs of input-type will be consistent with refs in RexNode. + LazyEvaluator evaluator = null; + List inputRefsForFilter = ImmutableList.of(); + if (!ossTableScan.getOrcNode().getFilters().isEmpty()) { + RexNode rexNode = ossTableScan.getOrcNode().getFilters().get(0); + List> inputTypes = ossTableScan.getOrcNode().getInProjectsDataType(); + + // Build evaluator suitable for columnar scan, with the ratio to decide the evaluation strategy. + evaluator = DefaultLazyEvaluator.builder() + .setRexNode(rexNode) + .setRatio(DEFAULT_RATIO) + .setInputTypes(inputTypes) + .setContext(context) + .build(); + + // Collect input refs for filter (predicate) and project + InputRefTypeChecker inputRefTypeChecker = new InputRefTypeChecker(inputTypes); + rexNode.accept(inputRefTypeChecker); + + // The input-ref-indexes is the list of index of in-projects. + inputRefsForFilter = inputRefTypeChecker.getInputRefIndexes() + .stream() + .map(index -> ossTableScan.getOrcNode().getInProjects().get(index)) + .sorted() + .collect(Collectors.toList()); + } + + // The output projects is the list of index of in-projects. + List inputRefsForProject = ossTableScan.getOrcNode().getOutProjects() + .stream() + .map(index -> ossTableScan.getOrcNode().getInProjects().get(index)) + .sorted() + .collect(Collectors.toList()); + + final int chunkLimit = context.getParamManager().getInt(ConnectionParams.CHUNK_SIZE); + final int morselUnit = context.getParamManager().getInt(ConnectionParams.COLUMNAR_WORK_UNIT); + + final OssSplit.DeltaReadOption deltaReadOption = ossSplit.getDeltaReadOption(); + + // Build csv split for all csv files in deltaReadOption and fill into work pool. 
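+ // As the naming suggests, the csv files carry delta rows that have not been compacted into orc yet; each file becomes its own CsvColumnarSplit sharing the same pre-processor and checkpoint tso.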
+ if (deltaReadOption != null) { + final Map> allCsvFiles = deltaReadOption.getAllCsvFiles(); + + List finalInputRefsForFilterForCsv = inputRefsForFilter; + LazyEvaluator finalEvaluatorForCsv = evaluator; + allCsvFiles.values().stream().flatMap(List::stream).forEach( + csvFile -> { + Path filePath = FileSystemUtils.buildPath(fileSystem, csvFile, isColumnar); + preProcessor.addFile(filePath); + workPool.addSplit(sequenceId, CsvColumnarSplit.newBuilder() + .executionContext(context) + .columnarManager(columnarManager) + .file(filePath, 0) + .inputRefs(finalInputRefsForFilterForCsv, inputRefsForProject) + .tso(ossSplit.getCheckpointTso()) + .prepare(preProcessor) + .pushDown(finalEvaluatorForCsv) + .columnTransformer(columnTransformer) + .partNum(partNum) + .nodePartCount(nodePartCount) + .memoryAllocator(memoryAllocator) + .build() + ); + } + ); + } + + // Build columnar style split for all orc files in oss-split and fill into work pool. + if (orcFileNames != null) { + for (String fileName : orcFileNames) { + // The pre-processor shared by all columnar-splits in this table scan. + Path filePath = FileSystemUtils.buildPath(fileSystem, fileName, isColumnar); + preProcessor.addFile(filePath); + + // todo need columnar file-id mapping. + int fileId = 0; + + ColumnarSplit columnarSplit = MorselColumnarSplit.newBuilder() + .executionContext(context) + .ioExecutor(IO_EXECUTOR) + .fileSystem(fileSystem, engine) + .configuration(configuration) + .sequenceId(sequenceId) + .file(filePath, fileId) + .columnTransformer(columnTransformer) + .inputRefs(inputRefsForFilter, inputRefsForProject) + .cacheManager(blockCacheManager) + .chunkLimit(chunkLimit) + .morselUnit(morselUnit) + .pushDown(evaluator) + .prepare(preProcessor) + .columnarManager(columnarManager) + .isColumnarMode(isColumnar) + .tso(ossSplit.getCheckpointTso()) + .partNum(partNum) + .nodePartCount(nodePartCount) + .memoryAllocator(memoryAllocator) + .fragmentRFManager(fragmentRFManager) + .operatorStatistic(statistics) + .build(); + + workPool.addSplit(sequenceId, columnarSplit); + } + if (isColumnar) { + SNAPSHOT_FILE_ACCESS_COUNT.getAndAdd(orcFileNames.size()); + } + } + } + + protected DefaultScanPreProcessor getPreProcessor(OssSplit ossSplit, + String logicalSchema, + String logicalTableName, + TableMeta tableMeta, + FileSystem fileSystem, + Configuration configuration, + ColumnarManager columnarManager) { + return new DefaultScanPreProcessor( + configuration, fileSystem, + + // for pruning + logicalSchema, + logicalTableName, + enableIndexPruning, + context.getParamManager().getBoolean(ConnectionParams.ENABLE_OSS_COMPATIBLE), + tableMeta.getAllColumns(), + ossTableScan.getOrcNode().getOriFilters(), + ossSplit.getParams(), + + // for mock + DEFAULT_GROUPS_RATIO, + DEFAULT_DELETION_RATIO, + + // for columnar mode. + columnarManager, + ossSplit.getCheckpointTso(), + tableMeta.getColumnarFieldIdList() + ); + } + + @Override + public void noMoreSplits() { + workPool.noMoreSplits(getSourceId()); + } + + @Override + public Integer getSourceId() { + return ossTableScan.getRelatedId(); + } + + @Override + void doOpen() { + // invoke pre-processor. + if (preProcessor != null) { + preProcessorFuture = preProcessor.prepare(scanExecutor, context.getTraceId(), context.getColumnarTracer()); + } + } + + @Override + Chunk doSourceNextChunk() { + // There is no split added. + if (splitList.isEmpty()) { + // If there is no split, don't block the Driver. 
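+ // blockedSupplier backs produceIsBlocked(); each branch below installs the future the Driver should wait on before polling again.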
+            blockedSupplier = () -> Futures.immediateFuture(null);
+            return null;
+        }
+        // First, check whether the pre-processor is done.
+        if (preProcessorFuture != null && !preProcessorFuture.isDone()) {
+
+            // The blocked future comes from the pre-processor.
+            blockedSupplier = () -> preProcessorFuture;
+            return null;
+        } else {
+
+            // The blocked future comes from the IOStatus.
+            blockedSupplier = () -> currentIOStatus.isBlocked();
+        }
+
+        tryInvokeNext();
+        if (currentIOStatus == null) {
+            // If there is no selected row-group, don't block the Driver.
+            blockedSupplier = () -> Futures.immediateFuture(null);
+            return null;
+        }
+
+        // Fetch the next chunk according to the state.
+        IOStatus ioStatus = currentIOStatus;
+        ScanState state = ioStatus.state();
+        Chunk result;
+        switch (state) {
+        case READY:
+        case BLOCKED: {
+            result = ioStatus.popResult();
+            // If the chunk is null, the Driver should call is_blocked.
+            if (result == null || result.getPositionCount() == 0) {
+                return null;
+            }
+            return result;
+        }
+        case FINISHED: {
+            // We must first mark the last work as not-executable, so that when the next
+            // chunks are fetched from the last IOStatus, the Exec can invoke the next work.
+            if (currentWork != null && currentWork.getWorkId().equals(ioStatus.workId())) {
+                lastWorkNotExecutable = true;
+            }
+
+            // Try to pop all the remaining results.
+            while ((result = ioStatus.popResult()) != null) {
+                if (result.getPositionCount() == 0) {
+                    continue;
+                }
+                return result;
+            }
+
+            // The results of this scan work have run out.
+            finishedWorks.put(currentWork.getWorkId(), currentWork);
+            lastStatusRunOut = true;
+            if (enableDebug) {
+                LOGGER.info(MessageFormat.format(
+                    "finish IOStatus, exec: {0}, workId: {1}, rowCount: {2}",
+                    this.toString(), currentIOStatus.workId(), currentIOStatus.rowCount()
+                ));
+            }
+
+            tryInvokeNext();
+
+            break;
+        }
+        case FAILED: {
+            if (currentWork != null && currentWork.getWorkId().equals(ioStatus.workId())) {
+                lastWorkNotExecutable = true;
+            }
+            // Throw any stored exception on the client side.
+            ioStatus.throwIfFailed();
+            break;
+        }
+        case CLOSED: {
+            if (currentWork != null && currentWork.getWorkId().equals(ioStatus.workId())) {
+                lastWorkNotExecutable = true;
+            }
+            // The results of this scan work have run out.
+            finishedWorks.put(currentWork.getWorkId(), currentWork);
+            lastStatusRunOut = true;
+            if (enableDebug) {
+                LOGGER.info(MessageFormat.format(
+                    "finish IOStatus, exec: {0}, workId: {1}, rowCount: {2}",
+                    this.toString(), currentIOStatus.workId(), currentIOStatus.rowCount()
+                ));
+            }
+
+            tryInvokeNext();
+
+            break;
+        }
+        }
+
+        return null;
+    }
+
+    void tryInvokeNext() {
+        // Is the current scan work no longer executable?
+        if (lastWorkNotExecutable) {
+            if (!noAvailableWork && currentWork != null) {
+
+                if (enableDebug) {
+                    LOGGER.info(MessageFormat.format(
+                        "finish work, exec: {0}, workId: {1}",
+                        this.toString(), currentWork.getWorkId()
+                    ));
+                }
+
+            }
+
+            // Recycle the resources of the last scan work and pick up
+            // the next split from the work pool.
+            ScanWork newWork = workPool.pickUp(getSourceId());
+            if (newWork == null) {
+                noAvailableWork = true;
+            } else {
+                if (enableDebug) {
+                    LOGGER.info(MessageFormat.format(
+                        "start work, exec: {0}, workId: {1}",
+                        this.toString(), newWork.getWorkId()
+                    ));
+                }
+
+                currentWork = newWork;
+                currentWork.invoke(scanExecutor);
+
+                lastWorkNotExecutable = false;
+            }
+        }
+
+        // Has the current IO status run out?
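+        // If so, and tryInvokeNext() has picked up a new scan work above, switch
+        // currentIOStatus to that work's IOStatus so the following calls of
+        // doSourceNextChunk() drain its results; with no available work the status
+        // is left as-is and produceIsFinished() will eventually report completion.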
+        if (lastStatusRunOut && !noAvailableWork) {
+            // Switch the IO status.
+            currentIOStatus = currentWork.getIOStatus();
+            lastStatusRunOut = false;
+
+            if (enableDebug) {
+                LOGGER.info(MessageFormat.format(
+                    "start IOStatus, exec: {0}, workId: {1}",
+                    this.toString(), currentIOStatus.workId()
+                ));
+            }
+
+        }
+    }
+
+    @Override
+    void doClose() {
+        Throwable t = null;
+        RuntimeMetrics summaryMetrics = null;
+        for (ScanWork scanWork : finishedWorks.values()) {
+            try {
+                if (enableMetrics) {
+                    RuntimeMetrics metrics = scanWork.getMetrics();
+                    if (useVerboseMetricsReport) {
+                        // Print verbose metrics.
+                        String report = metrics.reportAll();
+                        LOGGER.info(MessageFormat.format("the scan-work report: {0}", report));
+                    }
+
+                    // Merge all metrics into the first one.
+                    if (summaryMetrics == null) {
+                        summaryMetrics = metrics;
+                    } else {
+                        summaryMetrics.merge(metrics);
+                    }
+                }
+            } catch (Throwable e) {
+                // Don't throw here, to prevent a memory leak.
+                t = e;
+            } finally {
+                try {
+                    scanWork.close(false);
+                } catch (Throwable e) {
+                    // Don't throw here, to prevent a memory leak.
+                    t = e;
+                }
+            }
+        }
+
+        // Print summary metrics.
+        if (enableMetrics && summaryMetrics != null) {
+            LOGGER.info(MessageFormat.format("the summary of scan-work report: {0}", summaryMetrics.reportAll()));
+        }
+
+        if (t != null) {
+            throw GeneralUtil.nestedException(t);
+        }
+    }
+
+    @Override
+    public List getDataTypes() {
+        return outputDataTypes;
+    }
+
+    @Override
+    public boolean produceIsFinished() {
+        return splitList.isEmpty() ||
+            (noAvailableWork && lastWorkNotExecutable && lastStatusRunOut);
+    }
+
+    @Override
+    public ListenableFuture produceIsBlocked() {
+        return blockedSupplier.get();
+    }
+
+    public static ExecutorService getIoExecutor() {
+        return IO_EXECUTOR;
+    }
+
+    public static ExecutorService getScanExecutor() {
+        return SCAN_EXECUTOR;
+    }
+
+    public static long getSnapshotFileAccessCount() {
+        return SNAPSHOT_FILE_ACCESS_COUNT.get();
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ConsumerExecutor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ConsumerExecutor.java
index f2c556a05..b48c08cbd 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ConsumerExecutor.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ConsumerExecutor.java
@@ -16,9 +16,10 @@
 package com.alibaba.polardbx.executor.operator;
 
+import com.alibaba.polardbx.executor.chunk.Chunk;
+import com.alibaba.polardbx.executor.mpp.operator.DriverContext;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
-import com.alibaba.polardbx.executor.chunk.Chunk;
 
 /**
  * Basic interface for consumer-operator which only receive the chunk.
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/CorrelateExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/CorrelateExec.java index 81a77bb7b..d521f3e2b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/CorrelateExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/CorrelateExec.java @@ -16,8 +16,6 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.chunk.Chunk; @@ -25,6 +23,8 @@ import com.alibaba.polardbx.executor.utils.SubqueryUtils; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListenableFuture; import org.apache.calcite.plan.RelOptUtil; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.core.CorrelationId; @@ -141,8 +141,8 @@ Chunk doNextChunk() { blockBuilders[getDataTypes().size() - 1].writeObject(null); } else { blockBuilders[getDataTypes().size() - 1].writeObject( - getDataTypes().get(getDataTypes().size() - 1) - .convertFrom(curSubqueryApply.getResultValue())); + getDataTypes().get(getDataTypes().size() - 1) + .convertFrom(curSubqueryApply.getResultValue())); } curSubqueryApply = null; @@ -208,10 +208,10 @@ private SubqueryApply createSubqueryApply(int rowIndex) { Chunk.ChunkRow chunkRow = currentChunk.rowAt(rowIndex); // handle apply subquerys return SubqueryUtils - .createSubqueryApply( - correlateId + "_" + Thread.currentThread().getName() + "_" + SubqueryUtils.nextSubqueryId - .getAndIncrement(), chunkRow, - plan, leftConditions, opKind, context, - correlateId, correlateDataRowType, semiJoinType, true); + .createSubqueryApply( + correlateId + "_" + Thread.currentThread().getName() + "_" + SubqueryUtils.nextSubqueryId + .getAndIncrement(), chunkRow, + plan, leftConditions, opKind, context, + correlateId, correlateDataRowType, semiJoinType, true); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/DynamicValueExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/DynamicValueExec.java index a3bb8d4e8..368c81d58 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/DynamicValueExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/DynamicValueExec.java @@ -16,7 +16,6 @@ package com.alibaba.polardbx.executor.operator; -import com.alibaba.polardbx.common.jdbc.RawString; import com.alibaba.polardbx.executor.chunk.BlockBuilder; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.operator.util.DataTypeUtils; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ExchangeExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ExchangeExec.java index d7ed20378..acaeb4110 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ExchangeExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ExchangeExec.java @@ -16,8 +16,6 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.collect.ImmutableList; -import 
com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; @@ -31,6 +29,8 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.memory.MemoryPool; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListenableFuture; import javax.annotation.concurrent.GuardedBy; import java.io.Closeable; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/Executor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/Executor.java index 6d63ba979..e765f7b11 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/Executor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/Executor.java @@ -17,6 +17,7 @@ package com.alibaba.polardbx.executor.operator; import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.mpp.operator.DriverContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ExpandExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ExpandExec.java index af5d0a3f0..23960b882 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ExpandExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ExpandExec.java @@ -16,8 +16,6 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.chunk.Block; import com.alibaba.polardbx.executor.chunk.BlockBuilder; import com.alibaba.polardbx.executor.chunk.Chunk; @@ -25,6 +23,8 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.expression.calc.IExpression; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListenableFuture; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/FilterExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/FilterExec.java index 21cc8f1f6..b650fe1b4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/FilterExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/FilterExec.java @@ -16,8 +16,6 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.chunk.Block; import com.alibaba.polardbx.executor.chunk.BlockBuilder; import com.alibaba.polardbx.executor.chunk.BooleanBlockBuilder; @@ -29,6 +27,8 @@ import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.expression.calc.IExpression; import com.alibaba.polardbx.optimizer.core.expression.calc.ScalarFunctionExpression; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListenableFuture; import it.unimi.dsi.fastutil.booleans.BooleanArrayList; import java.util.List; diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/HashAggExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/HashAggExec.java index 7eb0403f4..dcce1ee68 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/HashAggExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/HashAggExec.java @@ -16,12 +16,12 @@ package com.alibaba.polardbx.executor.operator; -import com.alibaba.polardbx.executor.operator.util.AggregateUtils; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.google.common.util.concurrent.ListenableFuture; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.chunk.ChunkConverter; import com.alibaba.polardbx.executor.chunk.Converters; +import com.alibaba.polardbx.executor.mpp.execution.TaskExecutor; import com.alibaba.polardbx.executor.operator.spill.MemoryRevoker; import com.alibaba.polardbx.executor.operator.spill.SpillerFactory; import com.alibaba.polardbx.executor.operator.util.AggOpenHashMap; @@ -29,14 +29,18 @@ import com.alibaba.polardbx.executor.operator.util.SpillableAggHashMap; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.AvgV2; import com.alibaba.polardbx.optimizer.memory.MemoryPoolUtils; import com.alibaba.polardbx.optimizer.memory.OperatorMemoryAllocatorCtx; import com.google.common.util.concurrent.ListenableFuture; +import java.text.MessageFormat; import java.util.List; public class HashAggExec extends AbstractHashAggExec implements ConsumerExecutor, MemoryRevoker { + private static final Logger LOGGER = LoggerFactory.getLogger(HashAggExec.class); + protected final ChunkConverter inputKeyChunkGetter; private final DataType[] groupKeyType; @@ -98,7 +102,7 @@ public HashAggExec( public void openConsume() { boolean spillEnabled = spillerFactory != null; for (Aggregator aggCall : aggregators) { - if (((AbstractAggregator) aggCall).isDistinct() || !AggregateUtils.supportSpill(aggCall)) { + if (aggCall.isDistinct() || aggCall instanceof AvgV2) { spillEnabled = false; break; } @@ -109,7 +113,7 @@ public void openConsume() { if (!memoryAllocator.isRevocable()) { hashTable = new AggOpenHashMap(groupKeyType, aggregators, aggValueType, inputType, expectedGroups, chunkLimit, - context); + context, memoryAllocator); } else { hashTable = new SpillableAggHashMap(groupKeyType, aggregators, aggValueType, outputColumnMeta, inputType, expectedGroups, chunkLimit, context, memoryAllocator, spillerFactory); @@ -135,16 +139,18 @@ public void closeConsume(boolean force) { @Override public void consumeChunk(Chunk inputChunk) { Chunk inputKeyChunk; - // no group by - if (groups.length == 0) { + if (groups.length == 0) { // no group by inputKeyChunk = inputChunk; } else { inputKeyChunk = inputKeyChunkGetter.apply(inputChunk); } long beforeEstimateSize = hashTable.estimateSize(); - hashTable.putChunk(inputKeyChunk, inputChunk); + hashTable.putChunk(inputKeyChunk, inputChunk, null); long afterEstimateSize = hashTable.estimateSize(); this.needMemoryAllocated = Math.max(afterEstimateSize - beforeEstimateSize, 0); + + // release input 
chunk + inputChunk.recycle(); } @Override @@ -159,9 +165,13 @@ void doClose() { @Override public void buildConsume() { + long start = System.nanoTime(); if (hashTable != null) { resultIterator = hashTable.buildChunks(); } + long end = System.nanoTime(); + LOGGER.debug(MessageFormat.format("HashAggExec: {0} build consume time cost = {1} ns, " + + "start = {2}, end = {3}", this.toString(), (end - start), start, end)); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/HashGroupJoinExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/HashGroupJoinExec.java index 3b63e6807..c6fefe351 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/HashGroupJoinExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/HashGroupJoinExec.java @@ -16,30 +16,37 @@ package com.alibaba.polardbx.executor.operator; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ListenableFuture; +import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.utils.GeneralUtil; -import com.alibaba.polardbx.common.utils.bloomfilter.FastIntBloomFilter; +import com.alibaba.polardbx.common.utils.bloomfilter.ConcurrentIntBloomFilter; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.common.utils.memory.SizeOf; import com.alibaba.polardbx.config.ConfigDataMode; +import com.alibaba.polardbx.executor.accumulator.Accumulator; +import com.alibaba.polardbx.executor.accumulator.AccumulatorBuilders; import com.alibaba.polardbx.executor.chunk.Block; import com.alibaba.polardbx.executor.chunk.BlockBuilder; import com.alibaba.polardbx.executor.chunk.BlockBuilders; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.chunk.ChunkConverter; +import com.alibaba.polardbx.executor.chunk.Converters; +import com.alibaba.polardbx.executor.chunk.IntegerBlock; import com.alibaba.polardbx.executor.chunk.IntegerBlockBuilder; import com.alibaba.polardbx.executor.chunk.NullBlock; +import com.alibaba.polardbx.executor.operator.util.ChunksIndex; import com.alibaba.polardbx.executor.operator.util.ConcurrentRawHashTable; +import com.alibaba.polardbx.executor.operator.util.DataTypeUtils; import com.alibaba.polardbx.executor.operator.util.DistinctSet; -import com.alibaba.polardbx.executor.operator.util.ElementaryChunksIndex; import com.alibaba.polardbx.executor.operator.util.HashAggResultIterator; import com.alibaba.polardbx.executor.operator.util.TypedBuffer; +import com.alibaba.polardbx.executor.operator.util.TypedList; +import com.alibaba.polardbx.executor.operator.util.TypedListHandle; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.optimizer.core.datatype.IntegerType; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; import com.alibaba.polardbx.optimizer.core.expression.calc.IExpression; import com.alibaba.polardbx.optimizer.core.join.EquiJoinKey; import com.alibaba.polardbx.optimizer.core.row.JoinRow; @@ -50,6 +57,7 @@ import com.google.common.base.Preconditions; import com.google.common.util.concurrent.ListenableFuture; 
import org.apache.calcite.rel.core.JoinRelType; +import org.apache.calcite.util.Util; import java.util.ArrayList; import java.util.Arrays; @@ -75,7 +83,7 @@ public class HashGroupJoinExec extends AbstractJoinExec implements ConsumerExecu protected ConcurrentRawHashTable hashTable; protected int[] positionLinks; - protected FastIntBloomFilter bloomFilter; + protected ConcurrentIntBloomFilter bloomFilter; private TypedBuffer groupKeyBuffer; private final DataType[] aggValueType; @@ -87,16 +95,20 @@ public class HashGroupJoinExec extends AbstractJoinExec implements ConsumerExecu private final BlockBuilder[] valueBlockBuilders; + private ChunkConverter[] valueConverters; + + protected Accumulator[] valueAccumulators; + private DistinctSet[] distinctSets; private HashAggResultIterator resultIterator; - private Chunk nullChunk = new Chunk(new NullBlock(1)); + private Chunk nullChunk = new Chunk(new NullBlock(chunkLimit)); private int groupId = 0; - private ElementaryChunksIndex outerChunks; - private ElementaryChunksIndex outKeyChunks; + private ChunksIndex outerChunks; + private ChunksIndex outKeyChunks; private MemoryPool memoryPool; private MemoryAllocatorCtx memoryAllocator; @@ -105,13 +117,14 @@ public class HashGroupJoinExec extends AbstractJoinExec implements ConsumerExecu private int[] keys; private BitSet usedKeys; + private Chunk[] inputAggregatorInputs; private boolean isFinished = false; private ListenableFuture blocked = ProducerExecutor.NOT_BLOCKED; private List outputRelDataTypes; - private long needMemoryAllocated = 0; + private GroupJoinProbeOperator probeOperator; public HashGroupJoinExec(Executor outerInput, Executor innerInput, @@ -130,6 +143,7 @@ public HashGroupJoinExec(Executor outerInput, Preconditions.checkArgument(!semiJoin, "Don't support semiJoin!"); this.groups = groups; this.aggregators = aggregators; + this.inputAggregatorInputs = new Chunk[aggregators.size()]; this.outputRelDataTypes = outputRelDataTypes; createBlockBuilders(); @@ -147,14 +161,26 @@ public HashGroupJoinExec(Executor outerInput, this.groupKeyBuffer = TypedBuffer.create(groupTypes, chunkLimit, context); this.aggValueType = aggTypes; + //init aggregate accumulator + this.valueAccumulators = new Accumulator[aggregators.size()]; + this.valueConverters = new ChunkConverter[aggregators.size()]; this.distinctSets = new DistinctSet[aggregators.size()]; for (i = 0; i < aggregators.size(); i++) { - final AbstractAggregator aggregator = (AbstractAggregator) aggregators.get(i); - aggregator.open(expectedOutputRowCount); - int[] aggIndexInChunk = aggregator.getOriginTargetIndexes(); + final Aggregator aggregator = aggregators.get(i); + this.valueAccumulators[i] = + AccumulatorBuilders + .create(aggregator, aggValueType[i], aggInputType, expectedOutputRowCount, this.context); + + DataType[] originalInputTypes = DataTypeUtils.gather(aggInputType, aggregator.getInputColumnIndexes()); + DataType[] accumulatorInputTypes = Util.first(valueAccumulators[i].getInputTypes(), originalInputTypes); + this.valueConverters[i] = + Converters.createChunkConverter(aggregator.getInputColumnIndexes(), aggInputType, accumulatorInputTypes, + joinType == JoinRelType.RIGHT ? 
0 : outerInput.getDataTypes().size(), context); + if (aggregator.isDistinct()) { - distinctSets[i] = - new DistinctSet(aggInputType, aggIndexInChunk, expectedOutputRowCount, chunkLimit, + int[] distinctIndexes = aggregator.getNewForAccumulator().getAggTargetIndexes(); + this.distinctSets[i] = + new DistinctSet(accumulatorInputTypes, distinctIndexes, expectedOutputRowCount, chunkLimit, context); } } @@ -162,6 +188,54 @@ public HashGroupJoinExec(Executor outerInput, for (i = 0; i < aggregators.size(); i++) { this.valueBlockBuilders[i] = BlockBuilders.create(aggValueType[i], context); } + + this.outerChunks = new ChunksIndex(); + this.outKeyChunks = new ChunksIndex(); + + boolean enableVecJoin = context.getParamManager().getBoolean(ConnectionParams.ENABLE_VEC_JOIN); + + boolean hasNoDistinct = true; + for (int index = 0; index < distinctSets.length; index++) { + hasNoDistinct &= distinctSets[index] == null; + } + + final boolean isSingleIntegerType = + joinKeys.size() == 1 && (joinKeys.get(0).getUnifiedType() instanceof IntegerType) && !joinKeys.get(0) + .isNullSafeEqual(); + + if (enableVecJoin && isSingleIntegerType && hasNoDistinct && condition == null) { + this.probeOperator = new IntGroupJoinProbeOperator(); + + // for fast group key output + this.groupKeyBuffer = TypedBuffer.createTypeSpecific(DataTypes.IntegerType, chunkLimit, context); + + // for fast probe + this.outKeyChunks.setTypedHashTable(new TypedListHandle() { + private TypedList[] typedLists; + + @Override + public long estimatedSize(int fixedSize) { + return TypedList.IntTypedList.estimatedSizeInBytes(fixedSize); + } + + @Override + public TypedList[] getTypedLists(int fixedSize) { + if (typedLists == null) { + typedLists = new TypedList[] {TypedList.createInt(fixedSize)}; + } + return typedLists; + } + + @Override + public void consume(Chunk chunk, int sourceIndex) { + chunk.getBlock(0).cast(Block.class) + .appendTypedHashTable(typedLists[0], sourceIndex, 0, chunk.getPositionCount()); + } + }); + } else { + this.probeOperator = new DefaultGroupJoinProbeOperator(); + } + } @Override @@ -176,9 +250,6 @@ public void doOpen() { @Override public void openConsume() { - outerChunks = new ElementaryChunksIndex(chunkLimit); - outKeyChunks = new ElementaryChunksIndex(chunkLimit); - memoryPool = MemoryPoolUtils.createOperatorTmpTablePool(getExecutorName(), context.getMemoryPool()); memoryAllocator = memoryPool.getMemoryAllocatorCtx(); @@ -191,8 +262,6 @@ public void buildConsume() { passNothing = true; } doBuildHashTable(); - outerChunks.buildRow(); - outKeyChunks.buildRow(); } } @@ -206,21 +275,26 @@ void doBuildHashTable() { positionLinks = new int[size]; Arrays.fill(positionLinks, LIST_END); + // create type list + outKeyChunks.openTypedHashTable(); + if (size <= BLOOM_FILTER_ROWS_LIMIT) { - bloomFilter = FastIntBloomFilter.create(size); + bloomFilter = ConcurrentIntBloomFilter.create(size); memoryAllocator.allocateReservedMemory(bloomFilter.sizeInBytes()); } int position = 0; for (int chunkId = 0; chunkId < outKeyChunks.getChunkCount(); ++chunkId) { + outKeyChunks.addChunkToTypedList(chunkId); + final Chunk keyChunk = outKeyChunks.getChunk(chunkId); - buildOneChunk(keyChunk, position, hashTable, positionLinks, bloomFilter, aggregators); + buildOneChunk(keyChunk, position, hashTable, positionLinks, bloomFilter, aggregators, valueAccumulators); position += keyChunk.getPositionCount(); } assert position == size; // Allocate memory for the hash-table - memoryAllocator.allocateReservedMemory(hashTable.estimateSize()); + 
memoryAllocator.allocateReservedMemory(hashTable.estimateSizeInBytes()); memoryAllocator.allocateReservedMemory(SizeOf.sizeOf(positionLinks)); logger.info("complete building hash table"); @@ -258,49 +332,40 @@ public void closeConsume(boolean force) { } } - private int matchInit(int hashCode, Chunk keyChunk, int position) { - if (bloomFilter != null && !bloomFilter.mightContain(hashCode)) { - return LIST_END; - } - - int matchedPosition = hashTable.get(hashCode); - while (matchedPosition != LIST_END) { - if (outKeyChunks.equals(matchedPosition, keyChunk, position)) { - break; - } - matchedPosition = positionLinks[matchedPosition]; - } - return matchedPosition; - } - - private int matchNext(int current, Chunk keyChunk, int position) { - int matchedPosition = positionLinks[current]; - while (matchedPosition != LIST_END) { - if (outKeyChunks.equals(matchedPosition, keyChunk, position)) { - break; - } - matchedPosition = positionLinks[matchedPosition]; - } - return matchedPosition; - } - - private boolean matchValid(int current) { - return current != LIST_END; - } - private void buildOneChunk(Chunk keyChunk, int position, ConcurrentRawHashTable hashTable, int[] positionLinks, - FastIntBloomFilter bloomFilter, List aggregators) { + ConcurrentIntBloomFilter bloomFilter, List aggregators, + Accumulator[] valueAccumulators) { // Calculate hash codes of the whole chunk int[] hashes = keyChunk.hashCodeVector(); - for (int offset = 0; offset < keyChunk.getPositionCount(); offset++, position++) { - int next = hashTable.put(position, hashes[offset]); - positionLinks[position] = next; - if (bloomFilter != null) { - bloomFilter.put(hashes[offset]); + if (checkJoinKeysAllNullSafe(keyChunk, ignoreNullBlocks)) { + // If all keys are not null, we can leave out the null-check procedure + for (int offset = 0; offset < keyChunk.getPositionCount(); offset++, position++) { + int next = hashTable.put(position, hashes[offset]); + positionLinks[position] = next; + if (bloomFilter != null) { + bloomFilter.putInt(hashes[offset]); + } + for (int i = 0; i < aggregators.size(); i++) { + valueAccumulators[i].appendInitValue(); + } + } + + } else { + // Otherwise we have to check nullability for each row + for (int offset = 0; offset < keyChunk.getPositionCount(); offset++, position++) { + if (checkJoinKeysNulSafe(keyChunk, offset, ignoreNullBlocks)) { + int next = hashTable.put(position, hashes[offset]); + positionLinks[position] = next; + if (bloomFilter != null) { + bloomFilter.putInt(hashes[offset]); + } + for (int i = 0; i < aggregators.size(); i++) { + valueAccumulators[i].appendInitValue(); + } + } } - aggregators.forEach(Aggregator::appendInitValue); } } @@ -311,29 +376,21 @@ Chunk doNextChunk() { return null; } if (resultIterator == null) { - if (needMemoryAllocated != 0) { - memoryAllocator.allocateReservedMemory(needMemoryAllocated); - needMemoryAllocated = 0; - } Chunk inputChunk = nextProbeChunk(); boolean inputIsFinished = false; if (inputChunk != null) { + for (int i = 0; i < aggregators.size(); i++) { + inputAggregatorInputs[i] = valueConverters[i].apply(inputChunk); + } // Process outer rows in this input chunk - long beforeEstimateSize = aggregators.stream().mapToLong(Aggregator::estimateSize).sum(); - calculateJoinAgg(inputChunk); - long afterEstimateSize = aggregators.stream().mapToLong(Aggregator::estimateSize).sum(); - needMemoryAllocated = Math.max(afterEstimateSize - beforeEstimateSize, 0); + probeOperator.calcJoinAgg(inputChunk); } else { inputIsFinished = getProbeInput().produceIsFinished(); } if 
(inputIsFinished) { //input is already finished. if (joinType == JoinRelType.LEFT || joinType == JoinRelType.RIGHT) { - final Chunk nullChunk = this.nullChunk; - for (int i = usedKeys.nextClearBit(0), size = outKeyChunks.getPositionCount(); i >= 0 && i < size; - i = usedKeys.nextClearBit(i + 1)) { - buildNullRow(nullChunk, 0, i); - } + probeOperator.handleNull(); } resultIterator = buildChunks(); } else { @@ -356,43 +413,6 @@ private Chunk nextProbeChunk() { return getProbeInput().nextChunk(); } - private void calculateJoinAgg(Chunk inputChunk) { - final int positionCount = inputChunk.getPositionCount(); - Chunk inputJoinKeyChunk = getProbeKeyChunkGetter().apply(inputChunk); - int[] currInputJoinKeyChunks = inputJoinKeyChunk.hashCodeVector(); - final int[] ints = currInputJoinKeyChunks; - int position = 0; - boolean isMatching = false; - boolean matched = false; - int matchedPosition = LIST_END; - for (; position < positionCount; position++) { - - // reset matched flag unless it's still during matching - if (!isMatching) { - matched = false; - matchedPosition = matchInit(ints[position], inputJoinKeyChunk, position); - } else { - // continue from the last processed match - matchedPosition = matchNext(matchedPosition, inputJoinKeyChunk, position); - isMatching = false; - } - - for (; matchValid(matchedPosition); - matchedPosition = matchNext(matchedPosition, inputJoinKeyChunk, position)) { - if (!checkJoinCondition(inputChunk, position, matchedPosition)) { - continue; - } - buildJoinRow(position, matchedPosition, inputChunk); - // checks max1row generated from scalar subquery - if (singleJoin && matched && !(ConfigDataMode.isFastMock())) { - throw GeneralUtil.nestedException("Scalar subquery returns more than 1 row"); - } - // set matched flag - matched = true; - } - } - } - private boolean checkJoinCondition(Chunk outerChunk, int outerPosition, int innerPosition) { if (condition == null) { return true; @@ -422,8 +442,8 @@ private List buildValueChunks() { for (int groupId = 0, i = usedKeys.nextSetBit(0), size = outKeyChunks.getPositionCount(); i >= 0 && i < size; i = usedKeys.nextSetBit(i + 1)) { - for (int j = 0; j < aggregators.size(); j++) { - aggregators.get(j).writeResultTo(groupId++, valueBlockBuilders[j]); + for (int j = 0; j < valueAccumulators.length; j++) { + valueAccumulators[j].writeResultTo(groupId++, valueBlockBuilders[j]); } if (++offset == chunkLimit) { chunks.add(buildValueChunk()); @@ -438,8 +458,8 @@ private List buildValueChunks() { case RIGHT: int offsetLeft = 0; for (int keyIndex = 0; keyIndex < this.outKeyChunks.getPositionCount(); keyIndex++) { - for (int j = 0; j < aggregators.size(); j++) { - aggregators.get(j).writeResultTo(keyIndex, valueBlockBuilders[j]); + for (int j = 0; j < valueAccumulators.length; j++) { + valueAccumulators[j].writeResultTo(keyIndex, valueBlockBuilders[j]); } if (++offsetLeft == chunkLimit) { chunks.add(buildValueChunk()); @@ -455,6 +475,9 @@ private List buildValueChunks() { } + //set null to deallocate memory + this.valueAccumulators = null; + return chunks; } @@ -472,46 +495,14 @@ private Chunk buildValueChunk() { return new Chunk(blocks); } - public void buildJoinRow(int position, int matchedPosition, Chunk inputChunk) { - usedKeys.set(matchedPosition); - final Chunk.ChunkRow chunkRow = outerChunks.rowAt(matchedPosition); - //build group keys - final int key = keys[matchedPosition]; - if (key == LIST_END) { - keys[matchedPosition] = appendAndIncrementGroup(chunkRow.getChunk(), chunkRow.getPosition()); - } - - for (int aggIndex = 0; 
aggIndex < aggregators.size(); aggIndex++) { - boolean[] isDistinct = null; - if (distinctSets[aggIndex] != null) { - final IntegerBlockBuilder integerBlockBuilder = new IntegerBlockBuilder(1); - integerBlockBuilder.writeInt(keys[matchedPosition]); - isDistinct = distinctSets[aggIndex] - .checkDistinct(integerBlockBuilder.build(), inputChunk, position); - } - if (isDistinct == null || isDistinct[0]) { - //to accumulate the Aggregate function result,here need 'aggregate convert' and 'aggregate accumulator' - doAggregate(inputChunk, aggIndex, position, keys[matchedPosition]); - } - } - } - - public void buildNullRow(Chunk inputChunk, int position, int matchedPosition) { - final Chunk.ChunkRow chunkRow = outerChunks.rowAt(matchedPosition); - final int key = keys[matchedPosition]; - if (key == LIST_END) { - keys[matchedPosition] = appendAndIncrementGroup(chunkRow.getChunk(), chunkRow.getPosition()); - } - for (int aggIndex = 0; aggIndex < aggregators.size(); aggIndex++) { - //对右表计算Agg值,这里需要 aggregate convert 和 aggregate accumulator - doAggregate(inputChunk, aggIndex, position, keys[matchedPosition]); - } - + private void doAggregate(Chunk[] aggInputChunks, int aggIndex, int position, int groupId) { + assert aggInputChunks != null; + valueAccumulators[aggIndex].accumulate(groupId, aggInputChunks[aggIndex], position); } private void doAggregate(Chunk aggInputChunk, int aggIndex, int position, int groupId) { assert aggInputChunk != null; - aggregators.get(aggIndex).accumulate(groupId, aggInputChunk, position); + valueAccumulators[aggIndex].accumulate(groupId, aggInputChunk, position); } private int appendAndIncrementGroup(Chunk chunk, int position) { @@ -544,4 +535,341 @@ private ChunkConverter getBuildKeyChunkGetter() { private ChunkConverter getProbeKeyChunkGetter() { return innerKeyChunkGetter; } + + interface GroupJoinProbeOperator { + void calcJoinAgg(Chunk inputChunk); + + void handleNull(); + } + + class DefaultGroupJoinProbeOperator implements GroupJoinProbeOperator { + + @Override + public void calcJoinAgg(Chunk inputChunk) { + final int positionCount = inputChunk.getPositionCount(); + Chunk inputJoinKeyChunk = getProbeKeyChunkGetter().apply(inputChunk); + final int[] hashVector = inputJoinKeyChunk.hashCodeVector(); + + int position = 0; + boolean isMatching = false; + boolean matched = false; + int matchedPosition = LIST_END; + + for (; position < positionCount; position++) { + + // reset matched flag unless it's still during matching + if (!isMatching) { + matched = false; + matchedPosition = matchInit(hashVector[position], inputJoinKeyChunk, position); + } else { + // continue from the last processed match + matchedPosition = matchNext(matchedPosition, inputJoinKeyChunk, position); + isMatching = false; + } + + for (; matchValid(matchedPosition); + matchedPosition = matchNext(matchedPosition, inputJoinKeyChunk, position)) { + if (!checkJoinCondition(inputChunk, position, matchedPosition)) { + continue; + } + buildJoinRow(position, matchedPosition); + // checks max1row generated from scalar subquery + if (singleJoin && matched && !(ConfigDataMode.isFastMock())) { + throw GeneralUtil.nestedException("Scalar subquery returns more than 1 row"); + } + // set matched flag + matched = true; + } + } + } + + @Override + public void handleNull() { + for (int i = usedKeys.nextClearBit(0), size = outKeyChunks.getPositionCount(); i >= 0 && i < size; + i = usedKeys.nextClearBit(i + 1)) { + buildNullRow(nullChunk, 0, i); + } + } + + private void buildNullRow(Chunk inputChunk, int position, int 
matchedPosition) {
+            final Chunk.ChunkRow chunkRow = outerChunks.rowAt(matchedPosition);
+            final int key = keys[matchedPosition];
+            if (key == LIST_END) {
+                keys[matchedPosition] = appendAndIncrementGroup(chunkRow.getChunk(), chunkRow.getPosition());
+            }
+            for (int aggIndex = 0; aggIndex < aggregators.size(); aggIndex++) {
+                doAggregate(inputChunk, aggIndex, position, keys[matchedPosition]);
+            }
+
+        }
+
+        private int matchInit(int hashCode, Chunk keyChunk, int position) {
+            if (bloomFilter != null && !bloomFilter.mightContainInt(hashCode)) {
+                return LIST_END;
+            }
+
+            int matchedPosition = hashTable.get(hashCode);
+            while (matchedPosition != LIST_END) {
+                if (outKeyChunks.equals(matchedPosition, keyChunk, position)) {
+                    break;
+                }
+                matchedPosition = positionLinks[matchedPosition];
+            }
+            return matchedPosition;
+        }
+
+        private int matchNext(int current, Chunk keyChunk, int position) {
+            int matchedPosition = positionLinks[current];
+            while (matchedPosition != LIST_END) {
+                if (outKeyChunks.equals(matchedPosition, keyChunk, position)) {
+                    break;
+                }
+                matchedPosition = positionLinks[matchedPosition];
+            }
+            return matchedPosition;
+        }
+
+        private boolean matchValid(int current) {
+            return current != LIST_END;
+        }
+
+        private void buildJoinRow(int position, int matchedPosition) {
+            usedKeys.set(matchedPosition);
+            Chunk[] aggregatorInputs = inputAggregatorInputs;
+            final Chunk.ChunkRow chunkRow = outerChunks.rowAt(matchedPosition);
+            // Build group keys.
+            final int key = keys[matchedPosition];
+            if (key == LIST_END) {
+                keys[matchedPosition] = appendAndIncrementGroup(chunkRow.getChunk(), chunkRow.getPosition());
+            }
+
+            for (int aggIndex = 0; aggIndex < aggregators.size(); aggIndex++) {
+                boolean[] isDistinct = null;
+                if (distinctSets[aggIndex] != null) {
+                    final IntegerBlockBuilder integerBlockBuilder = new IntegerBlockBuilder(1);
+                    integerBlockBuilder.writeInt(keys[matchedPosition]);
+                    isDistinct = distinctSets[aggIndex]
+                        .checkDistinct(integerBlockBuilder.build(), aggregatorInputs[aggIndex], position);
+                }
+                if (isDistinct == null || isDistinct[0]) {
+                    // To accumulate the aggregate function result, the 'aggregate converter'
+                    // and 'aggregate accumulator' are needed here.
+                    doAggregate(aggregatorInputs, aggIndex, position, keys[matchedPosition]);
+                }
+            }
+        }
+    }
+
+    // Join condition = null
+    // distinct set = null
+    // singleJoin = false
+    class IntGroupJoinProbeOperator implements GroupJoinProbeOperator {
+        private int[] sourceArray = new int[chunkLimit];
+
+        // for join
+        protected int matchedRows = 0;
+        protected int[] matchedPositions = new int[chunkLimit];
+        protected int[] probePositions = new int[chunkLimit];
+
+        // for null value
+        protected BitSet nullBitmap = new BitSet(chunkLimit);
+        protected boolean hasNull = false;
+
+        // for hash code.
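+        // Filled in calcJoinAgg only when the probe block carries a selection vector;
+        // otherwise the int key value itself addresses the hash table, which is why
+        // matchInit below has an overload that takes no pre-computed hash code.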
+ protected final int[] probeKeyHashCode = new int[chunkLimit]; + + // for agg + protected int[] groupIds = new int[chunkLimit]; + + // for GroupKeyBuffer append + protected int groupKeyBufferArrayIndex = 0; + protected int[] groupKeyBufferArray = new int[chunkLimit]; + + @Override + public void calcJoinAgg(Chunk inputChunk) { + // clear state for null values + hasNull = false; + nullBitmap.clear(); + + final int positionCount = inputChunk.getPositionCount(); + Chunk inputJoinKeyChunk = getProbeKeyChunkGetter().apply(inputChunk); + + Preconditions.checkArgument(inputJoinKeyChunk.getBlockCount() == 1 + && inputJoinKeyChunk.getBlock(0).cast(Block.class) instanceof IntegerBlock); + + boolean useHashVector = inputJoinKeyChunk.getBlock(0).cast(IntegerBlock.class).getSelection() != null; + if (useHashVector) { + inputJoinKeyChunk.hashCodeVector(probeKeyHashCode, null, null, positionCount); + } + + // copy array from integer block + IntegerBlock integerBlock = inputJoinKeyChunk.getBlock(0).cast(IntegerBlock.class); + integerBlock.copyToIntArray(0, positionCount, sourceArray, 0, null); + if (integerBlock.mayHaveNull()) { + // collect nulls if block may have null value. + integerBlock.collectNulls(0, positionCount, nullBitmap, 0); + hasNull = !nullBitmap.isEmpty(); + } + + // clear matched rows for each input chunk. + matchedRows = 0; + + boolean isMatching = false; + int matchedPosition = LIST_END; + for (int position = 0; position < positionCount; position++) { + + // reset matched flag unless it's still during matching + if (!isMatching) { + matchedPosition = useHashVector + ? matchInit(probeKeyHashCode[position], position) + : matchInit(position); + } else { + // continue from the last processed match + matchedPosition = matchNext(matchedPosition, position); + isMatching = false; + } + + for (; matchedPosition != LIST_END; + matchedPosition = matchNext(matchedPosition, position)) { + + // record matched rows of [probed, matched] + matchedPositions[matchedRows] = matchedPosition; + probePositions[matchedRows] = position; + matchedRows++; + } + } + + buildJoinRow(); + } + + @Override + public void handleNull() { + // clear group key buffer + groupKeyBufferArrayIndex = 0; + + int position = 0; + for (int unmatchedPosition = usedKeys.nextClearBit(0), size = outKeyChunks.getPositionCount(); + unmatchedPosition >= 0 && unmatchedPosition < size; + unmatchedPosition = usedKeys.nextClearBit(unmatchedPosition + 1)) { + + // find and allocate group id + if (keys[unmatchedPosition] == LIST_END) { + // allocate group id. + + // we should get value in matched position of out key chunks. + // It is equal to value in probe position of probe side chunk. + int unmatchedValue = outKeyChunks.getInt(0, unmatchedPosition); + groupKeyBufferArray[groupKeyBufferArrayIndex++] = unmatchedValue; + + keys[unmatchedPosition] = groupId++; + } + + groupIds[position] = keys[unmatchedPosition]; + position++; + + if (position >= chunkLimit) { + flushNullChunk(position); + position = 0; + } + } + + if (position > 0) { + flushNullChunk(position); + } + } + + private void flushNullChunk(int positionCount) { + // flush group key buffer. + groupKeyBuffer.appendRow(groupKeyBufferArray, -1, groupKeyBufferArrayIndex); + groupKeyBufferArrayIndex = 0; + + // accumulate with given group ids. 
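+            // Every row flushed here is an unmatched build-side key, so the probe-side
+            // input for each aggregator is the shared null chunk: the accumulators see
+            // one all-null row per group id collected above.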
+ for (int aggIndex = 0; aggIndex < aggregators.size(); aggIndex++) { + valueAccumulators[aggIndex].accumulate(groupIds, nullChunk, positionCount); + } + } + + private int matchInit(int position) { + // not null safe + if (hasNull && nullBitmap.get(position)) { + return LIST_END; + } + + int value = sourceArray[position]; + int matchedPosition = hashTable.get(value); + while (matchedPosition != LIST_END) { + if (outKeyChunks.getInt(0, matchedPosition) == value) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + private int matchInit(int hashCode, int position) { + // not null safe + if (hasNull && nullBitmap.get(position)) { + return LIST_END; + } + + int matchedPosition = hashTable.get(hashCode); + while (matchedPosition != LIST_END) { + if (outKeyChunks.getInt(0, matchedPosition) == sourceArray[position]) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + private int matchNext(int current, int position) { + // not null safe + if (hasNull && nullBitmap.get(position)) { + return LIST_END; + } + + int matchedPosition = positionLinks[current]; + while (matchedPosition != LIST_END) { + if (outKeyChunks.getInt(0, matchedPosition) == sourceArray[position]) { + break; + } + matchedPosition = positionLinks[matchedPosition]; + } + return matchedPosition; + } + + private void buildJoinRow() { + for (int i = 0; i < matchedRows; i++) { + final int position = probePositions[i]; + final int matchedPosition = matchedPositions[i]; + + // for outer join output + usedKeys.set(matchedPosition); + + // find and allocate group id + if (keys[matchedPosition] == LIST_END) { + // allocate group id. + + // we should get value in matched position of out key chunks. + // It is equal to value in probe position of probe side chunk. + int matchedValue = sourceArray[position]; + groupKeyBufferArray[groupKeyBufferArrayIndex++] = matchedValue; + + keys[matchedPosition] = groupId++; + } + + groupIds[position] = keys[matchedPosition]; + } + + // flush group key buffer. 
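+            // Group keys staged in groupKeyBufferArray during probing are appended in one
+            // batch; the -1 argument is presumably a "no selection vector" marker for
+            // appendRow (an assumption, based on its use with a plain int array here).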
+ groupKeyBuffer.appendRow(groupKeyBufferArray, -1, groupKeyBufferArrayIndex); + groupKeyBufferArrayIndex = 0; + + // A special agg for group join + // the probe positions array may have repeated elements like {0, 0, 1, 1, 1, 2, 5, 5, 7 ...} + for (int aggIndex = 0; aggIndex < aggregators.size(); aggIndex++) { + valueAccumulators[aggIndex].accumulate(groupIds, inputAggregatorInputs[aggIndex], probePositions, + matchedRows); + } + } + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/HashWindowExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/HashWindowExec.java index 22c4fcd74..b770af29d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/HashWindowExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/HashWindowExec.java @@ -16,7 +16,7 @@ package com.alibaba.polardbx.executor.operator; -import com.alibaba.polardbx.executor.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.chunk.ChunkConverter; import com.alibaba.polardbx.executor.chunk.Converters; @@ -91,7 +91,7 @@ public void openConsume() { memoryAllocator = new OperatorMemoryAllocatorCtx(memoryPool, false); hashTable = new HashWindowOpenHashMap(groupKeyType, aggregators, aggValueType, inputType, expectedGroups, chunkLimit, - context); + context, memoryAllocator); } @Override @@ -113,7 +113,7 @@ public void consumeChunk(Chunk inputChunk) { // TODO should be optimized Chunk inputKeyChunk = groups.length == 0 ? inputChunk : inputKeyChunkGetter.apply(inputChunk); long beforeEstimateSize = hashTable.estimateSize(); - hashTable.putChunk(inputKeyChunk, inputChunk); + hashTable.putChunk(inputKeyChunk, inputChunk, null); long afterEstimateSize = hashTable.estimateSize(); // inputChunk will be cached in HashWindowAggMap long cachedChunkMemory = inputChunk.estimateSize(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/HybridHashJoinExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/HybridHashJoinExec.java index 038d220e1..7a7e61920 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/HybridHashJoinExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/HybridHashJoinExec.java @@ -16,10 +16,10 @@ package com.alibaba.polardbx.executor.operator; -import com.alibaba.polardbx.common.properties.ConnectionParams; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.bloomfilter.ConcurrentIntBloomFilter; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.config.ConfigDataMode; @@ -39,7 +39,7 @@ import com.alibaba.polardbx.optimizer.memory.MemoryPool; import com.alibaba.polardbx.optimizer.memory.MemoryPoolUtils; import com.alibaba.polardbx.optimizer.memory.OperatorMemoryAllocatorCtx; -import com.alibaba.polardbx.common.utils.bloomfilter.FastIntBloomFilter; +import com.google.common.util.concurrent.ListenableFuture; import org.apache.calcite.rel.core.JoinRelType; import java.util.ArrayList; @@ -47,10 +47,10 @@ import java.util.List; import 
java.util.Optional; -import static com.google.common.base.Preconditions.checkState; -import static com.google.common.util.concurrent.Futures.immediateFuture; import static com.alibaba.polardbx.executor.operator.AbstractHashJoinExec.LIST_END; import static com.alibaba.polardbx.executor.utils.ExecUtils.buildOneChunk; +import static com.google.common.base.Preconditions.checkState; +import static com.google.common.util.concurrent.Futures.immediateFuture; // TODO: spillCnt应该是一直使用最外层的,但现在迭代生成新的spillCnt,统计失效。 public class HybridHashJoinExec extends AbstractJoinExec implements MemoryRevoker, ConsumerExecutor { @@ -619,7 +619,7 @@ private class BucketArea { //build ConcurrentRawHashTable hashTable; int[] positionLinks; - FastIntBloomFilter bloomFilter; + ConcurrentIntBloomFilter bloomFilter; // Special mode only for semi/anti-join private boolean directOutputProbe; @@ -700,7 +700,7 @@ public void consumeOuterChunkAndTryProduce() { if (this.bloomFilter != null && needBloomfilter) { for (; saveProbePosition < positionCount; saveProbePosition++) { int hashCode = saveProbeHashCodes[saveProbePosition]; - if (this.bloomFilter.mightContain(hashCode)) { + if (this.bloomFilter.mightContainInt(hashCode)) { this.spillHandler.getProbeSpillerExec() .addRowToSpill(saveProbeChunk, saveProbePosition); } else { @@ -875,7 +875,7 @@ void doBuildTable() { if (!alreadyBuild) { if (size <= BLOOM_FILTER_ROWS_LIMIT && size > 0) { - this.bloomFilter = FastIntBloomFilter.create(size); + this.bloomFilter = ConcurrentIntBloomFilter.create(size); } } @@ -897,7 +897,7 @@ void doBuildTable() { } int matchInit(Chunk keyChunk, int position, int hashCode) { - if (bloomFilter != null && !bloomFilter.mightContain(hashCode)) { + if (bloomFilter != null && !bloomFilter.mightContainInt(hashCode)) { return LIST_END; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/LimitExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/LimitExec.java index bc2aeb872..e92da1349 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/LimitExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/LimitExec.java @@ -16,10 +16,10 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.google.common.util.concurrent.ListenableFuture; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/LookupJoinExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/LookupJoinExec.java index 4d3855141..469db9e02 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/LookupJoinExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/LookupJoinExec.java @@ -36,7 +36,6 @@ import java.util.Arrays; import java.util.List; -import static com.alibaba.polardbx.executor.operator.AbstractHashJoinExec.LIST_END; import static com.alibaba.polardbx.executor.utils.ExecUtils.buildOneChunk; /** @@ -295,7 +294,7 @@ Chunk nextProbeChunk() { } buildHashTable(); // Allocate memory for hash-table - bufferMemoryAllocator.allocateReservedMemory(hashTable.estimateSize()); + bufferMemoryAllocator.allocateReservedMemory(hashTable.estimateSizeInBytes()); 
bufferMemoryAllocator.allocateReservedMemory(SizeOf.sizeOf(positionLinks)); Chunk ret = savePopChunk; this.savePopChunk = null; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/LookupTableScanExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/LookupTableScanExec.java index 9374bc322..f8c475b0b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/LookupTableScanExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/LookupTableScanExec.java @@ -23,6 +23,7 @@ import com.alibaba.polardbx.common.jdbc.UnionBytesSql; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.utils.MathUtils; +import com.alibaba.polardbx.common.utils.TStringUtil; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.mpp.metadata.Split; import com.alibaba.polardbx.executor.mpp.split.JdbcSplit; @@ -30,19 +31,14 @@ import com.alibaba.polardbx.executor.operator.lookup.ShardingLookupConditionBuilder; import com.alibaba.polardbx.executor.operator.spill.SpillerFactory; import com.alibaba.polardbx.executor.utils.ExecUtils; -import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.join.LookupEquiJoinKey; import com.alibaba.polardbx.optimizer.core.join.LookupPredicate; import com.alibaba.polardbx.optimizer.core.rel.LogicalView; -import com.alibaba.polardbx.optimizer.core.rel.PhyTableScanBuilder; import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; import com.alibaba.polardbx.optimizer.optimizeralert.OptimizerAlertUtil; -import com.alibaba.polardbx.optimizer.partition.PartitionInfo; -import com.alibaba.polardbx.optimizer.partition.PartitionInfoManager; import com.alibaba.polardbx.optimizer.partition.pruning.PartLookupPruningCache; -import com.alibaba.polardbx.optimizer.rule.TddlRuleManager; import com.alibaba.polardbx.optimizer.utils.RelUtils; import com.google.common.base.Preconditions; import org.apache.calcite.sql.SqlBasicCall; @@ -60,11 +56,8 @@ import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; import static com.alibaba.polardbx.common.exception.code.ErrorCode.ERR_EXECUTE_ON_MYSQL; -import static com.alibaba.polardbx.common.utils.GeneralUtil.buildPhysicalQuery; public class LookupTableScanExec extends TableScanExec implements LookupTableExec { @@ -411,8 +404,10 @@ public BytesSql getUnionBytesSql(boolean ignore) { } for (SqlNode condition : lookupConditions) { if (condition != null) { - query = StringUtils.replace(query, "'bka_magic' = 'bka_magic'", - RelUtils.toNativeSql(condition), 1); + String conditionSql = RelUtils.toNativeSql(condition); + // escape condition sql using mysql escape char '\' + String dialectSql = TStringUtil.escape(conditionSql, '\\', '\\'); + query = StringUtils.replace(query, "'bka_magic' = 'bka_magic'", dialectSql, 1); } } hintSql = query; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/MaterializedSemiJoinExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/MaterializedSemiJoinExec.java index 62e5f1d47..18392aa0d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/MaterializedSemiJoinExec.java +++ 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/MaterializedSemiJoinExec.java @@ -16,10 +16,6 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ListenableFuture; -import com.alibaba.polardbx.common.exception.TddlRuntimeException; -import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.operator.util.BufferInputBatchQueue; @@ -29,6 +25,8 @@ import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; import com.alibaba.polardbx.optimizer.memory.MemoryPool; import com.alibaba.polardbx.optimizer.memory.MemoryPoolUtils; +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.ListenableFuture; import org.apache.calcite.rel.core.JoinRelType; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/MergeSortExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/MergeSortExec.java index 22f614fb4..f8677bef5 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/MergeSortExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/MergeSortExec.java @@ -16,7 +16,6 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.chunk.ChunkBuilder; import com.alibaba.polardbx.executor.mpp.operator.WorkProcessor; @@ -26,6 +25,7 @@ import com.alibaba.polardbx.executor.utils.OrderByOption; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.google.common.util.concurrent.ListenableFuture; import java.util.List; import java.util.function.BiPredicate; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/MergeSortWithBufferTableScanClient.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/MergeSortWithBufferTableScanClient.java index d2fda14be..b129add33 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/MergeSortWithBufferTableScanClient.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/MergeSortWithBufferTableScanClient.java @@ -16,7 +16,6 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.collect.Lists; import com.alibaba.polardbx.common.exception.MemoryNotEnoughException; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.properties.ConnectionParams; @@ -33,6 +32,7 @@ import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; import com.alibaba.polardbx.optimizer.memory.MemoryPool; import com.alibaba.polardbx.optimizer.memory.MemoryType; +import com.google.common.collect.Lists; import java.sql.ResultSet; import java.util.LinkedList; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/NestedLoopJoinExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/NestedLoopJoinExec.java index 3c8899a6f..ff45e471d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/NestedLoopJoinExec.java +++ 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/NestedLoopJoinExec.java @@ -16,7 +16,6 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.chunk.Chunk; @@ -26,6 +25,7 @@ import com.alibaba.polardbx.optimizer.core.row.JoinRow; import com.alibaba.polardbx.optimizer.core.row.Row; import com.alibaba.polardbx.optimizer.memory.MemoryPoolUtils; +import com.google.common.util.concurrent.ListenableFuture; import org.apache.calcite.rel.core.JoinRelType; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/NonBlockGeneralSourceExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/NonBlockGeneralSourceExec.java index 22b141a68..4af7f0357 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/NonBlockGeneralSourceExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/NonBlockGeneralSourceExec.java @@ -71,7 +71,6 @@ void doOpen() { final String defaultSchema = schema; this.listenableFuture = context.getExecutorService().submitListenableFuture(schema, traceId, -1, () -> { - RelMetadataQuery.THREAD_PROVIDERS.set(JaninoRelMetadataProvider.of(DrdsRelMetadataProvider.INSTANCE)); long startExecNano = System.nanoTime(); long threadCpuTime = ThreadCpuStatUtil.getThreadCpuTimeNano(); DefaultSchema.setSchemaName(defaultSchema); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/NonFrameOverWindowExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/NonFrameOverWindowExec.java index 028ced1b8..3e0c5da34 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/NonFrameOverWindowExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/NonFrameOverWindowExec.java @@ -16,15 +16,15 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.chunk.Block; import com.alibaba.polardbx.executor.chunk.BlockBuilder; import com.alibaba.polardbx.executor.chunk.BlockBuilders; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListenableFuture; import java.util.List; @@ -56,6 +56,7 @@ public class NonFrameOverWindowExec extends AbstractExecutor { private boolean isFinish; private ListenableFuture blocked; + private boolean needToProcessEachRow; public NonFrameOverWindowExec(Executor input, ExecutionContext context, List aggregators, List partitionIndexes, boolean[] resetAccumulators, @@ -72,8 +73,6 @@ public NonFrameOverWindowExec(Executor input, ExecutionContext context, List t.open(1)); - aggregators.forEach(Aggregator::appendInitValue); } private void processFirstLine(Chunk.ChunkRow chunkRow, int rowsCount) { @@ -81,14 +80,17 @@ private void processFirstLine(Chunk.ChunkRow chunkRow, int rowsCount) { if (changePartition) { lastPartition = chunkRow; } + Aggregator 
tempAggregator; for (int i = 0; i < aggregators.size(); i++) { if (resetAccumulators[i] || changePartition) { - aggregators.get(i).resetToInitValue(0); + aggregators.set(i, aggregators.get(i).getNew()); } - aggregators.get(i).accumulate(0, chunkRow.getChunk(), chunkRow.getPosition()); + tempAggregator = aggregators.get(i); + tempAggregator.aggregate(chunkRow); + Object result = tempAggregator.value(); blockBuilders[i] = BlockBuilders.create(dataTypes.get(i + input.getDataTypes().size()), context); - aggregators.get(i).writeResultTo(0, blockBuilders[i]); + blockBuilders[i].writeObject(result); } } @@ -111,10 +113,11 @@ Chunk doNextChunk() { } for (int i = 0; i < aggFuncNumber; i++) { if (resetAccumulators[i] || changePartition) { - aggregators.get(i).resetToInitValue(0); + aggregators.set(i, aggregators.get(i).getNew()); } - aggregators.get(i).accumulate(0, chunkRow.getChunk(), chunkRow.getPosition()); - aggregators.get(i).writeResultTo(0, blockBuilders[i]); + aggregators.get(i).aggregate(chunkRow); + Object result = aggregators.get(i).value(); + blockBuilders[i].writeObject(result); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/OSSTableScanClient.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/OSSTableScanClient.java index f77ad2c4b..2234cf84e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/OSSTableScanClient.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/OSSTableScanClient.java @@ -30,10 +30,16 @@ import com.alibaba.polardbx.executor.archive.reader.OSSColumnTransformer; import com.alibaba.polardbx.executor.archive.reader.OSSReadOption; import com.alibaba.polardbx.executor.archive.reader.UnPushableORCReaderTask; +import com.alibaba.polardbx.executor.chunk.Block; import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.chunk.IntegerBlock; +import com.alibaba.polardbx.executor.gms.ColumnarManager; +import com.alibaba.polardbx.executor.gms.ColumnarStoreUtils; +import com.alibaba.polardbx.executor.gms.DynamicColumnarManager; import com.alibaba.polardbx.executor.mpp.deploy.ServiceProvider; import com.alibaba.polardbx.executor.mpp.split.OssSplit; import com.alibaba.polardbx.optimizer.config.table.FileMeta; +import com.alibaba.polardbx.optimizer.config.table.OSSOrcFileMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.TddlOperatorTable; import com.alibaba.polardbx.optimizer.core.datatype.DataType; @@ -47,7 +53,11 @@ import org.apache.calcite.rex.RexBuilder; import org.apache.calcite.rex.RexCall; import org.apache.calcite.rex.RexNode; +import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector; import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.orc.TypeDescription; +import org.apache.orc.UserMetadataUtil; +import org.apache.orc.impl.OrcTail; import java.io.Closeable; import java.util.ArrayList; @@ -68,28 +78,22 @@ public class OSSTableScanClient implements Closeable { public static final Logger LOGGER = LoggerFactory.getLogger(OSSTableScanClient.class); + public static final int TIMEOUT = 8000000; private static final int POOL_SIZE = 2; - public static final int TIMEOUT = 8000; - + private final int chunkLimit; + private final List> blockedCallers = new ArrayList<>(); + protected TddlRuntimeException exception = null; private OSSTableClientInitializer initializer; - private List splits; private int splitIndex; - private Map> 
poolMap; private ConcurrentLinkedQueue results; - private ExecutionContext context; private SessionProperties sessionProperties; - private final int chunkLimit; private volatile SettableFuture waitBloomFilterFuture = null; private volatile boolean needWaitBloomFilter; - private boolean isFinished; private boolean isClosed; - protected TddlRuntimeException exception = null; - - private final List> blockedCallers = new ArrayList<>(); private PrefetchThread prefetchThread; private List registeredOrcTasks = new ArrayList<>(); @@ -232,6 +236,58 @@ public void close() { notifyBlockedCallers(); } + private TypeDescription.RowBatchVersion getRowBatchVersion(FileMeta fileMeta) { + if (fileMeta instanceof OSSOrcFileMeta) { + return ((OSSOrcFileMeta) fileMeta).isEnableDecimal64() ? TypeDescription.RowBatchVersion.USE_DECIMAL64 : + TypeDescription.RowBatchVersion.ORIGINAL; + } + return TypeDescription.RowBatchVersion.ORIGINAL; + } + + /** + * The Pool holds all the buffers used by a Prefetch thread. + * + * @param buffer type. + */ + private static class Pool { + private final ArrayBlockingQueue pool; + + private final int poolSize; + + /** + * Timeout in millis + */ + private long timeout; + private volatile boolean initialized; + + public Pool(int poolSize, long timeout) { + this.pool = new ArrayBlockingQueue<>(poolSize); + this.poolSize = poolSize; + this.timeout = timeout; + this.initialized = false; + } + + public void supply(Supplier supplier) { + int n = this.poolSize; + while (!initialized && n-- > 0) { + this.pool.add(supplier.get()); + } + initialized = true; + } + + public T poll() throws InterruptedException { + // When the consumer is slower than the producer, + // wait up to timeout milliseconds + return this.pool.poll(timeout, TimeUnit.MILLISECONDS); + } + + public void recycle(T element) throws InterruptedException { + // There will be 0, 1, ... n-1 elements in an n-sized pool (at least one element has been fetched), + // so recycling can never fail. + this.pool.offer(element, timeout, TimeUnit.MILLISECONDS); + } + } + private class PrefetchThread implements Callable { private volatile boolean isCancelled; @@ -252,12 +308,35 @@ public Object call() { // initial the split (e.g. orc pruning) List readOptions = initializer.lazyInitSplit(splitIndex); + + // for each split, check its delta read option first. + OssSplit split = splits.get(splitIndex); + OssSplit.DeltaReadOption deltaReadOption; + if ((deltaReadOption = split.getDeltaReadOption()) != null) { + // It must be in columnar mode when the delta read option is not null. + ColumnarManager columnarManager = ColumnarManager.getInstance(); + + final long checkpointTso = deltaReadOption.getCheckpointTso(); + final Map> allCsvFiles = deltaReadOption.getAllCsvFiles(); + final List projectColumnIndexes = deltaReadOption.getProjectColumnIndexes(); + + allCsvFiles.values().stream().flatMap(List::stream).forEach( + csvFile -> foreachDeltaFile(csvFile, checkpointTso, columnarManager, + projectColumnIndexes) + ); + } + + Long checkpointTso = splits.get(splitIndex).getCheckpointTso(); for (OSSReadOption readOption : readOptions) { for (int i = 0; i < readOption.getTableFileList().size(); i++) { + FileMeta fileMeta = readOption.getPhyTableFileMetas().get(i); + TypeDescription.RowBatchVersion rowBatchVersion = getRowBatchVersion(fileMeta); + // supply initial elements to pool. 
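The `Pool` above (moved up from the bottom of the file, where the old copy is removed further down) is a fixed-size recycling buffer between the prefetch producer and the scan consumer: `supply` seeds it exactly once, then `poll`/`recycle` pass batches back and forth with a timeout. The `supply` call that seeds it per read option continues directly below. For reference, here is a self-contained sketch of the same pattern, with a generic element type standing in for `VectorizedRowBatch`:

```java
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

// Minimal standalone sketch of the fixed-size recycling pool used by the
// prefetch thread: the producer polls an idle buffer, fills it, hands it to
// the consumer, and the consumer returns it via recycle().
class RecyclingPool<T> {
    private final ArrayBlockingQueue<T> pool;
    private final int poolSize;
    private final long timeoutMillis;
    private volatile boolean initialized;

    RecyclingPool(int poolSize, long timeoutMillis) {
        this.pool = new ArrayBlockingQueue<>(poolSize);
        this.poolSize = poolSize;
        this.timeoutMillis = timeoutMillis;
    }

    // Pre-fill the pool exactly once; later calls are no-ops.
    void supply(Supplier<T> supplier) {
        int n = poolSize;
        while (!initialized && n-- > 0) {
            pool.add(supplier.get());
        }
        initialized = true;
    }

    T poll() throws InterruptedException {
        return pool.poll(timeoutMillis, TimeUnit.MILLISECONDS);
    }

    void recycle(T element) throws InterruptedException {
        pool.offer(element, timeoutMillis, TimeUnit.MILLISECONDS);
    }
}
```

Because `supply` is guarded only by the `initialized` flag rather than a lock, it relies on being called from a single prefetch thread, which is how the patch appears to use it.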
- poolMap.get(splitIndex).supply(() -> readOption.getReadSchema().createRowBatch(chunkLimit)); + poolMap.get(splitIndex) + .supply(() -> readOption.getReadSchema().createRowBatch(rowBatchVersion, chunkLimit)); // do fetch file rows. - foreachFile(readOption, i, splitIndex); + foreachFile(readOption, i, splitIndex, checkpointTso); } } } @@ -271,10 +350,49 @@ public Object call() { return null; } - private void foreachFile(OSSReadOption readOption, int fileIndex, int poolIndex) { + private void foreachDeltaFile(String csvFile, long tso, ColumnarManager columnarManager, + List projectColumnIndexes) { + List chunkList = columnarManager.csvData(tso, csvFile); + int chunkIndex = 0; + while (!isCancelled) { + try { + if (chunkIndex < chunkList.size()) { + Chunk chunk = chunkList.get(chunkIndex); + + // fill selection array in columnar store mode. + int[] selection = new int[chunk.getPositionCount()]; + IntegerBlock integerBlock = + chunk.getBlock(ColumnarStoreUtils.POSITION_COLUMN_INDEX).cast( + IntegerBlock.class); + int selSize = columnarManager.fillSelection(csvFile, tso, selection, integerBlock); + + // project columns at given index. + Block[] projectBlocks = projectColumnIndexes.stream() + .map(chunk::getBlock).collect(Collectors.toList()).toArray(new Block[0]); + Chunk result = new Chunk(projectBlocks); + + ResultFromOSS resultFromOSS = new ResultFromOSS(result, true); + resultFromOSS.setSelSize(selSize); + resultFromOSS.setSelection(selection); + results.add(resultFromOSS); + + chunkIndex++; + notifyBlockedCallers(); + } else { + // no more chunks + return; + } + } catch (Throwable t) { + setException(new TddlRuntimeException(ErrorCode.ERR_EXECUTE_ON_OSS, t, t.getMessage())); + } + } + } + + private void foreachFile(OSSReadOption readOption, int fileIndex, int poolIndex, Long checkpointTso) { String tableFile = readOption.getTableFileList().get(fileIndex); FileMeta fileMeta = readOption.getPhyTableFileMetas().get(fileIndex); PruningResult pruningResult = readOption.getPruningResultList().get(fileIndex); + final String fileName = fileMeta.getFileName(); // build orc reader task for each file. UnPushableORCReaderTask task = @@ -306,14 +424,29 @@ private void foreachFile(OSSReadOption readOption, int fileIndex, int poolIndex) // the result chunk comes from statistics if (readResult.getChunk() != null) { // need to recycle the batch - results.add(new ResultFromOSS(readResult.getChunk())); + results.add(new ResultFromOSS(readResult.getChunk(), false)); } else { // Fill the result and notify the block callers. // Do not recycle the batch. needRecycle = false; - results.add(new ResultFromOSS(batch, + ResultFromOSS resultFromOSS = new ResultFromOSS(batch, task.getOssReadOption().getOssColumnTransformer(), - poolIndex)); + poolIndex); + if (readOption.isColumnarIndex()) { + // fill selection array in columnar store mode. + ColumnarManager columnarManager = ColumnarManager.getInstance(); + int[] selection = new int[batch.size]; + + // in columnar mode, we set implicit column in first column index. 
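In columnar mode the hunks above attach a selection array to each result: `fillSelection` decides, per row position, whether the row is still visible at the checkpoint TSO and compacts the surviving row indexes to the front (the fill over `LongColumnVector` continues just below). A simplified sketch of that contract follows; the `Set` of visible positions is a stand-in for the delete-bitmap lookup the real `ColumnarManager` performs.

```java
import java.util.Set;

// Simplified sketch of the fillSelection contract assumed by the code above:
// given the per-row position column and the set of positions still visible
// at the checkpoint TSO, collect the visible row indexes into `selection`
// and return how many were kept.
final class SelectionSketch {
    static int fillSelection(long[] positionColumn, Set<Long> visibleAtTso, int[] selection) {
        int selSize = 0;
        for (int row = 0; row < positionColumn.length; row++) {
            if (visibleAtTso.contains(positionColumn[row])) {
                selection[selSize++] = row; // keep this row
            }
        }
        return selSize; // consumers read selection[0..selSize)
    }
}
```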
+ LongColumnVector longColumnVector = (LongColumnVector) batch.cols[0]; + int selSize = + columnarManager.fillSelection(fileName, checkpointTso, selection, longColumnVector, + batch.size); + + resultFromOSS.setSelSize(selSize); + resultFromOSS.setSelection(selection); + } + results.add(resultFromOSS); } notifyBlockedCallers(); } catch (Throwable t) { @@ -331,58 +464,13 @@ private void foreachFile(OSSReadOption readOption, int fileIndex, int poolIndex) } } - /** - * The Pool hold all the buffer in a Prefetch thread. - * - * @param buffer type. - */ - private static class Pool { - private final ArrayBlockingQueue pool; - - private final int poolSize; - - /** - * Timeout in millis - */ - private long timeout; - private volatile boolean initialized; - - public Pool(int poolSize, long timeout) { - this.pool = new ArrayBlockingQueue<>(poolSize); - this.poolSize = poolSize; - this.timeout = timeout; - this.initialized = false; - } - - public void supply(Supplier supplier) { - int n = this.poolSize; - while (!initialized && n-- > 0) { - this.pool.add(supplier.get()); - } - initialized = true; - } - - public T poll() throws InterruptedException { - // When the consumer is slower than producer. - // Wait for timeout-milliseconds - return this.pool.poll(timeout, TimeUnit.MILLISECONDS); - } - - public void recycle(T element) throws InterruptedException { - // There will be 0, 1, ... n-1 elements in n-size pool. (at least 1 element has been fetched.) - // So it's impossible to fail to recycle elements. - this.pool.offer(element, timeout, TimeUnit.MILLISECONDS); - } - } - private class OSSTableClientInitializer { + private final Object lock = new Object(); private OSSTableScan ossTableScan; private volatile Map bloomFilterInfos; private RexNode bloomFilterCondition; private volatile ScheduledFuture monitorWaitBloomFilterFuture; - private final Object lock = new Object(); - OSSTableClientInitializer(OSSTableScan ossTableScan) { this.ossTableScan = ossTableScan; } @@ -461,13 +549,16 @@ public class ResultFromOSS { private Chunk chunk; private VectorizedRowBatch batch; + private boolean isDelta; + private int[] selection; + private int selSize; private int poolIndex; - private OSSColumnTransformer ossColumnTransformer; - public ResultFromOSS(Chunk chunk) { + public ResultFromOSS(Chunk chunk, boolean isDelta) { this.chunk = chunk; this.batch = null; + this.isDelta = isDelta; } public ResultFromOSS(VectorizedRowBatch batch, @@ -475,6 +566,7 @@ public ResultFromOSS(VectorizedRowBatch batch, int poolIndex) { this.chunk = null; this.batch = batch; + this.isDelta = false; this.ossColumnTransformer = ossColumnTransformer; this.poolIndex = poolIndex; } @@ -483,6 +575,22 @@ public int getPoolIndex() { return poolIndex; } + public int[] getSelection() { + return selection; + } + + public void setSelection(int[] selection) { + this.selection = selection; + } + + public int getSelSize() { + return selSize; + } + + public void setSelSize(int selSize) { + this.selSize = selSize; + } + public VectorizedRowBatch getBatch() { return batch; } @@ -502,5 +610,9 @@ public OSSColumnTransformer getOssColumnTransformer() { boolean shouldRecycle() { return batch != null; } + + public boolean isDelta() { + return isDelta; + } } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/OverWindowFramesExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/OverWindowFramesExec.java index 2d40ba9d5..6de16fc79 100644 --- 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/OverWindowFramesExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/OverWindowFramesExec.java @@ -18,15 +18,18 @@ import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; -import com.alibaba.polardbx.executor.calc.Aggregator; import com.alibaba.polardbx.executor.chunk.Block; import com.alibaba.polardbx.executor.chunk.BlockBuilder; import com.alibaba.polardbx.executor.chunk.BlockBuilders; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.operator.frame.OverWindowFrame; import com.alibaba.polardbx.executor.operator.util.ChunksIndex; +import com.alibaba.polardbx.executor.operator.util.DataTypeUtils; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.AvgV2; import com.google.common.collect.ImmutableList; import com.google.common.util.concurrent.ListenableFuture; @@ -86,21 +89,28 @@ public OverWindowFramesExec(Executor input, ExecutionContext context, OverWindow Arrays.stream(overWindowFrames).map(OverWindowFrame::getAggregators).flatMap(List::stream) .collect(Collectors.toList()); targetTypes = new DataType[collect.size()]; + converts = new boolean[collect.size()]; for (int i = 0; i < blockBuilders.length; i++) { DataType dataType = columnMetas.get(input.getDataTypes().size() + i); targetTypes[i] = dataType; + if (dataType == DataTypes.DoubleType && collect.get(i) instanceof AvgV2) { + converts[i] = true; + continue; + } + if (collect.get(i).getAggTargetIndexes() == null || collect.get(i).getAggTargetIndexes().length == 0) { + converts[i] = false; + continue; + } + int aggTargetIndex = collect.get(i).getAggTargetIndexes()[0]; + converts[i] = input.getDataTypes().get(aggTargetIndex) != dataType; } + } // put a chunk in when opening @Override void doOpen() { input.open(); - List collect = - Arrays.stream(overWindowFrames).map(OverWindowFrame::getAggregators).flatMap(List::stream) - .collect(Collectors.toList()); - collect.forEach(t -> t.open(1)); - collect.forEach(Aggregator::appendInitValue); } @Override @@ -264,20 +274,21 @@ private void processRow(int rowIndex) { return; } for (int i = 0, m = 0; i < overWindowFrames.length; i++) { - overWindowFrames[i].processData(lastRowInPreviousChunk + rowIndex); - List aggregators = overWindowFrames[i].getAggregators(); + List result = overWindowFrames[i].processData(lastRowInPreviousChunk + rowIndex); if (rowIndex == 0) { - for (int j = 0; j < aggregators.size(); j++) { - //aggregator + List aggregator = overWindowFrames[i].getAggregators(); + for (int j = 0; j < aggregator.size(); j++) { + // aggregator int aggPosition = m + j; blockBuilders[aggPosition] = BlockBuilders .create(targetTypes[aggPosition], context); } } - for (int j = 0; j < aggregators.size(); j++) { - aggregators.get(j).writeResultTo(0, blockBuilders[m + j]); + for (int j = 0; j < overWindowFrames[i].getAggregators().size(); j++) { + blockBuilders[m + j].writeObject( + converts[m + j] ? 
DataTypeUtils.convert(targetTypes[m + j], result.get(j)) : result.get(j)); } - m += aggregators.size(); + m += overWindowFrames[i].getAggregators().size(); } } @@ -321,5 +332,6 @@ public boolean produceIsFinished() { public ListenableFuture produceIsBlocked() { return blocked; } + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ParallelHashJoinExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ParallelHashJoinExec.java index 65509f6b5..fb6a6e936 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ParallelHashJoinExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ParallelHashJoinExec.java @@ -16,29 +16,37 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ListenableFuture; -import com.alibaba.polardbx.common.utils.memory.SizeOf; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.chunk.Block; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.chunk.ChunkConverter; +import com.alibaba.polardbx.executor.chunk.IntegerBlock; import com.alibaba.polardbx.executor.operator.util.ChunksIndex; -import com.alibaba.polardbx.executor.operator.util.ConcurrentRawHashTable; +import com.alibaba.polardbx.executor.mpp.execution.TaskExecutor; +import com.alibaba.polardbx.executor.operator.util.AntiJoinResultIterator; +import com.alibaba.polardbx.executor.operator.util.TypedList; +import com.alibaba.polardbx.executor.operator.util.TypedListHandle; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.IntegerType; +import com.alibaba.polardbx.optimizer.core.datatype.LongType; import com.alibaba.polardbx.optimizer.core.expression.calc.IExpression; +import com.alibaba.polardbx.optimizer.core.expression.calc.InputRefExpression; +import com.alibaba.polardbx.optimizer.core.expression.calc.ScalarFunctionExpression; +import com.alibaba.polardbx.optimizer.core.function.calc.scalar.filter.NotEqual; +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.optimizer.core.join.EquiJoinKey; import com.alibaba.polardbx.optimizer.core.row.JoinRow; import com.alibaba.polardbx.optimizer.core.row.Row; -import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; import com.alibaba.polardbx.optimizer.memory.MemoryPoolUtils; -import com.alibaba.polardbx.common.utils.bloomfilter.FastIntBloomFilter; import org.apache.calcite.rel.core.JoinRelType; -import java.util.Arrays; -import java.util.BitSet; -import java.util.HashSet; +import java.text.MessageFormat; import java.util.List; -import java.util.Set; -import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.StampedLock; import static com.alibaba.polardbx.executor.utils.ExecUtils.buildOneChunk; @@ -47,18 +55,15 @@ * */ public class ParallelHashJoinExec extends AbstractHashJoinExec { + private static final Logger LOGGER = LoggerFactory.getLogger(TaskExecutor.class); + private final int probeParallelism; + private final JoinKeyType joinKeyType; private Synchronizer shared; - private boolean finished; private ListenableFuture 
blocked; - - protected boolean buildOuterInput; - private int buildChunkSize = 0; - private int operatorIndex = -1; - private boolean probeInputIsFinish = false; public ParallelHashJoinExec(Synchronizer synchronizer, @@ -71,17 +76,677 @@ public ParallelHashJoinExec(Synchronizer synchronizer, List antiJoinOperands, boolean buildOuterInput, ExecutionContext context, - int operatorIndex) { + int operatorIndex, + int probeParallelism, + boolean keepPartition) { super(outerInput, innerInput, joinType, maxOneRow, joinKeyTuples, otherCondition, antiJoinOperands, context); + super.keepPartition = keepPartition; this.shared = Preconditions.checkNotNull(synchronizer); this.finished = false; this.blocked = ProducerExecutor.NOT_BLOCKED; this.buildOuterInput = buildOuterInput; this.operatorIndex = operatorIndex; + this.probeParallelism = probeParallelism; if (buildOuterInput) { - this.shared.recordOperatorIds(operatorIndex); + if (super.semiJoin) { + // do nothing + } else { + this.shared.recordOperatorIds(operatorIndex); + } + } + this.joinKeyType = getJoinKeyType(joinKeys); + + boolean enableVecBuildJoinRow = + context.getParamManager().getBoolean(ConnectionParams.ENABLE_VEC_BUILD_JOIN_ROW); + + boolean isNotNullSafeJoin = joinKeys.stream().noneMatch(EquiJoinKey::isNullSafeEqual); + boolean isSimpleInnerJoin = + joinType == JoinRelType.INNER && condition == null && !semiJoin && isNotNullSafeJoin; + + if (isSimpleInnerJoin) { + buildSimpleInnerProbe(enableVecBuildJoinRow); + } else if (joinType == JoinRelType.SEMI && buildOuterInput) { + buildReverseSemiProbe(synchronizer, isNotNullSafeJoin, enableVecBuildJoinRow); + } else if (joinType == JoinRelType.ANTI && buildOuterInput) { + buildReverseAntiProbe(synchronizer, isNotNullSafeJoin, enableVecBuildJoinRow); + } else if (joinType == JoinRelType.SEMI || joinType == JoinRelType.ANTI) { + buildVecSemiAntiProbe(isNotNullSafeJoin, enableVecBuildJoinRow); + } else { + buildDefaultProbe(); + } + } + + private void buildDefaultProbe() { + this.probeOperator = new DefaultProbeOperator(true); + } + + private JoinKeyType getJoinKeyType(List joinKeys) { + if (joinKeys.size() == 1) { + EquiJoinKey equiJoinKey = joinKeys.get(0); + DataType outerType = outerInput.getDataTypes().get(equiJoinKey.getOuterIndex()); + DataType innerType = innerInput.getDataTypes().get(equiJoinKey.getInnerIndex()); + if (!innerType.equalDeeply(outerType)) { + return JoinKeyType.OTHER; + } + boolean isSingleLongType = equiJoinKey.getUnifiedType() instanceof LongType; + if (isSingleLongType) { + return JoinKeyType.LONG; + } + boolean isSingleIntegerType = equiJoinKey.getUnifiedType() instanceof IntegerType; + if (isSingleIntegerType) { + return JoinKeyType.INTEGER; + } + return JoinKeyType.OTHER; + } + + boolean isMultiIntegerType = + joinKeys.stream().allMatch(key -> { + DataType outerType = outerInput.getDataTypes().get(key.getOuterIndex()); + DataType innerType = innerInput.getDataTypes().get(key.getInnerIndex()); + boolean isSame = innerType.equalDeeply(outerType); + boolean isInteger = key.getUnifiedType() instanceof IntegerType; + return isInteger && isSame; + }); + if (isMultiIntegerType) { + return JoinKeyType.MULTI_INTEGER; + } + return JoinKeyType.OTHER; + } + + private void buildSimpleInnerProbe(boolean enableVecBuildJoinRow) { + if (!useVecJoin) { + buildDefaultProbe(); + return; + } + switch (joinKeyType) { + case LONG: + buildSingleLongProbe(enableVecBuildJoinRow); + break; + case INTEGER: + buildSingleIntProbe(enableVecBuildJoinRow); + break; + case MULTI_INTEGER: + 
buildMultiIntProbe(enableVecBuildJoinRow); + break; + case OTHER: + buildDefaultProbe(); + break; + default: + throw new UnsupportedOperationException( + "Unsupported joinKeyType: " + joinKeyType + ", should not reach here"); + } + } + + private void buildSingleLongProbe(boolean enableVecBuildJoinRow) { + this.probeOperator = new LongProbeOperator(enableVecBuildJoinRow); + shared.builderKeyChunks.setTypedHashTable(new TypedListHandle() { + private TypedList[] typedLists; + + @Override + public long estimatedSize(int fixedSize) { + return TypedList.IntTypedList.estimatedSizeInBytes(fixedSize); + } + + @Override + public TypedList[] getTypedLists(int fixedSize) { + if (typedLists == null) { + typedLists = new TypedList[] {TypedList.createLong(fixedSize)}; + } + return typedLists; + } + + @Override + public void consume(Chunk chunk, int sourceIndex) { + chunk.getBlock(0).cast(Block.class) + .appendTypedHashTable(typedLists[0], sourceIndex, 0, chunk.getPositionCount()); + } + }); + } + + private void buildSingleIntProbe(boolean enableVecBuildJoinRow) { + this.probeOperator = new IntProbeOperator(enableVecBuildJoinRow); + shared.builderKeyChunks.setTypedHashTable(new TypedListHandle() { + private TypedList[] typedLists; + + @Override + public long estimatedSize(int fixedSize) { + return TypedList.IntTypedList.estimatedSizeInBytes(fixedSize); + } + + @Override + public TypedList[] getTypedLists(int fixedSize) { + if (typedLists == null) { + typedLists = new TypedList[] {TypedList.createInt(fixedSize)}; + } + return typedLists; + } + + @Override + public void consume(Chunk chunk, int sourceIndex) { + chunk.getBlock(0).cast(Block.class) + .appendTypedHashTable(typedLists[0], sourceIndex, 0, chunk.getPositionCount()); + } + }); + } + + private void buildMultiIntProbe(boolean enableVecBuildJoinRow) { + this.probeOperator = new MultiIntProbeOperator(joinKeys.size(), enableVecBuildJoinRow); + shared.builderKeyChunks.setTypedHashTable(new TypedListHandle() { + private TypedList[] typedLists; + + @Override + public long estimatedSize(int fixedSize) { + final int targetSize = fixedSize * ((getBuildKeyChunkGetter().columnWidth() + 1) / 2); + return TypedList.LongTypedList.estimatedSizeInBytes(targetSize); + } + + @Override + public TypedList[] getTypedLists(int fixedSize) { + // The width of comparison serialized number is (blockCount + 1) / 2 + if (typedLists == null) { + final int targetSize = fixedSize * ((getBuildKeyChunkGetter().columnWidth() + 1) / 2); + typedLists = new TypedList[] {TypedList.createLong(targetSize)}; + } + return typedLists; + } + + @Override + public void consume(Chunk chunk, int sourceIndex) { + final int blockCount = chunk.getBlockCount(); + final int positionCount = chunk.getPositionCount(); + int[][] arrays = new int[blockCount][0]; + for (int blockIndex = 0; blockIndex < blockCount; blockIndex++) { + arrays[blockIndex] = chunk.getBlock(blockIndex).cast(IntegerBlock.class).intArray(); + } + + // The width of comparison serialized number is (blockCount + 1) / 2 + if (blockCount % 2 == 0) { + // when block count is even number. + for (int i = 0; i < positionCount; i++) { + for (int blockIndex = 0; blockIndex < blockCount; blockIndex += 2) { + long serialized = + TypedListHandle.serialize(arrays[blockIndex][i], arrays[blockIndex + 1][i]); + typedLists[0].setLong(sourceIndex++, serialized); + } + } + } else { + // when block count is odd number. 
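The `consume` implementation above serializes multi-integer join keys two per `long`, which is why both `estimatedSize` and `getTypedLists` size the backing list at `(blockCount + 1) / 2` longs per row; the odd-count branch it mentions continues immediately below. Here is a standalone sketch of the packing, assuming `TypedListHandle.serialize` lays out the first int in the high 32 bits:

```java
// Sketch of the two-ints-per-long packing used for multi-integer join keys.
// serialize() here is an assumption about TypedListHandle.serialize's layout
// (high int | low int); an odd trailing key is padded with 0, which is why
// the serialized width per row is (blockCount + 1) / 2 longs.
final class KeyPacking {
    static long serialize(int hi, int lo) {
        return ((long) hi << 32) | (lo & 0xFFFFFFFFL);
    }

    // Pack one row's key columns into longs.
    static long[] packRow(int[] keyColumns) {
        int width = (keyColumns.length + 1) / 2;
        long[] packed = new long[width];
        int out = 0;
        int i = 0;
        for (; i + 1 < keyColumns.length; i += 2) {
            packed[out++] = serialize(keyColumns[i], keyColumns[i + 1]);
        }
        if (i < keyColumns.length) { // odd count: pad the last slot with 0
            packed[out] = serialize(keyColumns[i], 0);
        }
        return packed;
    }
}
```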
+ for (int i = 0; i < positionCount; i++) { + for (int blockIndex = 0; blockIndex < blockCount - 1; blockIndex += 2) { + long serialized = + TypedListHandle.serialize(arrays[blockIndex][i], arrays[blockIndex + 1][i]); + typedLists[0].setLong(sourceIndex++, serialized); + } + long serialized = TypedListHandle.serialize(arrays[blockCount - 1][i], 0); + typedLists[0].setLong(sourceIndex++, serialized); + } + } + + } + }); + } + + private void buildReverseSemiProbe(Synchronizer synchronizer, boolean isNotNullSafeJoin, + boolean enableVecBuildJoinRow) { + // try type-specific implementation first + if (useVecJoin && isNotNullSafeJoin && condition == null) { + switch (joinKeyType) { + case LONG: + buildReverseSemiLongProbe(synchronizer, enableVecBuildJoinRow); + return; + case INTEGER: + buildReverseSemiIntProbe(synchronizer, enableVecBuildJoinRow); + return; + default: + // fall through + } + } + + // try matched condition cases + boolean isSemiLongNotEqInteger = joinKeyType == JoinKeyType.LONG; + boolean isSemiIntegerNotEqInteger = joinKeyType == JoinKeyType.INTEGER; + int buildChunkConditionIndex = -1; + if (!useAntiCondition && isScalarInputRefCondition()) { + boolean isNotEq = ((ScalarFunctionExpression) condition).isA(NotEqual.class); + + isSemiLongNotEqInteger &= isNotEq; + isSemiIntegerNotEqInteger &= isNotEq; + // since this is outer build (reverse) + buildChunkConditionIndex = getBuildChunkConditionIndex(); + } else { + isSemiLongNotEqInteger = false; + isSemiIntegerNotEqInteger = false; + } + + if (useVecJoin && isNotNullSafeJoin && isSemiLongNotEqInteger) { + buildReverseSemiLongNotEqIntProbe(synchronizer, enableVecBuildJoinRow, buildChunkConditionIndex); + return; + } else if (useVecJoin && isNotNullSafeJoin && isSemiIntegerNotEqInteger) { + buildReverseSemiIntNotEqIntProbe(synchronizer, enableVecBuildJoinRow, buildChunkConditionIndex); + return; } + + // normal cases + if (super.condition == null) { + this.probeOperator = new SimpleReverseSemiProbeOperator(synchronizer); + } else { + this.probeOperator = new ReverseSemiProbeOperator(synchronizer); + } + } + + private void buildReverseSemiLongProbe(Synchronizer synchronizer, boolean enableVecBuildJoinRow) { + this.probeOperator = new ReverseSemiLongProbeOperator(synchronizer); + shared.builderKeyChunks.setTypedHashTable(new TypedListHandle() { + private TypedList[] typedLists; + + @Override + public long estimatedSize(int fixedSize) { + return TypedList.LongTypedList.estimatedSizeInBytes(fixedSize); + } + + @Override + public TypedList[] getTypedLists(int fixedSize) { + if (typedLists == null) { + typedLists = new TypedList[] {TypedList.createLong(fixedSize)}; + } + return typedLists; + } + + @Override + public void consume(Chunk chunk, int sourceIndex) { + chunk.getBlock(0).appendTypedHashTable(typedLists[0], sourceIndex, 0, chunk.getPositionCount()); + } + }); + + } + + private void buildReverseSemiIntProbe(Synchronizer synchronizer, boolean enableVecBuildJoinRow) { + this.probeOperator = new ReverseSemiIntProbeOperator(synchronizer); + shared.builderKeyChunks.setTypedHashTable(new TypedListHandle() { + private TypedList[] typedLists; + + @Override + public long estimatedSize(int fixedSize) { + return TypedList.IntTypedList.estimatedSizeInBytes(fixedSize); + } + + @Override + public TypedList[] getTypedLists(int fixedSize) { + if (typedLists == null) { + typedLists = new TypedList[] {TypedList.createInt(fixedSize)}; + } + return typedLists; + } + + @Override + public void consume(Chunk chunk, int sourceIndex) { + 
chunk.getBlock(0).appendTypedHashTable(typedLists[0], sourceIndex, 0, chunk.getPositionCount()); + } + }); + } + + private void buildReverseSemiLongNotEqIntProbe(Synchronizer synchronizer, boolean enableVecBuildJoinRow, + int buildChunkConditionIndex) { + this.probeOperator = new ReverseSemiLongNotEqIntegerProbeOperator(synchronizer); + shared.builderChunks.setTypedHashTable(new TypedListHandle() { + private TypedList[] typedLists; + + @Override + public long estimatedSize(int fixedSize) { + return TypedList.IntTypedList.estimatedSizeInBytes(fixedSize); + } + + @Override + public TypedList[] getTypedLists(int fixedSize) { + if (typedLists == null) { + typedLists = new TypedList[] {TypedList.createInt(fixedSize)}; + } + return typedLists; + } + + @Override + public void consume(Chunk chunk, int sourceIndex) { + chunk.getBlock(buildChunkConditionIndex) + .appendTypedHashTable(typedLists[0], sourceIndex, 0, chunk.getPositionCount()); + } + }); + shared.builderKeyChunks.setTypedHashTable(new TypedListHandle() { + private TypedList[] typedLists; + + @Override + public long estimatedSize(int fixedSize) { + return TypedList.LongTypedList.estimatedSizeInBytes(fixedSize); + } + + @Override + public TypedList[] getTypedLists(int fixedSize) { + if (typedLists == null) { + typedLists = new TypedList[] {TypedList.createLong(fixedSize)}; + } + return typedLists; + } + + @Override + public void consume(Chunk chunk, int sourceIndex) { + chunk.getBlock(0).appendTypedHashTable(typedLists[0], sourceIndex, 0, chunk.getPositionCount()); + } + }); + } + + private void buildReverseSemiIntNotEqIntProbe(Synchronizer synchronizer, boolean enableVecBuildJoinRow, + int buildChunkConditionIndex) { + this.probeOperator = new ReverseSemiIntNotEqIntegerProbeOperator(synchronizer); + shared.builderChunks.setTypedHashTable(new TypedListHandle() { + private TypedList[] typedLists; + + @Override + public long estimatedSize(int fixedSize) { + return TypedList.IntTypedList.estimatedSizeInBytes(fixedSize); + } + + @Override + public TypedList[] getTypedLists(int fixedSize) { + if (typedLists == null) { + typedLists = new TypedList[] {TypedList.createInt(fixedSize)}; + } + return typedLists; + } + + @Override + public void consume(Chunk chunk, int sourceIndex) { + chunk.getBlock(buildChunkConditionIndex) + .appendTypedHashTable(typedLists[0], sourceIndex, 0, chunk.getPositionCount()); + } + }); + shared.builderKeyChunks.setTypedHashTable(new TypedListHandle() { + private TypedList[] typedLists; + + @Override + public long estimatedSize(int fixedSize) { + return TypedList.IntTypedList.estimatedSizeInBytes(fixedSize); + } + + @Override + public TypedList[] getTypedLists(int fixedSize) { + if (typedLists == null) { + typedLists = new TypedList[] {TypedList.createInt(fixedSize)}; + } + return typedLists; + } + + @Override + public void consume(Chunk chunk, int sourceIndex) { + chunk.getBlock(0).appendTypedHashTable(typedLists[0], sourceIndex, 0, chunk.getPositionCount()); + } + }); + } + + private void buildReverseAntiProbe(Synchronizer synchronizer, boolean isNotNullSafeJoin, + boolean enableVecBuildJoinRow) { + // try type-specific implementation first + boolean isAntiLongNotEqInteger = joinKeyType == JoinKeyType.LONG; + boolean isAntiIntegerNotEqInteger = joinKeyType == JoinKeyType.INTEGER; + int buildChunkConditionIndex = -1; + if (!useAntiCondition && isScalarInputRefCondition()) { + boolean isNotEq = ((ScalarFunctionExpression) condition).isA(NotEqual.class); + isAntiLongNotEqInteger &= isNotEq; + isAntiIntegerNotEqInteger &= 
isNotEq; + buildChunkConditionIndex = getBuildChunkConditionIndex(); + } else { + isAntiLongNotEqInteger = false; + isAntiIntegerNotEqInteger = false; + } + + if (useVecJoin && isNotNullSafeJoin && isAntiLongNotEqInteger) { + buildReverseAntiLongNotEqIntProbe(synchronizer, enableVecBuildJoinRow, buildChunkConditionIndex); + return; + } else if (useVecJoin && isNotNullSafeJoin && isAntiIntegerNotEqInteger) { + buildReverseAntiIntNotEqIntProbe(synchronizer, enableVecBuildJoinRow, buildChunkConditionIndex); + return; + } + + if (useVecJoin && isNotNullSafeJoin && joinKeyType == JoinKeyType.INTEGER + && condition == null && !useAntiCondition) { + + buildReverseAntiIntProbe(synchronizer, enableVecBuildJoinRow); + return; + } + + // normal cases + if (this.antiJoinOperands == null && this.antiCondition == null && super.condition == null) { + this.probeOperator = new SimpleReverseAntiProbeOperator(synchronizer); + } else if (this.antiJoinOperands == null && this.antiCondition == null) { + // with join condition + this.probeOperator = new ReverseAntiProbeOperator(synchronizer); + } else { + // should not reach here + throw new RuntimeException(String.format("reverse anti hash join does not support this, " + + "antiJoinOperands is %s, antiCondition is %s", this.antiJoinOperands, this.antiCondition)); + } + } + + private void buildReverseAntiLongNotEqIntProbe(Synchronizer synchronizer, boolean enableVecBuildJoinRow, + int buildChunkConditionIndex) { + this.probeOperator = new ReverseAntiLongNotEqIntegerProbeOperator(synchronizer); + shared.builderChunks.setTypedHashTable(new TypedListHandle() { + private TypedList[] typedLists; + + @Override + public long estimatedSize(int fixedSize) { + return TypedList.IntTypedList.estimatedSizeInBytes(fixedSize); + } + + @Override + public TypedList[] getTypedLists(int fixedSize) { + if (typedLists == null) { + typedLists = new TypedList[] {TypedList.createInt(fixedSize)}; + } + return typedLists; + } + + @Override + public void consume(Chunk chunk, int sourceIndex) { + chunk.getBlock(buildChunkConditionIndex) + .appendTypedHashTable(typedLists[0], sourceIndex, 0, chunk.getPositionCount()); + } + }); + shared.builderKeyChunks.setTypedHashTable(new TypedListHandle() { + private TypedList[] typedLists; + + @Override + public long estimatedSize(int fixedSize) { + return TypedList.LongTypedList.estimatedSizeInBytes(fixedSize); + } + + @Override + public TypedList[] getTypedLists(int fixedSize) { + if (typedLists == null) { + typedLists = new TypedList[] {TypedList.createLong(fixedSize)}; + } + return typedLists; + } + + @Override + public void consume(Chunk chunk, int sourceIndex) { + chunk.getBlock(0).appendTypedHashTable(typedLists[0], sourceIndex, 0, chunk.getPositionCount()); + } + }); + } + + private void buildReverseAntiIntNotEqIntProbe(Synchronizer synchronizer, boolean enableVecBuildJoinRow, + int buildChunkConditionIndex) { + this.probeOperator = new ReverseAntiIntNotEqIntegerProbeOperator(synchronizer); + shared.builderChunks.setTypedHashTable(new TypedListHandle() { + private TypedList[] typedLists; + + @Override + public long estimatedSize(int fixedSize) { + return TypedList.IntTypedList.estimatedSizeInBytes(fixedSize); + } + + @Override + public TypedList[] getTypedLists(int fixedSize) { + if (typedLists == null) { + typedLists = new TypedList[] {TypedList.createInt(fixedSize)}; + } + return typedLists; + } + + @Override + public void consume(Chunk chunk, int sourceIndex) { + chunk.getBlock(buildChunkConditionIndex) + .appendTypedHashTable(typedLists[0], 
sourceIndex, 0, chunk.getPositionCount()); + } + }); + shared.builderKeyChunks.setTypedHashTable(new TypedListHandle() { + private TypedList[] typedLists; + + @Override + public long estimatedSize(int fixedSize) { + return TypedList.IntTypedList.estimatedSizeInBytes(fixedSize); + } + + @Override + public TypedList[] getTypedLists(int fixedSize) { + if (typedLists == null) { + typedLists = new TypedList[] {TypedList.createInt(fixedSize)}; + } + return typedLists; + } + + @Override + public void consume(Chunk chunk, int sourceIndex) { + chunk.getBlock(0).appendTypedHashTable(typedLists[0], sourceIndex, 0, chunk.getPositionCount()); + } + }); + } + + private void buildReverseAntiIntProbe(Synchronizer synchronizer, boolean enableVecBuildJoinRow) { + this.probeOperator = new ReverseAntiIntegerProbeOperator(synchronizer); + shared.builderKeyChunks.setTypedHashTable(new TypedListHandle() { + private TypedList[] typedLists; + + @Override + public long estimatedSize(int fixedSize) { + return TypedList.IntTypedList.estimatedSizeInBytes(fixedSize); + } + + @Override + public TypedList[] getTypedLists(int fixedSize) { + if (typedLists == null) { + typedLists = new TypedList[] {TypedList.createInt(fixedSize)}; + } + return typedLists; + } + + @Override + public void consume(Chunk chunk, int sourceIndex) { + chunk.getBlock(0).appendTypedHashTable(typedLists[0], sourceIndex, 0, chunk.getPositionCount()); + } + }); + } + + private void buildVecSemiAntiProbe(boolean isNotNullSafeJoin, boolean enableVecBuildJoinRow) { + if (!useVecJoin || !isNotNullSafeJoin) { + buildDefaultProbe(); + return; + } + + if (joinType == JoinRelType.SEMI && joinKeyType == JoinKeyType.LONG + && condition == null) { + buildSemiLongProbe(enableVecBuildJoinRow); + return; + } + + int buildChunkConditionIndex = -1; + boolean isLongKeyNotEqInteger = joinKeyType == JoinKeyType.LONG; + if (!useAntiCondition && isScalarInputRefCondition()) { + isLongKeyNotEqInteger &= ((ScalarFunctionExpression) condition).isA(NotEqual.class); + buildChunkConditionIndex = getBuildChunkConditionIndex(); + } else { + isLongKeyNotEqInteger = false; + } + + if (joinType == JoinRelType.SEMI && isLongKeyNotEqInteger) { + buildSemiLongNotEqIntProbe(enableVecBuildJoinRow, buildChunkConditionIndex); + return; + } + + // normal cases + buildDefaultProbe(); + } + + private void buildSemiLongProbe(boolean enableVecBuildJoinRow) { + this.probeOperator = new SemiLongProbeOperator(enableVecBuildJoinRow); + shared.builderKeyChunks.setTypedHashTable(new TypedListHandle() { + private TypedList[] typedLists; + + @Override + public long estimatedSize(int fixedSize) { + return TypedList.LongTypedList.estimatedSizeInBytes(fixedSize); + } + + @Override + public TypedList[] getTypedLists(int fixedSize) { + if (typedLists == null) { + typedLists = new TypedList[] {TypedList.createLong(fixedSize)}; + } + return typedLists; + } + + @Override + public void consume(Chunk chunk, int sourceIndex) { + chunk.getBlock(0).appendTypedHashTable(typedLists[0], sourceIndex, 0, chunk.getPositionCount()); + } + }); + } + + private void buildSemiLongNotEqIntProbe(boolean enableVecBuildJoinRow, int buildChunkConditionIndex) { + this.probeOperator = new SemiLongNotEqIntegerProbeOperator(enableVecBuildJoinRow); + shared.builderChunks.setTypedHashTable(new TypedListHandle() { + private TypedList[] typedLists; + + @Override + public long estimatedSize(int fixedSize) { + return TypedList.IntTypedList.estimatedSizeInBytes(fixedSize); + } + + @Override + public TypedList[] getTypedLists(int fixedSize) { 
+ if (typedLists == null) { + typedLists = new TypedList[] {TypedList.createInt(fixedSize)}; + } + return typedLists; + } + + @Override + public void consume(Chunk chunk, int sourceIndex) { + chunk.getBlock(buildChunkConditionIndex) + .appendTypedHashTable(typedLists[0], sourceIndex, 0, chunk.getPositionCount()); + } + }); + shared.builderKeyChunks.setTypedHashTable(new TypedListHandle() { + private TypedList[] typedLists; + + @Override + public long estimatedSize(int fixedSize) { + return TypedList.LongTypedList.estimatedSizeInBytes(fixedSize); + } + + @Override + public TypedList[] getTypedLists(int fixedSize) { + if (typedLists == null) { + typedLists = new TypedList[] {TypedList.createLong(fixedSize)}; + } + return typedLists; + } + + @Override + public void consume(Chunk chunk, int sourceIndex) { + chunk.getBlock(0).appendTypedHashTable(typedLists[0], sourceIndex, 0, chunk.getPositionCount()); + } + }); } @Override @@ -106,10 +771,15 @@ public void doOpen() { @Override public void buildConsume() { if (memoryPool != null) { + long start = System.nanoTime(); int partition = shared.buildCount.getAndIncrement(); - if (partition < shared.numPartitions) { - shared.buildHashTable(partition, memoryAllocator, getIgnoreNullsInJoinKey()); + if (partition < shared.numberOfExec) { + int[] ignoreNullBlocks = getIgnoreNullsInJoinKey().stream().mapToInt(i -> i).toArray(); + shared.buildHashTable(partition, memoryAllocator, ignoreNullBlocks, ignoreNullBlocks.length); } + long end = System.nanoTime(); + LOGGER.debug(MessageFormat.format("HashJoinExec: {0} build consume time cost = {1} ns, partition is {2}, " + + "start = {3}, end = {4}", this.toString(), (end - start), partition, start, end)); // Copy the built hash-table from shared states into this executor this.buildChunks = shared.builderChunks; this.buildKeyChunks = shared.builderKeyChunks; @@ -136,7 +806,8 @@ Chunk doNextChunk() { return null; } - if (buildOuterInput && shared.joinNullRowBitSet == null) { + if (buildOuterInput && joinType != JoinRelType.SEMI && joinType != JoinRelType.ANTI + && shared.joinNullRowBitSet == null) { shared.buildNullBitSets(buildChunkSize); } @@ -155,14 +826,8 @@ Chunk nextProbeChunk() { @Override public void consumeChunk(Chunk inputChunk) { - synchronized (shared) { - shared.builderChunks.addChunk(inputChunk); - - Chunk keyChunk = getBuildKeyChunkGetter().apply(inputChunk); - shared.builderKeyChunks.addChunk(keyChunk); - - memoryAllocator.allocateReservedMemory(inputChunk.estimateSize() + keyChunk.estimateSize()); - } + Chunk keyChunk = getBuildKeyChunkGetter().apply(inputChunk); + shared.nextPartition().appendChunk(inputChunk, keyChunk); } @Override @@ -203,22 +868,84 @@ public boolean nextJoinNullRows() { @Override protected void afterProcess(Chunk outputChunk) { super.afterProcess(outputChunk); - if (buildOuterInput) { - if (probeChunk == null) { - if (probeInputIsFinish) { - if (shared.consumeInputIsFinish(operatorIndex) && shared.finishIterator()) { - finished = true; - blocked = ProducerExecutor.NOT_BLOCKED; + // reverse semi join does not need extra processing + if (buildOuterInput && joinType != JoinRelType.SEMI) { + if (joinType == JoinRelType.ANTI) { + // first stage: get all probe chunks and mark the concurrent hash table + // outputChunk is always null at this stage, because we return nothing + // should not finish; always mark as not finished + // and should not modify the blocked status (it may stay blocked during this stage) + // + // second stage: report finish info + // + // third stage: wait until other threads 
finish probing, should not finish + // + // final stage: output result + // + // should use outputChunk rather than probeChunk, because the status switch depends on the result + if (outputChunk == null) { + if (probeInputIsFinish) { + if (antiJoinResultIterator == null) { + int probeNumOfSynchronizer = shared.getProbeParallelism(); + boolean firstMark = shared.antiProbeFinished.markAndGet(operatorIndex); + if (firstMark) { + // update the anti probe count and build the anti join row ids once all probes have finished + int finishedProbeCount = shared.antiProbeCount.addAndGet(1); + if (finishedProbeCount == probeNumOfSynchronizer) { + shared.buildAntiJoinOutputRowIds(); + } + } + int finishedProbeCount = shared.antiProbeCount.get(); + // build the result iterator only after all probes have finished + // and the anti join output row ids have been built (important) + if (finishedProbeCount == probeNumOfSynchronizer && shared.antJoinOutputRowId != null) { + int recordsPerExec = shared.antJoinOutputRowId.size() / probeNumOfSynchronizer; + int startOffset = operatorIndex * recordsPerExec; + int endOffset = (operatorIndex == probeNumOfSynchronizer - 1) + ? shared.antJoinOutputRowId.size() : (operatorIndex + 1) * recordsPerExec; + antiJoinResultIterator = + new AntiJoinResultIterator(shared.antJoinOutputRowId, shared.builderChunks, + blockBuilders, + super.chunkLimit, + startOffset, + endOffset); + } + finished = false; + blocked = ProducerExecutor.NOT_BLOCKED; + } else { + // anti join result iterator is not null, meaning we reached the final stage + if (antiJoinResultIterator.finished()) { + finished = true; + } else { + finished = false; + } + blocked = ProducerExecutor.NOT_BLOCKED; + } } else { + // probe input not finished yet finished = false; - blocked = ProducerExecutor.NOT_BLOCKED; } } else { finished = false; + blocked = ProducerExecutor.NOT_BLOCKED; } } else { - finished = false; - blocked = ProducerExecutor.NOT_BLOCKED; + if (probeChunk == null) { + if (probeInputIsFinish) { + if (shared.consumeInputIsFinish(operatorIndex) && shared.finishIterator()) { + finished = true; + blocked = ProducerExecutor.NOT_BLOCKED; + } else { + finished = false; + blocked = ProducerExecutor.NOT_BLOCKED; + } + } else { + finished = false; + } + } else { + finished = false; + blocked = ProducerExecutor.NOT_BLOCKED; + } } } else { if (outputChunk == null) { @@ -274,7 +1001,9 @@ protected void buildRightJoinRow( void doClose() { // Release the reference to shared hash table etc. 
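Before the `doClose` hunk continues below, a note on the offset arithmetic in the anti-join final stage above: each probe operator is handed a disjoint, contiguous slice of the shared unmatched-row-id list, and the last operator absorbs the division remainder so every id is emitted exactly once. A small worked example of that arithmetic (names are illustrative, not the patch's API):

```java
// Sketch of the offset arithmetic above: unmatched row ids are split into
// near-equal contiguous slices, one per probe operator; the last operator
// absorbs the remainder.
final class AntiJoinSlices {
    static int[] sliceFor(int operatorIndex, int parallelism, int totalRowIds) {
        int recordsPerExec = totalRowIds / parallelism;
        int start = operatorIndex * recordsPerExec;
        int end = (operatorIndex == parallelism - 1) ? totalRowIds : start + recordsPerExec;
        return new int[] {start, end}; // half-open [start, end)
    }

    public static void main(String[] args) {
        // e.g. 10 row ids across 3 operators -> [0, 3), [3, 6), [6, 10)
        for (int op = 0; op < 3; op++) {
            int[] s = sliceFor(op, 3, 10);
            System.out.println("operator " + op + ": [" + s[0] + ", " + s[1] + ")");
        }
    }
}
```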
if (shared != null) { - this.shared.consumeInputIsFinish(operatorIndex); + if (!(buildOuterInput && semiJoin)) { + this.shared.consumeInputIsFinish(operatorIndex); + } this.shared = null; } super.doClose(); @@ -348,120 +1077,50 @@ protected boolean checkJoinCondition( return checkJoinCondition(joinRow); } + private boolean isScalarInputRefCondition() { + return condition instanceof ScalarFunctionExpression && + ((ScalarFunctionExpression) condition).isInputRefArgs(); + } + @Override protected boolean outputNullRowInTime() { return !buildOuterInput; } - public static class Synchronizer { - - private final int numPartitions; - - private final AtomicInteger buildCount = new AtomicInteger(); - - // Shared States - private final ChunksIndex builderChunks = new ChunksIndex(); - private final ChunksIndex builderKeyChunks = new ChunksIndex(); - - private ConcurrentRawHashTable hashTable; - private int[] positionLinks; - private FastIntBloomFilter bloomFilter; - private boolean alreadyUseRuntimeFilter; - private boolean useBloomFilter; - - private BitSet joinNullRowBitSet; - private int maxIndex = -1; - private int nextIndexId = 0; - - private final Set operatorIds = new HashSet<>(); - - public Synchronizer(int numPartitions, boolean alreadyUseRuntimeFilter, boolean useBloomFilter) { - this.numPartitions = numPartitions; - this.alreadyUseRuntimeFilter = alreadyUseRuntimeFilter; - this.useBloomFilter = useBloomFilter; - } - - public Synchronizer(int numPartitions, boolean alreadyUseRuntimeFilter) { - this(numPartitions, alreadyUseRuntimeFilter, true); - } - - private synchronized void initHashTable(MemoryAllocatorCtx ctx) { - if (hashTable == null) { - final int size = builderKeyChunks.getPositionCount(); - hashTable = new ConcurrentRawHashTable(size); - - positionLinks = new int[size]; - Arrays.fill(positionLinks, LIST_END); - - ctx.allocateReservedMemory(hashTable.estimateSize()); - ctx.allocateReservedMemory(SizeOf.sizeOf(positionLinks)); - - if (useBloomFilter && !alreadyUseRuntimeFilter && size <= BLOOM_FILTER_ROWS_LIMIT_FOR_PARALLEL && size > 0) { - bloomFilter = FastIntBloomFilter.create(size); - ctx.allocateReservedMemory(bloomFilter.sizeInBytes()); - } - } - } - - private void buildHashTable(int partition, MemoryAllocatorCtx ctx, List ignoreNullBlocks) { - initHashTable(ctx); - final int partitionSize = -Math.floorDiv(-builderKeyChunks.getChunkCount(), numPartitions); - final int startChunkId = partitionSize * partition; - - if (startChunkId >= builderKeyChunks.getChunkCount()) { - return; // skip invalid chunk ranges - } - - final int endChunkId = Math.min(startChunkId + partitionSize, builderKeyChunks.getChunkCount()); - final int startPosition = builderKeyChunks.getChunkOffset(startChunkId); - final int endPosition = builderKeyChunks.getChunkOffset(endChunkId); - - int position = startPosition; - for (int chunkId = startChunkId; chunkId < endChunkId; ++chunkId) { - final Chunk keyChunk = builderKeyChunks.getChunk(chunkId); - buildOneChunk(keyChunk, position, hashTable, positionLinks, bloomFilter, ignoreNullBlocks); - position += keyChunk.getPositionCount(); - } - assert position == endPosition; - } + enum JoinKeyType { + LONG, + INTEGER, + MULTI_INTEGER, + OTHER + } - private synchronized void recordOperatorIds(int operatorId) { - this.operatorIds.add(operatorId); - } + public static class PartitionChunksIndex { + private final ChunksIndex builderChunks; + private final ChunksIndex builderKeyChunks; + private final StampedLock lock; - private synchronized boolean consumeInputIsFinish(int 
operatorId) { - this.operatorIds.remove(operatorId); - return this.operatorIds.isEmpty(); + public PartitionChunksIndex() { + this.builderChunks = new ChunksIndex(); + this.builderKeyChunks = new ChunksIndex(); + this.lock = new StampedLock(); } - private synchronized void buildNullBitSets(int buildSize) { - if (joinNullRowBitSet == null) { - joinNullRowBitSet = new BitSet(buildSize); - maxIndex = buildSize; + public void appendChunk(Chunk chunk, Chunk keyChunk) { + long stamp = lock.writeLock(); + try { + builderChunks.addChunk(chunk); + builderKeyChunks.addChunk(keyChunk); + } finally { + lock.unlockWrite(stamp); } } - private synchronized void markUsedKeys(int matchedPosition) { - joinNullRowBitSet.set(matchedPosition); - } - - private synchronized int nextUnmatchedPosition() { - if (maxIndex > nextIndexId) { - int unmatchedPosition = joinNullRowBitSet.nextClearBit(nextIndexId); - nextIndexId = unmatchedPosition + 1; - if (maxIndex > unmatchedPosition) { - return unmatchedPosition; - } else { - return -1; - } - - } else { - return -1; - } + public ChunksIndex getBuilderChunks() { + return builderChunks; } - private synchronized boolean finishIterator() { - return nextIndexId >= maxIndex; + public ChunksIndex getBuilderKeyChunks() { + return builderKeyChunks; } } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ProjectExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ProjectExec.java index 4f57588a7..826806f2a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ProjectExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ProjectExec.java @@ -16,9 +16,6 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.chunk.Block; import com.alibaba.polardbx.executor.chunk.BlockBuilder; import com.alibaba.polardbx.executor.chunk.BlockBuilders; @@ -28,6 +25,9 @@ import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.expression.calc.IExpression; import com.alibaba.polardbx.optimizer.core.expression.calc.InputRefExpression; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListenableFuture; import java.util.List; @@ -89,7 +89,10 @@ Chunk doNextChunk() { } } - return this.buildChunk(inputChunk); + Chunk result = this.buildChunk(inputChunk); + result.setPartIndex(inputChunk.getPartIndex()); + result.setPartCount(inputChunk.getPartCount()); + return result; } public Chunk buildChunk(Chunk inputChunk) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ResultSetCursorExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ResultSetCursorExec.java index 039751645..acc0c9bbb 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ResultSetCursorExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ResultSetCursorExec.java @@ -16,15 +16,22 @@ package com.alibaba.polardbx.executor.operator; -import com.alibaba.polardbx.common.datatype.UInt64; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.ListenableFuture; -import com.alibaba.polardbx.rpc.compatible.XResultSet; -import 
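With the `PartitionChunksIndex` above, `consumeChunk` now appends build chunks through `shared.nextPartition().appendChunk(...)`, so each partition takes a short `StampedLock` write lock instead of serializing every producer on a single `synchronized (shared)` block. A minimal sketch of that locking pattern follows, with a plain list standing in for `ChunksIndex`; the optimistic-read accessor illustrates why `StampedLock` is attractive here and is not part of the patch.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.StampedLock;

// Sketch of the StampedLock-guarded append shown in PartitionChunksIndex:
// writers take a plain write lock, while readers that only need a size
// check can use an optimistic read and fall back to a read lock.
final class LockedChunkList {
    private final List<int[]> chunks = new ArrayList<>();
    private final StampedLock lock = new StampedLock();

    void append(int[] chunk) {
        long stamp = lock.writeLock();
        try {
            chunks.add(chunk);
        } finally {
            lock.unlockWrite(stamp);
        }
    }

    int size() {
        long stamp = lock.tryOptimisticRead();
        int size = chunks.size();
        if (!lock.validate(stamp)) { // a writer slipped in; retry pessimistically
            stamp = lock.readLock();
            try {
                size = chunks.size();
            } finally {
                lock.unlockRead(stamp);
            }
        }
        return size;
    }
}
```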
com.alibaba.polardbx.rpc.result.XResult; import com.alibaba.polardbx.common.charset.CharsetName; import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.datatype.DecimalConverter; +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.UInt64; +import com.alibaba.polardbx.common.datatype.UInt64Utils; +import com.alibaba.polardbx.common.utils.BigDecimalUtil; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.thread.ThreadCpuStatUtil; +import com.alibaba.polardbx.common.utils.time.MySQLTimeConverter; +import com.alibaba.polardbx.common.utils.time.core.MySQLTimeVal; +import com.alibaba.polardbx.common.utils.time.core.MysqlDateTime; +import com.alibaba.polardbx.common.utils.time.core.TimeStorage; +import com.alibaba.polardbx.common.utils.time.parser.StringNumericParser; +import com.alibaba.polardbx.common.utils.time.parser.StringTimeParser; +import com.alibaba.polardbx.common.utils.time.parser.TimeParseStatus; import com.alibaba.polardbx.executor.Xprotocol.XRowSet; import com.alibaba.polardbx.executor.chunk.BlockBuilder; import com.alibaba.polardbx.executor.chunk.Chunk; @@ -32,6 +39,8 @@ import com.alibaba.polardbx.executor.chunk.SliceBlockBuilder; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.BigBitType; +import com.alibaba.polardbx.optimizer.core.datatype.BitType; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; @@ -39,8 +48,15 @@ import com.alibaba.polardbx.optimizer.core.datatype.ULongType; import com.alibaba.polardbx.optimizer.core.row.ResultSetRow; import com.alibaba.polardbx.optimizer.core.row.Row; +import com.alibaba.polardbx.repo.mysql.common.ResultSetWrapper; +import com.alibaba.polardbx.rpc.compatible.XResultSet; +import com.alibaba.polardbx.rpc.result.XResult; +import com.alibaba.polardbx.rpc.result.XResultUtil; import com.alibaba.polardbx.statistics.RuntimeStatHelper; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListenableFuture; import io.airlift.slice.Slice; +import org.apache.orc.impl.TypeUtils; import java.math.BigDecimal; import java.math.BigInteger; @@ -51,11 +67,15 @@ import java.sql.SQLException; import java.sql.Time; import java.sql.Timestamp; +import java.sql.Types; +import java.text.MessageFormat; import java.util.ArrayList; import java.util.List; import java.util.Optional; import java.util.stream.Collectors; +import static com.alibaba.polardbx.executor.Xprotocol.XRowSet.DEFAULT_TIME_ZONE; + /** * Convert Cursor to Executor * @@ -153,6 +173,259 @@ public static void buildOneRow(Row row, DataType[] dataTypes, BlockBuilder[] blo } } + public static void buildRawOrcTypeRow(ResultSet rs, + DataType[] dataTypes, + BlockBuilder[] blockBuilders) throws SQLException { + for (int i = 0; i < dataTypes.length; i++) { + // Convert data into orc raw type: Long, Double, or byte[] + switch (dataTypes[i].fieldType()) { + case MYSQL_TYPE_DATETIME: + case MYSQL_TYPE_DATETIME2: { + byte[] bytes = ResultSetWrapper.getBytes(rs, i + 1); + if (null == bytes) { + blockBuilders[i].appendNull(); + } else { + MysqlDateTime t = StringTimeParser.parseString(bytes, Types.TIMESTAMP); + blockBuilders[i].writeLong(TimeStorage.writeTimestamp(t)); + } + break; + } + + case 
MYSQL_TYPE_TIMESTAMP: + case MYSQL_TYPE_TIMESTAMP2: { + byte[] bytes = ResultSetWrapper.getBytes(rs, i + 1); + if (null == bytes) { + blockBuilders[i].appendNull(); + } else { + MysqlDateTime t = StringTimeParser.parseString(bytes, Types.TIMESTAMP); + TimeParseStatus timeParseStatus = new TimeParseStatus(); + MySQLTimeVal timeVal = MySQLTimeConverter.convertDatetimeToTimestampWithoutCheck(t, timeParseStatus, + DEFAULT_TIME_ZONE); + if (timeVal == null) { + // for error time value, set to zero. + timeVal = new MySQLTimeVal(); + } + blockBuilders[i].writeLong(XResultUtil.timeValToLong(timeVal)); + } + break; + } + + case MYSQL_TYPE_DATE: + case MYSQL_TYPE_NEWDATE: { + byte[] bytes = ResultSetWrapper.getBytes(rs, i + 1); + if (null == bytes) { + blockBuilders[i].appendNull(); + } else { + MysqlDateTime t = StringTimeParser.parseString(bytes, Types.DATE); + blockBuilders[i].writeLong(TimeStorage.writeTimestamp(t)); + } + break; + } + + case MYSQL_TYPE_TIME: { + byte[] bytes = ResultSetWrapper.getBytes(rs, i + 1); + if (null == bytes) { + blockBuilders[i].appendNull(); + } else { + MysqlDateTime t = StringTimeParser.parseString(bytes, Types.TIME); + blockBuilders[i].writeLong(TimeStorage.writeTimestamp(t)); + } + break; + } + + case MYSQL_TYPE_YEAR: { + long val = rs.getLong(i + 1); + if (0 == val && rs.wasNull()) { + blockBuilders[i].appendNull(); + } else { + blockBuilders[i].writeLong(val); + } + break; + } + + case MYSQL_TYPE_INT24: { + int val = rs.getInt(i + 1); + if (0 == val && rs.wasNull()) { + blockBuilders[i].appendNull(); + } else { + blockBuilders[i].writeLong(val); + } + break; + } + + case MYSQL_TYPE_LONG: { + if (dataTypes[i].isUnsigned()) { + long val = rs.getLong(i + 1); + if (0 == val && rs.wasNull()) { + blockBuilders[i].appendNull(); + } else { + blockBuilders[i].writeLong(val); + } + } else { + int val = rs.getInt(i + 1); + if (0 == val && rs.wasNull()) { + blockBuilders[i].appendNull(); + } else { + blockBuilders[i].writeLong(val); + } + } + break; + } + + case MYSQL_TYPE_SHORT: { + if (dataTypes[i].isUnsigned()) { + int val = rs.getInt(i + 1); + if (0 == val && rs.wasNull()) { + blockBuilders[i].appendNull(); + } else { + blockBuilders[i].writeLong(val); + } + } else { + short val = rs.getShort(i + 1); + if (0 == val && rs.wasNull()) { + blockBuilders[i].appendNull(); + } else { + blockBuilders[i].writeLong(val); + } + } + break; + } + + case MYSQL_TYPE_TINY: { + if (dataTypes[i].isUnsigned()) { + short val = rs.getShort(i + 1); + if (0 == val && rs.wasNull()) { + blockBuilders[i].appendNull(); + } else { + blockBuilders[i].writeLong(val); + } + } else { + byte val = rs.getByte(i + 1); + if (0 == val && rs.wasNull()) { + blockBuilders[i].appendNull(); + } else { + blockBuilders[i].writeLong(val); + } + } + break; + } + + case MYSQL_TYPE_DECIMAL: + case MYSQL_TYPE_NEWDECIMAL: { + byte[] bytes = ResultSetWrapper.getBytes(rs, i + 1); + if (null == bytes) { + blockBuilders[i].appendNull(); + } else { + if (TypeUtils.isDecimal64Precision(dataTypes[i].getPrecision())) { + // Convert to Long for Decimal64 + long val = BigDecimalUtil.decodeAsUnscaledLong(bytes, dataTypes[i].getScale()); + blockBuilders[i].writeLong(val); + } else { + // Convert to byte[] + DecimalStructure d = new DecimalStructure(); + DecimalConverter.parseString(bytes, 0, bytes.length, d, false); + + final int precision = dataTypes[i].getPrecision(); + final int scale = dataTypes[i].getScale(); + + // NOTE: It will be handled as string in latin1 character set for .orc + byte[] result = new 
byte[DecimalConverter.binarySize(precision, scale)]; + DecimalConverter.decimalToBin(d, result, precision, scale); + blockBuilders[i].writeByteArray(result); + } + } + break; + } + + case MYSQL_TYPE_LONGLONG: { + if (dataTypes[i].isUnsigned()) { + byte[] bytes = ResultSetWrapper.getBytes(rs, i + 1); + if (null == bytes) { + blockBuilders[i].appendNull(); + } else { + long[] parseResult = StringNumericParser.parseString(bytes); + // check whether an error occurred during parsing + if (parseResult[StringNumericParser.ERROR_INDEX] != 0) { + throw GeneralUtil.nestedException(MessageFormat.format( + "failed to parse unsigned long value {0}.", new String(bytes))); + } + long parsedNumber = parseResult[StringNumericParser.NUMERIC_INDEX]; + blockBuilders[i].writeLong(parsedNumber ^ UInt64Utils.FLIP_MASK); + } + } else { + long val = rs.getLong(i + 1); + if (0 == val && rs.wasNull()) { + blockBuilders[i].appendNull(); + } else { + blockBuilders[i].writeLong(val); + } + } + break; + } + + case MYSQL_TYPE_BIT: { + if (dataTypes[i] == DataTypes.BitType) { + Object obj = rs.getObject(i + 1); + if (null == obj) { + blockBuilders[i].appendNull(); + } else { + /* the null check goes through rs.getObject first, because rs.getInt throws when the BIT value is null */ + int val = rs.getInt(i + 1); + blockBuilders[i].writeLong(val); + } + } else { + byte[] bytes = ResultSetWrapper.getBytes(rs, i + 1); + if (null == bytes) { + blockBuilders[i].appendNull(); + } else { + long val = bytesToLong(bytes); + blockBuilders[i].writeLong(val); + } + } + break; + } + + case MYSQL_TYPE_DOUBLE: { + double val = rs.getDouble(i + 1); + if (0 == val && rs.wasNull()) { + blockBuilders[i].appendNull(); + } else { + blockBuilders[i].writeDouble(val); + } + break; + } + + case MYSQL_TYPE_FLOAT: { + float val = rs.getFloat(i + 1); + if (0 == val && rs.wasNull()) { + blockBuilders[i].appendNull(); + } else { + blockBuilders[i].writeDouble(val); + } + break; + } + + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_STRING: + case MYSQL_TYPE_SET: + case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_ENUM: + case MYSQL_TYPE_JSON: { + byte[] bytes = ResultSetWrapper.getBytes(rs, i + 1); + if (null == bytes) { + blockBuilders[i].appendNull(); + } else { + blockBuilders[i].writeByteArray(bytes); + } + } + break; + + default: + throw new UnsupportedOperationException(dataTypes[i].fieldType().toString()); + } + } + } + private static void buildOneCell(ResultSet rs, int i, DataType type, BlockBuilder builder, ExecutionContext context) throws SQLException { final Class clazz = type.getDataClass(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ResumeTableScanExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ResumeTableScanExec.java index 68443a093..2b8bb983a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ResumeTableScanExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ResumeTableScanExec.java @@ -16,26 +16,21 @@ package com.alibaba.polardbx.executor.operator; -import com.alibaba.polardbx.common.jdbc.BytesSql; -import com.alibaba.polardbx.common.jdbc.StreamBytesSql; -import com.alibaba.polardbx.common.jdbc.UnionBytesSql; -import com.google.common.base.Preconditions; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.jdbc.BytesSql; +import com.alibaba.polardbx.common.jdbc.StreamBytesSql; import com.alibaba.polardbx.executor.mpp.metadata.Split; import
com.alibaba.polardbx.executor.mpp.split.JdbcSplit; import com.alibaba.polardbx.executor.operator.spill.SpillerFactory; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.rel.LogicalView; -import com.alibaba.polardbx.optimizer.core.rel.PhyTableScanBuilder; -import org.bouncycastle.util.Arrays; +import com.google.common.base.Preconditions; import java.util.Iterator; import java.util.List; -import static com.alibaba.polardbx.common.utils.GeneralUtil.buildPhysicalQuery; - public class ResumeTableScanExec extends TableScanExec implements ResumeExec { protected long stepSize; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/RuntimeFilterBuilderExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/RuntimeFilterBuilderExec.java index 17f7131c0..5fd1029c1 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/RuntimeFilterBuilderExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/RuntimeFilterBuilderExec.java @@ -16,14 +16,14 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.operator.util.BloomFilterProduce; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListenableFuture; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/RuntimeFilterMerger.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/RuntimeFilterMerger.java new file mode 100644 index 000000000..f6146b07e --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/RuntimeFilterMerger.java @@ -0,0 +1,58 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator; + +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItem; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFManager; +import com.alibaba.polardbx.executor.operator.util.ChunksIndex; + +/** + * A concurrency-safe runtime-filter merger, receiving chunks from different threads, writing into + * runtime-filter components or partial runtime-filters, and finally merging into FragmentRFManager. + */ +public interface RuntimeFilterMerger { + /** + * A binding FragmentRFManager of this RuntimeFilterMerger object. 
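Before the method declarations continue below, here is a hedged sketch of how a build thread is expected to drive this interface; it mirrors the BROADCAST/LOCAL dispatch that Synchronizer.buildHashTable performs later in this diff. The wrapper name buildRuntimeFilter is hypothetical.

```java
// Hypothetical caller of RuntimeFilterMerger, modeled on the step-3 dispatch
// in Synchronizer.buildHashTable further below in this diff.
void buildRuntimeFilter(RuntimeFilterMerger merger, ChunksIndex builderKeyChunks,
                        int startChunkId, int endChunkId,
                        boolean isHashTableShared, int partitionsInSynchronizer) {
    switch (merger.getFragmentItem().getRFType()) {
    case BROADCAST:
        // one global filter, merged across all build threads
        merger.addChunksForBroadcastRF(builderKeyChunks, startChunkId, endChunkId);
        break;
    case LOCAL:
        // per-partition filters for partition-wise execution
        merger.addChunksForPartialRF(builderKeyChunks, startChunkId, endChunkId,
            isHashTableShared, partitionsInSynchronizer);
        break;
    }
}
```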
+ * + * @return FragmentRFManager + */ + FragmentRFManager getFragmentRFManager(); + + FragmentRFItem getFragmentItem(); + + /** + * Add chunks into the global runtime filter. + * + * @param builderKeyChunks A chunk collection. + * @param startChunkId The start chunk position in the collection. + * @param endChunkId The end chunk position in the collection (exclusive). + */ + void addChunksForBroadcastRF(ChunksIndex builderKeyChunks, int startChunkId, int endChunkId); + + /** + * Add chunks into the local partitioned runtime filter. + * + * @param builderKeyChunks A chunk collection. + * @param startChunkId The start chunk position in the collection. + * @param endChunkId The end chunk position in the collection (exclusive). + * @param isHashTableShared If the hash table is shared by all threads, hash_table_size / (partition_num / worker_count) + * is used as the estimated size of each partial runtime filter; otherwise the estimate is + * builderKeyChunks_length / partitionNum. + */ + void addChunksForPartialRF(ChunksIndex builderKeyChunks, int startChunkId, + int endChunkId, boolean isHashTableShared, int partitionsInSynchronizer); +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SortAggExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SortAggExec.java index 1bafb06c9..64b97cd9a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SortAggExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SortAggExec.java @@ -16,14 +16,17 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.operator.util.DataTypeUtils; import com.alibaba.polardbx.executor.utils.ExecUtils; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.row.Row; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListenableFuture; + +import java.util.ArrayList; import java.util.List; /** @@ -35,6 +38,8 @@ public class SortAggExec extends AbstractExecutor { private final List aggregators; + private AggCallsHolder aggCallsHolder; + private final List outputColumnMeta; private final int[] groups; @@ -48,8 +53,6 @@ public class SortAggExec extends AbstractExecutor { private boolean finished; private ListenableFuture blocked; - private boolean hasAddToResult = false; - public SortAggExec(Executor input, int[] groups, List aggregators, List outputColumnMeta, ExecutionContext context) { super(context); @@ -62,9 +65,11 @@ public SortAggExec(Executor input, int[] groups, List aggregators, L @Override void doOpen() { + if (groups.length == 0) { + // we need an initial holder when there is no group by + aggCallsHolder = new AggCallsHolder(aggregators); + } input.open(); - aggregators.forEach(t -> t.open(1)); - aggregators.forEach(Aggregator::appendInitValue); createBlockBuilders(); } @@ -78,28 +83,24 @@ Chunk doNextChunk() { if (!finished) { return null; } - if (!hasAddToResult) { - if (currentKey != null) { - buildRow(); - } - hasAddToResult = true; + if (aggCallsHolder != null) { // there is still a group that needs to be output + buildRow(aggCallsHolder); + aggCallsHolder = null; } break; } - // first row, new a group - if (currentKey == null) { - aggregators.forEach(t -> t.accumulate(0, currentInputChunk, row.getPosition())); + if (currentKey == null) { // first row, start a new group + aggCallsHolder = createAggCallsHolder(); + aggCallsHolder.aggregate(row); currentKey = row; - // no group by or key equal in the same group - } else if (groups.length == 0 || checkKeyEqual(currentKey, row)) { - aggregators.forEach(t -> t.accumulate(0, currentInputChunk, row.getPosition())); - // key not equal, new a group - } else { - buildRow(); - aggregators.forEach(t -> t.resetToInitValue(0)); - aggregators.forEach(t -> t.accumulate(0, currentInputChunk, row.getPosition())); - hasAddToResult = false; + } else if (groups.length == 0 || checkKeyEqual(currentKey, + row)) { // no group by, or key belongs to the current group + aggCallsHolder.aggregate(row); + } else { // key not equal, start a new group + buildRow(aggCallsHolder); + aggCallsHolder = createAggCallsHolder(); + aggCallsHolder.aggregate(row); currentKey = row; } } @@ -111,7 +112,7 @@ Chunk doNextChunk() { } } - private void buildRow() { + private void buildRow(AggCallsHolder aggCallsHolder) { int col = 0; Chunk.ChunkRow chunkRow = currentKey; for (int i = 0; i < groups.length; i++, col++) { @@ -119,7 +120,8 @@ private void buildRow() { } for (int i = 0; i < aggregators.size(); i++, col++) { - aggregators.get(i).writeResultTo(0, blockBuilders[col]); + Object result = aggCallsHolder.getValue(i); + blockBuilders[col].writeObject(DataTypeUtils.convert(outputColumnMeta.get(col), result)); } } @@ -155,7 +157,33 @@ public List getDataTypes() { @Override void doClose() { input.close(); - hasAddToResult = true; + aggCallsHolder = null; + } + + private AggCallsHolder createAggCallsHolder() { + return new AggCallsHolder(aggregators); + } + + private static class AggCallsHolder { + + private final List aggCalls; + + AggCallsHolder(List aggCalls) { + this.aggCalls = new ArrayList<>(aggCalls.size()); + for (Aggregator aggCall : aggCalls) { + this.aggCalls.add(aggCall.getNew()); + } + } + + void aggregate(Row row) { + for (Aggregator aggCall : aggCalls) { + aggCall.aggregate(row); + } + } + + Object getValue(int index) { + return aggCalls.get(index).value(); + } } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SortExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SortExec.java index 2dddf851a..48a6d5669 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SortExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SortExec.java @@ -16,7 +16,6 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.operator.spill.MemoryRevoker; import com.alibaba.polardbx.executor.operator.spill.SpillerFactory; @@ -29,6 +28,7 @@ import com.alibaba.polardbx.optimizer.memory.MemoryPool; import com.alibaba.polardbx.optimizer.memory.MemoryPoolUtils; import com.alibaba.polardbx.optimizer.memory.OperatorMemoryAllocatorCtx; +import com.google.common.util.concurrent.ListenableFuture; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SortMergeExchangeExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SortMergeExchangeExec.java index ff9c201ba..2cb7dffa0 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SortMergeExchangeExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SortMergeExchangeExec.java @@ -16,10 +16,6 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.collect.ImmutableList; -import com.google.common.io.Closer; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.mpp.execution.RecordMemSystemListener; import com.alibaba.polardbx.executor.mpp.execution.buffer.PagesSerde; @@ -37,6 +33,10 @@ import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.memory.MemoryPool; import com.alibaba.polardbx.optimizer.memory.MemoryPoolUtils; +import com.google.common.collect.ImmutableList; +import com.google.common.io.Closer; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; import java.io.Closeable; import java.io.IOException; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SortMergeJoinExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SortMergeJoinExec.java index 820065cdf..c88038814 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SortMergeJoinExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SortMergeJoinExec.java @@ -16,7 +16,6 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.config.ConfigDataMode; @@ -31,6 +30,7 @@ import com.alibaba.polardbx.optimizer.core.join.EquiJoinKey; import com.alibaba.polardbx.optimizer.core.row.JoinRow; import com.alibaba.polardbx.optimizer.core.row.Row; +import com.google.common.util.concurrent.ListenableFuture; import org.apache.calcite.rel.core.JoinRelType; import java.util.ArrayList; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SpilledTopNExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SpilledTopNExec.java index 7875de23d..55eaca15b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SpilledTopNExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SpilledTopNExec.java @@ -16,7 +16,6 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.operator.spill.MemoryRevoker; import com.alibaba.polardbx.executor.operator.spill.SpillerFactory; @@ -28,6 +27,7 @@ import com.alibaba.polardbx.optimizer.memory.MemoryPool; import com.alibaba.polardbx.optimizer.memory.MemoryPoolUtils; import com.alibaba.polardbx.optimizer.memory.OperatorMemoryAllocatorCtx; +import com.google.common.util.concurrent.ListenableFuture; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SpillerExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SpillerExec.java index 2f930d06b..9c376b86e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SpillerExec.java +++ 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SpillerExec.java @@ -16,12 +16,12 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.chunk.ChunkBuilder; import com.alibaba.polardbx.executor.operator.spill.SingleStreamSpiller; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; import java.util.Iterator; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/Synchronizer.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/Synchronizer.java new file mode 100644 index 000000000..317c6ae26 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/Synchronizer.java @@ -0,0 +1,346 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator; + +import com.alibaba.polardbx.common.utils.bloomfilter.ConcurrentIntBloomFilter; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.common.utils.memory.SizeOf; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.mpp.execution.TaskExecutor; +import com.alibaba.polardbx.executor.operator.util.ChunksIndex; +import com.alibaba.polardbx.executor.operator.util.ConcurrentRawDirectHashTable; +import com.alibaba.polardbx.executor.operator.util.ConcurrentRawHashTable; +import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import org.apache.calcite.rel.core.JoinRelType; + +import java.text.MessageFormat; +import java.util.Arrays; +import java.util.BitSet; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; + +import static com.alibaba.polardbx.executor.utils.ExecUtils.buildOneChunk; + +public class Synchronizer { + private static final Logger LOGGER = LoggerFactory.getLogger(TaskExecutor.class); + private final static int DEFAULT_CHUNK_LIMIT = 1000; + + /** + * How many degrees of parallelism in this Synchronizer instance. + */ + final int numberOfExec; + + /** + * How many partitions in this Synchronizer instance. + * It's useful in local partition mode. 
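The shared state declared below (hashTable plus positionLinks) forms a chained hash table over build-side row positions: each hash slot stores the most recently inserted position, and positionLinks threads earlier positions with the same hash into a LIST_END-terminated list. A minimal single-threaded sketch of that layout follows; the class and method names are illustrative only, and the real ConcurrentRawHashTable additionally supports concurrent writers.

```java
import java.util.Arrays;

// Minimal sketch of the hashTable + positionLinks layout; not the real API.
public class ChainedHashSketch {
    static final int LIST_END = -1;      // assumption: sentinel akin to AbstractHashJoinExec.LIST_END

    final int[] buckets;                 // hash slot -> last inserted row position
    final int[] positionLinks;           // row position -> previous row with the same hash

    ChainedHashSketch(int rowCount) {
        // power-of-two capacity of at least 2 * rowCount, so a bit mask can replace modulo
        this.buckets = new int[Integer.highestOneBit(rowCount * 2 - 1) << 1];
        Arrays.fill(buckets, LIST_END);
        this.positionLinks = new int[rowCount];
        Arrays.fill(positionLinks, LIST_END);
    }

    void put(int hash, int position) {
        int slot = hash & (buckets.length - 1);
        positionLinks[position] = buckets[slot];   // chain the previous head
        buckets[slot] = position;                  // new head of the chain
    }

    // Iterate all build-side positions sharing this hash (callers re-check key equality).
    void probe(int hash, java.util.function.IntConsumer onCandidate) {
        for (int pos = buckets[hash & (buckets.length - 1)]; pos != LIST_END; pos = positionLinks[pos]) {
            onCandidate.accept(pos);
        }
    }
}
```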
+ */ + final int numberOfDataPartition; + + final AtomicInteger buildCount = new AtomicInteger(); + + /** + * used to synchronize probe side of reverse anti join + */ + final AtomicInteger antiProbeCount = new AtomicInteger(); + + ConcurrentRawDirectHashTable antiProbeFinished; + + volatile List antJoinOutputRowId; + + // Shared States + final ChunksIndex builderChunks; + final ChunksIndex builderKeyChunks; + + volatile ConcurrentRawHashTable hashTable; + int[] positionLinks; + ConcurrentIntBloomFilter bloomFilter; + boolean alreadyUseRuntimeFilter; + boolean useBloomFilter; + + // exception during initializing hash table (e.g. MemoryNotEnoughException) + volatile Throwable initException; + + BitSet joinNullRowBitSet; + int maxIndex = -1; + int nextIndexId = 0; + + final Set operatorIds = new HashSet<>(); + + ConcurrentRawDirectHashTable matchedPosition; + + // Thread Local arrays for hash code vector + final int chunkLimit; + final ThreadLocal hashCodeResultsThreadLocal; + final ThreadLocal intermediatesThreadLocal; + final ThreadLocal blockHashCodesThreadLocal; + + final JoinRelType joinType; + final boolean outerDriver; + + // partition-level chunk index to avoid lock + final ParallelHashJoinExec.PartitionChunksIndex[] partitionChunksIndexes; + final AtomicLong nextPartition; + + final boolean isHashTableShared; + Map synchronizerRFMergers; + + private int probeParallelism; + + public Synchronizer(JoinRelType joinType, boolean outerDriver, int numberOfExec, + boolean alreadyUseRuntimeFilter, boolean useBloomFilter, int chunkLimit, + int probeParallelism, int numberOfDataPartition, boolean isHashTableShared) { + this.isHashTableShared = isHashTableShared; + this.joinType = joinType; + this.outerDriver = outerDriver; + + this.numberOfExec = numberOfExec; + this.alreadyUseRuntimeFilter = alreadyUseRuntimeFilter; + this.useBloomFilter = useBloomFilter; + this.builderChunks = new ChunksIndex(); + this.builderKeyChunks = new ChunksIndex(); + + this.chunkLimit = chunkLimit; + hashCodeResultsThreadLocal = ThreadLocal.withInitial(() -> new int[chunkLimit]); + intermediatesThreadLocal = ThreadLocal.withInitial(() -> new int[chunkLimit]); + blockHashCodesThreadLocal = ThreadLocal.withInitial(() -> new int[chunkLimit]); + + // avoid unnecessary allocate + if (joinType == JoinRelType.ANTI && outerDriver) { + this.antiProbeFinished = new ConcurrentRawDirectHashTable(probeParallelism); + } + + this.partitionChunksIndexes = new ParallelHashJoinExec.PartitionChunksIndex[numberOfExec]; + this.nextPartition = new AtomicLong(0L); + for (int i = 0; i < numberOfExec; i++) { + partitionChunksIndexes[i] = new ParallelHashJoinExec.PartitionChunksIndex(); + } + this.numberOfDataPartition = numberOfDataPartition; + this.probeParallelism = probeParallelism; + this.synchronizerRFMergers = new TreeMap<>(); + } + + // Just for test + public Synchronizer(JoinRelType joinType, boolean outerDriver, int numberOfExec, + boolean alreadyUseRuntimeFilter, int probeParallelism) { + this(joinType, outerDriver, numberOfExec, alreadyUseRuntimeFilter, true, DEFAULT_CHUNK_LIMIT, + probeParallelism, -1, false); + } + + public void putSynchronizerRFMerger(int ordinal, SynchronizerRFMerger synchronizerRFMerger) { + synchronizerRFMergers.put(ordinal, synchronizerRFMerger); + } + + public ParallelHashJoinExec.PartitionChunksIndex nextPartition() { + return partitionChunksIndexes[(int) (nextPartition.getAndIncrement() % numberOfExec)]; + } + + public void initHashTable(MemoryAllocatorCtx ctx) { + if (hashTable == null) { + long start = 
System.nanoTime(); + + // merge chunk index + List partitionBuilderChunks = Arrays.stream(partitionChunksIndexes) + .map(ParallelHashJoinExec.PartitionChunksIndex::getBuilderChunks).collect(Collectors.toList()); + List partitionBuilderKeyChunks = Arrays.stream(partitionChunksIndexes) + .map(ParallelHashJoinExec.PartitionChunksIndex::getBuilderKeyChunks).collect(Collectors.toList()); + builderChunks.merge(partitionBuilderChunks); + builderKeyChunks.merge(partitionBuilderKeyChunks); + + final int size = builderKeyChunks.getPositionCount(); + + // large memory allocation: hash table for build-side + ctx.allocateReservedMemory(ConcurrentRawHashTable.estimateSizeInBytes(size)); + hashTable = new ConcurrentRawHashTable(size); + + if (outerDriver && (joinType == JoinRelType.ANTI || joinType == JoinRelType.SEMI)) { + // large memory allocation: hash table for reversed anti/semi join + ctx.allocateReservedMemory(ConcurrentRawDirectHashTable.estimatedSizeInBytes(size)); + matchedPosition = new ConcurrentRawDirectHashTable(size); + } + + // large memory allocation: linked list for build-side. + ctx.allocateReservedMemory(SizeOf.sizeOfIntArray(size)); + positionLinks = new int[size]; + Arrays.fill(positionLinks, AbstractHashJoinExec.LIST_END); + + // large memory allocation: type-specific lists in key columns chunks index. + ctx.allocateReservedMemory(builderKeyChunks.estimateTypedListSizeInBytes()); + builderKeyChunks.openTypedHashTable(); + + // large memory allocation: type-specific lists in full columns chunks index. + ctx.allocateReservedMemory(builderChunks.estimateTypedListSizeInBytes()); + builderChunks.openTypedHashTable(); + + if (useBloomFilter && !alreadyUseRuntimeFilter + && size <= AbstractJoinExec.BLOOM_FILTER_ROWS_LIMIT_FOR_PARALLEL + && size > 0) { + // large memory allocation: bloom-filter + ctx.allocateReservedMemory( + ConcurrentIntBloomFilter.estimatedSizeInBytes(size, ConcurrentIntBloomFilter.DEFAULT_FPP)); + bloomFilter = ConcurrentIntBloomFilter.create(size); + } + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug( + MessageFormat.format("initialize hash table time cost = {0} ns, positionCount = {1}, " + + "hash table size = {2}", (System.nanoTime() - start), size, hashTable.size())); + } + + } + } + + public void buildHashTable(int partition, MemoryAllocatorCtx ctx, int[] ignoreNullBlocks, + int ignoreNullBlocksSize) { + + synchronized (this) { + if (hashTable == null && initException == null) { + try { + initHashTable(ctx); + } catch (Throwable t) { + // Avoid allocating hash table after encountering out-of-memory exception. + this.initException = t; + throw t; + } + } + } + + final int partitionSize = -Math.floorDiv(-builderKeyChunks.getChunkCount(), numberOfExec); + final int startChunkId = partitionSize * partition; + + if (startChunkId >= builderKeyChunks.getChunkCount()) { + return; // skip invalid chunk ranges + } + + final int endChunkId = Math.min(startChunkId + partitionSize, builderKeyChunks.getChunkCount()); + final int startPosition = builderKeyChunks.getChunkOffset(startChunkId); + final int endPosition = builderKeyChunks.getChunkOffset(endChunkId); + + int[] hashCodeResults = hashCodeResultsThreadLocal.get(); + int[] intermediates = intermediatesThreadLocal.get(); + int[] blockHashCodes = blockHashCodesThreadLocal.get(); + + int position = startPosition; + for (int chunkId = startChunkId; chunkId < endChunkId; ++chunkId) { + + // step1. 
add chunk into type list + builderChunks.addChunkToTypedList(chunkId); + builderKeyChunks.addChunkToTypedList(chunkId); + + // step2. add chunk into hash table. + final Chunk keyChunk = builderKeyChunks.getChunk(chunkId); + buildOneChunk(keyChunk, position, hashTable, positionLinks, + hashCodeResults, intermediates, blockHashCodes, bloomFilter, ignoreNullBlocks, + ignoreNullBlocksSize); + + position += keyChunk.getPositionCount(); + } + + // step3. add fragment-level runtime filter. + if (synchronizerRFMergers != null && !synchronizerRFMergers.isEmpty()) { + + for (Map.Entry entry : synchronizerRFMergers.entrySet()) { + SynchronizerRFMerger merger = entry.getValue(); + + switch (merger.getFragmentItem().getRFType()) { + case BROADCAST: + merger.addChunksForBroadcastRF(builderKeyChunks, startChunkId, endChunkId); + break; + case LOCAL: + merger.addChunksForPartialRF(builderKeyChunks, startChunkId, endChunkId, + isHashTableShared, numberOfDataPartition); + break; + } + } + } + + assert position == endPosition; + } + + public synchronized void buildAntiJoinOutputRowIds() { + if (antJoinOutputRowId != null) { + return; + } + antJoinOutputRowId = matchedPosition.getNotMarkedPosition(); + } + + public synchronized void recordOperatorIds(int operatorId) { + this.operatorIds.add(operatorId); + } + + public synchronized boolean consumeInputIsFinish(int operatorId) { + this.operatorIds.remove(operatorId); + return this.operatorIds.isEmpty(); + } + + public synchronized void buildNullBitSets(int buildSize) { + if (joinNullRowBitSet == null) { + joinNullRowBitSet = new BitSet(buildSize); + maxIndex = buildSize; + } + } + + public synchronized void markUsedKeys(int matchedPosition) { + joinNullRowBitSet.set(matchedPosition); + } + + public synchronized int nextUnmatchedPosition() { + if (maxIndex > nextIndexId) { + int unmatchedPosition = joinNullRowBitSet.nextClearBit(nextIndexId); + nextIndexId = unmatchedPosition + 1; + if (maxIndex > unmatchedPosition) { + return unmatchedPosition; + } else { + return -1; + } + + } else { + return -1; + } + } + + public synchronized boolean finishIterator() { + return nextIndexId >= maxIndex; + } + + @VisibleForTesting + public int getNumberOfExec() { + return numberOfExec; + } + + public int getProbeParallelism() { + return probeParallelism; + } + + public ConcurrentRawDirectHashTable getMatchedPosition() { + return matchedPosition; + } + + public void close() { + this.builderChunks.close(); + this.builderKeyChunks.close(); + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SynchronizerRFMerger.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SynchronizerRFMerger.java new file mode 100644 index 000000000..2a00c4948 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/SynchronizerRFMerger.java @@ -0,0 +1,333 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
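buildHashTable above splits the build side into contiguous chunk ranges with the ceiling-division idiom -Math.floorDiv(-chunkCount, numberOfExec), so every executor gets at most ceil(chunkCount / numberOfExec) chunks and trailing partitions may end up empty. A small self-contained sketch of that arithmetic, with made-up values:

```java
public class ChunkRangeSplit {
    // same trick as the diff: ceil(a / b) == -floor(-a / b) for positive b
    static int ceilDiv(int a, int b) {
        return -Math.floorDiv(-a, b);
    }

    public static void main(String[] args) {
        int chunkCount = 10, numberOfExec = 4;
        int partitionSize = ceilDiv(chunkCount, numberOfExec);   // 3
        for (int partition = 0; partition < numberOfExec; partition++) {
            int startChunkId = partitionSize * partition;
            if (startChunkId >= chunkCount) {
                continue;                                        // same guard as the diff: skip empty tails
            }
            int endChunkId = Math.min(startChunkId + partitionSize, chunkCount);
            // prints [0, 3) [3, 6) [6, 9) [9, 10)
            System.out.println("partition " + partition + " -> chunks [" + startChunkId + ", " + endChunkId + ")");
        }
    }
}
```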
+ */ + +package com.alibaba.polardbx.executor.operator; + +import com.alibaba.polardbx.common.utils.bloomfilter.ConcurrentIntBloomFilter; +import com.alibaba.polardbx.common.utils.bloomfilter.RFBloomFilter; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItem; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFManager; +import com.alibaba.polardbx.executor.operator.util.ChunksIndex; + +import java.text.MessageFormat; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +public class SynchronizerRFMerger implements RuntimeFilterMerger { + private static final Logger LOGGER = LoggerFactory.getLogger(FragmentRFManager.class); + + // plan fragment level runtime filter manager. + private final FragmentRFManager fragmentRFManager; + private final FragmentRFItem rfItem; + + private final int buildSideParallelism; + private final int blockChannel; + + private RFBloomFilter[] rfBloomFilters; + private Lock[] bfInitializingLocks; + private AtomicInteger bfParallelismCounter; + private AtomicInteger bfPartitionCounter; + + private volatile RFBloomFilter globalRFBloomFilter; + + public SynchronizerRFMerger(FragmentRFManager fragmentRFManager, FragmentRFItem rfItem, int buildSideParallelism, + int blockChannel) { + this.fragmentRFManager = fragmentRFManager; + this.rfItem = rfItem; + this.buildSideParallelism = buildSideParallelism; + + final int runtimeFilterCount = + (rfItem.getRFType() == FragmentRFManager.RFType.BROADCAST + || fragmentRFManager.getTotalPartitionCount() <= 0) + ? 1 : fragmentRFManager.getTotalPartitionCount(); + this.rfBloomFilters = new RFBloomFilter[runtimeFilterCount]; + this.bfInitializingLocks = new Lock[runtimeFilterCount]; + this.blockChannel = blockChannel; + + for (int i = 0; i < bfInitializingLocks.length; i++) { + bfInitializingLocks[i] = new ReentrantLock(); + } + bfParallelismCounter = new AtomicInteger(buildSideParallelism); + + final int partitionsOfNode = fragmentRFManager.getPartitionsOfNode(); + bfPartitionCounter = new AtomicInteger(partitionsOfNode); + } + + @Override + public FragmentRFManager getFragmentRFManager() { + return this.fragmentRFManager; + } + + @Override + public FragmentRFItem getFragmentItem() { + return this.rfItem; + } + + @Override + public void addChunksForBroadcastRF(ChunksIndex builderKeyChunks, final int startChunkId, final int endChunkId) { + final long buildSideRows = builderKeyChunks.getPositionCount(); + final List registeredSource = rfItem.getRegisteredSource(); + final long rowUpperBound = fragmentRFManager.getUpperBound(); + final long rowLowerBound = fragmentRFManager.getLowerBound(); + + if (registeredSource == null || registeredSource.isEmpty()) { + // No source has been registered, don't build RF. + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(MessageFormat.format( + "Do not have any source exec for join buildSide = {0}, probeSide = {1}", + rfItem.getBuildColumnName(), rfItem.getProbeColumnName() + )); + } + + return; + } + + if (buildSideRows > rowUpperBound || buildSideRows < rowLowerBound) { + // For the global RF, the build-side row count is outside the configured [lowerBound, upperBound] range, so don't build RF.
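The broadcast path that follows initializes the shared global bloom filter with the classic double-checked-locking pattern: a volatile field, a cheap null pre-check, and a re-check under the monitor so that only one thread allocates. Distilled into a standalone sketch; the long[] stand-in and its sizing are placeholders, not the real RFBloomFilter API.

```java
// Distilled pattern (assumption: simplified from the method body that follows).
class SharedFilterHolder {
    private volatile long[] filter;                 // stand-in for RFBloomFilter

    long[] getOrCreate(int buildSideRows) {
        long[] local = filter;                      // single volatile read on the fast path
        if (local == null) {
            synchronized (this) {
                if (filter == null) {               // re-check under the lock
                    filter = new long[Math.max(1, buildSideRows / 64)];
                }
                local = filter;
            }
        }
        return local;
    }
}
```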
+ if (LOGGER.isDebugEnabled()) { + LOGGER.debug(MessageFormat.format( + "Do not generate RF for join buildSide = {0}, probeSide = {1}, buildSideRows = {2}, threshold = {3}", + rfItem.getBuildColumnName(), rfItem.getProbeColumnName(), + buildSideRows, rowUpperBound + )); + } + return; + } + + // construct the global RF in a concurrency-safe manner. + long startGlobalMode = System.nanoTime(); + if (globalRFBloomFilter == null) { + synchronized (this) { + if (globalRFBloomFilter == null) { + globalRFBloomFilter = RFBloomFilter.createBlockLongBloomFilter((int) buildSideRows); + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("initialize global RF for buildSide = " + + rfItem.getBuildColumnName() + + ", probeSide = " + rfItem.getProbeColumnName() + + ", rowCount = " + + buildSideRows + + ", sizeInBytes = " + + (globalRFBloomFilter).sizeInBytes()); + } + + } + } + } + + // create a thread-local RF bloom-filter component. + RFBloomFilter rfComponent = RFBloomFilter.createBlockLongBloomFilter((int) buildSideRows); + + // Every thread builds over its own range of chunk ids. + for (int chunkId = startChunkId; chunkId < endChunkId; ++chunkId) { + final Chunk keyChunk = builderKeyChunks.getChunk(chunkId); + Block block = keyChunk.getBlock(blockChannel); + block.addLongToBloomFilter(rfComponent); + } + + synchronized (globalRFBloomFilter) { + // merge the thread-local component into globalRFBloomFilter + globalRFBloomFilter.merge(rfComponent); + + // check whether all threads have finished building the global RF. + if (bfParallelismCounter.decrementAndGet() == 0) { + rfItem.assignRF(new RFBloomFilter[] {globalRFBloomFilter}); + + long endGlobalMode = System.nanoTime(); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("build global RF timeCost = " + (endGlobalMode - startGlobalMode) + + "ns for buildSide = " + + rfItem.getBuildColumnName() + + ", probeSide = " + rfItem.getProbeColumnName()); + } + } + } + } + + @Override + public void addChunksForPartialRF(ChunksIndex builderKeyChunks, final int startChunkId, + final int endChunkId, boolean isHashTableShared, int partitionsInSynchronizer) { + final long buildSideRows = builderKeyChunks.getPositionCount(); + + final int totalPartitionCount = fragmentRFManager.getTotalPartitionCount(); + final double fpp = fragmentRFManager.getDefaultFpp(); + final List registeredSource = rfItem.getRegisteredSource(); + + if (registeredSource == null || registeredSource.isEmpty()) { + // No source has been registered, don't build RF. + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(MessageFormat.format( + "Do not have any source exec for join buildSide = {0}, probeSide = {1}", + rfItem.getBuildColumnName(), rfItem.getProbeColumnName() + )); + } + + return; + } + + if (rfItem.useXXHashInBuild()) { + // For partition wise mode, a chunk is only from one partition. + + long bfSizeOfPartition = getBfSizeOfPartition(isHashTableShared, partitionsInSynchronizer, buildSideRows); + + for (int chunkId = startChunkId; chunkId < endChunkId; ++chunkId) { + final Chunk keyChunk = builderKeyChunks.getChunk(chunkId); + Block block = keyChunk.getBlock(blockChannel); + + // the partition number within a block is consistent. + final int rfPartition = getPartition(block, 0, totalPartitionCount); + + // Lazily initialize the bloom filter of this partition.
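The per-partition branch below guards each bloom-filter slot with its own ReentrantLock, so initialization and writes for different partitions never contend with each other. A simplified sketch of that striping idea follows; here the lock is taken unconditionally, whereas the diff adds a lock-free null pre-check, and the toy bit array merely stands in for RFBloomFilter.

```java
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

class StripedFilters {
    private final long[][] filters;   // stand-in for RFBloomFilter[], one slot per partition
    private final Lock[] locks;       // one lock per partition, like bfInitializingLocks

    StripedFilters(int partitionCount) {
        this.filters = new long[partitionCount][];
        this.locks = new Lock[partitionCount];
        for (int i = 0; i < partitionCount; i++) {
            locks[i] = new ReentrantLock();
        }
    }

    void insert(int partition, long sizeHint, long hash) {
        locks[partition].lock();
        try {
            if (filters[partition] == null) {
                // lazy, partition-local initialization; other partitions are untouched
                filters[partition] = new long[(int) Math.max(1L, sizeHint / 64)];
            }
            long[] words = filters[partition];
            int bit = (int) ((hash & Long.MAX_VALUE) % (words.length * 64L));
            words[bit >>> 6] |= 1L << (bit & 63);   // toy single-bit "bloom filter" insert
        } finally {
            locks[partition].unlock();
        }
    }
}
```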
+ if (rfBloomFilters[rfPartition] == null) { + bfInitializingLocks[rfPartition].lock(); + try { + if (rfBloomFilters[rfPartition] == null) { + + rfBloomFilters[rfPartition] = RFBloomFilter.createBlockLongBloomFilter( + (int) bfSizeOfPartition); + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("initialize local RF for buildSide = " + + rfItem.getBuildColumnName() + + ", probeSide = " + rfItem.getProbeColumnName() + + ", rowCount = " + + bfSizeOfPartition + + ", sizeInBytes = " + + rfBloomFilters[rfPartition].sizeInBytes() + + ", partition num = " + rfPartition); + } + + } + + } finally { + bfInitializingLocks[rfPartition].unlock(); + } + } + + // Hold the partition lock while writing a chunk. + bfInitializingLocks[rfPartition].lock(); + try { + // write into the thread-local component. + block.addLongToBloomFilter(rfBloomFilters[rfPartition]); + } finally { + bfInitializingLocks[rfPartition].unlock(); + } + + } + } else { + for (int chunkId = startChunkId; chunkId < endChunkId; ++chunkId) { + final Chunk keyChunk = builderKeyChunks.getChunk(chunkId); + Block block = keyChunk.getBlock(blockChannel); + + if (bfPartitionCounter.get() > 0) { + // Slow path: some filters may not be initialized yet, so route row by row. + for (int pos = 0; pos < keyChunk.getPositionCount(); pos++) { + + final int rfPartition = getPartition(block, pos, totalPartitionCount); + + if (rfBloomFilters[rfPartition] == null) { + + bfInitializingLocks[rfPartition].lock(); + try { + if (rfBloomFilters[rfPartition] == null) { + // To initialize the bloom-filter for each partition. + final int partitionsOfNode = fragmentRFManager.getPartitionsOfNode(); + final long bfSizeOfPartition = buildSideRows / partitionsOfNode; + + rfBloomFilters[rfPartition] = + ConcurrentIntBloomFilter.create(bfSizeOfPartition, fpp); + + bfPartitionCounter.decrementAndGet(); + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("initialize local RF for buildSide = " + + rfItem.getBuildColumnName() + + ", probeSide = " + rfItem.getProbeColumnName() + + ", rowCount = " + + bfSizeOfPartition + + ", sizeInBytes = " + + rfBloomFilters[rfPartition].sizeInBytes() + + ", partition num = " + rfPartition); + } + + } + + } finally { + bfInitializingLocks[rfPartition].unlock(); + } + } + + rfBloomFilters[rfPartition].putInt(block.hashCode(pos)); + } + } else { + + // CAS operation on array for each row. + block.addIntToBloomFilter(totalPartitionCount, rfBloomFilters); + + } + } + } + + // check whether all threads have finished building the partition RF. + if (bfParallelismCounter.decrementAndGet() == 0) { + rfItem.assignRF(rfBloomFilters); + + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("assign RF : " + rfBloomFilters.length + + " for FragmentRFManager: " + fragmentRFManager); + } + + } + + } + + // for test + public AtomicInteger getBfParallelismCounter() { + return bfParallelismCounter; + } + + private long getBfSizeOfPartition(boolean isHashTableShared, int partitionsInSynchronizer, long buildSideRows) { + long bfSizeOfPartition; + final int partitionsOfNode = fragmentRFManager.getPartitionsOfNode(); + if (isHashTableShared) { + // To initialize the bloom-filter for each partition. + bfSizeOfPartition = (long) Math.ceil(buildSideRows * 1.0d / partitionsOfNode); + } else if (partitionsInSynchronizer > 0) { + // Valid partition number. + bfSizeOfPartition = (long) Math.ceil(buildSideRows * 1.0d / partitionsInSynchronizer); + } else { + // Invalid partition number: divide evenly according to total parallelism.
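getBfSizeOfPartition, whose final branch completes just below, picks one of three per-partition size estimates for the bloom filter. A worked example of the arithmetic under assumed values; all numbers are hypothetical.

```java
public class BfSizing {
    public static void main(String[] args) {
        long buildSideRows = 1_000_000L;    // hypothetical build-side row count
        int partitionsOfNode = 16;          // fragmentRFManager.getPartitionsOfNode()
        int partitionsInSynchronizer = 4;   // partitions owned by this Synchronizer
        int buildSideParallelism = 8;

        // branch 1: shared hash table -> all partitions of the node share the build side
        long shared = (long) Math.ceil(buildSideRows * 1.0d / partitionsOfNode);                // 62500

        // branch 2: valid partition count -> rows are local to this synchronizer's partitions
        long local = (long) Math.ceil(buildSideRows * 1.0d / partitionsInSynchronizer);         // 250000

        // branch 3: fallback -> divide by partitions-per-worker (16 / 8 = 2 here)
        long fallback = (long) Math.ceil(buildSideRows / (partitionsOfNode * 1.0d / buildSideParallelism)); // 500000

        System.out.println(shared + " " + local + " " + fallback);
    }
}
```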
+ bfSizeOfPartition = (long) Math.ceil( + buildSideRows / (partitionsOfNode * 1.0d / buildSideParallelism)); + } + return bfSizeOfPartition; + } + + private static int getPartition(Block block, int position, int partitionCount) { + + // Convert the searchVal from field space to hash space + long hashVal = block.hashCodeUseXxhash(position); + int partition = (int) ((hashVal & Long.MAX_VALUE) % partitionCount); + + return partition; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/TableScanClient.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/TableScanClient.java index 68f0c0d3b..37ef2a91e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/TableScanClient.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/TableScanClient.java @@ -17,6 +17,7 @@ package com.alibaba.polardbx.executor.operator; import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.datatype.DecimalTypeBase; import com.alibaba.polardbx.common.datatype.UInt64; import com.alibaba.polardbx.common.jdbc.BytesSql; import com.alibaba.polardbx.optimizer.planmanager.feedback.PhyFeedBack; @@ -47,6 +48,7 @@ import com.alibaba.polardbx.common.jdbc.ZeroDate; import com.alibaba.polardbx.common.jdbc.ZeroTimestamp; import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.properties.DynamicConfig; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; import com.alibaba.polardbx.common.utils.logger.Logger; @@ -56,8 +58,10 @@ import com.alibaba.polardbx.common.utils.thread.ExecutorUtil; import com.alibaba.polardbx.common.utils.thread.NamedThreadFactory; import com.alibaba.polardbx.common.utils.thread.ThreadCpuStatUtil; +import com.alibaba.polardbx.config.ConfigDataMode; import com.alibaba.polardbx.executor.Xprotocol.XRowSet; import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.DecimalBlockBuilder; import com.alibaba.polardbx.executor.chunk.SliceBlockBuilder; import com.alibaba.polardbx.executor.common.ExecutorContext; import com.alibaba.polardbx.executor.mpp.deploy.ServiceProvider; @@ -182,7 +186,7 @@ public TableScanClient(ExecutionContext context, CursorMeta meta, this.useTransaction = useTransaction; this.socketTimeout = (int) context.getParamManager().getLong(ConnectionParams.SOCKET_TIMEOUT); this.prefetchNum = prefetchNum; - this.slowTimeThreshold = context.getPhysicalRecorder().getSlowSqlTime(); + this.slowTimeThreshold = context.getParamManager().getLong(ConnectionParams.SLOW_SQL_TIME); this.enableTaskCpu = ExecUtils.isSQLMetricEnabled(context); if (context.getRuntimeStatistics() != null) { this.runtimeStat = (RuntimeStatistics) context.getRuntimeStatistics(); @@ -582,6 +586,9 @@ protected void collectConnectionStats() { } protected String getCurrentDbkey() { + if (ConfigDataMode.isColumnarMode()) { + return ANONAMOUS_DBKEY; + } IDataSource o = ExecutorContext.getContext(jdbcSplit.getSchemaName()) .getTopologyHandler().get(jdbcSplit.getDbIndex()).getDataSource(); String currentDbKey = ANONAMOUS_DBKEY; @@ -673,7 +680,14 @@ protected void executeQuery() throws SQLException { if (pureAsync) { final XPreparedStatement xPreparedStatement = stmt.unwrap(XPreparedStatement.class); assert xPreparedStatement.getConnection().unwrap(XConnection.class).isStreamMode(); - 
xPreparedStatement.getConnection().unwrap(XConnection.class).getSession().setChunkResult(true); + boolean chunkResult = true; + if (TableScanClient.this.context.isEnableOrcRawTypeBlock()) { + // For raw type block, do not enable chunk result. + // A normal query should not get here. + chunkResult = false; + } + xPreparedStatement.getConnection().unwrap(XConnection.class).getSession() + .setChunkResult(chunkResult); xResult = xPreparedStatement.executeQueryX(); // Return immediately when stream mode. } else { startTimeNano = System.nanoTime(); @@ -970,6 +984,10 @@ private void block2block(DataType type, PolarxResultset.ColumnMetaData metaData, } } } else if (clazz == Decimal.class) { + DecimalBlockBuilder decBuilder = (DecimalBlockBuilder) dst; + boolean useDecimal64 = DynamicConfig.getInstance().enableXResultDecimal64() + && decBuilder.canWriteDecimal64() + && type.getScale() != DecimalTypeBase.DEFAULT_SCALE; for (int i = 0; i < rowCount; ++i) { src.next(); if (src.isNull()) { @@ -977,7 +995,11 @@ private void block2block(DataType type, PolarxResultset.ColumnMetaData metaData, } else { final com.alibaba.polardbx.rpc.result.chunk.Decimal decimal = src.getDecimal(); if (null == decimal.getBigUnscaled()) { - dst.writeDecimal(new Decimal(decimal.getUnscaled(), decimal.getScale())); + if (useDecimal64) { + dst.writeLong(decimal.getUnscaled()); + } else { + dst.writeDecimal(new Decimal(decimal.getUnscaled(), decimal.getScale())); + } } else { dst.writeDecimal( Decimal.fromBigDecimal(new BigDecimal(decimal.getBigUnscaled(), decimal.getScale()))); @@ -1094,6 +1116,16 @@ protected int fillChunk(DataType[] dataTypes, BlockBuilder[] blockBuilders, int } } + protected void fillRawOrcTypeRow(DataType[] dataTypes, BlockBuilder[] blockBuilders, + ExecutionContext context) throws Exception { + assert xResult != null; + XResultObject resultObject = xResult.current(); + if (resultObject.getRow() != null) { + XRowSet.appendRawOrcTypeRowForX(xResult, dataTypes, blockBuilders, context); + ++count; + } + } + private boolean chunkNext() throws SQLException { final XResultObject current = xResult.current(); if (null == current || current.getRow() != null) { @@ -1140,7 +1172,7 @@ protected boolean next() { logger.warn(context.getTraceId() + " here occur error, but current scan is closed!", e); } else { throw new TddlRuntimeException(ErrorCode.ERR_EXECUTE_ON_MYSQL, e, jdbcSplit.getDbIndex(), - getCurrentDbkey(), e.getMessage()); + getCurrentDbkey(), context.getTraceId() + "," + e.getMessage()); } } return false; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/TableScanExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/TableScanExec.java index bca0ddfd8..2570e477f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/TableScanExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/TableScanExec.java @@ -136,12 +136,12 @@ void doOpen() { } if (dataTypes == null) { - createBlockBuilders(); List columns = getDataTypes(); dataTypes = new DataType[columns.size()]; for (int i = 0; i < columns.size(); i++) { dataTypes[i] = columns.get(i); } + createBlockBuilders(); } if (scanClient.getSplitNum() != 0) { scanClient.executePrefetchThread(false); } @@ -235,7 +235,12 @@ protected Chunk fetchChunk() { continue; } } - if (consumeResultSet.isPureAsyncMode()) { + if (context.isEnableOrcRawTypeBlock()) { + // Special block parsing for the raw orc block builder. + // A normal table scan should not get here.
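Two raw encodings recur in the hunks above: the Decimal64 fast path stores a small decimal as its unscaled long at a fixed scale instead of materializing a Decimal object, and the unsigned BIGINT path XORs the value with UInt64Utils.FLIP_MASK so that unsigned ordering survives storage in a signed long. A hedged sketch of both ideas; FLIP_MASK is assumed here to be the sign bit, since the real constant is not shown in this diff.

```java
import java.math.BigDecimal;

public class RawEncodingSketch {
    static final long FLIP_MASK = 0x8000000000000000L;   // assumed value of UInt64Utils.FLIP_MASK

    // Decimal64 path: "123.45" at scale 2 is kept as the long 12345.
    static long toDecimal64(BigDecimal d, int scale) {
        return d.movePointRight(scale).longValueExact();  // caller guarantees the precision fits
    }

    // Unsigned BIGINT path: flipping the sign bit makes unsigned order match signed order,
    // so values can be stored and compared as ordinary Java longs.
    static long encodeUnsigned(long unsignedBits) {
        return unsignedBits ^ FLIP_MASK;
    }

    public static void main(String[] args) {
        System.out.println(toDecimal64(new BigDecimal("123.45"), 2));            // 12345
        // 0 < 2^63 in unsigned order; after flipping, signed comparison agrees:
        System.out.println(encodeUnsigned(0L) < encodeUnsigned(Long.MIN_VALUE)); // true
    }
}
```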
+ appendRawOrcTypeRow(consumeResultSet); + count++; + } else if (consumeResultSet.isPureAsyncMode()) { final int filled = consumeResultSet.fillChunk(dataTypes, blockBuilders, chunkLimit - count); count += filled; } else { @@ -265,6 +270,14 @@ protected void appendRow(TableScanClient.SplitResultSet consumeResultSet) throws ResultSetCursorExec.buildOneRow(consumeResultSet.getResultSet(), dataTypes, blockBuilders, context); } + protected void appendRawOrcTypeRow(TableScanClient.SplitResultSet consumeResultSet) throws Exception { + if (consumeResultSet.isOnlyXResult()) { + consumeResultSet.fillRawOrcTypeRow(dataTypes, blockBuilders, context); + } else { + ResultSetCursorExec.buildRawOrcTypeRow(consumeResultSet.getResultSet(), dataTypes, blockBuilders); + } + } + @Override synchronized void doClose() { if (targetPlanStatGroup != null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/UnionAllExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/UnionAllExec.java index f0b32848f..4dc2fbb72 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/UnionAllExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/UnionAllExec.java @@ -16,15 +16,15 @@ package com.alibaba.polardbx.executor.operator; - import com.google.common.util.concurrent.ListenableFuture; - import com.alibaba.polardbx.executor.chunk.Chunk; - import com.alibaba.polardbx.executor.chunk.ChunkConverter; - import com.alibaba.polardbx.executor.chunk.Converters; - import com.alibaba.polardbx.optimizer.context.ExecutionContext; - import com.alibaba.polardbx.optimizer.core.datatype.DataType; - - import java.util.ArrayList; - import java.util.List; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.chunk.ChunkConverter; +import com.alibaba.polardbx.executor.chunk.Converters; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.google.common.util.concurrent.ListenableFuture; + +import java.util.ArrayList; +import java.util.List; /** * union all chunk executor diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ValueExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ValueExec.java index 4a01a56e3..796240224 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ValueExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/ValueExec.java @@ -16,11 +16,11 @@ package com.alibaba.polardbx.executor.operator; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListenableFuture; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/VectorizedProjectExec.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/VectorizedProjectExec.java index 3aef20836..a325a15be 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/VectorizedProjectExec.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/VectorizedProjectExec.java @@ -24,6 +24,7 @@ 
import com.alibaba.polardbx.executor.chunk.MutableChunk; import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; import com.alibaba.polardbx.executor.chunk.ReferenceBlock; +import com.alibaba.polardbx.executor.operator.util.ObjectPools; import com.alibaba.polardbx.executor.vectorized.EvaluationContext; import com.alibaba.polardbx.executor.vectorized.InputRefVectorizedExpression; import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; @@ -65,6 +66,9 @@ public class VectorizedProjectExec extends AbstractExecutor { */ private Pair[] commonSubExpressions; + private ObjectPools objectPools; + private boolean shouldRecycle; + public VectorizedProjectExec(Executor input, List expressions, List preAllocatedChunks, List dataTypes, @@ -76,6 +80,8 @@ public VectorizedProjectExec(Executor input, List expressi this.preAllocatedChunks = preAllocatedChunks; this.commonSubExpressions = new Pair[expressions.size()]; this.dataTypes = dataTypes; + this.objectPools = ObjectPools.create(); + this.shouldRecycle = context.getParamManager().getBoolean(ConnectionParams.ENABLE_DRIVER_OBJECT_POOL); Preconditions.checkArgument(expressions.size() == dataTypes.size()); } @@ -84,7 +90,7 @@ void doOpen() { this.outputBlocks = new Block[dataTypes.size()]; if (context.getParamManager().getBoolean(ConnectionParams.ENABLE_COMMON_SUB_EXPRESSION_TREE_ELIMINATE)) { - for(int i = 0; i < expressions.size(); i++) { + for (int i = 0; i < expressions.size(); i++) { this.commonSubExpressions[i] = null; VectorizedExpression e = expressions.get(i); @@ -105,10 +111,11 @@ void doOpen() { List otherInputIndexes = VectorizedExpressionUtils.getInputIndex(otherExpression); DataType otherOutputDataType = otherExpression.getOutputDataType(); - for(int k = 0; k < e.getChildren().length; k++) { + for (int k = 0; k < e.getChildren().length; k++) { VectorizedExpression child = e.getChildren()[k]; - if (child.getOutputIndex() == outputIndex && DataTypeUtil.equalsSemantically(outputDataType, otherOutputDataType)) { + if (child.getOutputIndex() == outputIndex && DataTypeUtil.equalsSemantically(outputDataType, + otherOutputDataType)) { List inputIndexes = VectorizedExpressionUtils.getInputIndex(child); if (!otherInputIndexes.equals(inputIndexes)) { break; @@ -129,7 +136,6 @@ void doOpen() { } } - for (int i = 0; i < expressions.size(); i++) { VectorizedExpression e = expressions.get(i); @@ -158,7 +164,10 @@ Chunk doNextChunk() { } } - return this.buildChunk(inputChunk); + Chunk result = this.buildChunk(inputChunk); + result.setPartIndex(inputChunk.getPartIndex()); + result.setPartCount(inputChunk.getPartCount()); + return result; } private void evaluateExpression(int index, Chunk inputChunk) { @@ -181,17 +190,27 @@ private void evaluateExpression(int index, Chunk inputChunk) { // Allocate the memory of output vector at runtime. 
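ENABLE_DRIVER_OBJECT_POOL switches the pre-allocated chunk between plain reallocation and pool-backed allocation. The internals of `ObjectPools` are not part of this diff; a minimal sketch of the borrow/recycle pattern such a pool presumably implements:

```java
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.function.Supplier;

// Illustrative object pool: borrow a buffer if one is free, otherwise create one;
// recycled buffers are reused by later allocations instead of stressing the GC.
public final class SimpleObjectPool<T> {
    private final Deque<T> freeList = new ArrayDeque<>();
    private final Supplier<T> factory;

    public SimpleObjectPool(Supplier<T> factory) {
        this.factory = factory;
    }

    public T borrow() {
        T pooled = freeList.poll();
        return pooled != null ? pooled : factory.get();
    }

    public void recycle(T object) {
        freeList.push(object);
    }

    public void clear() {
        freeList.clear(); // mirrors objectPools.clear() in doClose() below
    }
}
```

The pool is per-operator and cleared in doClose(), so recycled blocks never outlive the operator that created them.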
if (this.commonSubExpressions[index] == null) { - preAllocatedChunk.reallocate(chunkSize, blockCount); + if (shouldRecycle) { + preAllocatedChunk.allocateWithObjectPool(chunkSize, blockCount, objectPools); + } else { + preAllocatedChunk.reallocate(chunkSize, blockCount); + } + } else { // for common sub expression Pair subExpressionInfo = this.commonSubExpressions[index]; int expressionIndex = subExpressionInfo.getKey(); int commonBlockIndex = subExpressionInfo.getValue(); - preAllocatedChunk.reallocate(chunkSize, commonBlockIndex + 1); + + if (shouldRecycle) { + preAllocatedChunk.allocateWithObjectPool(chunkSize, commonBlockIndex + 1, objectPools); + } else { + preAllocatedChunk.reallocate(chunkSize, commonBlockIndex + 1); + } + preAllocatedChunk.setSlotAt((RandomAccessBlock) this.outputBlocks[expressionIndex], commonBlockIndex); } - // Evaluation & Result Output. EvaluationContext evaluationContext = new EvaluationContext(preAllocatedChunk, this.context); expression.eval(evaluationContext); @@ -202,7 +221,8 @@ private void evaluateExpression(int index, Chunk inputChunk) { Block outputBlock = (Block) Objects.requireNonNull(preAllocatedChunk.slotIn(outputIndex)); if (outputBlock instanceof ReferenceBlock) { // If output block is reference block, try to get a type-specific materialized block from it. - Block typeSpecificBlock = ((ReferenceBlock) outputBlock).toTypeSpecificBlock(evaluationContext); + Block typeSpecificBlock = + ((ReferenceBlock) outputBlock).toTypeSpecificBlock(evaluationContext, dataTypes.get(index)); outputBlock = typeSpecificBlock; } else { // compaction by selection array @@ -239,6 +259,9 @@ public Chunk buildChunk(Chunk inputChunk) { @Override void doClose() { + if (objectPools != null) { + objectPools.clear(); + } input.close(); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/AbstractOverWindowFrame.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/AbstractOverWindowFrame.java deleted file mode 100644 index e10dc4cec..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/AbstractOverWindowFrame.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-package com.alibaba.polardbx.executor.operator.frame;
-
-import com.alibaba.polardbx.executor.operator.util.ChunksIndex;
-import com.alibaba.polardbx.executor.calc.Aggregator;
-
-import java.util.List;
-
-public abstract class AbstractOverWindowFrame implements OverWindowFrame {
- protected List aggregators;
- protected ChunksIndex chunksIndex;
-
- public AbstractOverWindowFrame(List aggregators) {
- this.aggregators = aggregators;
- }
-
- @Override
- public List getAggregators() {
- return aggregators;
- }
-
- @Override
- public void resetChunks(ChunksIndex chunksIndex) {
- this.chunksIndex = chunksIndex;
- }
-}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/ChunkBuffer.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/ChunkBuffer.java
new file mode 100644
index 000000000..960813a00
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/ChunkBuffer.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.frame;
+
+import com.alibaba.polardbx.executor.chunk.Chunk;
+
+public class ChunkBuffer {
+ protected Chunk chunk;
+
+ void reset(Chunk chunk) {
+ this.chunk = chunk;
+ }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/OverWindowFrame.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/OverWindowFrame.java
index 716752d14..a053e201c 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/OverWindowFrame.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/OverWindowFrame.java
@@ -17,7 +17,7 @@
 package com.alibaba.polardbx.executor.operator.frame;
 import com.alibaba.polardbx.executor.operator.util.ChunksIndex;
-import com.alibaba.polardbx.executor.calc.Aggregator;
+import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator;
 import java.io.Serializable;
 import java.util.List;
@@ -26,22 +26,13 @@ public interface OverWindowFrame extends Serializable {
 List getAggregators();
- /**
- * @param chunksIndex chunkList that related to the current partition
- */
+ // Update the chunks covered by the current partition
 void resetChunks(ChunksIndex chunksIndex);
- /**
- * @param leftIndex left index of the current partition in the chunkList
- * @param rightIndex right index of the current partition in the chunkList
- */
+ // Update the boundary indexes of the current partition within chunksIndex
 void updateIndex(int leftIndex, int rightIndex);
- /**
- * process data
- *
- * @param index index in the chunkList
- */
- void processData(int index);
+ // Invoke the window frame to process the current row and return the aggregated values
+ List processData(int index);
}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RangeSlidingOverFrame.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RangeSlidingOverFrame.java
index 6d3eae7eb..86ae79964 100644
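With processData now returning one value per aggregator, the interface reads as a small per-partition protocol. A sketch of how a window executor would presumably drive it (the driver below is illustrative; the real operator wiring and boundary conventions live outside this diff):

```java
package com.alibaba.polardbx.executor.operator.frame;

import com.alibaba.polardbx.executor.operator.util.ChunksIndex;

import java.util.List;

// Illustrative driver: one frame instance is reused across partitions.
public class OverWindowFrameDriverSketch {
    static void drive(OverWindowFrame frame, ChunksIndex chunks,
                      int partitionStart, int partitionEnd) {
        frame.resetChunks(chunks);                       // chunks backing this partition
        frame.updateIndex(partitionStart, partitionEnd); // partition boundaries
        for (int row = partitionStart; row < partitionEnd; row++) {
            List<?> values = frame.processData(row);     // one value per aggregator
            // ... write values into the output block builders ...
        }
    }
}
```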
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RangeSlidingOverFrame.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RangeSlidingOverFrame.java @@ -20,10 +20,11 @@ import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.executor.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; import java.math.BigInteger; import java.util.List; +import java.util.stream.Collectors; /** * The range sliding window frame calculates frames with the following SQL form: @@ -91,18 +92,20 @@ public void updateIndex(int leftIndex, int rightIndex) { } @Override - public void processData(int index) { + public List processData(int index) { Object currentValue = chunksIndex.rowAt(index).getObject(orderByColIndex); if (currentValue == null) { if (!isNullVisit) { - process(index, getNullRowsRight(index)); + return process(index, getNullRowsRight(index)); } - return; + return aggregators.stream().map(t -> t.value()).collect(Collectors.toList()); } - if (!(lastProcessedValue != null && lastProcessedValue.equals(currentValue))) { + if (lastProcessedValue != null && lastProcessedValue.equals(currentValue)) { + return aggregators.stream().map(t -> t.value()).collect(Collectors.toList()); + } else { int[] indexes = getBound(index); lastProcessedValue = currentValue; - process(indexes[0], indexes[1]); + return process(indexes[0], indexes[1]); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RangeUnboundedFollowingOverFrame.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RangeUnboundedFollowingOverFrame.java index f92faa96a..c198a562e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RangeUnboundedFollowingOverFrame.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RangeUnboundedFollowingOverFrame.java @@ -17,14 +17,14 @@ package com.alibaba.polardbx.executor.operator.frame; import com.alibaba.polardbx.common.datatype.Decimal; -import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.executor.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; import java.math.BigInteger; import java.util.List; +import java.util.stream.Collectors; /** * The range unboundedFollowing window frame calculates frames with the following SQL form: @@ -33,7 +33,7 @@ * *

e.g.: ... RANGE BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING. */
-public class RangeUnboundedFollowingOverFrame extends AbstractOverWindowFrame {
+public class RangeUnboundedFollowingOverFrame extends UnboundedFollowingOverFrame {
 private int rightIndex = 0;
 private int leftIndex = 0;
@@ -82,27 +82,26 @@ public void updateIndex(int leftIndex, int rightIndex) {
 currentIndex = leftIndex;
 // Ascending and descending orders require different handling
 if (isAscOrder < 0) {
- aggregators.forEach(t -> t.resetToInitValue(0));
+ aggregators = aggregators.stream().map(aggregator -> aggregator.getNew()).collect(Collectors.toList());
 }
 }
- private void process(int leftIndex, int rightIndex) {
+ private List process(int leftIndex, int rightIndex) {
 if (lastProcessedLeft == leftIndex && lastProcessedRight == rightIndex) {
- return;
+ return aggregators.stream().map(t -> t.value()).collect(Collectors.toList());
+ }
+ aggregators = aggregators.stream().map(aggregator -> aggregator.getNew()).collect(Collectors.toList());
+ for (int i = leftIndex; i <= rightIndex; i++) {
+ final int l = i;
+ aggregators.forEach(aggregator -> aggregator.aggregate(chunksIndex.rowAt(l)));
 }
- aggregators.forEach(t -> {
- t.resetToInitValue(0);
- for (int i = leftIndex; i <= rightIndex; i++) {
- Chunk.ChunkRow row = chunksIndex.rowAt(i);
- t.accumulate(0, row.getChunk(), row.getPosition());
- }
- });
 lastProcessedLeft = leftIndex;
 lastProcessedRight = rightIndex;
+ return aggregators.stream().map(t -> t.value()).collect(Collectors.toList());
 }
 @Override
- public void processData(int index) {
+ public List processData(int index) {
 Object currentValue = chunksIndex.rowAt(index).getObject(orderByColIndex);
 int[] indexes;
 if (currentValue == null) {
@@ -111,7 +110,7 @@ public void processData(int index) {
 indexes = getBound(index);
 }
 lastProcessedValue = currentValue;
- process(indexes[0], indexes[1]);
+ return process(indexes[0], indexes[1]);
 }
 private void updateNullRows(int index) {
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RangeUnboundedPrecedingOverFrame.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RangeUnboundedPrecedingOverFrame.java
index 1e48abc82..4d427c06a 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RangeUnboundedPrecedingOverFrame.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RangeUnboundedPrecedingOverFrame.java
@@ -17,14 +17,14 @@
 package com.alibaba.polardbx.executor.operator.frame;
 import com.alibaba.polardbx.common.datatype.Decimal;
-import com.alibaba.polardbx.executor.chunk.Chunk;
 import com.alibaba.polardbx.optimizer.core.datatype.DataType;
 import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil;
 import com.alibaba.polardbx.optimizer.core.datatype.DataTypes;
-import com.alibaba.polardbx.executor.calc.Aggregator;
+import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator;
 import java.math.BigInteger;
 import java.util.List;
+import java.util.stream.Collectors;
 /**
 * The range unboundedPreceding window frame calculates frames with the following SQL form:
 *
@@ -33,7 +33,7 @@
 *
 *

e.g.: ... RANGE BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING. */
-public class RangeUnboundedPrecedingOverFrame extends AbstractOverWindowFrame {
+public class RangeUnboundedPrecedingOverFrame extends UnboundedPrecedingOverFrame {
 private final int rightBound;
 private int rightIndex = 0;
@@ -79,16 +79,17 @@ public void updateIndex(int leftIndex, int rightIndex) {//[]
 }
 currentIndex = leftIndex;
 if (isAscOrder > 0) {
- aggregators.forEach(t -> t.resetToInitValue(0));
+ aggregators = aggregators.stream().map(aggregator -> aggregator.getNew()).collect(Collectors.toList());
 }
 }
 @Override
- public void processData(int index) {
+ public List processData(int index) {
 Object currentValue = chunksIndex.rowAt(index).getObject(orderByColIndex);
 int[] indexes = new int[2];
 if (lastProcessedValue != null && lastProcessedValue.equals(currentValue)) {
- return;
+ return aggregators.stream().map(aggregator -> aggregator.eval(chunksIndex.rowAt(index)))
+ .collect(Collectors.toList());
 }
 indexes[0] = leftIndex;
 if (currentValue == null) {
@@ -98,31 +99,31 @@ public void processData(int index) {
 indexes[1] = otherSize;
 }
 lastProcessedValue = currentValue;
- process(indexes[0], indexes[1]);
+ return process(indexes[0], indexes[1]);
 }
- private void process(int leftIndex, int rightIndex) {
+ private List process(int leftIndex, int rightIndex) {
 // In ascending order the frame only grows, so results keep being accumulated
 if (isAscOrder > 0) {
 while (currentIndex <= rightIndex) {
- Chunk.ChunkRow row = chunksIndex.rowAt(currentIndex++);
- aggregators.forEach(t -> t.accumulate(0, row.getChunk(), row.getPosition()));
+ aggregators.forEach(aggregator -> aggregator.aggregate(chunksIndex.rowAt(currentIndex)));
+ currentIndex++;
 }
- return;
+ return aggregators.stream().map(t -> t.eval(chunksIndex.rowAt(rightIndex))).collect(Collectors.toList());
 }
 if (lastProcessedLeft == leftIndex && lastProcessedRight == rightIndex) {
- return;
+ return aggregators.stream().map(t -> t.eval(chunksIndex.rowAt(rightIndex))).collect(Collectors.toList());
 }
 // In descending order, recompute over the whole range; implementing retract() for some functions would reduce their recomputation cost
- aggregators.forEach(t -> {
- t.resetToInitValue(0);
- for (int i = leftIndex; i <= rightIndex; i++) {
- Chunk.ChunkRow row = chunksIndex.rowAt(i);
- t.accumulate(0, row.getChunk(), row.getPosition());
- }
- });
+ aggregators = aggregators.stream().map(aggregator -> aggregator.getNew()).collect(Collectors.toList());
+ for (int i = leftIndex; i <= rightIndex; i++) {
+ final int l = i;
+ aggregators.forEach(aggregator -> aggregator.aggregate(chunksIndex.rowAt(l)));
+ }
 lastProcessedRight = rightIndex;
 lastProcessedLeft = leftIndex;
+ return aggregators.stream().map(t -> t.eval(chunksIndex.rowAt(lastProcessedRight)))
+ .collect(Collectors.toList());
 }
 private void updateNullRows(int index) {
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RowSlidingOverFrame.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RowSlidingOverFrame.java
index 3b402afe9..72506a76c 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RowSlidingOverFrame.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RowSlidingOverFrame.java
@@ -16,7 +16,7 @@
 package com.alibaba.polardbx.executor.operator.frame;
-import com.alibaba.polardbx.executor.calc.Aggregator;
+import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator;
 import java.util.List;
@@ -55,10 +55,10 @@ public void updateIndex(int leftIndex, int rightIndex) {
 }
 @Override
- public void processData(int index) {
+
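The ascending branch above never resets its aggregators: with an unbounded-preceding frame the window only ever grows, so each row merely extends the previous frame. The same idea in a self-contained form, using a running SUM:

```java
// Illustrative: for "UNBOUNDED PRECEDING AND CURRENT ROW" in ascending order,
// the frame of row i is the frame of row i-1 plus row i, so one accumulator suffices.
public class RunningSumSketch {
    public static void main(String[] args) {
        long[] column = {3, 1, 4, 1, 5};
        long sum = 0;                    // the single reusable accumulator
        long[] perRowResult = new long[column.length];
        for (int i = 0; i < column.length; i++) {
            sum += column[i];            // append-only: no reset, no recomputation
            perRowResult[i] = sum;       // 3, 4, 8, 9, 14
        }
        // Descending order breaks this: rows leave the frame, which requires either
        // full recomputation (as in the code above) or a retract() on the aggregator.
    }
}
```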
public List processData(int index) { int realLeftIndex = Math.max(leftIndex, index - leftBound); int realRightIndex = Math.min(rightIndex, index + rightBound); - process(realLeftIndex, realRightIndex); + return process(realLeftIndex, realRightIndex); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RowUnboundedFollowingOverFrame.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RowUnboundedFollowingOverFrame.java index 306b5e634..9ba9f554f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RowUnboundedFollowingOverFrame.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RowUnboundedFollowingOverFrame.java @@ -16,10 +16,10 @@ package com.alibaba.polardbx.executor.operator.frame; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.executor.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; import java.util.List; +import java.util.stream.Collectors; /** * The row unboundedFollowing window frame calculates frames with the following SQL form: @@ -28,7 +28,7 @@ * *

e.g.: ... ROW BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING. */
-public class RowUnboundedFollowingOverFrame extends AbstractOverWindowFrame {
+public class RowUnboundedFollowingOverFrame extends UnboundedFollowingOverFrame {
 private int leftBound;
 private int leftIndex;
@@ -50,20 +50,19 @@ public void updateIndex(int leftIndex, int rightIndex) {
 }
 @Override
- public void processData(int index) {
+ public List processData(int index) {
 // e.g. with 10 preceding and unbounded following, the first ten rows share the same result; the state check is required to avoid sliding back into the previous partition
 if (currentFrame && index - leftBound <= leftIndex) {
- return;
+ return aggregators.stream().map(t -> t.value()).collect(Collectors.toList());
 }
 currentFrame = true;
 int realLeftIndex = Math.max(leftIndex, index - leftBound);
- aggregators.forEach(t -> {
- t.resetToInitValue(0);
- for (int j = realLeftIndex; j <= rightIndex; j++) {
- Chunk.ChunkRow row = chunksIndex.rowAt(j);
- t.accumulate(0, row.getChunk(), row.getPosition());
- }
- });
+ aggregators = aggregators.stream().map(t -> t.getNew()).collect(Collectors.toList());
+ for (int i = realLeftIndex; i <= rightIndex; i++) {
+ final int l = i;
+ aggregators.forEach(aggregator -> aggregator.aggregate(chunksIndex.rowAt(l)));
+ }
+ return aggregators.stream().map(t -> t.value()).collect(Collectors.toList());
 }
}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RowUnboundedPrecedingOverFrame.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RowUnboundedPrecedingOverFrame.java
index 61aa9e3f1..d0055b1ca 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RowUnboundedPrecedingOverFrame.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/RowUnboundedPrecedingOverFrame.java
@@ -16,10 +16,10 @@
 package com.alibaba.polardbx.executor.operator.frame;
-import com.alibaba.polardbx.executor.chunk.Chunk;
-import com.alibaba.polardbx.executor.calc.Aggregator;
+import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator;
 import java.util.List;
+import java.util.stream.Collectors;
 /**
 * The row UnboundedPreceding window frame calculates frames with the following SQL form:
 *
@@ -28,7 +28,7 @@
 *
 *

e.g.: ... ROW BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING. */
-public class RowUnboundedPrecedingOverFrame extends AbstractOverWindowFrame {
+public class RowUnboundedPrecedingOverFrame extends UnboundedPrecedingOverFrame {
 private int rightBound;
@@ -38,6 +38,7 @@ public class RowUnboundedPrecedingOverFrame extends AbstractOverWindowFrame {
 public RowUnboundedPrecedingOverFrame(
 List aggregators,
 int rightBound) {
 super(aggregators);
 this.rightBound = rightBound;
 }
@@ -47,16 +48,17 @@ public void updateIndex(int leftIndex, int rightIndex) {
 this.rightIndex = rightIndex - 1;
 currentIndex = leftIndex;
 // Reset once, and only once, whenever the partition changes
- aggregators.forEach(t -> t.resetToInitValue(0));
+ aggregators = aggregators.stream().map(aggregator -> aggregator.getNew()).collect(Collectors.toList());
 }
 @Override
- public void processData(int index) {
+ public List processData(int index) {
 // Rows are only ever appended, so the window functions never need to be reset
 // e.g. with unbounded preceding and 10 following, the trailing ten rows are not recomputed
 while (currentIndex <= (index + rightBound) && currentIndex <= rightIndex) {
- Chunk.ChunkRow row = chunksIndex.rowAt(currentIndex++);
- aggregators.forEach(t -> t.accumulate(0, row.getChunk(), row.getPosition()));
+ final int l = currentIndex++;
+ aggregators.forEach(aggregator -> aggregator.aggregate(chunksIndex.rowAt(l)));
 }
+ return aggregators.stream().map(t -> t.value()).collect(Collectors.toList());
 }
}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/SlidingOverFrame.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/SlidingOverFrame.java
index 59edb4e8b..db485f0f0 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/SlidingOverFrame.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/SlidingOverFrame.java
@@ -16,38 +16,57 @@
 package com.alibaba.polardbx.executor.operator.frame;
-import com.alibaba.polardbx.executor.chunk.Chunk;
-import com.alibaba.polardbx.executor.calc.Aggregator;
+import com.alibaba.polardbx.executor.operator.util.ChunksIndex;
+import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator;
+import java.util.ArrayList;
 import java.util.List;
+import java.util.stream.Collectors;
 /**
 * The sliding window frame.
*/
-public abstract class SlidingOverFrame extends AbstractOverWindowFrame {
+public abstract class SlidingOverFrame implements OverWindowFrame {
+
+ protected List aggregators;
+ protected ChunksIndex chunksIndex;
 // Remember the frame range of the previously processed row; if it is identical, the cached results can be returned without recomputation
 // Reset in updateIndex, i.e. whenever a new partition is computed
 protected int prevLeftIndex = -1;
 protected int prevRightIndex = -1;
- public SlidingOverFrame(List aggregators) {
- super(aggregators);
+ public SlidingOverFrame(List aggregator) {
+ this.aggregators = aggregator;
+ }
+
+ @Override
+ public void resetChunks(ChunksIndex chunksIndex) {
+ this.chunksIndex = chunksIndex;
 }
- public void process(int leftIndex, int rightIndex) {
+ @Override
+ public List getAggregators() {
+ return aggregators;
+ }
+
+ public List process(int leftIndex, int rightIndex) {
 if (leftIndex == prevLeftIndex && rightIndex == prevRightIndex) {
- return;
+ return aggregators.stream().map(t -> t.value()).collect(Collectors.toList());
 }
 prevLeftIndex = leftIndex;
 prevRightIndex = rightIndex;
- aggregators.forEach(t -> {
- t.resetToInitValue(0);
- for (int j = leftIndex; j <= rightIndex; j++) {
- Chunk.ChunkRow row = chunksIndex.rowAt(j);
- t.accumulate(0, row.getChunk(), row.getPosition());
+ final List newAggregators = new ArrayList<>(aggregators.size());
+ List collect = aggregators.stream().map(t -> {
+ Aggregator newAggregator = t.getNew();
+ newAggregators.add(newAggregator);
+ for (int i = leftIndex; i <= rightIndex; i++) {
+ newAggregator.aggregate(chunksIndex.rowAt(i));
 }
- });
- }
+ return newAggregator.value();
+ }).collect(Collectors.toList());
+ aggregators = newAggregators;
+ return collect;
+ }
}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/UnboundedFollowingOverFrame.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/UnboundedFollowingOverFrame.java
new file mode 100644
index 000000000..154c7b9cf
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/UnboundedFollowingOverFrame.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.frame;
+
+import com.alibaba.polardbx.executor.operator.util.ChunksIndex;
+import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator;
+
+import java.util.List;
+
+/**
+ * The UnboundedFollowing window frame.
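SlidingOverFrame memoizes the last computed frame via prevLeftIndex/prevRightIndex: consecutive rows that tie on the ORDER BY key share one RANGE frame and get the cached values back. The pattern, condensed into a standalone sketch:

```java
import java.util.function.IntBinaryOperator;

// Illustrative memoization: recompute only when the frame boundaries actually move.
public class FrameMemoizationSketch {
    private int prevLeft = -1;
    private int prevRight = -1;
    private long cachedResult;

    long process(int left, int right, IntBinaryOperator rangeAggregate) {
        if (left == prevLeft && right == prevRight) {
            return cachedResult;            // identical frame: reuse the value
        }
        prevLeft = left;
        prevRight = right;
        cachedResult = rangeAggregate.applyAsInt(left, right); // recompute once
        return cachedResult;
    }
}
```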
+ */ +public abstract class UnboundedFollowingOverFrame implements OverWindowFrame { + + protected List aggregators; + protected ChunksIndex chunksIndex; + + public UnboundedFollowingOverFrame(List aggregators) { + this.aggregators = aggregators; + } + + @Override + public void resetChunks(ChunksIndex chunksIndex) { + this.chunksIndex = chunksIndex; + } + + @Override + public List getAggregators() { + return aggregators; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/UnboundedOverFrame.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/UnboundedOverFrame.java index b0c493ae3..dd123ef49 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/UnboundedOverFrame.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/UnboundedOverFrame.java @@ -16,42 +16,52 @@ package com.alibaba.polardbx.executor.operator.frame; -import com.alibaba.polardbx.executor.chunk.Chunk; -import com.alibaba.polardbx.executor.calc.Aggregator; +import com.alibaba.polardbx.executor.operator.util.ChunksIndex; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; import java.util.Arrays; +import java.util.List; import java.util.stream.Collectors; import static com.google.common.collect.Lists.newArrayList; -public class UnboundedOverFrame extends AbstractOverWindowFrame { - private boolean changePartition = true; - private int leftIndex; - private int rightIndex; +public class UnboundedOverFrame implements OverWindowFrame { - public UnboundedOverFrame(Aggregator... aggregators) { - super(Arrays.stream(aggregators).collect(Collectors.toList())); + private Aggregator[] aggregators; + private ChunksIndex chunksIndex; + + public UnboundedOverFrame( + Aggregator... 
aggregators) { + this.aggregators = aggregators; } @Override - public void updateIndex(int leftIndex, int rightIndex) { - changePartition = true; - this.leftIndex = leftIndex; - this.rightIndex = rightIndex; + public void resetChunks(ChunksIndex chunksIndex) { + this.chunksIndex = chunksIndex; } @Override - public void processData(int index) { - if (changePartition) { - aggregators.forEach(t -> { - t.resetToInitValue(0); - for (int j = leftIndex; j <= rightIndex - 1; j++) { - Chunk.ChunkRow row = chunksIndex.rowAt(j); - t.accumulate(0, row.getChunk(), row.getPosition()); - } - }); - changePartition = false; + public void updateIndex(int leftIndex, int rightIndex) { + for (int i = 0; i < aggregators.length; i++) { + Aggregator aggregator = aggregators[i]; + aggregator = aggregator.getNew(); + aggregators[i] = aggregator; + for (int j = leftIndex; j <= rightIndex - 1; j++) { + aggregator.aggregate(chunksIndex.rowAt(j)); + } + } } + + @Override + public List processData(int index) { + return Arrays.stream(aggregators).map(aggregator -> aggregator.eval(chunksIndex.rowAt(index))) + .collect(Collectors.toList()); + } + + @Override + public List getAggregators() { + return newArrayList(aggregators); + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/UnboundedPrecedingOverFrame.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/UnboundedPrecedingOverFrame.java new file mode 100644 index 000000000..f033e2b7e --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/frame/UnboundedPrecedingOverFrame.java @@ -0,0 +1,45 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.frame; + +import com.alibaba.polardbx.executor.operator.util.ChunksIndex; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; + +import java.util.List; + +/** + * The UnboundedPreceding window frame. 
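UnboundedOverFrame now folds the whole partition once in updateIndex, so processData is O(1) per row. A compact standalone illustration of why that works for a fully unbounded frame:

```java
// Illustrative: for "UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING", every row of
// the partition shares one aggregate, so it is computed exactly once.
public class WholePartitionAggregateSketch {
    public static void main(String[] args) {
        long[] partition = {3, 1, 4, 1, 5};
        long sum = 0;
        for (long v : partition) {
            sum += v;                      // done once per partition (updateIndex)
        }
        for (int row = 0; row < partition.length; row++) {
            long resultForRow = sum;       // processData: constant work per row
            System.out.println("row " + row + " -> " + resultForRow);
        }
    }
}
```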
+ */
+public abstract class UnboundedPrecedingOverFrame implements OverWindowFrame {
+
+ protected List aggregators;
+ protected ChunksIndex chunksIndex;
+
+ public UnboundedPrecedingOverFrame(List aggregators) {
+ this.aggregators = aggregators;
+ }
+
+ @Override
+ public void resetChunks(ChunksIndex chunksIndex) {
+ this.chunksIndex = chunksIndex;
+ }
+
+ @Override
+ public List getAggregators() {
+ return aggregators;
+ }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/AbstractColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/AbstractColumnReader.java
new file mode 100644
index 000000000..d203b9f17
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/AbstractColumnReader.java
@@ -0,0 +1,107 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan;
+
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.StampedLock;
+
+/**
+ * The abstract implementation of a column reader, managing its reference count.
+ */
+public abstract class AbstractColumnReader implements ColumnReader {
+ protected static final Logger LOGGER = LoggerFactory.getLogger("oss");
+
+ protected final int columnId;
+
+ private final boolean isPrimaryKey;
+
+ /**
+ * The reference count, initialized to zero.
+ */
+ private final AtomicInteger refCount;
+
+ /**
+ * To ensure the idempotency of the close method
+ */
+ protected final AtomicBoolean isClosed;
+
+ protected final AtomicBoolean hasNoMoreBlocks;
+
+ protected final StampedLock stampedLock;
+
+ protected AbstractColumnReader(int columnId, boolean isPrimaryKey) {
+ this.columnId = columnId;
+ this.isPrimaryKey = isPrimaryKey;
+ this.refCount = new AtomicInteger(0);
+ this.isClosed = new AtomicBoolean(false);
+ this.hasNoMoreBlocks = new AtomicBoolean(false);
+ this.stampedLock = new StampedLock();
+ }
+
+ @Override
+ public StampedLock getLock() {
+ return stampedLock;
+ }
+
+ @Override
+ public void releaseRef(int decrement) {
+ Preconditions.checkArgument(decrement > 0);
+ refCount.getAndAdd(-decrement);
+ }
+
+ @Override
+ public void retainRef(int increment) {
+ Preconditions.checkArgument(increment > 0);
+ refCount.getAndAdd(increment);
+ }
+
+ @Override
+ public int refCount() {
+ return refCount.get();
+ }
+
+ @Override
+ public void setNoMoreBlocks() {
+ hasNoMoreBlocks.set(true);
+ }
+
+ @Override
+ public boolean hasNoMoreBlocks() {
+ return hasNoMoreBlocks.get();
+ }
+
+ @Override
+ public boolean needCache() {
+ return isPrimaryKey;
+ }
+
+ /**
+ * Seek to given row group.
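AbstractColumnReader combines an explicit reference count with an AtomicBoolean-guarded close. A stripped-down sketch of those two patterns (the real class leaves the close decision to its callers and additionally coordinates through the StampedLock):

```java
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative: the last releaser closes; close() itself is idempotent.
public class RefCountedResource {
    private final AtomicInteger refCount = new AtomicInteger(0);
    private final AtomicBoolean closed = new AtomicBoolean(false);

    public void retain() {
        refCount.incrementAndGet();
    }

    public void release() {
        if (refCount.decrementAndGet() == 0) {
            close();
        }
    }

    public void close() {
        // compareAndSet makes concurrent or repeated close() calls harmless.
        if (closed.compareAndSet(false, true)) {
            // ... free buffers exactly once ...
        }
    }
}
```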
+ */ + abstract public void seek(int rowGroupId) throws IOException; + + @Override + public boolean isClosed() { + return isClosed.get(); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/BlockCacheManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/BlockCacheManager.java new file mode 100644 index 000000000..7ea15e5ee --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/BlockCacheManager.java @@ -0,0 +1,126 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan; + +import com.alibaba.polardbx.common.properties.DynamicConfig; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.operator.scan.impl.SimpleBlockCacheManager; +import org.apache.hadoop.fs.Path; + +import java.util.Map; + +/** + * It is responsible for managing block-level in-memory cache. + * + * @param class of block. + */ +public interface BlockCacheManager { + /** + * The TTL of in-flight cache entries. + */ + int IN_FLIGHT_CACHE_TTL_IN_SECOND = 5; + + /** + * The limitation of in-flight entries + */ + long MAXIMUM_IN_FLIGHT_ENTRIES = 1 << 12; + + float RATIO = DynamicConfig.getInstance().getBlockCacheMemoryFactor(); + long MAXIMUM_MEMORY_SIZE = (long) (Runtime.getRuntime().maxMemory() * RATIO); + + BlockCacheManager INSTANCE = new SimpleBlockCacheManager(); + + static BlockCacheManager getInstance() { + return INSTANCE; + } + + /** + * Get memory size in bytes held by block cache. + * + * @return size in bytes + */ + long getMemorySize(); + + /** + * Clear all caches. + */ + void clear(); + + /** + * Generate cache stats packet for `show cache stats` statement. + * + * @return cache stats packet + */ + byte[][] generateCacheStatsPacket(); + + /** + * We consider {group_id, column_id} has already been cached only if all blocks in row-group are cached. + * + * @param path file path + * @param stripeId stripe id + * @param rowGroupId row group id + * @param columnId column id + * @return True if row group has been cached. + */ + boolean isCached(Path path, int stripeId, int rowGroupId, int columnId); + + /** + * Get a sequence of cached blocks from given row-group and column. + * + * @param path file path + * @param stripeId stripe id + * @param rowGroupId row group id + * @param columnId column id + * @return Iterator of cached blocks. + */ + SeekableIterator getCaches(Path path, int stripeId, int rowGroupId, int columnId); + + /** + * Get all the cached blocks in this stripe with given column. + * + * @param path file path + * @param stripeId stripe id + * @param columnId column id + * @param rowGroupIncluded selected row groups. + * @return Mapping from row-group id to Iterator of cached blocks. 
+ */
+ Map> getCachedRowGroups(Path path, int stripeId, int columnId,
+ boolean[] rowGroupIncluded);
+
+ Map> getInFlightCachedRowGroups(Path path, int stripeId, int columnId,
+ boolean[] rowGroupIncluded);
+
+ /**
+ * Put a block into the cache manager and check whether the rows of the blocks cached for this row-group
+ * reach its total row count. If so, we consider this row-group with the given column fully cached.
+ *
+ * @param block data block.
+ * @param totalRows total rows of this row group.
+ * @param path file path
+ * @param stripeId stripe id
+ * @param rowGroupId row group id
+ * @param columnId column id
+ * @param position starting position of block in this row group.
+ * @param rows rows of block.
+ */
+ void putCache(VECTOR block, int chunkLimit, int totalRows,
+ Path path, int stripeId, int rowGroupId, int columnId, int position, int rows);
+
+ long getHitCount();
+
+ long getMissCount();
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/BlockDictionary.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/BlockDictionary.java
new file mode 100644
index 000000000..7fa5a544b
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/BlockDictionary.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan;
+
+import com.alibaba.polardbx.executor.operator.scan.impl.LocalBlockDictionary;
+import io.airlift.slice.Slice;
+import io.airlift.slice.SliceInput;
+import io.airlift.slice.SliceOutput;
+
+/**
+ * The dictionary stores all distinct values with implicit ids.
+ */
+public interface BlockDictionary {
+ /**
+ * Get dictionary value by given id.
+ *
+ * @param id dictionary id.
+ * @return dictionary value
+ */
+ Slice getValue(int id);
+
+ /**
+ * Get the size of this dictionary.
+ */
+ int size();
+
+ /**
+ * Get the size in bytes of this dictionary.
+ */
+ int sizeInBytes();
+
+ /**
+ * Encode the dictionary into the sliceOutput.
+ */
+ void encoding(SliceOutput sliceOutput);
+
+ static BlockDictionary decoding(SliceInput sliceInput) {
+ // only the local block dictionary is supported for now.
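decoding() above reads an int count followed by length-prefixed values. The matching encoding() would presumably write the symmetric layout; a sketch under that assumption (LocalBlockDictionary's real implementation is not shown in this diff):

```java
import io.airlift.slice.Slice;
import io.airlift.slice.SliceOutput;

// Illustrative mirror of BlockDictionary.decoding: an int count, then for each
// entry an int length followed by the raw value bytes.
public class DictionaryEncodingSketch {
    static void encode(Slice[] dict, SliceOutput out) {
        out.writeInt(dict.length);          // matches sliceInput.readInt()
        for (Slice value : dict) {
            out.writeInt(value.length());   // matches readInt() per entry
            out.writeBytes(value);          // matches readSlice(len)
        }
    }
}
```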
+ int size = sliceInput.readInt(); + Slice[] dict = new Slice[size]; + for (int i = 0; i < size; i++) { + int len = sliceInput.readInt(); + dict[i] = sliceInput.readSlice(len); + } + return new LocalBlockDictionary(dict); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/CacheReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/CacheReader.java new file mode 100644 index 000000000..429c54d01 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/CacheReader.java @@ -0,0 +1,79 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan; + +import java.util.Map; + +/** + * Handle stripe-level cached blocks of given column at runtime. + * + * @param class of block. + */ +public interface CacheReader { + /** + * Initialize the cache reader with given cached blocks. + * + * @param allCaches given cached blocks. + */ + void initialize(Map> allCaches); + + /** + * Initialize the cache reader with given cached blocks. + * + * @param allValidCaches valid caches that caching all blocks in total row-group. + * @param inFlightCaches the in-flight caches that caching part of blocks in total row-group. + */ + void initialize(Map> allValidCaches, + Map> inFlightCaches); + + /** + * Check if this cache reader is initialized. + * + * @return TRUE if initialized. + */ + boolean isInitialized(); + + /** + * The column id of this cache reader. + * + * @return The column id + */ + int columnId(); + + /** + * The row-group bitmap covered by this cache reader. + * + * @return The row-group bitmap + */ + boolean[] cachedRowGroupBitmap(); + + /** + * Get all caches held by this reader. + * + * @return mapping from row-group id to iterator of cached blocks. + */ + Map> allCaches(); + + /** + * Fetch the cache with given row-group and starting position. + * + * @param groupId row group + * @param position starting position. + * @return cached block. + */ + VECTOR getCache(int groupId, int position); +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/ColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/ColumnReader.java new file mode 100644 index 000000000..34117b0a5 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/ColumnReader.java @@ -0,0 +1,146 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan;
+
+import com.alibaba.polardbx.common.utils.GeneralUtil;
+import com.alibaba.polardbx.executor.chunk.BlockBuilder;
+import com.alibaba.polardbx.executor.chunk.RandomAccessBlock;
+import org.apache.orc.OrcProto;
+import org.apache.orc.impl.InStream;
+import org.apache.orc.impl.IntegerReader;
+import org.apache.orc.impl.RunLengthIntegerReader;
+import org.apache.orc.impl.RunLengthIntegerReaderV2;
+import org.apache.orc.impl.StreamName;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.locks.StampedLock;
+
+/**
+ * A column reader is responsible for column-level parsing and reading.
+ * It consists of several input streams.
+ * There are seekBytes and seekRow methods because the element position
+ * and byte location are not aligned due to compression.
+ */
+public interface ColumnReader {
+ String COLUMN_READER_MEMORY = "ColumnReader.Memory";
+ String COLUMN_READER_TIMER = "ColumnReader.Timer";
+
+ /**
+ * Release a reference of this column reader.
+ */
+ void releaseRef(int decrement);
+
+ /**
+ * Retain a reference of this column reader.
+ */
+ void retainRef(int increment);
+
+ /**
+ * Get the count of reference.
+ */
+ int refCount();
+
+ void setNoMoreBlocks();
+
+ boolean hasNoMoreBlocks();
+
+ StampedLock getLock();
+
+ /**
+ * Get the row groups included in this reader.
+ */
+ boolean[] rowGroupIncluded();
+
+ /**
+ * Check if the resource of this column reader has been opened.
+ */
+ boolean isOpened();
+
+ /**
+ * Open the resource of column reader.
+ *
+ * @param await Whether we wait for IO results synchronously.
+ * @param rowGroupIncluded The row group bitmap used to load the stream data. A null value means
+ * all row groups will be loaded.
+ */
+ void open(boolean await, boolean[] rowGroupIncluded);
+
+ /**
+ * Open the column reader using pre-built IO tasks whose result is a mapping from stream-name to InStream object.
+ *
+ * @param loadFuture future of IO tasks.
+ * @param await Whether we wait for IO results synchronously.
+ * @param rowGroupIncluded The row group bitmap used to load the stream data. A null value means
+ * all row groups will be loaded.
+ */
+ void open(CompletableFuture> loadFuture, boolean await, boolean[] rowGroupIncluded);
+
+ /**
+ * Set the starting position for the next reading process
+ *
+ * @param rowGroupId starting row group.
+ * @param elementPosition starting element position.
+ */
+ void startAt(int rowGroupId, int elementPosition) throws IOException;
+
+ /**
+ * NOTE: uses a fixed-length block to hold the parsed data.
+ * After seeking the byte location and row position, fill the given block
+ * from the last position with given position count.
+ *
+ * @param positionCount the position count to fill.
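Taken together, the contract suggests a simple consumer lifecycle: retain, open with a row-group bitmap, seek, read, release. A sketch under that assumption (real scan works also coordinate caches and the StampedLock, which is omitted here):

```java
package com.alibaba.polardbx.executor.operator.scan;

import com.alibaba.polardbx.executor.chunk.RandomAccessBlock;

import java.io.IOException;

// Illustrative driver against the ColumnReader contract above.
public class ColumnReaderLifecycleSketch {
    static void readColumn(ColumnReader reader, RandomAccessBlock block,
                           boolean[] rowGroupIncluded, int firstGroup,
                           int positionCount) throws IOException {
        reader.retainRef(1);                         // declare shared ownership
        try {
            if (!reader.isOpened()) {
                reader.open(true, rowGroupIncluded); // await = true: synchronous IO
            }
            reader.startAt(firstGroup, 0);           // row group id, element position
            reader.next(block, positionCount);       // parse positionCount values
        } finally {
            reader.releaseRef(1);                    // the last owner may close()
        }
    }
}
```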
+ */ + default void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + throw new UnsupportedOperationException(); + } + + default int next(RandomAccessBlock randomAccessBlock, int positionCount, int[] selection, int selSize) + throws IOException { + next(randomAccessBlock, positionCount); + return 0; + } + + /** + * Release the buffers in this column reader. + */ + void close(); + + boolean needCache(); + + boolean isClosed(); + + static IntegerReader createIntegerReader(InStream dataStream, OrcProto.ColumnEncoding.Kind kind) + throws IOException { + return createIntegerReader(dataStream, kind, true); + } + + static IntegerReader createIntegerReader(InStream dataStream, OrcProto.ColumnEncoding.Kind kind, boolean signed) + throws IOException { + switch (kind) { + case DIRECT_V2: + case DICTIONARY_V2: + return new RunLengthIntegerReaderV2(dataStream, signed, true); + case DIRECT: + case DICTIONARY: + return new RunLengthIntegerReader(dataStream, signed); + default: + throw GeneralUtil.nestedException("Unknown encoding " + kind); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/ColumnarSplit.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/ColumnarSplit.java new file mode 100644 index 000000000..0efde51b7 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/ColumnarSplit.java @@ -0,0 +1,144 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan; + +import com.alibaba.polardbx.common.Engine; +import com.alibaba.polardbx.executor.archive.reader.OSSColumnTransformer; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.gms.ColumnarManager; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFManager; +import com.alibaba.polardbx.executor.mpp.spi.ConnectorSplit; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; +import com.alibaba.polardbx.optimizer.statis.OperatorStatistics; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + +import java.util.BitSet; +import java.util.List; +import java.util.concurrent.ExecutorService; + +public interface ColumnarSplit extends ConnectorSplit, Comparable { + + /** + * The unique identifier of the split. + */ + int getSequenceId(); + + /** + * The unique identifier of the columnar data file. + */ + int getFileId(); + + /** + * Get the next executable scan work. + * It must record the inner states including the last IO position. + * + * @param class of split. + * @param class of data batch. + * @return the next executable scan work. 
+ */
+ ScanWork nextWork();
+
+ @Deprecated
+ @Override
+ default Object getInfo() {
+ return null;
+ }
+
+ ColumnarSplitPriority getPriority();
+
+ @Override
+ default int compareTo(ColumnarSplit split) {
+ return Integer.compare(getPriority().getValue(), split.getPriority().getValue());
+ }
+
+ interface ColumnarSplitBuilder {
+ ColumnarSplit build();
+
+ ColumnarSplitBuilder executionContext(ExecutionContext context);
+
+ ColumnarSplitBuilder ioExecutor(ExecutorService ioExecutor);
+
+ ColumnarSplitBuilder fileSystem(FileSystem fileSystem, Engine engine);
+
+ ColumnarSplitBuilder configuration(Configuration configuration);
+
+ ColumnarSplitBuilder sequenceId(int sequenceId);
+
+ ColumnarSplitBuilder file(Path filePath, int fileId);
+
+ ColumnarSplitBuilder tableMeta(String logicalSchema, String logicalTable);
+
+ ColumnarSplitBuilder columnTransformer(OSSColumnTransformer ossColumnTransformer);
+
+ ColumnarSplitBuilder inputRefs(List inputRefsForFilter, List inputRefsForProject);
+
+ ColumnarSplitBuilder cacheManager(BlockCacheManager blockCacheManager);
+
+ ColumnarSplitBuilder chunkLimit(int chunkLimit);
+
+ ColumnarSplitBuilder morselUnit(int rgThreshold);
+
+ ColumnarSplitBuilder pushDown(LazyEvaluator lazyEvaluator);
+
+ ColumnarSplitBuilder prepare(ScanPreProcessor scanPreProcessor);
+
+ ColumnarSplitBuilder columnarManager(ColumnarManager columnarManager);
+
+ ColumnarSplitBuilder isColumnarMode(boolean isColumnarMode);
+
+ ColumnarSplitBuilder tso(Long tso);
+
+ ColumnarSplitBuilder partNum(int partNum);
+
+ ColumnarSplitBuilder nodePartCount(int nodePartCount);
+
+ ColumnarSplitBuilder memoryAllocator(MemoryAllocatorCtx memoryAllocatorCtx);
+
+ ColumnarSplitBuilder fragmentRFManager(FragmentRFManager fragmentRFManager);
+
+ ColumnarSplitBuilder operatorStatistic(OperatorStatistics operatorStatistics);
+ }
+
+ public enum ColumnarSplitPriority {
+ /**
+ * orc file has lower priority
+ */
+ ORC_SPLIT_PRIORITY(1),
+ /**
+ * csv file has the highest priority, should be read in advance
+ */
+ CSV_SPLIT_PRIORITY(0);
+
+ /**
+ * a smaller priority number means a higher priority
+ */
+ private final int priority;
+
+ ColumnarSplitPriority(int priority) {
+ this.priority = priority;
+ }
+
+ public int getValue() {
+ return priority;
+ }
+ }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/IOStatus.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/IOStatus.java
new file mode 100644
index 000000000..597a46949
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/IOStatus.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan;
+
+import com.google.common.util.concurrent.ListenableFuture;
+
+import java.util.List;
+
+public interface IOStatus {
+ /**
+ * The unique identifier of the scan work.
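Because compareTo delegates to getPriority(), any ordered collection yields CSV splits (priority 0) before ORC splits (priority 1). A usage sketch:

```java
package com.alibaba.polardbx.executor.operator.scan;

import java.util.PriorityQueue;

// Illustrative: ColumnarSplit is Comparable through getPriority(), so an ordinary
// priority queue drains CSV splits before ORC splits.
public class SplitSchedulingSketch {
    static ColumnarSplit nextSplit(PriorityQueue<ColumnarSplit> pending) {
        return pending.poll(); // smallest priority value first: CSV(0), then ORC(1)
    }
}
```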
+ */ + String workId(); + + ScanState state(); + + ListenableFuture isBlocked(); + + void addResult(BATCH batch); + + void addResults(List batches); + + BATCH popResult(); + + void addException(Throwable t); + + void throwIfFailed(); + + void finish(); + + void close(); + + long rowCount(); +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/LazyEvaluator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/LazyEvaluator.java new file mode 100644 index 000000000..4b4b9f86e --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/LazyEvaluator.java @@ -0,0 +1,55 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan; + +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; +import org.roaringbitmap.RoaringBitmap; + +import java.util.BitSet; + +/** + * Evaluate push-down predicate and store the result into batch and selection. + * + * @param class of column batch + * @param class of bitmap to store the filtered positions. + */ +public interface LazyEvaluator { + + /** + * Get bound vectorized expression tree. + */ + VectorizedExpression getCondition(); + + /** + * Evaluate push-down predicate and store the result into selection array. + * + * @param batch the batch produced by row-group reader. + * @param startPosition the start position of this batch in total file. + * @param positionCount the position count of this batch. + * @param deletion the file-level deletion bitmap. + * @return selection array for input batch. + */ + BITMAP eval(BATCH batch, int startPosition, int positionCount, RoaringBitmap deletion); + + int eval(BATCH batch, int startPosition, int positionCount, RoaringBitmap deletion, boolean[] bitmap); + + /** + * Check if this evaluator is a constant expression. + */ + boolean isConstantExpression(); +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/LazyEvaluatorBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/LazyEvaluatorBuilder.java new file mode 100644 index 000000000..f6583e0a9 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/LazyEvaluatorBuilder.java @@ -0,0 +1,29 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
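The eval overload that fills a boolean[] combines two filters: the push-down predicate and the file-level deletion bitmap. A self-contained sketch of that combination (method name and shapes here are illustrative, not the repository's code):

```java
import org.roaringbitmap.RoaringBitmap;

// Illustrative: a position survives only if the predicate passed AND the row
// is not marked deleted at file level.
public class SelectionSketch {
    static int select(boolean[] predicateResult, int startPosition, RoaringBitmap deletion) {
        int selected = 0;
        for (int i = 0; i < predicateResult.length; i++) {
            int fileRowId = startPosition + i; // batch-local -> file-level position
            if (predicateResult[i] && !deletion.contains(fileRowId)) {
                selected++;
            } else {
                predicateResult[i] = false;
            }
        }
        return selected; // mirrors the int count returned by eval(...)
    }
}
```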
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan;
+
+/**
+ * Build the evaluator for the push-down predicate.
+ * The column data will not be fetched until first access.
+ *
+ * @param <BATCH> class of column batch
+ * @param <BITMAP> class of bitmap to store the filtered positions.
+ */
+public interface LazyEvaluatorBuilder<BATCH, BITMAP> {
+
+    LazyEvaluator<BATCH, BITMAP> build();
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/LogicalRowGroup.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/LogicalRowGroup.java
new file mode 100644
index 000000000..46035d424
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/LogicalRowGroup.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan;
+
+import com.alibaba.polardbx.executor.chunk.Chunk;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * Logical information of an ORC row group.
+ *
+ * @param <BLOCK> the class of a column in the row group (arrow block, value vector, array...)
+ * @param <STATISTICS> the class of column statistics
+ */
+public interface LogicalRowGroup<BLOCK, STATISTICS> {
+    String BLOCK_LOAD_TIMER = "BlockLoadTimer";
+    String BLOCK_MEMORY_COUNTER = "BlockMemoryCounter";
+
+    Path path();
+
+    int stripeId();
+
+    int groupId();
+
+    /**
+     * The row count of the row group.
+     */
+    int rowCount();
+
+    /**
+     * The starting row id of the row group.
+     */
+    int startRowId();
+
+    RowGroupReader<Chunk> getReader();
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/ORCMetaReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/ORCMetaReader.java
new file mode 100644
index 000000000..04f07828d
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/ORCMetaReader.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan;
+
+import com.alibaba.polardbx.executor.operator.scan.impl.ORCMetaReaderImpl;
+import com.alibaba.polardbx.executor.operator.scan.impl.PreheatFileMeta;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+import java.io.Closeable;
+import java.io.IOException;
+
+/**
+ * A stand-alone ORC reading interface used to apply the customized IO optimizations.
+ */
+public interface ORCMetaReader extends Closeable {
+
+    static ORCMetaReader create(Configuration configuration, FileSystem fileSystem) {
+        return new ORCMetaReaderImpl(configuration, fileSystem);
+    }
+
+    /**
+     * Execute the preheating and get the preheating results.
+     *
+     * @param path file path to preheat.
+     * @return preheating results including stripe-level and file-level meta.
+     */
+    PreheatFileMeta preheat(Path path) throws IOException;
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/RFEfficiencyChecker.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/RFEfficiencyChecker.java
new file mode 100644
index 000000000..85f75e30d
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/RFEfficiencyChecker.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan;
+
+import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItemKey;
+
+/**
+ * Introduces a dynamic sampling inspection mechanism on the scan side
+ * that checks the filter ratio of the first 5 to 10 chunks in each batch;
+ * when the filter ratio of those chunks is very poor, the filtering
+ * calculation of the runtime filter is terminated for this batch.
+ * Note that the runtime filter has already been built at that point,
+ * so this part of the performance loss cannot be avoided.
+ */
+public interface RFEfficiencyChecker {
+    /**
+     * Check the filter ratio of the first 5 to 10 chunks in each batch; if it is
+     * very poor, terminate the filtering calculation of the runtime filter for this batch.
+     *
+     * @param rfItemKey item key of the runtime filter.
+     * @return TRUE if valid.
+     */
+    boolean check(FragmentRFItemKey rfItemKey);
+
+    /**
+     * Sample and calculate the first 5 to 10 chunks of each batch to obtain the filter ratio.
+     *
+     * @param rfItemKey item key of the runtime filter.
+     * @param originalCount the row count before filtering.
+     * @param selectedCount the row count after filtering.
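+     * <p>
+     * A minimal sketch of how a scan loop might drive this checker (illustrative only;
+     * the {@code applyRuntimeFilter} helper and the chunk variable are assumptions,
+     * not part of this API):
+     * <pre>{@code
+     * if (checker.check(rfItemKey)) {
+     *     int selected = applyRuntimeFilter(chunk);   // hypothetical helper
+     *     checker.sample(rfItemKey, chunk.getPositionCount(), selected);
+     * } // otherwise, skip runtime-filter evaluation for this batch
+     * }</pre>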
+     */
+    void sample(FragmentRFItemKey rfItemKey, int originalCount, int selectedCount);
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/RowGroupIterator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/RowGroupIterator.java
new file mode 100644
index 000000000..8d07c70e6
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/RowGroupIterator.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan;
+
+import org.apache.hadoop.fs.Path;
+import org.jetbrains.annotations.Nullable;
+
+import java.util.Iterator;
+
+/**
+ * A row-group iterator holding a sequence of available row-group metas.
+ * It is the unit of scan-work scheduling, and maintains the stripe-level modules
+ * shared by all row-groups and all columns in this stripe.
+ *
+ * @param <BLOCK> the class of a column in the row group (value vector, array...)
+ * @param <STATISTICS> the class of column statistics
+ */
+public interface RowGroupIterator<BLOCK, STATISTICS> extends Iterator<LogicalRowGroup<BLOCK, STATISTICS>> {
+    Path filePath();
+
+    int stripeId();
+
+    void noMoreChunks();
+
+    boolean[] columnIncluded();
+
+    /**
+     * The bitmap of row groups included in this iterator.
+     *
+     * @return bitmap of row groups
+     */
+    boolean[] rgIncluded();
+
+    /**
+     * Seek to the first row group matching the clustering-key range conjuncts.
+     * After the seek, the current row group either contains the target row or lies
+     * on the left side of the row group that contains the target row.
+     *
+     * @param rowId row id
+     */
+    void seek(int rowId);
+
+    /**
+     * Get the row group pointed to by the current iterator pointer.
+     */
+    LogicalRowGroup<BLOCK, STATISTICS> current();
+
+    /**
+     * Get the global block cache manager wrapped in this iterator.
+     */
+    BlockCacheManager getCacheManager();
+
+    /**
+     * Get the stripe-loader of this row-group sequence.
+     *
+     * @return stripe loader.
+     */
+    StripeLoader getStripeLoader();
+
+    /**
+     * Get the column reader of the given column-id.
+     *
+     * @param columnId column id
+     * @return column reader
+     */
+    @Nullable
+    ColumnReader getColumnReader(int columnId);
+
+    /**
+     * Get the block cache reader of the given column-id.
+     *
+     * @param columnId column id
+     * @return block cache reader
+     */
+    @Nullable
+    CacheReader getCacheReader(int columnId);
+
+    void close(boolean force);
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/RowGroupReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/RowGroupReader.java
new file mode 100644
index 000000000..1bd536fdd
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/RowGroupReader.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan;
+
+import com.alibaba.polardbx.common.utils.Pair;
+
+/**
+ * A row group reader is responsible for lazy block allocation of all columns in a row group.
+ *
+ * @param <BATCH> the class of batch.
+ */
+public interface RowGroupReader<BATCH> {
+    /**
+     * Unique row group id in an ORC stripe.
+     */
+    int groupId();
+
+    /**
+     * How many rows are in this row group.
+     */
+    int rowCount();
+
+    /**
+     * How many batches exist in this row group.
+     */
+    int batches();
+
+    /**
+     * Pseudo-code sketch of how a returned batch is consumed:
+     * <pre>
+     * Batch batch = rowGroupReader.nextBatch();
+     * Block block = batch.blocks[col_id];
+     * block.loader.load();              // the loader holds positions + length
+     * block.column_reader.seek(row_index of this row group);
+     * block.column_reader.nextVector(); // a boundary check is needed here
+     * </pre>
+     */
+    BATCH nextBatch();
+
+    /**
+     * Get the batch row range within the total columnar file.
+     *
+     * @return pair of {start, length}
+     */
+    int[] batchRange();
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/ScanPolicy.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/ScanPolicy.java
new file mode 100644
index 000000000..24a5e4add
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/ScanPolicy.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan;
+
+/**
+ * Policy of data scan.
+ */
+public enum ScanPolicy {
+    IO_PRIORITY(1),
+
+    FILTER_PRIORITY(2),
+
+    MERGE_IO(3),
+
+    DELETED_SCAN(4);
+
+    private final int policyId;
+
+    ScanPolicy(int policyId) {
+        this.policyId = policyId;
+    }
+
+    public int getPolicyId() {
+        return policyId;
+    }
+
+    public static ScanPolicy of(final int policyId) {
+        switch (policyId) {
+        case 4:
+            return DELETED_SCAN;
+        case 2:
+            return FILTER_PRIORITY;
+        case 3:
+            return MERGE_IO;
+        case 1:
+        default:
+            return IO_PRIORITY;
+        }
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/ScanPreProcessor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/ScanPreProcessor.java
new file mode 100644
index 000000000..fc608fe0d
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/ScanPreProcessor.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan;
+
+import com.alibaba.polardbx.executor.operator.scan.impl.PreheatFileMeta;
+import com.alibaba.polardbx.optimizer.statis.ColumnarTracer;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.hadoop.fs.Path;
+import org.roaringbitmap.RoaringBitmap;
+
+import java.util.SortedMap;
+import java.util.concurrent.ExecutorService;
+
+/**
+ * Any time-consuming processing for the preparation of a columnar table-scan converges here.
+ */
+public interface ScanPreProcessor {
+
+    void addFile(Path filePath);
+
+    /**
+     * Prepare necessary data, such as pruning results, preheated file metadata
+     * and deletion bitmaps.
+     */
+    ListenableFuture<?> prepare(ExecutorService executor, String traceId, ColumnarTracer tracer);
+
+    /**
+     * Check if preparation is done.
+     */
+    boolean isPrepared();
+
+    /**
+     * Get the pruning result, represented by a row-group matrix, for the given file path.
+     */
+    SortedMap<Integer, boolean[]> getPruningResult(Path filePath);
+
+    /**
+     * Get the preheated file meta by file path.
+     */
+    PreheatFileMeta getPreheated(Path filePath);
+
+    /**
+     * Get the deletion bitmap by file path.
+     */
+    RoaringBitmap getDeletion(Path filePath);
+
+    /**
+     * Throw a runtime exception if processing has failed.
+     */
+    default void throwIfFailed() {
+    }
+}
+
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/ScanState.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/ScanState.java
new file mode 100644
index 000000000..164eeaf40
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/ScanState.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan;
+
+/**
+ * States of a file scan.
+ * The transitions between states:
+ * <pre>
+ * +-------+          +---------+        +--------+
+ * | READY | <------> | BLOCKED | -----> | FAILED |
+ * +-------+          +---------+        +--------+
+ *     |                                     |
+ *     |                                     v
+ *     |     +----------+               +--------+
+ *     +---> | FINISHED | ------------> | CLOSED |
+ *           +----------+               +--------+
+ * </pre>
+ */
+public enum ScanState {
+    /**
+     * IO production is ready for fetching.
+     */
+    READY,
+    /**
+     * Consumption is blocked because the IO progress is running.
+     */
+    BLOCKED,
+    /**
+     * IO progress is done, waiting for resources to be closed.
+     */
+    FINISHED,
+    /**
+     * IO progress was broken by an exception.
+     */
+    FAILED,
+    /**
+     * File resources are all closed.
+     */
+    CLOSED
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/ScanWork.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/ScanWork.java
new file mode 100644
index 000000000..198bf73f2
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/ScanWork.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan;
+
+import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics;
+
+import java.io.IOException;
+import java.util.concurrent.ExecutorService;
+
+/**
+ * A splittable scan work represents a file fragment (range <= chunk threshold) in a columnar split,
+ * which can be scheduled among different drivers.
+ *
+ * @param <SplitT> class of columnar split
+ */
+public interface ScanWork<SplitT> {
+    String EVALUATION_TIMER = "Evaluation.Timer";
+
+    /**
+     * Allocate the IO thread resource and invoke the scan work processing.
+     *
+     * @param executor IO thread.
+     */
+    void invoke(ExecutorService executor);
+
+    void cancel();
+
+    /**
+     * Get the IO status of the current IO task.
+     */
+    IOStatus getIOStatus();
+
+    /**
+     * Get the unique identifier of the scan work.
+     */
+    String getWorkId();
+
+    /**
+     * Get the metrics of this scan work.
+     */
+    RuntimeMetrics getMetrics();
+
+    void close() throws IOException;
+
+    void close(boolean force);
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/SeekableIterator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/SeekableIterator.java
new file mode 100644
index 000000000..50f650b82
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/SeekableIterator.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan;
+
+import java.util.Iterator;
+
+public interface SeekableIterator<E> extends Iterator<E> {
+    E seek(int position);
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/StripeLoader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/StripeLoader.java
new file mode 100644
index 000000000..3a144bcc4
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/StripeLoader.java
@@ -0,0 +1,76 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan;
+
+import org.apache.orc.impl.InStream;
+import org.apache.orc.impl.StreamName;
+
+import java.io.Closeable;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import java.util.function.Supplier;
+
+/**
+ * A stripe loader is responsible for stripe-level IO processing.
+ * IO tasks of several row groups within one stream must be merged and managed at the stripe level.
+ *
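+ * <p>
+ * A minimal sketch of the intended call pattern (illustrative only; the column id
+ * and the pruned row-group bitmap are assumptions):
+ * <pre>{@code
+ * stripeLoader.open();
+ * boolean[] rowGroups = ...;                 // row groups selected by pruning
+ * stripeLoader.load(columnId, rowGroups)
+ *     .thenAccept(streams -> {
+ *         // streams: StreamName -> InStream holding buffered compressed data
+ *     });
+ * }</pre>
+ *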
+ * These methods should be called by a ColumnReader or some external IO task scheduler.
+ */
+public interface StripeLoader extends Closeable {
+    void open();
+
+    /**
+     * Load several columns with different row-group bitmaps.
+     *
+     * @param columnIds column id list.
+     * @param rowGroupBitmaps row-group bitmaps of the columns
+     * @return A future of a mapping from stream name to the InStream object which holds the buffered compressed data.
+     */
+    default CompletableFuture<Map<StreamName, InStream>> load(List<Integer> columnIds,
+                                                              Map<Integer, boolean[]> rowGroupBitmaps) {
+        return load(columnIds, rowGroupBitmaps, null);
+    }
+
+    /**
+     * Invoke stripe-level IO processing.
+     * It's a matrix of stream * row_group.
+     * Mapping: stream name <-> stream information <-> InStream.
+     * A stream-manager holds the whole stream information within the stripe.
+     *
+     * @param columnId target column to load.
+     * @param targetRowGroups target row-group range to load.
+     * @return A future of a mapping from stream name to the InStream object which holds the buffered compressed data.
+     */
+    default CompletableFuture<Map<StreamName, InStream>> load(int columnId, boolean[] targetRowGroups) {
+        return load(columnId, targetRowGroups, null);
+    }
+
+    CompletableFuture<Map<StreamName, InStream>> load(List<Integer> columnIds, Map<Integer, boolean[]> rowGroupBitmaps,
+                                                      Supplier controller);
+
+    CompletableFuture<Map<StreamName, InStream>> load(int columnId, boolean[] targetRowGroups,
+                                                      Supplier controller);
+
+    /**
+     * Clear the memory resources of the given stream.
+     *
+     * @return Released bytes.
+     */
+    long clearStream(StreamName streamName);
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/WorkPool.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/WorkPool.java
new file mode 100644
index 000000000..2e46cc2c1
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/WorkPool.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan;
+
+/**
+ * A morsel-driven scan work pool whose scan works can be shared among the scan operators.
+ *
+ * @param <SplitT> the class of split.
+ */
+public interface WorkPool<SplitT> {
+    /**
+     * Add the split to the morsel-driven work pool.
+     *
+     * @param driverId the unique id of the table scan exec
+     * @param split the readable split.
+     */
+    void addSplit(int driverId, SplitT split);
+
+    /**
+     * Notify the work pool that this scan operator will no longer supply splits.
+     *
+     * @param driverId the unique id of the table scan exec
+     */
+    void noMoreSplits(int driverId);
+
+    /**
+     * Get the next scan work for the given driver_id (unique id of the table scan exec).
+     * The pickup method prefers a split owned by this scan exec; otherwise, it steals a split
+     * belonging to another scan exec from the pool.
+     *
+     * @param driverId unique id of the table scan exec
+     * @return readable split.
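+     * <p>
+     * A minimal driver-side sketch (illustrative only; returning {@code null} on
+     * exhaustion is an assumption, not a documented contract):
+     * <pre>{@code
+     * workPool.addSplit(driverId, split);
+     * workPool.noMoreSplits(driverId);
+     * ScanWork work;
+     * while ((work = workPool.pickUp(driverId)) != null) {
+     *     work.invoke(ioExecutor);            // see ScanWork#invoke
+     * }
+     * }</pre>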
+ */ + ScanWork pickUp(int driverId); +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/AbstractDictionaryColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/AbstractDictionaryColumnReader.java new file mode 100644 index 000000000..8f2a9dabb --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/AbstractDictionaryColumnReader.java @@ -0,0 +1,506 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.operator.scan.AbstractColumnReader; +import com.alibaba.polardbx.executor.operator.scan.BlockDictionary; +import com.alibaba.polardbx.executor.operator.scan.ColumnReader; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.MetricsNameBuilder; +import com.alibaba.polardbx.executor.operator.scan.metrics.ORCMetricsWrapper; +import com.alibaba.polardbx.executor.operator.scan.metrics.ProfileKeys; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.codahale.metrics.Counter; +import com.google.common.base.Preconditions; +import io.airlift.slice.Slice; +import io.airlift.slice.Slices; +import org.apache.orc.OrcProto; +import org.apache.orc.customized.ORCProfile; +import org.apache.orc.impl.BitFieldReader; +import org.apache.orc.impl.DynamicByteArray; +import org.apache.orc.impl.InStream; +import org.apache.orc.impl.IntegerReader; +import org.apache.orc.impl.OrcIndex; +import org.apache.orc.impl.PositionProvider; +import org.apache.orc.impl.RecordReaderImpl; +import org.apache.orc.impl.RunLengthIntegerReaderV2; +import org.apache.orc.impl.StreamName; + +import java.io.IOException; +import java.text.MessageFormat; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * parsing the dictionary-encoding + */ +public abstract class AbstractDictionaryColumnReader extends AbstractColumnReader { + // basic metadata + protected final StripeLoader stripeLoader; + + // in preheat mode, all row-indexes in orc-index should not be null. 
+ protected final OrcIndex orcIndex; + protected final RuntimeMetrics metrics; + + protected final OrcProto.ColumnEncoding encoding; + protected final int indexStride; + + protected final boolean enableMetrics; + + // open parameters + protected boolean[] rowGroupIncluded; + protected boolean await; + + // inner states + protected AtomicBoolean openFailed; + protected AtomicBoolean initializeOnlyOnce; + protected AtomicBoolean isOpened; + + // IO results + protected Throwable throwable; + protected Map inStreamMap; + protected CompletableFuture> openFuture; + + // for semantic parser + protected BitFieldReader present; + protected IntegerReader dictIdReader; + + // for dictionary + protected BlockDictionary dictionary; + + // record read positions + protected int currentRowGroup; + protected int lastPosition; + + // execution time metrics. + protected Counter preparingTimer; + protected Counter seekTimer; + protected Counter parseTimer; + + public AbstractDictionaryColumnReader(int columnId, boolean isPrimaryKey, + StripeLoader stripeLoader, OrcIndex orcIndex, + RuntimeMetrics metrics, OrcProto.ColumnEncoding encoding, int indexStride, + boolean enableMetrics) { + super(columnId, isPrimaryKey); + this.stripeLoader = stripeLoader; + this.orcIndex = orcIndex; + this.metrics = metrics; + this.encoding = encoding; + this.indexStride = indexStride; + this.enableMetrics = enableMetrics; + + // inner states + openFailed = new AtomicBoolean(false); + initializeOnlyOnce = new AtomicBoolean(false); + isOpened = new AtomicBoolean(false); + throwable = null; + inStreamMap = null; + openFuture = null; + + // for parser + present = null; + dictIdReader = null; + + // read position control + // The initial value is -1 means it must seek to the correct row group firstly. + currentRowGroup = -1; + lastPosition = -1; + + rowGroupIncluded = null; + await = false; + + if (enableMetrics) { + preparingTimer = metrics.addCounter( + MetricsNameBuilder.columnMetricsKey(columnId, ProfileKeys.ORC_COLUMN_IO_PREPARING_TIMER), + COLUMN_READER_TIMER, + ProfileKeys.ORC_COLUMN_IO_PREPARING_TIMER.getProfileUnit() + ); + + seekTimer = metrics.addCounter( + MetricsNameBuilder.columnMetricsKey(columnId, ProfileKeys.ORC_COLUMN_SEEK_TIMER), + COLUMN_READER_TIMER, + ProfileKeys.ORC_COLUMN_SEEK_TIMER.getProfileUnit() + ); + + parseTimer = metrics.addCounter( + MetricsNameBuilder.columnMetricsKey(columnId, ProfileKeys.ORC_COLUMN_PARSE_TIMER), + COLUMN_READER_TIMER, + ProfileKeys.ORC_COLUMN_PARSE_TIMER.getProfileUnit() + ); + } + + } + + @Override + public boolean[] rowGroupIncluded() { + Preconditions.checkArgument(isOpened.get()); + return rowGroupIncluded; + } + + @Override + public boolean isOpened() { + return isOpened.get(); + } + + @Override + public void open(boolean await, boolean[] rowGroupIncluded) { + if (!isOpened.compareAndSet(false, true)) { + throw GeneralUtil.nestedException("It's not allowed to re-open this column reader."); + } + this.rowGroupIncluded = rowGroupIncluded; + this.await = await; + + // load the specified streams. 
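+        // Note: the load is asynchronous and yields a future of (stream name -> InStream).
+        // When await == true we block in doWait() until the IO completes; otherwise the
+        // wait is deferred to init(), which runs lazily on the first read access.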
+ openFuture = stripeLoader.load(columnId, rowGroupIncluded); + + if (await) { + doWait(); + } + } + + @Override + public void open(CompletableFuture> loadFuture, + boolean await, boolean[] rowGroupIncluded) { + if (!isOpened.compareAndSet(false, true)) { + throw GeneralUtil.nestedException("It's not allowed to re-open this column reader."); + } + this.rowGroupIncluded = rowGroupIncluded; + this.await = await; + this.openFuture = loadFuture; + if (await) { + doWait(); + } + } + + // wait for open future and handle failure. + private void doWait() { + try { + inStreamMap = openFuture.get(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } catch (ExecutionException e) { + throw new RuntimeException(e); + } + + if (throwable != null) { + // throw if failed. + throw GeneralUtil.nestedException(throwable); + } + } + + protected void init() throws IOException { + if (!initializeOnlyOnce.compareAndSet(false, true)) { + return; + } + + long start = System.nanoTime(); + if (!await) { + doWait(); + } + if (openFailed.get()) { + return; + } + + // Unlike StripePlanner in raw ORC SDK, the stream names and IO production are + // all determined at runtime. + StreamName presentName = new StreamName(columnId, OrcProto.Stream.Kind.PRESENT); + StreamName dataName = new StreamName(columnId, OrcProto.Stream.Kind.DATA); + + // We should firstly parse the dictionary data and its offsets. + parseDictionary(); + + InStream presentStream = inStreamMap.get(presentName); + InStream dataStream = inStreamMap.get(dataName); + + // initialize present and integer reader + present = presentStream == null ? null : new BitFieldReader(presentStream); + dictIdReader = + dataStream == null ? null : ColumnReader.createIntegerReader(dataStream, encoding.getKind(), false); + + // Add memory metrics. + if (present != null) { + String metricsName = MetricsNameBuilder.streamMetricsKey( + presentName, ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER + ); + + ORCProfile memoryCounter = enableMetrics ? new ORCMetricsWrapper( + metricsName, + COLUMN_READER_MEMORY, + ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER.getProfileUnit(), + metrics + ) : null; + + present.setMemoryCounter(memoryCounter); + } + + if (dictIdReader != null) { + String metricsName = MetricsNameBuilder.streamMetricsKey( + dataName, ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER + ); + + ORCProfile memoryCounter = enableMetrics ? new ORCMetricsWrapper( + metricsName, + COLUMN_READER_MEMORY, + ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER.getProfileUnit(), + metrics + ) : null; + + if (dictIdReader instanceof RunLengthIntegerReaderV2) { + ((RunLengthIntegerReaderV2) dictIdReader).setMemoryCounter(memoryCounter); + } + + } + + // metrics time cost of preparing (IO waiting + data steam reader constructing) + if (enableMetrics) { + preparingTimer.inc(System.nanoTime() - start); + } + } + + private void parseDictionary() throws IOException { + StreamName dictDataName = new StreamName(columnId, OrcProto.Stream.Kind.DICTIONARY_DATA); + InStream dictStream = inStreamMap.get(dictDataName); + + // parse the dictionary blob. + DynamicByteArray dictionaryBuffer = null; + if (dictStream != null) { + // Guard against empty dictionary stream. + if (dictStream.available() > 0) { + dictionaryBuffer = new DynamicByteArray(64, dictStream.available()); + dictionaryBuffer.readAll(dictStream); + } + dictStream.close(); + } + stripeLoader.clearStream(dictDataName); + + // read the dictionary lengths. 
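+        // The LENGTH stream stores one length per dictionary entry. Below, those lengths
+        // are turned into a prefix-sum offsets array of size dictionarySize + 1, e.g.
+        // lengths [2, 3, 1] -> offsets [0, 2, 5, 6], so entry i spans
+        // [offsets[i], offsets[i + 1]) within the dictionary blob.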
+ StreamName dictionaryLengthName = new StreamName(columnId, OrcProto.Stream.Kind.LENGTH); + InStream dictionaryLengthStream = inStreamMap.get(dictionaryLengthName); + int dictionarySize = encoding.getDictionarySize(); + int[] dictionaryOffsets = null; + if (dictionaryLengthStream != null) { + // Guard against empty LENGTH stream. + IntegerReader lenReader = + ColumnReader.createIntegerReader(dictionaryLengthStream, encoding.getKind(), false); + int offset = 0; + if (dictionaryOffsets == null || + dictionaryOffsets.length < dictionarySize + 1) { + dictionaryOffsets = new int[dictionarySize + 1]; + } + for (int i = 0; i < dictionarySize; ++i) { + dictionaryOffsets[i] = offset; + offset += (int) lenReader.next(); + } + dictionaryOffsets[dictionarySize] = offset; + dictionaryLengthStream.close(); + } + stripeLoader.clearStream(dictionaryLengthName); + + // Construct dictionary from offset and data. + Slice[] dict = null; + if (dictionaryBuffer != null && dictionaryOffsets != null) { + dict = new Slice[dictionarySize]; + + // Wrap and slice dict values into array. + byte[] rawBytes = dictionaryBuffer.get(); + Slice dictionarySlice = Slices.wrappedBuffer(rawBytes); + for (int dictId = 0; dictId < dictionarySize; dictId++) { + + int offset = dictionaryOffsets[dictId]; + int length = getDictLength(dictId, offset, dictionaryOffsets, dictionarySlice); + dict[dictId] = Slices.copyOf(dictionarySlice, offset, length); + } + } else if (dictionaryBuffer == null && dictionaryOffsets != null) { + // The only dictionary value is empty string. + dict = new Slice[1]; + dict[0] = Slices.EMPTY_SLICE; + } + if (dict != null) { + dictionary = new LocalBlockDictionary(dict); + } + } + + private int getDictLength(int dictId, int offset, int[] offsets, Slice buffer) { + final int length; + // if it isn't the last entry, subtract the offsets otherwise use + // the buffer length. + if (dictId < offsets.length - 1) { + length = offsets[dictId + 1] - offset; + } else { + length = buffer.length() - offset; + } + return length; + } + + @Override + public void startAt(int rowGroupId, int elementPosition) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(rowGroupIncluded[rowGroupId]); + init(); + + long start = System.nanoTime(); + + // case 1: the column-reader has not been accessed, + // and the first access is the first effective row-group and the position is 0. + boolean isFirstAccess = (currentRowGroup == -1 && lastPosition == -1) + && elementPosition == 0 + && rowGroupId == 0; + + // case 2: the next access follows the last position in the same row-group. + boolean isConsecutive = rowGroupId == currentRowGroup && elementPosition == lastPosition; + + // case 3: the last access reach the last position of the row-group, and the next access is the next + // valid row-group starting at position 0. + boolean isNextRowGroup = currentRowGroup < rowGroupId + && elementPosition == 0 + && lastPosition == indexStride + && (currentRowGroup + 1 == rowGroupId); + + // It's in order. + if (isFirstAccess || isConsecutive || isNextRowGroup) { + lastPosition = elementPosition; + currentRowGroup = rowGroupId; + // metrics + if (enableMetrics) { + seekTimer.inc(System.nanoTime() - start); + } + return; + } + + // It's not in order, need skip some position. + if (rowGroupId != currentRowGroup || elementPosition < lastPosition) { + // case 1: when given row group is different from the current group, seek to the position of it. 
+ // case 2: when elementPosition <= lastPosition, we need go back to the start position of this row group. + seek(rowGroupId); + + long skipLen = skipPresent(elementPosition); + if (dictIdReader != null) { + dictIdReader.skip(skipLen); + } + + lastPosition = elementPosition; + currentRowGroup = rowGroupId; + } else if (elementPosition > lastPosition && elementPosition < indexStride) { + // case 3: when elementPosition > lastPosition and the group is same, just skip to given position. + long skipLen = skipPresent(elementPosition - lastPosition); + if (dictIdReader != null) { + dictIdReader.skip(skipLen); + } + + lastPosition = elementPosition; + } else if (elementPosition >= indexStride) { + // case 4: the position is out of range. + throw GeneralUtil.nestedException("Invalid element position: " + elementPosition); + } + // case 5: the elementPosition == lastPosition and rowGroupId is equal. + + // metrics + if (enableMetrics) { + seekTimer.inc(System.nanoTime() - start); + } + } + + // Try to skip rows on present stream and count down + // the actual rows need skipped by data stream. + protected long skipPresent(long rows) throws IOException { + if (present == null) { + return rows; + } + + long result = 0; + for (long c = 0; c < rows; ++c) { + // record the count of non-null values + // in range of [current_position, current_position + rows) + if (present.next() == 1) { + result += 1; + } + } + // It must be less than or equal to count of rows. + return result; + } + + @Override + public void seek(int rowGroupId) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + init(); + + // Find the position-provider of given column and row group. + PositionProvider positionProvider; + OrcProto.RowIndex[] rowIndices = orcIndex.getRowGroupIndex(); + OrcProto.RowIndexEntry entry = rowIndices[columnId].getEntry(rowGroupId); + // This is effectively a test for pre-ORC-569 files. + if (rowGroupId == 0 && entry.getPositionsCount() == 0) { + positionProvider = new RecordReaderImpl.ZeroPositionProvider(); + } else { + positionProvider = new RecordReaderImpl.PositionProviderImpl(entry); + } + + // NOTE: The order of seeking is strict! + if (present != null) { + present.seek(positionProvider); + } + if (dictIdReader != null) { + dictIdReader.seek(positionProvider); + } + + currentRowGroup = rowGroupId; + lastPosition = 0; + } + + @Override + public void close() { + if (!isClosed.compareAndSet(false, true)) { + return; + } + + // 1. Clear the resources allocated in InStream + StreamName presentName = new StreamName(columnId, OrcProto.Stream.Kind.PRESENT); + StreamName dataName = new StreamName(columnId, OrcProto.Stream.Kind.DATA); + + if (inStreamMap != null) { + InStream presentStream = inStreamMap.get(presentName); + InStream dataStream = inStreamMap.get(dataName); + + if (presentStream != null) { + presentStream.close(); + } + + if (dataStream != null) { + dataStream.close(); + } + } + + // 2. 
Clear the memory resources held by stream + long releasedBytes = 0L; + releasedBytes += stripeLoader.clearStream(presentName); + releasedBytes += stripeLoader.clearStream(dataName); + + if (releasedBytes > 0) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(MessageFormat.format( + "Release the resource of work: {0}, columnId: {1}, bytes: {2}", + metrics.name(), columnId, releasedBytes + )); + } + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/AbstractLongColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/AbstractLongColumnReader.java new file mode 100644 index 000000000..fce68ac19 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/AbstractLongColumnReader.java @@ -0,0 +1,425 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.operator.scan.AbstractColumnReader; +import com.alibaba.polardbx.executor.operator.scan.ColumnReader; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.MetricsNameBuilder; +import com.alibaba.polardbx.executor.operator.scan.metrics.ORCMetricsWrapper; +import com.alibaba.polardbx.executor.operator.scan.metrics.ProfileKeys; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.codahale.metrics.Counter; +import com.google.common.base.Preconditions; +import org.apache.orc.OrcProto; +import org.apache.orc.customized.ORCProfile; +import org.apache.orc.impl.BitFieldReader; +import org.apache.orc.impl.InStream; +import org.apache.orc.impl.IntegerReader; +import org.apache.orc.impl.OrcIndex; +import org.apache.orc.impl.PositionProvider; +import org.apache.orc.impl.RecordReaderImpl; +import org.apache.orc.impl.RunLengthIntegerReaderV2; +import org.apache.orc.impl.StreamName; + +import java.io.IOException; +import java.text.MessageFormat; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; + +public abstract class AbstractLongColumnReader extends AbstractColumnReader { + // basic metadata + protected final StripeLoader stripeLoader; + + // in preheat mode, all row-indexes in orc-index should not be null. 
+ protected final OrcIndex orcIndex; + protected final RuntimeMetrics metrics; + + protected final OrcProto.ColumnEncoding.Kind kind; + protected final int indexStride; + + protected final boolean enableMetrics; + + // open parameters + protected boolean[] rowGroupIncluded; + protected boolean await; + + // inner states + protected AtomicBoolean openFailed; + protected AtomicBoolean initializeOnlyOnce; + protected AtomicBoolean isOpened; + + // IO results + protected Throwable throwable; + protected Map inStreamMap; + protected CompletableFuture> openFuture; + + // for semantic parser + protected BitFieldReader present; + protected IntegerReader data; + + // record read positions + protected int currentRowGroup; + protected int lastPosition; + + // execution time metrics. + protected Counter preparingTimer; + protected Counter seekTimer; + protected Counter parseTimer; + + public AbstractLongColumnReader(int columnId, boolean isPrimaryKey, StripeLoader stripeLoader, OrcIndex orcIndex, + RuntimeMetrics metrics, OrcProto.ColumnEncoding.Kind kind, int indexStride, + boolean enableMetrics) { + super(columnId, isPrimaryKey); + this.stripeLoader = stripeLoader; + this.orcIndex = orcIndex; + this.metrics = metrics; + this.kind = kind; + this.indexStride = indexStride; + this.enableMetrics = enableMetrics; + + // inner states + openFailed = new AtomicBoolean(false); + initializeOnlyOnce = new AtomicBoolean(false); + isOpened = new AtomicBoolean(false); + throwable = null; + inStreamMap = null; + openFuture = null; + + // for parser + present = null; + data = null; + + // read position control + // The initial value is -1 means it must seek to the correct row group firstly. + currentRowGroup = -1; + lastPosition = -1; + + rowGroupIncluded = null; + await = false; + + if (enableMetrics) { + preparingTimer = metrics.addCounter( + MetricsNameBuilder.columnMetricsKey(columnId, ProfileKeys.ORC_COLUMN_IO_PREPARING_TIMER), + COLUMN_READER_TIMER, + ProfileKeys.ORC_COLUMN_IO_PREPARING_TIMER.getProfileUnit() + ); + + seekTimer = metrics.addCounter( + MetricsNameBuilder.columnMetricsKey(columnId, ProfileKeys.ORC_COLUMN_SEEK_TIMER), + COLUMN_READER_TIMER, + ProfileKeys.ORC_COLUMN_SEEK_TIMER.getProfileUnit() + ); + + parseTimer = metrics.addCounter( + MetricsNameBuilder.columnMetricsKey(columnId, ProfileKeys.ORC_COLUMN_PARSE_TIMER), + COLUMN_READER_TIMER, + ProfileKeys.ORC_COLUMN_PARSE_TIMER.getProfileUnit() + ); + } + + } + + @Override + public boolean[] rowGroupIncluded() { + Preconditions.checkArgument(isOpened.get()); + return rowGroupIncluded; + } + + @Override + public boolean isOpened() { + return isOpened.get(); + } + + @Override + public void open(boolean await, boolean[] rowGroupIncluded) { + if (!isOpened.compareAndSet(false, true)) { + throw GeneralUtil.nestedException("It's not allowed to re-open this column reader."); + } + this.rowGroupIncluded = rowGroupIncluded; + this.await = await; + + // load the specified streams. + openFuture = stripeLoader.load(columnId, rowGroupIncluded); + + if (await) { + doWait(); + } + } + + @Override + public void open(CompletableFuture> loadFuture, + boolean await, boolean[] rowGroupIncluded) { + if (!isOpened.compareAndSet(false, true)) { + throw GeneralUtil.nestedException("It's not allowed to re-open this column reader."); + } + this.rowGroupIncluded = rowGroupIncluded; + this.await = await; + this.openFuture = loadFuture; + if (await) { + doWait(); + } + } + + // wait for open future and handle failure. 
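+    // Both InterruptedException and ExecutionException from the load future are wrapped
+    // into a RuntimeException here, and any failure recorded by the stripe loader is
+    // rethrown afterwards.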
+ private void doWait() { + try { + inStreamMap = openFuture.get(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } catch (ExecutionException e) { + throw new RuntimeException(e); + } + + if (throwable != null) { + // throw if failed. + throw GeneralUtil.nestedException(throwable); + } + } + + protected void init() throws IOException { + if (!initializeOnlyOnce.compareAndSet(false, true)) { + return; + } + + long start = System.nanoTime(); + if (!await) { + doWait(); + } + if (openFailed.get()) { + return; + } + + // Unlike StripePlanner in raw ORC SDK, the stream names and IO production are + // all determined at runtime. + StreamName presentName = new StreamName(columnId, OrcProto.Stream.Kind.PRESENT); + StreamName dataName = new StreamName(columnId, OrcProto.Stream.Kind.DATA); + + InStream presentStream = inStreamMap.get(presentName); + InStream dataStream = inStreamMap.get(dataName); + + // initialize present and integer reader + present = presentStream == null ? null : new BitFieldReader(presentStream); + data = dataStream == null ? null : ColumnReader.createIntegerReader(dataStream, kind); + + // Add memory metrics. + if (present != null) { + String metricsName = MetricsNameBuilder.streamMetricsKey( + presentName, ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER + ); + + ORCProfile memoryCounter = enableMetrics ? new ORCMetricsWrapper( + metricsName, + COLUMN_READER_MEMORY, + ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER.getProfileUnit(), + metrics + ) : null; + + present.setMemoryCounter(memoryCounter); + } + + if (data != null && data instanceof RunLengthIntegerReaderV2) { + String metricsName = MetricsNameBuilder.streamMetricsKey( + dataName, ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER + ); + + ORCProfile memoryCounter = enableMetrics ? new ORCMetricsWrapper( + metricsName, + COLUMN_READER_MEMORY, + ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER.getProfileUnit(), + metrics + ) : null; + + ((RunLengthIntegerReaderV2) data).setMemoryCounter(memoryCounter); + } + + // metrics time cost of preparing (IO waiting + data steam reader constructing) + if (enableMetrics) { + preparingTimer.inc(System.nanoTime() - start); + } + } + + @Override + public void startAt(int rowGroupId, int elementPosition) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(rowGroupIncluded[rowGroupId]); + init(); + + long start = System.nanoTime(); + + // case 1: the column-reader has not been accessed, + // and the first access is the first effective row-group and the position is 0. + boolean isFirstAccess = (currentRowGroup == -1 && lastPosition == -1) + && elementPosition == 0 + && rowGroupId == 0; + + // case 2: the next access follows the last position in the same row-group. + boolean isConsecutive = rowGroupId == currentRowGroup && elementPosition == lastPosition; + + // case 3: the last access reach the last position of the row-group, and the next access is the next + // valid row-group starting at position 0. 
+ boolean isNextRowGroup = currentRowGroup < rowGroupId + && elementPosition == 0 + && lastPosition == indexStride + && (currentRowGroup + 1 == rowGroupId); + +// boolean isFirstAccess = (currentRowGroup == -1 && lastPosition == -1) +// && elementPosition == 0 +// && ColumnReader.countTrue(rowGroupIncluded, 0, rowGroupId) == 0; + +// boolean isNextRowGroup = currentRowGroup < rowGroupId +// && elementPosition == 0 +// && lastPosition == indexStride +// && ((currentRowGroup + 1 == rowGroupId) +// || (ColumnReader.countTrue(rowGroupIncluded, currentRowGroup + 1, rowGroupId) == 0)); + + // It's in order. + if (isFirstAccess || isConsecutive || isNextRowGroup) { + lastPosition = elementPosition; + currentRowGroup = rowGroupId; + // metrics + if (enableMetrics) { + seekTimer.inc(System.nanoTime() - start); + } + + return; + } + + // It's not in order, need skip some position. + if (rowGroupId != currentRowGroup || elementPosition < lastPosition) { + // case 1: when given row group is different from the current group, seek to the position of it. + // case 2: when elementPosition <= lastPosition, we need go back to the start position of this row group. + seek(rowGroupId); + + long skipLen = skipPresent(elementPosition); + + if (data != null) { + data.skip(skipLen); + } + + } else if (elementPosition > lastPosition && elementPosition < indexStride) { + // case 3: when elementPosition > lastPosition and the group is same, just skip to given position. + long skipLen = skipPresent(elementPosition - lastPosition); + + if (data != null) { + data.skip(skipLen); + } + + } else if (elementPosition >= indexStride) { + // case 4: the position is out of range. + throw GeneralUtil.nestedException("Invalid element position: " + elementPosition); + } + // case 5: the elementPosition == lastPosition and rowGroupId is equal. + + lastPosition = elementPosition; + currentRowGroup = rowGroupId; + // metrics + if (enableMetrics) { + seekTimer.inc(System.nanoTime() - start); + } + } + + // Try to skip rows on present stream and count down + // the actual rows need skipped by data stream. + protected long skipPresent(long rows) throws IOException { + if (present == null) { + return rows; + } + + long result = 0; + for (long c = 0; c < rows; ++c) { + // record the count of non-null values + // in range of [current_position, current_position + rows) + if (present.next() == 1) { + result += 1; + } + } + // It must be less than or equal to count of rows. + return result; + } + + @Override + public void seek(int rowGroupId) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + init(); + + // Find the position-provider of given column and row group. + PositionProvider positionProvider; + OrcProto.RowIndex[] rowIndices = orcIndex.getRowGroupIndex(); + OrcProto.RowIndexEntry entry = rowIndices[columnId].getEntry(rowGroupId); + // This is effectively a test for pre-ORC-569 files. + if (rowGroupId == 0 && entry.getPositionsCount() == 0) { + positionProvider = new RecordReaderImpl.ZeroPositionProvider(); + } else { + positionProvider = new RecordReaderImpl.PositionProviderImpl(entry); + } + + // Seek on present stream and data stream. + if (present != null) { + present.seek(positionProvider); + } + if (data != null) { + data.seek(positionProvider); + } + + currentRowGroup = rowGroupId; + lastPosition = 0; + } + + @Override + public void close() { + if (!isClosed.compareAndSet(false, true)) { + return; + } + // 1. 
Clear the resources allocated in InStream + StreamName presentName = new StreamName(columnId, OrcProto.Stream.Kind.PRESENT); + StreamName dataName = new StreamName(columnId, OrcProto.Stream.Kind.DATA); + + // maybe the stripe loading task is not finished yet. + if (inStreamMap != null) { + InStream presentStream = inStreamMap.get(presentName); + InStream dataStream = inStreamMap.get(dataName); + + if (presentStream != null) { + presentStream.close(); + } + + if (dataStream != null) { + dataStream.close(); + } + } + + // 2. Clear the memory resources held by stream + long releasedBytes = 0L; + releasedBytes += stripeLoader.clearStream(presentName); + releasedBytes += stripeLoader.clearStream(dataName); + + if (releasedBytes > 0) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(MessageFormat.format( + "Release the resource of work: {0}, columnId: {1}, bytes: {2}", + metrics.name(), columnId, releasedBytes + )); + } + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/AbstractScanWork.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/AbstractScanWork.java new file mode 100644 index 000000000..bf5675547 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/AbstractScanWork.java @@ -0,0 +1,560 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.archive.reader.OSSColumnTransformer; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.chunk.columnar.CommonLazyBlock; +import com.alibaba.polardbx.executor.chunk.columnar.LazyBlock; +import com.alibaba.polardbx.executor.operator.scan.BlockCacheManager; +import com.alibaba.polardbx.executor.operator.scan.CacheReader; +import com.alibaba.polardbx.executor.operator.scan.ColumnReader; +import com.alibaba.polardbx.executor.operator.scan.ColumnarSplit; +import com.alibaba.polardbx.executor.operator.scan.IOStatus; +import com.alibaba.polardbx.executor.operator.scan.LazyEvaluator; +import com.alibaba.polardbx.executor.operator.scan.RowGroupIterator; +import com.alibaba.polardbx.executor.operator.scan.ScanWork; +import com.alibaba.polardbx.executor.operator.scan.SeekableIterator; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.ProfileKeys; +import com.alibaba.polardbx.executor.operator.scan.metrics.ProfileUnit; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.codahale.metrics.Counter; +import com.google.common.base.Preconditions; +import org.apache.hadoop.fs.Path; +import org.apache.orc.ColumnStatistics; +import org.apache.orc.impl.InStream; +import org.apache.orc.impl.StreamName; +import org.jetbrains.annotations.Nullable; +import org.roaringbitmap.RoaringBitmap; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.BitSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; + +public abstract class AbstractScanWork implements ScanWork { + protected static final Logger LOGGER = LoggerFactory.getLogger(FilterPriorityScanWork.class); + + /** + * IO status to manage the output and state of scan work. + */ + protected final IOStatus ioStatus; + + /** + * The unique identifier of this work. + */ + protected final String workId; + + /** + * The runtime metrics of this scan-work. + */ + protected final RuntimeMetrics metrics; + + protected final boolean enableMetrics; + + /** + * To enumerator all available chunks in all available row-group. + */ + protected RowGroupIterator rgIterator; + + /** + * Deletion bitmap for this columnar file in this query session. + */ + protected final RoaringBitmap deletionBitmap; + + /** + * To evaluate the chunks produced from RowGroupIterator. + */ + protected final LazyEvaluator lazyEvaluator; + + protected final MorselColumnarSplit.ScanRange scanRange; + + /** + * The reference number for filter. + */ + protected final List inputRefsForFilter; + + /** + * The reference number for projection. + */ + protected final List inputRefsForProject; + + /** + * Mapping from ref to chunk index. 
+     * For example, the refs for filter are {3, 5} and the refs for project are {3, 7, 8},
+     * then we get the chunkRefMap: {(3, 0), (5, 1), (7, 2), (8, 3)}
+     */
+    protected final int[] chunkRefMap;
+    protected final SortedSet refSet;
+
+    protected boolean isCanceled;
+
+    /**
+     * To count the time cost of evaluation in scan work.
+     */
+    protected Counter evaluationTimer;
+
+    protected int partNum;
+
+    protected int nodePartCount;
+
+    /**
+     * The flag to be checked by IO processing; the IO processing will stop
+     * immediately if this flag is found to be TRUE.
+     */
+    protected AtomicBoolean isIOCanceled;
+
+    protected OSSColumnTransformer columnTransformer;
+
+    public AbstractScanWork(String workId,
+                            RuntimeMetrics metrics, boolean enableMetrics,
+                            LazyEvaluator lazyEvaluator,
+                            RowGroupIterator rgIterator,
+                            RoaringBitmap deletionBitmap,
+                            MorselColumnarSplit.ScanRange scanRange,
+                            List inputRefsForFilter,
+                            List inputRefsForProject,
+                            int partNum,
+                            int nodePartCount,
+                            OSSColumnTransformer columnTransformer) {
+        this.workId = workId;
+        this.metrics = metrics;
+        this.enableMetrics = enableMetrics;
+        this.lazyEvaluator = lazyEvaluator;
+        this.ioStatus = IOStatusImpl.create(workId);
+        this.rgIterator = rgIterator;
+        this.deletionBitmap = deletionBitmap;
+        this.scanRange = scanRange;
+        this.inputRefsForFilter = inputRefsForFilter.stream().sorted().collect(Collectors.toList());
+        this.inputRefsForProject = inputRefsForProject.stream().sorted().collect(Collectors.toList());
+
+        refSet = new TreeSet<>();
+        refSet.addAll(inputRefsForFilter);
+        refSet.addAll(inputRefsForProject);
+
+        this.chunkRefMap = new int[refSet.last() + 1];
+        int chunkIndex = 0;
+        for (int ref : refSet) {
+            chunkRefMap[ref] = chunkIndex++;
+        }
+
+        if (enableMetrics) {
+            this.evaluationTimer = metrics.addCounter(
+                ProfileKeys.SCAN_WORK_EVALUATION_TIMER.getName(),
+                ScanWork.EVALUATION_TIMER,
+                ProfileUnit.NANO_SECOND
+            );
+        }
+
+        this.partNum = partNum;
+        this.nodePartCount = nodePartCount;
+
+        this.isIOCanceled = new AtomicBoolean(false);
+        this.columnTransformer = columnTransformer;
+    }
+
+    protected abstract void handleNextWork() throws Throwable;
+
+    @Override
+    public void invoke(ExecutorService executor) {
+        executor.submit(() -> {
+            try {
+                handleNextWork();
+            } catch (Throwable e) {
+                ioStatus.addException(e);
+                LOGGER.error("fail to execute sequential scan work: ", e);
+            }
+        });
+    }
+
+    @Override
+    public void cancel() {
+        this.isCanceled = true;
+        this.isIOCanceled.set(true);
+    }
+
+    protected Chunk rebuildProject(Chunk chunk) {
+        return rebuildProject(chunk, null, chunk.getPositionCount());
+    }
+
+    protected Chunk rebuildProject(Chunk chunk, int[] selection, int selSize) {
+        Block[] blocks = new Block[inputRefsForProject.size()];
+        int blockIndex = 0;
+
+        for (int projectRef : inputRefsForProject) {
+            // mapping blocks for projection.
+            int chunkIndex = chunkRefMap[projectRef];
+            Block block = chunk.getBlock(chunkIndex);
+            blocks[blockIndex++] = block;
+
+            // fill with selection.
+            if (selection != null && chunk.getPositionCount() != selSize) {
+                Preconditions.checkArgument(block instanceof CommonLazyBlock);
+                ((CommonLazyBlock) block).setSelection(selection, selSize);
+            }
+        }
+
+        // NOTE: we must set position count of chunk to selection size.
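+        // e.g. with positionCount = 1000 and selection = {3, 17, 42}, the projected
+        // chunk reports selSize = 3 rows, and downstream operators reach the selected
+        // positions through the selection array attached to each lazy block above.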
+        Chunk projectChunk = new Chunk(selSize, blocks);
+        projectChunk.setPartIndex(partNum);
+        projectChunk.setPartCount(nodePartCount);
+        return projectChunk;
+    }
+
+    protected void releaseRef(Chunk chunk) {
+        if (chunk == null) {
+            return;
+        }
+        for (int blockIndex = 0; blockIndex < chunk.getBlockCount(); blockIndex++) {
+            Block block = chunk.getBlock(blockIndex);
+            if (block instanceof LazyBlock) {
+                ((LazyBlock) block).releaseRef();
+            }
+        }
+    }
+
+    protected void singleIO(int filterColumnId, Path filePath, int stripeId, boolean[] prunedRowGroupBitmap,
+                            BlockCacheManager blockCacheManager, boolean useInFlightCache) {
+        // Fetch all cached blocks of this column on different row groups,
+        // and initialize the cached-block-reader for block loading.
+        CacheReader cacheReader = rgIterator.getCacheReader(filterColumnId);
+        if (cacheReader != null && !cacheReader.isInitialized()) {
+            // Ask the block-cache-manager which row-groups of this column have been cached.
+            Map> caches = blockCacheManager
+                .getCachedRowGroups(filePath, stripeId, filterColumnId, prunedRowGroupBitmap);
+
+            if (useInFlightCache) {
+                // Use in-flight cache but also invoke IO processing for its row-groups,
+                // because we cannot be sure that all needed blocks are contained in in-flight caches.
+                Map> inFlightCaches =
+                    blockCacheManager.getInFlightCachedRowGroups(
+                        filePath, stripeId, filterColumnId, prunedRowGroupBitmap
+                    );
+                cacheReader.initialize(caches, inFlightCaches);
+            } else {
+                cacheReader.initialize(caches);
+            }
+        }
+
+        // Open column-reader only once and invoke synchronous IO tasks with given row group bitmap.
+        ColumnReader columnReader = rgIterator.getColumnReader(filterColumnId);
+        if (columnReader != null && cacheReader != null && !columnReader.isOpened()) {
+            // Remove cached row groups from original row group bitmap
+            // that don't need IO processing.
+            boolean[] cachedRowGroupBitmap = cacheReader.cachedRowGroupBitmap();
+            boolean[] rowGroupIncluded = remove(prunedRowGroupBitmap, cachedRowGroupBitmap);
+
+            // Proactively invoke the column-level IO processing.
+            // It should be aware of the total row group bitmap in Stripe-level.
+            // In most cases, we will open column-reader in table scan works.
+            columnReader.open(true, rowGroupIncluded);
+        }
+
+    }
+
+    protected void singleIO(int filterColumnId, Path filePath, int stripeId, boolean[] prunedRowGroupBitmap,
+                            BlockCacheManager blockCacheManager) {
+        singleIO(filterColumnId, filePath, stripeId, prunedRowGroupBitmap, blockCacheManager, false);
+    }
+
+    protected void mergeIO(Path filePath, int stripeId,
+                           List inputRefs,
+                           BlockCacheManager blockCacheManager, boolean[] rowGroupIncluded,
+                           boolean useInFlightCache) {
+        List columnIds = new ArrayList<>();
+        Map rgMatrix = new HashMap<>();
+        for (int i = 0; i < inputRefs.size(); i++) {
+            final Integer columnId = columnTransformer.getLocInOrc(chunkRefMap[inputRefs.get(i)]);
+            if (columnId == null) {
+                continue;
+            }
+            // Fetch all cached blocks of this column on different row groups,
+            // and initialize the cached-block-reader for block loading.
+            CacheReader cacheReader = rgIterator.getCacheReader(columnId);
+            if (cacheReader != null && !cacheReader.isInitialized()) {
+                // Ask the block-cache-manager which row-groups of this column have been cached.
+                Map> caches = blockCacheManager
+                    .getCachedRowGroups(filePath, stripeId, columnId, rowGroupIncluded);
+                if (useInFlightCache) {
+                    // Use in-flight cache but also invoke IO processing for its row-groups,
+                    // because we cannot be sure that all needed blocks are contained in in-flight caches.
+                    Map> inFlightCaches =
+                        blockCacheManager.getInFlightCachedRowGroups(
+                            filePath, stripeId, columnId, rowGroupIncluded
+                        );
+                    cacheReader.initialize(caches, inFlightCaches);
+                } else {
+                    cacheReader.initialize(caches);
+                }
+            }
+
+            // No matter whether rows are selected or not,
+            // we must load the whole column stream at once into memory.
+            ColumnReader columnReader = rgIterator.getColumnReader(columnId);
+            if (columnReader != null && cacheReader != null && !columnReader.isOpened()) {
+                // Remove cached row groups from original row group bitmap
+                // that don't need IO processing.
+                boolean[] cachedRowGroupBitmap = cacheReader.cachedRowGroupBitmap();
+                boolean[] rowGroupInColumn = remove(rowGroupIncluded, cachedRowGroupBitmap);
+
+                columnIds.add(columnId);
+                rgMatrix.put(columnId, rowGroupInColumn);
+            }
+        }
+
+        // Invoke stripe-level IO tasks that have been merged.
+        StripeLoader stripeLoader = rgIterator.getStripeLoader();
+        CompletableFuture> loadFuture =
+            stripeLoader.load(columnIds, rgMatrix, () -> isIOCanceled.get());
+
+        for (int i = 0; i < inputRefs.size(); i++) {
+            final Integer columnId = columnTransformer.getLocInOrc(chunkRefMap[inputRefs.get(i)]);
+            if (columnId == null) {
+                continue;
+            }
+
+            ColumnReader columnReader = rgIterator.getColumnReader(columnId);
+            if (columnReader != null && !columnReader.isOpened() && rgMatrix.get(columnId) != null) {
+                // Open column reader with IO task future.
+                columnReader.open(loadFuture, false, rgMatrix.get(columnId));
+            }
+        }
+    }
+
+    protected void mergeIO(Path filePath, int stripeId,
+                           List inputRefs,
+                           BlockCacheManager blockCacheManager, boolean[] rowGroupIncluded) {
+        mergeIO(filePath, stripeId, inputRefs, blockCacheManager, rowGroupIncluded, false);
+    }
+
+    /**
+     * Not using block cache, do IO and parse for all needed row-groups.
+     */
+    protected void mergeIONoCache(List inputRefs,
+                                  boolean[] rowGroupIncluded) {
+        List columnIds = new ArrayList<>();
+        Map rgMatrix = new HashMap<>();
+        for (int i = 0; i < inputRefs.size(); i++) {
+            final Integer columnId = columnTransformer.getLocInOrc(chunkRefMap[inputRefs.get(i)]);
+            if (columnId == null) {
+                continue;
+            }
+            // No matter whether rows are selected or not,
+            // we must load the whole column stream at once into memory.
+            ColumnReader columnReader = rgIterator.getColumnReader(columnId);
+            if (!columnReader.isOpened()) {
+                columnIds.add(columnId);
+                rgMatrix.put(columnId, rowGroupIncluded);
+            }
+        }
+
+        // Invoke stripe-level IO tasks that have been merged.
+        StripeLoader stripeLoader = rgIterator.getStripeLoader();
+        CompletableFuture> loadFuture =
+            stripeLoader.load(columnIds, rgMatrix, () -> isIOCanceled.get());
+
+        for (int i = 0; i < inputRefs.size(); i++) {
+            final Integer columnId = columnTransformer.getLocInOrc(chunkRefMap[inputRefs.get(i)]);
+            if (columnId == null) {
+                continue;
+            }
+            ColumnReader columnReader = rgIterator.getColumnReader(columnId);
+            if (!columnReader.isOpened() && rgMatrix.get(columnId) != null) {
+                // Open column reader with IO task future.
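+                // The open(...) call only registers the shared loadFuture, it does not
+                // block here; column decoding proceeds once the merged stripe-level IO
+                // has completed.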
+ columnReader.open(loadFuture, false, rgMatrix.get(columnId)); + } + } + } + + protected static boolean[] remove(boolean[] left, boolean[] right) { + boolean[] result = new boolean[left.length]; + for (int i = 0; i < left.length; i++) { + result[i] = left[i] && + (i >= right.length || (i < right.length && !right[i])); + } + return result; + } + + protected boolean hasNonZeros(int[] selection) { + for (int selected : selection) { + if (selected != 0) { + return true; + } + } + return false; + } + + protected int countTrue(boolean[] included) { + int result = 0; + for (boolean b : included) { + result += b ? 1 : 0; + } + return result; + } + + protected boolean[] toRowGroupBitmap(int rowGroupCount, List selectedRowGroups) { + boolean[] result = new boolean[rowGroupCount]; + for (Integer selected : selectedRowGroups) { + if (selected < rowGroupCount) { + result[selected] = true; + } + } + return result; + } + + protected int[] selectionOf(boolean[] bitmap, int selectCount) { + int[] selection = new int[selectCount]; + int selSize = 0; + for (int i = 0; i < bitmap.length; i++) { + if (bitmap[i]) { + selection[selSize++] = i; + } + } + + return selection; + } + + protected int[] selectionOf(BitSet bitmap) { + int[] selection = new int[bitmap.cardinality()]; + int selSize = 0; + for (int i = bitmap.nextSetBit(0); i >= 0; i = bitmap.nextSetBit(i + 1)) { + selection[selSize++] = i; + } + + return selection; + } + + /** + * Generate selection array from deletion bitmap in given range. + */ + protected int @Nullable [] selectionOf(int[] batchRange, RoaringBitmap deletion) { + int[] preSelection = null; + if (deletion != null && !deletion.isEmpty()) { + final int startPosition = batchRange[0]; + final int positionCount = batchRange[1]; + + long cardinality = deletion.rangeCardinality( + startPosition, startPosition + positionCount); + + if (cardinality != 0) { + // partial positions are selected. + preSelection = new int[positionCount - (int) cardinality]; + + // remove deleted positions. + int selectionIndex = 0; + for (int i = 0; i < positionCount; i++) { + if (!deletion.contains(i + startPosition)) { + preSelection[selectionIndex++] = i; + } + } + } + } + return preSelection; + } + + /** + * Generate deleted selection array from deletion bitmap in given range. + */ + protected int @Nullable [] selectionOfDeleted(int[] batchRange, RoaringBitmap deletion) { + int[] preSelection = null; + if (deletion != null && !deletion.isEmpty()) { + final int startPosition = batchRange[0]; + final int positionCount = batchRange[1]; + + long cardinality = deletion.rangeCardinality(startPosition, startPosition + positionCount); + + if (cardinality != 0) { + // deleted positions are selected. + preSelection = new int[(int) cardinality]; + + // select deleted positions only + int selectionIndex = 0; + for (int i = 0; i < positionCount; i++) { + if (deletion.contains(i + startPosition)) { + preSelection[selectionIndex++] = i; + } + } + } + } + return preSelection; + } + + @Override + public IOStatus getIOStatus() { + return ioStatus; + } + + @Override + public String getWorkId() { + return workId; + } + + @Override + public void close() throws IOException { + close(true); + } + + @Override + public void close(boolean force) { + // The close method must be idempotent. 
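+        // A scan work can be closed from both the normal completion path and the
+        // cancel/error path, so closing twice must not double-release resources.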
+ if (rgIterator != null) { + rgIterator.close(force); + } + + if (ioStatus != null) { + ioStatus.close(); + } + } + + @Override + public RuntimeMetrics getMetrics() { + return metrics; + } + + public MorselColumnarSplit.ScanRange getScanRange() { + return scanRange; + } + + /** + * This method is for test only + * + * @return if all readers are closed + */ + public boolean checkIfAllReadersClosed() { + if (rgIterator != null) { + return ((RowGroupIteratorImpl) rgIterator).checkIfAllReadersClosed(); + } else { + return true; + } + } + +} + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/AsyncStripeLoader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/AsyncStripeLoader.java new file mode 100644 index 000000000..242140446 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/AsyncStripeLoader.java @@ -0,0 +1,569 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.ORCMetricsWrapper; +import com.alibaba.polardbx.executor.operator.scan.metrics.ProfileKeys; +import com.alibaba.polardbx.executor.operator.scan.metrics.ProfileUnit; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; +import com.codahale.metrics.Counter; +import com.google.common.base.Preconditions; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.io.DiskRangeList; +import org.apache.orc.CompressionKind; +import org.apache.orc.DataReader; +import org.apache.orc.OrcFile; +import org.apache.orc.OrcProto; +import org.apache.orc.StripeInformation; +import org.apache.orc.TypeDescription; +import org.apache.orc.customized.ORCProfile; +import org.apache.orc.impl.BufferChunk; +import org.apache.orc.impl.BufferChunkList; +import org.apache.orc.impl.DataReaderProperties; +import org.apache.orc.impl.InStream; +import org.apache.orc.impl.OrcCodecPool; +import org.apache.orc.impl.OrcIndex; +import org.apache.orc.impl.RecordReaderUtils; +import org.apache.orc.impl.StreamName; +import org.apache.orc.impl.reader.ReaderEncryption; +import org.apache.orc.impl.reader.StreamInformation; +import org.jetbrains.annotations.NotNull; + +import java.io.IOException; +import java.text.MessageFormat; +import java.util.Arrays; +import java.util.Comparator; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; 
+import java.util.TreeMap;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+
+import static com.alibaba.polardbx.executor.operator.scan.metrics.MetricsNameBuilder.columnMetricsKey;
+import static com.alibaba.polardbx.executor.operator.scan.metrics.MetricsNameBuilder.columnsMetricsKey;
+import static com.alibaba.polardbx.executor.operator.scan.metrics.MetricsNameBuilder.streamMetricsKey;
+
+public class AsyncStripeLoader implements StripeLoader {
+    private static final Logger LOGGER = LoggerFactory.getLogger("oss");
+
+    // Name of metrics
+    public static final String ASYNC_STRIPE_LOADER_MEMORY = "AsyncStripeLoader.Memory";
+    public static final String ASYNC_STRIPE_LOADER_TIMER = "AsyncStripeLoader.Timer";
+    public static final String ASYNC_STRIPE_LOADER_BYTES_RANGE = "AsyncStripeLoader.BytesRange";
+
+    // To sort InStreams with different stream names.
+    private static final Comparator STREAM_NAME_COMPARATOR = (s1, s2) -> {
+        if (s1.getColumn() != s2.getColumn()) {
+            return s1.getColumn() - s2.getColumn();
+        } else {
+            return s1.getKind().name().compareTo(s2.getKind().name());
+        }
+    };
+
+    // parameters for IO processing
+    private final ExecutorService ioExecutor;
+    private final FileSystem fileSystem;
+    private final Configuration configuration;
+    private final Path filePath;
+    private final boolean[] columnIncluded;
+
+    // for compression
+    private final int compressionSize;
+    private final CompressionKind compressionKind;
+
+    // preheated meta of this stripe
+    private final PreheatFileMeta preheatFileMeta;
+
+    // context for stripe parser
+    private final StripeInformation stripeInformation;
+    private final TypeDescription fileSchema;
+    private final OrcFile.WriterVersion version;
+
+    private final ReaderEncryption encryption;
+    private final OrcProto.ColumnEncoding[] encodings;
+    private final boolean ignoreNonUtf8BloomFilter;
+    private final long maxBufferSize;
+    private final int maxDiskRangeChunkLimit;
+    private final long maxMergeDistance;
+
+    // need to be initialized in open()
+    private StripeContext stripeContext;
+    private StreamManager streamManager;
+    private InStream.StreamOptions streamOptions;
+
+    // register loading or loaded columns.
+    // NOTE: The Stripe-Loader is stateful, and a column can only be loaded once in one stripe.
+    private ConcurrentHashMap registerMap;
+
+    // for metrics
+    private final RuntimeMetrics metrics;
+    private final boolean enableMetrics;
+    private boolean isOpened;
+    private Counter openingTimer;
+
+    // for memory management.
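+    // totalAllocatedBytes tracks how many bytes have been reserved against
+    // memoryAllocatorCtx, so that clearStream() can give them back. Despite its
+    // name, releasedStreams holds the streams NOT yet released: it is filled with
+    // every stream in open() and drained as streams are cleared; once empty, any
+    // remaining reservation is returned in one shot.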
+ private final MemoryAllocatorCtx memoryAllocatorCtx; + private AtomicLong totalAllocatedBytes; + private Set releasedStreams; + + public AsyncStripeLoader( + // for file io execution + ExecutorService ioExecutor, FileSystem fileSystem, + Configuration configuration, Path filePath, boolean[] columnIncluded, + + // for compression + int compressionSize, CompressionKind compressionKind, + + // preheated meta of this stripe + PreheatFileMeta preheatFileMeta, + + // context for stripe parser + StripeInformation stripeInformation, + TypeDescription fileSchema, OrcFile.WriterVersion version, + ReaderEncryption encryption, + OrcProto.ColumnEncoding[] encodings, + boolean ignoreNonUtf8BloomFilter, long maxBufferSize, + int maxDiskRangeChunkLimit, long maxMergeDistance, + + // for metrics + RuntimeMetrics metrics, + boolean enableMetrics, MemoryAllocatorCtx memoryAllocatorCtx) { + this.maxDiskRangeChunkLimit = maxDiskRangeChunkLimit; + this.maxMergeDistance = maxMergeDistance; + this.enableMetrics = enableMetrics; + this.memoryAllocatorCtx = memoryAllocatorCtx; + // NOTE: the 0th column in array is tree-struct. + Preconditions.checkArgument(columnIncluded != null + && columnIncluded.length == fileSchema.getMaximumId() + 1); + + this.ioExecutor = ioExecutor; + this.fileSystem = fileSystem; + this.configuration = configuration; + this.filePath = filePath; + this.columnIncluded = columnIncluded; + this.compressionSize = compressionSize; + this.compressionKind = compressionKind; + this.preheatFileMeta = preheatFileMeta; + this.stripeInformation = stripeInformation; + this.fileSchema = fileSchema; + this.version = version; + this.encryption = encryption; + this.encodings = encodings; + this.ignoreNonUtf8BloomFilter = ignoreNonUtf8BloomFilter; + this.maxBufferSize = maxBufferSize; + + this.metrics = metrics; + + // internal state + this.registerMap = new ConcurrentHashMap<>(); + this.isOpened = false; + + if (enableMetrics) { + this.openingTimer = metrics.addCounter( + ProfileKeys.ORC_STRIPE_LOADER_OPEN_TIMER.getName(), + ASYNC_STRIPE_LOADER_TIMER, + ProfileUnit.NANO_SECOND + ); + } + + this.totalAllocatedBytes = new AtomicLong(0); + this.releasedStreams = new HashSet<>(); + } + + @Override + public void open() { + long start = System.nanoTime(); + streamOptions = InStream.options() + .withCodec(OrcCodecPool.getCodec(compressionKind)) + .withBufferSize(compressionSize); + + stripeContext = new StripeContext( + stripeInformation, fileSchema, encryption, version, streamOptions, ignoreNonUtf8BloomFilter, maxBufferSize + ); + + OrcProto.StripeFooter footer = + preheatFileMeta.getStripeFooter(stripeInformation.getStripeId()); + + // Get all stream information in this stripe. + streamManager = StaticStripePlanner.parseStripe( + stripeContext, columnIncluded, footer + ); + releasedStreams.addAll(streamManager.getStreams().keySet()); + + isOpened = true; + if (enableMetrics) { + openingTimer.inc(System.nanoTime() - start); + } + } + + @Override + public CompletableFuture> load(List columnIds, + Map rowGroupBitmaps, + Supplier controller) { + Preconditions.checkArgument(isOpened, "The stripe loader has not already been opened"); + // Column-level parallel data loading is only suitable for columns that size > 2MB in one stripe. + // In some cases, we need merge all columns in one IO task. 
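+        // e.g. three narrow columns of a few hundred KB each are better served by one
+        // merged range read than by three small object-storage requests; callers choose
+        // between the list-based and the single-column load accordingly.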
+ + OrcIndex orcIndex = preheatFileMeta.getOrcIndex( + stripeInformation.getStripeId() + ); + + // build selected columns bitmap + boolean[] selectedColumns = new boolean[fileSchema.getMaximumId() + 1]; + Arrays.fill(selectedColumns, false); + columnIds.forEach(col -> selectedColumns[col] = true); + + // Build IO plans for each column with different row group bitmaps + // and merge them into one buffer-chunk-list. + + // Get the IO plan of all streams in this column. + BufferChunkList result = StaticStripePlanner.planGroupsInColumn( + stripeContext, + streamManager, + streamOptions, + orcIndex, + rowGroupBitmaps, + selectedColumns + ); + + // check buffer chunk list + long bytesInIOPlan = 0L; + long bytesHitStream = 0L; + for (BufferChunk node = result.get(); node != null; node = (BufferChunk) node.next) { + bytesInIOPlan += node.getLength(); + } + + // metrics the logical bytes range. + Counter bytesRangeCounter = enableMetrics ? metrics.addCounter( + columnsMetricsKey(selectedColumns, ProfileKeys.ORC_LOGICAL_BYTES_RANGE), + ASYNC_STRIPE_LOADER_BYTES_RANGE, + ProfileKeys.ORC_LOGICAL_BYTES_RANGE.getProfileUnit() + ) : null; + for (Map.Entry entry : streamManager.getStreams().entrySet()) { + StreamName streamName = entry.getKey(); + if (streamName.getColumn() < selectedColumns.length && selectedColumns[streamName.getColumn()]) { + StreamInformation stream = entry.getValue(); + + // filter stream with no data. + if (stream != null && stream.firstChunk != null) { + for (DiskRangeList node = stream.firstChunk; node != null; node = node.next) { + if (node.getOffset() >= stream.offset + && node.getEnd() <= stream.offset + stream.length) { + if (enableMetrics) { + bytesRangeCounter.inc(node.getLength()); + } + bytesHitStream += node.getLength(); + } + } + } + } + } + + Preconditions.checkArgument(bytesHitStream == bytesInIOPlan, + String.format( + "bytesHitStream = %s but bytesInIOPlan = %s", + bytesHitStream, bytesInIOPlan + )); + + // large memory allocation: buffer chunk list in stripe-level IO processing. + // We must multiply by a factor of 2 because the OSS network buffer + // or file read buffer requires the same memory size as bytesInIOPlan. + final long allocatedBytes = 2 * bytesInIOPlan; + memoryAllocatorCtx.allocateReservedMemory(allocatedBytes); + totalAllocatedBytes.addAndGet(allocatedBytes); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(MessageFormat.format("filePath = {0}, stripeId = {1}, allocatedBytes = {2}", + filePath, stripeInformation.getStripeId(), allocatedBytes)); + } + + return CompletableFuture.supplyAsync( + () -> readData(selectedColumns, result, controller), ioExecutor + ); + } + + @Override + public CompletableFuture> load(int targetColumnId, + boolean[] targetRowGroups, + Supplier controller) { + Preconditions.checkArgument(isOpened, "The stripe loader has not already been opened"); + // Column-level parallel data loading is only suitable for columns that size > 2MB in one stripe. + // In some cases, we need merge all columns in one IO task. + + if (registerMap.putIfAbsent(targetColumnId, targetRowGroups) != null) { + // The Stripe-Loader is stateful, and a column can only be loaded once in one stripe. + throw new RuntimeException( + MessageFormat.format("The column id {0} in stripe can only be planned once", targetColumnId) + ); + } + + OrcIndex orcIndex = preheatFileMeta.getOrcIndex( + stripeInformation.getStripeId() + ); + + // Get the IO plan of all streams in this column. 
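+        // The plan is a linked list of byte ranges that covers only the streams of
+        // this column and only the row groups whose bit is set in targetRowGroups.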
+        BufferChunkList ioPlan = StaticStripePlanner.planGroupsInColumn(
+            stripeContext,
+            streamManager,
+            streamOptions,
+            orcIndex,
+            targetRowGroups,
+            targetColumnId
+        );
+
+        // check buffer chunk list
+        long bytesInIOPlan = 0L;
+        long bytesHitStream = 0L;
+        for (BufferChunk node = ioPlan.get(); node != null; node = (BufferChunk) node.next) {
+            bytesInIOPlan += node.getLength();
+        }
+
+        // metrics the logical bytes range.
+        Counter bytesRangeCounter = enableMetrics ? metrics.addCounter(
+            columnMetricsKey(targetColumnId, ProfileKeys.ORC_LOGICAL_BYTES_RANGE),
+            ASYNC_STRIPE_LOADER_BYTES_RANGE,
+            ProfileKeys.ORC_LOGICAL_BYTES_RANGE.getProfileUnit()
+        ) : null;
+        for (Map.Entry entry : streamManager.getStreams().entrySet()) {
+            StreamName streamName = entry.getKey();
+            if (streamName.getColumn() == targetColumnId) {
+                StreamInformation stream = entry.getValue();
+
+                // filter stream with no data.
+                if (stream != null && stream.firstChunk != null) {
+                    for (DiskRangeList node = stream.firstChunk; node != null; node = node.next) {
+                        if (node.getOffset() >= stream.offset
+                            && node.getEnd() <= stream.offset + stream.length) {
+                            if (enableMetrics) {
+                                bytesRangeCounter.inc(node.getLength());
+                            }
+                            bytesHitStream += node.getLength();
+                        }
+                    }
+                }
+            }
+        }
+
+        Preconditions.checkArgument(bytesHitStream == bytesInIOPlan,
+            String.format(
+                "bytesHitStream = %s but bytesInIOPlan = %s",
+                bytesHitStream, bytesInIOPlan
+            ));
+
+        // large memory allocation: buffer chunk list in stripe-level IO processing.
+        // We must multiply by a factor of 2 because the OSS network buffer
+        // or file read buffer requires the same memory size as bytesInIOPlan.
+        final long allocatedBytes = 2 * bytesInIOPlan;
+        memoryAllocatorCtx.allocateReservedMemory(allocatedBytes);
+        totalAllocatedBytes.addAndGet(allocatedBytes);
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug(MessageFormat.format("filePath = {0}, stripeId = {1}, allocatedBytes = {2}",
+                filePath, stripeInformation.getStripeId(), allocatedBytes));
+        }
+
+        return CompletableFuture.supplyAsync(
+            () -> readData(targetColumnId, ioPlan, controller), ioExecutor
+        );
+    }
+
+    @Override
+    public long clearStream(StreamName streamName) {
+        if (streamManager == null) {
+            return 0L;
+        }
+        // find stream information and clear the buffer chunk list.
+        Map allStreams = streamManager.getStreams();
+        StreamInformation streamInformation;
+        if ((streamInformation = allStreams.get(streamName)) != null) {
+            long releasedBytes = streamInformation.releaseBuffers();
+
+            // release the memory reserved for data IO.
+            memoryAllocatorCtx.releaseReservedMemory(2 * releasedBytes, true);
+            totalAllocatedBytes.getAndAdd(-2 * releasedBytes);
+
+            releasedStreams.remove(streamName);
+            if (releasedStreams.isEmpty()) {
+                // all streams have been released.
+                memoryAllocatorCtx.releaseReservedMemory(totalAllocatedBytes.get(), true);
+                totalAllocatedBytes.getAndAdd(-totalAllocatedBytes.get());
+            }
+
+            return releasedBytes;
+        }
+        return 0L;
+    }
+
+    /**
+     * For validation:
+     * the list of {range, data} must be in range of stream [offset, length],
+     * and the result DiskRangeList must be constructed by linking all streams.
+     *
+     * @return stream manager holding the stream information.
+     */
+    public StreamManager getStreamManager() {
+        return streamManager;
+    }
+
+    private Map readData(boolean[] selectedColumns, BufferChunkList ioPlan,
+                         Supplier controller) {
+        try (DataReader dataReader = buildDataReader()) {
+            dataReader.setController(controller);
+            if (enableMetrics) {
+                // build profile for IO processing.
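+                // The ORCProfile wrappers forward the ORC reader's internal counters
+                // (raw-data memory and IO time) into this work's RuntimeMetrics.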
+ ORCProfile memoryCounter = new ORCMetricsWrapper( + columnsMetricsKey(selectedColumns, ProfileKeys.ORC_IO_RAW_DATA_MEMORY_COUNTER), + ASYNC_STRIPE_LOADER_MEMORY, + ProfileKeys.ORC_IO_RAW_DATA_MEMORY_COUNTER.getProfileUnit(), + metrics); + + ORCProfile ioTimer = new ORCMetricsWrapper( + columnsMetricsKey(selectedColumns, ProfileKeys.ORC_IO_RAW_DATA_TIMER), + ASYNC_STRIPE_LOADER_TIMER, + ProfileKeys.ORC_IO_RAW_DATA_TIMER.getProfileUnit(), + metrics); + + // Execute IO tasks within the buffer chunk list. + dataReader.readFileData(ioPlan, false, memoryCounter, null, ioTimer); + } else { + dataReader.readFileData(ioPlan, false); + } + } catch (Throwable t) { + // IO ERROR + throw GeneralUtil.nestedException(t); + } + + // Build in-streams after IO tasks done + return buildInStreams((col) -> col < selectedColumns.length && selectedColumns[col]); + } + + private Map readData(int targetColumnId, BufferChunkList ioPlan, + Supplier controller) { + try (DataReader dataReader = buildDataReader()) { + dataReader.setController(controller); + if (enableMetrics) { + // build profile for IO processing. + ORCProfile memoryCounter = new ORCMetricsWrapper( + columnMetricsKey(targetColumnId, ProfileKeys.ORC_IO_RAW_DATA_MEMORY_COUNTER), + ASYNC_STRIPE_LOADER_MEMORY, + ProfileKeys.ORC_IO_RAW_DATA_MEMORY_COUNTER.getProfileUnit(), + metrics); + + ORCProfile ioTimer = new ORCMetricsWrapper( + columnMetricsKey(targetColumnId, ProfileKeys.ORC_IO_RAW_DATA_TIMER), + ASYNC_STRIPE_LOADER_TIMER, + ProfileKeys.ORC_IO_RAW_DATA_TIMER.getProfileUnit(), + metrics); + + // Execute IO tasks within the buffer chunk list. + dataReader.readFileData(ioPlan, false, memoryCounter, null, ioTimer); + } else { + dataReader.readFileData(ioPlan, false); + } + } catch (Throwable t) { + // IO ERROR + throw GeneralUtil.nestedException(t); + } + + // Build in-streams after IO tasks done + Map results = buildInStreams((col) -> col == targetColumnId); + + return results; + } + + @NotNull + private Map buildInStreams(Predicate columnFilter) { + Map results = new TreeMap<>(STREAM_NAME_COMPARATOR); + for (Map.Entry entry : streamManager.getStreams().entrySet()) { + StreamName streamName = entry.getKey(); + if (columnFilter.test(streamName.getColumn())) { + StreamInformation stream = entry.getValue(); + + // filter stream with no data. + if (stream != null && stream.firstChunk != null) { + + InStream inStream = InStream.create( + streamName, + stream.firstChunk, + stream.offset, + stream.length, + streamOptions); + + // build profile for in stream. + ORCProfile memoryCounter = enableMetrics ? new ORCMetricsWrapper( + streamMetricsKey(streamName, ProfileKeys.ORC_IN_STREAM_MEMORY_COUNTER), + ASYNC_STRIPE_LOADER_MEMORY, + ProfileKeys.ORC_IN_STREAM_MEMORY_COUNTER.getProfileUnit(), + metrics) : null; + + ORCProfile decompressTimer = enableMetrics ? new ORCMetricsWrapper( + streamMetricsKey(streamName, ProfileKeys.ORC_IN_STREAM_DECOMPRESS_TIMER), + ASYNC_STRIPE_LOADER_TIMER, + ProfileKeys.ORC_IN_STREAM_DECOMPRESS_TIMER.getProfileUnit(), + metrics) : null; + + inStream.setMemoryCounter(memoryCounter); + inStream.setDecompressTimer(decompressTimer); + + results.put(streamName, inStream); + } + } + } + return results; + } + + private DataReader buildDataReader() throws IOException { + // The stream options will be modified when data-reader closing. 
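+        // Hence a fresh StreamOptions (with a codec borrowed from OrcCodecPool) is
+        // built per DataReader instead of sharing this.streamOptions across readers.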
+ InStream.StreamOptions streamOptions = InStream.options() + .withCodec(OrcCodecPool.getCodec(compressionKind)) + .withBufferSize(compressionSize); + + DataReaderProperties.Builder builder = + DataReaderProperties.builder() + .withCompression(streamOptions) + .withFileSystemSupplier(() -> fileSystem) + .withPath(filePath) + .withMaxMergeDistance(maxMergeDistance) + .withMaxDiskRangeChunkLimit(maxDiskRangeChunkLimit) + .withZeroCopy(false); + FSDataInputStream file = fileSystem.open(filePath); + if (file != null) { + builder.withFile(file); + } + + DataReader dataReader = RecordReaderUtils.createDefaultDataReader(builder.build()); + return dataReader; + } + + @Override + public void close() throws IOException { + // nothing should be closed here. + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/BigBitColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/BigBitColumnReader.java new file mode 100644 index 000000000..2d36e6ee7 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/BigBitColumnReader.java @@ -0,0 +1,121 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.executor.chunk.BigIntegerBlock; +import com.alibaba.polardbx.executor.chunk.BigIntegerBlockBuilder; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.google.common.base.Preconditions; +import org.apache.orc.OrcProto; +import org.apache.orc.impl.OrcIndex; + +import java.io.IOException; +import java.math.BigInteger; +import java.util.Arrays; + +public class BigBitColumnReader extends AbstractLongColumnReader { + + private static final byte[] EMPTY_PACKET = new byte[BigIntegerBlock.LENGTH]; + + public BigBitColumnReader(int columnId, boolean isPrimaryKey, StripeLoader stripeLoader, OrcIndex orcIndex, + RuntimeMetrics metrics, + OrcProto.ColumnEncoding.Kind kind, int indexStride, + boolean enableMetrics) { + super(columnId, isPrimaryKey, stripeLoader, orcIndex, metrics, kind, indexStride, enableMetrics); + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof BigIntegerBlock); + init(); + + long start = System.nanoTime(); + BigIntegerBlock bigIntegerBlock = (BigIntegerBlock) randomAccessBlock; + byte[] blockData = bigIntegerBlock.getData(); + boolean[] nulls = bigIntegerBlock.nulls(); + + if (present == null) { + randomAccessBlock.setHasNull(false); + for (int i = 0; i < positionCount; i++) { + // no null value. + long longVal = data.next(); + final byte[] bytes = toBigIntegerBytes(longVal); + appendBigIntegerBytes(blockData, i, bytes); + lastPosition++; + } + + // destroy null array to save the memory. 
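+            // With no PRESENT stream every value is non-null, so the block can drop
+            // its nulls array entirely instead of keeping an all-false one.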
+ bigIntegerBlock.destroyNulls(true); + + } else { + // there are some null values + randomAccessBlock.setHasNull(true); + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + appendBigIntegerBytes(blockData, i, EMPTY_PACKET); + } else { + // if not null + long longVal = data.next(); + final byte[] bytes = toBigIntegerBytes(longVal); + appendBigIntegerBytes(blockData, i, bytes); + } + lastPosition++; + } + } + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } + + /** + * @param bytes from BigInteger with checked length + */ + private void appendBigIntegerBytes(byte[] blockData, int position, byte[] bytes) { + int offset = position * BigIntegerBlock.LENGTH; + if (offset >= blockData.length || offset + BigIntegerBlock.LENGTH > blockData.length) { + throw new ArrayIndexOutOfBoundsException("BigIntegerBlock data length=" + blockData.length + + ", offset=" + offset); + } + System.arraycopy(bytes, 0, blockData, offset, bytes.length); + int idx = bytes.length; + if (bytes.length == BigIntegerBlock.LENGTH) { + // when it is an empty packet + return; + } + for (; idx < BigIntegerBlock.UNSCALED_LENGTH; idx++) { + blockData[offset + idx] = (byte) 0; + } + blockData[offset + idx] = (byte) bytes.length; + } + + private byte[] toBigIntegerBytes(long longVal) { + BigInteger bigInteger = BigInteger.valueOf(longVal); + final byte[] bytes = bigInteger.toByteArray(); + if (bytes.length > BigIntegerBlock.UNSCALED_LENGTH) { + throw new AssertionError("decimal with unexpected digits number"); + } + return bytes; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/ByteColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/ByteColumnReader.java new file mode 100644 index 000000000..009df3db9 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/ByteColumnReader.java @@ -0,0 +1,85 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.ByteBlock; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.chunk.ShortBlock; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.google.common.base.Preconditions; +import org.apache.orc.OrcProto; +import org.apache.orc.impl.OrcIndex; + +import java.io.IOException; + +public class ByteColumnReader extends AbstractLongColumnReader { + public ByteColumnReader(int columnId, boolean isPrimaryKey, + StripeLoader stripeLoader, OrcIndex orcIndex, + RuntimeMetrics metrics, + OrcProto.ColumnEncoding.Kind kind, int indexStride, + boolean enableMetrics) { + super(columnId, isPrimaryKey, stripeLoader, orcIndex, metrics, kind, indexStride, enableMetrics); + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof ByteBlock); + init(); + + long start = System.nanoTime(); + ByteBlock byteBlock = (ByteBlock) randomAccessBlock; + byte[] array = byteBlock.byteArray(); + boolean[] nulls = byteBlock.nulls(); + + if (present == null) { + randomAccessBlock.setHasNull(false); + for (int i = 0; i < positionCount; i++) { + // no null value. + long longVal = data.next(); + array[i] = (byte) longVal; + lastPosition++; + } + + // destroy null array to save the memory. + byteBlock.destroyNulls(true); + + } else { + // there are some null values + randomAccessBlock.setHasNull(true); + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + array[i] = 0; + } else { + // if not null + long longVal = data.next(); + array[i] = (byte) longVal; + } + lastPosition++; + } + } + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/CacheReaderImpl.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/CacheReaderImpl.java new file mode 100644 index 000000000..d6f639ab3 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/CacheReaderImpl.java @@ -0,0 +1,112 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.operator.scan.CacheReader; +import com.alibaba.polardbx.executor.operator.scan.SeekableIterator; +import com.google.common.base.Preconditions; + +import java.util.Arrays; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * Cached block reader in scope of stripe. + */ +public class CacheReaderImpl implements CacheReader { + // The stripe id. + private final int stripeId; + + // The column id. + private final int columnId; + + // Total count of row-groups in given stripe. + private final int rowGroupCount; + + // inner state + private AtomicBoolean isInitialized; + + // For initialization. + private Map> allValidCaches; + private Map> inFlightCaches; + private boolean[] cachedRowGroupBitmap; + + public CacheReaderImpl(int stripeId, int columnId, int rowGroupCount) { + this.stripeId = stripeId; + this.columnId = columnId; + this.rowGroupCount = rowGroupCount; + this.isInitialized = new AtomicBoolean(false); + } + + @Override + public void initialize(Map> allCaches) { + initialize(allCaches, null); + } + + @Override + public void initialize(Map> allValidCaches, + Map> inFlightCaches) { + if (!isInitialized.compareAndSet(false, true)) { + throw GeneralUtil.nestedException("This cache reader has already been initialized."); + } + this.allValidCaches = allValidCaches; + this.inFlightCaches = inFlightCaches; + + // Record the selected row groups. + this.cachedRowGroupBitmap = new boolean[rowGroupCount]; + Arrays.fill(cachedRowGroupBitmap, false); + allValidCaches.keySet().stream().forEach( + rg -> cachedRowGroupBitmap[rg] = true + ); + } + + @Override + public boolean isInitialized() { + return isInitialized.get(); + } + + @Override + public int columnId() { + return this.columnId; + } + + @Override + public boolean[] cachedRowGroupBitmap() { + Preconditions.checkArgument(isInitialized.get()); + return cachedRowGroupBitmap; + } + + @Override + public Map> allCaches() { + return allValidCaches; + } + + @Override + public Block getCache(int groupId, int position) { + SeekableIterator iterator = allValidCaches.get(groupId); + if (iterator == null) { + if (inFlightCaches == null + || (iterator = inFlightCaches.get(groupId)) == null) { + return null; + } + } + // seek and move to next position of block. + return iterator.seek(position); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/CsvColumnarSplit.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/CsvColumnarSplit.java new file mode 100644 index 000000000..e32a596af --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/CsvColumnarSplit.java @@ -0,0 +1,306 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.executor.operator.scan.impl;
+
+import com.alibaba.polardbx.common.Engine;
+import com.alibaba.polardbx.common.properties.ConnectionParams;
+import com.alibaba.polardbx.common.utils.GeneralUtil;
+import com.alibaba.polardbx.executor.archive.reader.OSSColumnTransformer;
+import com.alibaba.polardbx.executor.chunk.Block;
+import com.alibaba.polardbx.executor.chunk.Chunk;
+import com.alibaba.polardbx.executor.gms.ColumnarManager;
+import com.alibaba.polardbx.executor.gms.ColumnarSchemaTransformer;
+import com.alibaba.polardbx.executor.mpp.planner.FragmentRFManager;
+import com.alibaba.polardbx.executor.operator.scan.BlockCacheManager;
+import com.alibaba.polardbx.executor.operator.scan.ColumnarSplit;
+import com.alibaba.polardbx.executor.operator.scan.LazyEvaluator;
+import com.alibaba.polardbx.executor.operator.scan.ScanPreProcessor;
+import com.alibaba.polardbx.executor.operator.scan.ScanWork;
+import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.datatype.DataType;
+import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx;
+import com.alibaba.polardbx.optimizer.statis.OperatorStatistics;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+import java.util.BitSet;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+
+public class CsvColumnarSplit implements ColumnarSplit {
+    private final ExecutionContext executionContext;
+    private final ColumnarManager columnarManager;
+    private final long tso;
+    private final Path csvFile;
+    private final int fileId;
+    private final int sequenceId;
+    private final ScanPreProcessor preProcessor;
+    private final OSSColumnTransformer columnTransformer;
+
+    private final List inputRefsForFilter;
+    private final List inputRefsForProject;
+
+    private CsvScanWork csvScanWork;
+    private boolean isScanWorkInvoked = false;
+    private LazyEvaluator lazyEvaluator;
+
+    private int partNum;
+
+    private int nodePartCount;
+
+    public CsvColumnarSplit(ExecutionContext executionContext, ColumnarManager columnarManager, long tso,
+                            Path csvFile, int fileId, int sequenceId, List inputRefsForFilter,
+                            List inputRefsForProject, ScanPreProcessor preProcessor,
+                            LazyEvaluator lazyEvaluator,
+                            int partNum, int nodePartCount,
+                            OSSColumnTransformer columnTransformer) {
+        this.executionContext = executionContext;
+        this.columnarManager = columnarManager;
+        this.tso = tso;
+        this.csvFile = csvFile;
+        this.fileId = fileId;
+        this.sequenceId = sequenceId;
+        this.inputRefsForFilter = inputRefsForFilter;
+        this.inputRefsForProject = inputRefsForProject;
+        this.preProcessor = preProcessor;
+        this.lazyEvaluator = lazyEvaluator;
+        this.partNum = partNum;
+        this.nodePartCount = nodePartCount;
+        this.columnTransformer = columnTransformer;
+    }
+
+    @Override
+    public String getHostAddress() {
+        return csvFile.toString();
+    }
+
+    @Override
+    public int getSequenceId() {
+        return sequenceId;
+    }
+
+    @Override
+    public int getFileId() {
+        return fileId;
+    }
+
+    @Override
+    public synchronized ScanWork nextWork() {
+        if (isScanWorkInvoked) {
+            return null;
+        }
+
+        if (!preProcessor.isPrepared()) {
+            throw GeneralUtil.nestedException("The pre-processor is not prepared");
+        }
+        String scanWorkId = generateScanWorkId(executionContext.getTraceId(), csvFile.toString(), 0);
+        boolean enableMetrics = executionContext.getParamManager()
.getBoolean(ConnectionParams.ENABLE_COLUMNAR_METRICS); + boolean useSelection = executionContext.getParamManager() + .getBoolean(ConnectionParams.ENABLE_COLUMNAR_SCAN_SELECTION); + boolean enableCompatible = executionContext.getParamManager() + .getBoolean(ConnectionParams.ENABLE_OSS_COMPATIBLE); + RuntimeMetrics metrics = RuntimeMetrics.create(scanWorkId); + csvScanWork = new CsvScanWork(columnarManager, tso, csvFile, inputRefsForFilter, inputRefsForProject, + executionContext, scanWorkId, metrics, enableMetrics, lazyEvaluator, preProcessor.getDeletion(csvFile), + partNum, nodePartCount, useSelection, enableCompatible, columnTransformer); + isScanWorkInvoked = true; + return (ScanWork) csvScanWork; + } + + @Override + public ColumnarSplitPriority getPriority() { + return ColumnarSplitPriority.CSV_SPLIT_PRIORITY; + } + + private static String generateScanWorkId(String traceId, String file, int workIndex) { + return "ScanWork$" + + traceId + '$' + + file + '$' + + workIndex; + } + + public static ColumnarSplitBuilder newBuilder() { + return new CsvColumnarSplitBuilder(); + } + + static class CsvColumnarSplitBuilder implements ColumnarSplitBuilder { + private ExecutionContext executionContext; + private ColumnarManager columnarManager; + private OSSColumnTransformer columnTransformer; + private String logicalSchema; + private String logicalTable; + private Long tso; + private Path csvFile; + private int fileId; + private int sequenceId; + private LazyEvaluator lazyEvaluator; + + private List inputRefsForFilter; + private List inputRefsForProject; + private ScanPreProcessor preProcessor; + + private int partNum; + + private int nodePartCount; + private MemoryAllocatorCtx memoryAllocatorCtx; + + private FragmentRFManager fragmentRFManager; + private OperatorStatistics operatorStatistics; + + @Override + public ColumnarSplit build() { + return new CsvColumnarSplit(executionContext, columnarManager, tso, csvFile, fileId, sequenceId, + inputRefsForFilter, + inputRefsForProject, + preProcessor, lazyEvaluator, + partNum, nodePartCount, + columnTransformer); + } + + @Override + public ColumnarSplitBuilder executionContext(ExecutionContext context) { + this.executionContext = context; + return this; + } + + @Override + public ColumnarSplitBuilder columnarManager(ColumnarManager columnarManager) { + this.columnarManager = columnarManager; + return this; + } + + @Override + public ColumnarSplitBuilder isColumnarMode(boolean isColumnarMode) { + // do nothing because the csv split is always in columnar mode. 
+ return this; + } + + @Override + public ColumnarSplitBuilder tso(Long tso) { + this.tso = tso; + return this; + } + + @Override + public ColumnarSplitBuilder ioExecutor(ExecutorService ioExecutor) { + return this; + } + + @Override + public ColumnarSplitBuilder fileSystem(FileSystem fileSystem, Engine engine) { + return this; + } + + @Override + public ColumnarSplitBuilder configuration(Configuration configuration) { + return this; + } + + @Override + public ColumnarSplitBuilder sequenceId(int sequenceId) { + this.sequenceId = sequenceId; + return this; + } + + @Override + public ColumnarSplitBuilder file(Path filePath, int fileId) { + this.csvFile = filePath; + this.fileId = fileId; + return this; + } + + @Override + public ColumnarSplitBuilder tableMeta(String logicalSchema, String logicalTable) { + this.logicalSchema = logicalSchema; + this.logicalTable = logicalTable; + return this; + } + + @Override + public ColumnarSplitBuilder columnTransformer(OSSColumnTransformer ossColumnTransformer) { + this.columnTransformer = ossColumnTransformer; + return this; + } + + @Override + public ColumnarSplitBuilder inputRefs(List inputRefsForFilter, List inputRefsForProject) { + this.inputRefsForFilter = inputRefsForFilter; + this.inputRefsForProject = inputRefsForProject; + return this; + } + + @Override + public ColumnarSplitBuilder cacheManager(BlockCacheManager blockCacheManager) { + return this; + } + + @Override + public ColumnarSplitBuilder chunkLimit(int chunkLimit) { + return this; + } + + @Override + public ColumnarSplitBuilder morselUnit(int rgThreshold) { + return this; + } + + @Override + public ColumnarSplitBuilder pushDown(LazyEvaluator lazyEvaluator) { + this.lazyEvaluator = lazyEvaluator; + return this; + } + + @Override + public ColumnarSplitBuilder prepare(ScanPreProcessor scanPreProcessor) { + this.preProcessor = scanPreProcessor; + return this; + } + + @Override + public ColumnarSplitBuilder partNum(int partNum) { + this.partNum = partNum; + return this; + } + + @Override + public ColumnarSplitBuilder nodePartCount(int nodePartCount) { + this.nodePartCount = nodePartCount; + return this; + } + + @Override + public ColumnarSplitBuilder memoryAllocator(MemoryAllocatorCtx memoryAllocatorCtx) { + this.memoryAllocatorCtx = memoryAllocatorCtx; + return this; + } + + @Override + public ColumnarSplitBuilder fragmentRFManager(FragmentRFManager fragmentRFManager) { + this.fragmentRFManager = fragmentRFManager; + return this; + } + + @Override + public ColumnarSplitBuilder operatorStatistic(OperatorStatistics operatorStatistics) { + this.operatorStatistics = operatorStatistics; + return this; + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/CsvScanWork.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/CsvScanWork.java new file mode 100644 index 000000000..6863511b0 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/CsvScanWork.java @@ -0,0 +1,251 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.archive.reader.OSSColumnTransformer; +import com.alibaba.polardbx.executor.archive.reader.TypeComparison; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockConverter; +import com.alibaba.polardbx.executor.chunk.BlockUtils; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.chunk.Converters; +import com.alibaba.polardbx.executor.chunk.TimestampBlock; +import com.alibaba.polardbx.executor.gms.ColumnarManager; +import com.alibaba.polardbx.executor.operator.scan.IOStatus; +import com.alibaba.polardbx.executor.operator.scan.LazyEvaluator; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.utils.TimestampUtils; +import org.apache.hadoop.fs.Path; +import org.roaringbitmap.RoaringBitmap; + +import java.io.IOException; +import java.time.ZoneId; +import java.util.BitSet; +import java.util.List; +import java.util.TimeZone; +import java.util.concurrent.ExecutorService; +import java.util.stream.Collectors; + +public class CsvScanWork extends AbstractScanWork { + private static final Logger logger = LoggerFactory.getLogger("COLUMNAR_TRANS"); + + private final ColumnarManager columnarManager; + + private final ExecutionContext executionContext; + + private final long tso; + + private final Path csvFile; + + private final boolean useSelection; + + private final boolean enableCompatible; + + private final List refList; + + private final TimeZone targetTimeZone; + + public CsvScanWork(ColumnarManager columnarManager, long tso, Path csvFile, + List inputRefsForFilter, + List inputRefsForProject, + ExecutionContext executionContext, + String workId, RuntimeMetrics metrics, boolean enableMetrics, + LazyEvaluator lazyEvaluator, RoaringBitmap deletion, + int partNum, int nodePartCount, boolean useSelection, boolean enableCompatible, + OSSColumnTransformer ossColumnTransformer) { + super(workId, metrics, enableMetrics, lazyEvaluator, null, deletion, null, inputRefsForFilter, + inputRefsForProject, partNum, nodePartCount, ossColumnTransformer); + this.columnarManager = columnarManager; + this.tso = tso; + this.csvFile = csvFile; + this.useSelection = useSelection; + this.enableCompatible = enableCompatible; + this.executionContext = executionContext; + this.targetTimeZone = TimestampUtils.getTimeZone(executionContext); + refList = refSet.stream().sorted().collect(Collectors.toList()); + } + + protected void handleNextWork() throws Throwable { + List chunkList; + if (executionContext.isEnableOrcRawTypeBlock()) { + // Special csv scan work for raw orc type. + // Only Long/Double/ByteArray blocks are created. + // Normal query should not get there. 
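+            // (Presumably used by internal columnar tooling rather than by user
+            // queries, judging from the isEnableOrcRawTypeBlock flag name.)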
+ chunkList = columnarManager.rawCsvData(tso, csvFile.getName(), executionContext); + } else { + chunkList = columnarManager.csvData(tso, csvFile.getName()); + } + int filterColumns = inputRefsForFilter.size(); + + boolean skipEvaluation = filterColumns == 0; + int totalPositionCnt = 0; + for (Chunk chunk : chunkList) { + if (isCanceled) { + break; + } + + chunk = projectCsvChunk(chunk); + int positionCnt = chunk.getPositionCount(); + int[] selection = null; + + if (!skipEvaluation) { + long start = System.nanoTime(); + + selection = selectionOf(lazyEvaluator.eval(chunk, totalPositionCnt, positionCnt, deletionBitmap)); + + if (enableMetrics) { + evaluationTimer.inc(System.nanoTime() - start); + } + } else { + selection = selectionOf(new int[] {totalPositionCnt, positionCnt}, deletionBitmap); + } + + // NULL selection means full selection here + if (selection == null) { + ioStatus.addResult(chunk); + } else if (selection.length > 0) { + // rebuild chunk according to project refs. + Chunk projectChunk = rebuildProject(chunk, selection, selection.length); + ioStatus.addResult(projectChunk); + } + totalPositionCnt += positionCnt; + } + + logger.debug( + String.format("Csv scan work finished: chunk count: %d, row count: %d, row/chunk: %f", chunkList.size(), + totalPositionCnt, (double) totalPositionCnt / chunkList.size())); + + ioStatus.finish(); + } + + private Chunk projectCsvChunk(Chunk chunk) { + Block[] blocks = new Block[refList.size()]; + int blockIndex = 0; + + for (int i = 0; i < refList.size(); i++) { + final Integer columnId = columnTransformer.getLocInOrc(chunkRefMap[refList.get(i)]); + + ColumnMeta sourceColumnMeta = columnTransformer.getSourceColumnMeta(i); + ColumnMeta targetColumnMeta = columnTransformer.getTargetColumnMeta(i); + TypeComparison comparison = columnTransformer.getCompareResult(i); + Block block; + + switch (comparison) { + case MISSING_EQUAL: + block = OSSColumnTransformer.fillDefaultValue( + targetColumnMeta.getDataType(), + columnTransformer.getInitColumnMeta(i), + columnTransformer.getTimeStamp(i), + chunk.getPositionCount(), + executionContext + ); + break; + case MISSING_NO_EQUAL: + block = OSSColumnTransformer.fillDefaultValueAndTransform( + targetColumnMeta, + columnTransformer.getInitColumnMeta(i), + chunk.getPositionCount(), + executionContext + ); + break; + default: + BlockConverter converter = Converters.createBlockConverter( + sourceColumnMeta.getDataType(), + targetColumnMeta.getDataType(), + executionContext + ); + block = converter.apply(chunk.getBlock(columnId - 1)); + break; + } + + if (block instanceof TimestampBlock) { + block = TimestampBlock.from((TimestampBlock) block, targetTimeZone); + } + blocks[blockIndex++] = block; + } + + Chunk result = new Chunk(blocks); + result.setPartIndex(partNum); + result.setPartCount(nodePartCount); + return result; + } + + protected Chunk rebuildProject(Chunk chunk, int[] selection, int selSize) { + Block[] blocks = new Block[inputRefsForProject.size()]; + int blockIndex = 0; + + // if all positions are selected, we should not use selection array. + boolean fullySelected = chunk.getPositionCount() == selSize; + + for (int projectRef : inputRefsForProject) { + // mapping blocks for projection. 
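+            // chunkRefMap translates a project input ref into the index of the
+            // corresponding block inside the chunk produced by projectCsvChunk.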
+ int chunkIndex = chunkRefMap[projectRef]; + Block block = chunk.getBlock(chunkIndex); + + if (!fullySelected) { + blocks[blockIndex++] = BlockUtils.fillSelection(block, selection, selSize, + useSelection, enableCompatible, targetTimeZone); + } else { + blocks[blockIndex++] = block; + } + } + + Chunk result = new Chunk(blocks); + result.setPartIndex(partNum); + result.setPartCount(nodePartCount); + return result; + } + + @Override + public void invoke(ExecutorService executor) { + executor.submit(() -> { + try { + handleNextWork(); + } catch (Throwable e) { + ioStatus.addException(e); + LOGGER.error("fail to execute csv scan work: ", e); + } + }); + } + + @Override + public IOStatus getIOStatus() { + return ioStatus; + } + + @Override + public String getWorkId() { + return workId; + } + + @Override + public RuntimeMetrics getMetrics() { + return metrics; + } + + @Override + public void close() throws IOException { + if (ioStatus != null) { + ioStatus.close(); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/Decimal64ToDecimalColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/Decimal64ToDecimalColumnReader.java new file mode 100644 index 000000000..ac85a8689 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/Decimal64ToDecimalColumnReader.java @@ -0,0 +1,175 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.DecimalBlock; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.alibaba.polardbx.optimizer.core.datatype.DecimalType; +import com.google.common.base.Preconditions; +import io.airlift.slice.Slice; +import org.apache.orc.OrcProto; +import org.apache.orc.impl.OrcIndex; + +import java.io.IOException; +import java.text.MessageFormat; + +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DECIMAL_MEMORY_SIZE; + +/** + * An implementation of column reader for reading Decimal64 values into a normal DecimalBlock + */ +public class Decimal64ToDecimalColumnReader extends AbstractLongColumnReader { + public Decimal64ToDecimalColumnReader(int columnId, boolean isPrimaryKey, StripeLoader stripeLoader, + OrcIndex orcIndex, + RuntimeMetrics metrics, OrcProto.ColumnEncoding.Kind kind, int indexStride, + boolean enableMetrics) { + super(columnId, isPrimaryKey, stripeLoader, orcIndex, metrics, kind, indexStride, enableMetrics); + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof DecimalBlock); + + init(); + + long start = System.nanoTime(); + + DecimalBlock block = (DecimalBlock) randomAccessBlock; + boolean[] nulls = block.nulls(); + Slice memorySegments = block.getMemorySegments(); + Preconditions.checkArgument(nulls != null && nulls.length == positionCount); + DecimalType decimalType = (DecimalType) block.getType(); + + if (present == null) { + randomAccessBlock.setHasNull(false); + int i = 0; + for (; i < positionCount && data.hasNext(); i++) { + // no null value. 
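+                // Expand the Decimal64 long into a full DecimalStructure written in
+                // place into the block's fixed-width memory segment.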
+ long longVal = data.next(); + int fromIndex = i * DECIMAL_MEMORY_SIZE; + DecimalStructure d2 = new DecimalStructure(memorySegments.slice(fromIndex, DECIMAL_MEMORY_SIZE)); + d2.setLongWithScale(longVal, decimalType.getScale()); + nulls[i] = false; + lastPosition++; + } + if (i < positionCount) { + throw GeneralUtil.nestedException(MessageFormat.format( + "Bad position, positionCount = {0}, workId = {1}", i, metrics.name() + )); + } + ((Block) randomAccessBlock).destroyNulls(true); + } else { + randomAccessBlock.setHasNull(true); + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + } else { + // if not null + long longVal = data.next(); + int fromIndex = i * DECIMAL_MEMORY_SIZE; + DecimalStructure d2 = new DecimalStructure(memorySegments.slice(fromIndex, DECIMAL_MEMORY_SIZE)); + d2.setLongWithScale(longVal, decimalType.getScale()); + nulls[i] = false; + } + lastPosition++; + } + } + + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } + + @Override + public int next(RandomAccessBlock randomAccessBlock, int positionCount, int[] selection, int selSize) + throws IOException { + + if (selection == null || selSize == 0 || selection.length == 0) { + next(randomAccessBlock, positionCount); + return 0; + } + + init(); + + long start = System.nanoTime(); + + DecimalBlock block = (DecimalBlock) randomAccessBlock; + boolean[] nulls = block.nulls(); + Slice memorySegments = block.getMemorySegments(); + Preconditions.checkArgument(nulls != null && nulls.length == positionCount); + DecimalType decimalType = (DecimalType) block.getType(); + + int totalSkipCount = 0; + if (present == null) { + randomAccessBlock.setHasNull(false); + + int lastSelectedPos = -1; + for (int i = 0; i < selSize; i++) { + int selectedPos = selection[i]; + + int skipPos = selectedPos - lastSelectedPos - 1; + if (skipPos > 0) { + data.skip(skipPos); + totalSkipCount += skipPos; + lastPosition += skipPos; + } + long longVal = data.next(); + int fromIndex = selectedPos * DECIMAL_MEMORY_SIZE; + DecimalStructure d2 = new DecimalStructure(memorySegments.slice(fromIndex, DECIMAL_MEMORY_SIZE)); + d2.setLongWithScale(longVal, decimalType.getScale()); + lastPosition++; + + lastSelectedPos = selectedPos; + } + + ((Block) randomAccessBlock).destroyNulls(true); + } else { + randomAccessBlock.setHasNull(true); + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + } else { + // if not null + long longVal = data.next(); + int fromIndex = i * DECIMAL_MEMORY_SIZE; + DecimalStructure d2 = new DecimalStructure(memorySegments.slice(fromIndex, DECIMAL_MEMORY_SIZE)); + d2.setLongWithScale(longVal, decimalType.getScale()); + nulls[i] = false; + } + lastPosition++; + } + } + + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + return totalSkipCount; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DecimalColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DecimalColumnReader.java new file mode 100644 index 000000000..2393857ad --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DecimalColumnReader.java @@ -0,0 +1,490 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may 
not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.charset.MySQLUnicodeUtils; +import com.alibaba.polardbx.common.datatype.DecimalConverter; +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.DecimalBlock; +import com.alibaba.polardbx.executor.chunk.DecimalBlockBuilder; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.operator.scan.AbstractColumnReader; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.MetricsNameBuilder; +import com.alibaba.polardbx.executor.operator.scan.metrics.ORCMetricsWrapper; +import com.alibaba.polardbx.executor.operator.scan.metrics.ProfileKeys; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.alibaba.polardbx.optimizer.core.datatype.DecimalType; +import com.codahale.metrics.Counter; +import com.google.common.base.Preconditions; +import io.airlift.slice.DynamicSliceOutput; +import io.airlift.slice.Slice; +import io.airlift.slice.SliceOutput; +import org.apache.orc.OrcProto; +import org.apache.orc.customized.ORCProfile; +import org.apache.orc.impl.BitFieldReader; +import org.apache.orc.impl.InStream; +import org.apache.orc.impl.OrcIndex; +import org.apache.orc.impl.PositionProvider; +import org.apache.orc.impl.RecordReaderImpl; +import org.apache.orc.impl.RunLengthIntegerReaderV2; +import org.apache.orc.impl.StreamName; + +import java.io.IOException; +import java.text.MessageFormat; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; + +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DECIMAL_MEMORY_SIZE; + +public class DecimalColumnReader extends AbstractColumnReader { + private static final int DEFAULT_INDEX_STRIDE = 10000; + + // basic metadata + private final StripeLoader stripeLoader; + + // in preheat mode, all row-indexes in orc-index should not be null. + private final OrcIndex orcIndex; + private final RuntimeMetrics metrics; + + private final boolean enableMetrics; + // for semantic parser + protected BitFieldReader present; + protected InStream dataStream; + protected RunLengthIntegerReaderV2 lengthReader; + // open parameters + private boolean[] rowGroupIncluded; + private boolean await; + // inner states + private AtomicBoolean openFailed; + private AtomicBoolean initializeOnlyOnce; + private AtomicBoolean isOpened; + // IO results + private Throwable throwable; + private Map inStreamMap; + private CompletableFuture> openFuture; + // record read positions + private int currentRowGroup; + private int lastPosition; + + // execution time metrics. 
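+    // preparingTimer: IO waiting plus stream-reader construction;
+    // seekTimer: row-group seeking; parseTimer: value decoding.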
+ private Counter preparingTimer; + private Counter seekTimer; + private Counter parseTimer; + + public DecimalColumnReader(int columnId, boolean isPrimaryKey, StripeLoader stripeLoader, OrcIndex orcIndex, + RuntimeMetrics metrics, boolean enableMetrics) { + super(columnId, isPrimaryKey); + this.stripeLoader = stripeLoader; + this.orcIndex = orcIndex; + this.metrics = metrics; + this.enableMetrics = enableMetrics; + + // inner states + openFailed = new AtomicBoolean(false); + initializeOnlyOnce = new AtomicBoolean(false); + isOpened = new AtomicBoolean(false); + throwable = null; + inStreamMap = null; + openFuture = null; + + // for parser + present = null; + dataStream = null; + lengthReader = null; + + // read position control + // The initial value is -1 means it must seek to the correct row group firstly. + currentRowGroup = -1; + lastPosition = -1; + + rowGroupIncluded = null; + await = false; + + if (enableMetrics) { + preparingTimer = metrics.addCounter( + MetricsNameBuilder.columnMetricsKey(columnId, ProfileKeys.ORC_COLUMN_IO_PREPARING_TIMER), + COLUMN_READER_TIMER, + ProfileKeys.ORC_COLUMN_IO_PREPARING_TIMER.getProfileUnit() + ); + + seekTimer = metrics.addCounter( + MetricsNameBuilder.columnMetricsKey(columnId, ProfileKeys.ORC_COLUMN_SEEK_TIMER), + COLUMN_READER_TIMER, + ProfileKeys.ORC_COLUMN_SEEK_TIMER.getProfileUnit() + ); + + parseTimer = metrics.addCounter( + MetricsNameBuilder.columnMetricsKey(columnId, ProfileKeys.ORC_COLUMN_PARSE_TIMER), + COLUMN_READER_TIMER, + ProfileKeys.ORC_COLUMN_PARSE_TIMER.getProfileUnit() + ); + } + } + + @Override + public boolean[] rowGroupIncluded() { + Preconditions.checkArgument(isOpened.get()); + return rowGroupIncluded; + } + + @Override + public boolean isOpened() { + return isOpened.get(); + } + + @Override + public void open(boolean await, boolean[] rowGroupIncluded) { + if (!isOpened.compareAndSet(false, true)) { + throw GeneralUtil.nestedException("It's not allowed to re-open this column reader."); + } + this.rowGroupIncluded = rowGroupIncluded; + this.await = await; + + // load the specified streams. + openFuture = stripeLoader.load(columnId, rowGroupIncluded); + + if (await) { + doWait(); + } + } + + @Override + public void open(CompletableFuture> loadFuture, + boolean await, boolean[] rowGroupIncluded) { + if (!isOpened.compareAndSet(false, true)) { + throw GeneralUtil.nestedException("It's not allowed to re-open this column reader."); + } + this.rowGroupIncluded = rowGroupIncluded; + this.await = await; + this.openFuture = loadFuture; + if (await) { + doWait(); + } + } + + // wait for open future and handle failure. + private void doWait() { + try { + inStreamMap = openFuture.get(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } catch (ExecutionException e) { + throw new RuntimeException(e); + } + + if (throwable != null) { + // throw if failed. + throw GeneralUtil.nestedException(throwable); + } + } + + protected void init() throws IOException { + if (!initializeOnlyOnce.compareAndSet(false, true)) { + return; + } + + long start = System.nanoTime(); + if (!await) { + doWait(); + } + if (openFailed.get()) { + return; + } + + // Unlike StripePlanner in raw ORC SDK, the stream names and IO production are + // all determined at runtime. 
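+        // A decimal column materializes up to three streams: PRESENT (null bitmap),
+        // DATA (binary decimal payload) and LENGTH (per-value byte length).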
+        StreamName presentName = new StreamName(columnId, OrcProto.Stream.Kind.PRESENT);
+        StreamName dataName = new StreamName(columnId, OrcProto.Stream.Kind.DATA);
+        StreamName lengthName = new StreamName(columnId, OrcProto.Stream.Kind.LENGTH);
+
+        InStream presentStream = inStreamMap.get(presentName);
+        dataStream = inStreamMap.get(dataName);
+        InStream lengthStream = inStreamMap.get(lengthName);
+
+        // initialize present and integer reader
+        present = presentStream == null ? null : new BitFieldReader(presentStream);
+        lengthReader = lengthStream == null ? null : new RunLengthIntegerReaderV2(lengthStream, false, true);
+
+        // Add memory metrics.
+        if (present != null) {
+            String metricsName = MetricsNameBuilder.streamMetricsKey(
+                presentName, ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER
+            );
+
+            ORCProfile memoryCounter = enableMetrics ? new ORCMetricsWrapper(
+                metricsName,
+                COLUMN_READER_MEMORY,
+                ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER.getProfileUnit(),
+                metrics
+            ) : null;
+
+            present.setMemoryCounter(memoryCounter);
+        }
+
+        if (lengthReader != null) {
+            String metricsName = MetricsNameBuilder.streamMetricsKey(
+                lengthName, ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER
+            );
+
+            ORCProfile memoryCounter = enableMetrics ? new ORCMetricsWrapper(
+                metricsName,
+                COLUMN_READER_MEMORY,
+                ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER.getProfileUnit(),
+                metrics
+            ) : null;
+
+            lengthReader.setMemoryCounter(memoryCounter);
+        }
+
+        // metrics time cost of preparing (IO waiting + data stream reader constructing)
+        if (enableMetrics) {
+            preparingTimer.inc(System.nanoTime() - start);
+        }
+    }
+
+    @Override
+    public void startAt(int rowGroupId, int elementPosition) throws IOException {
+        Preconditions.checkArgument(isOpened.get());
+        Preconditions.checkArgument(!openFailed.get());
+        init();
+
+        long start = System.nanoTime();
+        if (rowGroupId != currentRowGroup || elementPosition < lastPosition) {
+            // case 1: when the given row group differs from the current group, seek to its position.
+            // case 2: when elementPosition < lastPosition, we need to go back to the start position of this row group.
+            seek(rowGroupId);
+
+            long actualSkipRows = skipPresent(elementPosition);
+
+            // skip on length int-reader and record the skipped length.
+            long lengthToSkip = 0;
+            for (int i = 0; i < actualSkipRows; ++i) {
+                lengthToSkip += lengthReader.next();
+            }
+
+            // skip on data InStream
+            while (dataStream != null && lengthToSkip > 0) {
+                lengthToSkip -= dataStream.skip(lengthToSkip);
+            }
+
+            lastPosition = elementPosition;
+            currentRowGroup = rowGroupId;
+        } else if (elementPosition > lastPosition && elementPosition < DEFAULT_INDEX_STRIDE) {
+            // case 3: when elementPosition > lastPosition and the group is the same, just skip to the given position.
+            long actualSkipRows = skipPresent(elementPosition - lastPosition);
+
+            // skip on length int-reader and record the skipped length.
+            long lengthToSkip = 0;
+            for (int i = 0; i < actualSkipRows; ++i) {
+                lengthToSkip += lengthReader.next();
+            }
+
+            // skip on data InStream
+            while (dataStream != null && lengthToSkip > 0) {
+                lengthToSkip -= dataStream.skip(lengthToSkip);
+            }
+
+            lastPosition = elementPosition;
+        } else if (elementPosition >= DEFAULT_INDEX_STRIDE) {
+            // case 4: the position is out of range.
+            throw GeneralUtil.nestedException("Invalid element position: " + elementPosition);
+        }
+        // case 5: the elementPosition == lastPosition and the rowGroupId is equal.
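+        // nothing to do: the readers are already positioned at the requested element.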
+
+        // metrics
+        if (enableMetrics) {
+            seekTimer.inc(System.nanoTime() - start);
+        }
+    }
+
+    // Try to skip rows on the present stream and count
+    // the actual number of rows the data stream needs to skip.
+    protected long skipPresent(long rows) throws IOException {
+        if (present == null) {
+            return rows;
+        }
+
+        long result = 0;
+        for (long c = 0; c < rows; ++c) {
+            // record the count of non-null values
+            // in range of [current_position, current_position + rows)
+            if (present.next() == 1) {
+                result += 1;
+            }
+        }
+        // It must be less than or equal to the count of rows.
+        return result;
+    }
+
+    @Override
+    public void seek(int rowGroupId) throws IOException {
+        Preconditions.checkArgument(isOpened.get());
+        Preconditions.checkArgument(!openFailed.get());
+        init();
+
+        // Find the position-provider of the given column and row group.
+        PositionProvider positionProvider;
+        OrcProto.RowIndex[] rowIndices = orcIndex.getRowGroupIndex();
+        OrcProto.RowIndexEntry entry = rowIndices[columnId].getEntry(rowGroupId);
+        // This is effectively a test for pre-ORC-569 files.
+        if (rowGroupId == 0 && entry.getPositionsCount() == 0) {
+            positionProvider = new RecordReaderImpl.ZeroPositionProvider();
+        } else {
+            positionProvider = new RecordReaderImpl.PositionProviderImpl(entry);
+        }
+
+        // NOTE: The order of seeking is strict!
+        if (present != null) {
+            present.seek(positionProvider);
+        }
+        if (dataStream != null) {
+            dataStream.seek(positionProvider);
+        }
+        if (lengthReader != null) {
+            lengthReader.seek(positionProvider);
+        }
+
+        currentRowGroup = rowGroupId;
+        lastPosition = 0;
+    }
+
+    @Override
+    public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException {
+        Preconditions.checkArgument(isOpened.get());
+        Preconditions.checkArgument(!openFailed.get());
+        Preconditions.checkArgument(randomAccessBlock instanceof DecimalBlock);
+        init();
+
+        long start = System.nanoTime();
+
+        DecimalBlock block = (DecimalBlock) randomAccessBlock;
+        boolean[] nulls = block.nulls();
+        Slice memorySegments = block.getMemorySegments();
+        Preconditions.checkArgument(nulls != null && nulls.length == positionCount);
+        DecimalType decimalType = (DecimalType) block.getType();
+
+        if (present == null) {
+            randomAccessBlock.setHasNull(false);
+
+            if (lengthReader != null) {
+                for (int i = 0; i < positionCount; i++) {
+                    // no null value.
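+                    // LENGTH gives the byte size of the serialized decimal; readDecimalBin
+                    // consumes exactly that many bytes from DATA before binToDecimal
+                    // materializes the value into the block's memory segment.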
+ long length = lengthReader.next(); + byte[] decimalBin = readDecimalBin(length); + int fromIndex = i * DECIMAL_MEMORY_SIZE; + DecimalStructure d2 = new DecimalStructure(memorySegments.slice(fromIndex, DECIMAL_MEMORY_SIZE)); + DecimalConverter.binToDecimal(decimalBin, d2, decimalType.getPrecision(), decimalType.getScale()); + nulls[i] = false; + lastPosition++; + } + } + + } else { + randomAccessBlock.setHasNull(true); + + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + } else { + // if not null + long length = lengthReader.next(); + byte[] decimalBin = readDecimalBin(length); + int fromIndex = i * DECIMAL_MEMORY_SIZE; + DecimalStructure d2 = new DecimalStructure(memorySegments.slice(fromIndex, DECIMAL_MEMORY_SIZE)); + DecimalConverter.binToDecimal(decimalBin, d2, decimalType.getPrecision(), decimalType.getScale()); + nulls[i] = false; + } + lastPosition++; + } + } + + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } + + private byte[] readDecimalBin(long length) throws IOException { + int len = (int) length; + byte[] tmp = new byte[len]; + byte[] read = new byte[len]; + int num = dataStream.read(read); + if (num < len) { + throw new IOException("Failed to read bytes with length: " + length); + } + boolean isUtf8FromLatin1 = + MySQLUnicodeUtils.utf8ToLatin1(read, 0, (int) length, tmp); + + if (!isUtf8FromLatin1) { + // in columnar, decimals are stored already in latin1 encoding + System.arraycopy(read, 0, tmp, 0, len); + } + return tmp; + } + + @Override + public void close() { + if (!isClosed.compareAndSet(false, true)) { + return; + } + + // 1. Clear the resources allocated in InStream + StreamName presentName = new StreamName(columnId, OrcProto.Stream.Kind.PRESENT); + StreamName dataName = new StreamName(columnId, OrcProto.Stream.Kind.DATA); + StreamName lengthName = new StreamName(columnId, OrcProto.Stream.Kind.LENGTH); + + if (inStreamMap != null) { + InStream presentStream = inStreamMap.get(presentName); + InStream dataStream = inStreamMap.get(dataName); + InStream lengthStream = inStreamMap.get(lengthName); + + if (presentStream != null) { + presentStream.close(); + } + + if (dataStream != null) { + dataStream.close(); + } + + if (lengthStream != null) { + lengthStream.close(); + } + } + + // 2. Clear the memory resources held by stream + long releasedBytes = 0L; + releasedBytes += stripeLoader.clearStream(presentName); + releasedBytes += stripeLoader.clearStream(dataName); + releasedBytes += stripeLoader.clearStream(lengthName); + + if (releasedBytes > 0) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(MessageFormat.format( + "Release the resource of work: {0}, columnId: {1}, bytes: {2}", + metrics.name(), columnId, releasedBytes + )); + } + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DefaultLazyEvaluator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DefaultLazyEvaluator.java new file mode 100644 index 000000000..6c93fcaa6 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DefaultLazyEvaluator.java @@ -0,0 +1,684 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.bloomfilter.RFBloomFilter; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.BlockBuilders; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.chunk.IntegerBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; +import com.alibaba.polardbx.executor.chunk.MutableChunk; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItem; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItemKey; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFManager; +import com.alibaba.polardbx.executor.operator.scan.LazyEvaluator; +import com.alibaba.polardbx.executor.operator.scan.RFEfficiencyChecker; +import com.alibaba.polardbx.executor.vectorized.EvaluationContext; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.statis.OperatorStatistics; +import com.google.common.base.Preconditions; +import org.jetbrains.annotations.NotNull; +import org.roaringbitmap.RelativeRangeConsumer; +import org.roaringbitmap.RoaringBitmap; + +import java.util.Arrays; +import java.util.BitSet; +import java.util.List; +import java.util.Map; + +/** + * Specialized for expression evaluation. + */ +public class DefaultLazyEvaluator implements LazyEvaluator { + private final VectorizedExpression condition; + private final MutableChunk preAllocatedChunk; + private final boolean zeroCopy; + + /** + * The count of input columns. + */ + private final int inputVectorCount; + + /** + * The bitmap of columns for filter evaluation, and it's a subset of project columns. + */ + private final boolean[] filterVectorBitmap; + + /** + * Record some session-level or global variables related to evaluation. + */ + private final ExecutionContext context; + + private final List inputTypes; + + private final double ratio; + + private final boolean reuseVector; + private EvaluationContext evaluationContext; + + /** + * for constant expression. 
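+     * A constant condition references no input column, so its result does not
+     * depend on the scanned data.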
+ */ + private final boolean isConstant; + + private volatile FragmentRFManager fragmentRFManager; + private OperatorStatistics operatorStatistics; + private RFEfficiencyChecker efficiencyChecker; + private Map rfBloomFilterMap; + + public DefaultLazyEvaluator(VectorizedExpression condition, MutableChunk preAllocatedChunk, boolean zeroCopy, + int inputVectorCount, boolean[] filterVectorBitmap, ExecutionContext context, + List inputTypes, double ratio) { + this.condition = condition; + this.preAllocatedChunk = preAllocatedChunk; + this.zeroCopy = zeroCopy; + this.inputVectorCount = inputVectorCount; + this.filterVectorBitmap = filterVectorBitmap; + this.context = context; + this.inputTypes = inputTypes; + this.ratio = ratio; + this.reuseVector = context.getParamManager().getBoolean(ConnectionParams.ENABLE_REUSE_VECTOR); + this.evaluationContext = new EvaluationContext(preAllocatedChunk, context); + this.isConstant = VectorizedExpressionUtils.isConstantExpression(condition); + } + + enum EvaluationStrategy { + /** + * No element is selected for evaluation. + */ + NO_SELECTION, + /** + * All elements are selected for evaluation. + */ + FULL_SELECTION, + /** + * Only part of elements are selected for evaluation. + */ + PARTIAL_SELECTION, + /** + * All elements are selected for evaluation. + * And then, elements marked as deleted (in deletion bitmap) will be removed. + */ + POST_INTERSECTION; + + static EvaluationStrategy get(int positionCount, int cardinality, double ratio) { + EvaluationStrategy evaluationStrategy; + if (cardinality == positionCount) { + evaluationStrategy = EvaluationStrategy.NO_SELECTION; + } else if (cardinality == 0) { + evaluationStrategy = EvaluationStrategy.FULL_SELECTION; + } else if (cardinality < positionCount * ratio) { + evaluationStrategy = EvaluationStrategy.POST_INTERSECTION; + } else { + evaluationStrategy = EvaluationStrategy.PARTIAL_SELECTION; + } + return evaluationStrategy; + } + } + + public void registerRF(FragmentRFManager fragmentRFManager, OperatorStatistics operatorStatistics, + Map rfBloomFilterMap) { + this.fragmentRFManager = fragmentRFManager; + this.operatorStatistics = operatorStatistics; + this.efficiencyChecker = new RFEfficiencyCheckerImpl( + fragmentRFManager.getSampleCount(), fragmentRFManager.getFilterRatioThreshold()); + this.rfBloomFilterMap = rfBloomFilterMap; + } + + public static DefaultLazyEvaluatorBuilder builder() { + return new DefaultLazyEvaluatorBuilder(); + } + + public VectorizedExpression getCondition() { + return condition; + } + + private int[] genPreSelection(int startPosition, int positionCount, RoaringBitmap deletion, int cardinality) { + final int[] preSelection = new int[positionCount - cardinality]; + + // remove deleted positions. 
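+        // forAllInRange walks [startPosition, startPosition + positionCount) once:
+        // "present" positions are deleted rows and are skipped, while "absent"
+        // positions survive and are appended to the selection array.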
+ deletion.forAllInRange(startPosition, positionCount, new RelativeRangeConsumer() { + private int selectionIndex = 0; + + @Override + public void acceptPresent(int relativePos) { + // for present index, nothing to do + } + + @Override + public void acceptAbsent(int relativePos) { + preSelection[selectionIndex++] = relativePos; + } + + @Override + public void acceptAllPresent(int relativeFrom, int relativeTo) { + // for present index, nothing to do + } + + @Override + public void acceptAllAbsent(int relativeFrom, int relativeTo) { + for (int pos = relativeFrom; pos < relativeTo; pos++) { + preSelection[selectionIndex++] = pos; + } + } + }); + + return preSelection; + } + + @Override + public int eval(Chunk chunk, int startPosition, int positionCount, RoaringBitmap deletion, boolean[] bitmap) { + // check existence in given range. + long cardinality = deletion.rangeCardinality(startPosition, startPosition + positionCount); + Preconditions.checkArgument(cardinality <= positionCount); + + EvaluationStrategy evaluationStrategy = EvaluationStrategy.get( + positionCount, (int) cardinality, ratio + ); + + // preprocessing of evaluation. + switch (evaluationStrategy) { + case NO_SELECTION: { + // all positions are marked as deleted. + return 0; + } + case PARTIAL_SELECTION: { + // partial positions are selected. + final int[] preSelection = genPreSelection(startPosition, positionCount, deletion, (int) cardinality); + + // Allocate or reuse memory for intermediate results + final int resultRows = chunk.getPositionCount(); + // Clean the chunk since last round may use selection + preAllocatedChunk.setSelectionInUse(false); + if (reuseVector) { + preAllocatedChunk.allocateWithReuse(resultRows, inputVectorCount); + } else { + preAllocatedChunk.reallocate(resultRows, inputVectorCount, false); + } + + // Prepare selection array for evaluation. + preAllocatedChunk.setBatchSize(preSelection.length); + preAllocatedChunk.setSelection(preSelection); + preAllocatedChunk.setSelectionInUse(true); + + break; + } + case POST_INTERSECTION: + case FULL_SELECTION: { + // Allocate or reuse memory for intermediate results + final int resultRows = chunk.getPositionCount(); + // Clean the chunk since last round may use selection + preAllocatedChunk.setSelectionInUse(false); + if (reuseVector) { + preAllocatedChunk.allocateWithReuse(resultRows, inputVectorCount); + } else { + preAllocatedChunk.reallocate(resultRows, inputVectorCount, false); + } + + break; + } + default: + } + + if (zeroCopy) { + // If in zero copy mode, set reference of original block into pre-allocated chunk. + for (int i = 0; i < inputVectorCount; i++) { + if (filterVectorBitmap[i]) { + Block block = chunk.getBlock(i); + preAllocatedChunk.setSlotAt(block.cast(RandomAccessBlock.class), i); + } + } + } else { + // If not in zero copy mode, copy all blocks for filter evaluation. 
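+            // The copy goes through a BlockBuilder so that the vectors used during
+            // evaluation own their memory and do not alias the source blocks.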
+ for (int i = 0; i < inputVectorCount; i++) { + if (filterVectorBitmap[i]) { + Block cachedBlock = chunk.getBlock(i); + DataType dataType = inputTypes.get(i); + BlockBuilder blockBuilder = BlockBuilders.create(dataType, context); + + for (int j = 0; j < chunk.getPositionCount(); j++) { + cachedBlock.writePositionTo(j, blockBuilder); + } + + RandomAccessBlock copied = (RandomAccessBlock) blockBuilder.build(); + preAllocatedChunk.setSlotAt(copied, i); + } + } + } + + // Do evaluation + condition.eval(evaluationContext); + + // clear bitmap + Arrays.fill(bitmap, false); + + // get filtered selection result + int selectedCount = 0; + switch (evaluationStrategy) { + case FULL_SELECTION: { + selectedCount = handleFullSelection(bitmap, positionCount); + break; + } + case PARTIAL_SELECTION: { + selectedCount = handlePartialSelection(bitmap, positionCount); + break; + } + case POST_INTERSECTION: { + selectedCount = handlePostIntersection(bitmap, positionCount, deletion, startPosition); + break; + } + case NO_SELECTION: + default: { + throw GeneralUtil.nestedException("invalid strategy for post processing."); + } + } + + if (fragmentRFManager == null) { + return selectedCount; + } + + // handle RF + final int totalPartitionCount = fragmentRFManager.getTotalPartitionCount(); + final int selectedCountBeforeRF = selectedCount; + for (Map.Entry entry : fragmentRFManager.getAllItems().entrySet()) { + + FragmentRFItem item = entry.getValue(); + int filterChannel = item.getSourceFilterChannel(); + boolean useXXHashInFilter = item.useXXHashInFilter(); + FragmentRFManager.RFType rfType = item.getRFType(); + + FragmentRFItemKey itemKey = entry.getKey(); + RFBloomFilter[] rfBloomFilters = rfBloomFilterMap.get(itemKey); + + // We have not received the runtime filter of this item key from build side. + if (rfBloomFilters == null) { + continue; + } + + // check runtime filter efficiency. + if (!efficiencyChecker.check(itemKey)) { + continue; + } + + final int originalCount = selectedCount; + Block filterBlock = chunk.getBlock(filterChannel).cast(Block.class); + switch (rfType) { + case BROADCAST: { + selectedCount = filterBlock.mightContainsLong(rfBloomFilters[0], bitmap, true); + break; + } + case LOCAL: { + if (useXXHashInFilter) { + // The partition of this chunk is consistent. + selectedCount = + filterBlock.mightContainsLong(totalPartitionCount, rfBloomFilters, bitmap, true, true); + } else { + // For local test. + int hitCount = 0; + for (int pos = 0; pos < chunk.getPositionCount(); pos++) { + + if (bitmap[pos]) { + int partition = getPartition(filterBlock, pos, totalPartitionCount); + + int hashCode = filterBlock.hashCode(pos); + bitmap[pos] &= rfBloomFilters[partition].mightContainInt(hashCode); + if (bitmap[pos]) { + hitCount++; + } + } + + } + + selectedCount = hitCount; + } + break; + } + } + // sample the filter ratio of runtime filter. + efficiencyChecker.sample(itemKey, originalCount, selectedCount); + + } + // statistics for filtered rows by runtime filter. 
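+        // selectedCountBeforeRF - selectedCount is the number of rows removed by all RF items.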
+ operatorStatistics.addRuntimeFilteredCount(selectedCountBeforeRF - selectedCount); + + return selectedCount; + } + + private static int getPartition(Block block, int position, int partitionCount) { + + // Convert the searchVal from field space to hash space + long hashVal = block.hashCodeUseXxhash(position); + int partition = (int) ((hashVal & Long.MAX_VALUE) % partitionCount); + + return partition; + } + + @Override + public boolean isConstantExpression() { + return isConstant; + } + + @Override + public BitSet eval(Chunk chunk, int startPosition, int positionCount, RoaringBitmap deletion) { + + // check existence in given range. + long cardinality = deletion.rangeCardinality(startPosition, startPosition + positionCount); + Preconditions.checkArgument(cardinality <= positionCount); + + final EvaluationStrategy evaluationStrategy = EvaluationStrategy.get( + positionCount, (int) cardinality, ratio + ); + + // preprocessing of evaluation. + switch (evaluationStrategy) { + case NO_SELECTION: { + // all positions are marked as deleted. + return new BitSet(0); + } + case PARTIAL_SELECTION: { + // partial positions are selected. + final int[] preSelection = genPreSelection(startPosition, positionCount, deletion, (int) cardinality); + + // Allocate or reuse memory for intermediate results + final int resultRows = chunk.getPositionCount(); + // Clean the chunk since last round may use selection + preAllocatedChunk.setSelectionInUse(false); + if (reuseVector) { + preAllocatedChunk.allocateWithReuse(resultRows, inputVectorCount); + } else { + preAllocatedChunk.reallocate(resultRows, inputVectorCount, false); + } + + // Prepare selection array for evaluation. + preAllocatedChunk.setBatchSize(preSelection.length); + preAllocatedChunk.setSelection(preSelection); + preAllocatedChunk.setSelectionInUse(true); + + break; + } + case POST_INTERSECTION: + case FULL_SELECTION: { + // Allocate or reuse memory for intermediate results + final int resultRows = chunk.getPositionCount(); + // Clean the chunk since last round may use selection + preAllocatedChunk.setSelectionInUse(false); + if (reuseVector) { + preAllocatedChunk.allocateWithReuse(resultRows, inputVectorCount); + } else { + preAllocatedChunk.reallocate(resultRows, inputVectorCount, false); + } + + break; + } + default: + } + + if (zeroCopy) { + // If in zero copy mode, set reference of original block into pre-allocated chunk. + for (int i = 0; i < inputVectorCount; i++) { + if (filterVectorBitmap[i]) { + Block block = chunk.getBlock(i); + preAllocatedChunk.setSlotAt(block.cast(RandomAccessBlock.class), i); + } + } + } else { + // If not in zero copy mode, copy all blocks for filter evaluation. 
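+            // Same copy-through-BlockBuilder path as in the eval overload above.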
+            for (int i = 0; i < inputVectorCount; i++) {
+                if (filterVectorBitmap[i]) {
+                    Block cachedBlock = chunk.getBlock(i);
+                    DataType dataType = inputTypes.get(i);
+                    BlockBuilder blockBuilder = BlockBuilders.create(dataType, context);
+
+                    for (int j = 0; j < chunk.getPositionCount(); j++) {
+                        cachedBlock.writePositionTo(j, blockBuilder);
+                    }
+
+                    RandomAccessBlock copied = (RandomAccessBlock) blockBuilder.build();
+                    preAllocatedChunk.setSlotAt(copied, i);
+                }
+            }
+        }
+
+        // Do evaluation
+        condition.eval(evaluationContext);
+
+        // get filtered selection result
+        switch (evaluationStrategy) {
+            case FULL_SELECTION: {
+                return handleFullSelection(positionCount);
+            }
+            case PARTIAL_SELECTION: {
+                return handlePartialSelection(positionCount);
+            }
+            case POST_INTERSECTION: {
+                return handlePostIntersection(positionCount, deletion, startPosition);
+            }
+            case NO_SELECTION:
+            default: {
+                throw GeneralUtil.nestedException("invalid strategy for post processing.");
+            }
+        }
+    }
+
+    @NotNull
+    private BitSet handlePostIntersection(int positionCount, RoaringBitmap deletion, int startPosition) {
+        RandomAccessBlock resultBlock = preAllocatedChunk.slotIn(condition.getOutputIndex());
+        boolean[] nulls = resultBlock.nulls();
+        int batchSize = preAllocatedChunk.batchSize();
+        BitSet result = new BitSet(positionCount);
+        if (resultBlock instanceof LongBlock) {
+            long[] longInputArray = ((LongBlock) resultBlock).longArray();
+            for (int i = 0; i < batchSize; i++) {
+                if (longInputArray[i] != 0 &&
+                    (nulls == null || !nulls[i]) &&
+                    !deletion.contains(i + startPosition)) {
+                    result.set(i);
+                }
+            }
+        } else if (resultBlock instanceof IntegerBlock) {
+            int[] intInputArray = ((IntegerBlock) resultBlock).intArray();
+            for (int i = 0; i < batchSize; i++) {
+                if (intInputArray[i] != 0 &&
+                    (nulls == null || !nulls[i]) &&
+                    !deletion.contains(i + startPosition)) {
+                    result.set(i);
+                }
+            }
+        } else {
+            throw GeneralUtil.nestedException("Invalid result block: " + resultBlock.getClass());
+        }
+
+        return result;
+    }
+
+    @NotNull
+    private BitSet handlePartialSelection(int positionCount) {
+        // Get pre-selection array for position mapping
+        Preconditions.checkArgument(preAllocatedChunk.isSelectionInUse());
+        int batchSize = preAllocatedChunk.batchSize();
+        int[] preSelection = preAllocatedChunk.selection();
+        RandomAccessBlock resultBlock = preAllocatedChunk.slotIn(condition.getOutputIndex());
+        boolean[] nulls = resultBlock.nulls();
+        BitSet result = new BitSet(positionCount);
+        if (resultBlock instanceof LongBlock) {
+            long[] longInputArray = ((LongBlock) resultBlock).longArray();
+            for (int i = 0; i < batchSize; i++) {
+                int j = preSelection[i];
+                if (longInputArray[j] != 0 &&
+                    (nulls == null || !nulls[j])) {
+                    result.set(j);
+                }
+            }
+        } else if (resultBlock instanceof IntegerBlock) {
+            int[] intInputArray = ((IntegerBlock) resultBlock).intArray();
+            for (int i = 0; i < batchSize; i++) {
+                int j = preSelection[i];
+                if (intInputArray[j] != 0 &&
+                    (nulls == null || !nulls[j])) {
+                    result.set(j);
+                }
+            }
+        } else {
+            throw GeneralUtil.nestedException("Invalid result block: " + resultBlock.getClass());
+        }
+
+        return result;
+    }
+
+    @NotNull
+    private BitSet handleFullSelection(int positionCount) {
+        RandomAccessBlock resultBlock = preAllocatedChunk.slotIn(condition.getOutputIndex());
+        boolean[] nulls = resultBlock.nulls();
+        BitSet result = new BitSet(positionCount);
+        int batchSize = preAllocatedChunk.batchSize();
+        if (resultBlock instanceof LongBlock) {
+            long[] longInputArray = ((LongBlock) resultBlock).longArray();
+            for (int i = 0; i < batchSize; i++) {
+                if (longInputArray[i] != 0 &&
+                    (nulls == null || !nulls[i])) {
+                    result.set(i);
+                }
+            }
+        } else if (resultBlock instanceof IntegerBlock) {
+            int[] intInputArray = ((IntegerBlock) resultBlock).intArray();
+            for (int i = 0; i < batchSize; i++) {
+                if (intInputArray[i] != 0 &&
+                    (nulls == null || !nulls[i])) {
+                    result.set(i);
+                }
+            }
+        } else {
+            throw GeneralUtil.nestedException("Invalid result block: " + resultBlock.getClass());
+        }
+
+        return result;
+    }
+
+    @NotNull
+    private int handlePostIntersection(boolean[] bitmap, int positionCount, RoaringBitmap deletion, int startPosition) {
+        RandomAccessBlock resultBlock = preAllocatedChunk.slotIn(condition.getOutputIndex());
+        boolean[] nulls = resultBlock.nulls();
+        int batchSize = preAllocatedChunk.batchSize();
+
+        int selectCount = 0;
+
+        if (resultBlock instanceof LongBlock) {
+            long[] longInputArray = ((LongBlock) resultBlock).longArray();
+            for (int i = 0; i < batchSize; i++) {
+                if (longInputArray[i] != 0 &&
+                    (nulls == null || !nulls[i]) &&
+                    !deletion.contains(i + startPosition)) {
+                    bitmap[i] = true;
+                    selectCount++;
+                }
+            }
+        } else if (resultBlock instanceof IntegerBlock) {
+            int[] intInputArray = ((IntegerBlock) resultBlock).intArray();
+            for (int i = 0; i < batchSize; i++) {
+                if (intInputArray[i] != 0 &&
+                    (nulls == null || !nulls[i]) &&
+                    !deletion.contains(i + startPosition)) {
+                    bitmap[i] = true;
+                    selectCount++;
+                }
+            }
+        } else {
+            throw GeneralUtil.nestedException("Invalid result block: " + resultBlock.getClass());
+        }
+
+        return selectCount;
+    }
+
+    @NotNull
+    private int handlePartialSelection(boolean[] bitmap, int positionCount) {
+        // Get pre-selection array for position mapping
+        Preconditions.checkArgument(preAllocatedChunk.isSelectionInUse());
+        int batchSize = preAllocatedChunk.batchSize();
+        int[] preSelection = preAllocatedChunk.selection();
+        RandomAccessBlock resultBlock = preAllocatedChunk.slotIn(condition.getOutputIndex());
+        boolean[] nulls = resultBlock.nulls();
+
+        int selectCount = 0;
+
+        if (resultBlock instanceof LongBlock) {
+            long[] longInputArray = ((LongBlock) resultBlock).longArray();
+            for (int i = 0; i < batchSize; i++) {
+                int j = preSelection[i];
+                if (longInputArray[j] != 0 &&
+                    (nulls == null || !nulls[j])) {
+                    bitmap[j] = true;
+                    selectCount++;
+                }
+            }
+        } else if (resultBlock instanceof IntegerBlock) {
+            int[] intInputArray = ((IntegerBlock) resultBlock).intArray();
+            for (int i = 0; i < batchSize; i++) {
+                int j = preSelection[i];
+                if (intInputArray[j] != 0 &&
+                    (nulls == null || !nulls[j])) {
+                    bitmap[j] = true;
+                    selectCount++;
+                }
+            }
+        } else {
+            throw GeneralUtil.nestedException("Invalid result block: " + resultBlock.getClass());
+        }
+
+        return selectCount;
+    }
+
+    @NotNull
+    private int handleFullSelection(boolean[] bitmap, int positionCount) {
+        RandomAccessBlock resultBlock = preAllocatedChunk.slotIn(condition.getOutputIndex());
+        boolean[] nulls = resultBlock.nulls();
+
+        int selectCount = 0;
+
+        int batchSize = preAllocatedChunk.batchSize();
+        if (resultBlock instanceof LongBlock) {
+            long[] longInputArray = ((LongBlock) resultBlock).longArray();
+            for (int i = 0; i < batchSize; i++) {
+                if (longInputArray[i] != 0 &&
+                    (nulls == null || !nulls[i])) {
+                    bitmap[i] = true;
+                    selectCount++;
+                }
+            }
+        } else if (resultBlock instanceof IntegerBlock) {
+            int[] intInputArray = ((IntegerBlock) resultBlock).intArray();
+            for (int i = 0; i < batchSize; i++) {
+                if (intInputArray[i] != 0 &&
+                    (nulls == null || !nulls[i])) {
+                    bitmap[i] = true;
+                    selectCount++;
+                }
+            }
+        } else {
+            throw GeneralUtil.nestedException("Invalid result block: " + resultBlock.getClass());
+        }
+
+        return selectCount;
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DefaultLazyEvaluatorBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DefaultLazyEvaluatorBuilder.java
new file mode 100644
index 000000000..b25973045
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DefaultLazyEvaluatorBuilder.java
@@ -0,0 +1,128 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan.impl;
+
+import com.alibaba.polardbx.executor.chunk.Chunk;
+import com.alibaba.polardbx.executor.chunk.MutableChunk;
+import com.alibaba.polardbx.executor.operator.scan.LazyEvaluator;
+import com.alibaba.polardbx.executor.operator.scan.LazyEvaluatorBuilder;
+import com.alibaba.polardbx.executor.vectorized.VectorizedExpression;
+import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils;
+import com.alibaba.polardbx.executor.vectorized.build.InputRefTypeChecker;
+import com.alibaba.polardbx.executor.vectorized.build.Rex2VectorizedExpressionVisitor;
+import com.alibaba.polardbx.executor.vectorized.build.VectorizedExpressionBuilder;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.datatype.DataType;
+import org.apache.calcite.rex.RexNode;
+
+import java.util.Arrays;
+import java.util.BitSet;
+import java.util.List;
+import java.util.stream.Collectors;
+
+public class DefaultLazyEvaluatorBuilder implements LazyEvaluatorBuilder {
+
+    /**
+     * Input types of table scan including filter and project columns.
+     */
+    List> inputTypes;
+
+    /**
+     * Contains the parameter list used in the expression.
+     */
+    ExecutionContext context;
+
+    /**
+     * Represents the abstract tree structure of the expression.
+     */
+    RexNode rexNode;
+
+    /**
+     * The ratio to distinguish between the partial-selection case and the post-intersection case.
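+     * A deleted-row count below positionCount * ratio selects the post-intersection
+     * strategy; otherwise partial selection is used.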
+ */ + double ratio; + + public DefaultLazyEvaluatorBuilder setRexNode(RexNode rexNode) { + this.rexNode = rexNode; + return this; + } + + public DefaultLazyEvaluatorBuilder setRatio(double ratio) { + this.ratio = ratio; + return this; + } + + /** + * In normal case, InputTypes includes only referenced columns + */ + public DefaultLazyEvaluatorBuilder setInputTypes(List> inputTypes) { + this.inputTypes = inputTypes; + return this; + } + + public DefaultLazyEvaluatorBuilder setContext(ExecutionContext context) { + this.context = context; + return this; + } + + @Override + public LazyEvaluator build() { + RexNode root = VectorizedExpressionBuilder.rewriteRoot(rexNode, true); + + InputRefTypeChecker inputRefTypeChecker = new InputRefTypeChecker(inputTypes); + root = root.accept(inputRefTypeChecker); + + Rex2VectorizedExpressionVisitor converter = + new Rex2VectorizedExpressionVisitor(context, inputTypes.size()); + VectorizedExpression condition = root.accept(converter); + + // Data types of intermediate results or final results. + List> filterOutputTypes = converter.getOutputDataTypes(); + + // placeholder for input and output blocks + MutableChunk preAllocatedChunk = MutableChunk.newBuilder(context.getExecutorChunkLimit()) + .addEmptySlots(inputTypes) + .addEmptySlots(filterOutputTypes) + .addChunkLimit(context.getExecutorChunkLimit()) + .addOutputIndexes(new int[] {condition.getOutputIndex()}) + .addLiteralBitmap(converter.getLiteralBitmap()) + .build(); + + // prepare filter bitmap + List inputVectorIndex = VectorizedExpressionUtils.getInputIndex(condition); + + // filterVectorBitmap means which positions of vectors should be replaced by input-blocks. + boolean[] filterVectorBitmap = new boolean[inputTypes.size() + filterOutputTypes.size()]; + Arrays.fill(filterVectorBitmap, false); + for (int i : inputVectorIndex) { + filterVectorBitmap[i] = true; + } + + DefaultLazyEvaluator lazyEvaluator = new DefaultLazyEvaluator( + condition, + preAllocatedChunk, + true, + inputTypes.size(), + filterVectorBitmap, + context, + inputTypes.stream().map(DataType.class::cast).collect(Collectors.toList()), + ratio + ); + + return lazyEvaluator; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DefaultScanPreProcessor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DefaultScanPreProcessor.java new file mode 100644 index 000000000..eb40a20ea --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DefaultScanPreProcessor.java @@ -0,0 +1,437 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.jdbc.ParameterContext; +import com.alibaba.polardbx.common.jdbc.Parameters; +import com.alibaba.polardbx.common.oss.ColumnarFileType; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.archive.reader.OSSColumnTransformer; +import com.alibaba.polardbx.executor.columnar.pruning.ColumnarPruneManager; +import com.alibaba.polardbx.executor.columnar.pruning.data.PruneUtils; +import com.alibaba.polardbx.executor.columnar.pruning.index.IndexPruneContext; +import com.alibaba.polardbx.executor.columnar.pruning.index.IndexPruner; +import com.alibaba.polardbx.executor.columnar.pruning.predicate.ColumnPredicatePruningInf; +import com.alibaba.polardbx.executor.gms.ColumnarManager; +import com.alibaba.polardbx.executor.operator.scan.ORCMetaReader; +import com.alibaba.polardbx.executor.operator.scan.ScanPreProcessor; +import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; +import com.alibaba.polardbx.optimizer.statis.ColumnarTracer; +import com.google.common.base.Preconditions; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheStats; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; +import org.apache.calcite.rex.RexNode; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.orc.StripeInformation; +import org.apache.orc.impl.OrcTail; +import org.roaringbitmap.RoaringBitmap; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.concurrent.ExecutorService; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static com.alibaba.polardbx.executor.columnar.pruning.data.PruneUtils.transformRexToIndexMergeTree; +import static com.alibaba.polardbx.gms.engine.FileStoreStatistics.CACHE_STATS_FIELD_COUNT; + +/** + * A mocked implementation of ScanPreProcessor that can generate + * file preheat meta, deletion bitmap and pruning result (all selected). + */ +public class DefaultScanPreProcessor implements ScanPreProcessor { + + private static final String PREHEATED_CACHE_NAME = "PREHEATED_CACHE"; + + private static final int PREHEATED_CACHE_MAX_ENTRY = 4096; + + private static final Logger logger = LoggerFactory.getLogger(DefaultScanPreProcessor.class); + + protected static final Cache PREHEATED_CACHE = + CacheBuilder.newBuilder().maximumSize(PREHEATED_CACHE_MAX_ENTRY).recordStats().build(); + + /** + * File path participated in preprocessor. + */ + protected final Set filePaths; + + /** + * A shared configuration object to avoid initialization of large parameter list. + */ + private final Configuration configuration; + + /** + * The filesystem storing the files in file path list. + */ + private final FileSystem fileSystem; + + /** + * To enable index pruning. + */ + protected final boolean enableIndexPruning; + + /** + * To enable oss compatible. 
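+     * Passed through to the index pruner when it loads ORC indexes.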
+ */
+ protected final boolean enableOssCompatible;
+
+ protected final String schemaName;
+ protected final String logicalTableName;
+
+ /**
+ * The column meta list from the query plan.
+ */
+ protected final List<ColumnMeta> columns;
+
+ /**
+ * The pushed-down predicate.
+ */
+ protected final List<RexNode> rexList;
+
+ /**
+ * The ratio of row groups in a stripe that will be selected (used for mocking).
+ */
+ private final double groupsRatio;
+
+ /**
+ * The ratio of row positions in a file that will be marked as deleted (used for mocking).
+ */
+ private final double deletionRatio;
+
+ /**
+ * The checkpoint tso in columnar mode.
+ * It will be null if in archive mode.
+ */
+ protected final Long tso;
+
+ /**
+ * The columnar manager will be null if in archive mode.
+ */
+ protected final ColumnarManager columnarManager;
+
+ /**
+ * Field id of each column in the columnar index.
+ */
+ private final List<Long> columnFieldIdList;
+
+ /**
+ * Mapping from file path to its preheated file meta.
+ */
+ protected final Map<Path, PreheatFileMeta> preheatFileMetaMap;
+
+ /**
+ * The future will be null if preparation has not been invoked.
+ */
+ protected ListenableFuture<?> future;
+
+ /**
+ * The pruning results, mapping from file path to stripe + row-group info.
+ */
+ protected Map<Path, SortedMap<Integer, boolean[]>> rowGroupMatrix;
+
+ /**
+ * Mapping from file path to deletion bitmap.
+ */
+ private Map<Path, RoaringBitmap> deletions;
+
+ /**
+ * Stores the throwable generated during preparation.
+ */
+ protected Throwable throwable;
+
+ protected IndexPruneContext indexPruneContext;
+
+ public DefaultScanPreProcessor(Configuration configuration,
+ FileSystem fileSystem,
+
+ // for pruning
+ String schemaName,
+ String logicalTableName,
+ boolean enableIndexPruning,
+ boolean enableOssCompatible,
+ List<ColumnMeta> columns,
+ List<RexNode> rexList,
+ Map<Integer, ParameterContext> params,
+
+ // for mock
+ double groupsRatio,
+ double deletionRatio,
+
+ // for columnar mode.
+ ColumnarManager columnarManager,
+ Long tso,
+ List<Long> columnFieldIdList) {
+
+ this.filePaths = new TreeSet<>();
+ this.configuration = configuration;
+ this.fileSystem = fileSystem;
+
+ // for pruning.
+ this.enableIndexPruning = enableIndexPruning;
+ this.enableOssCompatible = enableOssCompatible;
+ this.schemaName = schemaName;
+ this.logicalTableName = logicalTableName;
+ this.columns = columns;
+ this.rexList = rexList;
+ this.indexPruneContext = new IndexPruneContext();
+ indexPruneContext.setParameters(new Parameters(params));
+
+ // for mock
+ this.deletionRatio = deletionRatio;
+ this.groupsRatio = groupsRatio;
+
+ // for columnar mode
+ this.columnarManager = columnarManager;
+ // The checkpoint tso in columnar mode. It will be null if in archive mode.
+ this.tso = tso;
+ this.columnFieldIdList = columnFieldIdList;
+
+ this.preheatFileMetaMap = new HashMap<>();
+ this.rowGroupMatrix = new HashMap<>();
+ this.deletions = new HashMap<>();
+ }
+
+ @Override
+ public void addFile(Path filePath) {
+ this.filePaths.add(filePath);
+ }
+
+ @Override
+ public ListenableFuture<?> prepare(ExecutorService executor, String traceId, ColumnarTracer tracer) {
+ SettableFuture<Void> future = SettableFuture.create();
+ indexPruneContext.setPruneTracer(tracer);
+ // Is there a more elegant execution mode?
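+ // Contract of the asynchronous preparation: the caller keeps the returned
+ // future, waits for it to complete, and then calls throwIfFailed() before
+ // reading any pruning result, preheated meta or deletion bitmap.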
+ executor.submit(
+ () -> {
+ int stripeNum = 0;
+ int rgNum = 0;
+ int pruneRgLeft = 0;
+ try {
+ // rex+pc -> distribution segment condition + indexes merge tree
+ ColumnPredicatePruningInf columnPredicate =
+ transformRexToIndexMergeTree(rexList, indexPruneContext);
+ if (logger.isDebugEnabled()) {
+ logger.debug(
+ "column index prune " + schemaName + "," + logicalTableName + "," + PruneUtils.display(
+ columnPredicate, columns, indexPruneContext));
+ }
+
+ for (Path filePath : filePaths) {
+ boolean needGenerateDeletion = true;
+
+ // only preheat ORC file meta
+ if (filePath.getName().toUpperCase().endsWith(ColumnarFileType.ORC.name())) {
+ // preheat all meta from the ORC file.
+ PreheatFileMeta preheat =
+ PREHEATED_CACHE.get(filePath, () -> preheat(filePath));
+
+ preheatFileMetaMap.put(filePath, preheat);
+
+ if (enableIndexPruning && columnPredicate != null && tso != null
+ && columnFieldIdList != null) {
+ // prune the row-groups.
+ long loadIndexStart = System.nanoTime();
+ // TODO support multi columns for sort key
+ List<Integer> sortKeys =
+ columnarManager.getSortKeyColumns(tso, schemaName, logicalTableName);
+ Map<Long, Integer> orcIndexesMap =
+ columnarManager.getPhysicalColumnIndexes(filePath.getName());
+ IndexPruner indexPruner =
+ ColumnarPruneManager.getIndexPruner(
+ filePath, preheat, columns, sortKeys.get(0),
+ columnFieldIdList.stream()
+ .map(field -> orcIndexesMap.get(field) + 1)
+ .collect(Collectors.toList()),
+ enableOssCompatible
+ );
+
+ if (tracer != null) {
+ tracer.tracePruneInit(logicalTableName,
+ PruneUtils.display(columnPredicate, columns, indexPruneContext),
+ System.nanoTime() - loadIndexStart);
+ }
+ long indexPruneStart = System.nanoTime();
+ RoaringBitmap rr =
+ indexPruner.prune(logicalTableName, columns, columnPredicate, indexPruneContext);
+ SortedMap<Integer, boolean[]> rs = indexPruner.pruneToSortMap(rr);
+ if (rr.isEmpty()) {
+ needGenerateDeletion = false;
+ }
+ if (tracer != null) {
+ tracer.tracePruneTime(logicalTableName,
+ PruneUtils.display(columnPredicate, columns, indexPruneContext),
+ System.nanoTime() - indexPruneStart);
+ }
+ stripeNum += indexPruner.getStripeRgNum().size();
+ rgNum += indexPruner.getRgNum();
+ pruneRgLeft += rr.getCardinality();
+ // record the pruned stripes & row groups
+ rowGroupMatrix.put(filePath, rs);
+ } else {
+ // if pruning is disabled, mark all row-groups as selected.
+ generateFullMatrix(filePath);
+ }
+ }
+ // generate the deletion bitmap of this file.
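+ // needGenerateDeletion turns false only when pruning proved that no row
+ // group of this file survives, so loading its delete bitmap would be wasted work.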
+ if (needGenerateDeletion) {
+ generateDeletion(filePath);
+ }
+ }
+ if (tracer != null && columnPredicate != null) {
+ tracer.tracePruneResult(logicalTableName,
+ PruneUtils.display(columnPredicate, columns, indexPruneContext),
+ filePaths.size(),
+ stripeNum, rgNum, pruneRgLeft);
+ }
+ if (logger.isDebugEnabled() && columnPredicate != null) {
+ logger.debug(
+ "prune result: " + traceId + "," + logicalTableName + "," + filePaths.size() + ","
+ + stripeNum + "," + rgNum
+ + "," + pruneRgLeft);
+ }
+
+ } catch (Exception e) {
+ throwable = e;
+ future.set(null);
+ return;
+ }
+ future.set(null);
+ }
+
+ );
+
+ this.future = future;
+ return future;
+ }
+
+ @Override
+ public boolean isPrepared() {
+ throwIfFailed();
+ return throwable == null && future != null && future.isDone();
+ }
+
+ @Override
+ public SortedMap<Integer, boolean[]> getPruningResult(Path filePath) {
+ throwIfFailed();
+ Preconditions.checkArgument(isPrepared());
+ return rowGroupMatrix.get(filePath);
+ }
+
+ @Override
+ public PreheatFileMeta getPreheated(Path filePath) {
+ throwIfFailed();
+ Preconditions.checkArgument(isPrepared());
+ return preheatFileMetaMap.get(filePath);
+ }
+
+ @Override
+ public RoaringBitmap getDeletion(Path filePath) {
+ throwIfFailed();
+ Preconditions.checkArgument(isPrepared());
+ return deletions.get(filePath);
+ }
+
+ @Override
+ public void throwIfFailed() {
+ if (throwable != null) {
+ throw GeneralUtil.nestedException(throwable);
+ }
+ }
+
+ protected RoaringBitmap generateDeletion(Path filePath) {
+ RoaringBitmap bitmap;
+ if (tso == null) {
+ // in archive mode.
+ bitmap = new RoaringBitmap();
+ } else {
+ // in columnar mode.
+ bitmap = columnarManager.getDeleteBitMapOf(tso, filePath.getName());
+ }
+ deletions.put(filePath, bitmap);
+ return bitmap;
+ }
+
+ protected PreheatFileMeta preheat(Path filePath) throws IOException {
+ ORCMetaReader metaReader = null;
+ try {
+ metaReader = ORCMetaReader.create(configuration, fileSystem);
+ return metaReader.preheat(filePath);
+ } finally {
+ // guard against a failure in create(): close only if initialized.
+ if (metaReader != null) {
+ metaReader.close();
+ }
+ }
+ }
+
+ protected void generateFullMatrix(Path filePath) {
+ PreheatFileMeta preheatFileMeta = preheatFileMetaMap.get(filePath);
+ OrcTail orcTail = preheatFileMeta.getPreheatTail();
+
+ int indexStride = orcTail.getFooter().getRowIndexStride();
+
+ // The full matrix maps stripe id to an all-true row-group bitmap, sorted by stripe id.
+ SortedMap<Integer, boolean[]> matrix = new TreeMap<>();
+ for (StripeInformation stripeInformation : orcTail.getStripes()) {
+ int stripeId = (int) stripeInformation.getStripeId();
+ int groupsInStripe = (int) ((stripeInformation.getNumberOfRows() + indexStride - 1) / indexStride);
+
+ // build the row-group count from stripe row count and index stride,
+ // and mark all groups as selected.
+ boolean[] groupIncluded = new boolean[groupsInStripe];
+ Arrays.fill(groupIncluded, true);
+
+ matrix.put(stripeId, groupIncluded);
+ }
+ rowGroupMatrix.put(filePath, matrix);
+ }
+
+ public static byte[][] getCacheStat() {
+ CacheStats cacheStats = PREHEATED_CACHE.stats();
+
+ byte[][] results = new byte[CACHE_STATS_FIELD_COUNT][];
+ int pos = 0;
+ results[pos++] = PREHEATED_CACHE_NAME.getBytes();
+ results[pos++] = String.valueOf(-1).getBytes();
+ results[pos++] = String.valueOf(PREHEATED_CACHE.size()).getBytes();
+ results[pos++] = String.valueOf(-1).getBytes();
+ results[pos++] = String.valueOf(cacheStats.hitCount()).getBytes();
+ results[pos++] = String.valueOf(-1).getBytes();
+ results[pos++] = String.valueOf(cacheStats.missCount()).getBytes();
+ results[pos++] = String.valueOf(-1).getBytes();
+ results[pos++] = String.valueOf(-1).getBytes();
+ results[pos++] = "IN MEMORY".getBytes();
+ results[pos++] = String.valueOf(-1).getBytes();
+ results[pos++] = String.valueOf(PREHEATED_CACHE_MAX_ENTRY).getBytes();
+ results[pos++] = (-1 + " BYTES").getBytes();
+ return results;
+ }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DeletedScanPreProcessor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DeletedScanPreProcessor.java
new file mode 100644
index 000000000..bf4d0715f
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DeletedScanPreProcessor.java
@@ -0,0 +1,183 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
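DefaultScanPreProcessor above is driven in three steps: register files, prepare asynchronously, then read the per-file results. A hedged sketch of a call site (all arguments are placeholders and error handling is elided):

```java
// Hypothetical driver for DefaultScanPreProcessor.
DefaultScanPreProcessor preProcessor = new DefaultScanPreProcessor(
    configuration, fileSystem,
    schemaName, logicalTableName,
    true,                          // enableIndexPruning
    true,                          // enableOssCompatible
    columns, rexList, params,
    1.0D, 0.0D,                    // groupsRatio / deletionRatio (mock knobs)
    columnarManager, tso, columnFieldIdList);

Path orcFile = new Path("oss://bucket/t1/f1.orc");      // placeholder path
preProcessor.addFile(orcFile);
preProcessor.prepare(executor, traceId, tracer).get();  // block until prepared
preProcessor.throwIfFailed();                           // surface worker errors

SortedMap<Integer, boolean[]> rowGroups = preProcessor.getPruningResult(orcFile);
RoaringBitmap deletion = preProcessor.getDeletion(orcFile);
```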
+
+package com.alibaba.polardbx.executor.operator.scan.impl;
+
+import com.alibaba.polardbx.common.jdbc.ParameterContext;
+import com.alibaba.polardbx.common.oss.ColumnarFileType;
+import com.alibaba.polardbx.common.utils.logger.Logger;
+import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
+import com.alibaba.polardbx.executor.columnar.pruning.ColumnarPruneManager;
+import com.alibaba.polardbx.executor.columnar.pruning.data.PruneUtils;
+import com.alibaba.polardbx.executor.columnar.pruning.index.IndexPruner;
+import com.alibaba.polardbx.executor.columnar.pruning.predicate.ColumnPredicatePruningInf;
+import com.alibaba.polardbx.executor.gms.ColumnarManager;
+import com.alibaba.polardbx.optimizer.config.table.ColumnMeta;
+import com.alibaba.polardbx.optimizer.statis.ColumnarTracer;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import org.apache.calcite.rex.RexNode;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.roaringbitmap.RoaringBitmap;
+
+import java.util.List;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.concurrent.ExecutorService;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static com.alibaba.polardbx.executor.columnar.pruning.data.PruneUtils.transformRexToIndexMergeTree;
+
+public class DeletedScanPreProcessor extends DefaultScanPreProcessor {
+ private static final Logger logger = LoggerFactory.getLogger(DeletedScanPreProcessor.class);
+
+ public DeletedScanPreProcessor(Configuration configuration,
+ FileSystem fileSystem,
+ String schemaName,
+ String logicalTableName,
+ boolean enableIndexPruning,
+ boolean enableOssCompatible,
+ List<ColumnMeta> columns,
+ List<RexNode> rexList,
+ Map<Integer, ParameterContext> params,
+ double groupsRatio,
+ double deletionRatio,
+ ColumnarManager columnarManager,
+ Long tso,
+ List<Long> columnFieldIdList) {
+ super(
+ configuration,
+ fileSystem,
+
+ schemaName,
+ logicalTableName,
+ enableIndexPruning,
+ enableOssCompatible,
+ columns,
+ rexList,
+ params,
+
+ groupsRatio,
+ deletionRatio,
+ columnarManager,
+ tso,
+ columnFieldIdList
+ );
+ }
+
+ @Override
+ public ListenableFuture<?> prepare(ExecutorService executor, String traceId, ColumnarTracer tracer) {
+ SettableFuture<Void> future = SettableFuture.create();
+ indexPruneContext.setPruneTracer(tracer);
+ executor.submit(
+ () -> {
+ int stripeNum = 0;
+ int rgNum = 0;
+ int pruneRgLeft = 0;
+ try {
+ // rex+pc -> distribution segment condition + indexes merge tree
+ ColumnPredicatePruningInf columnPredicate =
+ transformRexToIndexMergeTree(rexList, indexPruneContext);
+ if (logger.isDebugEnabled()) {
+ logger.debug(
+ "[" + this.getClass().getSimpleName() + "]" + "column index prune " + schemaName + ","
+ + logicalTableName + "," + PruneUtils.display(columnPredicate, columns,
+ indexPruneContext));
+ }
+
+ for (Path filePath : filePaths) {
+ RoaringBitmap deleteBitmap = generateDeletion(filePath);
+
+ // only preheat ORC file meta
+ if (filePath.getName().toUpperCase().endsWith(ColumnarFileType.ORC.name())) {
+ // preheat all meta from the ORC file.
+ PreheatFileMeta preheat =
+ PREHEATED_CACHE.get(filePath, () -> preheat(filePath));
+
+ preheatFileMetaMap.put(filePath, preheat);
+
+ if (enableIndexPruning && tso != null) {
+ // prune the row-groups.
+ long loadIndexStart = System.nanoTime();
+ // TODO support multi columns for sort key
+ List<Integer> sortKeys =
+ columnarManager.getSortKeyColumns(tso, schemaName, logicalTableName);
+ List<Integer> orcIndexes =
+ columnarManager.getPhysicalColumnIndexes(tso, filePath.getName(),
+ IntStream.range(0, columns.size()).boxed().collect(
+ Collectors.toList()));
+ IndexPruner indexPruner = ColumnarPruneManager.getIndexPruner(
+ filePath, preheat, columns, sortKeys.get(0), orcIndexes, enableOssCompatible);
+
+ if (tracer != null) {
+ tracer.tracePruneInit(logicalTableName,
+ PruneUtils.display(columnPredicate, columns, indexPruneContext),
+ System.nanoTime() - loadIndexStart);
+ }
+ long indexPruneStart = System.nanoTime();
+
+ // Only need row-groups containing deleted data.
+ RoaringBitmap rr = indexPruner.pruneOnlyDeletedRowGroups(deleteBitmap);
+
+ SortedMap<Integer, boolean[]> rs = indexPruner.pruneToSortMap(rr);
+ if (tracer != null) {
+ tracer.tracePruneTime(logicalTableName,
+ PruneUtils.display(columnPredicate, columns, indexPruneContext),
+ System.nanoTime() - indexPruneStart);
+ }
+ stripeNum += indexPruner.getStripeRgNum().size();
+ rgNum += indexPruner.getRgNum();
+ pruneRgLeft += rr.getCardinality();
+ // record the pruned stripes & row groups
+ rowGroupMatrix.put(filePath, rs);
+ } else {
+ // if pruning is disabled, mark all row-groups as selected.
+ generateFullMatrix(filePath);
+ }
+ }
+ }
+ if (tracer != null && columnPredicate != null) {
+ tracer.tracePruneResult(logicalTableName,
+ PruneUtils.display(columnPredicate, columns, indexPruneContext),
+ filePaths.size(),
+ stripeNum, rgNum, pruneRgLeft);
+ }
+ if (logger.isDebugEnabled() && columnPredicate != null) {
+ logger.debug(
+ "[" + this.getClass().getSimpleName() + "]" + "prune result: " + traceId + ","
+ + logicalTableName + "," + filePaths.size() + ","
+ + stripeNum + "," + rgNum + "," + pruneRgLeft);
+ }
+
+ } catch (Exception e) {
+ throwable = e;
+ future.set(null);
+ return;
+ }
+ future.set(null);
+ }
+
+ );
+
+ this.future = future;
+ return future;
+ }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DeletedScanWork.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DeletedScanWork.java
new file mode 100644
index 000000000..ca17ab12f
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DeletedScanWork.java
@@ -0,0 +1,172 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
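The call to pruneOnlyDeletedRowGroups above is the key difference from the parent class: instead of evaluating the predicate, the pruner keeps exactly the row groups whose position range intersects the delete bitmap. A conceptual sketch of that selection rule (not the actual IndexPruner code; the row-group layout is an assumption):

```java
import org.roaringbitmap.RoaringBitmap;

// Assumes row group i covers positions [i * indexStride, (i + 1) * indexStride).
static boolean[] selectRowGroupsWithDeletes(RoaringBitmap deleteBitmap,
                                            int rowGroupCount, int indexStride) {
    boolean[] included = new boolean[rowGroupCount];
    for (int rg = 0; rg < rowGroupCount; rg++) {
        long begin = (long) rg * indexStride;
        long end = begin + indexStride;  // exclusive
        // keep the row group iff at least one of its positions is deleted
        included[rg] = deleteBitmap.rangeCardinality(begin, end) > 0;
    }
    return included;
}
```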
+
+package com.alibaba.polardbx.executor.operator.scan.impl;
+
+import com.alibaba.polardbx.executor.archive.reader.OSSColumnTransformer;
+import com.alibaba.polardbx.executor.chunk.Block;
+import com.alibaba.polardbx.executor.chunk.Chunk;
+import com.alibaba.polardbx.executor.chunk.columnar.LazyBlock;
+import com.alibaba.polardbx.executor.operator.scan.ColumnReader;
+import com.alibaba.polardbx.executor.operator.scan.LazyEvaluator;
+import com.alibaba.polardbx.executor.operator.scan.LogicalRowGroup;
+import com.alibaba.polardbx.executor.operator.scan.RowGroupIterator;
+import com.alibaba.polardbx.executor.operator.scan.RowGroupReader;
+import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics;
+import com.google.common.base.Preconditions;
+import org.apache.orc.ColumnStatistics;
+import org.roaringbitmap.RoaringBitmap;
+
+import java.util.ArrayList;
+import java.util.BitSet;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+public class DeletedScanWork extends AbstractScanWork {
+ private final boolean activeLoading;
+
+ public DeletedScanWork(String workId,
+ RuntimeMetrics metrics,
+ boolean enableMetrics,
+ LazyEvaluator<Chunk, BitSet> lazyEvaluator,
+ RowGroupIterator<Block, ColumnStatistics> rgIterator,
+ RoaringBitmap deletionBitmap,
+ MorselColumnarSplit.ScanRange scanRange,
+ List<Integer> inputRefsForFilter,
+ List<Integer> inputRefsForProject,
+ int partNum,
+ int nodePartCount,
+ boolean activeLoading,
+ OSSColumnTransformer ossColumnTransformer) {
+ super(workId, metrics, enableMetrics, lazyEvaluator, rgIterator, deletionBitmap, scanRange, inputRefsForFilter,
+ inputRefsForProject, partNum, nodePartCount, ossColumnTransformer);
+ this.activeLoading = activeLoading;
+ }
+
+ @Override
+ protected void handleNextWork() {
+
+ // Not all row groups should be loaded, only those selected by the pruner.
+ final boolean[] prunedRowGroupBitmap = rgIterator.rgIncluded();
+ final int rowGroupCount = prunedRowGroupBitmap.length;
+
+ // Get and lazily evaluate chunks until row group count exceeds the threshold.
+ // NOTE: the row-group and chunk must be in order.
+ final Map<Integer, List<Chunk>> chunksWithGroup = new TreeMap<>();
+ final List<Integer> selectedRowGroups = new ArrayList<>();
+
+ while (!isCanceled && rgIterator.hasNext()) {
+ rgIterator.next();
+ LogicalRowGroup<Block, ColumnStatistics> logicalRowGroup = rgIterator.current();
+ final int rowGroupId = logicalRowGroup.groupId();
+
+ // The row group id in iterator must be valid.
+ Preconditions.checkArgument(prunedRowGroupBitmap[rowGroupId]);
+
+ // A flag per row group indicating whether at least one chunk in it was selected.
+ boolean rgSelected = false;
+
+ Chunk chunk;
+ RowGroupReader<Chunk> rowGroupReader = logicalRowGroup.getReader();
+ while ((chunk = rowGroupReader.nextBatch()) != null) {
+ int[] batchRange = rowGroupReader.batchRange();
+ int[] preSelection = selectionOfDeleted(batchRange, deletionBitmap);
+ if (preSelection != null) {
+ // rebuild chunk according to project refs.
+ chunk = rebuildProject(chunk, preSelection, preSelection.length);
+ List<Chunk> chunksInGroup = chunksWithGroup.computeIfAbsent(rowGroupId, any -> new ArrayList<>());
+ chunksInGroup.add(chunk);
+ rgSelected = true;
+ } else {
+ // No row in this chunk is deleted, which is not what we want here; skip to the next chunk.
+ // The created chunk and block-loader will be abandoned here.
+ releaseRef(chunk);
+ }
+ }
+ // the chunks in this row group have run out, move to the next.
+ if (rgSelected) {
+ selectedRowGroups.add(rowGroupId);
+ } else {
+ // if the row-group is not selected, remove all its chunks from the buffer.
+ List<Chunk> chunksInGroup;
+ if ((chunksInGroup = chunksWithGroup.remove(rowGroupId)) != null) {
+ chunksInGroup.clear();
+ }
+ }
+ }
+
+ // There is no more chunk produced by this row group iterator.
+ rgIterator.noMoreChunks();
+
+ // no group is selected.
+ if (selectedRowGroups.isEmpty()) {
+ ioStatus.finish();
+ return;
+ }
+
+ // We collect all chunks in several row groups here,
+ // so we can merge the IO range of different row group to improve I/O efficiency.
+ boolean[] rowGroupIncluded = toRowGroupBitmap(rowGroupCount, selectedRowGroups);
+
+ // collect all row-groups for merging IO tasks.
+ mergeIONoCache(inputRefsForProject, rowGroupIncluded);
+
+ for (Map.Entry<Integer, List<Chunk>> entry : chunksWithGroup.entrySet()) {
+ List<Chunk> chunksInGroup = entry.getValue();
+ for (Chunk bufferedChunk : chunksInGroup) {
+
+ // The target chunk may stay in lazy mode or be converted into normal mode.
+ Chunk targetChunk = bufferedChunk;
+ if (activeLoading) {
+ Block[] blocks = new Block[bufferedChunk.getBlockCount()];
+ // load project columns
+ for (int blockIndex = 0; blockIndex < bufferedChunk.getBlockCount(); blockIndex++) {
+ LazyBlock lazyBlock = (LazyBlock) bufferedChunk.getBlock(blockIndex);
+ lazyBlock.load();
+
+ blocks[blockIndex] = lazyBlock.getLoaded();
+ }
+
+ targetChunk = new Chunk(bufferedChunk.getPositionCount(), blocks);
+ }
+
+ targetChunk.setPartIndex(partNum);
+ targetChunk.setPartCount(nodePartCount);
+ // add result and notify the blocked threads.
+ ioStatus.addResult(targetChunk);
+ }
+ }
+
+ ioStatus.finish();
+
+ if (activeLoading) {
+ // force columnar readers to close.
+ forceClose(inputRefsForFilter);
+ forceClose(inputRefsForProject);
+ }
+ }
+
+ private void forceClose(List<Integer> inputRefs) {
+ for (int i = 0; i < inputRefs.size(); i++) {
+ final int columnId = inputRefs.get(i) + 1;
+ ColumnReader columnReader = rgIterator.getColumnReader(columnId);
+ if (columnReader != null) {
+ columnReader.close();
+ }
+ }
+ }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryBinaryColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryBinaryColumnReader.java
new file mode 100644
index 000000000..a4f345e93
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryBinaryColumnReader.java
@@ -0,0 +1,104 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
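handleNextWork above leans on selectionOfDeleted, which is inherited from AbstractScanWork and not shown in this patch. A hedged reconstruction of what such a helper computes, assuming batchRange encodes the batch as {startPosition, positionCount}:

```java
import it.unimi.dsi.fastutil.ints.IntArrayList;
import org.roaringbitmap.RoaringBitmap;

// Hedged reconstruction; the real helper lives in AbstractScanWork.
static int[] selectionOfDeleted(int[] batchRange, RoaringBitmap deletionBitmap) {
    int start = batchRange[0];
    int count = batchRange[1];
    IntArrayList selected = new IntArrayList();
    for (int i = 0; i < count; i++) {
        if (deletionBitmap.contains(start + i)) {
            selected.add(i);  // batch-relative position of a deleted row
        }
    }
    // null signals "no deleted rows in this batch", matching the caller above.
    return selected.isEmpty() ? null : selected.toIntArray();
}
```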
+
+package com.alibaba.polardbx.executor.operator.scan.impl;
+
+import com.alibaba.polardbx.executor.chunk.BlockBuilders;
+import com.alibaba.polardbx.executor.chunk.ByteArrayBlock;
+import com.alibaba.polardbx.executor.chunk.RandomAccessBlock;
+import com.alibaba.polardbx.executor.operator.scan.StripeLoader;
+import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics;
+import com.google.common.base.Preconditions;
+import io.airlift.slice.Slice;
+import it.unimi.dsi.fastutil.bytes.ByteArrayList;
+import org.apache.orc.OrcProto;
+import org.apache.orc.impl.OrcIndex;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+public class DictionaryBinaryColumnReader extends AbstractDictionaryColumnReader {
+
+ public DictionaryBinaryColumnReader(int columnId, boolean isPrimaryKey, StripeLoader stripeLoader,
+ OrcIndex orcIndex,
+ RuntimeMetrics metrics, OrcProto.ColumnEncoding encoding, int indexStride,
+ boolean enableMetrics) {
+ super(columnId, isPrimaryKey, stripeLoader, orcIndex, metrics, encoding, indexStride, enableMetrics);
+ }
+
+ /**
+ * ByteArrayBlock holds no dictionary, so dictionary values are materialized into its data array.
+ */
+ @Override
+ public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException {
+ Preconditions.checkArgument(isOpened.get());
+ Preconditions.checkArgument(!openFailed.get());
+ Preconditions.checkArgument(randomAccessBlock instanceof ByteArrayBlock);
+ init();
+
+ long start = System.nanoTime();
+
+ ByteArrayBlock block = (ByteArrayBlock) randomAccessBlock;
+ boolean[] nulls = block.nulls();
+ int[] offsets = block.getOffsets();
+ Preconditions.checkArgument(nulls != null && nulls.length == positionCount);
+ Preconditions.checkArgument(offsets != null && offsets.length == positionCount);
+ ByteArrayList data = new ByteArrayList(positionCount * BlockBuilders.EXPECTED_BYTE_ARRAY_LEN);
+
+ // if the dictionary is null, all the values in this column are null and the present stream must not be null.
+ if (dictionary == null) {
+ Preconditions.checkArgument(present != null);
+ }
+
+ if (present == null) {
+ randomAccessBlock.setHasNull(false);
+ for (int i = 0; i < positionCount; i++) {
+ // no null value.
+ int dictId = (int) dictIdReader.next();
+ Slice value = dictionary.getValue(dictId);
+ data.addElements(data.size(), value.getBytes());
+ offsets[i] = data.size();
+ lastPosition++;
+ }
+ // destroy the null array to save memory.
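+ // (hasNull was already set to false for this branch, so dropping the
+ // null array is safe for downstream readers)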
+ block.destroyNulls(true); + } else { + randomAccessBlock.setHasNull(true); + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + offsets[i] = data.size(); + } else { + // if not null + int dictId = (int) dictIdReader.next(); + Slice value = dictionary.getValue(dictId); + data.addElements(data.size(), value.getBytes()); + offsets[i] = data.size(); + } + lastPosition++; + } + } + + block.setData(Arrays.copyOf(data.elements(), data.size())); + + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryBlobColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryBlobColumnReader.java new file mode 100644 index 000000000..c28d7bccb --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryBlobColumnReader.java @@ -0,0 +1,91 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.executor.chunk.BlobBlock; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.alibaba.polardbx.optimizer.core.datatype.Blob; +import com.google.common.base.Preconditions; +import io.airlift.slice.Slice; +import org.apache.orc.OrcProto; +import org.apache.orc.impl.OrcIndex; + +import java.io.IOException; + +public class DictionaryBlobColumnReader extends AbstractDictionaryColumnReader { + + public DictionaryBlobColumnReader(int columnId, boolean isPrimaryKey, StripeLoader stripeLoader, OrcIndex orcIndex, + RuntimeMetrics metrics, OrcProto.ColumnEncoding encoding, int indexStride, + boolean enableMetrics) { + super(columnId, isPrimaryKey, stripeLoader, orcIndex, metrics, encoding, indexStride, enableMetrics); + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof BlobBlock); + init(); + + long start = System.nanoTime(); + + BlobBlock block = (BlobBlock) randomAccessBlock; + boolean[] nulls = block.nulls(); + Preconditions.checkArgument(nulls != null && nulls.length == positionCount); + java.sql.Blob[] blobs = ((BlobBlock) randomAccessBlock).blobArray(); + + // if dictionary is null, all the value in this column is null and the present stream must be not null. 
+ if (dictionary == null) { + Preconditions.checkArgument(present != null); + } + + if (present == null) { + randomAccessBlock.setHasNull(false); + for (int i = 0; i < positionCount; i++) { + // no null value. + int dictId = (int) dictIdReader.next(); + Slice value = dictionary.getValue(dictId); + blobs[i] = new Blob(value.getBytes()); + lastPosition++; + } + // destroy null array to save the memory. + block.destroyNulls(true); + } else { + randomAccessBlock.setHasNull(true); + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + } else { + // if not null + int dictId = (int) dictIdReader.next(); + Slice value = dictionary.getValue(dictId); + blobs[i] = new Blob(value.getBytes()); + } + lastPosition++; + } + } + + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryBlockBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryBlockBuilder.java new file mode 100644 index 000000000..6f689523f --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryBlockBuilder.java @@ -0,0 +1,126 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.executor.chunk.AbstractBlockBuilder; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.SliceBlock; +import com.alibaba.polardbx.executor.operator.scan.BlockDictionary; +import com.alibaba.polardbx.optimizer.core.datatype.BlobType; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.SliceType; +import com.google.common.base.Preconditions; +import io.airlift.slice.Slice; +import io.airlift.slice.Slices; +import it.unimi.dsi.fastutil.ints.IntArrayList; + +/** + * Block builder for slice block based on dictionary. 
+ */ +public class DictionaryBlockBuilder extends AbstractBlockBuilder { + private BlockDictionary blockDictionary; + private IntArrayList values; + private DataType dataType; + private boolean isCompatible; + + public DictionaryBlockBuilder(boolean isCompatible, DataType dataType, int initialCapacity) { + super(initialCapacity); + Preconditions.checkArgument(dataType instanceof SliceType || dataType instanceof BlobType); + this.dataType = dataType; + this.isCompatible = isCompatible; + this.values = new IntArrayList(initialCapacity); + } + + public void setDictionary(BlockDictionary dictionary) { + this.blockDictionary = dictionary; + } + + @Override + public void writeInt(int value) { + values.add(value); + valueIsNull.add(false); + } + + public Slice getRegion(int position) { + if (valueIsNull.get(position)) { + return Slices.EMPTY_SLICE; + } else { + int dictId = values.getInt(position); + if (dictId == -1) { + return Slices.EMPTY_SLICE; + } + return blockDictionary.getValue(dictId); + } + } + + @Override + public Object getObject(int position) { + return isNull(position) ? null : getRegion(position); + } + + @Override + public void writeObject(Object value) { + if (value == null) { + appendNull(); + return; + } + Preconditions.checkArgument(value instanceof Integer); + writeInt((Integer) value); + } + + @Override + public void ensureCapacity(int capacity) { + super.ensureCapacity(capacity); + values.ensureCapacity(capacity); + } + + @Override + public Block build() { + if (dataType instanceof SliceType) { + return new SliceBlock((SliceType) dataType, 0, getPositionCount(), + mayHaveNull() ? valueIsNull.elements() : null, + blockDictionary, values.elements(), isCompatible + ); + } + throw new UnsupportedOperationException("Unsupported dictionary type: " + dataType); + } + + @Override + public void appendNull() { + appendNullInternal(); + values.add(-1); + } + + @Override + public BlockBuilder newBlockBuilder() { + return new DictionaryBlockBuilder(isCompatible, dataType, getCapacity()); + } + + @Override + public int hashCode(int position) { + if (isNull(position)) { + return 0; + } + int dictId = values.getInt(position); + if (dictId == -1) { + return 0; + } + return blockDictionary.getValue(dictId).hashCode(); + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryDecimalColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryDecimalColumnReader.java new file mode 100644 index 000000000..283c0ca71 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryDecimalColumnReader.java @@ -0,0 +1,175 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
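To make the contract of DictionaryBlockBuilder concrete: positions are appended as dictionary ids (or nulls), and build() emits a SliceBlock that shares the dictionary instead of copying bytes. A hedged usage sketch; `sliceType`, `dictionary` and `decodedIds` are placeholders, not names from this patch:

```java
// Hypothetical flow: ids decoded from an ORC dictionary stream are appended,
// then a dictionary-backed SliceBlock is built.
DictionaryBlockBuilder builder = new DictionaryBlockBuilder(true, sliceType, 1024);
builder.setDictionary(dictionary);
for (int dictId : decodedIds) {
    if (dictId == -1) {
        builder.appendNull();      // -1 is the internal null marker
    } else {
        builder.writeInt(dictId);  // stores the dictionary id, not the bytes
    }
}
Block block = builder.build();     // SliceBlock sharing 'dictionary'
```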
+ */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.charset.MySQLUnicodeUtils; +import com.alibaba.polardbx.common.datatype.DecimalConverter; +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.DecimalTypeBase; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.chunk.DecimalBlock; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.alibaba.polardbx.optimizer.core.datatype.DecimalType; +import com.google.common.base.Preconditions; +import io.airlift.slice.Slice; +import org.apache.orc.OrcProto; +import org.apache.orc.impl.OrcIndex; + +import java.io.IOException; +import java.util.Arrays; + +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DECIMAL_MEMORY_SIZE; + +public class DictionaryDecimalColumnReader extends AbstractDictionaryColumnReader { + + private static final int DEFAULT_BYTE_BUFFER_LENGTH = 64; + private byte[] byteBuffer = null; + + public DictionaryDecimalColumnReader(int columnId, boolean isPrimaryKey, + StripeLoader stripeLoader, OrcIndex orcIndex, + RuntimeMetrics metrics, OrcProto.ColumnEncoding encoding, int indexStride, + boolean enableMetrics) { + super(columnId, isPrimaryKey, stripeLoader, orcIndex, metrics, encoding, indexStride, enableMetrics); + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof DecimalBlock); + init(); + + long start = System.nanoTime(); + + DecimalBlock block = (DecimalBlock) randomAccessBlock; + readToDecimal(block, positionCount); + + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } + + private void readToDecimal(DecimalBlock block, int positionCount) throws IOException { + boolean[] nulls = block.nulls(); + Slice memorySegments = block.getMemorySegments(); + DecimalType decimalType = (DecimalType) block.getType(); + Preconditions.checkArgument(nulls != null && nulls.length == positionCount); + + if (dictionary == null) { + // all values are null + Preconditions.checkArgument(present != null); + block.setHasNull(true); + + for (int i = 0; i < positionCount; i++) { + nulls[i] = true; + lastPosition++; + } + + } else { + initByteBuffer(); + if (present == null) { + block.setHasNull(false); + for (int i = 0; i < positionCount; i++) { + // no null value. 
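+ // Each iteration decodes one dictionary id, re-encodes the dictionary bytes
+ // to latin1 when necessary, and parses them directly into the slice of block
+ // memory reserved for position i (DECIMAL_MEMORY_SIZE bytes per position).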
+ int dictId = (int) dictIdReader.next(); + Slice sliceValue = dictionary.getValue(dictId); + int len = sliceValue.length(); + byte[] tmp = getByteBuffer(len); + boolean isUtf8FromLatin1 = + MySQLUnicodeUtils.utf8ToLatin1(sliceValue, tmp, len); + if (!isUtf8FromLatin1) { + // in columnar, decimals are stored already in latin1 encoding + sliceValue.getBytes(0, tmp, 0, len); + } + int fromIndex = i * DECIMAL_MEMORY_SIZE; + DecimalStructure d2 = new DecimalStructure(memorySegments.slice(fromIndex, DECIMAL_MEMORY_SIZE)); + int[] result = + DecimalConverter.binToDecimal(tmp, d2, decimalType.getPrecision(), decimalType.getScale()); + nulls[i] = false; + lastPosition++; + + if (result[1] != DecimalTypeBase.E_DEC_OK) { + LOGGER.error(String.format("Decoding dictionary decimal failed, dictId: %d, bytes: [%s]", + dictId, Arrays.toString(sliceValue.getBytes()))); + throw GeneralUtil.nestedException("Error occurs while decoding dictionary decimal"); + } + } + + // destroy null array to save the memory. + block.destroyNulls(true); + } else { + block.setHasNull(true); + + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + } else { + // if not null + int dictId = (int) dictIdReader.next(); + Slice sliceValue = dictionary.getValue(dictId); + int len = sliceValue.length(); + byte[] tmp = getByteBuffer(len); + boolean isUtf8FromLatin1 = + MySQLUnicodeUtils.utf8ToLatin1(sliceValue, tmp, len); + if (!isUtf8FromLatin1) { + // in columnar, decimals are stored already in latin1 encoding + sliceValue.getBytes(0, tmp, 0, len); + } + int fromIndex = i * DECIMAL_MEMORY_SIZE; + DecimalStructure d2 = + new DecimalStructure(memorySegments.slice(fromIndex, DECIMAL_MEMORY_SIZE)); + int[] result = + DecimalConverter.binToDecimal(tmp, d2, decimalType.getPrecision(), decimalType.getScale()); + nulls[i] = false; + if (result[1] != DecimalTypeBase.E_DEC_OK) { + LOGGER.error(String.format("Decoding dictionary decimal failed, dictId: %d, bytes: [%s]", + dictId, Arrays.toString(sliceValue.getBytes()))); + throw GeneralUtil.nestedException("Error occurs while decoding dictionary decimal"); + } + } + lastPosition++; + } + } + } + } + + private void initByteBuffer() { + if (this.byteBuffer == null) { + this.byteBuffer = new byte[DEFAULT_BYTE_BUFFER_LENGTH]; + } + } + + /** + * @return local byte buffer with ensured capacity + */ + private byte[] getByteBuffer(int len) { + if (this.byteBuffer == null) { + this.byteBuffer = new byte[len]; + return this.byteBuffer; + } + if (this.byteBuffer.length < len) { + this.byteBuffer = new byte[len]; + return this.byteBuffer; + } + return byteBuffer; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryEnumColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryEnumColumnReader.java new file mode 100644 index 000000000..634538df6 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryEnumColumnReader.java @@ -0,0 +1,105 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.executor.chunk.BlockBuilders; +import com.alibaba.polardbx.executor.chunk.EnumBlock; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.google.common.base.Preconditions; +import io.airlift.slice.Slice; +import it.unimi.dsi.fastutil.chars.CharArrayList; +import org.apache.orc.OrcProto; +import org.apache.orc.impl.OrcIndex; + +import java.io.IOException; +import java.util.Arrays; + +public class DictionaryEnumColumnReader extends AbstractDictionaryColumnReader { + + public DictionaryEnumColumnReader(int columnId, boolean isPrimaryKey, StripeLoader stripeLoader, OrcIndex orcIndex, + RuntimeMetrics metrics, OrcProto.ColumnEncoding encoding, int indexStride, + boolean enableMetrics) { + super(columnId, isPrimaryKey, stripeLoader, orcIndex, metrics, encoding, indexStride, enableMetrics); + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof EnumBlock); + init(); + + long start = System.nanoTime(); + + EnumBlock block = (EnumBlock) randomAccessBlock; + int[] offsets = block.getOffsets(); + boolean[] nulls = block.nulls(); + Preconditions.checkArgument(offsets != null && offsets.length == positionCount); + Preconditions.checkArgument(nulls != null && nulls.length == positionCount); + + // if dictionary is null, all the value in this column is null and the present stream must be not null. + if (dictionary == null) { + Preconditions.checkArgument(present != null); + } + CharArrayList data = new CharArrayList(positionCount * BlockBuilders.EXPECTED_STRING_LEN); + + if (present == null) { + randomAccessBlock.setHasNull(false); + for (int i = 0; i < positionCount; i++) { + // no null value. + int dictId = (int) dictIdReader.next(); + Slice value = dictionary.getValue(dictId); + String strVal = value.toStringUtf8(); + for (int idx = 0; idx < strVal.length(); idx++) { + data.add(strVal.charAt(idx)); + } + offsets[i] = data.size(); + lastPosition++; + } + // destroy null array to save the memory. 
+ block.destroyNulls(true); + } else { + randomAccessBlock.setHasNull(true); + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + } else { + // if not null + int dictId = (int) dictIdReader.next(); + Slice value = dictionary.getValue(dictId); + String strVal = value.toStringUtf8(); + for (int idx = 0; idx < strVal.length(); idx++) { + data.add(strVal.charAt(idx)); + } + offsets[i] = data.size(); + } + lastPosition++; + } + } + + block.setData(Arrays.copyOf(data.elements(), data.size())); + + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryJsonColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryJsonColumnReader.java new file mode 100644 index 000000000..fcdf4149d --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryJsonColumnReader.java @@ -0,0 +1,122 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.chunk.StringBlock; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.google.common.base.Preconditions; +import io.airlift.slice.DynamicSliceOutput; +import io.airlift.slice.Slice; +import io.airlift.slice.SliceOutput; +import org.apache.orc.OrcProto; +import org.apache.orc.impl.OrcIndex; + +import java.io.IOException; + +/** + * The dictionary json column reader is responsible for parsing + * the dictionary-encoding data from orc into string block. 
+ */ +public class DictionaryJsonColumnReader extends AbstractDictionaryColumnReader { + + public DictionaryJsonColumnReader(int columnId, boolean isPrimaryKey, + StripeLoader stripeLoader, OrcIndex orcIndex, + RuntimeMetrics metrics, OrcProto.ColumnEncoding encoding, int indexStride, + boolean enableMetrics) { + super(columnId, isPrimaryKey, stripeLoader, orcIndex, metrics, encoding, indexStride, enableMetrics); + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof StringBlock); + init(); + + long start = System.nanoTime(); + + StringBlock block = (StringBlock) randomAccessBlock; + int[] offsets = block.getOffsets(); + boolean[] nulls = block.nulls(); + Preconditions.checkArgument(offsets != null && offsets.length == positionCount); + Preconditions.checkArgument(nulls != null && nulls.length == positionCount); + + long totalLength = 0; + + // if dictionary is null, all the values in this column are null and the present stream must be not null. + if (dictionary == null) { + Preconditions.checkArgument(present != null); + block.setHasNull(true); + + for (int i = 0; i < positionCount; i++) { + offsets[i] = (int) totalLength; + nulls[i] = true; + + lastPosition++; + } + } else { + SliceOutput sliceOutput = new DynamicSliceOutput(positionCount); + if (present == null) { + block.setHasNull(false); + for (int i = 0; i < positionCount; i++) { + // no null value. + int dictId = (int) dictIdReader.next(); + Slice sliceValue = dictionary.getValue(dictId); + sliceOutput.writeBytes(sliceValue); + long length = sliceValue.length(); + totalLength += length; + offsets[i] = (int) totalLength; + nulls[i] = false; + + lastPosition++; + } + + // destroy null array to save the memory. + block.destroyNulls(true); + } else { + block.setHasNull(true); + + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + offsets[i] = (int) totalLength; + } else { + // if not null + int dictId = (int) dictIdReader.next(); + Slice sliceValue = dictionary.getValue(dictId); + sliceOutput.writeBytes(sliceValue); + long length = sliceValue.length(); + totalLength += length; + offsets[i] = (int) totalLength; + nulls[i] = false; + } + lastPosition++; + } + } + block.setData(sliceOutput.slice().toStringUtf8().toCharArray()); + } + + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryMapping.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryMapping.java new file mode 100644 index 000000000..9e5919f1b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryMapping.java @@ -0,0 +1,36 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan.impl;
+
+import com.alibaba.polardbx.executor.operator.scan.BlockDictionary;
+
+/**
+ * Maintains the mapping from each original dictionary into the merged dictionary id space.
+ */
+public interface DictionaryMapping {
+ static DictionaryMapping create() {
+ return new DictionaryMappingImpl();
+ }
+
+ int[] merge(BlockDictionary dictionary);
+
+ default long estimatedSize() {
+ return 0;
+ }
+
+ void close();
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryMappingImpl.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryMappingImpl.java
new file mode 100644
index 000000000..d6cf56092
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryMappingImpl.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan.impl;
+
+import com.alibaba.polardbx.executor.operator.scan.BlockDictionary;
+import io.airlift.slice.Slice;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class DictionaryMappingImpl implements DictionaryMapping {
+ public List<Slice> getMergedDict() {
+ return mergedDict;
+ }
+
+ private List<Slice> mergedDict = new ArrayList<>();
+ private Map<Integer, int[]> reMappings = new HashMap<>();
+
+ @Override
+ public int[] merge(BlockDictionary dictionary) {
+ int hashCode = dictionary.hashCode();
+ int[] reMapping;
+ if ((reMapping = reMappings.get(hashCode)) != null) {
+ return reMapping;
+ }
+
+ // merge
+ reMapping = new int[dictionary.size()];
+ for (int originalDictId = 0; originalDictId < dictionary.size(); originalDictId++) {
+ Slice originalDictValue = dictionary.getValue(originalDictId);
+
+ // Find the index of dict value, and record it into reMapping array.
+ int index = mergedDict.indexOf(originalDictValue);
+ if (index == -1) {
+ mergedDict.add(originalDictValue);
+ index = mergedDict.size() - 1;
+ }
+ reMapping[originalDictId] = index;
+ }
+ reMappings.put(hashCode, reMapping);
+ return reMapping;
+ }
+
+ @Override
+ public long estimatedSize() {
+ AtomicLong estimatedSize = new AtomicLong();
+ for (Slice dictValue : mergedDict) {
+ estimatedSize.addAndGet(dictValue == null ?
0 : dictValue.length()); + } + + reMappings.forEach((integer, intArray) -> + estimatedSize.addAndGet(Integer.BYTES + intArray.length * Integer.BYTES)); + return estimatedSize.get(); + } + + @Override + public void close() { + mergedDict.clear(); + reMappings.clear(); + mergedDict = null; + reMappings = null; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryVarcharColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryVarcharColumnReader.java new file mode 100644 index 000000000..1c31f53bd --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DictionaryVarcharColumnReader.java @@ -0,0 +1,179 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.chunk.SliceBlock; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.google.common.base.Preconditions; +import io.airlift.slice.DynamicSliceOutput; +import io.airlift.slice.Slice; +import io.airlift.slice.SliceOutput; +import io.airlift.slice.Slices; +import org.apache.orc.OrcProto; +import org.apache.orc.impl.OrcIndex; + +import java.io.IOException; + +/** + * The dictionary varchar column reader is responsible for parsing + * the dictionary-encoding data from orc into slice block. 
+ */ +public class DictionaryVarcharColumnReader extends AbstractDictionaryColumnReader { + + private final boolean enableSliceDict; + + public DictionaryVarcharColumnReader(int columnId, boolean isPrimaryKey, + StripeLoader stripeLoader, OrcIndex orcIndex, + RuntimeMetrics metrics, OrcProto.ColumnEncoding encoding, int indexStride, + boolean enableMetrics, boolean enableSliceDict) { + super(columnId, isPrimaryKey, stripeLoader, orcIndex, metrics, encoding, indexStride, enableMetrics); + this.enableSliceDict = enableSliceDict; + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof SliceBlock); + init(); + + long start = System.nanoTime(); + + SliceBlock block = (SliceBlock) randomAccessBlock; + if (enableSliceDict) { + readToSliceDict(block, positionCount); + } else { + readToSliceData(block, positionCount); + } + + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } + + private void readToSliceDict(SliceBlock block, int positionCount) throws IOException { + int[] dictIds = block.getDictIds(); + boolean[] nulls = block.nulls(); + Preconditions.checkArgument(dictIds != null && dictIds.length == positionCount); + Preconditions.checkArgument(nulls != null && nulls.length == positionCount); + + // if dictionary is null, all the value in this column is null and the present stream must be not null. + if (dictionary == null) { + Preconditions.checkArgument(present != null); + block.setDictionary(LocalBlockDictionary.EMPTY_DICTIONARY); + } else { + block.setDictionary(dictionary); + } + + if (present == null) { + block.setHasNull(false); + for (int i = 0; i < positionCount; i++) { + // no null value. + int dictId = (int) dictIdReader.next(); + dictIds[i] = dictId; + lastPosition++; + } + // destroy null array to save the memory. + block.destroyNulls(true); + } else { + block.setHasNull(true); + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + dictIds[i] = -1; + } else { + // if not null + int dictId = (int) dictIdReader.next(); + dictIds[i] = dictId; + } + lastPosition++; + } + } + } + + private void readToSliceData(SliceBlock block, int positionCount) throws IOException { + int[] offsets = block.getOffsets(); + boolean[] nulls = block.nulls(); + Preconditions.checkArgument(offsets != null && offsets.length == positionCount); + Preconditions.checkArgument(nulls != null && nulls.length == positionCount); + + long totalLength = 0; + + if (dictionary == null) { + // all values are null + Preconditions.checkArgument(present != null); + block.setHasNull(true); + + for (int i = 0; i < positionCount; i++) { + offsets[i] = (int) totalLength; + nulls[i] = true; + + lastPosition++; + } + + // For block with all null value, set empty slice. + block.setData(Slices.EMPTY_SLICE); + } else { + SliceOutput sliceOutput = new DynamicSliceOutput(positionCount); + if (present == null) { + block.setHasNull(false); + for (int i = 0; i < positionCount; i++) { + // no null value. 
+ int dictId = (int) dictIdReader.next(); + Slice sliceValue = dictionary.getValue(dictId); + sliceOutput.writeBytes(sliceValue); + long length = sliceValue.length(); + totalLength += length; + offsets[i] = (int) totalLength; + nulls[i] = false; + + lastPosition++; + } + + // destroy null array to save the memory. + block.destroyNulls(true); + } else { + block.setHasNull(true); + + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + offsets[i] = (int) totalLength; + } else { + // if not null + int dictId = (int) dictIdReader.next(); + Slice sliceValue = dictionary.getValue(dictId); + sliceOutput.writeBytes(sliceValue); + long length = sliceValue.length(); + totalLength += length; + offsets[i] = (int) totalLength; + nulls[i] = false; + } + lastPosition++; + } + } + block.setData(sliceOutput.slice()); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DirectBinaryColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DirectBinaryColumnReader.java new file mode 100644 index 000000000..9a5049f57 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DirectBinaryColumnReader.java @@ -0,0 +1,114 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.chunk.ByteArrayBlock; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.google.common.base.Preconditions; +import io.airlift.slice.DynamicSliceOutput; +import io.airlift.slice.Slice; +import io.airlift.slice.SliceOutput; +import org.apache.orc.customized.ORCDataOutput; +import org.apache.orc.impl.OrcIndex; + +import java.io.IOException; + +public class DirectBinaryColumnReader extends DirectVarcharColumnReader { + + public DirectBinaryColumnReader(int columnId, boolean isPrimaryKey, StripeLoader stripeLoader, + OrcIndex orcIndex, RuntimeMetrics metrics, + int indexStride, boolean enableMetrics) { + super(columnId, isPrimaryKey, stripeLoader, orcIndex, metrics, indexStride, enableMetrics); + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof ByteArrayBlock); + init(); + + long start = System.nanoTime(); + + ByteArrayBlock block = (ByteArrayBlock) randomAccessBlock; + boolean[] nulls = block.nulls(); + int[] offsets = block.getOffsets(); + Preconditions.checkArgument(nulls != null && nulls.length == positionCount); + Preconditions.checkArgument(offsets != null && offsets.length == positionCount); + + long totalLength = 0; + if (present == null) { + randomAccessBlock.setHasNull(false); + + if (lengthReader != null) { + for (int i = 0; i < positionCount; i++) { + // no null value. + long length = lengthReader.next(); + totalLength += length; + + offsets[i] = (int) totalLength; + nulls[i] = false; + lastPosition++; + } + } + + // destroy null array to save the memory. + block.destroyNulls(true); + } else { + randomAccessBlock.setHasNull(true); + + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + offsets[i] = (int) totalLength; + } else { + // if not null + long length = lengthReader.next(); + totalLength += length; + + offsets[i] = (int) totalLength; + nulls[i] = false; + } + lastPosition++; + } + } + + // Read all bytes of stream into sliceOutput at once. 
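+ // A single read() may return fewer bytes than requested, so keep draining
+ // until totalLength bytes are consumed; a negative return value means the
+ // stream ended before the expected amount of data arrived.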
+ SliceOutput sliceOutput = new DynamicSliceOutput(positionCount); + ORCDataOutput dataOutput = new SliceOutputWrapper(sliceOutput); + int len = (int) totalLength; + while (len > 0) { + int bytesRead = dataStream.read(dataOutput, len); + if (bytesRead < 0) { + throw GeneralUtil.nestedException("Can't finish byte read from " + dataStream); + } + len -= bytesRead; + } + Slice data = sliceOutput.slice(); + block.setData(data.getBytes()); + + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DirectBlobColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DirectBlobColumnReader.java new file mode 100644 index 000000000..bba6c5cf8 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DirectBlobColumnReader.java @@ -0,0 +1,509 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.chunk.BlobBlock; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.operator.scan.AbstractColumnReader; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.MetricsNameBuilder; +import com.alibaba.polardbx.executor.operator.scan.metrics.ORCMetricsWrapper; +import com.alibaba.polardbx.executor.operator.scan.metrics.ProfileKeys; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.alibaba.polardbx.optimizer.core.datatype.Blob; +import com.codahale.metrics.Counter; +import com.google.common.base.Preconditions; +import org.apache.orc.OrcProto; +import org.apache.orc.customized.ORCProfile; +import org.apache.orc.impl.BitFieldReader; +import org.apache.orc.impl.InStream; +import org.apache.orc.impl.OrcIndex; +import org.apache.orc.impl.PositionProvider; +import org.apache.orc.impl.RecordReaderImpl; +import org.apache.orc.impl.RunLengthIntegerReaderV2; +import org.apache.orc.impl.StreamName; + +import java.io.IOException; +import java.text.MessageFormat; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; + +public class DirectBlobColumnReader extends AbstractColumnReader { + // basic metadata + private final StripeLoader stripeLoader; + + // in preheat mode, all row-indexes in orc-index should not be null. 
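+ // The row-group index supplies the recorded stream positions that seek(int) consumes.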
+ private final OrcIndex orcIndex; + private final RuntimeMetrics metrics; + + private final int indexStride; + + private final boolean enableMetrics; + // for semantic parser + protected BitFieldReader present; + protected InStream dataStream; + protected RunLengthIntegerReaderV2 lengthReader; + // open parameters + private boolean[] rowGroupIncluded; + private boolean await; + // inner states + private AtomicBoolean openFailed; + private AtomicBoolean initializeOnlyOnce; + private AtomicBoolean isOpened; + // IO results + private Throwable throwable; + private Map inStreamMap; + private CompletableFuture> openFuture; + // record read positions + private int currentRowGroup; + private int lastPosition; + + // execution time metrics. + private Counter preparingTimer; + private Counter seekTimer; + private Counter parseTimer; + + public DirectBlobColumnReader(int columnId, boolean isPrimaryKey, StripeLoader stripeLoader, OrcIndex orcIndex, + RuntimeMetrics metrics, int indexStride, boolean enableMetrics) { + super(columnId, isPrimaryKey); + this.stripeLoader = stripeLoader; + this.orcIndex = orcIndex; + this.metrics = metrics; + this.indexStride = indexStride; + this.enableMetrics = enableMetrics; + + // inner states + openFailed = new AtomicBoolean(false); + initializeOnlyOnce = new AtomicBoolean(false); + isOpened = new AtomicBoolean(false); + throwable = null; + inStreamMap = null; + openFuture = null; + + // for parser + present = null; + dataStream = null; + lengthReader = null; + + // read position control + // The initial value is -1 means it must seek to the correct row group firstly. + currentRowGroup = -1; + lastPosition = -1; + + rowGroupIncluded = null; + await = false; + + if (enableMetrics) { + preparingTimer = metrics.addCounter( + MetricsNameBuilder.columnMetricsKey(columnId, ProfileKeys.ORC_COLUMN_IO_PREPARING_TIMER), + COLUMN_READER_TIMER, + ProfileKeys.ORC_COLUMN_IO_PREPARING_TIMER.getProfileUnit() + ); + + seekTimer = metrics.addCounter( + MetricsNameBuilder.columnMetricsKey(columnId, ProfileKeys.ORC_COLUMN_SEEK_TIMER), + COLUMN_READER_TIMER, + ProfileKeys.ORC_COLUMN_SEEK_TIMER.getProfileUnit() + ); + + parseTimer = metrics.addCounter( + MetricsNameBuilder.columnMetricsKey(columnId, ProfileKeys.ORC_COLUMN_PARSE_TIMER), + COLUMN_READER_TIMER, + ProfileKeys.ORC_COLUMN_PARSE_TIMER.getProfileUnit() + ); + } + + } + + @Override + public boolean[] rowGroupIncluded() { + Preconditions.checkArgument(isOpened.get()); + return rowGroupIncluded; + } + + @Override + public boolean isOpened() { + return isOpened.get(); + } + + @Override + public void open(boolean await, boolean[] rowGroupIncluded) { + if (!isOpened.compareAndSet(false, true)) { + throw GeneralUtil.nestedException("It's not allowed to re-open this column reader."); + } + this.rowGroupIncluded = rowGroupIncluded; + this.await = await; + + // load the specified streams. + openFuture = stripeLoader.load(columnId, rowGroupIncluded); + + if (await) { + doWait(); + } + } + + @Override + public void open(CompletableFuture> loadFuture, + boolean await, boolean[] rowGroupIncluded) { + if (!isOpened.compareAndSet(false, true)) { + throw GeneralUtil.nestedException("It's not allowed to re-open this column reader."); + } + this.rowGroupIncluded = rowGroupIncluded; + this.await = await; + this.openFuture = loadFuture; + if (await) { + doWait(); + } + } + + // wait for open future and handle failure. 
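+ // Stream loading is asynchronous: open() only submits the IO request and,
+ // unless await was set, the first init() call blocks here until all
+ // requested streams of this column are available.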
+ private void doWait() { + try { + inStreamMap = openFuture.get(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } catch (ExecutionException e) { + throw new RuntimeException(e); + } + + if (throwable != null) { + // throw if failed. + throw GeneralUtil.nestedException(throwable); + } + } + + protected void init() throws IOException { + if (!initializeOnlyOnce.compareAndSet(false, true)) { + return; + } + + long start = System.nanoTime(); + if (!await) { + doWait(); + } + if (openFailed.get()) { + return; + } + + // Unlike StripePlanner in raw ORC SDK, the stream names and IO production are + // all determined at runtime. + StreamName presentName = new StreamName(columnId, OrcProto.Stream.Kind.PRESENT); + StreamName dataName = new StreamName(columnId, OrcProto.Stream.Kind.DATA); + StreamName lengthName = new StreamName(columnId, OrcProto.Stream.Kind.LENGTH); + + InStream presentStream = inStreamMap.get(presentName); + dataStream = inStreamMap.get(dataName); + InStream lengthStream = inStreamMap.get(lengthName); + + // initialize present and integer reader + present = presentStream == null ? null : new BitFieldReader(presentStream); + lengthReader = lengthStream == null ? null : new RunLengthIntegerReaderV2(lengthStream, false, true); + + // Add memory metrics. + if (present != null) { + String metricsName = MetricsNameBuilder.streamMetricsKey( + presentName, ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER + ); + + ORCProfile memoryCounter = enableMetrics ? new ORCMetricsWrapper( + metricsName, + COLUMN_READER_MEMORY, + ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER.getProfileUnit(), + metrics + ) : null; + + present.setMemoryCounter(memoryCounter); + } + + if (lengthReader != null) { + String metricsName = MetricsNameBuilder.streamMetricsKey( + lengthName, ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER + ); + + ORCProfile memoryCounter = enableMetrics ? new ORCMetricsWrapper( + metricsName, + COLUMN_READER_MEMORY, + ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER.getProfileUnit(), + metrics + ) : null; + + lengthReader.setMemoryCounter(memoryCounter); + } + + // metrics time cost of preparing (IO waiting + data steam reader constructing) + if (enableMetrics) { + preparingTimer.inc(System.nanoTime() - start); + } + } + + @Override + public void startAt(int rowGroupId, int elementPosition) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(rowGroupIncluded[rowGroupId]); + init(); + + long start = System.nanoTime(); + + // case 1: the column-reader has not been accessed, + // and the first access is the first effective row-group and the position is 0. + boolean isFirstAccess = (currentRowGroup == -1 && lastPosition == -1) + && elementPosition == 0 + && rowGroupId == 0; + + // case 2: the next access follows the last position in the same row-group. + boolean isConsecutive = rowGroupId == currentRowGroup && elementPosition == lastPosition; + + // case 3: the last access reach the last position of the row-group, and the next access is the next + // valid row-group starting at position 0. + boolean isNextRowGroup = currentRowGroup < rowGroupId + && elementPosition == 0 + && lastPosition == indexStride + && (currentRowGroup + 1 == rowGroupId); + + // It's in order. 
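+ // e.g. with indexStride = 10000, finishing row-group 2 at position 10000
+ // and then starting row-group 3 at position 0 matches isNextRowGroup,
+ // so no seek on the underlying streams is required.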
+ if (isFirstAccess || isConsecutive || isNextRowGroup) { + lastPosition = elementPosition; + currentRowGroup = rowGroupId; + // metrics + if (enableMetrics) { + seekTimer.inc(System.nanoTime() - start); + } + return; + } + + // It's not in order, need skip some position. + if (rowGroupId != currentRowGroup || elementPosition < lastPosition) { + // case 1: when given row group is different from the current group, seek to the position of it. + // case 2: when elementPosition <= lastPosition, we need go back to the start position of this row group. + seek(rowGroupId); + + long actualSkipRows = skipPresent(elementPosition); + + // skip on length int-reader and record the skipped length. + long lengthToSkip = 0; + if (lengthReader != null) { + for (int i = 0; i < actualSkipRows; ++i) { + lengthToSkip += lengthReader.next(); + } + } + + // skip on data InStream + if (dataStream != null) { + while (lengthToSkip > 0) { + lengthToSkip -= dataStream.skip(lengthToSkip); + } + } + + lastPosition = elementPosition; + currentRowGroup = rowGroupId; + } else if (elementPosition > lastPosition && elementPosition < indexStride) { + // case 3: when elementPosition > lastPosition and the group is same, just skip to given position. + long actualSkipRows = skipPresent(elementPosition - lastPosition); + + // skip on length int-reader and record the skipped length. + long lengthToSkip = 0; + if (lengthReader != null) { + for (int i = 0; i < actualSkipRows; ++i) { + lengthToSkip += lengthReader.next(); + } + } + + // skip on data InStream + if (dataStream != null) { + while (lengthToSkip > 0) { + lengthToSkip -= dataStream.skip(lengthToSkip); + } + } + + lastPosition = elementPosition; + } else if (elementPosition >= indexStride) { + // case 4: the position is out of range. + throw GeneralUtil.nestedException("Invalid element position: " + elementPosition); + } + // case 5: the elementPosition == lastPosition and rowGroupId is equal. + + // metrics + if (enableMetrics) { + seekTimer.inc(System.nanoTime() - start); + } + } + + // Try to skip rows on present stream and count down + // the actual rows need skipped by data stream. + protected long skipPresent(long rows) throws IOException { + if (present == null) { + return rows; + } + + long result = 0; + for (long c = 0; c < rows; ++c) { + // record the count of non-null values + // in range of [current_position, current_position + rows) + if (present.next() == 1) { + result += 1; + } + } + // It must be less than or equal to count of rows. + return result; + } + + @Override + public void seek(int rowGroupId) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + init(); + + // Find the position-provider of given column and row group. + PositionProvider positionProvider; + OrcProto.RowIndex[] rowIndices = orcIndex.getRowGroupIndex(); + OrcProto.RowIndexEntry entry = rowIndices[columnId].getEntry(rowGroupId); + // This is effectively a test for pre-ORC-569 files. + if (rowGroupId == 0 && entry.getPositionsCount() == 0) { + positionProvider = new RecordReaderImpl.ZeroPositionProvider(); + } else { + positionProvider = new RecordReaderImpl.PositionProviderImpl(entry); + } + + // NOTE: The order of seeking is strict! 
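+ // The position provider hands out the recorded index positions sequentially,
+ // so streams must consume them in the same order they were recorded:
+ // present first, then data, then length.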
+ if (present != null) { + present.seek(positionProvider); + } + if (dataStream != null) { + dataStream.seek(positionProvider); + } + if (lengthReader != null) { + lengthReader.seek(positionProvider); + } + + currentRowGroup = rowGroupId; + lastPosition = 0; + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof BlobBlock); + init(); + + long start = System.nanoTime(); + + BlobBlock block = (BlobBlock) randomAccessBlock; + boolean[] nulls = block.nulls(); + Preconditions.checkArgument(nulls != null && nulls.length == positionCount); + java.sql.Blob[] blobs = ((BlobBlock) randomAccessBlock).blobArray(); + + if (present == null) { + randomAccessBlock.setHasNull(false); + + if (lengthReader != null) { + for (int i = 0; i < positionCount; i++) { + // no null value. + long length = lengthReader.next(); + byte[] bytes = readBytes(length); + blobs[i] = new Blob(bytes); + nulls[i] = false; + lastPosition++; + } + } + + // destroy null array to save the memory. + block.destroyNulls(true); + } else { + randomAccessBlock.setHasNull(true); + + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + blobs[i] = null; + } else { + // if not null + long length = lengthReader.next(); + byte[] bytes = readBytes(length); + blobs[i] = new Blob(bytes); + nulls[i] = false; + } + lastPosition++; + } + } + + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } + + private byte[] readBytes(long length) throws IOException { + byte[] bytes = new byte[(int) length]; + int read = dataStream.read(bytes); + if (read == -1) { + throw new IOException("Failed to read blob with length: " + length); + } + return bytes; + } + + @Override + public void close() { + if (!isClosed.compareAndSet(false, true)) { + return; + } + + // 1. Clear the resources allocated in InStream + StreamName presentName = new StreamName(columnId, OrcProto.Stream.Kind.PRESENT); + StreamName dataName = new StreamName(columnId, OrcProto.Stream.Kind.DATA); + StreamName lengthName = new StreamName(columnId, OrcProto.Stream.Kind.LENGTH); + + if (inStreamMap != null) { + InStream presentStream = inStreamMap.get(presentName); + InStream dataStream = inStreamMap.get(dataName); + InStream lengthStream = inStreamMap.get(lengthName); + + if (presentStream != null) { + presentStream.close(); + } + + if (dataStream != null) { + dataStream.close(); + } + + if (lengthStream != null) { + lengthStream.close(); + } + } + + // 2. 
Clear the memory resources held by stream + long releasedBytes = 0L; + releasedBytes += stripeLoader.clearStream(presentName); + releasedBytes += stripeLoader.clearStream(dataName); + releasedBytes += stripeLoader.clearStream(lengthName); + + if (releasedBytes > 0) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(MessageFormat.format( + "Release the resource of work: {0}, columnId: {1}, bytes: {2}", + metrics.name(), columnId, releasedBytes + )); + } + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DirectEnumColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DirectEnumColumnReader.java new file mode 100644 index 000000000..51caec662 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DirectEnumColumnReader.java @@ -0,0 +1,120 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.chunk.EnumBlock; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.google.common.base.Preconditions; +import io.airlift.slice.DynamicSliceOutput; +import io.airlift.slice.Slice; +import io.airlift.slice.SliceOutput; +import org.apache.orc.customized.ORCDataOutput; +import org.apache.orc.impl.OrcIndex; + +import java.io.IOException; +import java.nio.charset.Charset; + +public class DirectEnumColumnReader extends DirectVarcharColumnReader { + + private final DataType dataType; + private final Charset charset; + + public DirectEnumColumnReader(int columnId, boolean isPrimaryKey, StripeLoader stripeLoader, OrcIndex orcIndex, + RuntimeMetrics metrics, int indexStride, boolean enableMetrics, DataType inputType) { + super(columnId, isPrimaryKey, stripeLoader, orcIndex, metrics, indexStride, enableMetrics); + this.dataType = inputType; + this.charset = Charset.forName(dataType.getCharsetName().getJavaCharset()); + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof EnumBlock); + init(); + + long start = System.nanoTime(); + + EnumBlock block = (EnumBlock) randomAccessBlock; + int[] offsets = block.getOffsets(); + boolean[] nulls = block.nulls(); + Preconditions.checkArgument(offsets != null && offsets.length == positionCount); + Preconditions.checkArgument(nulls != null && nulls.length == positionCount); + + long totalLength = 0; + if (present == null) { + randomAccessBlock.setHasNull(false); + + if 
(lengthReader != null) { + for (int i = 0; i < positionCount; i++) { + // no null value. + long length = lengthReader.next(); + totalLength += length; + + offsets[i] = (int) totalLength; + nulls[i] = false; + lastPosition++; + } + } + + // destroy null array to save the memory. + block.destroyNulls(true); + } else { + randomAccessBlock.setHasNull(true); + + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + offsets[i] = (int) totalLength; + } else { + // if not null + long length = lengthReader.next(); + totalLength += length; + + offsets[i] = (int) totalLength; + nulls[i] = false; + } + lastPosition++; + } + } + + // Read all bytes of stream into sliceOutput at once. + SliceOutput sliceOutput = new DynamicSliceOutput(positionCount); + ORCDataOutput dataOutput = new SliceOutputWrapper(sliceOutput); + int len = (int) totalLength; + while (len > 0) { + int bytesRead = dataStream.read(dataOutput, len); + if (bytesRead < 0) { + throw GeneralUtil.nestedException("Can't finish byte read from " + dataStream); + } + len -= bytesRead; + } + Slice data = sliceOutput.slice(); + block.setData(data.toStringUtf8().toCharArray()); + + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DirectJsonColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DirectJsonColumnReader.java new file mode 100644 index 000000000..0fb5442f7 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DirectJsonColumnReader.java @@ -0,0 +1,129 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.chunk.StringBlock; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.google.common.base.Preconditions; +import io.airlift.slice.DynamicSliceOutput; +import io.airlift.slice.Slice; +import io.airlift.slice.SliceOutput; +import org.apache.orc.customized.ORCDataOutput; +import org.apache.orc.impl.OrcIndex; + +import java.io.IOException; +import java.nio.charset.Charset; + +public class DirectJsonColumnReader extends DirectVarcharColumnReader { + + private final DataType dataType; + private final Charset charset; + + public DirectJsonColumnReader(int columnId, boolean isPrimaryKey, StripeLoader stripeLoader, OrcIndex orcIndex, + RuntimeMetrics metrics, int indexStride, boolean enableMetrics, DataType inputType) { + super(columnId, isPrimaryKey, stripeLoader, orcIndex, metrics, indexStride, enableMetrics); + this.dataType = inputType; + this.charset = Charset.forName(dataType.getCharsetName().getJavaCharset()); + } + + private String readJsonString(long length) throws IOException { + byte[] bytes = new byte[(int) length]; + int num = dataStream.read(bytes); + if (num < length) { + throw new IOException("Failed to read string with length: " + length); + } + return new String(bytes, charset); + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof StringBlock); + init(); + + long start = System.nanoTime(); + + StringBlock block = (StringBlock) randomAccessBlock; + int[] offsets = block.getOffsets(); + boolean[] nulls = block.nulls(); + Preconditions.checkArgument(offsets != null && offsets.length == positionCount); + Preconditions.checkArgument(nulls != null && nulls.length == positionCount); + + long totalLength = 0; + if (present == null) { + randomAccessBlock.setHasNull(false); + + if (lengthReader != null) { + for (int i = 0; i < positionCount; i++) { + // no null value. + long length = lengthReader.next(); + totalLength += length; + + offsets[i] = (int) totalLength; + nulls[i] = false; + lastPosition++; + } + } + + // destroy null array to save the memory. + block.destroyNulls(true); + } else { + randomAccessBlock.setHasNull(true); + + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + offsets[i] = (int) totalLength; + } else { + // if not null + long length = lengthReader.next(); + totalLength += length; + + offsets[i] = (int) totalLength; + nulls[i] = false; + } + lastPosition++; + } + } + + // Read all bytes of stream into sliceOutput at once. 
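+ // Note: this bulk path decodes the drained bytes as UTF-8 via toStringUtf8(),
+ // while the configured column charset is only applied by readJsonString
+ // for single-value reads.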
+ SliceOutput sliceOutput = new DynamicSliceOutput(positionCount); + ORCDataOutput dataOutput = new SliceOutputWrapper(sliceOutput); + int len = (int) totalLength; + while (len > 0) { + int bytesRead = dataStream.read(dataOutput, len); + if (bytesRead < 0) { + throw GeneralUtil.nestedException("Can't finish byte read from " + dataStream); + } + len -= bytesRead; + } + Slice data = sliceOutput.slice(); + block.setData(data.toStringUtf8().toCharArray()); + + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DirectVarcharColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DirectVarcharColumnReader.java new file mode 100644 index 000000000..4a9ab6071 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DirectVarcharColumnReader.java @@ -0,0 +1,517 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.chunk.SliceBlock; +import com.alibaba.polardbx.executor.operator.scan.AbstractColumnReader; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.MetricsNameBuilder; +import com.alibaba.polardbx.executor.operator.scan.metrics.ORCMetricsWrapper; +import com.alibaba.polardbx.executor.operator.scan.metrics.ProfileKeys; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.codahale.metrics.Counter; +import com.google.common.base.Preconditions; +import io.airlift.slice.DynamicSliceOutput; +import io.airlift.slice.Slice; +import io.airlift.slice.SliceOutput; +import org.apache.orc.OrcProto; +import org.apache.orc.customized.ORCDataOutput; +import org.apache.orc.customized.ORCProfile; +import org.apache.orc.impl.BitFieldReader; +import org.apache.orc.impl.InStream; +import org.apache.orc.impl.OrcIndex; +import org.apache.orc.impl.PositionProvider; +import org.apache.orc.impl.RecordReaderImpl; +import org.apache.orc.impl.RunLengthIntegerReaderV2; +import org.apache.orc.impl.StreamName; + +import java.io.IOException; +import java.text.MessageFormat; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; + +public class DirectVarcharColumnReader extends AbstractColumnReader { + protected final boolean enableMetrics; + // basic metadata + protected final StripeLoader stripeLoader; + // in preheat mode, all row-indexes in orc-index should not be null. 
+ protected final OrcIndex orcIndex; + protected final RuntimeMetrics metrics; + protected final int indexStride; + // inner states + protected AtomicBoolean openFailed; + protected AtomicBoolean isOpened; + // for semantic parser + protected BitFieldReader present; + protected InStream dataStream; + protected RunLengthIntegerReaderV2 lengthReader; + // record read positions + protected int currentRowGroup; + protected int lastPosition; + protected Counter parseTimer; + // open parameters + protected boolean[] rowGroupIncluded; + protected boolean await; + protected AtomicBoolean initializeOnlyOnce; + // IO results + protected Throwable throwable; + protected Map inStreamMap; + protected CompletableFuture> openFuture; + // execution time metrics. + protected Counter preparingTimer; + protected Counter seekTimer; + + public DirectVarcharColumnReader(int columnId, boolean isPrimaryKey, StripeLoader stripeLoader, OrcIndex orcIndex, + RuntimeMetrics metrics, int indexStride, boolean enableMetrics) { + super(columnId, isPrimaryKey); + this.stripeLoader = stripeLoader; + this.orcIndex = orcIndex; + this.metrics = metrics; + this.indexStride = indexStride; + this.enableMetrics = enableMetrics; + + // inner states + openFailed = new AtomicBoolean(false); + initializeOnlyOnce = new AtomicBoolean(false); + isOpened = new AtomicBoolean(false); + throwable = null; + inStreamMap = null; + openFuture = null; + + // for parser + present = null; + dataStream = null; + lengthReader = null; + + // read position control + // The initial value is -1 means it must seek to the correct row group firstly. + currentRowGroup = -1; + lastPosition = -1; + + rowGroupIncluded = null; + await = false; + + if (enableMetrics) { + preparingTimer = metrics.addCounter( + MetricsNameBuilder.columnMetricsKey(columnId, ProfileKeys.ORC_COLUMN_IO_PREPARING_TIMER), + COLUMN_READER_TIMER, + ProfileKeys.ORC_COLUMN_IO_PREPARING_TIMER.getProfileUnit() + ); + + seekTimer = metrics.addCounter( + MetricsNameBuilder.columnMetricsKey(columnId, ProfileKeys.ORC_COLUMN_SEEK_TIMER), + COLUMN_READER_TIMER, + ProfileKeys.ORC_COLUMN_SEEK_TIMER.getProfileUnit() + ); + + parseTimer = metrics.addCounter( + MetricsNameBuilder.columnMetricsKey(columnId, ProfileKeys.ORC_COLUMN_PARSE_TIMER), + COLUMN_READER_TIMER, + ProfileKeys.ORC_COLUMN_PARSE_TIMER.getProfileUnit() + ); + } + + } + + @Override + public boolean[] rowGroupIncluded() { + Preconditions.checkArgument(isOpened.get()); + return rowGroupIncluded; + } + + @Override + public boolean isOpened() { + return isOpened.get(); + } + + @Override + public void open(boolean await, boolean[] rowGroupIncluded) { + if (!isOpened.compareAndSet(false, true)) { + throw GeneralUtil.nestedException("It's not allowed to re-open this column reader."); + } + this.rowGroupIncluded = rowGroupIncluded; + this.await = await; + + // load the specified streams. + openFuture = stripeLoader.load(columnId, rowGroupIncluded); + + if (await) { + doWait(); + } + } + + @Override + public void open(CompletableFuture> loadFuture, + boolean await, boolean[] rowGroupIncluded) { + if (!isOpened.compareAndSet(false, true)) { + throw GeneralUtil.nestedException("It's not allowed to re-open this column reader."); + } + this.rowGroupIncluded = rowGroupIncluded; + this.await = await; + this.openFuture = loadFuture; + if (await) { + doWait(); + } + } + + // wait for open future and handle failure. 
+ private void doWait() { + try { + inStreamMap = openFuture.get(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } catch (ExecutionException e) { + throw new RuntimeException(e); + } + + if (throwable != null) { + // throw if failed. + throw GeneralUtil.nestedException(throwable); + } + } + + protected void init() throws IOException { + if (!initializeOnlyOnce.compareAndSet(false, true)) { + return; + } + + long start = System.nanoTime(); + if (!await) { + doWait(); + } + if (openFailed.get()) { + return; + } + + // Unlike StripePlanner in raw ORC SDK, the stream names and IO production are + // all determined at runtime. + StreamName presentName = new StreamName(columnId, OrcProto.Stream.Kind.PRESENT); + StreamName dataName = new StreamName(columnId, OrcProto.Stream.Kind.DATA); + StreamName lengthName = new StreamName(columnId, OrcProto.Stream.Kind.LENGTH); + + InStream presentStream = inStreamMap.get(presentName); + dataStream = inStreamMap.get(dataName); + InStream lengthStream = inStreamMap.get(lengthName); + + // initialize present and integer reader + present = presentStream == null ? null : new BitFieldReader(presentStream); + lengthReader = lengthStream == null ? null : new RunLengthIntegerReaderV2(lengthStream, false, true); + + // Add memory metrics. + if (present != null) { + String metricsName = MetricsNameBuilder.streamMetricsKey( + presentName, ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER + ); + + ORCProfile memoryCounter = enableMetrics ? new ORCMetricsWrapper( + metricsName, + COLUMN_READER_MEMORY, + ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER.getProfileUnit(), + metrics + ) : null; + + present.setMemoryCounter(memoryCounter); + } + + if (lengthReader != null) { + String metricsName = MetricsNameBuilder.streamMetricsKey( + lengthName, ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER + ); + + ORCProfile memoryCounter = enableMetrics ? new ORCMetricsWrapper( + metricsName, + COLUMN_READER_MEMORY, + ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER.getProfileUnit(), + metrics + ) : null; + + lengthReader.setMemoryCounter(memoryCounter); + } + + // metrics time cost of preparing (IO waiting + data steam reader constructing) + if (enableMetrics) { + preparingTimer.inc(System.nanoTime() - start); + } + } + + @Override + public void startAt(int rowGroupId, int elementPosition) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(rowGroupIncluded[rowGroupId]); + init(); + + long start = System.nanoTime(); + + // case 1: the column-reader has not been accessed, + // and the first access is the first effective row-group and the position is 0. + boolean isFirstAccess = (currentRowGroup == -1 && lastPosition == -1) + && elementPosition == 0 + && rowGroupId == 0; + + // case 2: the next access follows the last position in the same row-group. + boolean isConsecutive = rowGroupId == currentRowGroup && elementPosition == lastPosition; + + // case 3: the last access reach the last position of the row-group, and the next access is the next + // valid row-group starting at position 0. + boolean isNextRowGroup = currentRowGroup < rowGroupId + && elementPosition == 0 + && lastPosition == indexStride + && (currentRowGroup + 1 == rowGroupId); + + // It's in order. 
+ if (isFirstAccess || isConsecutive || isNextRowGroup) { + lastPosition = elementPosition; + currentRowGroup = rowGroupId; + // metrics + if (enableMetrics) { + seekTimer.inc(System.nanoTime() - start); + } + return; + } + + // It's not in order, need skip some position. + if (rowGroupId != currentRowGroup || elementPosition < lastPosition) { + // case 1: when given row group is different from the current group, seek to the position of it. + // case 2: when elementPosition <= lastPosition, we need go back to the start position of this row group. + seek(rowGroupId); + + long actualSkipRows = skipPresent(elementPosition); + + // skip on length int-reader and record the skipped length. + long lengthToSkip = 0; + if (lengthReader != null) { + for (int i = 0; i < actualSkipRows; ++i) { + lengthToSkip += lengthReader.next(); + } + } + + // skip on data InStream + if (dataStream != null) { + while (lengthToSkip > 0) { + lengthToSkip -= dataStream.skip(lengthToSkip); + } + } + + lastPosition = elementPosition; + currentRowGroup = rowGroupId; + } else if (elementPosition > lastPosition && elementPosition < indexStride) { + // case 3: when elementPosition > lastPosition and the group is same, just skip to given position. + long actualSkipRows = skipPresent(elementPosition - lastPosition); + + // skip on length int-reader and record the skipped length. + long lengthToSkip = 0; + if (lengthReader != null) { + for (int i = 0; i < actualSkipRows; ++i) { + lengthToSkip += lengthReader.next(); + } + } + + // skip on data InStream + if (dataStream != null) { + while (lengthToSkip > 0) { + lengthToSkip -= dataStream.skip(lengthToSkip); + } + } + + lastPosition = elementPosition; + } else if (elementPosition >= indexStride) { + // case 4: the position is out of range. + throw GeneralUtil.nestedException("Invalid element position: " + elementPosition); + } + // case 5: the elementPosition == lastPosition and rowGroupId is equal. + + // metrics + if (enableMetrics) { + seekTimer.inc(System.nanoTime() - start); + } + } + + // Try to skip rows on present stream and count down + // the actual rows need skipped by data stream. + protected long skipPresent(long rows) throws IOException { + if (present == null) { + return rows; + } + + long result = 0; + for (long c = 0; c < rows; ++c) { + // record the count of non-null values + // in range of [current_position, current_position + rows) + if (present.next() == 1) { + result += 1; + } + } + // It must be less than or equal to count of rows. + return result; + } + + @Override + public void seek(int rowGroupId) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + init(); + + // Find the position-provider of given column and row group. + PositionProvider positionProvider; + OrcProto.RowIndex[] rowIndices = orcIndex.getRowGroupIndex(); + OrcProto.RowIndexEntry entry = rowIndices[columnId].getEntry(rowGroupId); + // This is effectively a test for pre-ORC-569 files. + if (rowGroupId == 0 && entry.getPositionsCount() == 0) { + positionProvider = new RecordReaderImpl.ZeroPositionProvider(); + } else { + positionProvider = new RecordReaderImpl.PositionProviderImpl(entry); + } + + // NOTE: The order of seeking is strict! 
+ if (present != null) { + present.seek(positionProvider); + } + if (dataStream != null) { + dataStream.seek(positionProvider); + } + if (lengthReader != null) { + lengthReader.seek(positionProvider); + } + + currentRowGroup = rowGroupId; + lastPosition = 0; + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof SliceBlock); + init(); + + long start = System.nanoTime(); + + SliceBlock block = (SliceBlock) randomAccessBlock; + int[] offsets = block.getOffsets(); + boolean[] nulls = block.nulls(); + Preconditions.checkArgument(offsets != null && offsets.length == positionCount); + Preconditions.checkArgument(nulls != null && nulls.length == positionCount); + + long totalLength = 0; + if (present == null) { + randomAccessBlock.setHasNull(false); + + if (lengthReader != null) { + for (int i = 0; i < positionCount; i++) { + // no null value. + long length = lengthReader.next(); + totalLength += length; + + offsets[i] = (int) totalLength; + nulls[i] = false; + lastPosition++; + } + } + + // destroy null array to save the memory. + block.destroyNulls(true); + } else { + randomAccessBlock.setHasNull(true); + + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + offsets[i] = (int) totalLength; + } else { + // if not null + long length = lengthReader.next(); + totalLength += length; + + offsets[i] = (int) totalLength; + nulls[i] = false; + } + lastPosition++; + } + } + + // Read all bytes of stream into sliceOutput at once. + SliceOutput sliceOutput = new DynamicSliceOutput(positionCount); + ORCDataOutput dataOutput = new SliceOutputWrapper(sliceOutput); + int len = (int) totalLength; + while (len > 0) { + int bytesRead = dataStream.read(dataOutput, len); + if (bytesRead < 0) { + throw GeneralUtil.nestedException("Can't finish byte read from " + dataStream); + } + len -= bytesRead; + } + Slice data = sliceOutput.slice(); + block.setData(data); + + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } + + @Override + public void close() { + if (!isClosed.compareAndSet(false, true)) { + return; + } + + // 1. Clear the resources allocated in InStream + StreamName presentName = new StreamName(columnId, OrcProto.Stream.Kind.PRESENT); + StreamName dataName = new StreamName(columnId, OrcProto.Stream.Kind.DATA); + StreamName lengthName = new StreamName(columnId, OrcProto.Stream.Kind.LENGTH); + + if (inStreamMap != null) { + InStream presentStream = inStreamMap.get(presentName); + InStream dataStream = inStreamMap.get(dataName); + InStream lengthStream = inStreamMap.get(lengthName); + + if (presentStream != null) { + presentStream.close(); + } + + if (dataStream != null) { + dataStream.close(); + } + + if (lengthStream != null) { + lengthStream.close(); + } + } + + // 2. 
Clear the memory resources held by stream + long releasedBytes = 0L; + releasedBytes += stripeLoader.clearStream(presentName); + releasedBytes += stripeLoader.clearStream(dataName); + releasedBytes += stripeLoader.clearStream(lengthName); + + if (releasedBytes > 0) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(MessageFormat.format( + "Release the resource of work: {0}, columnId: {1}, bytes: {2}", + metrics.name(), columnId, releasedBytes + )); + } + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DoubleBlockFloatColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DoubleBlockFloatColumnReader.java new file mode 100644 index 000000000..b0862700d --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DoubleBlockFloatColumnReader.java @@ -0,0 +1,88 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.executor.chunk.DoubleBlock; +import com.alibaba.polardbx.executor.chunk.FloatBlock; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.google.common.base.Preconditions; +import org.apache.orc.impl.OrcIndex; + +import java.io.IOException; + +public class DoubleBlockFloatColumnReader extends FloatColumnReader { + + public DoubleBlockFloatColumnReader(int columnId, boolean isPrimaryKey, + StripeLoader stripeLoader, + OrcIndex orcIndex, + RuntimeMetrics metrics, + int indexStride, boolean enableMetrics) { + super(columnId, isPrimaryKey, stripeLoader, orcIndex, metrics, indexStride, enableMetrics); + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof DoubleBlock); + init(); + + long start = System.nanoTime(); + + DoubleBlock block = (DoubleBlock) randomAccessBlock; + double[] vector = block.doubleArray(); + boolean[] nulls = block.nulls(); + Preconditions.checkArgument(nulls != null && nulls.length == positionCount); + + if (present == null) { + randomAccessBlock.setHasNull(false); + + for (int i = 0; i < positionCount; i++) { + // no null value. + float floatVal = utils.readFloat(dataStream); + vector[i] = floatVal; + lastPosition++; + } + // destroy null array to save the memory. 
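+ // Without a present stream every value is non-null, so the null flags can be dropped.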
+ block.destroyNulls(true);
+ } else {
+ randomAccessBlock.setHasNull(true);
+
+ // there are some null values
+ for (int i = 0; i < positionCount; i++) {
+ if (present.next() != 1) {
+ // for present
+ vector[i] = 0;
+ nulls[i] = true;
+ } else {
+ // if not null
+ float floatVal = utils.readFloat(dataStream);
+ vector[i] = floatVal;
+ nulls[i] = false;
+ }
+ lastPosition++;
+ }
+ }
+
+ // metrics
+ if (enableMetrics) {
+ parseTimer.inc(System.nanoTime() - start);
+ }
+ }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DoubleColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DoubleColumnReader.java
new file mode 100644
index 000000000..0f91a8cb4
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DoubleColumnReader.java
@@ -0,0 +1,462 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan.impl;
+
+import com.alibaba.polardbx.common.utils.GeneralUtil;
+import com.alibaba.polardbx.executor.chunk.BlockBuilder;
+import com.alibaba.polardbx.executor.chunk.DoubleBlock;
+import com.alibaba.polardbx.executor.chunk.DoubleBlockBuilder;
+import com.alibaba.polardbx.executor.chunk.FloatBlock;
+import com.alibaba.polardbx.executor.chunk.FloatBlockBuilder;
+import com.alibaba.polardbx.executor.chunk.RandomAccessBlock;
+import com.alibaba.polardbx.executor.operator.scan.AbstractColumnReader;
+import com.alibaba.polardbx.executor.operator.scan.StripeLoader;
+import com.alibaba.polardbx.executor.operator.scan.metrics.MetricsNameBuilder;
+import com.alibaba.polardbx.executor.operator.scan.metrics.ORCMetricsWrapper;
+import com.alibaba.polardbx.executor.operator.scan.metrics.ProfileKeys;
+import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics;
+import com.codahale.metrics.Counter;
+import com.google.common.base.Preconditions;
+import org.apache.orc.OrcProto;
+import org.apache.orc.customized.ORCProfile;
+import org.apache.orc.impl.BitFieldReader;
+import org.apache.orc.impl.InStream;
+import org.apache.orc.impl.OrcIndex;
+import org.apache.orc.impl.PositionProvider;
+import org.apache.orc.impl.RecordReaderImpl;
+import org.apache.orc.impl.SerializationUtils;
+import org.apache.orc.impl.StreamName;
+
+import java.io.IOException;
+import java.text.MessageFormat;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+public class DoubleColumnReader extends AbstractColumnReader {
+ // basic metadata
+ private final StripeLoader stripeLoader;
+
+ // in preheat mode, all row-indexes in orc-index should not be null.
+ private final OrcIndex orcIndex; + private final RuntimeMetrics metrics; + + private final int indexStride; + + private final boolean enableMetrics; + + // open parameters + private boolean[] rowGroupIncluded; + private boolean await; + + // inner states + private AtomicBoolean openFailed; + private AtomicBoolean initializeOnlyOnce; + private AtomicBoolean isOpened; + + // IO results + private Throwable throwable; + private Map inStreamMap; + private CompletableFuture> openFuture; + + // for semantic parser + protected BitFieldReader present; + protected InStream dataStream; + + // record read positions + private int currentRowGroup; + private int lastPosition; + + // execution time metrics. + private Counter preparingTimer; + private Counter seekTimer; + private Counter parseTimer; + + private final SerializationUtils utils; + + public DoubleColumnReader(int columnId, boolean isPrimaryKey, StripeLoader stripeLoader, OrcIndex orcIndex, + RuntimeMetrics metrics, int indexStride, boolean enableMetrics) { + super(columnId, isPrimaryKey); + this.stripeLoader = stripeLoader; + this.orcIndex = orcIndex; + this.metrics = metrics; + this.indexStride = indexStride; + this.enableMetrics = enableMetrics; + + // inner states + openFailed = new AtomicBoolean(false); + initializeOnlyOnce = new AtomicBoolean(false); + isOpened = new AtomicBoolean(false); + throwable = null; + inStreamMap = null; + openFuture = null; + + // for parser + present = null; + dataStream = null; + + // read position control + // The initial value is -1 means it must seek to the correct row group firstly. + currentRowGroup = -1; + lastPosition = -1; + + rowGroupIncluded = null; + await = false; + + utils = new SerializationUtils(); + + if (enableMetrics) { + preparingTimer = metrics.addCounter( + MetricsNameBuilder.columnMetricsKey(columnId, ProfileKeys.ORC_COLUMN_IO_PREPARING_TIMER), + COLUMN_READER_TIMER, + ProfileKeys.ORC_COLUMN_IO_PREPARING_TIMER.getProfileUnit() + ); + + seekTimer = metrics.addCounter( + MetricsNameBuilder.columnMetricsKey(columnId, ProfileKeys.ORC_COLUMN_SEEK_TIMER), + COLUMN_READER_TIMER, + ProfileKeys.ORC_COLUMN_SEEK_TIMER.getProfileUnit() + ); + + parseTimer = metrics.addCounter( + MetricsNameBuilder.columnMetricsKey(columnId, ProfileKeys.ORC_COLUMN_PARSE_TIMER), + COLUMN_READER_TIMER, + ProfileKeys.ORC_COLUMN_PARSE_TIMER.getProfileUnit() + ); + } + + } + + @Override + public boolean[] rowGroupIncluded() { + Preconditions.checkArgument(isOpened.get()); + return rowGroupIncluded; + } + + @Override + public boolean isOpened() { + return isOpened.get(); + } + + @Override + public void open(boolean await, boolean[] rowGroupIncluded) { + if (!isOpened.compareAndSet(false, true)) { + throw GeneralUtil.nestedException("It's not allowed to re-open this column reader."); + } + this.rowGroupIncluded = rowGroupIncluded; + this.await = await; + + // load the specified streams. + openFuture = stripeLoader.load(columnId, rowGroupIncluded); + + if (await) { + doWait(); + } + } + + @Override + public void open(CompletableFuture> loadFuture, + boolean await, boolean[] rowGroupIncluded) { + if (!isOpened.compareAndSet(false, true)) { + throw GeneralUtil.nestedException("It's not allowed to re-open this column reader."); + } + this.rowGroupIncluded = rowGroupIncluded; + this.await = await; + this.openFuture = loadFuture; + if (await) { + doWait(); + } + } + + // wait for open future and handle failure. 
+ private void doWait() { + try { + inStreamMap = openFuture.get(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } catch (ExecutionException e) { + throw new RuntimeException(e); + } + + if (throwable != null) { + // throw if failed. + throw GeneralUtil.nestedException(throwable); + } + } + + protected void init() throws IOException { + if (!initializeOnlyOnce.compareAndSet(false, true)) { + return; + } + + long start = System.nanoTime(); + if (!await) { + doWait(); + } + if (openFailed.get()) { + return; + } + + // Unlike StripePlanner in raw ORC SDK, the stream names and IO production are + // all determined at runtime. + StreamName presentName = new StreamName(columnId, OrcProto.Stream.Kind.PRESENT); + StreamName dataName = new StreamName(columnId, OrcProto.Stream.Kind.DATA); + StreamName lengthName = new StreamName(columnId, OrcProto.Stream.Kind.LENGTH); + + InStream presentStream = inStreamMap.get(presentName); + dataStream = inStreamMap.get(dataName); + + // initialize present and integer reader + present = presentStream == null ? null : new BitFieldReader(presentStream); + + // Add memory metrics. + if (present != null) { + String metricsName = MetricsNameBuilder.streamMetricsKey( + presentName, ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER + ); + + ORCProfile memoryCounter = enableMetrics ? new ORCMetricsWrapper( + metricsName, + COLUMN_READER_MEMORY, + ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER.getProfileUnit(), + metrics + ) : null; + + present.setMemoryCounter(memoryCounter); + } + + // metrics time cost of preparing (IO waiting + data steam reader constructing) + if (enableMetrics) { + preparingTimer.inc(System.nanoTime() - start); + } + } + + @Override + public void startAt(int rowGroupId, int elementPosition) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(rowGroupIncluded[rowGroupId]); + init(); + + long start = System.nanoTime(); + + // case 1: the column-reader has not been accessed, + // and the first access is the first effective row-group and the position is 0. + boolean isFirstAccess = (currentRowGroup == -1 && lastPosition == -1) + && elementPosition == 0 + && rowGroupId == 0; + + // case 2: the next access follows the last position in the same row-group. + boolean isConsecutive = rowGroupId == currentRowGroup && elementPosition == lastPosition; + + // case 3: the last access reach the last position of the row-group, and the next access is the next + // valid row-group starting at position 0. + boolean isNextRowGroup = currentRowGroup < rowGroupId + && elementPosition == 0 + && lastPosition == indexStride + && (currentRowGroup + 1 == rowGroupId); + + // It's in order. + if (isFirstAccess || isConsecutive || isNextRowGroup) { + lastPosition = elementPosition; + currentRowGroup = rowGroupId; + // metrics + if (enableMetrics) { + seekTimer.inc(System.nanoTime() - start); + } + return; + } + + // It's not in order, need skip some position. + if (rowGroupId != currentRowGroup || elementPosition < lastPosition) { + // case 1: when given row group is different from the current group, seek to the position of it. + // case 2: when elementPosition <= lastPosition, we need go back to the start position of this row group. 
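+ // Doubles are fixed-width, so skipping n non-null values just skips
+ // n * 8 bytes on the data stream (see skip below); no length stream is involved.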
+ seek(rowGroupId); + + skip(skipPresent(elementPosition)); + + } else if (elementPosition > lastPosition && elementPosition < indexStride) { + // case 3: when elementPosition > lastPosition and the group is same, just skip to given position. + skip(skipPresent(elementPosition - lastPosition)); + + } else if (elementPosition >= indexStride) { + // case 4: the position is out of range. + throw GeneralUtil.nestedException("Invalid element position: " + elementPosition); + } + // case 5: the elementPosition == lastPosition and rowGroupId is equal. + lastPosition = elementPosition; + currentRowGroup = rowGroupId; + + // metrics + if (enableMetrics) { + seekTimer.inc(System.nanoTime() - start); + } + } + + private void skip(long numValues) throws IOException { + if (dataStream != null) { + long len = numValues * 8; + while (len > 0) { + len -= dataStream.skip(len); + } + } + } + + // Try to skip rows on present stream and count down + // the actual rows need skipped by data stream. + protected long skipPresent(long rows) throws IOException { + if (present == null) { + return rows; + } + + long result = 0; + for (long c = 0; c < rows; ++c) { + // record the count of non-null values + // in range of [current_position, current_position + rows) + if (present.next() == 1) { + result += 1; + } + } + // It must be less than or equal to count of rows. + return result; + } + + @Override + public void seek(int rowGroupId) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + init(); + + // Find the position-provider of given column and row group. + PositionProvider positionProvider; + OrcProto.RowIndex[] rowIndices = orcIndex.getRowGroupIndex(); + OrcProto.RowIndexEntry entry = rowIndices[columnId].getEntry(rowGroupId); + // This is effectively a test for pre-ORC-569 files. + if (rowGroupId == 0 && entry.getPositionsCount() == 0) { + positionProvider = new RecordReaderImpl.ZeroPositionProvider(); + } else { + positionProvider = new RecordReaderImpl.PositionProviderImpl(entry); + } + + // NOTE: The order of seeking is strict! + if (present != null) { + present.seek(positionProvider); + } + if (dataStream != null) { + dataStream.seek(positionProvider); + } + + currentRowGroup = rowGroupId; + lastPosition = 0; + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof DoubleBlock); + init(); + + long start = System.nanoTime(); + + DoubleBlock block = (DoubleBlock) randomAccessBlock; + double[] vector = block.doubleArray(); + boolean[] nulls = block.nulls(); + Preconditions.checkArgument(nulls != null && nulls.length == positionCount); + + if (present == null) { + randomAccessBlock.setHasNull(false); + + for (int i = 0; i < positionCount; i++) { + // no null value. + double doubleVal = utils.readDouble(dataStream); + vector[i] = doubleVal; + lastPosition++; + } + // destroy null array to save the memory. 
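+            // (Assumed Block contract: once the null vector is destroyed and
+            // hasNull is false, consumers treat every position as non-null.)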
+ block.destroyNulls(true); + } else { + randomAccessBlock.setHasNull(true); + + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + vector[i] = 0; + nulls[i] = true; + } else { + // if not null + double doubleVal = utils.readDouble(dataStream); + vector[i] = doubleVal; + nulls[i] = false; + } + lastPosition++; + } + } + + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } + + @Override + public void close() { + if (!isClosed.compareAndSet(false, true)) { + return; + } + + // 1. Clear the resources allocated in InStream + StreamName presentName = new StreamName(columnId, OrcProto.Stream.Kind.PRESENT); + StreamName dataName = new StreamName(columnId, OrcProto.Stream.Kind.DATA); + StreamName lengthName = new StreamName(columnId, OrcProto.Stream.Kind.LENGTH); + + if (inStreamMap != null) { + InStream presentStream = inStreamMap.get(presentName); + InStream dataStream = inStreamMap.get(dataName); + InStream lengthStream = inStreamMap.get(lengthName); + + if (presentStream != null) { + presentStream.close(); + } + + if (dataStream != null) { + dataStream.close(); + } + + if (lengthStream != null) { + lengthStream.close(); + } + } + + // 2. Clear the memory resources held by stream + long releasedBytes = 0L; + releasedBytes += stripeLoader.clearStream(presentName); + releasedBytes += stripeLoader.clearStream(dataName); + releasedBytes += stripeLoader.clearStream(lengthName); + + if (releasedBytes > 0) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(MessageFormat.format( + "Release the resource of work: {0}, columnId: {1}, bytes: {2}", + metrics.name(), columnId, releasedBytes + )); + } + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DummyBlockLoader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DummyBlockLoader.java new file mode 100644 index 000000000..d42cb608f --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/DummyBlockLoader.java @@ -0,0 +1,61 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.columnar.BlockLoader; +import com.alibaba.polardbx.executor.operator.scan.CacheReader; +import com.alibaba.polardbx.executor.operator.scan.ColumnReader; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; + +import java.io.IOException; + +public class DummyBlockLoader implements BlockLoader { + + private final int startPosition; + private final int positionCount; + + public DummyBlockLoader(int startPosition, int positionCount) { + this.startPosition = startPosition; + this.positionCount = positionCount; + } + + @Override + public Block load(DataType dataType, int[] selection, int selSize) throws IOException { + return null; + } + + @Override + public ColumnReader getColumnReader() { + return null; + } + + @Override + public CacheReader getCacheReader() { + return null; + } + + @Override + public int startPosition() { + return startPosition; + } + + @Override + public int positionCount() { + return positionCount; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/FilterPriorityScanWork.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/FilterPriorityScanWork.java new file mode 100644 index 000000000..87283808c --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/FilterPriorityScanWork.java @@ -0,0 +1,365 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.executor.operator.scan.impl;
+
+import com.alibaba.polardbx.common.utils.bloomfilter.RFBloomFilter;
+import com.alibaba.polardbx.executor.archive.reader.OSSColumnTransformer;
+import com.alibaba.polardbx.executor.chunk.Block;
+import com.alibaba.polardbx.executor.chunk.Chunk;
+import com.alibaba.polardbx.executor.chunk.columnar.LazyBlock;
+import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItem;
+import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItemKey;
+import com.alibaba.polardbx.executor.mpp.planner.FragmentRFManager;
+import com.alibaba.polardbx.executor.operator.scan.BlockCacheManager;
+import com.alibaba.polardbx.executor.operator.scan.ColumnReader;
+import com.alibaba.polardbx.executor.operator.scan.LazyEvaluator;
+import com.alibaba.polardbx.executor.operator.scan.LogicalRowGroup;
+import com.alibaba.polardbx.executor.operator.scan.RowGroupIterator;
+import com.alibaba.polardbx.executor.operator.scan.RowGroupReader;
+import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics;
+import com.alibaba.polardbx.optimizer.statis.OperatorStatistics;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.Path;
+import org.apache.orc.ColumnStatistics;
+import org.roaringbitmap.RoaringBitmap;
+
+import java.util.ArrayList;
+import java.util.BitSet;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+/**
+ * A scan work that gives priority to filter evaluation: filter columns are
+ * loaded and evaluated first, and project columns are only loaded for the
+ * row groups that survive the filter.
+ */
+public class FilterPriorityScanWork extends AbstractScanWork {
+
+    public static final int INITIAL_LIST_CAPACITY = 16;
+    private final boolean activeLoading;
+    private final int chunkLimit;
+    private final boolean useInFlightBlockCache;
+
+    /**
+     * The Fragment-level runtime filter manager.
+     */
+    private final FragmentRFManager fragmentRFManager;
+
+    /**
+     * Record the actual file column channel for a given item key.
+     */
+    private final Map<FragmentRFItemKey, Integer> rfFilterRefInFileMap;
+
+    /**
+     * Record the fetched bloom filters for each item key.
+     */
+    private Map<FragmentRFItemKey, RFBloomFilter[]> rfBloomFilters;
+
+    private final OperatorStatistics operatorStatistics;
+
+    private volatile RFLazyEvaluator rfEvaluator;
+
+    /**
+     * Whether to skip evaluation: true only when there is no push-down filter.
+     * If the evaluator is a constant expression, the evaluation must not be skipped.
+     */
+    private boolean skipEvaluation;
+
+    public FilterPriorityScanWork(String workId,
+                                  RuntimeMetrics metrics,
+                                  boolean enableMetrics,
+                                  LazyEvaluator<Chunk, BitSet> lazyEvaluator,
+                                  RowGroupIterator<Block, ColumnStatistics> rgIterator,
+                                  RoaringBitmap deletionBitmap,
+                                  MorselColumnarSplit.ScanRange scanRange,
+                                  List<Integer> inputRefsForFilter,
+                                  List<Integer> inputRefsForProject,
+                                  int partNum,
+                                  int nodePartCount, boolean activeLoading, int chunkLimit,
+                                  boolean useInFlightBlockCache, FragmentRFManager fragmentRFManager,
+                                  Map<FragmentRFItemKey, Integer> rfFilterRefInFileMap,
+                                  OperatorStatistics operatorStatistics,
+                                  OSSColumnTransformer columnTransformer) {
+        super(workId, metrics, enableMetrics, lazyEvaluator, rgIterator, deletionBitmap, scanRange, inputRefsForFilter,
+            inputRefsForProject, partNum, nodePartCount, columnTransformer);
+        this.activeLoading = activeLoading;
+        this.chunkLimit = chunkLimit;
+        this.useInFlightBlockCache = useInFlightBlockCache;
+        this.fragmentRFManager = fragmentRFManager;
+        this.rfFilterRefInFileMap = rfFilterRefInFileMap;
+        this.rfBloomFilters = new HashMap<>();
+        this.operatorStatistics = operatorStatistics;
+
+        // Check whether we should use the skip-evaluation mode.
+        int filterColumns = inputRefsForFilter.size();
+        this.skipEvaluation = lazyEvaluator == null && filterColumns == 0;
+
+        if (this.fragmentRFManager != null && skipEvaluation) {
+            // create a light-weight evaluator for the runtime filter.
+            this.rfEvaluator = new RFLazyEvaluator(fragmentRFManager, operatorStatistics, rfBloomFilters);
+        }
+
+        if (this.fragmentRFManager != null && lazyEvaluator != null) {
+            // register the RF into the predicate.
+            ((DefaultLazyEvaluator) lazyEvaluator).registerRF(fragmentRFManager, operatorStatistics, rfBloomFilters);
+        }
+    }
+
+    @Override
+    protected void handleNextWork() throws Throwable {
+        final Path filePath = rgIterator.filePath();
+        final int stripeId = rgIterator.stripeId();
+
+        // Not all row groups should be loaded, only those selected by the pruner.
+        final boolean[] prunedRowGroupBitmap = rgIterator.rgIncluded();
+        final int rowGroupCount = prunedRowGroupBitmap.length;
+        final BlockCacheManager blockCacheManager = rgIterator.getCacheManager();
+
+        // Get and lazily evaluate chunks until the row group count exceeds the threshold.
+        // NOTE: the row groups and chunks must be handled in order.
+        final Map<Integer, List<Chunk>> chunksWithGroup = new TreeMap<>();
+        final List<Integer> selectedRowGroups = new ArrayList<>();
+
+        // For the filter columns, initialize or open the related modules.
+        int filterColumns = inputRefsForFilter.size();
+        if (filterColumns == 1) {
+            // use single IO if filter columns = 1
+            final Integer filterColId = columnTransformer.getLocInOrc(chunkRefMap[inputRefsForFilter.get(0)]);
+            if (filterColId != null) {
+                singleIO(filterColId, filePath, stripeId,
+                    prunedRowGroupBitmap, blockCacheManager, useInFlightBlockCache);
+            }
+        } else if (filterColumns > 1) {
+            // use merging IO if filter columns > 1.
+            mergeIO(filePath, stripeId,
+                inputRefsForFilter,
+                blockCacheManager,
+                prunedRowGroupBitmap,
+                useInFlightBlockCache);
+        }
+
+        boolean[] bitmap = new boolean[chunkLimit];
+
+        while (!isCanceled && rgIterator.hasNext()) {
+            rgIterator.next();
+            LogicalRowGroup<Block, ColumnStatistics> logicalRowGroup = rgIterator.current();
+            final int rowGroupId = logicalRowGroup.groupId();
+
+            // The row group id in the iterator must be valid.
+            Preconditions.checkArgument(prunedRowGroupBitmap[rowGroupId]);
+
+            // A flag for each row group indicating whether at least one chunk in it is selected.
+            boolean rgSelected = false;
+
+            int handledChunksBeforeRF = 0;
+            Chunk chunk;
+            RowGroupReader<Chunk> rowGroupReader = logicalRowGroup.getReader();
+            while ((chunk = rowGroupReader.nextBatch()) != null) {
+
+                // Check the runtime filter and invoke single IO before evaluation.
+                if (fragmentRFManager != null) {
+                    for (FragmentRFItemKey itemKey : rfFilterRefInFileMap.keySet()) {
+
+                        if (rfBloomFilters.get(itemKey) == null) {
+
+                            // Try to fetch the runtime filter of the given item key.
+                            FragmentRFItem item = fragmentRFManager.getAllItems().get(itemKey);
+                            RFBloomFilter[] bfArray = item.getRF();
+
+                            if (bfArray != null) {
+                                // Succeeded in fetching the generated runtime filter.
+                                rfBloomFilters.put(itemKey, bfArray);
+
+                                // use single IO to open the filter column.
+                                final int filterRefInFile = rfFilterRefInFileMap.get(itemKey);
+                                final int filterColId = filterRefInFile + 1;
+                                singleIO(filterColId, filePath, stripeId,
+                                    prunedRowGroupBitmap, blockCacheManager, useInFlightBlockCache);
+                            }
+                        }
+                    }
+                }
+
+                int[] batchRange = rowGroupReader.batchRange();
+                if (skipEvaluation) {
+
+                    int[] preSelection = null;
+                    if (rfEvaluator != null) {
+                        // Filter the chunk using the fragment RF.
+                        int selectCount =
+                            rfEvaluator.eval(chunk, batchRange[0], batchRange[1], deletionBitmap, bitmap);
+
+                        preSelection = selectionOf(bitmap, selectCount);
+                    } else {
+                        preSelection = selectionOf(batchRange, deletionBitmap);
+                    }
+
+                    if (preSelection != null) {
+                        // rebuild the chunk according to the project refs.
+                        chunk = rebuildProject(chunk, preSelection, preSelection.length);
+                    }
+
+                    // No evaluation, just buffer the unloaded chunks.
+                    List<Chunk> chunksInGroup =
+                        chunksWithGroup.computeIfAbsent(rowGroupId, any -> new ArrayList<>(INITIAL_LIST_CAPACITY));
+                    chunksInGroup.add(chunk);
+                    rgSelected = true;
+                    continue;
+                }
+
+                // Proactively load the filter blocks.
+                for (int filterRef : inputRefsForFilter) {
+                    int chunkIndex = chunkRefMap[filterRef];
+                    Preconditions.checkArgument(chunkIndex >= 0);
+
+                    // all blocks in the chunk are lazy
+                    // NOTE: explicit type cast?
+                    LazyBlock filterBlock = (LazyBlock) chunk.getBlock(chunkIndex);
+
+                    // Proactively invoke loading; otherwise it would be loaded during evaluation.
+                    filterBlock.load();
+                }
+
+                long start = System.nanoTime();
+
+                // Get the selection array of this range [n * 1000, (n+1) * 1000] in the row group,
+                // and then evaluate the filter.
+                int selectCount =
+                    lazyEvaluator.eval(chunk, batchRange[0], batchRange[1], deletionBitmap, bitmap);
+
+                // Check for selected positions in the selection array,
+                // and mark whether this row group is selected or not.
+                boolean hasSelectedPositions = selectCount > 0;
+
+                rgSelected |= hasSelectedPositions;
+                if (!hasSelectedPositions) {
+                    // if all positions are filtered out, skip to the next chunk.
+                    if (enableMetrics) {
+                        evaluationTimer.inc(System.nanoTime() - start);
+                    }
+
+                    // The created chunk and block-loader will be abandoned here.
+                    releaseRef(chunk);
+                    continue;
+                }
+
+                Chunk projectChunk;
+                if (selectCount == chunk.getPositionCount()) {
+                    projectChunk = rebuildProject(chunk);
+                } else {
+                    // Hold this chunk until all row groups in the scan work are handled.
+                    int[] selection = selectionOf(bitmap, selectCount);
+                    if (enableMetrics) {
+                        evaluationTimer.inc(System.nanoTime() - start);
+                    }
+
+                    // rebuild the chunk according to the project refs.
+                    projectChunk = rebuildProject(chunk, selection, selection.length);
+                }
+
+                List<Chunk> chunksInGroup = chunksWithGroup.computeIfAbsent(rowGroupId, any -> new ArrayList<>(
+                    INITIAL_LIST_CAPACITY));
+                chunksInGroup.add(projectChunk);
+            }
+            // The chunks in this row group are exhausted; move on to the next.
+            if (rgSelected) {
+                selectedRowGroups.add(rowGroupId);
+            } else {
+                // If the row group is not selected, remove all of its chunks from the buffer.
+                List<Chunk> chunksInGroup;
+                if ((chunksInGroup = chunksWithGroup.remove(rowGroupId)) != null) {
+                    chunksInGroup.clear();
+                }
+            }
+        }
+
+        // There are no more chunks produced by this row group iterator.
+        rgIterator.noMoreChunks();
+
+        // no group is selected.
+        if (selectedRowGroups.isEmpty()) {
+            ioStatus.finish();
+            return;
+        }
+
+        // We collect all chunks of several row groups here, so that we can merge
+        // the IO ranges of different row groups to improve I/O efficiency.
+        boolean[] rowGroupIncluded = toRowGroupBitmap(rowGroupCount, selectedRowGroups);
+
+        // Collect all row groups for merging IO tasks.
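+        // Illustrative example: if only row groups {3, 4, 7} were selected,
+        // rowGroupIncluded marks just those bits, and the following mergeIO is
+        // assumed to coalesce their byte ranges per project column into fewer,
+        // larger reads.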
+        mergeIO(filePath, stripeId, inputRefsForProject, blockCacheManager, rowGroupIncluded);
+
+        final int blockIndexSize = inputRefsForProject.size();
+        List<Chunk> chunkResults = new ArrayList<>();
+
+        // load the project columns
+        for (Map.Entry<Integer, List<Chunk>> entry : chunksWithGroup.entrySet()) {
+            List<Chunk> chunksInGroup = entry.getValue();
+
+            chunkResults.clear();
+            for (int blockIndex = 0; blockIndex < blockIndexSize; blockIndex++) {
+                for (Chunk bufferedChunk : chunksInGroup) {
+
+                    // The target chunk may still be lazy, or may already have been loaded into normal mode.
+                    Chunk targetChunk = bufferedChunk;
+                    if (activeLoading) {
+                        Block[] blocks = bufferedChunk.getBlocks();
+
+                        LazyBlock lazyBlock = (LazyBlock) blocks[blockIndex];
+                        lazyBlock.load();
+
+                        blocks[blockIndex] = lazyBlock.getLoaded();
+                    }
+
+                    if (blockIndex == 0) {
+                        targetChunk.setPartIndex(partNum);
+                        targetChunk.setPartCount(nodePartCount);
+                        chunkResults.add(targetChunk);
+                    }
+
+                }
+            }
+            ioStatus.addResults(chunkResults);
+        }
+
+        if (activeLoading) {
+            // Force the column readers to close.
+            forceClose(inputRefsForFilter);
+            forceClose(inputRefsForProject);
+
+            // Clear the path to the GC root: when using active loading, the
+            // row group iterator will not be accessed anymore.
+            rgIterator = null;
+        }
+
+        ioStatus.finish();
+    }
+
+    private void forceClose(List<Integer> inputRefs) {
+        for (int i = 0; i < inputRefs.size(); i++) {
+            Integer columnId = columnTransformer.getLocInOrc(chunkRefMap[inputRefs.get(i)]);
+            if (columnId == null) {
+                continue;
+            }
+            ColumnReader columnReader = rgIterator.getColumnReader(columnId);
+            if (columnReader != null) {
+                columnReader.close();
+            }
+        }
+    }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/FloatColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/FloatColumnReader.java
new file mode 100644
index 000000000..956bf7f36
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/FloatColumnReader.java
@@ -0,0 +1,452 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.FloatBlock; +import com.alibaba.polardbx.executor.chunk.FloatBlockBuilder; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.operator.scan.AbstractColumnReader; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.MetricsNameBuilder; +import com.alibaba.polardbx.executor.operator.scan.metrics.ORCMetricsWrapper; +import com.alibaba.polardbx.executor.operator.scan.metrics.ProfileKeys; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.codahale.metrics.Counter; +import com.google.common.base.Preconditions; +import org.apache.orc.OrcProto; +import org.apache.orc.customized.ORCProfile; +import org.apache.orc.impl.BitFieldReader; +import org.apache.orc.impl.InStream; +import org.apache.orc.impl.OrcIndex; +import org.apache.orc.impl.PositionProvider; +import org.apache.orc.impl.RecordReaderImpl; +import org.apache.orc.impl.SerializationUtils; +import org.apache.orc.impl.StreamName; + +import java.io.IOException; +import java.text.MessageFormat; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; + +public class FloatColumnReader extends AbstractColumnReader { + // basic metadata + private final StripeLoader stripeLoader; + + // in preheat mode, all row-indexes in orc-index should not be null. + private final OrcIndex orcIndex; + private final RuntimeMetrics metrics; + + private final int indexStride; + + protected final boolean enableMetrics; + protected final SerializationUtils utils; + // for semantic parser + protected BitFieldReader present; + protected InStream dataStream; + // open parameters + private boolean[] rowGroupIncluded; + private boolean await; + // inner states + protected AtomicBoolean openFailed; + private AtomicBoolean initializeOnlyOnce; + protected AtomicBoolean isOpened; + // IO results + private Throwable throwable; + private Map inStreamMap; + private CompletableFuture> openFuture; + // record read positions + private int currentRowGroup; + protected int lastPosition; + // execution time metrics. + private Counter preparingTimer; + private Counter seekTimer; + protected Counter parseTimer; + + public FloatColumnReader(int columnId, boolean isPrimaryKey, StripeLoader stripeLoader, OrcIndex orcIndex, + RuntimeMetrics metrics, int indexStride, boolean enableMetrics) { + super(columnId, isPrimaryKey); + this.stripeLoader = stripeLoader; + this.orcIndex = orcIndex; + this.metrics = metrics; + this.indexStride = indexStride; + this.enableMetrics = enableMetrics; + + // inner states + openFailed = new AtomicBoolean(false); + initializeOnlyOnce = new AtomicBoolean(false); + isOpened = new AtomicBoolean(false); + throwable = null; + inStreamMap = null; + openFuture = null; + + // for parser + present = null; + dataStream = null; + + // read position control + // The initial value is -1 means it must seek to the correct row group firstly. 
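+        // (That is, currentRowGroup == -1 && lastPosition == -1 marks a reader
+        // that has never been positioned; see startAt below.)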
+ currentRowGroup = -1; + lastPosition = -1; + + rowGroupIncluded = null; + await = false; + + utils = new SerializationUtils(); + + if (enableMetrics) { + preparingTimer = metrics.addCounter( + MetricsNameBuilder.columnMetricsKey(columnId, ProfileKeys.ORC_COLUMN_IO_PREPARING_TIMER), + COLUMN_READER_TIMER, + ProfileKeys.ORC_COLUMN_IO_PREPARING_TIMER.getProfileUnit() + ); + + seekTimer = metrics.addCounter( + MetricsNameBuilder.columnMetricsKey(columnId, ProfileKeys.ORC_COLUMN_SEEK_TIMER), + COLUMN_READER_TIMER, + ProfileKeys.ORC_COLUMN_SEEK_TIMER.getProfileUnit() + ); + + parseTimer = metrics.addCounter( + MetricsNameBuilder.columnMetricsKey(columnId, ProfileKeys.ORC_COLUMN_PARSE_TIMER), + COLUMN_READER_TIMER, + ProfileKeys.ORC_COLUMN_PARSE_TIMER.getProfileUnit() + ); + } + + } + + @Override + public boolean[] rowGroupIncluded() { + Preconditions.checkArgument(isOpened.get()); + return rowGroupIncluded; + } + + @Override + public boolean isOpened() { + return isOpened.get(); + } + + @Override + public void open(boolean await, boolean[] rowGroupIncluded) { + if (!isOpened.compareAndSet(false, true)) { + throw GeneralUtil.nestedException("It's not allowed to re-open this column reader."); + } + this.rowGroupIncluded = rowGroupIncluded; + this.await = await; + + // load the specified streams. + openFuture = stripeLoader.load(columnId, rowGroupIncluded); + + if (await) { + doWait(); + } + } + + @Override + public void open(CompletableFuture> loadFuture, + boolean await, boolean[] rowGroupIncluded) { + if (!isOpened.compareAndSet(false, true)) { + throw GeneralUtil.nestedException("It's not allowed to re-open this column reader."); + } + this.rowGroupIncluded = rowGroupIncluded; + this.await = await; + this.openFuture = loadFuture; + if (await) { + doWait(); + } + } + + // wait for open future and handle failure. + private void doWait() { + try { + inStreamMap = openFuture.get(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } catch (ExecutionException e) { + throw new RuntimeException(e); + } + + if (throwable != null) { + // throw if failed. + throw GeneralUtil.nestedException(throwable); + } + } + + protected void init() throws IOException { + if (!initializeOnlyOnce.compareAndSet(false, true)) { + return; + } + + long start = System.nanoTime(); + if (!await) { + doWait(); + } + if (openFailed.get()) { + return; + } + + // Unlike StripePlanner in raw ORC SDK, the stream names and IO production are + // all determined at runtime. + StreamName presentName = new StreamName(columnId, OrcProto.Stream.Kind.PRESENT); + StreamName dataName = new StreamName(columnId, OrcProto.Stream.Kind.DATA); + StreamName lengthName = new StreamName(columnId, OrcProto.Stream.Kind.LENGTH); + + InStream presentStream = inStreamMap.get(presentName); + dataStream = inStreamMap.get(dataName); + + // initialize present and integer reader + present = presentStream == null ? null : new BitFieldReader(presentStream); + + // Add memory metrics. + if (present != null) { + String metricsName = MetricsNameBuilder.streamMetricsKey( + presentName, ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER + ); + + ORCProfile memoryCounter = enableMetrics ? 
new ORCMetricsWrapper( + metricsName, + COLUMN_READER_MEMORY, + ProfileKeys.ORC_STREAM_READER_MEMORY_COUNTER.getProfileUnit(), + metrics + ) : null; + + present.setMemoryCounter(memoryCounter); + } + + // metrics time cost of preparing (IO waiting + data steam reader constructing) + if (enableMetrics) { + preparingTimer.inc(System.nanoTime() - start); + } + } + + @Override + public void startAt(int rowGroupId, int elementPosition) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(rowGroupIncluded[rowGroupId]); + init(); + + long start = System.nanoTime(); + + // case 1: the column-reader has not been accessed, + // and the first access is the first effective row-group and the position is 0. + boolean isFirstAccess = (currentRowGroup == -1 && lastPosition == -1) + && elementPosition == 0 + && rowGroupId == 0; + + // case 2: the next access follows the last position in the same row-group. + boolean isConsecutive = rowGroupId == currentRowGroup && elementPosition == lastPosition; + + // case 3: the last access reach the last position of the row-group, and the next access is the next + // valid row-group starting at position 0. + boolean isNextRowGroup = currentRowGroup < rowGroupId + && elementPosition == 0 + && lastPosition == indexStride + && (currentRowGroup + 1 == rowGroupId); + + // It's in order. + if (isFirstAccess || isConsecutive || isNextRowGroup) { + lastPosition = elementPosition; + currentRowGroup = rowGroupId; + // metrics + if (enableMetrics) { + seekTimer.inc(System.nanoTime() - start); + } + return; + } + + // It's not in order, need skip some position. + if (rowGroupId != currentRowGroup || elementPosition < lastPosition) { + // case 1: when given row group is different from the current group, seek to the position of it. + // case 2: when elementPosition <= lastPosition, we need go back to the start position of this row group. + seek(rowGroupId); + + skip(skipPresent(elementPosition)); + + } else if (elementPosition > lastPosition && elementPosition < indexStride) { + // case 3: when elementPosition > lastPosition and the group is same, just skip to given position. + skip(skipPresent(elementPosition - lastPosition)); + + } else if (elementPosition >= indexStride) { + // case 4: the position is out of range. + throw GeneralUtil.nestedException("Invalid element position: " + elementPosition); + } + // case 5: the elementPosition == lastPosition and rowGroupId is equal. + lastPosition = elementPosition; + currentRowGroup = rowGroupId; + + // metrics + if (enableMetrics) { + seekTimer.inc(System.nanoTime() - start); + } + } + + private void skip(long numValues) throws IOException { + if (dataStream != null) { + for (long i = 0; i < numValues; i++) { + utils.readFloat(dataStream); + } + } + } + + // Try to skip rows on present stream and count down + // the actual rows need skipped by data stream. + protected long skipPresent(long rows) throws IOException { + if (present == null) { + return rows; + } + + long result = 0; + for (long c = 0; c < rows; ++c) { + // record the count of non-null values + // in range of [current_position, current_position + rows) + if (present.next() == 1) { + result += 1; + } + } + // It must be less than or equal to count of rows. 
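+        // Example (illustrative): skipping 10 rows of which 4 are null leaves
+        // only 6 values to be skipped on the data stream.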
+ return result; + } + + @Override + public void seek(int rowGroupId) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + init(); + + // Find the position-provider of given column and row group. + PositionProvider positionProvider; + OrcProto.RowIndex[] rowIndices = orcIndex.getRowGroupIndex(); + OrcProto.RowIndexEntry entry = rowIndices[columnId].getEntry(rowGroupId); + // This is effectively a test for pre-ORC-569 files. + if (rowGroupId == 0 && entry.getPositionsCount() == 0) { + positionProvider = new RecordReaderImpl.ZeroPositionProvider(); + } else { + positionProvider = new RecordReaderImpl.PositionProviderImpl(entry); + } + + // NOTE: The order of seeking is strict! + if (present != null) { + present.seek(positionProvider); + } + if (dataStream != null) { + dataStream.seek(positionProvider); + } + + currentRowGroup = rowGroupId; + lastPosition = 0; + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof FloatBlock); + init(); + + long start = System.nanoTime(); + + FloatBlock block = (FloatBlock) randomAccessBlock; + float[] vector = block.floatArray(); + boolean[] nulls = block.nulls(); + Preconditions.checkArgument(nulls != null && nulls.length == positionCount); + + if (present == null) { + randomAccessBlock.setHasNull(false); + + for (int i = 0; i < positionCount; i++) { + // no null value. + float floatVal = utils.readFloat(dataStream); + vector[i] = floatVal; + lastPosition++; + } + // destroy null array to save the memory. + block.destroyNulls(true); + } else { + randomAccessBlock.setHasNull(true); + + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + vector[i] = 0; + nulls[i] = true; + } else { + // if not null + float doubleVal = utils.readFloat(dataStream); + vector[i] = doubleVal; + nulls[i] = false; + } + lastPosition++; + } + } + + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } + + @Override + public void close() { + if (!isClosed.compareAndSet(false, true)) { + return; + } + + // 1. Clear the resources allocated in InStream + StreamName presentName = new StreamName(columnId, OrcProto.Stream.Kind.PRESENT); + StreamName dataName = new StreamName(columnId, OrcProto.Stream.Kind.DATA); + StreamName lengthName = new StreamName(columnId, OrcProto.Stream.Kind.LENGTH); + + if (inStreamMap != null) { + InStream presentStream = inStreamMap.get(presentName); + InStream dataStream = inStreamMap.get(dataName); + InStream lengthStream = inStreamMap.get(lengthName); + + if (presentStream != null) { + presentStream.close(); + } + + if (dataStream != null) { + dataStream.close(); + } + + if (lengthStream != null) { + lengthStream.close(); + } + } + + // 2. 
Clear the memory resources held by stream + long releasedBytes = 0L; + releasedBytes += stripeLoader.clearStream(presentName); + releasedBytes += stripeLoader.clearStream(dataName); + releasedBytes += stripeLoader.clearStream(lengthName); + + if (releasedBytes > 0) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(MessageFormat.format( + "Release the resource of work: {0}, columnId: {1}, bytes: {2}", + metrics.name(), columnId, releasedBytes + )); + } + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/IOPriorityScanWork.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/IOPriorityScanWork.java new file mode 100644 index 000000000..bfe7bd9af --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/IOPriorityScanWork.java @@ -0,0 +1,221 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.executor.archive.reader.OSSColumnTransformer; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.chunk.columnar.LazyBlock; +import com.alibaba.polardbx.executor.operator.scan.BlockCacheManager; +import com.alibaba.polardbx.executor.operator.scan.LazyEvaluator; +import com.alibaba.polardbx.executor.operator.scan.LogicalRowGroup; +import com.alibaba.polardbx.executor.operator.scan.RowGroupIterator; +import com.alibaba.polardbx.executor.operator.scan.RowGroupReader; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.google.common.base.Preconditions; +import org.apache.hadoop.fs.Path; +import org.apache.orc.ColumnStatistics; +import org.roaringbitmap.RoaringBitmap; + +import java.util.ArrayList; +import java.util.BitSet; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +public class IOPriorityScanWork extends AbstractScanWork { + private static final boolean IN_ROW_GROUP_BATCH = false; + + private final boolean enableCancelLoading; + + public IOPriorityScanWork(String workId, + RuntimeMetrics metrics, + boolean enableMetrics, + LazyEvaluator lazyEvaluator, + RowGroupIterator rgIterator, + RoaringBitmap deletionBitmap, MorselColumnarSplit.ScanRange scanRange, + List inputRefsForFilter, List inputRefsForProject, + int partNum, int nodePartCount, boolean enableCancelLoading, + OSSColumnTransformer columnTransformer) { + super(workId, metrics, enableMetrics, lazyEvaluator, rgIterator, deletionBitmap, scanRange, inputRefsForFilter, + inputRefsForProject, partNum, nodePartCount, columnTransformer); + this.enableCancelLoading = enableCancelLoading; + } + + @Override + protected void handleNextWork() throws Throwable { + final Path filePath = rgIterator.filePath(); + final int stripeId = rgIterator.stripeId(); + + // not all row 
group but those filtered by the pruner should be loaded.
+        final boolean[] prunedRowGroupBitmap = rgIterator.rgIncluded();
+        final int rowGroupCount = prunedRowGroupBitmap.length;
+        final BlockCacheManager blockCacheManager = rgIterator.getCacheManager();
+
+        // Get and lazily evaluate chunks until the row group count exceeds the threshold.
+        // NOTE: the row groups and chunks must be handled in order.
+        final Map<Integer, List<Chunk>> chunksWithGroup = new TreeMap<>();
+        final List<Integer> selectedRowGroups = new ArrayList<>();
+
+        // For the filter columns, initialize or open the related modules.
+        int filterColumns = inputRefsForFilter.size();
+        if (filterColumns == 1) {
+            // use single IO if filter columns = 1
+            final Integer filterColId = columnTransformer.getLocInOrc(chunkRefMap[inputRefsForFilter.get(0)]);
+            if (filterColId != null) {
+                singleIO(filterColId, filePath, stripeId, prunedRowGroupBitmap, blockCacheManager);
+            }
+        } else if (filterColumns > 1) {
+            // use merging IO if filter columns > 1.
+            mergeIO(filePath, stripeId,
+                inputRefsForFilter,
+                blockCacheManager,
+                prunedRowGroupBitmap);
+        }
+
+        // For the project columns, invoke the IO task: collect all row groups for merging IO tasks.
+        mergeIO(filePath, stripeId, inputRefsForProject, blockCacheManager, prunedRowGroupBitmap);
+
+        // If there is no push-down filter, skip the evaluation.
+        boolean skipEvaluation = filterColumns == 0;
+
+        while (!isCanceled && rgIterator.hasNext()) {
+            rgIterator.next();
+            LogicalRowGroup<Block, ColumnStatistics> logicalRowGroup = rgIterator.current();
+            final int rowGroupId = logicalRowGroup.groupId();
+
+            // The row group id in the iterator must be valid.
+            Preconditions.checkArgument(prunedRowGroupBitmap[rowGroupId]);
+
+            // A flag for each row group indicating whether at least one chunk in it is selected.
+            boolean rgSelected = false;
+
+            Chunk chunk;
+            RowGroupReader<Chunk> rowGroupReader = logicalRowGroup.getReader();
+
+            // The block-loaders and their chunks will be created here.
+            while ((chunk = rowGroupReader.nextBatch()) != null) {
+                int[] batchRange = rowGroupReader.batchRange();
+                if (skipEvaluation) {
+                    int[] preSelection = selectionOf(batchRange, deletionBitmap);
+                    if (preSelection != null) {
+                        // rebuild the chunk according to the project refs.
+                        chunk = rebuildProject(chunk, preSelection, preSelection.length);
+                    }
+
+                    chunk.setPartIndex(partNum);
+                    chunk.setPartCount(nodePartCount);
+
+                    // No evaluation, just buffer the unloaded chunks.
+                    List<Chunk> chunksInGroup = chunksWithGroup.computeIfAbsent(rowGroupId, any -> new ArrayList<>());
+                    chunksInGroup.add(chunk);
+                    rgSelected = true;
+
+                    if (!IN_ROW_GROUP_BATCH) {
+                        // add the result and notify the blocked threads.
+                        ioStatus.addResult(chunk);
+                    }
+
+                    continue;
+                }
+
+                // Proactively load the filter blocks.
+                for (int filterRef : inputRefsForFilter) {
+                    int chunkIndex = chunkRefMap[filterRef];
+                    Preconditions.checkArgument(chunkIndex >= 0);
+
+                    // all blocks in the chunk are lazy
+                    // NOTE: explicit type cast?
+                    LazyBlock filterBlock = (LazyBlock) chunk.getBlock(chunkIndex);
+
+                    // Proactively invoke loading; otherwise it would be loaded during evaluation.
+                    filterBlock.load();
+                }
+
+                long start = System.nanoTime();
+
+                // Get the selection bitmap of this range [n * 1000, (n+1) * 1000] in the row group,
+                // and then evaluate the filter.
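+                // (batchRange[0] is the absolute start row of this batch and
+                // batchRange[1] its row count; see LogicalRowGroupImpl.Reader#batchRange.)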
+ BitSet bitmap = lazyEvaluator.eval(chunk, batchRange[0], batchRange[1], deletionBitmap); + + // check zeros in selection array, + // and mark whether this row group is selected or not + boolean hasSelectedPositions = !bitmap.isEmpty(); + + rgSelected |= hasSelectedPositions; + if (!hasSelectedPositions) { + // if all positions are filtered, skip to the next chunk. + if (enableMetrics) { + evaluationTimer.inc(System.nanoTime() - start); + } + + // The created chunk and block-loader will be abandoned here. + releaseRef(chunk); + continue; + } + + // hold this chunk util all row groups in scan work are handled. + int[] selection = selectionOf(bitmap); + if (enableMetrics) { + evaluationTimer.inc(System.nanoTime() - start); + } + + // rebuild chunk according to project refs. + Chunk projectChunk = rebuildProject(chunk, selection, selection.length); + + if (!IN_ROW_GROUP_BATCH) { + // add result and notify the blocked threads. + ioStatus.addResult(projectChunk); + } + + List chunksInGroup = chunksWithGroup.computeIfAbsent(rowGroupId, any -> new ArrayList<>()); + chunksInGroup.add(projectChunk); + } + + // the chunk in this row group is run out, change to the next. + if (rgSelected) { + selectedRowGroups.add(rowGroupId); + if (IN_ROW_GROUP_BATCH) { + List chunksInGroup = chunksWithGroup.get(rowGroupId); + if (chunksInGroup != null) { + for (Chunk result : chunksInGroup) { + // add result and notify the blocked threads. + ioStatus.addResult(result); + } + } + } + } + // Remove all chunks of this row-group from buffer. + List chunksInGroup; + if ((chunksInGroup = chunksWithGroup.remove(rowGroupId)) != null) { + chunksInGroup.clear(); + } + } + + // There is no more chunk produced by this row group iterator. + rgIterator.noMoreChunks(); + + if (enableCancelLoading && selectedRowGroups.isEmpty()) { + // all row-groups don't match the filter predicate, stop the stripe loading task immediately. + isIOCanceled.set(true); + } + + ioStatus.finish(); + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/IOStatusImpl.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/IOStatusImpl.java new file mode 100644 index 000000000..3e6f9ea80 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/IOStatusImpl.java @@ -0,0 +1,220 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.executor.operator.scan.impl;
+
+import com.alibaba.polardbx.common.utils.GeneralUtil;
+import com.alibaba.polardbx.common.utils.logger.Logger;
+import com.alibaba.polardbx.common.utils.logger.LoggerFactory;
+import com.alibaba.polardbx.executor.chunk.Chunk;
+import com.alibaba.polardbx.executor.operator.scan.IOStatus;
+import com.alibaba.polardbx.executor.operator.scan.ScanState;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.StampedLock;
+
+public class IOStatusImpl<BATCH> implements IOStatus<BATCH> {
+    private static final Logger LOGGER = LoggerFactory.getLogger("oss");
+    private final String workId;
+
+    private volatile Throwable throwable;
+
+    /**
+     * Store the IO products in a concurrency-safe queue.
+     */
+    private ConcurrentLinkedQueue<BATCH> results;
+
+    /**
+     * Futures used to notify threads waiting for IO completion.
+     */
+    private List<SettableFuture<?>> blockedCallers;
+
+    private volatile boolean isFinished;
+
+    /**
+     * NOTE: Must be changed when closing the whole client.
+     */
+    private volatile boolean isClosed;
+
+    /**
+     * State-changing actions must be exclusive with state-reading actions.
+     */
+    private StampedLock lock;
+
+    private AtomicLong rowCount = new AtomicLong(0);
+
+    public static <BATCH> IOStatus<BATCH> create(String workId) {
+        return new IOStatusImpl<>(workId);
+    }
+
+    private IOStatusImpl(String workId) {
+        this.workId = workId;
+
+        results = new ConcurrentLinkedQueue<>();
+        blockedCallers = new ArrayList<>();
+        isFinished = false;
+        isClosed = false;
+        throwable = null;
+        lock = new StampedLock();
+    }
+
+    @Override
+    public long rowCount() {
+        return rowCount.get();
+    }
+
+    @Override
+    public String workId() {
+        return workId;
+    }
+
+    public ScanState state() {
+        if (throwable != null) {
+            return ScanState.FAILED;
+        }
+        if (isFinished) {
+            return ScanState.FINISHED;
+        }
+        if (isClosed) {
+            // according to the closed state of the whole client.
+            return ScanState.CLOSED;
+        }
+        if (results.peek() != null) {
+            // ready for fetching results
+            return ScanState.READY;
+        }
+        // The external consumer should block and wait for IO products.
+        return ScanState.BLOCKED;
+    }
+
+    public ListenableFuture<?> isBlocked() {
+        throwIfFailed();
+        // To be serialized with other state change actions.
+        long stamp = lock.readLock();
+        try {
+            ScanState state = state();
+            switch (state) {
+            case FINISHED:
+            case FAILED:
+            case READY:
+                notifyBlockedCallers();
+                // Not blocked
+                return Futures.immediateFuture(null);
+            default:
+                SettableFuture<?> future = SettableFuture.create();
+                blockedCallers.add(future);
+                return future;
+            }
+        } finally {
+            lock.unlockRead(stamp);
+        }
+    }
+
+    public void finish() {
+        long stamp = lock.writeLock();
+        try {
+            isFinished = true;
+            notifyBlockedCallers();
+        } finally {
+            lock.unlockWrite(stamp);
+        }
+    }
+
+    public void close() {
+        long stamp = lock.writeLock();
+        try {
+            isClosed = true;
+            notifyBlockedCallers();
+        } finally {
+            lock.unlockWrite(stamp);
+        }
+    }
+
+    public void addResult(BATCH result) {
+        long stamp = lock.writeLock();
+        try {
+            results.add(result);
+
+            // record the row count
+            if (result instanceof Chunk) {
+                rowCount.getAndAdd(((Chunk) result).getPositionCount());
+            }
+            // Notify the blocked callers on this file read task.
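+            // (Consumer side, for illustration only -- process() is a hypothetical sink:
+            //     while (status.state() != ScanState.FINISHED) {
+            //         Chunk c = status.popResult();
+            //         if (c == null) { status.isBlocked().get(); continue; }
+            //         process(c);
+            //     }
+            // )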
+            notifyBlockedCallers();
+        } finally {
+            lock.unlockWrite(stamp);
+        }
+    }
+
+    @Override
+    public void addResults(List<BATCH> batches) {
+        long stamp = lock.writeLock();
+        try {
+            results.addAll(batches);
+
+            // record the row count
+            for (int i = 0; i < batches.size(); i++) {
+                BATCH result = batches.get(i);
+                if (result instanceof Chunk) {
+                    rowCount.getAndAdd(((Chunk) result).getPositionCount());
+                }
+            }
+            // Notify the blocked callers on this file read task.
+            notifyBlockedCallers();
+        } finally {
+            lock.unlockWrite(stamp);
+        }
+    }
+
+    public BATCH popResult() {
+        return results.poll();
+    }
+
+    @Override
+    public void addException(Throwable t) {
+        long stamp = lock.writeLock();
+        try {
+            if (throwable == null) {
+                throwable = t;
+            }
+            notifyBlockedCallers();
+        } finally {
+            lock.unlockWrite(stamp);
+        }
+    }
+
+    public void throwIfFailed() {
+        if (throwable != null) {
+            throw GeneralUtil.nestedException(throwable);
+        }
+    }
+
+    private void notifyBlockedCallers() {
+        // notify all futures in the list.
+        for (int i = 0; i < blockedCallers.size(); i++) {
+            SettableFuture<?> blockedCaller = blockedCallers.get(i);
+            blockedCaller.set(null);
+        }
+        blockedCallers.clear();
+    }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/IntegerColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/IntegerColumnReader.java
new file mode 100644
index 000000000..96d1d166c
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/IntegerColumnReader.java
@@ -0,0 +1,147 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.IntegerBlock; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.google.common.base.Preconditions; +import org.apache.orc.OrcProto; +import org.apache.orc.impl.OrcIndex; + +import java.io.IOException; + +public class IntegerColumnReader extends AbstractLongColumnReader { + public IntegerColumnReader(int columnId, boolean isPrimaryKey, StripeLoader stripeLoader, OrcIndex orcIndex, + RuntimeMetrics metrics, + OrcProto.ColumnEncoding.Kind kind, int indexStride, + boolean enableMetrics) { + super(columnId, isPrimaryKey, stripeLoader, orcIndex, metrics, kind, indexStride, enableMetrics); + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof IntegerBlock); + init(); + + long start = System.nanoTime(); + IntegerBlock integerBlock = (IntegerBlock) randomAccessBlock; + int[] array = integerBlock.intArray(); + boolean[] nulls = integerBlock.nulls(); + + if (present == null) { + randomAccessBlock.setHasNull(false); + for (int i = 0; i < positionCount; i++) { + // no null value. + long longVal = data.next(); + array[i] = (int) longVal; + lastPosition++; + } + + // destroy null array to save the memory. + integerBlock.destroyNulls(true); + + } else { + // there are some null values + randomAccessBlock.setHasNull(true); + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + array[i] = 0; + } else { + // if not null + long longVal = data.next(); + array[i] = (int) longVal; + } + lastPosition++; + } + } + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } + + @Override + public int next(RandomAccessBlock randomAccessBlock, int positionCount, int[] selection, int selSize) + throws IOException { + if (selection == null || selSize == 0 || selection.length == 0) { + next(randomAccessBlock, positionCount); + return 0; + } + + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof IntegerBlock); + init(); + + long start = System.nanoTime(); + IntegerBlock integerBlock = (IntegerBlock) randomAccessBlock; + int[] array = integerBlock.intArray(); + boolean[] nulls = integerBlock.nulls(); + + int totalSkipCount = 0; + if (present == null) { + randomAccessBlock.setHasNull(false); + int lastSelectedPos = -1; + for (int i = 0; i < selSize; i++) { + int selectedPos = selection[i]; + + int skipPos = selectedPos - lastSelectedPos - 1; + if (skipPos > 0) { + data.skip(skipPos); + totalSkipCount += skipPos; + lastPosition += skipPos; + } + array[selectedPos] = (int) data.next(); + lastPosition++; + + lastSelectedPos = selectedPos; + } + + ((Block) randomAccessBlock).destroyNulls(true); + + } else { + // there are some null values + randomAccessBlock.setHasNull(true); + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + array[i] = 0; + } else { + // if not null + long longVal = 
data.next(); + array[i] = (int) longVal; + } + lastPosition++; + } + } + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + + return totalSkipCount; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/LocalBlockDictionary.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/LocalBlockDictionary.java new file mode 100644 index 000000000..cd3667adf --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/LocalBlockDictionary.java @@ -0,0 +1,80 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.executor.operator.scan.BlockDictionary; +import io.airlift.slice.Slice; +import io.airlift.slice.SliceOutput; + +import java.util.Arrays; + +/** + * A local dictionary scoped in a block. + */ +public class LocalBlockDictionary implements BlockDictionary { + + public static final BlockDictionary EMPTY_DICTIONARY = new LocalBlockDictionary(new Slice[0]); + + // NOTE: the format (slice + offsets) is not efficient enough + private final Slice[] dict; + private final int sizeInBytes; + private final int hashCode; + + // NOTE: we suppose that the dict array is in lexicographic order. + public LocalBlockDictionary(Slice[] dict) { + this.dict = dict; + this.hashCode = Arrays.hashCode(dict); + int sizeInBytes = 0; + for (Slice dictValue : dict) { + sizeInBytes += dictValue.length(); + } + this.sizeInBytes = sizeInBytes; + } + + public Slice[] getDict() { + return dict; + } + + @Override + public Slice getValue(int id) { + return dict[id]; + } + + @Override + public int size() { + return dict.length; + } + + @Override + public int sizeInBytes() { + return sizeInBytes; + } + + @Override + public void encoding(SliceOutput sliceOutput) { + sliceOutput.writeInt(dict.length); + for (Slice dictValue : dict) { + sliceOutput.writeInt(dictValue.length()); + sliceOutput.writeBytes(dictValue); + } + } + + @Override + public int hashCode() { + return this.hashCode; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/LogicalRowGroupImpl.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/LogicalRowGroupImpl.java new file mode 100644 index 000000000..fbef752a1 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/LogicalRowGroupImpl.java @@ -0,0 +1,334 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.executor.archive.reader.OSSColumnTransformer; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.chunk.columnar.BlockLoader; +import com.alibaba.polardbx.executor.chunk.columnar.CommonLazyBlock; +import com.alibaba.polardbx.executor.chunk.columnar.LazyBlock; +import com.alibaba.polardbx.executor.operator.scan.BlockCacheManager; +import com.alibaba.polardbx.executor.operator.scan.CacheReader; +import com.alibaba.polardbx.executor.operator.scan.ColumnReader; +import com.alibaba.polardbx.executor.operator.scan.LogicalRowGroup; +import com.alibaba.polardbx.executor.operator.scan.RowGroupReader; +import com.alibaba.polardbx.executor.operator.scan.metrics.MetricsNameBuilder; +import com.alibaba.polardbx.executor.operator.scan.metrics.ProfileKeys; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.utils.TimestampUtils; +import com.codahale.metrics.Counter; +import com.google.common.base.Preconditions; +import com.google.common.primitives.Booleans; +import org.apache.hadoop.fs.Path; +import org.apache.orc.ColumnStatistics; +import org.apache.orc.OrcProto; +import org.apache.orc.TypeDescription; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.TimeZone; + +public class LogicalRowGroupImpl implements LogicalRowGroup { + private final RuntimeMetrics metrics; + private final Path filePath; + private final int stripeId; + + /** + * The group id of this row group. + */ + private final int groupId; + + /** + * Count of rows in this row group, must <= ORC_INDEX_STRIDE. + */ + private final int rowCount; + private final int startRowId; + + /** + * File-level column schema + */ + private final TypeDescription fileSchema; + private final OSSColumnTransformer ossColumnTransformer; + + /** + * The length of encoding[] array is equal to column count, + * and the encoding of column that not included is null. + */ + private final OrcProto.ColumnEncoding[] encodings; + + /** + * Included column ids. + */ + private final boolean[] columnIncluded; + + /** + * Count of selected columns. + */ + private final int columns; + + /** + * The maximum rows in one chunk. + */ + private final int chunkLimit; + + /** + * A column-level reader responsible for all blocks of all row groups in the stripe. + */ + private final Map columnReaders; + + /** + * A column-level cache reader holding the available cached blocks in the stripe + */ + private final Map> cacheReaders; + + /** + * The global block cache manager shared by all files. + */ + private final BlockCacheManager blockCacheManager; + + private final ExecutionContext context; + + /** + * Enable block cache or not. 
+     */
+    private final boolean useBlockCache;
+
+    private final boolean enableMetrics;
+
+    private final boolean enableColumnReaderLock;
+
+    private final boolean useSelection;
+
+    private final boolean enableCompatible;
+
+    private final TimeZone timeZone;
+
+    private final boolean onlyCachePrimaryKey;
+
+    private final boolean enableSkipCompression;
+
+    public LogicalRowGroupImpl(
+        RuntimeMetrics metrics,
+        Path filePath, int stripeId, int groupId, int rowCount, int startRowId,
+        TypeDescription fileSchema, OSSColumnTransformer ossColumnTransformer,
+        OrcProto.ColumnEncoding[] encodings, boolean[] columnIncluded, int chunkLimit,
+        Map<Integer, ColumnReader> columnReaders, Map<Integer, CacheReader<Block>> cacheReaders,
+        BlockCacheManager<Block> blockCacheManager, ExecutionContext context) {
+        this.metrics = metrics;
+        this.filePath = filePath;
+        this.stripeId = stripeId;
+        this.groupId = groupId;
+        this.rowCount = rowCount;
+        this.startRowId = startRowId;
+        this.fileSchema = fileSchema;
+        this.ossColumnTransformer = ossColumnTransformer;
+        this.encodings = encodings;
+        this.columnIncluded = columnIncluded;
+        this.chunkLimit = chunkLimit;
+        this.columnReaders = columnReaders;
+        this.cacheReaders = cacheReaders;
+        this.blockCacheManager = blockCacheManager;
+        this.context = context;
+        this.useBlockCache = context.getParamManager()
+            .getBoolean(ConnectionParams.ENABLE_BLOCK_CACHE);
+        this.enableMetrics = context.getParamManager()
+            .getBoolean(ConnectionParams.ENABLE_COLUMNAR_METRICS);
+        this.enableColumnReaderLock = context.getParamManager()
+            .getBoolean(ConnectionParams.ENABLE_COLUMN_READER_LOCK);
+        this.useSelection = context.getParamManager()
+            .getBoolean(ConnectionParams.ENABLE_COLUMNAR_SCAN_SELECTION);
+        this.enableCompatible = context.getParamManager()
+            .getBoolean(ConnectionParams.ENABLE_OSS_COMPATIBLE);
+        this.timeZone = TimestampUtils.getTimeZone(context);
+        this.enableSkipCompression = context.getParamManager()
+            .getBoolean(ConnectionParams.ENABLE_SKIP_COMPRESSION_IN_ORC);
+        this.onlyCachePrimaryKey = context.getParamManager()
+            .getBoolean(ConnectionParams.ONLY_CACHE_PRIMARY_KEY_IN_BLOCK_CACHE);
+
+        this.columns = this.ossColumnTransformer.columnCount();
+        Preconditions.checkArgument(columns > 0);
+    }
+
+    protected class Reader implements RowGroupReader<Chunk> {
+        private LogicalRowGroup<Block, ColumnStatistics> logicalRowGroup;
+
+        /**
+         * Current row position in this row-group for the next allocation.
+         */
+        private int currentPosition;
+
+        private int lastChunkRows;
+        private int lastPosition;
+
+        public Reader(LogicalRowGroup<Block, ColumnStatistics> logicalRowGroup) {
+            this.logicalRowGroup = logicalRowGroup;
+            this.currentPosition = 0;
+        }
+
+        @Override
+        public Chunk nextBatch() {
+            // Get the row count of the next lazy chunk.
+            int chunkRows;
+            if (currentPosition + chunkLimit >= rowCount) {
+                chunkRows = rowCount - currentPosition;
+            } else {
+                chunkRows = chunkLimit;
+            }
+
+            if (chunkRows <= 0) {
+                // run out.
+                return null;
+            }
+
+            Block[] blocks = new Block[columns];
+
+            // The col id is the precise identifier in the orc file schema, while the col index is just the index in the column list.
+            for (int colIndex = 0; colIndex < ossColumnTransformer.columnCount(); colIndex++) {
+                Integer colId = ossColumnTransformer.getLocInOrc(colIndex);
+                if (colId != null) {
+                    OrcProto.ColumnEncoding encoding = encodings[colId];
+
+                    ColumnReader columnReader = columnReaders.get(colId);
+                    CacheReader<Block> cacheReader = cacheReaders.get(colId);
+                    Preconditions.checkNotNull(columnReader);
+                    Preconditions.checkNotNull(cacheReader);
+
+                    Counter loadTimer = enableMetrics ?
metrics.addCounter(
+                        MetricsNameBuilder.columnMetricsKey(colId, ProfileKeys.SCAN_WORK_BLOCK_LOAD_TIMER),
+                        BLOCK_LOAD_TIMER,
+                        ProfileKeys.SCAN_WORK_BLOCK_LOAD_TIMER.getProfileUnit()
+                    ) : null;
+                    Counter memoryCounter = enableMetrics ? metrics.addCounter(
+                        MetricsNameBuilder.columnMetricsKey(colId, ProfileKeys.SCAN_WORK_BLOCK_MEMORY_COUNTER),
+                        BLOCK_MEMORY_COUNTER,
+                        ProfileKeys.SCAN_WORK_BLOCK_MEMORY_COUNTER.getProfileUnit()
+                    ) : null;
+
+                    // build the block loader for the specified position range.
+                    BlockLoader loader;
+                    if (context.isEnableOrcRawTypeBlock()) {
+                        // Special path for checking cci consistency.
+                        // Normal oss read should not get here.
+                        loader = new OrcRawTypeBlockLoader(logicalRowGroup, colId, currentPosition, chunkRows,
+                            encoding, columnReader, cacheReader, blockCacheManager, context, useBlockCache,
+                            enableColumnReaderLock, chunkLimit, loadTimer, memoryCounter,
+                            onlyCachePrimaryKey, enableSkipCompression);
+                    } else {
+                        loader = new ReactiveBlockLoader(logicalRowGroup, colId, currentPosition, chunkRows,
+                            encoding, columnReader, cacheReader, blockCacheManager, context, useBlockCache,
+                            enableColumnReaderLock, chunkLimit, loadTimer, memoryCounter,
+                            onlyCachePrimaryKey, enableSkipCompression);
+                    }
+
+                    // build the lazy-block with the given loader and schema.
+                    DataType targetType = ossColumnTransformer.getTargetColumnMeta(colIndex).getDataType();
+                    LazyBlock lazyBlock = new CommonLazyBlock(
+                        targetType, loader, columnReader, useSelection,
+                        enableCompatible, timeZone, context, colIndex, ossColumnTransformer
+                    );
+                    blocks[colIndex] = lazyBlock;
+                } else {
+                    blocks[colIndex] = new CommonLazyBlock(
+                        ossColumnTransformer.getTargetColumnMeta(colIndex).getDataType(),
+                        new DummyBlockLoader(currentPosition, chunkRows), null, useSelection,
+                        enableCompatible, timeZone, context, colIndex, ossColumnTransformer
+                    );
+                }
+            }
+            Chunk chunk = new Chunk(chunkRows, blocks);
+
+            // Record the last batch and advance to the start position of the next chunk.
+            lastChunkRows = chunkRows;
+            lastPosition = currentPosition;
+            currentPosition += chunkRows;
+
+            return chunk;
+        }
+
+        @Override
+        public int[] batchRange() {
+            return new int[] {lastPosition + startRowId, lastChunkRows};
+        }
+
+        @Override
+        public int groupId() {
+            return groupId;
+        }
+
+        @Override
+        public int rowCount() {
+            return rowCount;
+        }
+
+        @Override
+        public int batches() {
+            int batches = rowCount / chunkLimit;
+            return rowCount % chunkLimit == 0 ?
batches : batches + 1; + } + } + + @Override + public Path path() { + return filePath; + } + + @Override + public int stripeId() { + return stripeId; + } + + @Override + public int groupId() { + return groupId; + } + + @Override + public int rowCount() { + return rowCount; + } + + @Override + public int startRowId() { + return startRowId; + } + + @Override + public RowGroupReader getReader() { + return new Reader(this); + } + + @Override + public String toString() { + return "LogicalRowGroupImpl{" + + "filePath=" + filePath + + ", stripeId=" + stripeId + + ", groupId=" + groupId + + ", rowCount=" + rowCount + + ", startRowId=" + startRowId + + ", columnIncluded=" + Arrays.toString(columnIncluded) + + ", columns=" + columns + + '}'; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/LongColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/LongColumnReader.java new file mode 100644 index 000000000..a100db3c9 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/LongColumnReader.java @@ -0,0 +1,184 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.DecimalBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.chunk.TimestampBlock; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.google.common.base.Preconditions; +import org.apache.orc.OrcProto; +import org.apache.orc.impl.OrcIndex; + +import java.io.IOException; +import java.text.MessageFormat; + +/** + * An implementation of column reader for Integer Types. + */ +public class LongColumnReader extends AbstractLongColumnReader { + public LongColumnReader(int columnId, boolean isPrimaryKey, StripeLoader stripeLoader, OrcIndex orcIndex, + RuntimeMetrics metrics, OrcProto.ColumnEncoding.Kind kind, int indexStride, + boolean enableMetrics) { + super(columnId, isPrimaryKey, stripeLoader, orcIndex, metrics, kind, indexStride, enableMetrics); + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof LongBlock + || randomAccessBlock instanceof DecimalBlock + || randomAccessBlock instanceof TimestampBlock); + + init(); + + long start = System.nanoTime(); + + // extract long array from different block implementation. 
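+        // LongBlock exposes its backing long[] directly, DecimalBlock exposes its decimal64 values,
+        // and TimestampBlock exposes packed longs, so one decoding loop can fill any of the three.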
+ long[] vector = null; + if (randomAccessBlock instanceof LongBlock) { + vector = ((LongBlock) randomAccessBlock).longArray(); + } else if (randomAccessBlock instanceof DecimalBlock) { + vector = ((DecimalBlock) randomAccessBlock).getDecimal64Values(); + } else if (randomAccessBlock instanceof TimestampBlock) { + vector = ((TimestampBlock) randomAccessBlock).getPacked(); + } + + Preconditions.checkArgument(vector != null && vector.length == positionCount); + + boolean[] nulls = randomAccessBlock.nulls(); + if (present == null) { + randomAccessBlock.setHasNull(false); + int i = 0; + for (; i < positionCount && data.hasNext(); i++) { + // no null value. + long longVal = data.next(); + vector[i] = longVal; + lastPosition++; + } + if (i < positionCount) { + throw GeneralUtil.nestedException(MessageFormat.format( + "Bad position, positionCount = {0}, workId = {1}", i, metrics.name() + )); + } + ((Block) randomAccessBlock).destroyNulls(true); + } else { + randomAccessBlock.setHasNull(true); + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + vector[i] = 0; + nulls[i] = true; + } else { + // if not null + long longVal = data.next(); + vector[i] = longVal; + } + lastPosition++; + } + } + + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } + + @Override + public int next(RandomAccessBlock randomAccessBlock, int positionCount, int[] selection, int selSize) + throws IOException { + + if (selection == null || selSize == 0 || selection.length == 0) { + next(randomAccessBlock, positionCount); + return 0; + } + + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof LongBlock + || randomAccessBlock instanceof DecimalBlock + || randomAccessBlock instanceof TimestampBlock); + + init(); + + long start = System.nanoTime(); + + // extract long array from different block implementation. 
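+        // Same extraction as in next(): each supported block type is backed by a long[] of positionCount entries.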
+ long[] vector = null; + if (randomAccessBlock instanceof LongBlock) { + vector = ((LongBlock) randomAccessBlock).longArray(); + } else if (randomAccessBlock instanceof DecimalBlock) { + vector = ((DecimalBlock) randomAccessBlock).getDecimal64Values(); + } else if (randomAccessBlock instanceof TimestampBlock) { + vector = ((TimestampBlock) randomAccessBlock).getPacked(); + } + + Preconditions.checkArgument(vector != null && vector.length == positionCount); + + int totalSkipCount = 0; + boolean[] nulls = randomAccessBlock.nulls(); + if (present == null) { + randomAccessBlock.setHasNull(false); + + int lastSelectedPos = -1; + for (int i = 0; i < selSize; i++) { + int selectedPos = selection[i]; + + int skipPos = selectedPos - lastSelectedPos - 1; + if (skipPos > 0) { + data.skip(skipPos); + totalSkipCount += skipPos; + lastPosition += skipPos; + } + long longVal = data.next(); + vector[selectedPos] = longVal; + lastPosition++; + + lastSelectedPos = selectedPos; + } + + ((Block) randomAccessBlock).destroyNulls(true); + } else { + randomAccessBlock.setHasNull(true); + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + vector[i] = 0; + nulls[i] = true; + } else { + // if not null + long longVal = data.next(); + vector[i] = longVal; + } + lastPosition++; + } + } + + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + return totalSkipCount; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/MergeIOScanWork.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/MergeIOScanWork.java new file mode 100644 index 000000000..842fcecde --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/MergeIOScanWork.java @@ -0,0 +1,206 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.executor.operator.scan.impl;
+
+import com.alibaba.polardbx.common.utils.Pair;
+import com.alibaba.polardbx.executor.archive.reader.OSSColumnTransformer;
+import com.alibaba.polardbx.executor.chunk.Block;
+import com.alibaba.polardbx.executor.chunk.Chunk;
+import com.alibaba.polardbx.executor.chunk.columnar.LazyBlock;
+import com.alibaba.polardbx.executor.operator.scan.BlockCacheManager;
+import com.alibaba.polardbx.executor.operator.scan.LazyEvaluator;
+import com.alibaba.polardbx.executor.operator.scan.LogicalRowGroup;
+import com.alibaba.polardbx.executor.operator.scan.RowGroupIterator;
+import com.alibaba.polardbx.executor.operator.scan.RowGroupReader;
+import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.Path;
+import org.apache.orc.ColumnStatistics;
+import org.roaringbitmap.RoaringBitmap;
+
+import java.util.ArrayList;
+import java.util.BitSet;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.stream.Collectors;
+
+public class MergeIOScanWork extends AbstractScanWork {
+    private static final boolean IN_ROW_GROUP_BATCH = false;
+
+    private final boolean enableCancelLoading;
+
+    public MergeIOScanWork(String workId,
+                           RuntimeMetrics metrics,
+                           boolean enableMetrics,
+                           LazyEvaluator<Chunk, BitSet> lazyEvaluator,
+                           RowGroupIterator<Block, ColumnStatistics> rgIterator,
+                           RoaringBitmap deletionBitmap, MorselColumnarSplit.ScanRange scanRange,
+                           List<Integer> inputRefsForFilter, List<Integer> inputRefsForProject,
+                           int partNum, int nodePartCount, boolean enableCancelLoading,
+                           OSSColumnTransformer ossColumnTransformer) {
+        super(workId, metrics, enableMetrics, lazyEvaluator, rgIterator, deletionBitmap, scanRange, inputRefsForFilter,
+            inputRefsForProject, partNum, nodePartCount, ossColumnTransformer);
+        this.enableCancelLoading = enableCancelLoading;
+    }
+
+    @Override
+    protected void handleNextWork() throws Throwable {
+        final Path filePath = rgIterator.filePath();
+        final int stripeId = rgIterator.stripeId();
+
+        // not all row groups, but only those that survived the pruner, should be loaded.
+        final boolean[] prunedRowGroupBitmap = rgIterator.rgIncluded();
+        final BlockCacheManager<Block> blockCacheManager = rgIterator.getCacheManager();
+
+        // Get and lazily evaluate chunks until the row group count exceeds the threshold.
+        // NOTE: the row-groups and chunks must be in order.
+        final Map<Integer, List<Chunk>> chunksWithGroup = new TreeMap<>();
+        final List<Integer> selectedRowGroups = new ArrayList<>();
+
+        // for the filter columns, initialize or open the related modules.
+        int filterColumns = inputRefsForFilter.size();
+
+        // for all columns, invoke the IO tasks; collect all row-groups for merging IO tasks.
+        mergeIO(filePath, stripeId,
+            refSet.stream().sorted().collect(Collectors.toList()),
+            blockCacheManager, prunedRowGroupBitmap);
+
+        // no push-down filter, skip evaluation.
+        boolean skipEvaluation = filterColumns == 0;
+
+        while (!isCanceled && rgIterator.hasNext()) {
+            rgIterator.next();
+            LogicalRowGroup<Block, ColumnStatistics> logicalRowGroup = rgIterator.current();
+            final int rowGroupId = logicalRowGroup.groupId();
+
+            // The row group id in the iterator must be valid.
+            Preconditions.checkArgument(prunedRowGroupBitmap[rowGroupId]);
+
+            // A flag for each row group to indicate that at least one block is selected in the row group.
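+            // Only row groups with this flag set are recorded in selectedRowGroups below.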
+            boolean rgSelected = false;
+
+            Chunk chunk;
+            RowGroupReader<Chunk> rowGroupReader = logicalRowGroup.getReader();
+            while ((chunk = rowGroupReader.nextBatch()) != null) {
+                int[] batchRange = rowGroupReader.batchRange();
+                if (skipEvaluation) {
+                    int[] preSelection = selectionOf(batchRange, deletionBitmap);
+                    if (preSelection != null) {
+                        // rebuild the chunk according to the project refs.
+                        chunk = rebuildProject(chunk, preSelection, preSelection.length);
+                    }
+
+                    chunk.setPartIndex(partNum);
+                    chunk.setPartCount(nodePartCount);
+
+                    // no evaluation, just buffer the unloaded chunks.
+                    List<Chunk> chunksInGroup = chunksWithGroup.computeIfAbsent(rowGroupId, any -> new ArrayList<>());
+                    chunksInGroup.add(chunk);
+                    rgSelected = true;
+                    if (!IN_ROW_GROUP_BATCH) {
+                        // add the result and notify the blocked threads.
+                        ioStatus.addResult(chunk);
+                    }
+                    continue;
+                }
+
+                // Proactively load the filter-blocks.
+                for (int filterRef : inputRefsForFilter) {
+                    int chunkIndex = chunkRefMap[filterRef];
+                    Preconditions.checkArgument(chunkIndex >= 0);
+
+                    // all blocks in the chunk are lazy.
+                    // NOTE: explicit type cast?
+                    LazyBlock filterBlock = (LazyBlock) chunk.getBlock(chunkIndex);
+
+                    // Proactively invoke loading, or we can load it during evaluation.
+                    filterBlock.load();
+                }
+
+                long start = System.nanoTime();
+
+                // Get the selection array of this range [n * 1000, (n+1) * 1000] in the row group,
+                // and then evaluate the filter.
+                BitSet bitmap = lazyEvaluator.eval(chunk, batchRange[0], batchRange[1], deletionBitmap);
+
+                // check for zeros in the selection array,
+                // and mark whether this row group is selected or not.
+                boolean hasSelectedPositions = !bitmap.isEmpty();
+
+                rgSelected |= hasSelectedPositions;
+                if (!hasSelectedPositions) {
+                    // if all positions are filtered, skip to the next chunk.
+                    if (enableMetrics) {
+                        evaluationTimer.inc(System.nanoTime() - start);
+                    }
+
+                    // The created chunk and block-loader will be abandoned here.
+                    releaseRef(chunk);
+                    continue;
+                }
+
+                // hold this chunk until all row groups in the scan work are handled.
+                int[] selection = selectionOf(bitmap);
+                if (enableMetrics) {
+                    evaluationTimer.inc(System.nanoTime() - start);
+                }
+
+                // rebuild the chunk according to the project refs.
+                Chunk projectChunk = rebuildProject(chunk, selection, selection.length);
+
+                if (!IN_ROW_GROUP_BATCH) {
+                    // add the result and notify the blocked threads.
+                    ioStatus.addResult(projectChunk);
+                }
+
+                List<Chunk> chunksInGroup = chunksWithGroup.computeIfAbsent(rowGroupId, any -> new ArrayList<>());
+                chunksInGroup.add(projectChunk);
+            }
+
+            // the chunks in this row group have run out; move to the next.
+            if (rgSelected) {
+                selectedRowGroups.add(rowGroupId);
+                if (IN_ROW_GROUP_BATCH) {
+                    List<Chunk> chunksInGroup = chunksWithGroup.get(rowGroupId);
+                    if (chunksInGroup != null) {
+                        for (Chunk result : chunksInGroup) {
+                            // add the result and notify the blocked threads.
+                            ioStatus.addResult(result);
+                        }
+                    }
+                }
+            }
+            // Remove all chunks of this row-group from the buffer.
+            List<Chunk> chunksInGroup;
+            if ((chunksInGroup = chunksWithGroup.remove(rowGroupId)) != null) {
+                chunksInGroup.clear();
+            }
+        }
+
+        // There are no more chunks produced by this row group iterator.
+        rgIterator.noMoreChunks();
+
+        if (enableCancelLoading && selectedRowGroups.isEmpty()) {
+            // no row-group matched the filter predicate, so stop the stripe loading task immediately.
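+            // Cancelling aborts the in-flight stripe IO whose results could never be consumed.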
+ isIOCanceled.set(true); + } + ioStatus.finish(); + } + +} + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/MockScanPreProcessor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/MockScanPreProcessor.java new file mode 100644 index 000000000..8fa2f96dd --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/MockScanPreProcessor.java @@ -0,0 +1,307 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.jdbc.ParameterContext; +import com.alibaba.polardbx.common.jdbc.Parameters; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.columnar.pruning.ColumnarPruneManager; +import com.alibaba.polardbx.executor.columnar.pruning.index.IndexPruneContext; +import com.alibaba.polardbx.executor.columnar.pruning.index.IndexPruner; +import com.alibaba.polardbx.executor.columnar.pruning.predicate.ColumnPredicatePruningInf; +import com.alibaba.polardbx.executor.operator.scan.ORCMetaReader; +import com.alibaba.polardbx.executor.operator.scan.ScanPreProcessor; +import com.alibaba.polardbx.optimizer.statis.ColumnarTracer; +import com.google.common.base.Preconditions; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; +import org.apache.calcite.rex.RexNode; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.orc.StripeInformation; +import org.apache.orc.impl.OrcTail; +import org.roaringbitmap.RoaringBitmap; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.concurrent.ExecutorService; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static com.alibaba.polardbx.executor.columnar.pruning.data.PruneUtils.transformRexToIndexMergeTree; + +/** + * A mocked implementation of ScanPreProcessor that can generate + * file preheat meta, deletion bitmap and pruning result (all selected). + */ +public class MockScanPreProcessor implements ScanPreProcessor { + + private static final Cache PREHEATED_CACHE = + CacheBuilder.newBuilder().maximumSize(1 << 12).build(); + + /** + * File path participated in preprocessor. + */ + private final Set filePaths; + + /** + * A shared configuration object to avoid initialization of large parameter list. + */ + private final Configuration configuration; + + /** + * The filesystem storing the files in file path list. 
+ */ + private final FileSystem fileSystem; + + /** + * The ratio that row-groups will be selected in a stripe. + */ + private final double groupsRatio; + + /** + * The ratio that row positions in file will be marked. + */ + private final double deletionRatio; + + /** + * Mapping from file path to its preheated file meta. + */ + private final Map preheatFileMetaMap; + + /** + * The future will be null if preparation has not been invoked. + */ + private ListenableFuture future; + + /** + * The mocked pruning results that mapping from file path to stride + group info. + */ + private Map> rowGroupMatrix; + + /** + * Mapping from file path to deletion bitmap. + */ + private Map deletions; + + private List rexList; + + /** + * Store the throwable info generated during preparation. + */ + private Throwable throwable; + + private IndexPruneContext ipc; + + private final boolean enableOssCompatible; + + public MockScanPreProcessor(Configuration configuration, + FileSystem fileSystem, + List rexList, + Map params, + double groupsRatio, + double deletionRatio, + boolean enableOssCompatible) { + this.filePaths = new TreeSet<>(); + + this.configuration = configuration; + this.fileSystem = fileSystem; + this.deletionRatio = deletionRatio; + this.groupsRatio = groupsRatio; + + this.preheatFileMetaMap = new HashMap<>(); + this.rowGroupMatrix = new HashMap<>(); + this.deletions = new HashMap<>(); + this.rexList = rexList; + this.ipc = new IndexPruneContext(); + ipc.setParameters(new Parameters(params)); + + this.enableOssCompatible = enableOssCompatible; + } + + @Override + public void addFile(Path filePath) { + this.filePaths.add(filePath); + } + + @Override + public ListenableFuture prepare(ExecutorService executor, String traceId, ColumnarTracer tracer) { + SettableFuture future = SettableFuture.create(); + ipc.setPruneTracer(tracer); + // Is there a more elegant execution mode? + executor.submit( + () -> { + for (Path filePath : filePaths) { + try { + // preheat all meta from orc file. + PreheatFileMeta preheat = + PREHEATED_CACHE.get(filePath, () -> preheat(filePath)); + + preheatFileMetaMap.put(filePath, preheat); + + // rex+pc -> distribution segment condition + indexes merge tree + ColumnPredicatePruningInf columnPredicate = transformRexToIndexMergeTree(rexList, ipc); + + if (columnPredicate == null) { + generateFullMatrix(filePath); + } else { + IndexPruner indexPruner = ColumnarPruneManager.getIndexPruner(filePath, preheat, + Collections.emptyList(), 1, + IntStream.range(0, preheat.getPreheatTail().getTypes().size()).boxed().collect( + Collectors.toList()), enableOssCompatible); + // prune stripe&row groups + rowGroupMatrix.put(filePath, + indexPruner.pruneToSortMap("", Lists.newArrayList(), columnPredicate, ipc)); + } + + // generate deletion according to deletion ratio and row count. 
generateDeletion(filePath);
+                    } catch (Exception e) {
+                        throwable = e;
+                        future.set(null);
+                        return;
+                    }
+                }
+                future.set(null);
+            }
+        );
+
+        this.future = future;
+        return future;
+    }
+
+    @Override
+    public boolean isPrepared() {
+        throwIfFailed();
+        return throwable == null && future != null && future.isDone();
+    }
+
+    @Override
+    public SortedMap<Integer, boolean[]> getPruningResult(Path filePath) {
+        throwIfFailed();
+        Preconditions.checkArgument(isPrepared());
+        return rowGroupMatrix.get(filePath);
+    }
+
+    @Override
+    public PreheatFileMeta getPreheated(Path filePath) {
+        throwIfFailed();
+        Preconditions.checkArgument(isPrepared());
+        return preheatFileMetaMap.get(filePath);
+    }
+
+    @Override
+    public RoaringBitmap getDeletion(Path filePath) {
+        throwIfFailed();
+        Preconditions.checkArgument(isPrepared());
+        return deletions.get(filePath);
+    }
+
+    @Override
+    public void throwIfFailed() {
+        if (throwable != null) {
+            throw GeneralUtil.nestedException(throwable);
+        }
+    }
+
+    private void generateMatrix(Path filePath) {
+        PreheatFileMeta preheatFileMeta = preheatFileMetaMap.get(filePath);
+        OrcTail orcTail = preheatFileMeta.getPreheatTail();
+
+        int indexStride = orcTail.getFooter().getRowIndexStride();
+
+        // row-group bitmaps keyed and sorted by stripe id.
+        SortedMap<Integer, boolean[]> matrix = new TreeMap<>();
+        for (StripeInformation stripeInformation : orcTail.getStripes()) {
+            int stripeId = (int) stripeInformation.getStripeId();
+            int groupsInStripe = (int) ((stripeInformation.getNumberOfRows() + indexStride - 1) / indexStride);
+
+            // build the row-groups by stripe row count and index stride,
+            // and mark the first (groupsInStripe * groupsRatio) positions as selected.
+            boolean[] groupIncluded = new boolean[groupsInStripe];
+            int length = Math.min(groupsInStripe, (int) (groupsInStripe * groupsRatio));
+            Arrays.fill(groupIncluded, 0, length, true);
+
+            matrix.put(stripeId, groupIncluded);
+        }
+        rowGroupMatrix.put(filePath, matrix);
+    }
+
+    private void generateDeletion(Path filePath) {
+        PreheatFileMeta preheatFileMeta = preheatFileMetaMap.get(filePath);
+        OrcTail orcTail = preheatFileMeta.getPreheatTail();
+
+        final int rowCount = (int) orcTail.getFileTail().getFooter().getNumberOfRows();
+        RoaringBitmap bitmap = new RoaringBitmap();
+
+        // generate a non-empty bitmap only when the deletion ratio is non-zero
+        // and the derived stride is smaller than the row count.
+        int stride;
+        if (deletionRatio != 0d && (stride = (int) Math.ceil(1d / deletionRatio)) < rowCount) {
+            for (int i = 0; i < rowCount; i += stride) {
+                bitmap.add(i);
+            }
+        }
+
+        deletions.put(filePath, bitmap);
+    }
+
+    private PreheatFileMeta preheat(Path filePath) throws IOException {
+        ORCMetaReader metaReader = null;
+        try {
+            metaReader = ORCMetaReader.create(configuration, fileSystem);
+            return metaReader.preheat(filePath);
+        } finally {
+            // guard against a failed create(), which would leave metaReader null.
+            if (metaReader != null) {
+                metaReader.close();
+            }
+        }
+    }
+
+    private void generateFullMatrix(Path filePath) {
+        PreheatFileMeta preheatFileMeta = preheatFileMetaMap.get(filePath);
+        OrcTail orcTail = preheatFileMeta.getPreheatTail();
+
+        int indexStride = orcTail.getFooter().getRowIndexStride();
+
+        // row-group bitmaps keyed and sorted by stripe id.
+        SortedMap<Integer, boolean[]> matrix = new TreeMap<>();
+        for (StripeInformation stripeInformation : orcTail.getStripes()) {
+            int stripeId = (int) stripeInformation.getStripeId();
+            int groupsInStripe = (int) ((stripeInformation.getNumberOfRows() + indexStride - 1) / indexStride);
+
+            // build the row-groups by stripe row count and index stride,
+            // and mark all groups as selected.
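+            // (full matrix: no predicate was pushed down, so no row group can be pruned)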
+ boolean[] groupIncluded = new boolean[groupsInStripe]; + Arrays.fill(groupIncluded, true); + + matrix.put(stripeId, groupIncluded); + } + rowGroupMatrix.put(filePath, matrix); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/MorselColumnarSplit.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/MorselColumnarSplit.java new file mode 100644 index 000000000..90580d2ac --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/MorselColumnarSplit.java @@ -0,0 +1,913 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.Engine; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.archive.reader.OSSColumnTransformer; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.gms.ColumnarManager; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItem; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItemKey; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFManager; +import com.alibaba.polardbx.executor.operator.scan.BlockCacheManager; +import com.alibaba.polardbx.executor.operator.scan.ColumnReader; +import com.alibaba.polardbx.executor.operator.scan.ColumnarSplit; +import com.alibaba.polardbx.executor.operator.scan.LazyEvaluator; +import com.alibaba.polardbx.executor.operator.scan.LogicalRowGroup; +import com.alibaba.polardbx.executor.operator.scan.RowGroupIterator; +import com.alibaba.polardbx.executor.operator.scan.ScanPolicy; +import com.alibaba.polardbx.executor.operator.scan.ScanPreProcessor; +import com.alibaba.polardbx.executor.operator.scan.ScanWork; +import com.alibaba.polardbx.executor.operator.scan.metrics.ProfileAccumulatorType; +import com.alibaba.polardbx.executor.operator.scan.metrics.ProfileUnit; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; +import com.alibaba.polardbx.optimizer.statis.OperatorStatistics; +import com.google.common.base.Preconditions; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.orc.ColumnStatistics; +import org.apache.orc.CompressionKind; +import org.apache.orc.OrcFile; +import org.apache.orc.OrcProto; +import org.apache.orc.StripeInformation; +import org.apache.orc.TypeDescription; +import org.apache.orc.UserMetadataUtil; +import org.apache.orc.impl.OrcTail; +import 
org.apache.orc.impl.reader.ReaderEncryption; +import org.roaringbitmap.RoaringBitmap; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.BitSet; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.concurrent.ExecutorService; +import java.util.stream.Collectors; + +/** + * A columnar split responsible for morsel-driven scheduling. + */ +public class MorselColumnarSplit implements ColumnarSplit { + private final ExecutionContext executionContext; + + /** + * Executor service for IO tasks. + */ + private final ExecutorService ioExecutor; + + /** + * Engine of filesystem. + */ + private final Engine engine; + + /** + * Filesystem for storing columnar files. + */ + private final FileSystem fileSystem; + + /** + * Hadoop-style configuration. + */ + private final Configuration configuration; + + /** + * Unique sequence id for Driver-level identification. + */ + private final int sequenceId; + + /** + * Unique identification of columnar file. + */ + private final int fileId; + + /** + * File path with uri about filesystem. + */ + private final Path filePath; + + private final OSSColumnTransformer ossColumnTransformer; + + /** + * The column ids of primary keys in the file. + * It may be null. + */ + private final int[] primaryKeyColIds; + + /** + * Subset of columns in this file. + */ + private List inputRefsForFilter; + private List inputRefsForProject; + + /** + * The limit of chunk rows fetched from columnar files. + */ + private final int chunkLimit; + + /** + * Global block cache manager. + */ + private final BlockCacheManager blockCacheManager; + + /** + * The threshold of row-group count in one morsel-unit. + */ + private final int rgThreshold; + + private final LazyEvaluator lazyEvaluator; + + private final ScanPreProcessor preProcessor; + + /** + * Inner iterator to get the next scan work. 
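+     * Lazily created on the first call to nextWork(), after the pre-processor has finished.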
+ */ + private ScanWorkIterator scanWorkIterator; + + private int partNum; + + private int nodePartCount; + + private final MemoryAllocatorCtx memoryAllocatorCtx; + + private final FragmentRFManager fragmentRFManager; + private final Map rfFilterRefInFileMap; + private final OperatorStatistics operatorStatistics; + + public MorselColumnarSplit(ExecutionContext executionContext, ExecutorService ioExecutor, Engine engine, + FileSystem fileSystem, + Configuration configuration, int sequenceId, int fileId, Path filePath, + OSSColumnTransformer ossColumnTransformer, int[] primaryKeyColIds, + List inputRefsForFilter, List inputRefsForProject, + int chunkLimit, + BlockCacheManager blockCacheManager, + int rgThreshold, + LazyEvaluator lazyEvaluator, ScanPreProcessor preProcessor, + int partNum, int nodePartCount, MemoryAllocatorCtx memoryAllocatorCtx, + FragmentRFManager fragmentRFManager, + Map rfFilterRefInFileMap, + OperatorStatistics operatorStatistics) + throws IOException { + this.executionContext = executionContext; + this.ioExecutor = ioExecutor; + this.engine = engine; + this.fileSystem = fileSystem; + this.configuration = configuration; + this.sequenceId = sequenceId; + this.fileId = fileId; + this.filePath = filePath; + this.ossColumnTransformer = ossColumnTransformer; + this.primaryKeyColIds = primaryKeyColIds; + this.inputRefsForFilter = inputRefsForFilter; + this.inputRefsForProject = inputRefsForProject; + this.chunkLimit = chunkLimit; + + this.blockCacheManager = blockCacheManager; + this.rgThreshold = rgThreshold; + this.lazyEvaluator = lazyEvaluator; + this.preProcessor = preProcessor; + + this.partNum = partNum; + this.nodePartCount = nodePartCount; + this.memoryAllocatorCtx = memoryAllocatorCtx; + this.fragmentRFManager = fragmentRFManager; + this.rfFilterRefInFileMap = rfFilterRefInFileMap; + this.operatorStatistics = operatorStatistics; + } + + public static ColumnarSplitBuilder newBuilder() { + return new MorselColumnarSplitBuilder(); + } + + /** + * Location and range of the row-groups for the scan work. + *
+     * <p>
+     * For example, if the total number of row-groups is 16,
+     * the rgIncluded bitmap is {0,1,0,1,0,0,0,1,0,1,1,1,0,0,0,1}, and the rgThreshold is 3.
+     * Then the list of scan-ranges will be:
+     * scan-range1: {0,1,0,1,0,0,0,1}
+     * scan-range2: {0,1,1,1}
+     * scan-range3: {0,0,0,1}
+     *
+     * <p>
+     * To simplify the range representation, all ranges share the same rgIncluded and use {startGroupId, effectiveCount}
+     * to represent their position on rgIncluded:
+     * scan-range1: rgIncluded = {0,1,0,1,0,0,0,1,0,1,1,1,0,0,0,1}, startGroupId = 0, effectiveCount = 3
+     * scan-range2: rgIncluded = {0,1,0,1,0,0,0,1,0,1,1,1,0,0,0,1}, startGroupId = 8, effectiveCount = 3
+     * scan-range3: rgIncluded = {0,1,0,1,0,0,0,1,0,1,1,1,0,0,0,1}, startGroupId = 12, effectiveCount = 1
+     */
+    public static class ScanRange {
+        /**
+         * The stripe of this range.
+         */
+        final int stripeId;
+
+        /**
+         * The count of available row groups starting from the startRowGroupId.
+         */
+        final int effectiveGroupCount;
+
+        /**
+         * The starting group id of the scan range.
+         * NOTE: Not every row-group in this scan-range is selected or readable.
+         */
+        final int startRowGroupId;
+        final boolean[] rowGroupIncluded;
+
+        ScanRange(int stripeId, int startRowGroupId, int effectiveGroupCount, boolean[] rowGroupIncluded) {
+            this.stripeId = stripeId;
+            this.effectiveGroupCount = effectiveGroupCount;
+            this.startRowGroupId = startRowGroupId;
+            this.rowGroupIncluded = rowGroupIncluded;
+        }
+
+        public int getStripeId() {
+            return stripeId;
+        }
+
+        public int getEffectiveGroupCount() {
+            return effectiveGroupCount;
+        }
+
+        public int getStartRowGroupId() {
+            return startRowGroupId;
+        }
+
+        public boolean[] getRowGroupIncluded() {
+            return rowGroupIncluded;
+        }
+
+        @Override
+        public String toString() {
+            return "ScanRange{" +
+                "stripeId=" + stripeId +
+                ", effectiveGroupCount=" + effectiveGroupCount +
+                ", startRowGroupId=" + startRowGroupId +
+                ", rowGroupIncluded=" + Arrays.toString(rowGroupIncluded) +
+                '}';
+        }
+    }
+
+    /**
+     * Iterator for scan works.
+     */
+    class ScanWorkIterator implements Iterator<ScanWork<ColumnarSplit, Chunk>> {
+        /*================== Inner states for iterator ====================*/
+
+        // start from 0
+        private int stripeListIndex;
+
+        // start from 0
+        private int rowGroupIndex;
+
+        private ScanRange currentRange;
+
+        private int scanWorkIndex;
+
+        /*================== Come from pre-processor ====================*/
+
+        /**
+         * Preheated file meta from the columnar file.
+         */
+        private PreheatFileMeta preheatFileMeta;
+
+        /**
+         * Selected stripe ids.
+         */
+        private List<Integer> stripeIds;
+
+        /**
+         * The start row id of each stripe.
+         */
+        private Map<Integer, Long> startRowInStripeMap;
+
+        /**
+         * Filtered row-group bitmaps for each stripe.
+         */
+        private SortedMap<Integer, boolean[]> rowGroups;
+
+        /**
+         * File-level deletion bitmap.
+         */
+        private final RoaringBitmap deletion;
+
+        /*================== Come from parameter collection ====================*/
+
+        // parsed from the file meta and reused by all scan works.
+        private SortedMap<Integer, StripeInformation> stripeInformationMap;
+        private int compressionSize;
+        private CompressionKind compressionKind;
+        private TypeDescription fileSchema;
+        private boolean[] columnIncluded;
+        private OrcFile.WriterVersion version;
+        private ReaderEncryption encryption;
+
+        /**
+         * Mapping from stripeId to column-encoding info.
+         */
+        private SortedMap<Integer, OrcProto.ColumnEncoding[]> encodingMap;
+        private boolean ignoreNonUtf8BloomFilter;
+        private long maxBufferSize;
+        private int indexStride;
+        private boolean enableDecimal64;
+        private int maxDiskRangeChunkLimit;
+        private long maxMergeDistance;
+
+        ScanWorkIterator() throws IOException {
+            // load from the pre-processor.
Preconditions.checkArgument(preProcessor.isPrepared());
+            preheatFileMeta = preProcessor.getPreheated(filePath);
+            rowGroups = preProcessor.getPruningResult(filePath);
+            stripeIds = rowGroups.keySet().stream().sorted().collect(Collectors.toList());
+            deletion = preProcessor.getDeletion(filePath);
+
+            // for iterator inner states.
+            stripeListIndex = 0;
+            rowGroupIndex = 0;
+            currentRange = null;
+            scanWorkIndex = 0;
+
+            // collect parameters from the preheated meta.
+            collectParams();
+        }
+
+        private void collectParams() throws IOException {
+            OrcTail orcTail = preheatFileMeta.getPreheatTail();
+
+            // compression info
+            compressionKind = orcTail.getCompressionKind();
+            compressionSize = orcTail.getCompressionBufferSize();
+
+            // get the mapping from stripe id to stripe information.
+            List<StripeInformation> stripeInformationList = orcTail.getStripes();
+            stripeInformationMap = stripeInformationList.stream().collect(Collectors.toMap(
+                stripe -> (int) stripe.getStripeId(),
+                stripe -> stripe,
+                (s1, s2) -> s1,
+                () -> new TreeMap<>()
+            ));
+
+            // Get the start row of each stripe.
+            startRowInStripeMap = new HashMap<>();
+            long startRows = 0;
+            for (int stripeId = 0; stripeId < stripeInformationList.size(); stripeId++) {
+                StripeInformation stripeInformation = stripeInformationList.get(stripeId);
+                startRowInStripeMap.put(stripeId, startRows);
+                startRows += stripeInformation.getNumberOfRows();
+            }
+
+            fileSchema = orcTail.getSchema();
+            version = orcTail.getWriterVersion();
+
+            // The column-included bitmap marks every columnId that should be accessed.
+            // colIndex = 0 denotes the struct column.
+            columnIncluded = new boolean[fileSchema.getMaximumId() + 1];
+            Arrays.fill(columnIncluded, false);
+            columnIncluded[0] = true;
+
+            for (int i = 0; i < ossColumnTransformer.columnCount(); i++) {
+                Integer locInOrc = ossColumnTransformer.getLocInOrc(i);
+                if (locInOrc != null) {
+                    columnIncluded[locInOrc] = true;
+                }
+            }
+
+            // encryption info for reading.
+            OrcProto.Footer footer = orcTail.getFooter();
+            encryption = new ReaderEncryption(footer, fileSchema,
+                orcTail.getStripeStatisticsOffset(), orcTail.getTailBuffer(), stripeInformationList,
+                null, configuration);
+
+            encodingMap = stripeInformationList.stream().collect(Collectors.toMap(
+                stripe -> (int) stripe.getStripeId(),
+                stripe -> StaticStripePlanner.buildEncodings(
+                    encryption,
+                    columnIncluded,
+                    preheatFileMeta.getStripeFooter((int) stripe.getStripeId())),
+                (s1, s2) -> s1,
+                () -> new TreeMap<>()
+            ));
+
+            // should the reader ignore the obsolete non-UTF8 bloom filters.
+            ignoreNonUtf8BloomFilter = false;
+
+            // max buffer size in a single IO task.
+            maxBufferSize = Integer.MAX_VALUE - 1024;
+
+            // the max row count in one row group.
+            indexStride = orcTail.getFooter().getRowIndexStride();
+
+            enableDecimal64 = UserMetadataUtil.extractBooleanValue(orcTail.getFooter().getMetadataList(),
+                UserMetadataUtil.ENABLE_DECIMAL_64, false);
+
+            // When reading stripes > 2GB, specify the max limit for the chunk size.
+            maxDiskRangeChunkLimit = Integer.MAX_VALUE - 1024;
+
+            // max merge distance of disk IO.
+            maxMergeDistance = executionContext.getParamManager().getLong(ConnectionParams.OSS_ORC_MAX_MERGE_DISTANCE);
+        }
+
+        @Override
+        public boolean hasNext() {
+            if (stripeIds.isEmpty()) {
+                return false;
+            }
+
+            // Move the row group index in the current stripe.
+            if (moveRowGroupIndex()) {
+                return true;
+            }
+
+            // Try to get the next stripe when the row-group index is out of bound.
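+            // moveRowGroupIndex() rescans the new stripe's bitmap and builds the next ScanRange if any group is selected.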
+ if (++stripeListIndex < stripeIds.size()) { + rowGroupIndex = 0; + if (moveRowGroupIndex()) { + return true; + } + } + + return false; + } + + @Override + public ScanWork next() { + String scanWorkId = generateScanWorkId( + executionContext.getTraceId(), + filePath.toString(), + currentRange.stripeId, + scanWorkIndex++ + ); + + int scanPolicyId = executionContext.getParamManager() + .getInt(ConnectionParams.SCAN_POLICY); + boolean enableMetrics = executionContext.getParamManager() + .getBoolean(ConnectionParams.ENABLE_COLUMNAR_METRICS); + boolean enableCancelLoading = executionContext.getParamManager() + .getBoolean(ConnectionParams.ENABLE_CANCEL_STRIPE_LOADING); + boolean activeLoading = executionContext.getParamManager() + .getBoolean(ConnectionParams.ENABLE_LAZY_BLOCK_ACTIVE_LOADING); + boolean useInFlightBlockCache = executionContext.getParamManager() + .getBoolean(ConnectionParams.ENABLE_USE_IN_FLIGHT_BLOCK_CACHE); + + RuntimeMetrics metrics = RuntimeMetrics.create(scanWorkId); + + // Add parent metrics node + if (enableMetrics) { + metrics.addDerivedCounter(AsyncStripeLoader.ASYNC_STRIPE_LOADER_MEMORY, + null, ProfileUnit.BYTES, ProfileAccumulatorType.SUM); + metrics.addDerivedCounter(AsyncStripeLoader.ASYNC_STRIPE_LOADER_TIMER, + null, ProfileUnit.NANO_SECOND, ProfileAccumulatorType.SUM); + metrics.addDerivedCounter(AsyncStripeLoader.ASYNC_STRIPE_LOADER_BYTES_RANGE, + null, ProfileUnit.BYTES, ProfileAccumulatorType.SUM); + + metrics.addDerivedCounter(ColumnReader.COLUMN_READER_MEMORY, + null, ProfileUnit.BYTES, ProfileAccumulatorType.SUM); + metrics.addDerivedCounter(ColumnReader.COLUMN_READER_TIMER, + null, ProfileUnit.NANO_SECOND, ProfileAccumulatorType.SUM); + + metrics.addDerivedCounter(LogicalRowGroup.BLOCK_LOAD_TIMER, + null, ProfileUnit.NANO_SECOND, ProfileAccumulatorType.SUM); + metrics.addDerivedCounter(LogicalRowGroup.BLOCK_MEMORY_COUNTER, + null, ProfileUnit.BYTES, ProfileAccumulatorType.SUM); + + metrics.addDerivedCounter(ScanWork.EVALUATION_TIMER, + null, ProfileUnit.NANO_SECOND, ProfileAccumulatorType.SUM); + } + + if (currentRange != null) { + RowGroupIterator rowGroupIterator = new RowGroupIteratorImpl( + metrics, + + // The range of this row-group iterator. + currentRange.stripeId, + currentRange.startRowGroupId, + currentRange.effectiveGroupCount, + currentRange.rowGroupIncluded, + + // primary key col ids. + primaryKeyColIds, + + // parameters for IO task. + ioExecutor, fileSystem, configuration, filePath, + + // for compression + compressionSize, compressionKind, + preheatFileMeta, + + // for stripe-level parser + stripeInformationMap.get(currentRange.stripeId), + startRowInStripeMap.get(currentRange.stripeId), + + fileSchema, version, encryption, + encodingMap.get(currentRange.stripeId), + ignoreNonUtf8BloomFilter, + maxBufferSize, maxDiskRangeChunkLimit, maxMergeDistance, + chunkLimit, blockCacheManager, ossColumnTransformer, + executionContext, columnIncluded, indexStride, enableDecimal64, + memoryAllocatorCtx); + + ScanPolicy scanPolicy = ScanPolicy.of(scanPolicyId); + if (executionContext.isEnableOrcDeletedScan()) { + // Special path for check cci consistency. + // Normal oss read should not get here. 
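+                // DELETED_SCAN overrides the configured policy so that deleted rows are emitted for the cci consistency check.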
scanPolicy = ScanPolicy.DELETED_SCAN;
+            }
+
+            ScanWork<ColumnarSplit, Chunk> scanWork;
+            switch (scanPolicy) {
+            case IO_PRIORITY:
+                scanWork = new IOPriorityScanWork(
+                    scanWorkId,
+                    metrics,
+                    enableMetrics,
+                    lazyEvaluator,
+                    rowGroupIterator, deletion,
+                    currentRange, inputRefsForFilter, inputRefsForProject,
+                    partNum, nodePartCount,
+                    enableCancelLoading,
+                    ossColumnTransformer);
+                break;
+            case FILTER_PRIORITY:
+                scanWork = new FilterPriorityScanWork(
+                    scanWorkId,
+                    metrics,
+                    enableMetrics,
+                    lazyEvaluator,
+                    rowGroupIterator, deletion,
+                    currentRange, inputRefsForFilter, inputRefsForProject,
+                    partNum, nodePartCount,
+                    activeLoading, chunkLimit, useInFlightBlockCache,
+                    fragmentRFManager, rfFilterRefInFileMap, operatorStatistics,
+                    ossColumnTransformer);
+                break;
+            case MERGE_IO:
+                scanWork = new MergeIOScanWork(
+                    scanWorkId,
+                    metrics,
+                    enableMetrics,
+                    lazyEvaluator,
+                    rowGroupIterator, deletion,
+                    currentRange, inputRefsForFilter, inputRefsForProject,
+                    partNum, nodePartCount,
+                    enableCancelLoading,
+                    ossColumnTransformer);
+                break;
+            case DELETED_SCAN:
+                scanWork = new DeletedScanWork(
+                    scanWorkId,
+                    metrics,
+                    enableMetrics,
+                    lazyEvaluator,
+                    rowGroupIterator, deletion,
+                    currentRange, inputRefsForFilter, inputRefsForProject,
+                    partNum, nodePartCount,
+                    activeLoading, ossColumnTransformer);
+                break;
+            default:
+                throw new UnsupportedOperationException();
+            }
+
+            // Must call the hasNext() method again before fetching the next range.
+            currentRange = null;
+            return scanWork;
+        }
+        return null;
+    }
+
+        private boolean moveRowGroupIndex() {
+            // if the row group index is less than the maximum row group index in the stripe, move forward.
+            int stripeId = stripeIds.get(stripeListIndex);
+            StripeInformation stripe = stripeInformationMap.get(stripeId);
+            int rowGroupCountInStripe = (int) ((stripe.getNumberOfRows() + indexStride - 1) / indexStride);
+
+            // Try to get the next row group.
+            if (rowGroupIndex < rowGroupCountInStripe) {
+
+                // The maximum row group count we can get from the remaining row-groups.
+                int maxRowGroupCount = Math.min(rowGroupCountInStripe - rowGroupIndex, rgThreshold);
+
+                // It's the bitmap of all selected row-groups in the stripe.
+                boolean[] rgIncluded = rowGroups.get(stripeId);
+                int effectiveCount = 0;
+                int effectiveRowGroupIndex;
+                for (effectiveRowGroupIndex = rowGroupIndex;
+                     effectiveRowGroupIndex < rgIncluded.length && effectiveCount < maxRowGroupCount;
+                     effectiveRowGroupIndex++) {
+
+                    // Find all the effective row-groups from the last row-group index,
+                    // until the effective count reaches the maximum row-group count.
+                    if (rgIncluded[effectiveRowGroupIndex]) {
+                        effectiveCount++;
+                    }
+                }
+
+                if (effectiveCount == 0) {
+                    // It means no effective row-group was found.
+                    return false;
+                }
+
+                currentRange = new ScanRange(stripeId, rowGroupIndex, effectiveCount, rgIncluded);
+
+                // We must ensure that the ranges of all works do not overlap.
+                rowGroupIndex = effectiveRowGroupIndex;
+                return true;
+            }
+            return false;
+        }
+    }
+
+    @Override
+    public ScanWork nextWork() {
+        if (scanWorkIterator == null) {
+            // The pre-processor must have been done.
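+            // (its preheated meta, pruning result and deletion bitmap are consumed by the ScanWorkIterator constructor)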
if (!preProcessor.isPrepared()) {
+                throw GeneralUtil.nestedException("The pre-processor is not prepared");
+            }
+
+            try {
+                scanWorkIterator = new ScanWorkIterator();
+            } catch (IOException e) {
+                throw GeneralUtil.nestedException("Fail to initialize scan-work iterator", e);
+            }
+        }
+
+        if (scanWorkIterator.hasNext()) {
+            return (ScanWork) scanWorkIterator.next();
+        }
+        return null;
+    }
+
+    @Override
+    public ColumnarSplitPriority getPriority() {
+        return ColumnarSplitPriority.ORC_SPLIT_PRIORITY;
+    }
+
+    @Override
+    public String getHostAddress() {
+        return filePath.toString();
+    }
+
+    @Override
+    public int getSequenceId() {
+        return sequenceId;
+    }
+
+    @Override
+    public int getFileId() {
+        return fileId;
+    }
+
+    private static String generateScanWorkId(String traceId, String file, int stripeId, int workIndex) {
+        return new StringBuilder()
+            .append("ScanWork$")
+            .append(traceId).append('$')
+            .append(file).append('$')
+            .append(stripeId).append('$')
+            .append(workIndex).toString();
+    }
+
+    /**
+     * Builder for the morsel-columnar-split.
+     */
+    static class MorselColumnarSplitBuilder implements ColumnarSplitBuilder {
+        private ExecutionContext executionContext;
+        private ExecutorService ioExecutor;
+        private Engine engine;
+        private FileSystem fileSystem;
+        private Configuration configuration;
+        private int sequenceId;
+        private int fileId;
+        private Path filePath;
+        private OSSColumnTransformer ossColumnTransformer;
+        private String logicalSchema;
+        private String logicalTable;
+        private List<Integer> inputRefsForFilter;
+        private List<Integer> inputRefsForProject;
+        private int chunkLimit;
+        private BlockCacheManager<Block> blockCacheManager;
+        private int rgThreshold;
+        private LazyEvaluator<Chunk, BitSet> lazyEvaluator;
+        private ScanPreProcessor preProcessor;
+        private ColumnarManager columnarManager;
+        private Long tso;
+        private boolean isColumnarMode;
+
+        private int partNum;
+
+        private int nodePartCount;
+        private MemoryAllocatorCtx memoryAllocatorCtx;
+
+        private FragmentRFManager fragmentRFManager;
+        private OperatorStatistics operatorStatistics;
+
+        @Override
+        public ColumnarSplit build() {
+            try {
+
+                // CASE 1: when the evaluator is null, the input refs for the filter must be empty.
+                // CASE 2: when the evaluator is entirely a constant expression, the input refs for the filter must be empty.
+                Preconditions.checkArgument((lazyEvaluator == null
+                        || lazyEvaluator.isConstantExpression()) == (inputRefsForFilter.isEmpty()),
+                    "when evaluator is null, the input refs for filter must be empty.");
+
+                Map<FragmentRFItemKey, Integer> rfFilterRefInFileMap = new HashMap<>();
+                if (fragmentRFManager != null) {
+
+                    // For each item, map the source ref to the file column ref.
+                    for (Map.Entry<FragmentRFItemKey, FragmentRFItem> itemEntry
+                        : fragmentRFManager.getAllItems().entrySet()) {
+                        List<Integer> rfFilterChannels = new ArrayList<>();
+                        rfFilterChannels.add(itemEntry.getValue().getSourceRefInFile());
+
+                        rfFilterChannels = isColumnarMode
+                            ? columnarManager.getPhysicalColumnIndexes(tso, filePath.getName(), rfFilterChannels)
+                            : rfFilterChannels;
+
+                        rfFilterRefInFileMap.put(itemEntry.getKey(), rfFilterChannels.get(0));
+                    }
+                }
+
+                int[] primaryKeyColIds =
+                    isColumnarMode ?
columnarManager.getPrimaryKeyColumns(filePath.getName()) : null; + + // To distinguish the columnar mode from archive mode + return new MorselColumnarSplit( + executionContext, ioExecutor, engine, fileSystem, + configuration, sequenceId, fileId, filePath, + ossColumnTransformer, primaryKeyColIds, + + inputRefsForFilter, inputRefsForProject, + + chunkLimit, blockCacheManager, + rgThreshold, lazyEvaluator, + preProcessor, + partNum, nodePartCount, memoryAllocatorCtx, fragmentRFManager, rfFilterRefInFileMap, + operatorStatistics); + } catch (IOException e) { + throw GeneralUtil.nestedException("Fail to build columnar split.", e); + } + } + + @Override + public ColumnarSplitBuilder executionContext(ExecutionContext context) { + this.executionContext = context; + return this; + } + + @Override + public ColumnarSplitBuilder ioExecutor(ExecutorService ioExecutor) { + this.ioExecutor = ioExecutor; + return this; + } + + @Override + public ColumnarSplitBuilder fileSystem(FileSystem fileSystem, Engine engine) { + this.fileSystem = fileSystem; + this.engine = engine; + return this; + } + + @Override + public ColumnarSplitBuilder configuration(Configuration configuration) { + this.configuration = configuration; + return this; + } + + @Override + public ColumnarSplitBuilder sequenceId(int sequenceId) { + this.sequenceId = sequenceId; + return this; + } + + @Override + public ColumnarSplitBuilder file(Path filePath, int fileId) { + this.filePath = filePath; + this.fileId = fileId; + return this; + } + + @Override + public ColumnarSplitBuilder tableMeta(String logicalSchema, String logicalTable) { + this.logicalSchema = logicalSchema; + this.logicalTable = logicalTable; + return this; + } + + @Override + public ColumnarSplitBuilder columnTransformer(OSSColumnTransformer ossColumnTransformer) { + this.ossColumnTransformer = ossColumnTransformer; + return this; + } + + @Override + public ColumnarSplitBuilder inputRefs(List inputRefsForFilter, List inputRefsForProject) { + this.inputRefsForFilter = inputRefsForFilter; + this.inputRefsForProject = inputRefsForProject; + return this; + } + + @Override + public ColumnarSplitBuilder cacheManager(BlockCacheManager blockCacheManager) { + this.blockCacheManager = blockCacheManager; + return this; + } + + @Override + public ColumnarSplitBuilder chunkLimit(int chunkLimit) { + this.chunkLimit = chunkLimit; + return this; + } + + @Override + public ColumnarSplitBuilder morselUnit(int rgThreshold) { + this.rgThreshold = rgThreshold; + return this; + } + + @Override + public ColumnarSplitBuilder pushDown(LazyEvaluator lazyEvaluator) { + this.lazyEvaluator = lazyEvaluator; + return this; + } + + @Override + public ColumnarSplitBuilder prepare(ScanPreProcessor scanPreProcessor) { + this.preProcessor = scanPreProcessor; + return this; + } + + @Override + public ColumnarSplitBuilder columnarManager(ColumnarManager columnarManager) { + this.columnarManager = columnarManager; + return this; + } + + @Override + public ColumnarSplitBuilder isColumnarMode(boolean isColumnarMode) { + this.isColumnarMode = isColumnarMode; + return this; + } + + @Override + public ColumnarSplitBuilder tso(Long tso) { + this.tso = tso; + return this; + } + + @Override + public ColumnarSplitBuilder partNum(int partNum) { + this.partNum = partNum; + return this; + } + + @Override + public ColumnarSplitBuilder nodePartCount(int nodePartCount) { + this.nodePartCount = nodePartCount; + return this; + } + + @Override + public ColumnarSplitBuilder memoryAllocator(MemoryAllocatorCtx memoryAllocatorCtx) { + 
this.memoryAllocatorCtx = memoryAllocatorCtx; + return this; + } + + @Override + public ColumnarSplitBuilder fragmentRFManager(FragmentRFManager fragmentRFManager) { + this.fragmentRFManager = fragmentRFManager; + return this; + } + + @Override + public ColumnarSplitBuilder operatorStatistic(OperatorStatistics operatorStatistics) { + this.operatorStatistics = operatorStatistics; + return this; + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/MultiDictionaryMapping.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/MultiDictionaryMapping.java new file mode 100644 index 000000000..f4c6ab9f9 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/MultiDictionaryMapping.java @@ -0,0 +1,60 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.executor.operator.scan.BlockDictionary; +import io.airlift.slice.Slice; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class MultiDictionaryMapping implements DictionaryMapping { + private final List<Slice> targetDict; + private Map<Integer, int[]> reMappings = new HashMap<>(); + + public MultiDictionaryMapping(List<Slice> targetDict) { + this.targetDict = targetDict; + } + + @Override + public int[] merge(BlockDictionary dictionary) { + int hashCode = dictionary.hashCode(); + int[] reMapping; + if ((reMapping = reMappings.get(hashCode)) != null) { + return reMapping; + } + + // merge + reMapping = new int[dictionary.size()]; + for (int originalDictId = 0; originalDictId < dictionary.size(); originalDictId++) { + Slice originalDictValue = dictionary.getValue(originalDictId); + + // Find the index of dict value, and record it into reMapping array. + int index = targetDict.indexOf(originalDictValue); + reMapping[originalDictId] = index; + } + reMappings.put(hashCode, reMapping); + return reMapping; + } + + @Override + public void close() { + reMappings.clear(); + reMappings = null; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/NonBlockedScanPreProcessor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/NonBlockedScanPreProcessor.java new file mode 100644 index 000000000..9a5051f11 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/NonBlockedScanPreProcessor.java @@ -0,0 +1,87 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
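// Editorial sketch (not part of the patch): the dictionary re-mapping idea behind
// MultiDictionaryMapping above, reduced to plain JDK types. Strings stand in for
// BlockDictionary/Slice values; all names here are hypothetical.
import java.util.Arrays;
import java.util.List;

class DictionaryRemapSketch {
    // remap[i] = position of fileDict.get(i) in the merged target dictionary, or -1 if absent.
    static int[] remap(List<String> targetDict, List<String> fileDict) {
        int[] remap = new int[fileDict.size()];
        for (int i = 0; i < fileDict.size(); i++) {
            remap[i] = targetDict.indexOf(fileDict.get(i));
        }
        return remap;
    }

    public static void main(String[] args) {
        // Prints [2, -1, 0]: ids encoded against a per-file dictionary can be rewritten
        // to ids of the merged dictionary with one array lookup per value.
        System.out.println(Arrays.toString(remap(
            List.of("a", "b", "c"), List.of("c", "x", "a"))));
    }
}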
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.executor.operator.scan.ScanPreProcessor; +import com.alibaba.polardbx.optimizer.statis.ColumnarTracer; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import org.apache.hadoop.fs.Path; +import org.roaringbitmap.RoaringBitmap; + +import java.util.HashSet; +import java.util.Set; +import java.util.SortedMap; +import java.util.concurrent.ExecutorService; + +/** + * A non-blocked implementation of ScanPreProcessor with prepared meta and pruning result. + */ +public class NonBlockedScanPreProcessor implements ScanPreProcessor { + private final Set filePaths; + private final PreheatFileMeta preheatFileMeta; + private final SortedMap matrix; + private final RoaringBitmap deletion; + + public NonBlockedScanPreProcessor(PreheatFileMeta preheatFileMeta, + SortedMap matrix, + RoaringBitmap deletion) { + this.filePaths = new HashSet<>(); + this.preheatFileMeta = preheatFileMeta; + this.matrix = matrix; + this.deletion = deletion; + } + + @Override + public void addFile(Path filePath) { + filePaths.add(filePath); + } + + @Override + public ListenableFuture prepare(ExecutorService executor, String traceId, ColumnarTracer tracer) { + return Futures.immediateFuture(null); + } + + @Override + public boolean isPrepared() { + return true; + } + + @Override + public SortedMap getPruningResult(Path filePath) { + if (filePath != null && filePaths.contains(filePath)) { + return matrix; + } + return null; + } + + @Override + public PreheatFileMeta getPreheated(Path filePath) { + if (filePath != null && filePaths.contains(filePath)) { + return preheatFileMeta; + } + return null; + } + + @Override + public RoaringBitmap getDeletion(Path filePath) { + if (filePath != null && filePaths.contains(filePath)) { + return deletion; + } + return null; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/ORCMetaReaderImpl.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/ORCMetaReaderImpl.java new file mode 100644 index 000000000..81a6f968c --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/ORCMetaReaderImpl.java @@ -0,0 +1,164 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
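// Editorial sketch (not part of the patch): how NonBlockedScanPreProcessor above is
// meant to be used. The preheatFileMeta, pruningResult, deletionBitmap, filePath,
// ioExecutor, traceId and columnarTracer variables are assumed to exist in the caller.
NonBlockedScanPreProcessor preProcessor =
    new NonBlockedScanPreProcessor(preheatFileMeta, pruningResult, deletionBitmap);
preProcessor.addFile(filePath);
// Everything was supplied up front, so prepare() returns an immediate future and
// callers never block on it.
preProcessor.prepare(ioExecutor, traceId, columnarTracer);
assert preProcessor.isPrepared();
// The getters only answer for paths registered via addFile(); other paths yield null.
RoaringBitmap deletion = preProcessor.getDeletion(filePath);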
+ */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.executor.operator.scan.ORCMetaReader; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.orc.DataReader; +import org.apache.orc.OrcConf; +import org.apache.orc.OrcFile; +import org.apache.orc.OrcProto; +import org.apache.orc.Reader; +import org.apache.orc.StripeInformation; +import org.apache.orc.impl.DataReaderProperties; +import org.apache.orc.impl.InStream; +import org.apache.orc.impl.OrcCodecPool; +import org.apache.orc.impl.OrcIndex; +import org.apache.orc.impl.OrcTail; +import org.apache.orc.impl.ReaderImpl; +import org.apache.orc.impl.RecordReaderUtils; +import org.apache.orc.impl.reader.StripePlanner; +import org.jetbrains.annotations.NotNull; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +import static org.apache.orc.impl.ReaderImpl.extractFileTail; + +public class ORCMetaReaderImpl implements ORCMetaReader { + private final Configuration configuration; + private final FileSystem preheatFileSystem; + + public ORCMetaReaderImpl(Configuration configuration, FileSystem fileSystem) { + this.configuration = configuration; + this.preheatFileSystem = fileSystem; + } + + @Override + public PreheatFileMeta preheat(Path path) throws IOException { + PreheatFileMeta result = new PreheatFileMeta(path); + // 0. build and cache file reader + ReaderImpl fileReader = null; + try { + fileReader = (ReaderImpl) OrcFile.createReader(path, + OrcFile.readerOptions(configuration).filesystem(preheatFileSystem) + ); + + // 1. extract orc tail and cache it + ByteBuffer footerBuffer = fileReader.getSerializedFileFooter(); + OrcTail orcTail = extractFileTail(footerBuffer, -1, -1); + result.setPreheatTail(orcTail); + + // 2. build and cache each stripe metadata in file. + Map preheatContextMap = preheatStripe(path, fileReader, preheatFileSystem); + result.setPreheatStripes(preheatContextMap); + } finally { + // prevent from IO resource leak + if (fileReader != null) { + fileReader.close(); + } + } + return result; + } + + private Map preheatStripe(Path path, ReaderImpl fileReader, FileSystem preheatFileSystem) + throws IOException { + Map result = new ConcurrentHashMap<>(); + + // 1. build data reader + try (DataReader dataReader = buildDataReader(path, fileReader, preheatFileSystem)) { + for (StripeInformation stripe : fileReader.getStripes()) { + + // 2. build stripe planner for each stripe. + StripePlanner planner = buildStripePlanner(fileReader, dataReader); + + boolean[] allColumns = new boolean[fileReader.getSchema().getMaximumId() + 1]; + Arrays.fill(allColumns, true); + + // 3. planner parse meta info of Stripe + // get info of data streams + index streams and cache it in planner object. + planner.parseStripe(stripe, allColumns, null); + + // 4. get row index + // NOTE: Stripe Planner will NOT cache the row indexes already fetched. + OrcIndex index = planner.readRowIndex(allColumns, null); + + // 5. 
get stripe footer + OrcProto.StripeFooter stripeFooter = dataReader.readStripeFooter(stripe); + + planner.clearDataReader(); + PreheatStripeMeta preheatStripeMeta = new PreheatStripeMeta( + stripe.getStripeId(), index, stripeFooter); + + result.put(stripe.getStripeId(), preheatStripeMeta); + } + } + + return result; + } + + @NotNull + private StripePlanner buildStripePlanner(ReaderImpl fileReader, DataReader dataReader) { + int maxDiskRangeChunkLimit = OrcConf.ORC_MAX_DISK_RANGE_CHUNK_LIMIT.getInt(configuration); + boolean ignoreNonUtf8BloomFilter = OrcConf.IGNORE_NON_UTF8_BLOOM_FILTERS.getBoolean(configuration); + + StripePlanner planner = new StripePlanner( + fileReader.getSchema(), + fileReader.getEncryption(), + dataReader, + fileReader.getWriterVersion(), + ignoreNonUtf8BloomFilter, + maxDiskRangeChunkLimit); + return planner; + } + + private DataReader buildDataReader(Path path, ReaderImpl fileReader, FileSystem fileSystem) throws IOException { + int maxDiskRangeChunkLimit = OrcConf.ORC_MAX_DISK_RANGE_CHUNK_LIMIT.getInt(configuration); + Reader.Options options = fileReader.options(); + + InStream.StreamOptions unencryptedOptions = + InStream.options() + .withCodec(OrcCodecPool.getCodec(fileReader.getCompressionKind())) + .withBufferSize(fileReader.getCompressionSize()); + DataReaderProperties.Builder builder = + DataReaderProperties.builder() + .withCompression(unencryptedOptions) + .withFileSystemSupplier(() -> fileSystem) + .withPath(path) + .withMaxDiskRangeChunkLimit(maxDiskRangeChunkLimit) + .withZeroCopy(options.getUseZeroCopy()); + FSDataInputStream file = fileSystem.open(path); + if (file != null) { + builder.withFile(file); + } + + DataReader dataReader = RecordReaderUtils.createDefaultDataReader( + builder.build()); + return dataReader; + } + + @Override + public void close() throws IOException { + + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/OrcRawTypeBlockLoader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/OrcRawTypeBlockLoader.java new file mode 100644 index 000000000..9c348c25f --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/OrcRawTypeBlockLoader.java @@ -0,0 +1,246 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.chunk.AbstractBlock; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.ByteArrayBlock; +import com.alibaba.polardbx.executor.chunk.DoubleBlock; +import com.alibaba.polardbx.executor.chunk.FloatBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.chunk.columnar.BlockLoader; +import com.alibaba.polardbx.executor.operator.scan.BlockCacheManager; +import com.alibaba.polardbx.executor.operator.scan.CacheReader; +import com.alibaba.polardbx.executor.operator.scan.ColumnReader; +import com.alibaba.polardbx.executor.operator.scan.LogicalRowGroup; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.BinaryType; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.SliceType; +import com.codahale.metrics.Counter; +import com.google.common.base.Preconditions; +import org.apache.orc.ColumnStatistics; +import org.apache.orc.OrcProto; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.text.MessageFormat; + +/** + * This block loader only loads the following raw orc data types from orc data: + * long, float, double, byte[] + * And block loaded by this loader MUST NOT be put in block cache. + */ +public class OrcRawTypeBlockLoader extends ReactiveBlockLoader { + + public OrcRawTypeBlockLoader(LogicalRowGroup logicalRowGroup, + int columnId, + int startPosition, + int positionCount, + OrcProto.ColumnEncoding encoding, + ColumnReader columnReader, + CacheReader cacheReader, + BlockCacheManager blockCacheManager, + ExecutionContext context, + boolean useBlockCache, + boolean enableColumnReaderLock, + int chunkLimit, + Counter loadTimer, + Counter memoryCounter, + boolean onlyCachePrimaryKey, + boolean enableSkipCompression) { + super(logicalRowGroup, columnId, startPosition, positionCount, encoding, columnReader, cacheReader, + blockCacheManager, context, useBlockCache, enableColumnReaderLock, chunkLimit, loadTimer, memoryCounter, + onlyCachePrimaryKey, enableSkipCompression); + } + + @Override + public Block load(DataType dataType, int[] selection, int selSize) throws IOException { + // In this case, we need to proactively open column-reader before this method. + if (!columnReader.isOpened()) { + throw GeneralUtil.nestedException("column reader has not already been opened."); + } + + long start = System.nanoTime(); + Block block = parseBlock(dataType, selection, selSize); + block.cast(AbstractBlock.class).updateSizeInfo(); + if (memoryCounter != null) { + memoryCounter.inc(block.estimateSize()); + } + if (loadTimer != null) { + loadTimer.inc(System.nanoTime() - start); + } + return block; + } + + private Block parseBlock(DataType dataType, int[] selection, int selSize) throws IOException { + Block targetBlock = allocateBlock(dataType); + + // Start decoding from given position. + // The seeking may be blocked util IO processing completed. + long stamp = enableColumnReaderLock ? 
columnReader.getLock().writeLock() : -1L; + try { + columnReader.startAt(rowGroupId, startPosition); + + if (enableSkipCompression) { + lastSkipCount = columnReader.next(targetBlock.cast(RandomAccessBlock.class), positionCount, + selection, selSize); + } else { + columnReader.next(targetBlock.cast(RandomAccessBlock.class), positionCount); + lastSkipCount = 0; + } + + } catch (Throwable t) { + + LOGGER.error(MessageFormat.format( + "parse block error: {0}", + this.toString() + )); + + throw t; + } finally { + if (enableColumnReaderLock) { + columnReader.getLock().unlockWrite(stamp); + } + } + + return targetBlock; + } + + private Block allocateBlock(DataType inputType) { + switch (inputType.fieldType()) { + case MYSQL_TYPE_LONGLONG: + case MYSQL_TYPE_SHORT: + case MYSQL_TYPE_INT24: + case MYSQL_TYPE_TINY: + case MYSQL_TYPE_LONG: + case MYSQL_TYPE_YEAR: + case MYSQL_TYPE_DATETIME: + case MYSQL_TYPE_DATETIME2: + case MYSQL_TYPE_TIMESTAMP: + case MYSQL_TYPE_TIMESTAMP2: + case MYSQL_TYPE_DATE: + case MYSQL_TYPE_NEWDATE: + case MYSQL_TYPE_TIME: + case MYSQL_TYPE_TIME2: + case MYSQL_TYPE_BIT: { + Preconditions.checkArgument(columnReader instanceof LongColumnReader); + return new LongBlock(inputType, positionCount); + } + case MYSQL_TYPE_FLOAT: { + Preconditions.checkArgument(columnReader instanceof DoubleBlockFloatColumnReader); + return new DoubleBlock(inputType, positionCount); + } + case MYSQL_TYPE_DOUBLE: { + Preconditions.checkArgument(columnReader instanceof DoubleColumnReader); + return new DoubleBlock(inputType, positionCount); + } + case MYSQL_TYPE_DECIMAL: + case MYSQL_TYPE_NEWDECIMAL: { + if (columnReader instanceof LongColumnReader) { + return new LongBlock(inputType, positionCount); + } else { + return new ByteArrayBlock(positionCount); + } + } + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_STRING: + case MYSQL_TYPE_VARCHAR: { + if (inputType instanceof BinaryType) { + return new ByteArrayBlock(positionCount); + } else if (inputType instanceof SliceType) { + switch (encoding.getKind()) { + case DIRECT: + case DIRECT_V2: { + Preconditions.checkArgument(columnReader instanceof DirectBinaryColumnReader); + // For column in direct encoding + return new ByteArrayBlock(positionCount); + } + case DICTIONARY: + case DICTIONARY_V2: + Preconditions.checkArgument(columnReader instanceof DictionaryBinaryColumnReader); + // for dictionary encoding + return new ByteArrayBlock(positionCount); + default: + throw GeneralUtil.nestedException("Unsupported encoding " + encoding.getKind()); + } + } else { + throw new UnsupportedOperationException("String type not supported: " + inputType.getClass().getName()); + } + } + case MYSQL_TYPE_ENUM: + case MYSQL_TYPE_BLOB: { + switch (encoding.getKind()) { + case DIRECT: + case DIRECT_V2: { + Preconditions.checkArgument(columnReader instanceof DirectBinaryColumnReader); + // For column in direct encoding + return new ByteArrayBlock(positionCount); + } + case DICTIONARY: + case DICTIONARY_V2: + Preconditions.checkArgument(columnReader instanceof DictionaryBinaryColumnReader); + // for dictionary encoding + return new ByteArrayBlock(positionCount); + default: + throw GeneralUtil.nestedException("Unsupported encoding " + encoding.getKind()); + } + } + case MYSQL_TYPE_JSON: { + Preconditions.checkArgument(columnReader instanceof DirectBinaryColumnReader); + return new ByteArrayBlock(positionCount); + } + + default: + } + throw GeneralUtil.nestedException("Unsupported input-type " + inputType); + } + + @Override + public ColumnReader getColumnReader() { + return 
this.columnReader; + } + + @Override + public CacheReader getCacheReader() { + return null; + } + + @Override + public int startPosition() { + return startPosition; + } + + @Override + public int positionCount() { + return positionCount; + } + + @Override + public String toString() { + return "RawBlockLoader{" + + "logicalRowGroup=" + logicalRowGroup + + ", columnId=" + columnId + + ", rowGroupId=" + rowGroupId + + ", startPosition=" + startPosition + + ", positionCount=" + positionCount + + '}'; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/PackedTimeColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/PackedTimeColumnReader.java new file mode 100644 index 000000000..fa0b8c39a --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/PackedTimeColumnReader.java @@ -0,0 +1,165 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.DateBlock; +import com.alibaba.polardbx.executor.chunk.DateBlockBuilder; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.chunk.TimeBlock; +import com.alibaba.polardbx.executor.chunk.TimeBlockBuilder; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.google.common.base.Preconditions; +import org.apache.orc.OrcProto; +import org.apache.orc.impl.OrcIndex; + +import java.io.IOException; + +public class PackedTimeColumnReader extends AbstractLongColumnReader { + public PackedTimeColumnReader(int columnId, boolean isPrimaryKey, StripeLoader stripeLoader, + OrcIndex orcIndex, + RuntimeMetrics metrics, + OrcProto.ColumnEncoding.Kind kind, int indexStride, + boolean enableMetrics) { + super(columnId, isPrimaryKey, stripeLoader, orcIndex, metrics, kind, indexStride, enableMetrics); + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof DateBlock + || randomAccessBlock instanceof TimeBlock); + init(); + + long start = System.nanoTime(); + + long[] packed = randomAccessBlock instanceof DateBlock + ? ((DateBlock) randomAccessBlock).getPacked() + : ((TimeBlock) randomAccessBlock).getPacked(); + boolean[] nulls = randomAccessBlock.nulls(); + + if (present == null) { + randomAccessBlock.setHasNull(false); + for (int i = 0; i < positionCount; i++) { + // no null value. + long longVal = data.next(); + packed[i] = longVal; + lastPosition++; + } + + // destroy null array to save the memory. 
+ if (randomAccessBlock instanceof Block) { + ((Block) randomAccessBlock).destroyNulls(true); + } + } else { + randomAccessBlock.setHasNull(true); + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + packed[i] = 0; + } else { + // if not null + long longVal = data.next(); + packed[i] = longVal; + } + lastPosition++; + } + } + + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } + + @Override + public int next(RandomAccessBlock randomAccessBlock, int positionCount, int[] selection, int selSize) + throws IOException { + if (selection == null || selSize == 0 || selection.length == 0) { + next(randomAccessBlock, positionCount); + return 0; + } + + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof DateBlock + || randomAccessBlock instanceof TimeBlock); + init(); + + long start = System.nanoTime(); + + long[] packed = randomAccessBlock instanceof DateBlock + ? ((DateBlock) randomAccessBlock).getPacked() + : ((TimeBlock) randomAccessBlock).getPacked(); + boolean[] nulls = randomAccessBlock.nulls(); + + int totalSkipCount = 0; + if (present == null) { + randomAccessBlock.setHasNull(false); + + int lastSelectedPos = -1; + for (int i = 0; i < selSize; i++) { + int selectedPos = selection[i]; + + int skipPos = selectedPos - lastSelectedPos - 1; + if (skipPos > 0) { + data.skip(skipPos); + totalSkipCount += skipPos; + lastPosition += skipPos; + } + long longVal = data.next(); + packed[selectedPos] = longVal; + lastPosition++; + + lastSelectedPos = selectedPos; + } + + // destroy null array to save the memory. + if (randomAccessBlock instanceof Block) { + ((Block) randomAccessBlock).destroyNulls(true); + } + + } else { + randomAccessBlock.setHasNull(true); + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + packed[i] = 0; + } else { + // if not null + long longVal = data.next(); + packed[i] = longVal; + } + lastPosition++; + } + } + + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + + return totalSkipCount; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/PreheatFileMeta.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/PreheatFileMeta.java new file mode 100644 index 000000000..1a2a2dc44 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/PreheatFileMeta.java @@ -0,0 +1,70 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
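// Editorial sketch (not part of the patch): a self-contained illustration of the
// skip arithmetic used by PackedTimeColumnReader.next(...) above. Between two
// selected positions the reader skips (selectedPos - lastSelectedPos - 1) values
// instead of decoding them.
import java.util.Arrays;

class SelectionSkipSketch {
    // Returns how many values a streaming decoder could skip for this sorted selection.
    static int totalSkips(int[] selection) {
        int total = 0;
        int last = -1;
        for (int pos : selection) {
            total += pos - last - 1; // values lying between two selected rows
            last = pos;
        }
        return total;
    }

    public static void main(String[] args) {
        // Selecting rows 2, 3 and 9 of a run skips 2 + 0 + 5 = 7 decode operations.
        System.out.println(totalSkips(new int[] {2, 3, 9}));
    }
}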
+ */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import org.apache.arrow.util.Preconditions; +import org.apache.hadoop.fs.Path; +import org.apache.orc.OrcProto; +import org.apache.orc.impl.OrcIndex; +import org.apache.orc.impl.OrcTail; + +import java.util.Map; + +public class PreheatFileMeta { + private final Path filePath; + private Map preheatStripes; + private OrcTail preheatTail; + + public PreheatFileMeta(Path filePath) { + this.filePath = filePath; + } + + public Path getFilePath() { + return filePath; + } + + public void setPreheatStripes( + Map preheatStripes) { + this.preheatStripes = preheatStripes; + } + + public OrcTail getPreheatTail() { + return preheatTail; + } + + public Map getPreheatStripes() { + return preheatStripes; + } + + public void setPreheatTail(OrcTail preheatTail) { + this.preheatTail = preheatTail; + } + + public OrcIndex getOrcIndex(long stripeIndex) { + PreheatStripeMeta stripeMeta = preheatStripes.get(stripeIndex); + Preconditions.checkNotNull(stripeMeta); + + return stripeMeta.getOrcIndex(); + } + + public OrcProto.StripeFooter getStripeFooter(long stripeIndex) { + PreheatStripeMeta stripeMeta = preheatStripes.get(stripeIndex); + Preconditions.checkNotNull(stripeMeta); + + return stripeMeta.getStripeFooter(); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/PreheatStripeMeta.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/PreheatStripeMeta.java new file mode 100644 index 000000000..798784ea6 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/PreheatStripeMeta.java @@ -0,0 +1,55 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import org.apache.orc.OrcProto; +import org.apache.orc.impl.OrcIndex; + +public class PreheatStripeMeta { + /** + * Stripe id. + */ + private final long stripeId; + + /** + * The orc RowIndex is read from index streams. + */ + private final OrcIndex orcIndex; + + /** + * The stripe footer is read from the tail of a stripe. 
+ */ + private final OrcProto.StripeFooter stripeFooter; + + public PreheatStripeMeta(long stripeId, OrcIndex orcIndex, OrcProto.StripeFooter stripeFooter) { + this.stripeId = stripeId; + this.orcIndex = orcIndex; + this.stripeFooter = stripeFooter; + } + + public OrcProto.StripeFooter getStripeFooter() { + return stripeFooter; + } + + public long getStripeId() { + return stripeId; + } + + public OrcIndex getOrcIndex() { + return orcIndex; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/RFEfficiencyCheckerImpl.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/RFEfficiencyCheckerImpl.java new file mode 100644 index 000000000..daa8b4d45 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/RFEfficiencyCheckerImpl.java @@ -0,0 +1,92 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItemKey; +import com.alibaba.polardbx.executor.operator.scan.RFEfficiencyChecker; + +import java.util.HashMap; +import java.util.Map; + +public class RFEfficiencyCheckerImpl implements RFEfficiencyChecker { + private final static int SAMPLE_RANGE = 10000; + private final int sampleCount; + private final double filterRatioThreshold; + + private final Map itemRecordMap; + + public RFEfficiencyCheckerImpl(int sampleCount, double filterRatioThreshold) { + this.sampleCount = sampleCount; + this.filterRatioThreshold = filterRatioThreshold; + this.itemRecordMap = new HashMap<>(); + } + + @Override + public void sample(FragmentRFItemKey rfItemKey, int originalCount, int selectedCount) { + RFItemRecord itemRecord; + if ((itemRecord = itemRecordMap.get(rfItemKey)) == null) { + return; + } + + // record and calculate filter ratio when record count is less than sample count. + if (itemRecord.recordCount <= sampleCount) { + itemRecord.totalOriginalCount += originalCount; + itemRecord.totalSelectedCount += selectedCount; + } else if (itemRecord.filterRatio >= 1.0d) { + itemRecord.filterRatio = itemRecord.totalOriginalCount == 0 ? 1.0d + : 1.0d - (itemRecord.totalSelectedCount / (itemRecord.totalOriginalCount * 1.0d)); + } + } + + @Override + public boolean check(FragmentRFItemKey rfItemKey) { + RFItemRecord itemRecord; + + // initialize item record. + if ((itemRecord = itemRecordMap.get(rfItemKey)) == null) { + itemRecord = new RFItemRecord(); + itemRecordMap.put(rfItemKey, itemRecord); + return true; + } + + // set record count of this rf item. + itemRecord.recordCount++; + if (itemRecord.recordCount > SAMPLE_RANGE) { + + // reset + itemRecord.recordCount -= SAMPLE_RANGE; + itemRecord.totalOriginalCount = 0; + itemRecord.totalSelectedCount = 0; + itemRecord.filterRatio = 1d; + } + + // Allow runtime filter directly. 
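// Editorial sketch (not part of the patch): the decision rule implemented by
// RFEfficiencyCheckerImpl here, reduced to plain arithmetic. After the sampling
// phase a runtime filter stays enabled only if it filtered out a large enough
// fraction of the probed rows.
class RFRatioSketch {
    static boolean keepRuntimeFilter(long totalOriginal, long totalSelected, double threshold) {
        double filterRatio = totalOriginal == 0
            ? 1.0d
            : 1.0d - (totalSelected / (double) totalOriginal);
        return filterRatio >= threshold;
    }

    public static void main(String[] args) {
        // 10_000 rows probed, 9_000 passed -> filter ratio 0.1; below a 0.25
        // threshold, so the filter would be suspended until the counters reset.
        System.out.println(keepRuntimeFilter(10_000, 9_000, 0.25d)); // false
    }
}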
+ if (itemRecord.recordCount <= sampleCount) { + return true; + } + + // Check filter ratio. + return itemRecord.filterRatio >= filterRatioThreshold; + } + + private static class RFItemRecord { + long recordCount = 0; + long totalOriginalCount = 0; + long totalSelectedCount = 0; + double filterRatio = 1.0d; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/RFLazyEvaluator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/RFLazyEvaluator.java new file mode 100644 index 000000000..a7e446d71 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/RFLazyEvaluator.java @@ -0,0 +1,155 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.utils.bloomfilter.RFBloomFilter; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItem; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFItemKey; +import com.alibaba.polardbx.executor.mpp.planner.FragmentRFManager; +import com.alibaba.polardbx.executor.operator.scan.LazyEvaluator; +import com.alibaba.polardbx.executor.operator.scan.RFEfficiencyChecker; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; +import com.alibaba.polardbx.optimizer.statis.OperatorStatistics; +import com.google.common.base.Preconditions; +import org.roaringbitmap.RoaringBitmap; + +import java.util.Arrays; +import java.util.BitSet; +import java.util.Map; + +public class RFLazyEvaluator implements LazyEvaluator { + + private final FragmentRFManager manager; + private final OperatorStatistics operatorStatistics; + private final RFEfficiencyChecker efficiencyChecker; + private final Map rfBloomFilterMap; + + private final int itemSize; + private final FragmentRFItemKey[] itemKeys; + private final FragmentRFItem[] items; + + public RFLazyEvaluator(FragmentRFManager manager, OperatorStatistics operatorStatistics, + Map rfBloomFilterMap) { + this.manager = manager; + this.operatorStatistics = operatorStatistics; + this.efficiencyChecker = new RFEfficiencyCheckerImpl( + manager.getSampleCount(), manager.getFilterRatioThreshold()); + this.rfBloomFilterMap = rfBloomFilterMap; + + // Put all fragment items and their keys into array. 
+ Map<FragmentRFItemKey, FragmentRFItem> allItems = manager.getAllItems(); + this.itemSize = allItems.size(); + this.itemKeys = new FragmentRFItemKey[itemSize]; + this.items = new FragmentRFItem[itemSize]; + + int index = 0; + for (Map.Entry<FragmentRFItemKey, FragmentRFItem> entry : allItems.entrySet()) { + itemKeys[index] = entry.getKey(); + items[index] = entry.getValue(); + index++; + } + } + + @Override + public VectorizedExpression getCondition() { + throw new UnsupportedOperationException(); + } + + @Override + public BitSet eval(Chunk chunk, int startPosition, int positionCount, RoaringBitmap deletion) { + throw new UnsupportedOperationException(); + } + + @Override + public int eval(Chunk chunk, int startPosition, int positionCount, RoaringBitmap deletion, boolean[] bitmap) { + // Guard against a null or empty chunk before it is dereferenced. + if (chunk == null || chunk.getPositionCount() == 0) { + return 0; + } + + long cardinality = deletion.rangeCardinality(startPosition, startPosition + positionCount); + Preconditions.checkArgument(cardinality <= positionCount); + + if (cardinality != 0) { + // Mark the positions that are not deleted in the RoaringBitmap as TRUE. + for (int i = 0; i < positionCount; i++) { + bitmap[i] = !deletion.contains(i + startPosition); + } + } else { + // clear bitmap + Arrays.fill(bitmap, 0, chunk.getPositionCount(), true); + } + // Clear the area of the bitmap that is out of bounds. + if (chunk.getPositionCount() < bitmap.length) { + Arrays.fill(bitmap, chunk.getPositionCount(), bitmap.length, false); + } + + final int totalPartitionCount = manager.getTotalPartitionCount(); + int selectedCount = chunk.getPositionCount(); + for (int i = 0; i < itemSize; i++) { + FragmentRFItem item = items[i]; + int filterChannel = item.getSourceFilterChannel(); + boolean useXXHashInFilter = item.useXXHashInFilter(); + + FragmentRFItemKey itemKey = itemKeys[i]; + RFBloomFilter[] rfBloomFilters = rfBloomFilterMap.get(itemKey); + + // We have not received the runtime filter of this item key from the build side. + if (rfBloomFilters == null) { + continue; + } + + // check runtime filter efficiency. + if (!efficiencyChecker.check(itemKey)) { + continue; + } + + final int originalCount = selectedCount; + switch (item.getRFType()) { + case BROADCAST: { + selectedCount = chunk.getBlock(filterChannel).mightContainsLong(rfBloomFilters[0], bitmap, true); + break; + } + case LOCAL: { + if (useXXHashInFilter) { + selectedCount = + chunk.getBlock(filterChannel).mightContainsLong(totalPartitionCount, rfBloomFilters, bitmap, + true, true); + } else { + selectedCount = + chunk.getBlock(filterChannel).mightContainsInt(totalPartitionCount, rfBloomFilters, bitmap, + false, true); + } + break; + } + } + + // sample the filter ratio of runtime filter. + efficiencyChecker.sample(itemKey, originalCount, selectedCount); + } + + // statistics for filtered rows by runtime filter.
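// Editorial sketch (not part of the patch): the deletion-bitmap part of
// RFLazyEvaluator.eval(...) in isolation, using an exact-size array for clarity.
// Positions marked deleted in the RoaringBitmap start out false in the selection
// bitmap; runtime filters then only narrow the selection further.
import org.roaringbitmap.RoaringBitmap;

class DeletionBitmapSketch {
    static boolean[] initialSelection(RoaringBitmap deletion, int startPosition, int positionCount) {
        boolean[] bitmap = new boolean[positionCount];
        if (deletion.rangeCardinality(startPosition, startPosition + positionCount) != 0) {
            for (int i = 0; i < positionCount; i++) {
                bitmap[i] = !deletion.contains(startPosition + i);
            }
        } else {
            java.util.Arrays.fill(bitmap, true); // nothing deleted in this range
        }
        return bitmap;
    }

    public static void main(String[] args) {
        RoaringBitmap deletion = RoaringBitmap.bitmapOf(101, 103);
        // rows 100..104 -> [true, false, true, false, true]
        System.out.println(java.util.Arrays.toString(initialSelection(deletion, 100, 5)));
    }
}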
+ operatorStatistics.addRuntimeFilteredCount(chunk.getPositionCount() - selectedCount); + + return selectedCount; + } + + @Override + public boolean isConstantExpression() { + return false; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/ReactiveBlockLoader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/ReactiveBlockLoader.java new file mode 100644 index 000000000..59676dd01 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/ReactiveBlockLoader.java @@ -0,0 +1,441 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.chunk.AbstractBlock; +import com.alibaba.polardbx.executor.chunk.BigIntegerBlock; +import com.alibaba.polardbx.executor.chunk.BlobBlock; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.ByteArrayBlock; +import com.alibaba.polardbx.executor.chunk.ByteBlock; +import com.alibaba.polardbx.executor.chunk.DateBlock; +import com.alibaba.polardbx.executor.chunk.DecimalBlock; +import com.alibaba.polardbx.executor.chunk.DoubleBlock; +import com.alibaba.polardbx.executor.chunk.EnumBlock; +import com.alibaba.polardbx.executor.chunk.FloatBlock; +import com.alibaba.polardbx.executor.chunk.IntegerBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.chunk.ShortBlock; +import com.alibaba.polardbx.executor.chunk.SliceBlock; +import com.alibaba.polardbx.executor.chunk.StringBlock; +import com.alibaba.polardbx.executor.chunk.TimeBlock; +import com.alibaba.polardbx.executor.chunk.TimestampBlock; +import com.alibaba.polardbx.executor.chunk.ULongBlock; +import com.alibaba.polardbx.executor.chunk.columnar.BlockLoader; +import com.alibaba.polardbx.executor.operator.scan.BlockCacheManager; +import com.alibaba.polardbx.executor.operator.scan.CacheReader; +import com.alibaba.polardbx.executor.operator.scan.ColumnReader; +import com.alibaba.polardbx.executor.operator.scan.LogicalRowGroup; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.BigBitType; +import com.alibaba.polardbx.optimizer.core.datatype.BinaryType; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.EnumType; +import com.alibaba.polardbx.optimizer.core.datatype.SliceType; +import com.alibaba.polardbx.optimizer.utils.TimestampUtils; +import com.codahale.metrics.Counter; +import com.google.common.base.Preconditions; +import org.apache.orc.ColumnStatistics; +import org.apache.orc.OrcProto; +import org.slf4j.Logger; +import 
org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.text.MessageFormat; +import java.util.TimeZone; + +/** + * A reactive block loader depends on IO/cache modules from the outside scheduler. + * 1. The lifecycle of these modules must be in opened/prepared state for loading. + * 2. These modules are shared in stripe-level on given column. + */ +public class ReactiveBlockLoader implements BlockLoader { + protected static final Logger LOGGER = LoggerFactory.getLogger("oss"); + protected final LogicalRowGroup logicalRowGroup; + + /** + * Column id on schema. + */ + protected final int columnId; + + /** + * Row group id in ORC Stripe. + */ + protected final int rowGroupId; + + /** + * The start position at column reader for this block. + */ + protected final int startPosition; + + /** + * The position count of this block. + */ + protected final int positionCount; + + /** + * Column-level encoding type. + */ + protected final OrcProto.ColumnEncoding encoding; + + /** + * A column-level reader responsible for all blocks of all row groups in the stripe. + */ + protected final ColumnReader columnReader; + + /** + * A column-level cache reader holding the available cached blocks in the stripe + */ + private final CacheReader cacheReader; + + /** + * The global block cache manager shared by all files. + */ + private final BlockCacheManager blockCacheManager; + + private final ExecutionContext context; + private final boolean useBlockCache; + protected final boolean enableColumnReaderLock; + private final int chunkLimit; + + protected final Counter loadTimer; + protected final Counter memoryCounter; + private final TimeZone timeZone; + + private final boolean onlyCachePrimaryKey; + protected final boolean enableSkipCompression; + + protected int lastSkipCount; + + public ReactiveBlockLoader(LogicalRowGroup logicalRowGroup, int columnId, + int startPosition, + int positionCount, OrcProto.ColumnEncoding encoding, ColumnReader columnReader, + CacheReader cacheReader, + BlockCacheManager blockCacheManager, ExecutionContext context, + boolean useBlockCache, boolean enableColumnReaderLock, int chunkLimit, + Counter loadTimer, Counter memoryCounter, boolean onlyCachePrimaryKey, + boolean enableSkipCompression) { + this.logicalRowGroup = logicalRowGroup; + this.columnId = columnId; + this.rowGroupId = logicalRowGroup.groupId(); + this.startPosition = startPosition; + this.positionCount = positionCount; + this.encoding = encoding; + this.columnReader = columnReader; + this.cacheReader = cacheReader; + this.blockCacheManager = blockCacheManager; + this.context = context; + this.useBlockCache = useBlockCache; + this.enableColumnReaderLock = enableColumnReaderLock; + this.chunkLimit = chunkLimit; + this.loadTimer = loadTimer; + this.memoryCounter = memoryCounter; + this.timeZone = TimestampUtils.getTimeZone(context); + + this.onlyCachePrimaryKey = onlyCachePrimaryKey; + this.enableSkipCompression = enableSkipCompression; + } + + @Override + public Block load(DataType dataType, int[] selection, int selSize) throws IOException { + // In this case, we need to proactively open column-reader before this method. 
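// Editorial sketch (not part of the patch): the cache write-back policy applied
// later in load() below, condensed into a standalone predicate. Parameter names
// mirror the fields of ReactiveBlockLoader; `columnNeedsCache` stands for
// columnReader.needCache().
class WriteBackPolicySketch {
    static boolean shouldWriteBack(boolean useBlockCache, boolean enableSkipCompression,
                                   int lastSkipCount, boolean onlyCachePrimaryKey,
                                   boolean columnNeedsCache) {
        return useBlockCache                                  // block cache enabled at all
            && (!enableSkipCompression || lastSkipCount == 0) // no compressed runs were skipped
            && (!onlyCachePrimaryKey || columnNeedsCache);    // column qualifies for caching
    }
}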
+ if (!columnReader.isOpened()) { + throw GeneralUtil.nestedException("column reader has not already been opened."); + } + + if (!cacheReader.isInitialized()) { + throw GeneralUtil.nestedException("cache reader has not already been initialized."); + } + + long start = System.nanoTime(); + Block cached; + if ((cached = cacheReader.getCache(rowGroupId, startPosition)) != null) { + // If cache hit + // zero copy? + // return LazyBlockUtils.copy(cached); + if (loadTimer != null) { + loadTimer.inc(System.nanoTime() - start); + } + return cached; + } else { + // cache miss, decoding from raw bytes. + Block block = parseBlock(dataType, selection, selSize); + block.cast(AbstractBlock.class).updateSizeInfo(); + if (memoryCounter != null) { + memoryCounter.inc(block.estimateSize()); + } + + // write back to cache manager + // condition1: use block cache + // condition2: there is no compression block has been skipped. + // condition3: the column is primary key. + if (useBlockCache + && (!enableSkipCompression || lastSkipCount == 0) + && (!onlyCachePrimaryKey || columnReader.needCache())) { + blockCacheManager.putCache( + block, // cache entity + chunkLimit, + logicalRowGroup.rowCount(), // for boundary check + logicalRowGroup.path(), logicalRowGroup.stripeId(), logicalRowGroup.groupId(), // base info + columnId, startPosition, positionCount // location of block in row-group + ); + } + if (loadTimer != null) { + loadTimer.inc(System.nanoTime() - start); + } + return block; + } + } + + private Block parseBlock(DataType dataType, int[] selection, int selSize) throws IOException { + boolean compatible = context.isEnableOssCompatible(); + Block targetBlock = allocateBlock(dataType, compatible, timeZone); + + // Start decoding from given position. + // The seeking may be blocked util IO processing completed. + long stamp = enableColumnReaderLock ? columnReader.getLock().writeLock() : -1L; + try { + columnReader.startAt(rowGroupId, startPosition); + + if (enableSkipCompression) { + lastSkipCount = columnReader.next(targetBlock.cast(RandomAccessBlock.class), positionCount, + selection, selSize); + } else { + columnReader.next(targetBlock.cast(RandomAccessBlock.class), positionCount); + lastSkipCount = 0; + } + + } catch (Throwable t) { + + LOGGER.error(MessageFormat.format( + "parse block error: {0}", + this.toString() + )); + + throw t; + } finally { + if (enableColumnReaderLock) { + columnReader.getLock().unlockWrite(stamp); + } + } + + return targetBlock; + } + + private Block allocateBlock(DataType inputType, boolean compatible, TimeZone timeZone) { + // NOTE: we need more type-specified implementations + switch (inputType.fieldType()) { + case MYSQL_TYPE_LONGLONG: { + if (!inputType.isUnsigned()) { + Preconditions.checkArgument(columnReader instanceof LongColumnReader); + // for signed bigint. 
+ return new LongBlock(inputType, positionCount); + } else { + Preconditions.checkArgument(columnReader instanceof UnsignedLongColumnReader); + // for unsigned bigint + return new ULongBlock(inputType, positionCount); + } + } + case MYSQL_TYPE_LONG: { + if (!inputType.isUnsigned()) { + Preconditions.checkArgument(columnReader instanceof IntegerColumnReader); + // for signed int + return new IntegerBlock(inputType, positionCount); + } else { + Preconditions.checkArgument(columnReader instanceof LongColumnReader); + // for unsigned int + return new LongBlock(inputType, positionCount); + } + } + case MYSQL_TYPE_SHORT: { + if (!inputType.isUnsigned()) { + Preconditions.checkArgument(columnReader instanceof ShortColumnReader); + // for signed short + return new ShortBlock(inputType, positionCount); + } else { + Preconditions.checkArgument(columnReader instanceof IntegerColumnReader); + // for unsigned short + return new IntegerBlock(inputType, positionCount); + } + } + case MYSQL_TYPE_INT24: { + Preconditions.checkArgument(columnReader instanceof IntegerColumnReader); + return new IntegerBlock(inputType, positionCount); + } + case MYSQL_TYPE_TINY: { + if (!inputType.isUnsigned()) { + Preconditions.checkArgument(columnReader instanceof ByteColumnReader); + // for signed tiny + return new ByteBlock(inputType, positionCount); + } else { + Preconditions.checkArgument(columnReader instanceof ShortColumnReader); + // for unsigned tiny + return new ShortBlock(inputType, positionCount); + } + } + case MYSQL_TYPE_FLOAT: { + Preconditions.checkArgument(columnReader instanceof FloatColumnReader); + return new FloatBlock(inputType, positionCount); + } + case MYSQL_TYPE_DOUBLE: { + Preconditions.checkArgument(columnReader instanceof DoubleColumnReader); + return new DoubleBlock(inputType, positionCount); + } + case MYSQL_TYPE_TIMESTAMP: + case MYSQL_TYPE_TIMESTAMP2: { + Preconditions.checkArgument(columnReader instanceof TimestampColumnReader); + return new TimestampBlock(inputType, positionCount, timeZone); + } + case MYSQL_TYPE_DATETIME: + case MYSQL_TYPE_DATETIME2: { + Preconditions.checkArgument(columnReader instanceof LongColumnReader); + return new TimestampBlock(inputType, positionCount, timeZone); + } + case MYSQL_TYPE_YEAR: { + Preconditions.checkArgument(columnReader instanceof LongColumnReader); + return new LongBlock(inputType, positionCount); + } + case MYSQL_TYPE_DATE: + case MYSQL_TYPE_NEWDATE: { + Preconditions.checkArgument(columnReader instanceof PackedTimeColumnReader); + return new DateBlock(positionCount, timeZone); + } + case MYSQL_TYPE_TIME: + case MYSQL_TYPE_TIME2: { + Preconditions.checkArgument(columnReader instanceof PackedTimeColumnReader); + // for time + return new TimeBlock(inputType, positionCount, timeZone); + } + case MYSQL_TYPE_DECIMAL: + case MYSQL_TYPE_NEWDECIMAL: { + if (columnReader instanceof LongColumnReader) { + return new DecimalBlock(inputType, positionCount, true); + } else { + return new DecimalBlock(inputType, positionCount, false); + } + } + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_STRING: + case MYSQL_TYPE_VARCHAR: { + if (inputType instanceof BinaryType) { + return new ByteArrayBlock(positionCount); + } else if (inputType instanceof SliceType) { + switch (encoding.getKind()) { + case DIRECT: + case DIRECT_V2: { + Preconditions.checkArgument(columnReader instanceof DirectVarcharColumnReader); + // For column in direct encoding + return new SliceBlock((SliceType) inputType, positionCount, compatible, false); + } + case DICTIONARY: + case DICTIONARY_V2: +
boolean enableSliceDict = + context.getParamManager().getBoolean(ConnectionParams.ENABLE_COLUMNAR_SLICE_DICT); + + Preconditions.checkArgument(columnReader instanceof DictionaryVarcharColumnReader); + // for dictionary encoding + return new SliceBlock((SliceType) inputType, positionCount, compatible, enableSliceDict); + default: + throw GeneralUtil.nestedException("Unsupported encoding " + encoding.getKind()); + } + } else { + throw new UnsupportedOperationException("String type not supported: " + inputType.getClass().getName()); + } + } + case MYSQL_TYPE_ENUM: { + switch (encoding.getKind()) { + case DIRECT: + case DIRECT_V2: { + Preconditions.checkArgument(columnReader instanceof DirectEnumColumnReader); + // For column in direct encoding + return new EnumBlock(positionCount, ((EnumType) inputType).getEnumValues()); + } + case DICTIONARY: + case DICTIONARY_V2: + Preconditions.checkArgument(columnReader instanceof DictionaryEnumColumnReader); + // for dictionary encoding + return new EnumBlock(positionCount, ((EnumType) inputType).getEnumValues()); + default: + throw GeneralUtil.nestedException("Unsupported encoding " + encoding.getKind()); + } + } + case MYSQL_TYPE_JSON: { + switch (encoding.getKind()) { + case DIRECT: + case DIRECT_V2: + Preconditions.checkArgument(columnReader instanceof DirectJsonColumnReader); + return new StringBlock(inputType, positionCount); + case DICTIONARY: + case DICTIONARY_V2: + Preconditions.checkArgument(columnReader instanceof DictionaryJsonColumnReader); + return new StringBlock(inputType, positionCount); + default: + throw GeneralUtil.nestedException("Unsupported encoding " + encoding.getKind()); + } + } + case MYSQL_TYPE_BIT: { + if (inputType instanceof BigBitType) { + Preconditions.checkArgument(columnReader instanceof BigBitColumnReader); + return new BigIntegerBlock(positionCount); + } else { + Preconditions.checkArgument(columnReader instanceof IntegerColumnReader); + return new IntegerBlock(inputType, positionCount); + } + } + case MYSQL_TYPE_BLOB: { + // For column in both direct encoding and dictionary encoding + return new BlobBlock(positionCount); + } + default: + } + throw GeneralUtil.nestedException("Unsupported input-type " + inputType); + } + + @Override + public ColumnReader getColumnReader() { + return this.columnReader; + } + + @Override + public CacheReader getCacheReader() { + return this.cacheReader; + } + + @Override + public int startPosition() { + return startPosition; + } + + @Override + public int positionCount() { + return positionCount; + } + + @Override + public String toString() { + return "ReactiveBlockLoader{" + + "logicalRowGroup=" + logicalRowGroup + + ", columnId=" + columnId + + ", rowGroupId=" + rowGroupId + + ", startPosition=" + startPosition + + ", positionCount=" + positionCount + + '}'; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/RowGroupIteratorImpl.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/RowGroupIteratorImpl.java new file mode 100644 index 000000000..d35aae9e9 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/RowGroupIteratorImpl.java @@ -0,0 +1,871 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.archive.reader.OSSColumnTransformer; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.operator.scan.BlockCacheManager; +import com.alibaba.polardbx.executor.operator.scan.CacheReader; +import com.alibaba.polardbx.executor.operator.scan.ColumnReader; +import com.alibaba.polardbx.executor.operator.scan.LogicalRowGroup; +import com.alibaba.polardbx.executor.operator.scan.RowGroupIterator; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.BigBitType; +import com.alibaba.polardbx.optimizer.core.datatype.BinaryType; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.SliceType; +import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; +import com.google.common.base.Preconditions; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.orc.ColumnStatistics; +import org.apache.orc.CompressionKind; +import org.apache.orc.OrcFile; +import org.apache.orc.OrcProto; +import org.apache.orc.StripeInformation; +import org.apache.orc.TypeDescription; +import org.apache.orc.impl.OrcIndex; +import org.apache.orc.impl.TypeUtils; +import org.apache.orc.impl.reader.ReaderEncryption; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.text.MessageFormat; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicBoolean; + +public class RowGroupIteratorImpl implements RowGroupIterator { + private static final Logger LOGGER = LoggerFactory.getLogger("oss"); + /** + * Stripe id of this row-group iterator. + */ + private final int stripeId; + /** + * The effective row-group count starting from the given startRowGroupId. + */ + private final int effectiveGroupCount; + /** + * The starting row-group id from which the iterator enumerate the logical row-groups. + */ + private final int startRowGroupId; + /** + * The bitmap of selected row-groups, shared by all scan-works in one split. + * The length of bitmap rowGroupIncluded is equal to count of row-groups in stripe. + */ + private final boolean[] rowGroupIncluded; + /** + * The column ids of primary keys in the file. + * It may be null. 
+ */ + private final int[] primaryKeyColIds; + // parameters for IO processing + private final ExecutorService ioExecutor; + private final FileSystem fileSystem; + private final Configuration configuration; + private final Path filePath; + // for compression + private final int compressionSize; + private final CompressionKind compressionKind; + // preheated meta of this stripe + private final PreheatFileMeta preheatFileMeta; + // context for stripe parser + private final StripeInformation stripeInformation; + private final long startRowOfStripe; + private final TypeDescription fileSchema; + private final OrcFile.WriterVersion version; + private final ReaderEncryption encryption; + private final OrcProto.ColumnEncoding[] encodings; + private final boolean ignoreNonUtf8BloomFilter; + private final long maxBufferSize; + private final int indexStride; + private final boolean[] columnIncluded; + private final int chunkLimit; + private final BlockCacheManager blockCacheManager; + private final OSSColumnTransformer ossColumnTransformer; + private final ExecutionContext context; + private final boolean enableMetrics; + private final boolean enableDecimal64; + private final int maxDiskRangeChunkLimit; + private final long maxMergeDistance; + private final boolean enableBlockCache; + private final MemoryAllocatorCtx memoryAllocatorCtx; + /** + * Metrics at scan-work level. + */ + private RuntimeMetrics metrics; + private OrcIndex orcIndex; + private StripeLoader stripeLoader; + /** + * The mapping from columnId to column-reader. + */ + private Map columnReaders; + /** + * The mapping from columnId to cache-reader. + */ + private Map> cacheReaders; + /** + * The current effective group id. + */ + private int currentGroupId; + private int nextGroupId; + /** + * The fetched row group count must be less than or equal to effectiveGroupCount. + */ + private int fetchedRowGroupCount; + /** + * Stores the fetched row-groups, keyed by groupId.
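+ * Entries are created lazily by current() via computeIfAbsent and reused across repeated reads of the same group.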
+ */ + private Map> rowGroupMap; + + public RowGroupIteratorImpl( + RuntimeMetrics metrics, + + // selected stripe and row-groups + int stripeId, int startRowGroupId, int effectiveGroupCount, boolean[] rowGroupIncluded, + // for primary key + int[] primaryKeyColIds, + // to execute the io task + ExecutorService ioExecutor, + FileSystem fileSystem, Configuration configuration, Path filePath, + // for compression + int compressionSize, CompressionKind compressionKind, + // preheated meta of this stripe + PreheatFileMeta preheatFileMeta, + // context for stripe parser + StripeInformation stripeInformation, long startRowOfStripe, + TypeDescription fileSchema, OrcFile.WriterVersion version, + ReaderEncryption encryption, OrcProto.ColumnEncoding[] encodings, boolean ignoreNonUtf8BloomFilter, + long maxBufferSize, int maxDiskRangeChunkLimit, long maxMergeDistance, int chunkLimit, + BlockCacheManager blockCacheManager, + OSSColumnTransformer ossColumnTransformer, + ExecutionContext context, boolean[] columnIncluded, int indexStride, + // chunk size config + // global block cache manager + boolean enableDecimal64, MemoryAllocatorCtx memoryAllocatorCtx) { + this.metrics = metrics; + this.stripeId = stripeId; + this.startRowGroupId = startRowGroupId; + this.effectiveGroupCount = effectiveGroupCount; + this.rowGroupIncluded = rowGroupIncluded; + this.primaryKeyColIds = primaryKeyColIds; + this.ioExecutor = ioExecutor; + this.fileSystem = fileSystem; + this.configuration = configuration; + this.filePath = filePath; + this.compressionSize = compressionSize; + this.compressionKind = compressionKind; + this.preheatFileMeta = preheatFileMeta; + this.stripeInformation = stripeInformation; + this.startRowOfStripe = startRowOfStripe; + this.fileSchema = fileSchema; + this.version = version; + this.encryption = encryption; + this.encodings = encodings; + this.ignoreNonUtf8BloomFilter = ignoreNonUtf8BloomFilter; + this.maxBufferSize = maxBufferSize; + this.maxDiskRangeChunkLimit = maxDiskRangeChunkLimit; + this.maxMergeDistance = maxMergeDistance; + this.indexStride = indexStride; + this.columnIncluded = columnIncluded; + this.chunkLimit = chunkLimit; + this.blockCacheManager = blockCacheManager; + this.ossColumnTransformer = ossColumnTransformer; + this.context = context; + this.enableMetrics = context.getParamManager() + .getBoolean(ConnectionParams.ENABLE_COLUMNAR_METRICS); + this.enableDecimal64 = enableDecimal64; + this.enableBlockCache = context.getParamManager().getBoolean(ConnectionParams.ENABLE_BLOCK_CACHE); + this.memoryAllocatorCtx = memoryAllocatorCtx; + init(); + } + + static ColumnReader getDecimalReader(boolean enableDecimal64, ExecutionContext context, DataType inputType, + int colId, boolean isPrimaryKey, + StripeLoader stripeLoader, OrcIndex orcIndex, + RuntimeMetrics metrics, int indexStride, + OrcProto.ColumnEncoding encoding, boolean enableMetrics) { + + if (enableDecimal64 && TypeUtils.isDecimal64Precision(inputType.getPrecision())) { + // whether to read a decimal64-encoded column into a decimal64 block + boolean readIntoDecimal64 = context.getParamManager() + .getBoolean(ConnectionParams.ENABLE_COLUMNAR_DECIMAL64); + if (readIntoDecimal64) { + return new LongColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + encoding.getKind(), + indexStride, enableMetrics); + } else { + return new Decimal64ToDecimalColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + encoding.getKind(), + indexStride, enableMetrics); + } + } else { + switch (encoding.getKind()) { + case 
DIRECT: + case DIRECT_V2: { + return new DecimalColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + enableMetrics); + } + case DICTIONARY: + case DICTIONARY_V2: + return new DictionaryDecimalColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + encoding, indexStride, enableMetrics); + default: + throw GeneralUtil.nestedException("Unsupported encoding " + encoding.getKind()); + } + } + } + + private void init() { + this.stripeLoader = new AsyncStripeLoader( + ioExecutor, fileSystem, configuration, filePath, + columnIncluded, compressionSize, compressionKind, + preheatFileMeta, + stripeInformation, fileSchema, version, encryption, + encodings, ignoreNonUtf8BloomFilter, maxBufferSize, + maxDiskRangeChunkLimit, maxMergeDistance, metrics, + enableMetrics, memoryAllocatorCtx); + stripeLoader.open(); + + this.orcIndex = preheatFileMeta.getOrcIndex( + stripeInformation.getStripeId() + ); + + columnReaders = new HashMap<>(); + cacheReaders = new HashMap<>(); + + // Build cache readers for each column + for (int colId = 1; colId <= fileSchema.getMaximumId(); colId++) { + if (columnIncluded[colId]) { + CacheReader cacheReader = new CacheReaderImpl(stripeId, colId, rowGroupIncluded.length); + cacheReaders.put(colId, cacheReader); + } + } + + boolean enableMetrics = context.getParamManager().getBoolean(ConnectionParams.ENABLE_COLUMNAR_METRICS); + + // the col id is the unique identifier in the orc file schema, while the col index is just the index in the list. + for (int colIndex = 0; colIndex < ossColumnTransformer.columnCount(); colIndex++) { + Integer colId = ossColumnTransformer.getLocInOrc(colIndex); + + // if this column is missing in the ORC file, skip it; + // such a column will be filled with the default value instead + if (colId == null) { + continue; + } + + // Build column readers according to type-description and encoding kind. + // check if this col id belongs to the primary keys. + boolean isPrimaryKey = false; + if (primaryKeyColIds != null) { + for (int primaryKeyId : primaryKeyColIds) { + if (primaryKeyId == colId) { + isPrimaryKey = true; + break; + } + } + } + OrcProto.ColumnEncoding encoding = encodings[colId]; + Preconditions.checkNotNull(encoding); + + // NOTE: we need more type-specific implementations + TypeDescription typeDescription = fileSchema.getChildren().get(colId - 1); + DataType inputType = ossColumnTransformer.getSourceColumnMeta(colIndex).getDataType(); + Preconditions.checkNotNull(inputType); + + // Generate a specific column reader for each column according to its data type. + if (context.isEnableOrcRawTypeBlock()) { + // Special path for checking cci consistency. + // Normal oss read should not get here. + // Only read the following data types: long, double, float, byte[] + generateOrcRawTypeColumnReader(enableMetrics, colId, encoding, typeDescription, inputType, + isPrimaryKey); + } else { + generateColumnReader(enableMetrics, colId, encoding, typeDescription, inputType, isPrimaryKey); + } + } + + currentGroupId = -1; + nextGroupId = -1; + rowGroupMap = new HashMap<>(); + } + + private void generateColumnReader(boolean enableMetrics, int colId, OrcProto.ColumnEncoding encoding, + TypeDescription typeDescription, DataType inputType, boolean isPrimaryKey) { + switch (inputType.fieldType()) { + case MYSQL_TYPE_LONGLONG: { + if (!inputType.isUnsigned()) { + // for signed bigint.
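+ // Note: signed types map to readers of matching width; smaller unsigned types are promoted to the next wider reader, and unsigned bigint gets a dedicated reader.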
+ ColumnReader columnReader = + new LongColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + encoding.getKind(), + indexStride, enableMetrics); + columnReaders.put(colId, columnReader); + } else { + // for unsigned bigint + ColumnReader columnReader = + new UnsignedLongColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + encoding.getKind(), + indexStride, enableMetrics); + columnReaders.put(colId, columnReader); + } + break; + } + case MYSQL_TYPE_SHORT: { + if (!inputType.isUnsigned()) { + // for signed short + ColumnReader columnReader = + new ShortColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + encoding.getKind(), + indexStride, enableMetrics); + columnReaders.put(colId, columnReader); + } else { + // for unsigned short + ColumnReader columnReader = + new IntegerColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + encoding.getKind(), + indexStride, enableMetrics); + columnReaders.put(colId, columnReader); + } + break; + } + case MYSQL_TYPE_INT24: { + // for signed/unsigned mediumint + ColumnReader columnReader = + new IntegerColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + encoding.getKind(), + indexStride, enableMetrics); + columnReaders.put(colId, columnReader); + break; + } + case MYSQL_TYPE_TINY: { + if (!inputType.isUnsigned()) { + // for signed tiny + ColumnReader columnReader = + new ByteColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + encoding.getKind(), + indexStride, enableMetrics); + columnReaders.put(colId, columnReader); + } else { + // for unsigned tiny + ColumnReader columnReader = + new ShortColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + encoding.getKind(), + indexStride, enableMetrics); + columnReaders.put(colId, columnReader); + } + break; + } + case MYSQL_TYPE_LONG: { + if (!inputType.isUnsigned()) { + // for signed int + ColumnReader columnReader = + new IntegerColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + encoding.getKind(), + indexStride, enableMetrics); + columnReaders.put(colId, columnReader); + } else { + // for unsigned int + ColumnReader columnReader = + new LongColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + encoding.getKind(), + indexStride, enableMetrics); + columnReaders.put(colId, columnReader); + } + break; + } + case MYSQL_TYPE_FLOAT: { + ColumnReader columnReader = + new FloatColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, indexStride, + enableMetrics); + columnReaders.put(colId, columnReader); + break; + } + case MYSQL_TYPE_DOUBLE: { + ColumnReader columnReader = + new DoubleColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, indexStride, + enableMetrics); + columnReaders.put(colId, columnReader); + break; + } + case MYSQL_TYPE_YEAR: + case MYSQL_TYPE_DATETIME: + case MYSQL_TYPE_DATETIME2: { + ColumnReader columnReader = + new LongColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, encoding.getKind(), + indexStride, + enableMetrics); + columnReaders.put(colId, columnReader); + break; + } + case MYSQL_TYPE_TIMESTAMP: + case MYSQL_TYPE_TIMESTAMP2: { + ColumnReader columnReader = + new TimestampColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + encoding.getKind(), + indexStride, + enableMetrics, context); + columnReaders.put(colId, columnReader); + break; + } + case MYSQL_TYPE_DATE: + case MYSQL_TYPE_NEWDATE: + case MYSQL_TYPE_TIME: + case MYSQL_TYPE_TIME2: { + // for date and time + ColumnReader columnReader = + new
PackedTimeColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + encoding.getKind(), + indexStride, enableMetrics); + columnReaders.put(colId, columnReader); + break; + } + case MYSQL_TYPE_DECIMAL: + case MYSQL_TYPE_NEWDECIMAL: { + ColumnReader columnReader = getDecimalReader(enableDecimal64, context, inputType, colId, isPrimaryKey, + stripeLoader, orcIndex, metrics, indexStride, encoding, enableMetrics); + columnReaders.put(colId, columnReader); + break; + } + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_STRING: + case MYSQL_TYPE_VARCHAR: { + if (inputType instanceof BinaryType) { + switch (encoding.getKind()) { + case DIRECT: + case DIRECT_V2: { + // For column in direct encoding + ColumnReader columnReader = + new DirectBinaryColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + indexStride, + enableMetrics); + columnReaders.put(colId, columnReader); + break; + } + case DICTIONARY: + case DICTIONARY_V2: + // for dictionary encoding + ColumnReader columnReader = + new DictionaryBinaryColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + encoding, + indexStride, enableMetrics); + columnReaders.put(colId, columnReader); + break; + default: + throw GeneralUtil.nestedException("Unsupported encoding " + encoding.getKind()); + } + } else if (inputType instanceof SliceType) { + boolean enableSliceDict = + context.getParamManager().getBoolean(ConnectionParams.ENABLE_COLUMNAR_SLICE_DICT); + switch (encoding.getKind()) { + case DIRECT: + case DIRECT_V2: { + // For column in direct encoding + ColumnReader columnReader = + new DirectVarcharColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + indexStride, + enableMetrics); + columnReaders.put(colId, columnReader); + break; + } + case DICTIONARY: + case DICTIONARY_V2: + // for dictionary encoding + ColumnReader columnReader = + new DictionaryVarcharColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + encoding, + indexStride, enableMetrics, enableSliceDict); + columnReaders.put(colId, columnReader); + break; + default: + throw GeneralUtil.nestedException("Unsupported encoding " + encoding.getKind()); + } + break; + } else { + throw new UnsupportedOperationException( + "String type not supported: " + inputType.getClass().getName()); + } + break; + } + case MYSQL_TYPE_ENUM: { + switch (encoding.getKind()) { + case DIRECT: + case DIRECT_V2: { + // For column in direct encoding + ColumnReader columnReader = + new DirectEnumColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + indexStride, + enableMetrics, inputType); + columnReaders.put(colId, columnReader); + break; + } + case DICTIONARY: + case DICTIONARY_V2: + // for dictionary encoding + ColumnReader columnReader = + new DictionaryEnumColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + encoding, + indexStride, enableMetrics); + columnReaders.put(colId, columnReader); + break; + default: + throw GeneralUtil.nestedException("Unsupported encoding " + encoding.getKind()); + } + + break; + } + case MYSQL_TYPE_JSON: { + switch (encoding.getKind()) { + case DIRECT: + case DIRECT_V2: { + // For column in direct encoding + ColumnReader columnReader = + new DirectJsonColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, indexStride, + enableMetrics, inputType); + columnReaders.put(colId, columnReader); + break; + } + case DICTIONARY: + case DICTIONARY_V2: + // for dictionary encoding + ColumnReader columnReader = + new DictionaryJsonColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, 
metrics, + encoding, indexStride, enableMetrics); + columnReaders.put(colId, columnReader); + break; + default: + throw GeneralUtil.nestedException("Unsupported encoding " + encoding.getKind()); + } + break; + } + case MYSQL_TYPE_BIT: { + if (inputType instanceof BigBitType) { + ColumnReader columnReader = + new BigBitColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + encoding.getKind(), + indexStride, enableMetrics); + columnReaders.put(colId, columnReader); + } else { + // for bit + ColumnReader columnReader = + new IntegerColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + encoding.getKind(), + indexStride, enableMetrics); + columnReaders.put(colId, columnReader); + } + break; + } + case MYSQL_TYPE_BLOB: { + switch (encoding.getKind()) { + case DIRECT: + case DIRECT_V2: { + // For column in direct encoding + ColumnReader columnReader = + new DirectBlobColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + indexStride, + enableMetrics); + columnReaders.put(colId, columnReader); + break; + } + case DICTIONARY: + case DICTIONARY_V2: + // for dictionary encoding + ColumnReader columnReader = + new DictionaryBlobColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, + encoding, + indexStride, enableMetrics); + columnReaders.put(colId, columnReader); + break; + default: + throw GeneralUtil.nestedException("Unsupported encoding " + encoding.getKind()); + } + break; + } + + default: + throw GeneralUtil.nestedException("Unsupported type-description " + typeDescription); + } + } + + private void generateOrcRawTypeColumnReader(boolean enableMetrics, int colId, OrcProto.ColumnEncoding encoding, + TypeDescription typeDescription, DataType inputType, + boolean isPrimaryKey) { + switch (inputType.fieldType()) { + case MYSQL_TYPE_LONGLONG: + case MYSQL_TYPE_SHORT: + case MYSQL_TYPE_INT24: + case MYSQL_TYPE_TINY: + case MYSQL_TYPE_LONG: + case MYSQL_TYPE_YEAR: + case MYSQL_TYPE_DATETIME: + case MYSQL_TYPE_DATETIME2: + case MYSQL_TYPE_TIMESTAMP: + case MYSQL_TYPE_TIMESTAMP2: + case MYSQL_TYPE_DATE: + case MYSQL_TYPE_NEWDATE: + case MYSQL_TYPE_TIME: + case MYSQL_TYPE_TIME2: + case MYSQL_TYPE_BIT: { + ColumnReader columnReader = + new LongColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, encoding.getKind(), + indexStride, enableMetrics); + columnReaders.put(colId, columnReader); + break; + } + case MYSQL_TYPE_FLOAT: { + ColumnReader columnReader = + new DoubleBlockFloatColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, indexStride, + enableMetrics); + columnReaders.put(colId, columnReader); + break; + } + case MYSQL_TYPE_DOUBLE: { + ColumnReader columnReader = + new DoubleColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, indexStride, + enableMetrics); + columnReaders.put(colId, columnReader); + break; + } + case MYSQL_TYPE_DECIMAL: + case MYSQL_TYPE_NEWDECIMAL: { + if (enableDecimal64 && TypeUtils.isDecimal64Precision(inputType.getPrecision())) { + // Long reader. + ColumnReader columnReader = + new LongColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, encoding.getKind(), + indexStride, enableMetrics); + columnReaders.put(colId, columnReader); + } else { + // Byte array reader. 
+ ColumnReader columnReader = + new DirectBinaryColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, indexStride, + enableMetrics); + columnReaders.put(colId, columnReader); + } + break; + } + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_STRING: + case MYSQL_TYPE_VARCHAR: { + if (inputType instanceof BinaryType || inputType instanceof SliceType) { + switch (encoding.getKind()) { + case DIRECT: + case DIRECT_V2: { + // For column in direct encoding + ColumnReader columnReader = + new DirectBinaryColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, indexStride, + enableMetrics); + columnReaders.put(colId, columnReader); + break; + } + case DICTIONARY: + case DICTIONARY_V2: + // for dictionary encoding + ColumnReader columnReader = + new DictionaryBinaryColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, encoding, + indexStride, enableMetrics); + columnReaders.put(colId, columnReader); + break; + default: + throw GeneralUtil.nestedException("Unsupported encoding " + encoding.getKind()); + } + } else { + throw new UnsupportedOperationException( + "String type not supported: " + inputType.getClass().getName()); + } + break; + } + case MYSQL_TYPE_ENUM: + case MYSQL_TYPE_BLOB: { + switch (encoding.getKind()) { + case DIRECT: + case DIRECT_V2: { + // For column in direct encoding + ColumnReader columnReader = + new DirectBinaryColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, indexStride, + enableMetrics); + columnReaders.put(colId, columnReader); + break; + } + case DICTIONARY: + case DICTIONARY_V2: + // for dictionary encoding + ColumnReader columnReader = + new DictionaryBinaryColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, encoding, + indexStride, enableMetrics); + columnReaders.put(colId, columnReader); + break; + default: + throw GeneralUtil.nestedException("Unsupported encoding " + encoding.getKind()); + } + + break; + } + case MYSQL_TYPE_JSON: { + ColumnReader columnReader = + new DirectBinaryColumnReader(colId, isPrimaryKey, stripeLoader, orcIndex, metrics, indexStride, + enableMetrics); + columnReaders.put(colId, columnReader); + break; + } + + default: + throw GeneralUtil.nestedException("Unsupported type-description " + typeDescription); + } + } + + private int getRowCount(int groupId) { + final long rowsInStripe = stripeInformation.getNumberOfRows(); + int groupsInStripe = (int) ((rowsInStripe + indexStride - 1) / indexStride); + + if (groupId != groupsInStripe - 1) { + return indexStride; + } else { + return (int) (rowsInStripe - (groupsInStripe - 1) * indexStride); + } + } + + private int startRowId(int groupId) { + return (int) (groupId * indexStride + startRowOfStripe); + } + + @Override + public void seek(int rowId) { + currentGroupId = rowId / indexStride; + } + + @Override + public LogicalRowGroup current() { + LogicalRowGroup logicalRowGroup = rowGroupMap.computeIfAbsent( + currentGroupId, any -> new LogicalRowGroupImpl( + metrics, + filePath, stripeId, currentGroupId, + getRowCount(currentGroupId), startRowId(currentGroupId), + fileSchema, ossColumnTransformer, encodings, columnIncluded, chunkLimit, + columnReaders, cacheReaders, blockCacheManager, + context) + ); + return logicalRowGroup; + } + + @Override + public BlockCacheManager getCacheManager() { + return blockCacheManager; + } + + @Override + public boolean hasNext() { + // start from param: startRowGroupId + // end with bitmap length or fetchedRowGroupCount + for (int groupId = currentGroupId + 1; + groupId < rowGroupIncluded.length && 
fetchedRowGroupCount < effectiveGroupCount; + groupId++) { + if (groupId >= startRowGroupId && rowGroupIncluded[groupId]) { + nextGroupId = groupId; + fetchedRowGroupCount++; + return true; + } + } + nextGroupId = -1; + return false; + } + + @Override + public Void next() { + // check if the next group exist. + Preconditions.checkArgument(nextGroupId != -1); + currentGroupId = nextGroupId; + return null; + } + + @Override + public StripeLoader getStripeLoader() { + return this.stripeLoader; + } + + @Override + public ColumnReader getColumnReader(int columnId) { + return this.columnReaders.get(columnId); + } + + @Override + public CacheReader getCacheReader(int columnId) { + CacheReader cacheReader = this.cacheReaders.get(columnId); + if (!enableBlockCache && !cacheReader.isInitialized()) { + // Initialize cache reader with empty map to forbid the block cache. + cacheReader.initialize(new HashMap<>()); + } + return cacheReader; + } + + @Override + public Path filePath() { + return this.filePath; + } + + @Override + public int stripeId() { + return this.stripeId; + } + + @Override + public void noMoreChunks() { + columnReaders.forEach((colId, colReader) -> { + colReader.setNoMoreBlocks(); + }); + } + + @Override + public boolean[] columnIncluded() { + return this.columnIncluded; + } + + @Override + public boolean[] rgIncluded() { + return rowGroupIncluded; + } + + @Override + public void close(boolean force) { + columnReaders.forEach((colId, colReader) -> { + if (LOGGER.isDebugEnabled()) { + // In force mode, or ref count of reader is zero. + LOGGER.debug(MessageFormat.format( + "when row group iterator closing, colId = {0}, refCount = {1}, noMoreBlock = {2}, workId = {3}", + colId, colReader.refCount(), colReader.hasNoMoreBlocks(), metrics.name() + )); + } + if (force || (colReader.refCount() <= 0 && colReader.hasNoMoreBlocks())) { + colReader.close(); + } + }); + } + + public boolean checkIfAllReadersClosed() { + AtomicBoolean allClosed = new AtomicBoolean(true); + columnReaders.forEach((colId, colReader) -> { + if (!colReader.isClosed()) { + allClosed.set(false); + } + }); + return allClosed.get(); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/RowGroupReadOption.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/RowGroupReadOption.java new file mode 100644 index 000000000..23498bd77 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/RowGroupReadOption.java @@ -0,0 +1,80 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import org.apache.hadoop.fs.Path; + +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +public class RowGroupReadOption { + private Path path; + private long stripeId; + private boolean[] rowGroupIncluded; + private boolean[] columnsIncluded; + + public RowGroupReadOption(Path path, long stripeId, boolean[] rowGroupIncluded, boolean[] columnsIncluded) { + this.path = path; + this.stripeId = stripeId; + this.rowGroupIncluded = rowGroupIncluded; + this.columnsIncluded = columnsIncluded; + } + + public void resetRowGroups(Random random, double ratio) { + // fill the row group bitmap according to ratio + for (int i = 0; i < rowGroupIncluded.length * ratio; i++) { + rowGroupIncluded[i] = true; + } + } + + public Path getPath() { + return path; + } + + public long getStripeId() { + return stripeId; + } + + public boolean[] getRowGroupIncluded() { + return rowGroupIncluded; + } + + public boolean[] getColumnsIncluded() { + return columnsIncluded; + } + + @Override + public String toString() { + return "RowGroupReadOption{" + + "path=" + path + + ", stripeId=" + stripeId + + ", rowGroupIncluded=" + toList(rowGroupIncluded) + + ", columnsIncluded=" + toList(columnsIncluded) + + '}'; + } + + private static List toList(boolean[] array) { + List result = new ArrayList<>(); + for (int i = 0; i < array.length; i++) { + if (array[i]) { + result.add(i); + } + } + return result; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/ShortColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/ShortColumnReader.java new file mode 100644 index 000000000..ddb403cb1 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/ShortColumnReader.java @@ -0,0 +1,82 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.chunk.ShortBlock; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.google.common.base.Preconditions; +import org.apache.orc.OrcProto; +import org.apache.orc.impl.OrcIndex; + +import java.io.IOException; + +public class ShortColumnReader extends AbstractLongColumnReader { + public ShortColumnReader(int columnId, boolean isPrimaryKey, StripeLoader stripeLoader, OrcIndex orcIndex, + RuntimeMetrics metrics, + OrcProto.ColumnEncoding.Kind kind, int indexStride, + boolean enableMetrics) { + super(columnId, isPrimaryKey, stripeLoader, orcIndex, metrics, kind, indexStride, enableMetrics); + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof ShortBlock); + init(); + + long start = System.nanoTime(); + ShortBlock shortBlock = (ShortBlock) randomAccessBlock; + short[] array = shortBlock.shortArray(); + boolean[] nulls = shortBlock.nulls(); + + if (present == null) { + randomAccessBlock.setHasNull(false); + for (int i = 0; i < positionCount; i++) { + // no null value. + long longVal = data.next(); + array[i] = (short) longVal; + lastPosition++; + } + + // destroy null array to save the memory. + shortBlock.destroyNulls(true); + + } else { + // there are some null values + randomAccessBlock.setHasNull(true); + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + array[i] = 0; + } else { + // if not null + long longVal = data.next(); + array[i] = (short) longVal; + } + lastPosition++; + } + } + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/SimpleBlockCacheManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/SimpleBlockCacheManager.java new file mode 100644 index 000000000..d15393efe --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/SimpleBlockCacheManager.java @@ -0,0 +1,484 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.operator.scan.BlockCacheManager; +import com.alibaba.polardbx.executor.operator.scan.SeekableIterator; +import com.github.benmanes.caffeine.cache.Cache; +import com.github.benmanes.caffeine.cache.CacheLoader; +import com.github.benmanes.caffeine.cache.Caffeine; +import com.github.benmanes.caffeine.cache.LoadingCache; +import com.github.benmanes.caffeine.cache.RemovalListener; +import com.github.benmanes.caffeine.cache.Weigher; +import com.google.common.base.Preconditions; +import org.apache.hadoop.fs.Path; + +import java.text.MessageFormat; +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.StampedLock; + +import static com.alibaba.polardbx.gms.engine.FileStoreStatistics.CACHE_STATS_FIELD_COUNT; + +/** + * A simple implementation of BlockCacheManager. + * The CacheId is composed of: + * pathId (tableId + fileId) (32bit) + rowGroupId (16bit) + stripeId (4bit) + columnId (12bit). + */ +public class SimpleBlockCacheManager implements BlockCacheManager { + public static final String BLOCK_CACHE = "BLOCK CACHE"; + public static final String IN_MEMORY = "IN MEMORY"; + + // mark word (8 Bytes) + class pointer (4 Bytes) + long value (8 Bytes) + private static final int LONG_OBJECT_IN_BYTES = 20; + + /** + * Caches the block collections that cover all blocks of the row-group they belong to. + */ + private Cache validCache; + + /** + * The in-flight cache is responsible for caching block collections + * that do not yet contain all blocks of the row-group they belong to. + */ + private LoadingCache inFlightCache; + + private AtomicLong size; + private AtomicLong hitCount; + private AtomicLong flightCount; + private AtomicLong missCount; + private AtomicLong quotaExceedCount; + + /** + * The CacheId is composed of: + * pathId (tableId + fileId) (32bit) + rowGroupId (16bit) + stripeId (4bit) + columnId (12bit). + */ + public static long buildBlockCacheKey(Path path, int stripeId, int columnId, int rowGroupId) { + // The current method is not collision-proof. Ideally the key would be a compressed value of the two ids: + // table_id + file_id + int pathHash = path.toString().hashCode(); + + // rowGroupId (16bit) + stripeId (4bit) + columnId (12bit) + int s = (stripeId << 12) | (columnId & 0x0FFF); + int innerId = (rowGroupId << 16) | (s & 0xFFFF); + + return (((long) pathHash) << 32) | (innerId & 0xffffffffL); + } + + public SimpleBlockCacheManager() { + this.size = new AtomicLong(0L); + this.hitCount = new AtomicLong(0L); + this.missCount = new AtomicLong(0L); + this.quotaExceedCount = new AtomicLong(0L); + this.flightCount = new AtomicLong(0L); + this.validCache = Caffeine.newBuilder() + .maximumWeight(MAXIMUM_MEMORY_SIZE) + .weigher((Weigher) (key, value) -> + + // calculate memory size of block cache for cache weight. + LONG_OBJECT_IN_BYTES + value.memorySize() + ) + .removalListener((longValue, simplifiedBlockCache, removalCause) -> { + // decrement memory size when a block cache entry is invalidated.
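+ // Removal here is typically caused by exceeding maximumWeight, hence the quota-exceed counter below.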
+ size.getAndAdd(-(LONG_OBJECT_IN_BYTES + simplifiedBlockCache.memorySize())); + quotaExceedCount.getAndIncrement(); + } + ) + .build(); + + this.inFlightCache = Caffeine.newBuilder() + .maximumSize(MAXIMUM_IN_FLIGHT_ENTRIES) + .removalListener((RemovalListener) (longValue, blockCache, removalCause) -> { + // decrement memory size when an in-flight entry is invalidated. + size.getAndAdd(-(LONG_OBJECT_IN_BYTES + blockCache.memorySize())); + }) + .expireAfterWrite(IN_FLIGHT_CACHE_TTL_IN_SECOND, TimeUnit.SECONDS) + .build(new CacheLoader() { + @Override + public BlockCache load(Long key) { + + // calculate memory size of block cache and cache key + BlockCache result = new BlockCache(); + size.getAndAdd(result.memorySize() + LONG_OBJECT_IN_BYTES); + return result; + } + }); + } + + /** + * A cache unit for all blocks within a row-group. + */ + private static class BlockCache { + private static final int BASE_MEMORY_SIZE = 96; + public static final int UNSET_CHUNK_LIMIT = -1; + + /** + * The fixed limit of block position count. + */ + private int chunkLimit; + + /** + * The block cache is completed only if all slots in blockBitmap are marked TRUE. + */ + private AtomicBoolean isCompleted; + + /** + * The total number of rows in the row-group. + */ + private int rowCountInGroup; + + /** + * Responsible for marking the existence of all blocks within the row-group. + */ + private boolean[] blockBitmap; + + /** + * Stores the blocks within the row-group. + * The index of a block in this array is equal to its start position / chunkLimit. + */ + private Block[] blocks; + + private StampedLock rwLock; + + BlockCache() { + isCompleted = new AtomicBoolean(false); + rwLock = new StampedLock(); + rowCountInGroup = -1; + chunkLimit = UNSET_CHUNK_LIMIT; + } + + public SimplifiedBlockCache simplify() { + return new SimplifiedBlockCache(chunkLimit, blocks); + } + + public int memorySize() { + int totalSize = BASE_MEMORY_SIZE; + if (blocks != null) { + for (int i = 0; i < blocks.length; i++) { + if (blocks[i] != null) { + totalSize += blocks[i].getElementUsedBytes(); + } + } + } + + if (blockBitmap != null) { + totalSize += blockBitmap.length * Byte.BYTES; + } + return totalSize; + } + + public boolean isCompleted() { + return isCompleted.get(); + } + + /** + * The store operation is idempotent. + * + * @return TRUE if completed. + */ + public boolean put(Block block, int rowCount, int position, int positionCount, int chunkLimit) { + // check that rowCountInGroup is either unset or consistent. + Preconditions.checkArgument(this.rowCountInGroup == -1 || rowCount == rowCountInGroup); + + // check position alignment. + Preconditions.checkArgument(position % chunkLimit == 0); + Preconditions.checkArgument(positionCount == Math.min(chunkLimit, rowCount - position)); + + if (this.chunkLimit == UNSET_CHUNK_LIMIT) { + this.chunkLimit = chunkLimit; + } else { + Preconditions.checkArgument(this.chunkLimit == chunkLimit); + } + + this.rowCountInGroup = rowCount; + if (blockBitmap == null) { + // Initialization if needed. + int slots = (rowCountInGroup + chunkLimit - 1) / chunkLimit; + blockBitmap = new boolean[slots]; + blocks = new Block[slots]; + } + + // mark and store the block, and check if all blocks have been stored.
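+ // The slot index is position / chunkLimit, which is well-defined because the alignment of position was checked above.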
+ blockBitmap[position / chunkLimit] = true; + blocks[position / chunkLimit] = block; + for (boolean marked : blockBitmap) { + if (!marked) { + return false; + } + } + + isCompleted.set(true); + return true; + } + + public SeekableIterator newIterator() { + long stamp = rwLock.readLock(); + try { + Preconditions.checkNotNull(blocks); + return new BlockIterator(); + } finally { + rwLock.unlockRead(stamp); + } + + } + + /** + * A simple block iterator that only maintains a block index for random access. + */ + private class BlockIterator implements SeekableIterator { + + @Override + public Block seek(int position) { + Preconditions.checkArgument(position < rowCountInGroup); + Preconditions.checkArgument(chunkLimit > UNSET_CHUNK_LIMIT); + int blockIndex = position / chunkLimit; + if (blockIndex >= blocks.length) { + throw GeneralUtil.nestedException(MessageFormat.format( + "bad position: {0}, blocks array size: {1}, chunk limit: {2}", + position, blocks.length, chunkLimit)); + } + return blocks[blockIndex]; + } + + @Override + public boolean hasNext() { + throw new UnsupportedOperationException(); + } + + @Override + public Block next() { + throw new UnsupportedOperationException(); + } + } + } + + private static class SimplifiedBlockCache { + private static final int BASE_MEMORY_SIZE = 30; + /** + * The fixed limit of block position count. + * It must be less than 4096. + */ + private final short chunkLimit; + + /** + * Stores the blocks within the row-group. + * The index of a block in this array is equal to its start position / chunkLimit. + */ + private final Block[] blocks; + + SimplifiedBlockCache(int chunkLimit, Block[] blocks) { + this.chunkLimit = (short) chunkLimit; + this.blocks = blocks; + } + + public SeekableIterator newIterator() { + Preconditions.checkNotNull(blocks); + return new BlockIterator(); + } + + public int memorySize() { + int totalSize = BASE_MEMORY_SIZE; + if (blocks != null) { + for (int i = 0; i < blocks.length; i++) { + if (blocks[i] != null) { + totalSize += blocks[i].getElementUsedBytes(); + } + } + } + return totalSize; + } + + /** + * A simple block iterator that only maintains a block index for random access.
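+ * Only seek() is supported; hasNext() and next() deliberately throw.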
+ */ + private class BlockIterator implements SeekableIterator { + + @Override + public Block seek(int position) { + int blockIndex = position / chunkLimit; + if (blockIndex >= blocks.length) { + throw GeneralUtil.nestedException(MessageFormat.format( + "bad position: {0}, blocks array size: {1}, chunk limit: {2}", + position, blocks.length, chunkLimit)); + } + return blocks[blockIndex]; + } + + @Override + public boolean hasNext() { + throw new UnsupportedOperationException(); + } + + @Override + public Block next() { + throw new UnsupportedOperationException(); + } + } + } + + @Override + public long getMemorySize() { + return size.get(); + } + + @Override + public void clear() { + validCache.invalidateAll(); + inFlightCache.invalidateAll(); + size.set(0L); + hitCount.set(0); + missCount.set(0); + quotaExceedCount.set(0); + flightCount.set(0); + } + + @Override + public byte[][] generateCacheStatsPacket() { + byte[][] results = new byte[CACHE_STATS_FIELD_COUNT][]; + int pos = 0; + results[pos++] = BLOCK_CACHE.getBytes(); + results[pos++] = String.valueOf(size.get()).getBytes(); + results[pos++] = String.valueOf(validCache.estimatedSize()).getBytes(); + results[pos++] = String.valueOf(inFlightCache.estimatedSize()).getBytes(); + results[pos++] = String.valueOf(hitCount.get()).getBytes(); + results[pos++] = String.valueOf(flightCount.get()).getBytes(); + results[pos++] = String.valueOf(missCount.get()).getBytes(); + results[pos++] = String.valueOf(quotaExceedCount.get()).getBytes(); + results[pos++] = String.valueOf(-1).getBytes(); + results[pos++] = IN_MEMORY.getBytes(); + results[pos++] = String.valueOf(-1).getBytes(); + results[pos++] = String.valueOf(-1).getBytes(); + results[pos++] = new StringBuilder().append(MAXIMUM_MEMORY_SIZE).append(" BYTES").toString().getBytes(); + + return results; + } + + @Override + public boolean isCached(Path path, int stripeId, int rowGroupId, int columnId) { + return validCache.getIfPresent(buildBlockCacheKey(path, stripeId, columnId, rowGroupId)) != null; + } + + @Override + public SeekableIterator getCaches(Path path, int stripeId, int rowGroupId, int columnId) { + SimplifiedBlockCache blockCache = validCache.getIfPresent( + buildBlockCacheKey(path, stripeId, columnId, rowGroupId)); + if (blockCache != null) { + hitCount.getAndIncrement(); + return blockCache.newIterator(); + } + missCount.getAndIncrement(); + return null; + } + + @Override + public Map> getCachedRowGroups(Path path, int stripeId, int columnId, + boolean[] rowGroupIncluded) { + Map> result = new TreeMap<>(); + for (int groupId = 0; groupId < rowGroupIncluded.length; groupId++) { + if (!rowGroupIncluded[groupId]) { + continue; + } + // Collect all valid cache containing the blocks in selected row-groups. + long cacheKey = buildBlockCacheKey(path, stripeId, columnId, groupId); + SimplifiedBlockCache blockCache = validCache.getIfPresent(cacheKey); + if (blockCache != null) { + result.put(groupId, blockCache.newIterator()); + hitCount.getAndIncrement(); + } else { + missCount.getAndIncrement(); + } + } + + return result; + } + + @Override + public Map> getInFlightCachedRowGroups(Path path, int stripeId, int columnId, + boolean[] rowGroupIncluded) { + Map> result = new TreeMap<>(); + for (int groupId = 0; groupId < rowGroupIncluded.length; groupId++) { + if (!rowGroupIncluded[groupId]) { + continue; + } + // Collect all in-flight cache containing the part of blocks in selected row-groups. 
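+ // Unlike getCachedRowGroups, a miss is not counted here: an absent in-flight entry just means no partial blocks are buffered for that row-group yet.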
+ long cacheKey = buildBlockCacheKey(path, stripeId, columnId, groupId); + BlockCache blockCache = inFlightCache.getIfPresent(cacheKey); + if (blockCache != null) { + flightCount.incrementAndGet(); + result.put(groupId, blockCache.newIterator()); + } + } + + return result; + } + + @Override + public void putCache(Block block, int chunkLimit, int totalRows, Path path, int stripeId, int rowGroupId, + int columnId, + int position, int rows) { + Preconditions.checkArgument(block != null && block.getPositionCount() == rows); + long cacheKey = buildBlockCacheKey(path, stripeId, columnId, rowGroupId); + + try { + // automatically increment size in flight + BlockCache inFlight = inFlightCache.get(cacheKey); + long stamp = inFlight.rwLock.writeLock(); + try { + + // Recheck the completed flag of this block cache. Do nothing if it's completed. + if (inFlight.isCompleted()) { + return; + } + + // Try to put block into block cache. It will strictly check the alignment and validity of block. + boolean completed = inFlight.put(block, totalRows, position, rows, chunkLimit); + size.addAndGet(block.getElementUsedBytes()); + + // move the block-cache from in-flight cache into valid cache. + if (completed) { + SimplifiedBlockCache simplifiedCache = inFlight.simplify(); + validCache.put(cacheKey, simplifiedCache); + + // automatically decrement size in flight + inFlightCache.invalidate(cacheKey); + + size.addAndGet(LONG_OBJECT_IN_BYTES + simplifiedCache.memorySize()); + } + } finally { + inFlight.rwLock.unlockWrite(stamp); + } + + } catch (Throwable e) { + throw GeneralUtil.nestedException(e); + } + } + + @Override + public long getHitCount() { + return hitCount.get(); + } + + @Override + public long getMissCount() { + return missCount.get(); + } +} \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/SimpleWorkPool.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/SimpleWorkPool.java new file mode 100644 index 000000000..6bfa502d4 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/SimpleWorkPool.java @@ -0,0 +1,75 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.operator.scan.ColumnarSplit; +import com.alibaba.polardbx.executor.operator.scan.ScanWork; +import com.alibaba.polardbx.executor.operator.scan.WorkPool; +import com.google.common.base.Preconditions; + +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Queue; +import java.util.concurrent.PriorityBlockingQueue; + +public class SimpleWorkPool implements WorkPool { + private Map> splitMap; + private boolean noMoreSplits; + + public SimpleWorkPool() { + this.splitMap = new LinkedHashMap<>(); + this.noMoreSplits = false; + } + + @Override + public void addSplit(int driverId, ColumnarSplit split) { + Queue splitQueue = splitMap.computeIfAbsent(driverId, + any -> new PriorityBlockingQueue<>()); + + splitQueue.add(split); + } + + @Override + public void noMoreSplits(int driverId) { + noMoreSplits = true; + } + + @Override + public ScanWork pickUp(int driverId) { + Preconditions.checkArgument(noMoreSplits); + + Queue splitQueue = splitMap.get(driverId); + if (splitQueue == null || splitQueue.isEmpty()) { + return null; + } + + // Peek at the head of the queue until we find a split with an effective scan-work. + while (!splitQueue.isEmpty()) { + ColumnarSplit columnarSplit = splitQueue.peek(); + ScanWork scanWork = columnarSplit.nextWork(); + if (scanWork == null) { + // Remove this split from the queue because its scan-works have run out. + splitQueue.poll(); + } else { + return scanWork; + } + } + + return null; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/SingleDictionaryMapping.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/SingleDictionaryMapping.java new file mode 100644 index 000000000..71a5db033 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/SingleDictionaryMapping.java @@ -0,0 +1,68 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.executor.operator.scan.BlockDictionary; +import io.airlift.slice.Slice; + +import java.util.HashMap; +import java.util.Map; + +/** + * A dictionary mapping for a single value. + */ +public class SingleDictionaryMapping implements DictionaryMapping { + private final Slice singleValue; + + /** + * The size of the int array is 1; an element of -1 means no matching value exists in the dictionary.
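+ * Entries are keyed by the hash code of each dictionary seen so far, so repeated merges against the same dictionary reuse the cached mapping.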
+ */ + private Map reMappings = new HashMap<>(); + + public SingleDictionaryMapping(Slice singleValue) { + this.singleValue = singleValue; + } + + @Override + public int[] merge(BlockDictionary dictionary) { + int hashCode = dictionary.hashCode(); + int[] reMapping; + if ((reMapping = reMappings.get(hashCode)) != null) { + return reMapping; + } + + // use an int array with one slot to store the matched index in the target dictionary. + reMapping = new int[] {-1}; + for (int originalDictId = 0; originalDictId < dictionary.size(); originalDictId++) { + Slice originalDictValue = dictionary.getValue(originalDictId); + + // Find the first matching dict value. + if (originalDictValue.compareTo(singleValue) == 0) { + reMapping[0] = originalDictId; + break; + } + } + reMappings.put(hashCode, reMapping); + return reMapping; + } + + @Override + public void close() { + reMappings.clear(); + reMappings = null; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/SliceOutputWrapper.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/SliceOutputWrapper.java new file mode 100644 index 000000000..d0ba30f5f --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/SliceOutputWrapper.java @@ -0,0 +1,48 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.google.common.base.Preconditions; +import io.airlift.slice.SliceOutput; +import org.apache.orc.customized.ORCDataOutput; + +import java.nio.ByteBuffer; + +/** + * An ORCDataOutput wrapped around a SliceOutput object. + * It automatically maintains the current byte offset.
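+ * Note that read() only accepts heap-backed buffers, since it copies via buffer.array().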
+ */ +public class SliceOutputWrapper implements ORCDataOutput { + private final SliceOutput sliceOutput; + + public SliceOutputWrapper(SliceOutput sliceOutput) { + this.sliceOutput = sliceOutput; + } + + public void read(ByteBuffer buffer, int bytesToRead) { + Preconditions.checkArgument(bytesToRead <= buffer.remaining()); + + // must be an instance of HeapByteBuffer + Preconditions.checkArgument(buffer.array() != null); + + // NOTE: + // HeapByteBuffer.get(byte[] dst, int offset, int length) + // ix(position()) = position + offset + sliceOutput.write(buffer.array(), buffer.arrayOffset() + buffer.position(), bytesToRead); + buffer.position(buffer.position() + bytesToRead); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/StaticStripePlanner.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/StaticStripePlanner.java new file mode 100644 index 000000000..806ae0d48 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/StaticStripePlanner.java @@ -0,0 +1,591 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.google.protobuf.CodedInputStream; +import org.apache.orc.DataReader; +import org.apache.orc.EncryptionAlgorithm; +import org.apache.orc.OrcFile; +import org.apache.orc.OrcProto; +import org.apache.orc.StripeInformation; +import org.apache.orc.TypeDescription; +import org.apache.orc.impl.BufferChunk; +import org.apache.orc.impl.BufferChunkList; +import org.apache.orc.impl.CryptoUtils; +import org.apache.orc.impl.InStream; +import org.apache.orc.impl.OrcIndex; +import org.apache.orc.impl.RecordReaderUtils; +import org.apache.orc.impl.StreamName; +import org.apache.orc.impl.reader.ReaderEncryption; +import org.apache.orc.impl.reader.ReaderEncryptionVariant; +import org.apache.orc.impl.reader.StreamInformation; + +import java.io.IOException; +import java.security.Key; +import java.util.Arrays; +import java.util.Map; + +public class StaticStripePlanner { + /** + * Parse a new stripe. Resets the current stripe state. + * + * @param columnInclude an array with true for each column to read + * @return a StreamManager describing the streams of the parsed stripe + */ + public static StreamManager parseStripe( + StripeContext stripeContext, + boolean[] columnInclude, + OrcProto.StripeFooter footer) { + StripeInformation stripe = stripeContext.getStripeInformation(); + stripeContext.setCurrentStripeId(stripe.getStripeId()); + stripeContext.setOriginalStripeId(stripe.getEncryptionStripeId()); + stripeContext.setWriterTimezone(footer.getWriterTimezone()); + + StreamManager streamManager = new StreamManager(); + streamManager.setStripeContext(stripeContext); + + // fill the encoding info of the stripe context.
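+ // buildEncodings fills the per-column encodings; findStreams then registers the index and data streams so that later planning can locate each column's byte ranges.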
+ buildEncodings(stripeContext, footer, columnInclude); + findStreams(streamManager, stripeContext, stripe.getOffset(), footer, columnInclude); + + // figure out whether each column has null values in this stripe + boolean[] hasNull = stripeContext.getHasNull(); + Arrays.fill(hasNull, false); + for (StreamInformation stream : streamManager.getDataStreams()) { + if (stream.kind == OrcProto.Stream.Kind.PRESENT) { + hasNull[stream.column] = true; + } + } + return streamManager; + } + + public static OrcIndex readRowIndex(StreamManager streamManager, + StripeContext stripeContext, + DataReader dataReader) throws IOException { + OrcProto.Stream.Kind[] bloomFilterKinds = stripeContext.getBloomFilterKinds(); + TypeDescription schema = stripeContext.getSchema(); + + int typeCount = schema.getMaximumId() + 1; + OrcIndex output = new OrcIndex(new OrcProto.RowIndex[typeCount], + new OrcProto.Stream.Kind[typeCount], + new OrcProto.BloomFilterIndex[typeCount], + new OrcProto.BitmapIndex[typeCount]); + + System.arraycopy(bloomFilterKinds, 0, output.getBloomFilterKinds(), 0, + bloomFilterKinds.length); + BufferChunkList ranges = planIndexReading(streamManager, stripeContext); + dataReader.readFileData(ranges, false); + OrcProto.RowIndex[] indexes = output.getRowGroupIndex(); + OrcProto.BloomFilterIndex[] blooms = output.getBloomFilterIndex(); + for (StreamInformation stream : streamManager.getIndexStreams()) { + int column = stream.column; + if (stream.firstChunk != null) { + CodedInputStream data = InStream.createCodedInputStream(InStream.create( + "index", stream.firstChunk, stream.offset, + stream.length, getStreamOptions( + stripeContext, + column, stream.kind))); + switch (stream.kind) { + case ROW_INDEX: + indexes[column] = OrcProto.RowIndex.parseFrom(data); + break; + case BLOOM_FILTER: + case BLOOM_FILTER_UTF8: + break; + default: + break; + } + } + } + return output; + } + + /** + * Get the planned IO ranges by requested columns and row-groups + * + * @param stripeContext stripe-level context. + * @param streamManager stream info of multiple columns. + * @param streamOptions stream options. + * @param index preheated row-indexes in this stripe. + * @param rowGroupIncludeMap Mapping from column-id to row-group bitmap. + * @param selectedColumns selected columns. + * @return Planned IO ranges info for the IO processing. + */ + public static BufferChunkList planGroupsInColumn( + StripeContext stripeContext, + StreamManager streamManager, + InStream.StreamOptions streamOptions, + OrcIndex index, + Map rowGroupIncludeMap, + boolean[] selectedColumns) { + BufferChunkList chunks = new BufferChunkList(); + + OrcProto.RowIndex[] rowIndex = index.getRowGroupIndex(); + boolean isCompressed = streamOptions.getCodec() != null; + int bufferSize = streamOptions.getBufferSize(); + + for (StreamInformation stream : streamManager.getDataStreams()) { + // Check the column id. + // The count of matched streams is >= 1 because there are many stream kind in one column. 
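+ // (A single column may expose several streams at once, e.g. PRESENT plus DATA plus dictionary streams.)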
+ if (stream.column < selectedColumns.length && selectedColumns[stream.column]) { + processStream( + stripeContext, + stream, + chunks, + rowIndex, + 0, + rowGroupIncludeMap.get(stream.column), + isCompressed, + bufferSize); + } + } + + return chunks; + } + + public static BufferChunkList planGroupsInColumn( + StripeContext stripeContext, + StreamManager streamManager, + InStream.StreamOptions streamOptions, + OrcIndex index, + boolean[] rowGroupInclude, + int columnId) { + BufferChunkList chunks = new BufferChunkList(); + + OrcProto.RowIndex[] rowIndex = index.getRowGroupIndex(); + boolean isCompressed = streamOptions.getCodec() != null; + int bufferSize = streamOptions.getBufferSize(); + + for (StreamInformation stream : streamManager.getDataStreams()) { + // Check the column id. + // The count of matched streams is >= 1 because there are many stream kind in one column. + if (stream.column == columnId) { + + processStream(stripeContext, stream, chunks, rowIndex, 0, + rowGroupInclude, isCompressed, bufferSize); + } + } + + return chunks; + } + + private static void processStream( + StripeContext stripeContext, + StreamInformation stream, + BufferChunkList result, + OrcProto.RowIndex[] rowIndex, + int startGroup, + boolean[] includedRowGroups, + boolean isCompressed, + int bufferSize) { + + // check existence of row-groups. + if (!hasTrue(includedRowGroups)) { + return; + } + + OrcProto.ColumnEncoding[] encodings = stripeContext.getEncodings(); + TypeDescription schema = stripeContext.getSchema(); + boolean[] hasNull = stripeContext.getHasNull(); + + if (RecordReaderUtils.isDictionary(stream.kind, encodings[stream.column])) { + addChunk1(stripeContext, result, stream, stream.offset, stream.length); + } else { + int column = stream.column; + OrcProto.RowIndex ri = rowIndex[column]; + TypeDescription.Category kind = schema.findSubtype(column).getCategory(); + long alreadyRead = 0; + for (int group = startGroup; group < includedRowGroups.length; ++group) { + if (includedRowGroups[group]) { + // find the last group that is selected + int endGroup = group; + while (endGroup < includedRowGroups.length - 1 && + includedRowGroups[endGroup + 1]) { + endGroup += 1; + } + int posn = RecordReaderUtils.getIndexPosition( + encodings[stream.column].getKind(), kind, stream.kind, + isCompressed, hasNull[column]); + long start = Math.max(alreadyRead, + stream.offset + (group == 0 ? 0 : ri.getEntry(group).getPositions(posn))); + long end = stream.offset; + if (endGroup == includedRowGroups.length - 1) { + end += stream.length; + } else { + long nextGroupOffset = ri.getEntry(endGroup + 1).getPositions(posn); + end += RecordReaderUtils.estimateRgEndOffset(isCompressed, + bufferSize, false, nextGroupOffset, + stream.length); + } + if (alreadyRead < end) { + addChunk1(stripeContext, result, stream, start, end - start); + alreadyRead = end; + } + group = endGroup; + } + } + } + } + + private static void addChunk1( + StripeContext stripeContext, + BufferChunkList list, + StreamInformation stream, + long offset, long length) { + long maxBufferSize = stripeContext.getMaxBufferSize(); + while (length > 0) { + long thisLen = Math.min(length, maxBufferSize); + BufferChunk chunk = new BufferChunk(offset, (int) thisLen); + if (stream.firstChunk == null) { + stream.firstChunk = chunk; + } + list.add(chunk); + offset += thisLen; + length -= thisLen; + } + } + + // NOTE: we must ensure that the groupId is monotonically increasing. 
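+    // Otherwise the per-stream chunk lists built by addChunk would no longer be
+    // ordered by file offset, and the planned IO ranges could be merged wrongly.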
+ public static BufferChunkList planNextGroup( + StripeContext stripeContext, + StreamManager streamManager, + InStream.StreamOptions streamOptions, + OrcIndex index, + int groupId, + boolean isLastGroup, + boolean[] selectedColumns) { + BufferChunkList chunks = new BufferChunkList(); + + boolean isCompressed = streamOptions.getCodec() != null; + int bufferSize = streamOptions.getBufferSize(); + OrcProto.RowIndex[] rowIndex = index.getRowGroupIndex(); + + for (StreamInformation stream : streamManager.getDataStreams()) { + // check the column id in selected columns bitmap + if (stream.column < selectedColumns.length && selectedColumns[stream.column]) { + processStream( + stripeContext, + stream, + chunks, + rowIndex, + groupId, + isLastGroup, + isCompressed, + bufferSize); + } + } + + return chunks; + } + + private static void processStream( + StripeContext stripeContext, + StreamInformation stream, + BufferChunkList result, + OrcProto.RowIndex[] rowIndex, + int group, + boolean isLastGroup, + boolean isCompressed, + int bufferSize) { + OrcProto.ColumnEncoding[] encodings = stripeContext.getEncodings(); + TypeDescription schema = stripeContext.getSchema(); + boolean[] hasNull = stripeContext.getHasNull(); + long maxBufferSize = stripeContext.getMaxBufferSize(); + + if (RecordReaderUtils.isDictionary(stream.kind, encodings[stream.column])) { + addChunk(result, stream, maxBufferSize, stream.offset, stream.length); + } else { + int column = stream.column; + OrcProto.RowIndex ri = rowIndex[column]; + TypeDescription.Category kind = schema.findSubtype(column).getCategory(); + + int position = RecordReaderUtils.getIndexPosition( + encodings[stream.column].getKind(), kind, stream.kind, + isCompressed, hasNull[column]); + long start = stream.offset + (group == 0 ? 0 : ri.getEntry(group).getPositions(position)); + long end = stream.offset; + if (isLastGroup) { + end += stream.length; + } else { + long nextGroupOffset = ri.getEntry(group + 1).getPositions(position); + end += RecordReaderUtils.estimateRgEndOffset(isCompressed, + bufferSize, false, nextGroupOffset, + stream.length); + } + + addChunk(result, stream, maxBufferSize, start, end - start); + } + } + + private static void addChunk(BufferChunkList list, StreamInformation stream, long maxBufferSize, + long offset, long length) { + while (length > 0) { + long thisLen = Math.min(length, maxBufferSize); + BufferChunk chunk = new BufferChunk(offset, (int) thisLen); + if (stream.firstChunk == null) { + stream.firstChunk = chunk; + } else { + // If we handle chunks in stripe-level at-a-time, we don't need this logic. + // Because all chunks in a stream will be fetched at once. + stream.firstChunk.next = chunk; + } + list.add(chunk); + offset += thisLen; + length -= thisLen; + } + } + + private static boolean hasSomeRowGroups(boolean[] includedRowGroups) { + for (boolean include : includedRowGroups) { + if (include) { + return true; + } + } + return false; + } + + /** + * Get the stream options for a stream in a stripe. 
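+     * <p>For a column covered by an encryption variant, the returned options are
+     * augmented with the stripe-local key and IV; otherwise the shared
+     * compression options of the stripe are returned as-is.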
+ * + * @param column the column we are reading + * @param kind the stream kind we are reading + * @return a new stream options to read the given column + */ + public static InStream.StreamOptions getStreamOptions( + StripeContext stripeContext, + int column, + OrcProto.Stream.Kind kind + ) throws IOException { + ReaderEncryption encryption = stripeContext.getEncryption(); + long currentStripeId = stripeContext.getCurrentStripeId(); + long originalStripeId = stripeContext.getOriginalStripeId(); + + ReaderEncryptionVariant variant = encryption.getVariant(column); + InStream.StreamOptions compression = stripeContext.getStreamOptions(); + if (variant == null) { + return compression; + } else { + EncryptionAlgorithm algorithm = variant.getKeyDescription().getAlgorithm(); + byte[] iv = new byte[algorithm.getIvLength()]; + Key key = variant.getStripeKey(currentStripeId); + CryptoUtils.modifyIvForStream(column, kind, originalStripeId).accept(iv); + return new InStream.StreamOptions(compression) + .withEncryption(algorithm, key, iv); + } + } + + /** + * The length of encoding[] array is equal to column count, + * and the encoding of column that not included is null. + */ + public static OrcProto.ColumnEncoding[] buildEncodings( + ReaderEncryption encryption, + boolean[] columnInclude, + OrcProto.StripeFooter footer) { + OrcProto.ColumnEncoding[] encodings = + new OrcProto.ColumnEncoding[columnInclude.length]; + + for (int c = 0; c < encodings.length; ++c) { + if (columnInclude == null || columnInclude[c]) { + ReaderEncryptionVariant variant = encryption.getVariant(c); + if (variant == null) { + encodings[c] = footer.getColumns(c); + } else { + int subColumn = c - variant.getRoot().getId(); + encodings[c] = footer.getEncryption(variant.getVariantId()) + .getEncoding(subColumn); + } + } + } + return encodings; + } + + // fill the encoding info of stripe context. + private static void buildEncodings( + StripeContext stripeContext, + OrcProto.StripeFooter footer, + boolean[] columnInclude) { + + OrcProto.ColumnEncoding[] encodings = stripeContext.getEncodings(); + ReaderEncryption encryption = stripeContext.getEncryption(); + + for (int c = 0; c < encodings.length; ++c) { + if (columnInclude == null || columnInclude[c]) { + ReaderEncryptionVariant variant = encryption.getVariant(c); + if (variant == null) { + encodings[c] = footer.getColumns(c); + } else { + int subColumn = c - variant.getRoot().getId(); + encodings[c] = footer.getEncryption(variant.getVariantId()) + .getEncoding(subColumn); + } + } + } + } + + /** + * Find the complete list of streams. 
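+     * <p>Unencrypted streams are laid out first, followed by the streams of each
+     * encryption variant; the file offset is accumulated stream by stream.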
+ * + * @param streamStart the starting offset of streams in the file + * @param footer the footer for the stripe + * @param columnInclude which columns are being read + */ + private static void findStreams( + StreamManager streamManager, + StripeContext stripeContext, + long streamStart, + OrcProto.StripeFooter footer, + boolean[] columnInclude) { + OrcProto.Stream.Kind[] bloomFilterKinds = stripeContext.getBloomFilterKinds(); + ReaderEncryption encryption = stripeContext.getEncryption(); + + long currentOffset = streamStart; + Arrays.fill(bloomFilterKinds, null); + for (OrcProto.Stream stream : footer.getStreamsList()) { + currentOffset += handleStream(streamManager, stripeContext, currentOffset, columnInclude, stream, null); + } + + // Add the encrypted streams that we are using + for (ReaderEncryptionVariant variant : encryption.getVariants()) { + int variantId = variant.getVariantId(); + OrcProto.StripeEncryptionVariant stripeVariant = + footer.getEncryption(variantId); + for (OrcProto.Stream stream : stripeVariant.getStreamsList()) { + currentOffset += + handleStream(streamManager, stripeContext, currentOffset, columnInclude, stream, variant); + } + } + } + + /** + * For each stream, decide whether to include it in the list of streams. + * + * @param offset the position in the file for this stream + * @param columnInclude which columns are being read + * @param stream the stream to consider + * @param variant the variant being read + * @return the offset for the next stream + */ + private static long handleStream( + StreamManager streamManager, + StripeContext stripeContext, + long offset, + boolean[] columnInclude, + OrcProto.Stream stream, + ReaderEncryptionVariant variant) { + OrcProto.Stream.Kind[] bloomFilterKinds = stripeContext.getBloomFilterKinds(); + ReaderEncryption encryption = stripeContext.getEncryption(); + boolean ignoreNonUtf8BloomFilter = stripeContext.isIgnoreNonUtf8BloomFilter(); + TypeDescription schema = stripeContext.getSchema(); + OrcFile.WriterVersion version = stripeContext.getVersion(); + + int column = stream.getColumn(); + if (stream.hasKind()) { + OrcProto.Stream.Kind kind = stream.getKind(); + + if (kind == OrcProto.Stream.Kind.ENCRYPTED_INDEX || + kind == OrcProto.Stream.Kind.ENCRYPTED_DATA) { + // Ignore the placeholders that shouldn't count toward moving the + // offsets. + return 0; + } + + if (columnInclude[column] && encryption.getVariant(column) == variant) { + // Ignore any broken bloom filters unless the user forced us to use + // them. 
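+                // (hadBadBloomFilters below enumerates the writer versions that
+                // produced unusable bloom filters)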
+ if (kind != OrcProto.Stream.Kind.BLOOM_FILTER || + !ignoreNonUtf8BloomFilter || + !hadBadBloomFilters(schema.findSubtype(column).getCategory(), + version)) { + // record what kind of bloom filters we are using + if (kind == OrcProto.Stream.Kind.BLOOM_FILTER_UTF8 || + kind == OrcProto.Stream.Kind.BLOOM_FILTER) { + bloomFilterKinds[column] = kind; + } + StreamInformation info = + new StreamInformation(kind, column, offset, stream.getLength()); + switch (StreamName.getArea(kind)) { + case DATA: + streamManager.getDataStreams().add(info); + break; + case INDEX: + streamManager.getIndexStreams().add(info); + break; + default: + } + streamManager.getStreams().put(new StreamName(column, kind), info); + } + } + } + return stream.getLength(); + } + + private static boolean hadBadBloomFilters(TypeDescription.Category category, + OrcFile.WriterVersion version) { + switch (category) { + case STRING: + case CHAR: + case VARCHAR: + return !version.includes(OrcFile.WriterVersion.HIVE_12055); + case DECIMAL: + // fixed by ORC-101, but ORC-101 changed stream kind to BLOOM_FILTER_UTF8 + return true; + case TIMESTAMP: + return !version.includes(OrcFile.WriterVersion.ORC_135); + default: + return false; + } + } + + /** + * Plans the list of disk ranges that the given stripe needs to read the + * indexes. All of the positions are relative to the start of the stripe. + * + * @return a list of merged disk ranges to read + */ + private static BufferChunkList planIndexReading(StreamManager streamManager, StripeContext stripeContext) { + OrcProto.Stream.Kind[] bloomFilterKinds = stripeContext.getBloomFilterKinds(); + BufferChunkList result = new BufferChunkList(); + for (StreamInformation stream : streamManager.getIndexStreams()) { + switch (stream.kind) { + case ROW_INDEX: + addChunk(result, stream, stripeContext.getMaxBufferSize(), stream.offset, stream.length); + break; + case BLOOM_FILTER: + case BLOOM_FILTER_UTF8: + break; + default: + // PASS + break; + } + } + return result; + } + + private static boolean hasTrue(boolean[] rowGroups) { + if (rowGroups == null) { + return false; + } + for (int i = 0; i < rowGroups.length; i++) { + if (rowGroups[i]) { + return true; + } + } + return false; + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/StreamManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/StreamManager.java new file mode 100644 index 000000000..64e531bd4 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/StreamManager.java @@ -0,0 +1,98 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.executor.operator.scan.impl;
+
+import org.apache.orc.impl.InStream;
+import org.apache.orc.impl.StreamName;
+import org.apache.orc.impl.reader.StreamInformation;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class StreamManager {
+    private StripeContext stripeContext;
+
+    private final Map<StreamName, StreamInformation> streams;
+    // the index streams sorted by offset
+    private final List<StreamInformation> indexStreams;
+    // the data streams sorted by offset
+    private final List<StreamInformation> dataStreams;
+
+    public StreamManager() {
+        streams = new HashMap<>();
+        indexStreams = new ArrayList<>();
+        dataStreams = new ArrayList<>();
+    }
+
+    /**
+     * Get the stream for the given name.
+     * It is assumed that the name does not have the encryption set,
+     * because the TreeReaders don't know if they are reading encrypted data.
+     * Assumes that readData has already been called on this stripe.
+     *
+     * @param name the column/kind of the stream
+     * @return a new stream with the options set correctly, or null if the stream is absent
+     */
+    public InStream getStream(StreamName name) throws IOException {
+        StreamInformation stream = streams.get(name);
+        if (stream == null) {
+            // check for absence before touching stream fields to avoid an NPE.
+            return null;
+        }
+
+        InStream.StreamOptions streamOptions = StaticStripePlanner.getStreamOptions(
+            stripeContext, stream.column, stream.kind
+        );
+
+        return InStream.create(
+            name,
+            stream.firstChunk,
+            stream.offset,
+            stream.length,
+            streamOptions);
+    }
+
+    public StripeContext getStripeContext() {
+        return stripeContext;
+    }
+
+    public StreamManager setStripeContext(StripeContext stripeContext) {
+        this.stripeContext = stripeContext;
+        return this;
+    }
+
+    public Map<StreamName, StreamInformation> getStreams() {
+        return streams;
+    }
+
+    public List<StreamInformation> getIndexStreams() {
+        return indexStreams;
+    }
+
+    public List<StreamInformation> getDataStreams() {
+        return dataStreams;
+    }
+
+    @Override
+    public String toString() {
+        return "StreamManager{" +
+            "streams=" + streams +
+            ", indexStreams=" + indexStreams +
+            ", dataStreams=" + dataStreams +
+            '}';
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/StripeContext.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/StripeContext.java
new file mode 100644
index 000000000..0695c8e52
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/StripeContext.java
@@ -0,0 +1,145 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import org.apache.orc.OrcFile; +import org.apache.orc.OrcProto; +import org.apache.orc.StripeInformation; +import org.apache.orc.TypeDescription; +import org.apache.orc.impl.InStream; +import org.apache.orc.impl.reader.ReaderEncryption; + +public class StripeContext { + private final StripeInformation stripeInformation; + + // global information + private final TypeDescription schema; + private final OrcFile.WriterVersion version; + private final OrcProto.ColumnEncoding[] encodings; + private final ReaderEncryption encryption; + + private final InStream.StreamOptions streamOptions; + + private final boolean ignoreNonUtf8BloomFilter; + private final long maxBufferSize; + private final OrcProto.Stream.Kind[] bloomFilterKinds; + + // does each column have a null stream? + private final boolean[] hasNull; + + // specific to the current stripe + private String writerTimezone; + private long currentStripeId; + private long originalStripeId; + + private boolean[] columnInclude; + + public StripeContext(StripeInformation stripeInformation, + TypeDescription schema, + ReaderEncryption encryption, + OrcFile.WriterVersion version, + InStream.StreamOptions streamOptions, + boolean ignoreNonUtf8BloomFilter, + long maxBufferSize) { + this.stripeInformation = stripeInformation; + this.schema = schema; + this.version = version; + this.encodings = new OrcProto.ColumnEncoding[schema.getMaximumId() + 1]; + this.encryption = encryption; + this.streamOptions = streamOptions; + this.ignoreNonUtf8BloomFilter = ignoreNonUtf8BloomFilter; + this.bloomFilterKinds = new OrcProto.Stream.Kind[schema.getMaximumId() + 1]; + this.hasNull = new boolean[schema.getMaximumId() + 1]; + this.maxBufferSize = maxBufferSize; + } + + public StripeContext setWriterTimezone(String writerTimezone) { + this.writerTimezone = writerTimezone; + return this; + } + + public StripeContext setCurrentStripeId(long currentStripeId) { + this.currentStripeId = currentStripeId; + return this; + } + + public StripeContext setOriginalStripeId(long originalStripeId) { + this.originalStripeId = originalStripeId; + return this; + } + + public StripeContext setColumnInclude(boolean[] columnInclude) { + this.columnInclude = columnInclude; + return this; + } + + public TypeDescription getSchema() { + return schema; + } + + public OrcFile.WriterVersion getVersion() { + return version; + } + + public OrcProto.ColumnEncoding[] getEncodings() { + return encodings; + } + + public ReaderEncryption getEncryption() { + return encryption; + } + + public boolean isIgnoreNonUtf8BloomFilter() { + return ignoreNonUtf8BloomFilter; + } + + public long getMaxBufferSize() { + return maxBufferSize; + } + + public OrcProto.Stream.Kind[] getBloomFilterKinds() { + return bloomFilterKinds; + } + + public boolean[] getHasNull() { + return hasNull; + } + + public String getWriterTimezone() { + return writerTimezone; + } + + public long getCurrentStripeId() { + return currentStripeId; + } + + public long getOriginalStripeId() { + return originalStripeId; + } + + public boolean[] getColumnInclude() { + return columnInclude; + } + + public StripeInformation getStripeInformation() { + return stripeInformation; + } + + public InStream.StreamOptions getStreamOptions() { + return streamOptions; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/TimestampColumnReader.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/TimestampColumnReader.java new file mode 100644 index 000000000..c26062379 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/TimestampColumnReader.java @@ -0,0 +1,112 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.utils.time.MySQLTimeConverter; +import com.alibaba.polardbx.common.utils.time.core.MySQLTimeVal; +import com.alibaba.polardbx.common.utils.time.core.MysqlDateTime; +import com.alibaba.polardbx.common.utils.time.core.TimeStorage; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.chunk.TimestampBlock; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.utils.TimestampUtils; +import com.alibaba.polardbx.rpc.result.XResultUtil; +import com.google.common.base.Preconditions; +import org.apache.orc.OrcProto; +import org.apache.orc.impl.OrcIndex; + +import java.io.IOException; +import java.time.ZoneId; + +import static com.alibaba.polardbx.rpc.result.XResultUtil.ZERO_TIMESTAMP_LONG_VAL; + +public class TimestampColumnReader extends AbstractLongColumnReader { + + private final ZoneId zoneId; + + public TimestampColumnReader(int columnId, boolean isPrimaryKey, + StripeLoader stripeLoader, + OrcIndex orcIndex, + RuntimeMetrics metrics, + OrcProto.ColumnEncoding.Kind kind, int indexStride, + boolean enableMetrics, ExecutionContext context) { + super(columnId, isPrimaryKey, stripeLoader, orcIndex, metrics, kind, indexStride, enableMetrics); + this.zoneId = TimestampUtils.getZoneId(context); + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof TimestampBlock); + init(); + + long start = System.nanoTime(); + + long[] packed = ((TimestampBlock) randomAccessBlock).getPacked(); + boolean[] nulls = randomAccessBlock.nulls(); + + if (present == null) { + randomAccessBlock.setHasNull(false); + for (int i = 0; i < positionCount; i++) { + // no null value. + long longVal = data.next(); + long datetime = parseTimestamp(longVal, zoneId); + packed[i] = datetime; + lastPosition++; + } + + // destroy null array to save the memory. 
+ ((Block) randomAccessBlock).destroyNulls(true); + } else { + randomAccessBlock.setHasNull(true); + // there are some null values + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + packed[i] = 0; + } else { + // if not null + long longVal = data.next(); + long datetime = parseTimestamp(longVal, zoneId); + packed[i] = datetime; + } + lastPosition++; + } + } + + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } + + private static long parseTimestamp(long timestamp, ZoneId zoneId) { + if (timestamp == ZERO_TIMESTAMP_LONG_VAL) { + return TimeStorage.writeTimestamp(MysqlDateTime.zeroDateTime()); + } else { + MySQLTimeVal mySQLTimeVal = XResultUtil.longToTimeValue(timestamp); + MysqlDateTime mysqlDateTime = + MySQLTimeConverter.convertTimestampToDatetime(mySQLTimeVal, zoneId); + return TimeStorage.writeTimestamp(mysqlDateTime); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/UnsignedLongColumnReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/UnsignedLongColumnReader.java new file mode 100644 index 000000000..fb1987557 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/impl/UnsignedLongColumnReader.java @@ -0,0 +1,86 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.operator.scan.impl; + +import com.alibaba.polardbx.common.datatype.UInt64Utils; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.chunk.ULongBlock; +import com.alibaba.polardbx.executor.operator.scan.StripeLoader; +import com.alibaba.polardbx.executor.operator.scan.metrics.RuntimeMetrics; +import com.google.common.base.Preconditions; +import org.apache.orc.OrcProto; +import org.apache.orc.impl.OrcIndex; + +import java.io.IOException; + +public class UnsignedLongColumnReader extends AbstractLongColumnReader { + public UnsignedLongColumnReader(int columnId, boolean isPrimaryKey, StripeLoader stripeLoader, + OrcIndex orcIndex, + RuntimeMetrics metrics, + OrcProto.ColumnEncoding.Kind kind, int indexStride, + boolean enableMetrics) { + super(columnId, isPrimaryKey, stripeLoader, orcIndex, metrics, kind, indexStride, enableMetrics); + } + + @Override + public void next(RandomAccessBlock randomAccessBlock, int positionCount) throws IOException { + Preconditions.checkArgument(isOpened.get()); + Preconditions.checkArgument(!openFailed.get()); + Preconditions.checkArgument(randomAccessBlock instanceof ULongBlock); + + init(); + + long start = System.nanoTime(); + ULongBlock block = (ULongBlock) randomAccessBlock; + long[] array = block.longArray(); + boolean[] nulls = block.nulls(); + + if (present == null) { + randomAccessBlock.setHasNull(false); + for (int i = 0; i < positionCount; i++) { + // no null value. + long longVal = data.next(); + array[i] = longVal ^ UInt64Utils.FLIP_MASK; + lastPosition++; + } + // destroy null array to save the memory. + if (randomAccessBlock instanceof Block) { + ((Block) randomAccessBlock).destroyNulls(true); + } + } else { + // there are some null values + randomAccessBlock.setHasNull(true); + for (int i = 0; i < positionCount; i++) { + if (present.next() != 1) { + // for present + nulls[i] = true; + array[i] = 0; + } else { + // if not null + long longVal = data.next(); + array[i] = longVal ^ UInt64Utils.FLIP_MASK; + } + lastPosition++; + } + } + // metrics + if (enableMetrics) { + parseTimer.inc(System.nanoTime() - start); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/MetricsNameBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/MetricsNameBuilder.java new file mode 100644 index 000000000..a2a32c373 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/MetricsNameBuilder.java @@ -0,0 +1,57 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.executor.operator.scan.metrics;
+
+import org.apache.orc.impl.StreamName;
+
+public class MetricsNameBuilder {
+    public static String streamMetricsKey(StreamName streamName, ProfileKey profileKey) {
+        return String.join(".",
+            profileKey.getName(),
+            streamName.toString().replace(' ', '.'));
+    }
+
+    public static String columnsMetricsKey(boolean[] selectedColumns, ProfileKey profileKey) {
+        return String.join(".",
+            profileKey.getName(),
+            "columns",
+            bitmapSuffix(selectedColumns));
+    }
+
+    public static String columnMetricsKey(int targetColumnId, ProfileKey profileKey) {
+        return String.join(".",
+            profileKey.getName(),
+            "column",
+            String.valueOf(targetColumnId));
+    }
+
+    public static String bitmapSuffix(boolean[] bitmap) {
+        // renders the selected positions, e.g. {true, false, true} -> "[0,2]";
+        // a separator is only emitted between elements, never as a trailing comma.
+        StringBuilder builder = new StringBuilder();
+        builder.append('[');
+        boolean first = true;
+        for (int i = 0; i < bitmap.length; i++) {
+            if (!bitmap[i]) {
+                continue;
+            }
+            if (!first) {
+                builder.append(',');
+            }
+            builder.append(i);
+            first = false;
+        }
+        builder.append(']');
+        return builder.toString();
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/ORCMetricsWrapper.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/ORCMetricsWrapper.java
new file mode 100644
index 000000000..e7be889cd
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/ORCMetricsWrapper.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan.metrics;
+
+import com.codahale.metrics.Counter;
+import org.apache.orc.customized.ORCProfile;
+
+/**
+ * Wraps a runtime metrics counter and adapts it to the ORC Profile interface.
+ */
+public class ORCMetricsWrapper implements ORCProfile {
+    private final String name;
+    private final Counter counter;
+    private final RuntimeMetrics runtimeMetrics;
+
+    public ORCMetricsWrapper(String name, String parentName, ProfileUnit unit, RuntimeMetrics runtimeMetrics) {
+        this.name = name;
+        this.runtimeMetrics = runtimeMetrics;
+        this.counter = runtimeMetrics.addCounter(name, parentName, unit);
+    }
+
+    @Override
+    public String name() {
+        return name;
+    }
+
+    @Override
+    public void update(long delta) {
+        counter.inc(delta);
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/ProfileAccumulatorType.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/ProfileAccumulatorType.java
new file mode 100644
index 000000000..19626a5fe
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/ProfileAccumulatorType.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan.metrics;
+
+/**
+ * The method of accumulation in derived counter node.
+ */
+public enum ProfileAccumulatorType {
+    MIN,
+    MAX,
+    AVG,
+    SUM,
+    NONE // don't accumulate.
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/ProfileKey.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/ProfileKey.java
new file mode 100644
index 000000000..839b41e7c
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/ProfileKey.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan.metrics;
+
+/**
+ * Describes a single metrics key: its name, description, profile type and unit.
+ */ +public class ProfileKey { + private final String name; + private final String description; + private final ProfileType profileType; + private final ProfileUnit profileUnit; + + ProfileKey(String name, String description, ProfileType profileType, ProfileUnit profileUnit) { + this.name = name; + this.description = description; + this.profileType = profileType; + this.profileUnit = profileUnit; + } + + public String getName() { + return name; + } + + public String getDescription() { + return description; + } + + public ProfileUnit getProfileUnit() { + return profileUnit; + } + + public ProfileType getProfileType() { + return profileType; + } + + public static Builder builder() { + return new Builder(); + } + + static class Builder { + private String name; + private String description; + private ProfileType profileType; + private ProfileUnit profileUnit; + + public ProfileKey build() { + return new ProfileKey(name, description, profileType, profileUnit); + } + + public Builder setName(String name) { + this.name = name; + return this; + } + + public Builder setProfileUnit(ProfileUnit profileUnit) { + this.profileUnit = profileUnit; + return this; + } + + public Builder setDescription(String description) { + this.description = description; + return this; + } + + public Builder setProfileType(ProfileType profileType) { + this.profileType = profileType; + return this; + } + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/ProfileKeys.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/ProfileKeys.java new file mode 100644 index 000000000..091abf09d --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/ProfileKeys.java @@ -0,0 +1,127 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.metrics; + +/** + * Collections of pre-defined metrics keys. 
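+ * <p>A minimal, illustrative registration of one of these keys against a
+ * {@link RuntimeMetrics} tree (the {@code metrics} instance here is assumed
+ * to exist already):
+ * <pre>
+ *     ProfileKey key = ProfileKeys.ORC_COLUMN_PARSE_TIMER;
+ *     Counter parseTimer = metrics.addCounter(key.getName(), null, key.getProfileUnit());
+ *     parseTimer.inc(System.nanoTime() - start);
+ * </pre>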
+ */ +public class ProfileKeys { + public static final ProfileKey PUSH_CHUNK_COUNTER = ProfileKey.builder() + .setName("Operator.PullChunkCounter") + .setDescription("The count of pulled chunks in this operator.") + .setProfileType(ProfileType.COUNTER) + .setProfileUnit(ProfileUnit.NONE) + .build(); + + public static final ProfileKey PULL_CHUNK_ROWS_COUNTER = ProfileKey.builder() + .setName("Operator.PullChunkRows") + .setDescription("The count of rows in pulled chunks in this operator.") + .setProfileType(ProfileType.COUNTER) + .setProfileUnit(ProfileUnit.NONE) + .build(); + + public static final ProfileKey ORC_STRIPE_LOADER_OPEN_TIMER = ProfileKey.builder() + .setName("ORC.StripeLoaderOpenTimer") + .setDescription("The time cost of stripe-loader opening.") + .setProfileType(ProfileType.TIMER) + .setProfileUnit(ProfileUnit.NANO_SECOND) + .build(); + + public static final ProfileKey ORC_IN_STREAM_MEMORY_COUNTER = ProfileKey.builder() + .setName("ORC.InStreamMemoryCounter") + .setDescription("The count of memory allocation in bytes during processing of in-stream.") + .setProfileType(ProfileType.COUNTER) + .setProfileUnit(ProfileUnit.BYTES) + .build(); + + public static final ProfileKey ORC_IN_STREAM_DECOMPRESS_TIMER = ProfileKey.builder() + .setName("ORC.InStreamDecompressTimer") + .setDescription("The time cost of decompression during processing of in-stream.") + .setProfileType(ProfileType.TIMER) + .setProfileUnit(ProfileUnit.NANO_SECOND) + .build(); + + public static final ProfileKey ORC_IO_RAW_DATA_MEMORY_COUNTER = ProfileKey.builder() + .setName("ORC.IORawDataMemoryCounter") + .setDescription("The count of memory allocation in bytes during processing of data reading.") + .setProfileType(ProfileType.COUNTER) + .setProfileUnit(ProfileUnit.BYTES) + .build(); + + public static final ProfileKey ORC_IO_RAW_DATA_TIMER = ProfileKey.builder() + .setName("ORC.IORawDataTimer") + .setDescription("The time cost of processing of data reading.") + .setProfileType(ProfileType.TIMER) + .setProfileUnit(ProfileUnit.NANO_SECOND) + .build(); + + public static final ProfileKey ORC_LOGICAL_BYTES_RANGE = ProfileKey.builder() + .setName("ORC.LogicalBytesRange") + .setDescription("The count of bytes range of data processing plan.") + .setProfileType(ProfileType.COUNTER) + .setProfileUnit(ProfileUnit.BYTES) + .build(); + + public static final ProfileKey ORC_STREAM_READER_MEMORY_COUNTER = ProfileKey.builder() + .setName("ORC.StreamReaderMemoryCounter") + .setDescription("The count of bytes allocated in stream reader.") + .setProfileType(ProfileType.COUNTER) + .setProfileUnit(ProfileUnit.BYTES) + .build(); + + public static final ProfileKey ORC_COLUMN_IO_PREPARING_TIMER = ProfileKey.builder() + .setName("ORC.ColumnIOPreparingTimer") + .setDescription("The time cost of data IO data preparing for stripe-loader.") + .setProfileType(ProfileType.TIMER) + .setProfileUnit(ProfileUnit.NANO_SECOND) + .build(); + + public static final ProfileKey ORC_COLUMN_SEEK_TIMER = ProfileKey.builder() + .setName("ORC.ColumnSeekTimer") + .setDescription("The time cost of position seeking in column reader.") + .setProfileType(ProfileType.TIMER) + .setProfileUnit(ProfileUnit.NANO_SECOND) + .build(); + + public static final ProfileKey ORC_COLUMN_PARSE_TIMER = ProfileKey.builder() + .setName("ORC.ColumnParseTimer") + .setDescription("The time cost of column data parsing in column reader.") + .setProfileType(ProfileType.TIMER) + .setProfileUnit(ProfileUnit.NANO_SECOND) + .build(); + + public static final ProfileKey 
SCAN_WORK_BLOCK_MEMORY_COUNTER = ProfileKey.builder() + .setName("ScanWork.BlockMemoryCounter") + .setDescription("The count of bytes allocated in block loader.") + .setProfileType(ProfileType.COUNTER) + .setProfileUnit(ProfileUnit.BYTES) + .build(); + + public static final ProfileKey SCAN_WORK_BLOCK_LOAD_TIMER = ProfileKey.builder() + .setName("ScanWork.BlockLoadTimer") + .setDescription("The time cost of loading in block loader.") + .setProfileType(ProfileType.TIMER) + .setProfileUnit(ProfileUnit.NANO_SECOND) + .build(); + + public static final ProfileKey SCAN_WORK_EVALUATION_TIMER = ProfileKey.builder() + .setName("ScanWork.EvaluationTimer") + .setDescription("The time cost of evaluation in scan-work.") + .setProfileType(ProfileType.TIMER) + .setProfileUnit(ProfileUnit.NANO_SECOND) + .build(); +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/ProfileType.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/ProfileType.java new file mode 100644 index 000000000..49f24d09b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/ProfileType.java @@ -0,0 +1,48 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.scan.metrics; + +/** + * Type of metrics. + */ +public enum ProfileType { + /** + * An incrementing and decrementing counter metric. + */ + COUNTER, + + /** + * A timer metric which aggregates timing durations and provides duration statistics. + */ + TIMER, + + /** + * A meter metric which measures mean throughput and one-, five-, and fifteen-minute + * moving average throughput. + */ + METER, + + /** + * A metric which calculates the distribution of a value. + */ + HISTOGRAM, + + /** + * A gauge metric is an instantaneous reading of a particular value. + */ + GAUGE +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/ProfileUnit.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/ProfileUnit.java new file mode 100644 index 000000000..b5590bde3 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/ProfileUnit.java @@ -0,0 +1,37 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.executor.operator.scan.metrics;
+
+/**
+ * The data unit of metrics.
+ */
+public enum ProfileUnit {
+    NONE(""),
+    BYTES("bytes"),
+    MILLIS_SECOND("ms"),
+    NANO_SECOND("ns");
+
+    private String unitStr;
+
+    ProfileUnit(String unitStr) {
+        this.unitStr = unitStr;
+    }
+
+    public String getUnitStr() {
+        return unitStr;
+    }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/RuntimeMetrics.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/RuntimeMetrics.java
new file mode 100644
index 000000000..0103d98d9
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/RuntimeMetrics.java
@@ -0,0 +1,104 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan.metrics;
+
+import com.codahale.metrics.Counter;
+
+import java.util.List;
+
+/**
+ * A runtime metrics object is a group of counters/timers organized as a tree structure.
+ * 1. Manage the name and value of counters/timers
+ * 2. Life cycle management of counters/timers: create / remove / arrange
+ * 3. Derive counter/timer value from tree structure.
+ */
+public interface RuntimeMetrics {
+    static RuntimeMetrics create(String name) {
+        return new RuntimeMetricsImpl(name);
+    }
+
+    /**
+     * Get the unique name of this runtime profile.
+     */
+    String name();
+
+    /**
+     * Get the parent of this runtime profile in tree structure.
+     */
+    RuntimeMetrics parent();
+
+    /**
+     * Get children of this runtime profile in tree structure.
+     */
+    List<RuntimeMetrics> children();
+
+    /**
+     * Add a child profile.
+     */
+    void addChild(RuntimeMetrics child);
+
+    /**
+     * The counter is owned by the RuntimeProfile object.
+     * If the counter already exists, the existing counter object is returned.
+     *
+     * @param name counter name.
+     * @param parentName If parent_name is a non-empty string, the counter is added as a child of parent name.
+     * @return New counter or, if the counter already exists, the existing counter object.
+     */
+    Counter addCounter(String name, String parentName, ProfileUnit unit);
+
+    /**
+     * Add Derived counter as metrics tree node, aggregating metrics
+     * from children instead of collecting metrics directly.
+     *
+     * @param name counter name.
+     * @param parentName the name of parent counter.
+     * @param unit profile unit.
+     * @param accumulatorType type of accumulation.
+     */
+    void addDerivedCounter(String name, String parentName, ProfileUnit unit, ProfileAccumulatorType accumulatorType);
+
+    /**
+     * Get the counter with given name.
+     *
+     * @return null if not exist.
+     */
+    Counter getCounter(String name);
+
+    /**
+     * Remove the counter or timer with given name.
+     *
+     * @return TRUE if element removed, or FALSE if element not found.
+     */
+    boolean remove(String name);
+
+    /**
+     * Generate a metrics report from the given parent node.
+     *
+     * @param parent parent node.
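+     * @return an indented, human-readable dump of the counter subtree.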
+     */
+    String report(String parent);
+
+    /**
+     * Generate a metrics report from the root node.
+     */
+    default String reportAll() {
+        return report(null);
+    }
+
+    void merge(RuntimeMetrics metrics);
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/RuntimeMetricsImpl.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/RuntimeMetricsImpl.java
new file mode 100644
index 000000000..a7840d330
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/scan/metrics/RuntimeMetricsImpl.java
@@ -0,0 +1,486 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.operator.scan.metrics;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.MetricRegistry;
+import com.google.common.base.Preconditions;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.stream.Collectors;
+
+public class RuntimeMetricsImpl implements RuntimeMetrics {
+    /**
+     * Responsible for allocation and maintenance of metrics counter.
+     */
+    private MetricRegistry registry;
+
+    /**
+     * The unique name of this runtime metrics.
+     */
+    private final String rootName;
+
+    /**
+     * The parent of this runtime metrics.
+     */
+    private RuntimeMetrics parent;
+
+    /**
+     * children metrics.
+     */
+    private Map<String, RuntimeMetrics> childrenMap;
+
+    /**
+     * Mapping from metrics name to inner object of metrics.
+     */
+    private Map<String, InnerCounter> counterMap;
+
+    public RuntimeMetricsImpl(String rootName) {
+        this.registry = new MetricRegistry();
+        this.rootName = rootName;
+
+        // concurrency-safe and sorted in lexicographical order
+        this.childrenMap = new ConcurrentSkipListMap<>(String::compareTo);
+        this.counterMap = new ConcurrentSkipListMap<>(String::compareTo);
+
+        InnerCounter rootCounter = new DerivedCounter(rootName, ProfileUnit.NONE, null, ProfileAccumulatorType.NONE);
+        counterMap.put(rootName, rootCounter);
+    }
+
+    /**
+     * A tree-structure visitor for traversal of metrics counter.
+     *
+     * @param <T> Type of returned object.
+     */
+    interface Visitor<T> {
+        /**
+         * Traversing Real counter node.
+         *
+         * @param level level of traversal.
+         */
+        T visitRealCounter(RealCounter realCounter, int level);
+
+        /**
+         * Traversing Derived counter node.
+         *
+         * @param level level of traversal.
+         */
+        T visitDerivedCounter(DerivedCounter derivedCounter, int level);
+    }
+
+    /**
+     * The tree node of runtime metrics.
+     */
+    interface InnerCounter {
+        String name();
+
+        ProfileUnit unit();
+
+        InnerCounter parent();
+
+        /**
+         * Adaptor for tree node visitor.
+         */
+        <T> T accept(Visitor<T> visitor, int level);
+    }
+
+    /**
+     * Only for summarizing the results from children nodes.
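+     * For example, a SUM node reports the total of all of its child counters,
+     * while MIN/MAX/AVG nodes fold the children's values accordingly.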
+     */
+    private static class DerivedCounter implements InnerCounter {
+        private String name;
+        private ProfileUnit unit;
+        private InnerCounter parent;
+        private List<InnerCounter> children;
+        private ProfileAccumulatorType accumulatorType;
+
+        public DerivedCounter(String name, ProfileUnit unit, InnerCounter parent,
+                              ProfileAccumulatorType accumulatorType) {
+            this.name = name;
+            this.unit = unit;
+            this.accumulatorType = accumulatorType;
+            this.children = new ArrayList<>();
+            this.parent = parent;
+        }
+
+        public ProfileAccumulatorType getAccumulatorType() {
+            return accumulatorType;
+        }
+
+        public void addChild(InnerCounter innerCounter) {
+            children.add(innerCounter);
+        }
+
+        public List<InnerCounter> getChildren() {
+            return children;
+        }
+
+        public void removeChild(InnerCounter innerCounter) {
+            children.remove(innerCounter);
+        }
+
+        @Override
+        public String name() {
+            return name;
+        }
+
+        @Override
+        public ProfileUnit unit() {
+            return unit;
+        }
+
+        @Override
+        public InnerCounter parent() {
+            return parent;
+        }
+
+        @Override
+        public <T> T accept(Visitor<T> visitor, int level) {
+            return visitor.visitDerivedCounter(this, level);
+        }
+    }
+
+    /**
+     * To collect the metrics during execution.
+     */
+    private static class RealCounter implements InnerCounter {
+        private Counter counter;
+        private String name;
+        private InnerCounter parent;
+        private ProfileUnit unit;
+
+        public RealCounter(Counter counter, String name, ProfileUnit unit, InnerCounter parent) {
+            this.counter = counter;
+            this.name = name;
+            this.unit = unit;
+            this.parent = parent;
+        }
+
+        public Counter counter() {
+            return counter;
+        }
+
+        @Override
+        public String name() {
+            return name;
+        }
+
+        @Override
+        public ProfileUnit unit() {
+            return unit;
+        }
+
+        @Override
+        public InnerCounter parent() {
+            return parent;
+        }
+
+        @Override
+        public <T> T accept(Visitor<T> visitor, int level) {
+            return visitor.visitRealCounter(this, level);
+        }
+    }
+
+    /**
+     * To traverse the metrics tree for aggregating the metrics value and storing into results map.
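+     * Each visit returns the aggregated value of the visited subtree, so a parent
+     * derived counter can fold MIN/MAX/AVG/SUM over its children's results.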
+ */ + private static class AccumulatorVisitor implements Visitor { + private Map result = new TreeMap<>(String::compareTo); + + public Map getResult() { + return result; + } + + @Override + public Long visitRealCounter(RealCounter realCounter, int level) { + long count = realCounter.counter().getCount(); + result.put(realCounter.name(), count); + return count; + } + + @Override + public Long visitDerivedCounter(DerivedCounter derivedCounter, int level) { + ProfileAccumulatorType accumulatorType = derivedCounter.getAccumulatorType(); + switch (accumulatorType) { + case NONE: { + for (InnerCounter innerCounter : derivedCounter.getChildren()) { + innerCounter.accept(this, level + 1); + } + return 0L; + } + case MAX: { + long max = -1L; + for (InnerCounter innerCounter : derivedCounter.getChildren()) { + max = Math.max(max, innerCounter.accept(this, level + 1)); + } + result.put(derivedCounter.name(), max); + return max; + } + case MIN: { + long min = Long.MAX_VALUE; + for (InnerCounter innerCounter : derivedCounter.getChildren()) { + min = Math.min(min, innerCounter.accept(this, level + 1)); + } + result.put(derivedCounter.name(), min); + return min; + } + case AVG: { + long sum = 0L; + int n = 0; + for (InnerCounter innerCounter : derivedCounter.getChildren()) { + sum += innerCounter.accept(this, level + 1); + n++; + } + long avg = sum / n; + result.put(derivedCounter.name(), avg); + return avg; + } + case SUM: + default: { + long sum = 0L; + for (InnerCounter innerCounter : derivedCounter.getChildren()) { + sum += innerCounter.accept(this, level + 1); + } + result.put(derivedCounter.name(), sum); + return sum; + } + } + + } + } + + /** + * To generate a metrics report from given root in the tree. + */ + private static class PrintVisitor implements Visitor { + private final Map resultMap; + private final StringBuilder builder; + + private PrintVisitor(Map resultMap) { + this.resultMap = resultMap; + this.builder = new StringBuilder(); + } + + public String print() { + return builder.toString(); + } + + @Override + public Void visitRealCounter(RealCounter realCounter, int level) { + while (level-- > 0) { + builder.append(' '); + builder.append(' '); + } + builder.append(realCounter.name()); + builder.append(' '); + builder.append(resultMap.get(realCounter.name())); + builder.append(' '); + builder.append(realCounter.unit().getUnitStr()); + builder.append('\n'); + return null; + } + + @Override + public Void visitDerivedCounter(DerivedCounter derivedCounter, int level) { + int nextLevel = level + 1; + while (level-- > 0) { + builder.append(' '); + builder.append(' '); + } + builder.append(derivedCounter.name()); + + if (resultMap.containsKey(derivedCounter.name())) { + builder.append(' '); + builder.append(resultMap.get(derivedCounter.name())); + builder.append(' '); + builder.append(derivedCounter.unit().getUnitStr()); + } + builder.append(':'); + builder.append('\n'); + + for (InnerCounter innerCounter : derivedCounter.getChildren()) { + innerCounter.accept(this, nextLevel); + } + return null; + } + } + + private static class MergeVisitor implements Visitor { + private RuntimeMetricsImpl metrics; + + public MergeVisitor(RuntimeMetricsImpl metrics) { + this.metrics = metrics; + } + + @Override + public Void visitRealCounter(RealCounter realCounter, int level) { + InnerCounter innerCounter; + if ((innerCounter = metrics.counterMap.get(realCounter.name())) != null + && innerCounter instanceof RealCounter) { + long addend = ((RealCounter) innerCounter).counter.getCount(); + 
+                realCounter.counter.inc(addend);
+            }
+            return null;
+        }
+
+        @Override
+        public Void visitDerivedCounter(DerivedCounter derivedCounter, int level) {
+            for (InnerCounter innerCounter : derivedCounter.getChildren()) {
+                innerCounter.accept(this, level + 1);
+            }
+            return null;
+        }
+    }
+
+    @Override
+    public String name() {
+        return rootName;
+    }
+
+    @Override
+    public RuntimeMetrics parent() {
+        return parent;
+    }
+
+    @Override
+    public List<RuntimeMetrics> children() {
+        return childrenMap.values().stream().collect(Collectors.toList());
+    }
+
+    @Override
+    public synchronized void addChild(RuntimeMetrics child) {
+        childrenMap.putIfAbsent(child.name(), child);
+        ((RuntimeMetricsImpl) child).parent = this;
+    }
+
+    @Override
+    public synchronized Counter addCounter(String name, String parentName, ProfileUnit unit) {
+        // handle parent.
+        if (parentName == null || parentName.isEmpty()) {
+            parentName = rootName;
+        } else {
+            Preconditions.checkArgument(counterMap.containsKey(parentName)
+                && counterMap.get(parentName) instanceof DerivedCounter);
+        }
+        DerivedCounter parent = (DerivedCounter) counterMap.get(parentName);
+
+        // return the existing counter if the name is already registered.
+        if (counterMap.containsKey(name)) {
+            InnerCounter innerCounter = counterMap.get(name);
+            Preconditions.checkArgument(innerCounter instanceof RealCounter);
+            return ((RealCounter) innerCounter).counter();
+        }
+
+        // create and register counter into Metric Registry.
+        Counter counter = registry.counter(name);
+        InnerCounter innerCounter = new RealCounter(counter, name, unit, parent);
+        parent.addChild(innerCounter);
+
+        counterMap.put(name, innerCounter);
+
+        return counter;
+    }
+
+    @Override
+    public synchronized void addDerivedCounter(String name, String parentName, ProfileUnit unit,
+                                               ProfileAccumulatorType accumulatorType) {
+        // handle parent.
+        if (parentName == null || parentName.isEmpty()) {
+            parentName = rootName;
+        } else {
+            Preconditions.checkArgument(counterMap.containsKey(parentName)
+                && counterMap.get(parentName) instanceof DerivedCounter);
+        }
+        DerivedCounter parent = (DerivedCounter) counterMap.get(parentName);
+
+        // reuse the existing counter if the name is already registered,
+        // so the parent does not get a duplicate child under the same name.
+        if (counterMap.containsKey(name)) {
+            InnerCounter innerCounter = counterMap.get(name);
+            Preconditions.checkArgument(innerCounter instanceof DerivedCounter);
+            return;
+        }
+
+        // create and register counter into Metric Registry.
+        InnerCounter innerCounter = new DerivedCounter(name, unit, parent, accumulatorType);
+        parent.addChild(innerCounter);
+
+        counterMap.put(name, innerCounter);
+    }
+
+    @Override
+    public Counter getCounter(String name) {
+        // return the existing counter, if any.
+        InnerCounter innerCounter;
+        if (counterMap.containsKey(name) &&
+            (innerCounter = counterMap.get(name)) instanceof RealCounter) {
+            return ((RealCounter) innerCounter).counter();
+        }
+
+        return null;
+    }
+
+    @Override
+    public boolean remove(String name) {
+        InnerCounter innerCounter;
+        if ((innerCounter = counterMap.get(name)) != null) {
+            // remove from children list
+            InnerCounter parent = counterMap.get(name).parent();
+            Preconditions.checkArgument(parent instanceof DerivedCounter);
+            ((DerivedCounter) parent).removeChild(innerCounter);
+
+            // remove from global map.
+            return counterMap.remove(name) != null;
+        }
+
+        return false;
+    }
+
+    @Override
+    public String report(String parent) {
+        if (parent == null || parent.isEmpty()) {
+            parent = rootName;
+        }
+
+        Preconditions.checkArgument(counterMap.containsKey(parent),
+            String.format("The counter: %s does not exist", parent));
+
+        // for aggregation.
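+        // The two visitors below aggregate first, then pretty-print. Illustrative output
+        // (names, values and units assumed) for a SUM-type root over two real counters:
+        //   root 17 rows:
+        //     filter.rows 7 rows
+        //     scan.rows 10 rows
+        // Each tree level indents by two spaces and derived counters end with ':'.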
+ InnerCounter innerCounter = counterMap.get(parent); + AccumulatorVisitor accumulatorVisitor = new AccumulatorVisitor(); + innerCounter.accept(accumulatorVisitor, 0); + + // for printing. + Map result = accumulatorVisitor.getResult(); + PrintVisitor printVisitor = new PrintVisitor(result); + innerCounter.accept(printVisitor, 0); + + return printVisitor.print(); + } + + @Override + public void merge(RuntimeMetrics metrics) { + if (metrics == null || !(metrics instanceof RuntimeMetricsImpl)) { + return; + } + InnerCounter innerCounter = counterMap.get(rootName); + MergeVisitor visitor = new MergeVisitor((RuntimeMetricsImpl) metrics); + innerCounter.accept(visitor, 0); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/AsyncFileBufferWriter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/AsyncFileBufferWriter.java index 4753937ef..d4b525b27 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/AsyncFileBufferWriter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/AsyncFileBufferWriter.java @@ -30,9 +30,6 @@ package com.alibaba.polardbx.executor.operator.spill; import com.alibaba.polardbx.common.datatype.UInt64; -import com.google.common.primitives.Bytes; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionParams; @@ -44,6 +41,9 @@ import com.alibaba.polardbx.optimizer.core.expression.bean.EnumValue; import com.alibaba.polardbx.optimizer.core.row.Row; import com.alibaba.polardbx.optimizer.spill.SpillMonitor; +import com.google.common.primitives.Bytes; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; import io.airlift.slice.Slice; import org.apache.calcite.sql.OutFileParams; @@ -62,8 +62,8 @@ import java.util.List; import java.util.Set; -import static com.google.common.base.Preconditions.checkState; import static com.alibaba.polardbx.executor.operator.spill.SingleStreamSpiller.NOT_BLOCKED; +import static com.google.common.base.Preconditions.checkState; import static java.util.Objects.requireNonNull; public class AsyncFileBufferWriter { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/AsyncFileSingleStreamSpiller.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/AsyncFileSingleStreamSpiller.java index 196c44174..e438a3611 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/AsyncFileSingleStreamSpiller.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/AsyncFileSingleStreamSpiller.java @@ -29,13 +29,13 @@ */ package com.alibaba.polardbx.executor.operator.spill; -import com.google.common.io.Closer; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.mpp.execution.buffer.PagesSerde; import com.alibaba.polardbx.optimizer.spill.LocalSpillMonitor; +import com.google.common.io.Closer; +import com.google.common.util.concurrent.ListenableFuture; import java.io.IOException; import 
java.util.Iterator; @@ -148,7 +148,6 @@ public Iterator getSpilledChunks(long maxChunkNum) { } } - @Override public ListenableFuture> getAllSpilledChunks() { closeWriteThenCreateReader(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/AsyncFileSingleStreamSpillerFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/AsyncFileSingleStreamSpillerFactory.java index b114a9bda..8a670aca4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/AsyncFileSingleStreamSpillerFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/AsyncFileSingleStreamSpillerFactory.java @@ -29,10 +29,8 @@ */ package com.alibaba.polardbx.executor.operator.spill; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; import com.alibaba.polardbx.common.exception.TddlRuntimeException; -import com.alibaba.polardbx.common.properties.MppConfig; +import com.alibaba.polardbx.common.properties.FileConfig; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.mpp.execution.buffer.PagesSerde; @@ -41,37 +39,37 @@ import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.spill.LocalSpillMonitor; import com.alibaba.polardbx.optimizer.spill.SpillMonitor; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; import io.airlift.concurrent.SetThreadName; import it.unimi.dsi.fastutil.ints.IntArrayList; import org.apache.calcite.sql.OutFileParams; -import javax.annotation.PostConstruct; import java.io.File; import java.io.IOException; import java.nio.file.DirectoryStream; import java.nio.file.FileStore; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.ArrayList; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; -import static com.google.common.base.Preconditions.checkArgument; import static com.alibaba.polardbx.common.exception.code.ErrorCode.ERR_DATA_OUTPUT; import static com.alibaba.polardbx.common.exception.code.ErrorCode.ERR_OUT_OF_SPILL_FD; import static com.alibaba.polardbx.common.exception.code.ErrorCode.ERR_OUT_OF_SPILL_SPACE; +import static com.google.common.base.Preconditions.checkArgument; import static java.nio.file.Files.getFileStore; import static java.nio.file.Files.newDirectoryStream; import static java.util.Objects.requireNonNull; +import static org.apache.hadoop.mapred.FileOutputCommitter.TEMP_DIR_NAME; public class AsyncFileSingleStreamSpillerFactory implements SingleStreamSpillerFactory { private static final Logger log = LoggerFactory.getLogger(AsyncFileSingleStreamSpillerFactory.class); - private static final String TEMP_DIR_NAME = "temp"; - private final PagesSerdeFactory serdeFactory; private final FileCleaner fileCleaner; private final long startTime = System.nanoTime(); @@ -82,8 +80,6 @@ public class AsyncFileSingleStreamSpillerFactory implements SingleStreamSpillerF private List spillerRootPaths; // subdir in root path, temporary reserve spiller files generated by operator/shuffle // which cleaned on process restart or query is done - // TODO: after task failover supported, spiller files of shuffle will reserved in another subdir, 
that will not cleaned on process restart - private List spillerTempPaths; private IntArrayList[] threadsAssign; private AsyncFileSingleStreamSpillerFactory.ReaderThread[] readerThreads; private AsyncFileSingleStreamSpillerFactory.WriterThread[] writerThreads; @@ -100,15 +96,16 @@ public class AsyncFileSingleStreamSpillerFactory implements SingleStreamSpillerF public AsyncFileSingleStreamSpillerFactory(FileCleaner fileCleaner) { this( fileCleaner, - MppConfig.getInstance().getSpillPaths(), - MppConfig.getInstance().getMaxSpillThreads()); + ImmutableList.of(FileConfig.getInstance().getSpillerTempPath()), + FileConfig.getInstance().getSpillConfig().getMaxSpillThreads()); } public AsyncFileSingleStreamSpillerFactory( FileCleaner fileCleaner, List theSpillerRootPaths, int maxThreadNum) { - this(fileCleaner, theSpillerRootPaths, maxThreadNum, MppConfig.getInstance().getAvaliableSpillSpaceThreshold()); + this(fileCleaner, theSpillerRootPaths, maxThreadNum, + FileConfig.getInstance().getSpillConfig().getAvaliableSpillSpaceThreshold()); } public AsyncFileSingleStreamSpillerFactory( @@ -117,7 +114,7 @@ public AsyncFileSingleStreamSpillerFactory( int maxThreadNum, double maxUsedSpaceThreshold) { this(fileCleaner, theSpillerRootPaths, maxThreadNum, maxUsedSpaceThreshold, - MppConfig.getInstance().getMaxSpillFdThreshold()); + FileConfig.getInstance().getSpillConfig().getMaxSpillFdThreshold()); } public AsyncFileSingleStreamSpillerFactory( @@ -142,22 +139,10 @@ public AsyncFileSingleStreamSpillerFactory( log.info("init SpillerManager with path[" + i + "]:" + spillerRootPaths.get(i).toFile().getAbsolutePath()); } - // init temp paths - this.spillerTempPaths = new ArrayList<>(spillerRootPaths.size()); - for (int i = 0; i < spillerRootPaths.size(); i++) { - Path rootPath = spillerRootPaths.get(i); - Path tempPath = Paths.get(rootPath.toFile().getAbsolutePath(), TEMP_DIR_NAME); - this.spillerTempPaths.add(tempPath); - log.info("init SpillerManager with temp path[" + i + "]:" + tempPath.toFile().getAbsolutePath()); - } - // ensure dirs for (int i = 0; i < spillerRootPaths.size(); i++) { Path rootPath = spillerRootPaths.get(i); rootPath.toFile().mkdirs(); - - Path tempPath = spillerTempPaths.get(i); - tempPath.toFile().mkdirs(); } checkArgument(maxThreadNum > 0, "threadNum is not positive"); @@ -237,7 +222,7 @@ public void close() { } private int getSpillerThreadsNum() { - int dynamicSpillerThreads = MppConfig.getInstance().getMaxSpillThreads(); + int dynamicSpillerThreads = FileConfig.getInstance().getSpillConfig().getMaxSpillThreads(); if (dynamicSpillerThreads <= 0) { return maxThreadNum; } @@ -271,7 +256,7 @@ protected FileHolder getNextFileHolder(OutFileParams outFileParams) { } if (hasEnoughDiskSpace(spillerRootPaths.get(0))) { int threadId = threadsAssign[0].getInt(0); - Path assignPath = Paths.get(spillerTempPaths.get(0).toFile().getAbsolutePath(), + Path assignPath = Paths.get(spillerRootPaths.get(0).toFile().getAbsolutePath(), outFileParams.getFileName()); log.info(String.format("assign new file for spiller, %s", assignPath)); return new FileHolder(assignPath, fileCleaner, threadId); @@ -290,7 +275,7 @@ protected FileHolder getNextFileHolder(String prefix) { if (hasEnoughDiskSpace(spillerRootPaths.get(pathIdx))) { // use the first thread int threadId = threadsAssign[pathIdx].getInt(0); - Path assignPath = Paths.get(spillerTempPaths.get(pathIdx).toFile().getAbsolutePath(), + Path assignPath = Paths.get(spillerRootPaths.get(pathIdx).toFile().getAbsolutePath(), getFileName(pathIdx, threadId, prefix, 
idBase)); log.info(String.format("assign new file for spiller, %s", assignPath)); return new FileHolder(assignPath, fileCleaner, threadId); @@ -304,7 +289,7 @@ protected FileHolder getNextFileHolder(String prefix) { int pathIdx = (int) (id % spillerRootPaths.size()); if (hasEnoughDiskSpace(spillerRootPaths.get(pathIdx))) { int threadId = pickThreadForFile(threadsAssign[pathIdx], spillerThreadsNum, id); - Path assignPath = Paths.get(spillerTempPaths.get(pathIdx).toFile().getAbsolutePath(), + Path assignPath = Paths.get(spillerRootPaths.get(pathIdx).toFile().getAbsolutePath(), getFileName(pathIdx, threadId, prefix, idBase)); log.info(String.format("assign new file for spiller, %s", assignPath)); return new FileHolder(assignPath, fileCleaner, threadId); @@ -366,26 +351,30 @@ private boolean hasEnoughDiskSpace(Path path) { } } - @PostConstruct public SingleStreamSpillerFactory cleanupOldSpillFiles() { - for (Path path : spillerTempPaths) { + for (Path path : spillerRootPaths) { log.warn("Deleting old spill file in path: " + path); - try (DirectoryStream stream = newDirectoryStream(path, SPILL_FILE_GLOB)) { - stream.forEach(spillFile -> { - try { - log.info("Deleting old spill file: " + spillFile); - this.fileCleaner.recycleFile(new FileHolder(spillFile, this.fileCleaner)); - } catch (Exception e) { - log.warn("Could not cleanup old spill file: " + spillFile); - } - }); - } catch (IOException e) { - log.warn("Error cleaning spill files", e); - } + // delete temp spill file + deleteFile(path); } return this; } + private void deleteFile(Path path) { + try (DirectoryStream stream = newDirectoryStream(path)) { + stream.forEach(spillFile -> { + try { + log.info("Deleting old spill file: " + spillFile); + this.fileCleaner.recycleFile(new FileHolder(spillFile, this.fileCleaner)); + } catch (Exception e) { + log.warn("Could not cleanup old spill file: " + spillFile); + } + }); + } catch (IOException e) { + log.warn("Error cleaning spill files", e); + } + } + protected static class ReaderThread extends Thread { private int threadId; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/AsyncPageFileChannelReader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/AsyncPageFileChannelReader.java index 58de25aec..4f57d3953 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/AsyncPageFileChannelReader.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/AsyncPageFileChannelReader.java @@ -29,16 +29,16 @@ */ package com.alibaba.polardbx.executor.operator.spill; -import com.google.common.collect.AbstractIterator; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.mpp.execution.buffer.PagesSerde; import com.alibaba.polardbx.executor.mpp.execution.buffer.PagesSerdeUtil; import com.alibaba.polardbx.optimizer.spill.SpillMonitor; +import com.google.common.collect.AbstractIterator; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; import io.airlift.slice.InputStreamSliceInput; import io.airlift.slice.SliceInput; diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/AsyncPageFileChannelWriter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/AsyncPageFileChannelWriter.java index 709b3c378..d0a3476e8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/AsyncPageFileChannelWriter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/AsyncPageFileChannelWriter.java @@ -29,12 +29,12 @@ */ package com.alibaba.polardbx.executor.operator.spill; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.mpp.execution.buffer.PagesSerde; import com.alibaba.polardbx.executor.mpp.execution.buffer.PagesSerdeUtil; import com.alibaba.polardbx.optimizer.spill.SpillMonitor; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.SettableFuture; import io.airlift.slice.OutputStreamSliceOutput; import io.airlift.slice.SliceOutput; @@ -45,8 +45,8 @@ import java.util.ArrayList; import java.util.Iterator; -import static com.google.common.base.Preconditions.checkState; import static com.alibaba.polardbx.executor.operator.spill.SingleStreamSpiller.NOT_BLOCKED; +import static com.google.common.base.Preconditions.checkState; import static java.util.Objects.requireNonNull; public class AsyncPageFileChannelWriter { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/FileHolder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/FileHolder.java index 235d31628..fde2dab57 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/FileHolder.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/FileHolder.java @@ -32,6 +32,7 @@ import javax.annotation.concurrent.GuardedBy; import javax.annotation.concurrent.ThreadSafe; import java.io.Closeable; +import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -39,6 +40,8 @@ import java.nio.file.Files; import java.nio.file.OpenOption; import java.nio.file.Path; +import java.util.Comparator; +import java.util.stream.Stream; import static com.google.common.base.Preconditions.checkState; import static java.util.Objects.requireNonNull; @@ -94,7 +97,15 @@ public synchronized void close() { public synchronized void doClean() { try { if (Files.exists(filePath)) { - Files.delete(filePath); + if (Files.isDirectory(filePath)) { + try (Stream stream = Files.walk(filePath)) { + stream.sorted(Comparator.reverseOrder()) + .map(Path::toFile) + .forEach(File::delete); + } + } else { + Files.delete(filePath); + } } } catch (IOException e) { throw new UncheckedIOException(e); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/FileSingleStreamSpiller.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/FileSingleStreamSpiller.java index 34a9803bf..4ba0622f0 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/FileSingleStreamSpiller.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/FileSingleStreamSpiller.java @@ -16,11 +16,6 @@ package com.alibaba.polardbx.executor.operator.spill; -import com.google.common.annotations.VisibleForTesting; -import 
com.google.common.collect.ImmutableList; -import com.google.common.io.Closer; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ListeningExecutorService; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.logger.Logger; @@ -32,6 +27,11 @@ import com.alibaba.polardbx.executor.mpp.util.MppIterators; import com.alibaba.polardbx.executor.utils.ExecUtils; import com.alibaba.polardbx.optimizer.spill.LocalSpillMonitor; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; +import com.google.common.io.Closer; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.ListeningExecutorService; import io.airlift.slice.InputStreamSliceInput; import io.airlift.slice.OutputStreamSliceOutput; import io.airlift.slice.SliceOutput; @@ -49,11 +49,11 @@ import java.util.LinkedList; import java.util.List; -import static com.google.common.base.Preconditions.checkState; import static com.alibaba.polardbx.executor.mpp.execution.buffer.PagesSerdeUtil.writeSerializedChunk; import static com.alibaba.polardbx.executor.mpp.util.MppCloseables.combineCloseables; import static com.alibaba.polardbx.executor.operator.spill.SingleStreamSpillerFactory.SPILL_FILE_PREFIX; import static com.alibaba.polardbx.executor.operator.spill.SingleStreamSpillerFactory.SPILL_FILE_SUFFIX; +import static com.google.common.base.Preconditions.checkState; import static io.airlift.concurrent.MoreFutures.getFutureValue; import static java.nio.file.StandardOpenOption.APPEND; import static java.util.Objects.requireNonNull; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/FileSingleStreamSpillerFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/FileSingleStreamSpillerFactory.java index 830eaa098..6dd66a7bf 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/FileSingleStreamSpillerFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/FileSingleStreamSpillerFactory.java @@ -29,32 +29,35 @@ */ package com.alibaba.polardbx.executor.operator.spill; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.ListeningExecutorService; import com.alibaba.polardbx.common.exception.TddlRuntimeException; -import com.alibaba.polardbx.common.properties.MppConfig; +import com.alibaba.polardbx.common.properties.FileConfig; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.mpp.Threads; import com.alibaba.polardbx.executor.mpp.execution.buffer.PagesSerdeFactory; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.spill.LocalSpillMonitor; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListeningExecutorService; import org.apache.calcite.sql.OutFileParams; import javax.annotation.PostConstruct; +import java.io.File; import java.io.IOException; import java.nio.file.DirectoryStream; import java.nio.file.FileStore; +import 
java.nio.file.Files; import java.nio.file.Path; +import java.util.Comparator; import java.util.List; +import java.util.stream.Stream; -import static com.google.common.util.concurrent.MoreExecutors.listeningDecorator; import static com.alibaba.polardbx.common.exception.code.ErrorCode.ERR_OUT_OF_SPILL_SPACE; +import static com.google.common.util.concurrent.MoreExecutors.listeningDecorator; import static java.lang.String.format; import static java.nio.file.Files.createDirectories; -import static java.nio.file.Files.delete; import static java.nio.file.Files.getFileStore; import static java.nio.file.Files.newDirectoryStream; import static java.util.Objects.requireNonNull; @@ -74,11 +77,11 @@ public FileSingleStreamSpillerFactory( FileCleaner fileCleaner) { this( listeningDecorator(newFixedThreadPool( - MppConfig.getInstance().getMaxSpillThreads(), + FileConfig.getInstance().getSpillConfig().getMaxSpillThreads(), Threads.daemonThreadsNamed("binary-spiller"))), fileCleaner, - MppConfig.getInstance().getSpillPaths(), - MppConfig.getInstance().getAvaliableSpillSpaceThreshold()); + ImmutableList.of(FileConfig.getInstance().getSpillerTempPath()), + FileConfig.getInstance().getSpillConfig().getAvaliableSpillSpaceThreshold()); } @VisibleForTesting @@ -127,17 +130,29 @@ public FileSingleStreamSpillerFactory( @PostConstruct public SingleStreamSpillerFactory cleanupOldSpillFiles() { - spillPaths.forEach(FileSingleStreamSpillerFactory::cleanupOldSpillFiles); + for (Path path : spillPaths) { + log.warn("Deleting old spill file in path: " + path); + // delete temp spill file + cleanupOldSpillFiles(path, SPILL_FILE_GLOB); + } return this; } - private static void cleanupOldSpillFiles(Path path) { - try (DirectoryStream stream = newDirectoryStream(path, SPILL_FILE_GLOB)) { + private static void cleanupOldSpillFiles(Path path, String fileGlob) { + try (DirectoryStream stream = newDirectoryStream(path, fileGlob)) { stream.forEach(spillFile -> { try { log.info("Deleting old spill file: " + spillFile); // FIXME, do it async - delete(spillFile); + if (java.nio.file.Files.isDirectory(spillFile)) { + try (Stream fileStream = java.nio.file.Files.walk(spillFile)) { + fileStream.sorted(Comparator.reverseOrder()) + .map(Path::toFile) + .forEach(File::delete); + } + } else { + Files.delete(spillFile); + } } catch (Exception e) { log.warn("Could not cleanup old spill file: " + spillFile); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/GenericSpiller.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/GenericSpiller.java index 35a157b5b..886657be1 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/GenericSpiller.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/GenericSpiller.java @@ -16,13 +16,13 @@ package com.alibaba.polardbx.executor.operator.spill; -import com.google.common.io.Closer; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.spill.QuerySpillSpaceMonitor; import com.alibaba.polardbx.optimizer.spill.SpillMonitor; +import com.google.common.io.Closer; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; import org.apache.calcite.sql.OutFileParams; import java.io.IOException; @@ -87,7 +87,7 @@ public 
List> getSpills() { @Override public List> getSpills(long maxChunkNum) { return singleStreamSpillers.stream() - .map( spiller -> spiller.getSpilledChunks(maxChunkNum)) + .map(spiller -> spiller.getSpilledChunks(maxChunkNum)) .collect(toList()); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/GenericSpillerFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/GenericSpillerFactory.java index 08d9a08be..9eba7c763 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/GenericSpillerFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/GenericSpillerFactory.java @@ -16,9 +16,9 @@ package com.alibaba.polardbx.executor.operator.spill; -import com.google.inject.Inject; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.spill.SpillMonitor; +import com.google.inject.Inject; import org.apache.calcite.sql.OutFileParams; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/MemoryRevoker.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/MemoryRevoker.java index 55c3bc9c8..7f1764d86 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/MemoryRevoker.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/MemoryRevoker.java @@ -16,8 +16,8 @@ package com.alibaba.polardbx.executor.operator.spill; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.optimizer.memory.OperatorMemoryAllocatorCtx; +import com.google.common.util.concurrent.ListenableFuture; public interface MemoryRevoker { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/MemorySpillerFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/MemorySpillerFactory.java index 68f4ab8b8..ceca9c0a2 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/MemorySpillerFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/MemorySpillerFactory.java @@ -16,12 +16,12 @@ package com.alibaba.polardbx.executor.operator.spill; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.spill.SpillMonitor; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListenableFuture; import org.apache.calcite.sql.OutFileParams; import java.util.ArrayList; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/OrcWriter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/OrcWriter.java index b793d3040..f224a839d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/OrcWriter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/OrcWriter.java @@ -232,7 +232,7 @@ public void uploadToOss() { try { for (int i = 0; i < localFilePaths.size(); i++) { File localFile = new File(localFilePaths.get(i)); - FileSystemUtils.writeFile(localFile, 
ossKeys.get(i).toString(), OSS); + FileSystemUtils.writeFile(localFile, ossKeys.get(i).toString(), OSS, false); logger.info("file upload done: " + localFilePaths.get(i) + " file size = " + localFile.length()); } } catch (Exception e) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/SingleStreamSpiller.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/SingleStreamSpiller.java index a76d00982..c3eee50f1 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/SingleStreamSpiller.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/SingleStreamSpiller.java @@ -17,9 +17,9 @@ package com.alibaba.polardbx.executor.operator.spill; import com.alibaba.polardbx.common.exception.NotSupportException; +import com.alibaba.polardbx.executor.chunk.Chunk; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; -import com.alibaba.polardbx.executor.chunk.Chunk; import java.io.Closeable; import java.util.Iterator; @@ -55,9 +55,11 @@ default ListenableFuture spill(Chunk page) { /** * 返回的Iterator最多读取maxChunkNum个Chunk */ - default Iterator getSpilledChunks(long maxChunkNum){ + default Iterator getSpilledChunks(long maxChunkNum) { throw new NotSupportException(); - }; + } + + ; /** * Initiates read of previously spilled pages. The returned {@link Future} will be complete once all pages are read. diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/SingleStreamSpillerFactory.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/SingleStreamSpillerFactory.java index 5862cdcbc..0c9cc9423 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/SingleStreamSpillerFactory.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/SingleStreamSpillerFactory.java @@ -16,9 +16,9 @@ package com.alibaba.polardbx.executor.operator.spill; -import com.google.common.annotations.VisibleForTesting; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.spill.LocalSpillMonitor; +import com.google.common.annotations.VisibleForTesting; import org.apache.calcite.sql.OutFileParams; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/Spiller.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/Spiller.java index eaa27188d..25746fca6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/Spiller.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/spill/Spiller.java @@ -17,8 +17,8 @@ package com.alibaba.polardbx.executor.operator.spill; import com.alibaba.polardbx.common.exception.NotSupportException; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.chunk.Chunk; +import com.google.common.util.concurrent.ListenableFuture; import java.io.Closeable; import java.util.Iterator; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/AbstractBatchBlockBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/AbstractBatchBlockBuilder.java new file mode 100644 index 000000000..72540aca8 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/AbstractBatchBlockBuilder.java 
@@ -0,0 +1,106 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.util; + +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; + +public abstract class AbstractBatchBlockBuilder implements BlockBuilder { + // fixed size of capacity + final int initialCapacity; + protected boolean[] valueIsNull; + int currentIndex; + protected boolean containsNull; + + public AbstractBatchBlockBuilder(int initialCapacity) { + this.initialCapacity = initialCapacity; + this.valueIsNull = null; + this.currentIndex = 0; + this.containsNull = false; + } + + protected void allocateNulls() { + if (valueIsNull == null) { + this.valueIsNull = new boolean[initialCapacity]; + } + } + + @Override + public int getPositionCount() { + return currentIndex; + } + + @Override + public boolean isNull(int position) { + checkReadablePosition(position); + if (valueIsNull == null) { + return false; + } + return valueIsNull[position]; + } + + @Override + public void ensureCapacity(int capacity) { + throw new UnsupportedOperationException(); + } + + protected void appendNullInternal() { + allocateNulls(); + valueIsNull[currentIndex] = true; + containsNull = true; + currentIndex++; + } + + void setContainsNull() { + this.containsNull = true; + } + + @Override + public boolean mayHaveNull() { + return containsNull; + } + + protected void checkReadablePosition(int position) { + if (position < 0 || position >= getPositionCount()) { + throw new IllegalArgumentException("position is not valid"); + } + } + + @Override + public final boolean equals(int position, Block otherBlock, int otherPosition) { + throw new UnsupportedOperationException("Please invoke from block instead of block builder"); + } + + @Override + public final void writePositionTo(int position, BlockBuilder blockBuilder) { + throw new UnsupportedOperationException(); + } + + @Override + public final long estimateSize() { + throw new UnsupportedOperationException(); + } + + @Override + public long hashCodeUseXxhash(int pos) { + throw new UnsupportedOperationException(); + } + + protected int getCapacity() { + return initialCapacity; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/AggHashMap.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/AggHashMap.java index ac641b4f1..66c8a40b9 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/AggHashMap.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/AggHashMap.java @@ -16,14 +16,15 @@ package com.alibaba.polardbx.executor.operator.util; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.chunk.Chunk; +import com.google.common.util.concurrent.ListenableFuture; +import it.unimi.dsi.fastutil.ints.IntArrayList; import java.util.List; public interface AggHashMap 
extends GroupHashMap { - int[] putChunk(Chunk keyChunk, Chunk inputChunk); + void putChunk(Chunk keyChunk, Chunk inputChunk, IntArrayList groupIdResult); List getGroupChunkList(); @@ -41,4 +42,28 @@ default void finishMemoryRevoke() { default void close() { } + + /** + * To consume chunks, build hash table, + * maintain the groupId, and accumulate agg-function. + */ + interface GroupBy { + /** + * The key-chunk and input chunk will share the same blocks. + * + * @param keyChunk chunks of group-by blocks. + * @param inputChunk chunks of total input blocks. + * @param groupIdResult if not null, fill the group ids into it. + */ + void putChunk(Chunk keyChunk, Chunk inputChunk, IntArrayList groupIdResult); + + /** + * Get the precise fixed estimated size in bytes of this object. + * + * @return size in bytes + */ + long fixedEstimatedSize(); + + void close(); + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/AggOpenHashMap.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/AggOpenHashMap.java index 76d97f232..caa9527de 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/AggOpenHashMap.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/AggOpenHashMap.java @@ -16,26 +16,43 @@ package com.alibaba.polardbx.executor.operator.util; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.google.common.base.Preconditions; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.memory.SizeOf; +import com.alibaba.polardbx.executor.accumulator.Accumulator; +import com.alibaba.polardbx.executor.accumulator.AccumulatorBuilders; import com.alibaba.polardbx.executor.chunk.Block; import com.alibaba.polardbx.executor.chunk.BlockBuilder; import com.alibaba.polardbx.executor.chunk.BlockBuilders; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.chunk.ChunkBuilder; +import com.alibaba.polardbx.executor.chunk.ChunkConverter; +import com.alibaba.polardbx.executor.chunk.Converters; import com.alibaba.polardbx.executor.chunk.IntegerBlock; +import com.alibaba.polardbx.executor.chunk.SliceBlock; import com.alibaba.polardbx.executor.mpp.operator.WorkProcessor; +import com.alibaba.polardbx.executor.operator.scan.impl.DictionaryMapping; import com.alibaba.polardbx.executor.utils.ExecUtils; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.datatype.IntegerType; +import com.alibaba.polardbx.optimizer.core.datatype.LongType; +import com.alibaba.polardbx.optimizer.core.datatype.SliceType; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; +import com.alibaba.polardbx.optimizer.memory.OperatorMemoryAllocatorCtx; +import com.google.common.base.Preconditions; +import it.unimi.dsi.fastutil.HashCommon; import it.unimi.dsi.fastutil.ints.AbstractIntComparator; import it.unimi.dsi.fastutil.ints.AbstractIntIterator; +import it.unimi.dsi.fastutil.ints.Int2IntOpenHashMap; +import it.unimi.dsi.fastutil.ints.IntArrayList; import it.unimi.dsi.fastutil.ints.IntArrays; import it.unimi.dsi.fastutil.ints.IntComparator; import it.unimi.dsi.fastutil.ints.IntIterator; +import org.apache.calcite.util.Util; import java.util.ArrayList; +import java.util.Arrays; +import java.util.BitSet; import 
java.util.List; public class AggOpenHashMap extends GroupOpenHashMap implements AggHashMap { @@ -48,20 +65,30 @@ public class AggOpenHashMap extends GroupOpenHashMap implements AggHashMap { protected final BlockBuilder[] valueBlockBuilders; + private ChunkConverter[] valueConverters; + + private Accumulator[] valueAccumulators; + private int[] filterArgs; private DistinctSet[] distinctSets; private DataType[] aggValueType; + private GroupBy groupBy; + + private final OperatorMemoryAllocatorCtx memoryAllocator; + public AggOpenHashMap(DataType[] groupKeyType, List aggregators, DataType[] aggValueType, - DataType[] inputType, int expectedSize, int chunkSize, ExecutionContext context) { - this(groupKeyType, aggregators, aggValueType, inputType, expectedSize, DEFAULT_LOAD_FACTOR, chunkSize, context); + DataType[] inputType, int expectedSize, int chunkSize, ExecutionContext context, + OperatorMemoryAllocatorCtx memoryAllocator) { + this(groupKeyType, aggregators, aggValueType, inputType, expectedSize, DEFAULT_LOAD_FACTOR, chunkSize, context, + memoryAllocator); } public AggOpenHashMap(DataType[] groupKeyType, List aggregators, DataType[] aggValueType, DataType[] inputType, int expectedSize, float loadFactor, int chunkSize, - ExecutionContext context) { + ExecutionContext context, OperatorMemoryAllocatorCtx memoryAllocator) { super(groupKeyType, expectedSize, loadFactor, chunkSize, context); Preconditions.checkArgument(loadFactor > 0 && loadFactor <= 1, @@ -69,18 +96,28 @@ public AggOpenHashMap(DataType[] groupKeyType, List aggregators, Dat Preconditions.checkArgument(expectedSize >= 0, "The expected number of elements must be non-negative"); this.aggregators = aggregators; + this.memoryAllocator = memoryAllocator; + this.valueAccumulators = new Accumulator[aggregators.size()]; + + this.valueConverters = new ChunkConverter[aggregators.size()]; this.filterArgs = new int[aggregators.size()]; this.distinctSets = new DistinctSet[aggregators.size()]; for (int i = 0; i < aggregators.size(); i++) { - final AbstractAggregator aggregator = (AbstractAggregator) aggregators.get(i); - aggregator.open(expectedSize); + final Aggregator aggregator = aggregators.get(i); + valueAccumulators[i] = + AccumulatorBuilders.create(aggregator, aggValueType[i], inputType, expectedSize, context); + DataType[] originalInputTypes = DataTypeUtils.gather(inputType, aggregator.getInputColumnIndexes()); + DataType[] accumulatorInputTypes = Util.first(valueAccumulators[i].getInputTypes(), originalInputTypes); + valueConverters[i] = + Converters.createChunkConverter(aggregator.getInputColumnIndexes(), inputType, accumulatorInputTypes, + context); filterArgs[i] = aggregator.getFilterArg(); - int[] aggIndexInChunk = aggregator.getOriginTargetIndexes(); if (aggregator.isDistinct()) { + int[] distinctIndexes = aggregator.getNewForAccumulator().getAggTargetIndexes(); distinctSets[i] = - new DistinctSet(inputType, aggIndexInChunk, expectedSize, chunkSize, context); + new DistinctSet(accumulatorInputTypes, distinctIndexes, expectedSize, chunkSize, context); } } @@ -91,52 +128,1209 @@ public AggOpenHashMap(DataType[] groupKeyType, List aggregators, Dat } if (noGroupBy()) { - // add an empty chunk - appendGroup(new Chunk(1), 0); + appendGroup(new Chunk(1), 0); // add an empty chunk + } + + // Prerequisites: + // 1. ENABLE_VEC_ACCUMULATOR=true + // 2. group by column is not empty. + // 3. has no distinct keyword in any aggregator. + // 4. has no filter args in any aggregator. 
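+        // Illustrative example (assumed query): "SELECT k, SUM(v) FROM t GROUP BY k" with a
+        // BIGINT key satisfies all four prerequisites and takes the LongBatchGroupBy path
+        // chosen below; adding DISTINCT or a filter argument to any aggregate falls back
+        // to DefaultGroupBy.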
+ boolean enableVecAccumulator = context.getParamManager().getBoolean(ConnectionParams.ENABLE_VEC_ACCUMULATOR) + && groupKeyType.length > 0 + && aggregators.stream().allMatch(aggregator -> !aggregator.isDistinct() && aggregator.getFilterArg() < 0); + + // check if group keys consist of (int, int), (varchar, varchar), (int, varchar), (varchar ,int) + // and don't use compatible mode. + boolean groupKeyIntegerAndSlice = groupKeyType != null + && groupKeyType.length == 2 && !context.isEnableOssCompatible() + && ((groupKeyType[0] instanceof IntegerType && groupKeyType[1] instanceof IntegerType) + || (groupKeyType[0] instanceof IntegerType && groupKeyType[1] instanceof SliceType) + || (groupKeyType[0] instanceof SliceType && groupKeyType[1] instanceof IntegerType) + || (groupKeyType[0] instanceof SliceType && groupKeyType[1] instanceof SliceType) + ); + + // group by long + boolean singleGroupKeyLong = groupKeyType != null + && groupKeyType.length == 1 + && groupKeyType[0] instanceof LongType; + + // group by int + boolean singleGroupKeyInteger = groupKeyType != null + && groupKeyType.length == 1 + && groupKeyType[0] instanceof IntegerType; + + if (enableVecAccumulator && groupKeyIntegerAndSlice) { + this.groupBy = new SliceIntBatchGroupBy(); + } else if (enableVecAccumulator && singleGroupKeyLong) { + this.groupBy = new LongBatchGroupBy(); + } else if (enableVecAccumulator && singleGroupKeyInteger) { + this.groupBy = new IntBatchGroupBy(); + } else { + this.groupBy = new DefaultGroupBy(); } + // for fixed memory cost of GroupBy objects. + memoryAllocator.allocateReservedMemory(groupBy.fixedEstimatedSize()); } - @Override - public int[] putChunk(Chunk keyChunk, Chunk inputChunk) { + protected final static int GROUP_ID_COUNT_THRESHOLD = 16; + protected final static long SERIALIZED_MASK = ((long) 0x7fffffff) << 1 | 1; + + private class IntBatchGroupBy implements GroupBy { + protected int[] groupIds = new int[chunkSize]; + protected int[] sourceArray = new int[chunkSize]; + protected int[] groupIdSelection = new int[chunkSize]; + + protected boolean inOrder; + + protected int[] key; + protected int[] value; + protected int mask; + + // The key=0 is stored in the last position in hash table. + protected boolean containsZeroKey; + + // maintain a field: groupIdOfNull for null value. + protected boolean hasNull; + protected BitSet nullBitmap; + protected int groupIdOfNull; + + protected int n; + protected int maxFill; + protected int size; + protected final float f; + + public IntBatchGroupBy() { + this.nullBitmap = new BitSet(chunkSize); + this.containsZeroKey = false; + this.groupIdOfNull = -1; + + this.f = loadFactor; + final int expected = expectedSize; + if (!(f <= 0.0F) && !(f > 1.0F)) { + if (expected < 0) { + throw new IllegalArgumentException("The expected number of elements must be non-negative"); + } else { + this.n = HashCommon.arraySize(expected, f); + this.mask = this.n - 1; + this.maxFill = HashCommon.maxFill(this.n, f); + + // large memory allocation: hash-table for aggregation. + memoryAllocator.allocateReservedMemory(2 * SizeOf.sizeOfIntArray(n + 1)); + this.key = new int[this.n + 1]; + this.value = new int[this.n + 1]; + } + } else { + throw new IllegalArgumentException("Load factor must be greater than 0 and smaller than or equal to 1"); + } + } + + @Override + public void putChunk(Chunk keyChunk, Chunk inputChunk, IntArrayList groupIdResult) { + Preconditions.checkArgument(keyChunk.getBlockCount() == 1); + + // clear null state. 
+ nullBitmap.clear(); + hasNull = false; + + // get input chunks for aggregators. + Chunk[] aggregatorInputs = new Chunk[aggregators.size()]; + for (int i = 0; i < aggregators.size(); i++) { + aggregatorInputs[i] = valueConverters[i].apply(inputChunk); + } + + final int positionCount = inputChunk.getPositionCount(); - final int[] groupIds = new int[inputChunk.getPositionCount()]; - final boolean noGroupBy = noGroupBy(); - if (noGroupBy) { - for (int pos = 0; pos < inputChunk.getPositionCount(); pos++) { - groupIds[pos] = 0; + // step 1. copy blocks into int/long array + // step 2. long type-specific hash, and put group value when first hit. + // step 3. accumulator with selection array. + Block keyBlock = keyChunk.getBlock(0).cast(Block.class); + keyBlock.copyToIntArray(0, positionCount, sourceArray, 0, null); + + if (keyBlock.mayHaveNull()) { + // collect to null bitmap if have null. + keyBlock.collectNulls(0, positionCount, nullBitmap, 0); + hasNull = !nullBitmap.isEmpty(); } - } else { - for (int pos = 0; pos < inputChunk.getPositionCount(); pos++) { - groupIds[pos] = innerPut(keyChunk, pos, -1); - } - } - final Block groupIdBlock = IntegerBlock.wrap(groupIds); - for (int aggIndex = 0; aggIndex < aggregators.size(); aggIndex++) { - boolean[] isDistinct = null; - if (distinctSets[aggIndex] != null) { - isDistinct = distinctSets[aggIndex].checkDistinct(groupIdBlock, inputChunk); - } - for (int pos = 0; pos < inputChunk.getPositionCount(); pos++) { - boolean noFilter = true; - if (filterArgs[aggIndex] > -1) { - Object obj = inputChunk.getBlock(filterArgs[aggIndex]).getObject(pos); - if (obj instanceof Boolean) { - noFilter = (Boolean) obj; - } else if (obj instanceof Long) { - long lVal = (Long) obj; - if (lVal < 1) { - noFilter = false; + + // 2.1 build hash table + // 2.2 check in-order + putHashTable(positionCount); + + // step 3. accumulator with selection array. + int groupCount = getGroupCount(); + if (inOrder) { + // CASE 1. (best case) the long value of key block is in order. + int groupId = groupIds[0]; + int startIndex = 0; + for (int i = 0; i < positionCount; i++) { + if (groupIds[i] != groupId) { + // accumulate in range. + for (int aggIndex = 0; aggIndex < aggregators.size(); aggIndex++) { + valueAccumulators[aggIndex] + .accumulate(groupId, aggregatorInputs[aggIndex], startIndex, i); + } + + // update for the next range + startIndex = i; + groupId = groupIds[i]; + } + } + + // for the rest range + if (startIndex < positionCount) { + for (int aggIndex = 0; aggIndex < aggregators.size(); aggIndex++) { + valueAccumulators[aggIndex] + .accumulate(groupId, aggregatorInputs[aggIndex], startIndex, positionCount); + } + } + + } else if (groupCount <= GROUP_ID_COUNT_THRESHOLD) { + // CASE 2. (good case) the ndv is small. + + for (int groupId = 0; groupId < groupCount; groupId++) { + // collect the position that groupIds[position] = groupId into selection array. + int selSize = 0; + for (int position = 0; position < positionCount; position++) { + if (groupIds[position] == groupId) { + groupIdSelection[selSize++] = position; + } + } + + // for each aggregator function + if (selSize > 0) { + for (int aggIndex = 0; aggIndex < aggregators.size(); aggIndex++) { + valueAccumulators[aggIndex] + .accumulate(groupId, aggregatorInputs[aggIndex], groupIdSelection, selSize); + } + } + } + } else { + // Normal execution mode. 
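+                // Illustrative: in this mode every position carries its own group id, e.g.
+                // with groupIds = [0, 3, 0, 7] each accumulator applies position i to group
+                // groupIds[i] in one pass, with no ordering or small-ndv assumption.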
+                for (int aggIndex = 0; aggIndex < aggregators.size(); aggIndex++) {
+                    valueAccumulators[aggIndex]
+                        .accumulate(groupIds, aggregatorInputs[aggIndex], positionCount);
+                }
+            }
+
+            if (groupIdResult != null) {
+                for (int i = 0; i < positionCount; i++) {
+                    groupIdResult.add(groupIds[i]);
+                }
+            }
+        }
+
+        @Override
+        public long fixedEstimatedSize() {
+            // The groupId and value map is always growing during aggregation.
+            return (chunkSize * Integer.BYTES) * 3
+                + SizeOf.sizeOfLongArray(nullBitmap.size() / 64)
+                + Integer.BYTES * 5
+                + Byte.BYTES * 3 + Float.BYTES;
+        }
+
+        public void putHashTable(int positionCount) {
+            // Reset the in-order flag for each incoming batch: the range-based fast path
+            // applies only when the batch has no nulls and keys arrive in non-decreasing order.
+            inOrder = !hasNull;
+            if (!hasNull) {
+                long lastVal = Long.MIN_VALUE;
+                for (int position = 0; position < positionCount; position++) {
+                    inOrder &= (sourceArray[position] >= lastVal);
+                    lastVal = sourceArray[position];
+                }
+            }
+
+            for (int position = 0; position < positionCount; position++) {
+                groupIds[position] = findGroupId(sourceArray, position);
+            }
+        }
+
+        private int findGroupId(int[] sourceArray, final int position) {
+
+            if (hasNull && nullBitmap.get(position)) {
+                if (groupIdOfNull == -1) {
+                    // put null value in the first time.
+                    groupIdOfNull = allocateGroupId(sourceArray, position, true);
+                }
+                return groupIdOfNull;
+            }
+
+            int k = sourceArray[position];
+            int pos;
+            if (k == 0) {
+                if (this.containsZeroKey) {
+                    return value[this.n];
+                }
+
+                this.containsZeroKey = true;
+                pos = this.n;
+            } else {
+                int[] key = this.key;
+                int curr;
+                if ((curr = key[pos = HashCommon.mix(k) & this.mask]) != 0) {
+                    if (curr == k) {
+                        return value[pos];
+                    }
+
+                    while ((curr = key[pos = pos + 1 & this.mask]) != 0) {
+                        if (curr == k) {
+                            return value[pos];
+                        }
+                    }
+                }
+
+                key[pos] = k;
+            }
+
+            // allocate new group id
+            int groupId = allocateGroupId(sourceArray, position, false);
+
+            this.value[pos] = groupId;
+            if (this.size++ >= this.maxFill) {
+                this.rehash(HashCommon.arraySize(this.size + 1, this.f));
+            }
+
+            return groupId;
+        }
+
+        // It's OK if the element in this position is null.
+        private int allocateGroupId(int[] sourceArray, final int position, boolean isNull) {
+            // use groupCount as group value array index.
+            if (isNull) {
+                groupKeyBuffer.appendNull(0);
+            } else {
+                groupKeyBuffer.appendInteger(0, sourceArray[position]);
+            }
+
+            int groupId = groupCount++;
+
+            // Also add an initial value to accumulators
+            for (int i = 0; i < aggregators.size(); i++) {
+                valueAccumulators[i].appendInitValue();
+            }
+            return groupId;
+        }
+
+        protected void rehash(int newN) {
+            int[] key = this.key;
+            int[] value = this.value;
+            int mask = newN - 1;
+
+            // large memory allocation: rehash of hash-table for aggregation.
+            memoryAllocator.allocateReservedMemory(2 * SizeOf.sizeOfIntArray(newN + 1));
+
+            int[] newKey = new int[newN + 1];
+            int[] newValue = new int[newN + 1];
+            int i = this.n;
+
+            int pos;
+            for (int j = this.realSize(); j-- != 0; newValue[pos] = value[i]) {
+                do {
+                    --i;
+                } while (key[i] == 0);
+
+                if (newKey[pos = HashCommon.mix(key[i]) & mask] != 0) {
+                    while (newKey[pos = pos + 1 & mask] != 0) {
+                    }
+                }
+
+                newKey[pos] = key[i];
+            }
+
+            newValue[newN] = value[this.n];
+            this.n = newN;
+            this.mask = mask;
+            this.maxFill = HashCommon.maxFill(this.n, this.f);
+            this.key = newKey;
+            this.value = newValue;
+        }
+
+        private int realSize() {
+            return this.containsZeroKey ?
this.size - 1 : this.size; + } + + @Override + public void close() { + key = null; + value = null; + groupIds = null; + sourceArray = null; + groupIdSelection = null; } - return groupIds; + } + + private class LongBatchGroupBy implements GroupBy { + protected int[] groupIds = new int[chunkSize]; + protected long[] sourceArray = new long[chunkSize]; + protected int[] groupIdSelection = new int[chunkSize]; + + protected boolean inOrder; + + protected long[] key; + protected int[] value; + protected int mask; + protected boolean containsZeroKey; + protected int n; + protected int maxFill; + protected int size; + protected final float f; + + // maintain a field: groupIdOfNull for null value. + protected boolean hasNull; + protected BitSet nullBitmap; + protected int groupIdOfNull; + + public LongBatchGroupBy() { + this.nullBitmap = new BitSet(chunkSize); + this.containsZeroKey = false; + this.groupIdOfNull = -1; + + final int expected = expectedSize; + this.f = loadFactor; + if (!(f <= 0.0F) && !(f > 1.0F)) { + if (expected < 0) { + throw new IllegalArgumentException("The expected number of elements must be nonnegative"); + } else { + this.n = HashCommon.arraySize(expected, f); + this.mask = this.n - 1; + this.maxFill = HashCommon.maxFill(this.n, f); + + // large memory allocation: hash-table for aggregation. + memoryAllocator.allocateReservedMemory(2 * SizeOf.sizeOfLongArray(n + 1)); + this.key = new long[this.n + 1]; + this.value = new int[this.n + 1]; + } + } else { + throw new IllegalArgumentException("Load factor must be greater than 0 and smaller than or equal to 1"); + } + } + + @Override + public void putChunk(Chunk keyChunk, Chunk inputChunk, IntArrayList groupIdResult) { + Preconditions.checkArgument(keyChunk.getBlockCount() == 1); + + // clear null state. + nullBitmap.clear(); + hasNull = false; + + Chunk[] aggregatorInputs = new Chunk[aggregators.size()]; + for (int i = 0; i < aggregators.size(); i++) { + aggregatorInputs[i] = valueConverters[i].apply(inputChunk); + } + + final int positionCount = inputChunk.getPositionCount(); + + // step 1. copy blocks into long arrays + // step 2. long type-specific hash, and put group value when first hit. + // step 3. accumulator with selection array. + Block keyBlock = keyChunk.getBlock(0).cast(Block.class); + keyBlock.copyToLongArray(0, positionCount, sourceArray, 0); + + if (keyBlock.mayHaveNull()) { + // collect to null bitmap if have null. + keyBlock.collectNulls(0, positionCount, nullBitmap, 0); + hasNull = !nullBitmap.isEmpty(); + } + + // 2.1 build hash table + // 2.2 check in-order + putHashTable(positionCount); + + // step 3. accumulator with selection array. + int groupCount = getGroupCount(); + if (inOrder) { + // CASE 1. (best case) the long value of key block is in order. + + int groupId = groupIds[0]; + int startIndex = 0; + for (int i = 0; i < positionCount; i++) { + if (groupIds[i] != groupId) { + // accumulate in range. + for (int aggIndex = 0; aggIndex < aggregators.size(); aggIndex++) { + valueAccumulators[aggIndex] + .accumulate(groupId, aggregatorInputs[aggIndex], startIndex, i); + } + + // update for the next range + startIndex = i; + groupId = groupIds[i]; + } + } + + // for the rest range + if (startIndex < positionCount) { + for (int aggIndex = 0; aggIndex < aggregators.size(); aggIndex++) { + valueAccumulators[aggIndex] + .accumulate(groupId, aggregatorInputs[aggIndex], startIndex, positionCount); + } + } + + } else if (groupCount <= GROUP_ID_COUNT_THRESHOLD) { + // CASE 2. (good case) the ndv is small. 
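+                // Illustrative: with groupIds = [0, 1, 0, 1] this pass visits group 0 with
+                // selection [0, 2], then group 1 with selection [1, 3]. Scanning all positions
+                // once per group costs O(groupCount * positionCount), which is acceptable only
+                // while groupCount <= GROUP_ID_COUNT_THRESHOLD (16).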
+ + for (int groupId = 0; groupId < groupCount; groupId++) { + // collect the position that groupIds[position] = groupId into selection array. + int selSize = 0; + for (int position = 0; position < positionCount; position++) { + if (groupIds[position] == groupId) { + groupIdSelection[selSize++] = position; + } + } + + // for each aggregator function + if (selSize > 0) { + for (int aggIndex = 0; aggIndex < aggregators.size(); aggIndex++) { + valueAccumulators[aggIndex] + .accumulate(groupId, aggregatorInputs[aggIndex], groupIdSelection, selSize); + } + } + } + } else { + // Normal execution mode. + for (int aggIndex = 0; aggIndex < aggregators.size(); aggIndex++) { + valueAccumulators[aggIndex] + .accumulate(groupIds, aggregatorInputs[aggIndex], positionCount); + } + } + if (groupIdResult != null) { + for (int i = 0; i < positionCount; i++) { + groupIdResult.add(groupIds[i]); + } + } + } + + public void putHashTable(int positionCount) { + if (!hasNull) { + long lastVal = Long.MIN_VALUE; + for (int position = 0; position < positionCount; position++) { + inOrder &= (sourceArray[position] >= lastVal); + lastVal = sourceArray[position]; + } + } + + for (int position = 0; position < positionCount; position++) { + groupIds[position] = findGroupId(sourceArray, position); + } + } + + private int findGroupId(long[] sourceArray, final int position) { + if (hasNull && nullBitmap.get(position)) { + if (groupIdOfNull == -1) { + // put null value in the first time. + groupIdOfNull = allocateGroupId(sourceArray, position, true); + } + return groupIdOfNull; + } + + long k = sourceArray[position]; + int pos; + if (k == 0L) { + if (this.containsZeroKey) { + return value[this.n]; + } + + this.containsZeroKey = true; + pos = this.n; + } else { + long[] key = this.key; + long curr; + if ((curr = key[pos = (int) HashCommon.mix(k) & this.mask]) != 0L) { + if (curr == k) { + return value[pos]; + } + + while ((curr = key[pos = pos + 1 & this.mask]) != 0L) { + if (curr == k) { + return value[pos]; + } + } + } + + // not found, insert new key. + key[pos] = k; + } + + // allocate new group id + int groupId = allocateGroupId(sourceArray, position, false); + + this.value[pos] = groupId; + if (this.size++ >= this.maxFill) { + this.rehash(HashCommon.arraySize(this.size + 1, this.f)); + } + + return groupId; + } + + private int allocateGroupId(long[] sourceArray, int position, boolean isNull) { + // use groupCount as group value array index. + if (isNull) { + groupKeyBuffer.appendNull(0); + } else { + groupKeyBuffer.appendLong(0, sourceArray[position]); + } + + int groupId = groupCount++; + + // Also add an initial value to accumulators + for (int i = 0; i < aggregators.size(); i++) { + valueAccumulators[i].appendInitValue(); + } + return groupId; + } + + protected void rehash(int newN) { + long[] key = this.key; + int[] value = this.value; + int mask = newN - 1; + + // large memory allocation: rehash of hash-table for aggregation. 
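`findGroupId` above follows the fastutil open-addressing convention: key value 0 marks an empty slot, so the genuine key 0 cannot live in the table and is parked in a dedicated overflow slot at index `n`, guarded by `containsZeroKey`. A compacted, hedged sketch of that probe-or-insert scheme (class and method names are illustrative, and `mix` is only a stand-in for fastutil's `HashCommon.mix`):

```java
// Illustrative sketch: long -> int open-addressing map with a zero-key slot.
final class LongToIntOpenHash {
    private final long[] key;   // key[n] is the reserved slot for the zero key
    private final int[] value;
    private final int n;        // capacity, a power of two
    private final int mask;
    private boolean containsZeroKey;

    LongToIntOpenHash(int capacityPowerOfTwo) {
        this.n = capacityPowerOfTwo;
        this.mask = n - 1;
        this.key = new long[n + 1]; // one extra slot for the zero key
        this.value = new int[n + 1];
    }

    /** Returns the existing mapping for k, or inserts newValue and returns it. */
    int getOrInsert(long k, int newValue) {
        if (k == 0L) {
            if (containsZeroKey) {
                return value[n];
            }
            containsZeroKey = true;
            value[n] = newValue;
            return newValue;
        }
        int pos = (int) mix(k) & mask;
        for (long curr; (curr = key[pos]) != 0L; pos = (pos + 1) & mask) {
            if (curr == k) {
                return value[pos]; // hit on the linear-probe chain
            }
        }
        key[pos] = k;              // first empty slot ends the probe
        value[pos] = newValue;
        return newValue;           // maxFill bookkeeping and rehash omitted; see the patch
    }

    private static long mix(long x) {
        x *= 0x9E3779B97F4A7C15L;  // multiplicative scramble in the spirit of HashCommon.mix
        return x ^ (x >>> 32);
    }
}
```

The rehash in progress here re-probes every surviving key into the doubled table, which is why the patch charges the memory allocator before creating the new arrays.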
+ memoryAllocator.allocateReservedMemory(2 * SizeOf.sizeOfLongArray(newN + 1)); + long[] newKey = new long[newN + 1]; + int[] newValue = new int[newN + 1]; + int i = this.n; + + int pos; + for (int j = this.realSize(); j-- != 0; newValue[pos] = value[i]) { + do { + --i; + } while (key[i] == 0L); + + if (newKey[pos = (int) HashCommon.mix(key[i]) & mask] != 0L) { + while (newKey[pos = pos + 1 & mask] != 0L) { + } + } + + newKey[pos] = key[i]; + } + + newValue[newN] = value[this.n]; + this.n = newN; + this.mask = mask; + this.maxFill = HashCommon.maxFill(this.n, this.f); + this.key = newKey; + this.value = newValue; + } + + private int realSize() { + return this.containsZeroKey ? this.size - 1 : this.size; + } + + @Override + public void close() { + key = null; + value = null; + groupIds = null; + sourceArray = null; + groupIdSelection = null; + } + + @Override + public long fixedEstimatedSize() { + return (chunkSize * Integer.BYTES) * 2 + + (chunkSize * Long.BYTES) + + +SizeOf.sizeOfLongArray(nullBitmap.size() / 64) + + Integer.BYTES * 5 + + Byte.BYTES * 3 + + Float.BYTES; + } + } + + /** + * Handle slice-slice, slice-int or int-slice type group by. + * It can fall back to DefaultGroupBy if some blocks are not in dictionary. + */ + private class SliceIntBatchGroupBy implements GroupBy { + IntIntBatchGroupBy dictIntBatchGroupBy; + DefaultGroupBy normalGroupBy; + + @Override + public void putChunk(Chunk keyChunk, Chunk inputChunk, IntArrayList groupIdResult) { + Preconditions.checkArgument(keyChunk.getBlockCount() == 2); + + if (normalGroupBy != null) { + // Already fall back to normal group by. + normalGroupBy.putChunk(keyChunk, inputChunk, groupIdResult); + } else if ((keyChunk.getBlock(0) instanceof SliceBlock + && ((SliceBlock) keyChunk.getBlock(0)).getDictionary() == null) + || (keyChunk.getBlock(1) instanceof SliceBlock + && ((SliceBlock) keyChunk.getBlock(1)).getDictionary() == null)) { + // if any block is a slice block but don't have dictionary. + + if (normalGroupBy == null) { + normalGroupBy = new DefaultGroupBy(); + } + + if (dictIntBatchGroupBy != null) { + // fall back IntIntBatchGroupBy to DefaultGroupBy + normalGroupBy.fillGroupKeyBuffer(); + dictIntBatchGroupBy.close(); + dictIntBatchGroupBy = null; + } + + // Just put into DefaultGroupBy. + normalGroupBy.putChunk(keyChunk, inputChunk, groupIdResult); + } else { + // Good Case: use dictionary for slice block group-by. 
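The good case that follows rests on a simple equivalence: once both key blocks are dictionary-encoded, grouping by strings is grouping by their small integer dictionary ids, which the int/int machinery handles directly. A hedged illustration of the idea, where `DictionarySketch` is an illustrative stand-in and not the patch's `DictionaryMapping` API:

```java
import java.util.HashMap;
import java.util.Map;

// Illustrative only: assigns dense ids in first-seen order, like a dictionary.
final class DictionarySketch {
    private final Map<String, Integer> ids = new HashMap<>();

    int idOf(String value) {
        // size() is read before the new entry is inserted, yielding dense ids 0..N-1
        return ids.computeIfAbsent(value, v -> ids.size());
    }

    public static void main(String[] args) {
        DictionarySketch dict = new DictionarySketch();
        String[] column = {"ab", "cd", "ab", "ef", "cd"};
        int[] encoded = new int[column.length];
        for (int i = 0; i < column.length; i++) {
            encoded[i] = dict.idOf(column[i]); // -> 0, 1, 0, 2, 1
        }
        // 'encoded' can now feed an int-keyed group-by path directly
    }
}
```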
+ if (dictIntBatchGroupBy == null) { + dictIntBatchGroupBy = new IntIntBatchGroupBy(); + } + dictIntBatchGroupBy.putChunk(keyChunk, inputChunk, groupIdResult); + } + } + + @Override + public long fixedEstimatedSize() { + if (normalGroupBy != null) { + return normalGroupBy.fixedEstimatedSize(); + } else if (dictIntBatchGroupBy != null) { + return dictIntBatchGroupBy.fixedEstimatedSize(); + } + return 0; + } + + @Override + public void close() { + if (normalGroupBy != null) { + normalGroupBy.close(); + } + if (dictIntBatchGroupBy != null) { + dictIntBatchGroupBy.close(); + } + } + } + + private class IntIntBatchGroupBy implements GroupBy { + protected int[] groupIds = new int[chunkSize]; + protected int[] intBlock1 = new int[chunkSize]; + protected int[] intBlock2 = new int[chunkSize]; + protected long[] serializedBlock = new long[chunkSize]; + protected int[] groupIdSelection = new int[chunkSize]; + + protected DictionaryMapping dictionaryMapping1 = DictionaryMapping.create(); + protected DictionaryMapping dictionaryMapping2 = DictionaryMapping.create(); + + protected long[] key; + protected int[] value; + protected int mask; + protected boolean containsZeroKey; + protected int n; + protected int maxFill; + protected int size; + protected final float f; + + // handle null value. + // It's for pair of key: (null, xxx) and not initialized. + // Storing the mapping of (xxx) - (groupId). + Int2IntOpenHashMap keyMap1; + boolean hasNull1; + BitSet nullBitmap1; + + // It's for pair of key: (xxx, null) and not initialized. + // Storing the mapping of (xxx) - (groupId). + Int2IntOpenHashMap keyMap2; + boolean hasNull2; + BitSet nullBitmap2; + + // The groupId of key: (null, null). + int groupIdOfDoubleNull; + + public IntIntBatchGroupBy() { + keyMap1 = null; + keyMap2 = null; + hasNull1 = false; + hasNull2 = false; + nullBitmap1 = new BitSet(chunkSize); + nullBitmap2 = new BitSet(chunkSize); + groupIdOfDoubleNull = -1; + + final int expected = expectedSize; + this.f = loadFactor; + if (!(f <= 0.0F) && !(f > 1.0F)) { + if (expected < 0) { + throw new IllegalArgumentException("The expected number of elements must be nonnegative"); + } else { + this.n = HashCommon.arraySize(expected, f); + this.mask = this.n - 1; + this.maxFill = HashCommon.maxFill(this.n, f); + + // large memory allocation: hash-table for aggregation. + memoryAllocator.allocateReservedMemory( + SizeOf.sizeOfIntArray(n + 1) + SizeOf.sizeOfLongArray(n + 1)); + this.key = new long[this.n + 1]; + this.value = new int[this.n + 1]; + } + } else { + throw new IllegalArgumentException("Load factor must be greater than 0 and smaller than or equal to 1"); + } + } + + @Override + public void putChunk(Chunk keyChunk, Chunk inputChunk, IntArrayList groupIdResult) { + Preconditions.checkArgument(keyChunk.getBlockCount() == 2); + + // clear null state + hasNull1 = false; + hasNull2 = false; + nullBitmap1.clear(); + nullBitmap2.clear(); + + Chunk[] aggregatorInputs = new Chunk[aggregators.size()]; + for (int i = 0; i < aggregators.size(); i++) { + aggregatorInputs[i] = valueConverters[i].apply(inputChunk); + } + + final int positionCount = inputChunk.getPositionCount(); + + // step 1. copy blocks into arrays + // step 2. serialize to long + // step 3. long type-specific hash, and put group value when first hit. + // step 4. accumulator with selection array. + + // step 1. 
copy blocks into arrays + Block keyBlock1 = keyChunk.getBlock(0).cast(Block.class); + Block keyBlock2 = keyChunk.getBlock(1).cast(Block.class); + + keyBlock1.copyToIntArray(0, positionCount, intBlock1, 0, dictionaryMapping1); + keyBlock2.copyToIntArray(0, positionCount, intBlock2, 0, dictionaryMapping2); + + // collect null value for all key blocks. + if (keyBlock1.mayHaveNull()) { + keyBlock1.collectNulls(0, positionCount, nullBitmap1, 0); + hasNull1 = !nullBitmap1.isEmpty(); + } + if (keyBlock2.mayHaveNull()) { + keyBlock2.collectNulls(0, positionCount, nullBitmap2, 0); + hasNull2 = !nullBitmap2.isEmpty(); + } + + // DictMapping.merge(dict) + // int[] remapping = DictMapping.get(hashCode) + // int newDictId = remapping[dictId] + + // step 2. serialize to long + // ((long) key1 << 32) | (key2 & serializedMask); + for (int i = 0; i < positionCount; i++) { + serializedBlock[i] = ((long) (intBlock1[i]) << 32) | ((intBlock2[i]) & SERIALIZED_MASK); + } + + // step 3. long type-specific hash + putHashTable(keyChunk, positionCount); + + // step 4. accumulator with selection array. + int groupCount = getGroupCount(); + if (groupCount <= GROUP_ID_COUNT_THRESHOLD) { + for (int groupId = 0; groupId < groupCount; groupId++) { + // collect the position that groupIds[position] = groupId into selection array. + int selSize = 0; + for (int position = 0; position < positionCount; position++) { + if (groupIds[position] == groupId) { + groupIdSelection[selSize++] = position; + } + } + + // for each aggregator function + if (selSize > 0) { + for (int aggIndex = 0; aggIndex < aggregators.size(); aggIndex++) { + valueAccumulators[aggIndex] + .accumulate(groupId, aggregatorInputs[aggIndex], groupIdSelection, selSize); + } + } + } + } else { + // Fall back to row-by-row execution mode. + for (int aggIndex = 0; aggIndex < aggregators.size(); aggIndex++) { + for (int pos = 0; pos < positionCount; pos++) { + valueAccumulators[aggIndex] + .accumulate(groupIds[pos], aggregatorInputs[aggIndex], pos); + } + } + } + if (groupIdResult != null) { + for (int i = 0; i < positionCount; i++) { + groupIdResult.add(groupIds[i]); + } + } + } + + @Override + public long fixedEstimatedSize() { + // The groupId and value map is always growing during aggregation. + return (chunkSize * Integer.BYTES) * 4 + + (chunkSize * Long.BYTES) + + Integer.BYTES * 5 + + Byte.BYTES * 3 + + Float.BYTES + + dictionaryMapping1.estimatedSize() + + dictionaryMapping2.estimatedSize(); + } + + @Override + public void close() { + key = null; + value = null; + groupIds = null; + intBlock1 = null; + intBlock2 = null; + serializedBlock = null; + groupIdSelection = null; + dictionaryMapping1.close(); + dictionaryMapping2.close(); + dictionaryMapping1 = null; + dictionaryMapping2 = null; + + if (keyMap1 != null) { + keyMap1.clear(); + keyMap1 = null; + } + + if (keyMap2 != null) { + keyMap2.clear(); + keyMap2 = null; + } + } + + public void putHashTable(Chunk keyChunk, int positionCount) { + for (int position = 0; position < positionCount; position++) { + groupIds[position] = findGroupId(serializedBlock, keyChunk, position); + } + } + + private int findGroupId(long[] serializedBlock, Chunk keyChunk, final int position) { + + if (hasNull1 || hasNull2) { + // handle null. + if (hasNull1 && hasNull2 && nullBitmap1.get(position) && nullBitmap2.get(position)) { + // case1: both keys are null. 
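Step 2 above is what makes the two-int case cheap: the two 32-bit keys are packed into one long so a single long-keyed hash table serves the composite key, with `SERIALIZED_MASK` preventing sign extension of the low half. A self-contained sketch of the packing and its inverse (0xFFFFFFFFL plays the role of `SERIALIZED_MASK` here):

```java
// Illustrative only: pack two ints into a long and recover them losslessly.
final class IntPairPacking {
    static long pack(int hi, int lo) {
        // without the mask, a negative 'lo' would sign-extend into the high word
        return ((long) hi << 32) | (lo & 0xFFFFFFFFL);
    }

    static int unpackHi(long packed) {
        return (int) (packed >>> 32);
    }

    static int unpackLo(long packed) {
        return (int) packed;
    }

    public static void main(String[] args) {
        long p = pack(7, -1);
        System.out.println(unpackHi(p) + " " + unpackLo(p)); // prints: 7 -1
    }
}
```

Null keys cannot be encoded this way, which is why the code below keeps separate side maps (`keyMap1`, `keyMap2`) and a dedicated group id for the (null, null) pair.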
+ if (groupIdOfDoubleNull == -1) { + // allocate new group id + groupIdOfDoubleNull = allocateGroupId(keyChunk, position); + } + return groupIdOfDoubleNull; + } else if (hasNull1 && nullBitmap1.get(position)) { + // case2: key of (null, xxx) + if (keyMap1 == null) { + // initialize keyMap1. + keyMap1 = new Int2IntOpenHashMap(); + keyMap1.defaultReturnValue(NOT_EXISTS); + } + + // put the xxx into key map and allocate group id if needed. + int intVal = intBlock2[position]; + int groupId; + if ((groupId = keyMap1.get(intVal)) == NOT_EXISTS) { + groupId = allocateGroupId(keyChunk, position); + keyMap1.put(intVal, groupId); + } + return groupId; + } else if (hasNull2 && nullBitmap2.get(position)) { + // case3: key of (xxx, null) + if (keyMap2 == null) { + // initialize keyMap2. + keyMap2 = new Int2IntOpenHashMap(); + keyMap2.defaultReturnValue(NOT_EXISTS); + } + + // put the xxx into key map and allocate group id if needed. + int intVal = intBlock1[position]; + int groupId; + if ((groupId = keyMap2.get(intVal)) == NOT_EXISTS) { + groupId = allocateGroupId(keyChunk, position); + keyMap2.put(intVal, groupId); + } + return groupId; + } + + // case 4: the key pair in this position is not (null, null), (null, xxx) or (xxx, null) + } + + long k = serializedBlock[position]; + int pos; + if (k == 0L) { + if (this.containsZeroKey) { + return value[this.n]; + } + + this.containsZeroKey = true; + pos = this.n; + } else { + long[] key = this.key; + long curr; + if ((curr = key[pos = (int) HashCommon.mix(k) & this.mask]) != 0L) { + if (curr == k) { + return value[pos]; + } + + while ((curr = key[pos = pos + 1 & this.mask]) != 0L) { + if (curr == k) { + return value[pos]; + } + } + } + + // not found, insert new key. + key[pos] = k; + } + + // allocate new group id + int groupId = allocateGroupId(keyChunk, position); + + this.value[pos] = groupId; + if (this.size++ >= this.maxFill) { + this.rehash(HashCommon.arraySize(this.size + 1, this.f)); + } + + return groupId; + } + + private int allocateGroupId(Chunk keyChunk, int position) { + // use groupCount as group value array index. + groupKeyBuffer.appendRow(keyChunk, position); + + int groupId = groupCount++; + + // Also add an initial value to accumulators + for (int i = 0; i < aggregators.size(); i++) { + valueAccumulators[i].appendInitValue(); + } + return groupId; + } + + protected void rehash(int newN) { + long[] key = this.key; + int[] value = this.value; + int mask = newN - 1; + + // large memory allocation: rehash of hash-table for aggregation. + memoryAllocator.allocateReservedMemory(SizeOf.sizeOfIntArray(newN + 1) + SizeOf.sizeOfLongArray(newN + 1)); + long[] newKey = new long[newN + 1]; + int[] newValue = new int[newN + 1]; + int i = this.n; + + int pos; + for (int j = this.realSize(); j-- != 0; newValue[pos] = value[i]) { + do { + --i; + } while (key[i] == 0L); + + if (newKey[pos = (int) HashCommon.mix(key[i]) & mask] != 0L) { + while (newKey[pos = pos + 1 & mask] != 0L) { + } + } + + newKey[pos] = key[i]; + } + + newValue[newN] = value[this.n]; + this.n = newN; + this.mask = mask; + this.maxFill = HashCommon.maxFill(this.n, this.f); + this.key = newKey; + this.value = newValue; + } + + private int realSize() { + return this.containsZeroKey ? this.size - 1 : this.size; + } + + } + + private class DefaultGroupBy implements GroupBy { + /** + * The array of keys (buckets) + */ + protected int[] keys; + /** + * The mask for wrapping a position counter + */ + protected int mask; + /** + * The current table size. 
+ */ + protected int n; + /** + * Number of entries in the set (including the key zero, if present). + */ + protected int size; + /** + * The acceptable load factor. + */ + protected float f; + /** + * Threshold after which we rehash. It must be the table size times {@link #f}. + */ + protected int maxFill; + + public DefaultGroupBy() { + this.f = loadFactor; + this.n = HashCommon.arraySize(expectedSize, loadFactor); + this.mask = n - 1; + this.maxFill = HashCommon.maxFill(n, loadFactor); + this.size = 0; + + // large memory allocation: hash-table for aggregation. + memoryAllocator.allocateReservedMemory(SizeOf.sizeOfIntArray(n)); + int[] keys = new int[n]; + Arrays.fill(keys, NOT_EXISTS); + this.keys = keys; + } + + @Override + public void putChunk(Chunk keyChunk, Chunk inputChunk, IntArrayList groupIdResult) { + Chunk[] aggregatorInputs; + aggregatorInputs = new Chunk[aggregators.size()]; + for (int i = 0; i < aggregators.size(); i++) { + aggregatorInputs[i] = valueConverters[i].apply(inputChunk); + } + + final int[] groupIds = new int[inputChunk.getPositionCount()]; + final boolean noGroupBy = noGroupBy(); + if (noGroupBy) { + for (int pos = 0; pos < inputChunk.getPositionCount(); pos++) { + groupIds[pos] = 0; + } + } else { + for (int pos = 0; pos < inputChunk.getPositionCount(); pos++) { + groupIds[pos] = innerPut(keyChunk, pos, -1); + } + } + + final Block groupIdBlock = IntegerBlock.wrap(groupIds); + for (int aggIndex = 0; aggIndex < aggregators.size(); aggIndex++) { + Chunk aggInputChunk = aggregatorInputs[aggIndex]; + boolean[] isDistinct = null; + if (distinctSets[aggIndex] != null) { + isDistinct = distinctSets[aggIndex].checkDistinct(groupIdBlock, aggregatorInputs[aggIndex]); + } + for (int pos = 0; pos < inputChunk.getPositionCount(); pos++) { + boolean noFilter = true; + if (filterArgs[aggIndex] > -1) { + Object obj = inputChunk.getBlock(filterArgs[aggIndex]).getObject(pos); + if (obj instanceof Boolean) { + noFilter = (Boolean) obj; + } else if (obj instanceof Long) { + long lVal = (Long) obj; + if (lVal < 1) { + noFilter = false; + } + } + } + if (noFilter) { + if (isDistinct == null || isDistinct[pos]) { + valueAccumulators[aggIndex].accumulate(groupIds[pos], aggInputChunk, pos); + } + } + } + } + + if (groupIdResult != null) { + for (int i = 0; i < groupIds.length; i++) { + groupIdResult.add(groupIds[i]); + } + } + } + + @Override + public long fixedEstimatedSize() { + // The default implementation of GroupBy use dynamic memory allocation. + return Integer.BYTES * 4 + Float.BYTES; + } + + @Override + public void close() { + this.keys = null; + } + + /** + * @param groupId if groupId == -1 means need to generate a new groupid + */ + int innerPut(Chunk chunk, int position, int groupId) { + return doInnerPutArray(chunk, position, groupId); + } + + /** + * Fill the elements from GroupKeyBuffer into hash table of this object, + * but don't allocate new Group ID. + *

+ * This is only used when falling back from another group-by implementation. + */ + public void fillGroupKeyBuffer() { + List groupKeyChunks = groupKeyBuffer.buildChunks(); + + // avoid rehash. + final int currentSize = groupCount; + if (currentSize > expectedSize) { + this.n = HashCommon.arraySize(currentSize, loadFactor); + this.mask = n - 1; + this.maxFill = HashCommon.maxFill(n, loadFactor); + this.size = 0; + + int[] keys = new int[n]; + Arrays.fill(keys, NOT_EXISTS); + this.keys = keys; + } + + // avoid allocating new group ids: pass the existing ids explicitly. + int groupId = 0; + for (Chunk chunk : groupKeyChunks) { + for (int i = 0; i < chunk.getPositionCount(); i++) { + innerPut(chunk, i, groupId++); + } + } + } + + private int doInnerPutArray(Chunk chunk, int position, int groupId) { + int h = HashCommon.mix(chunk.hashCode(position)) & mask; + int k = keys[h]; + + if (k != NOT_EXISTS) { + if (groupKeyBuffer.equals(k, chunk, position)) { + return k; + } + // Open-address probing + while ((k = keys[h = (h + 1) & mask]) != NOT_EXISTS) { + if (groupKeyBuffer.equals(k, chunk, position)) { + return k; + } + } + } + + // Deduplicate: only the group value from the first hit is kept. + if (groupId == -1) { + groupId = appendGroup(chunk, position); + } + + // otherwise, insert this position + keys[h] = groupId; + + if (size++ >= maxFill) { + rehash(); + } + return groupId; + } + + protected void rehash() { + this.n *= 2; + this.mask = n - 1; + this.maxFill = HashCommon.maxFill(n, this.f); + this.size = 0; + + // large memory allocation: rehash of hash-table for aggregation. + memoryAllocator.allocateReservedMemory(SizeOf.sizeOfIntArray(n)); + int[] keys = new int[n]; + Arrays.fill(keys, NOT_EXISTS); + this.keys = keys; + + List groupChunks = groupKeyBuffer.buildChunks(); + int groupId = 0; + for (Chunk chunk : groupChunks) { + for (int i = 0; i < chunk.getPositionCount(); i++) { + innerPut(chunk, i, groupId++); + } + } + } + } + + @Override + public void putChunk(Chunk keyChunk, Chunk inputChunk, IntArrayList groupIdResult) { + groupBy.putChunk(keyChunk, inputChunk, groupIdResult); } @Override @@ -144,7 +1338,9 @@ int appendGroup(Chunk chunk, int position) { int groupId = super.appendGroup(chunk, position); // Also add an initial value to accumulators - aggregators.forEach(Aggregator::appendInitValue); + for (int i = 0; i < aggregators.size(); i++) { + valueAccumulators[i].appendInitValue(); + } return groupId; } @@ -162,8 +1358,8 @@ protected List buildValueChunks() { List chunks = new ArrayList<>(); int offset = 0; for (int groupId = 0; groupId < getGroupCount(); groupId++) { - for (int i = 0; i < aggregators.size(); i++) { - aggregators.get(i).writeResultTo(groupId, valueBlockBuilders[i]); + for (int i = 0; i < valueAccumulators.length; i++) { + valueAccumulators[i].writeResultTo(groupId, valueBlockBuilders[i]); } // value chunks split by chunk size @@ -177,6 +1373,9 @@ protected List buildValueChunks() { chunks.add(buildValueChunk()); } + // set null to deallocate memory + this.valueAccumulators = null; + return chunks; } @@ -196,14 +1395,26 @@ public AggResultIterator buildChunks() { return new HashAggResultIterator(groupChunks, valueChunks); } + @Override + List buildGroupChunks() { + List result = super.buildGroupChunks(); + if (groupBy != null) { + groupBy.close(); + groupBy = null; + } + + return result; + } + public WorkProcessor buildHashSortedResult() { return buildResult(hashSortedGroupIds()); } private IntIterator hashSortedGroupIds() { this.groupChunks = groupKeyBuffer.buildChunks(); - this.keys = null; - this.map = null; + if (this.groupBy != null) { + 
this.groupBy.close(); + } IntComparator comparator = new AbstractIntComparator() { @Override public int compare(int position1, int position2) { @@ -272,9 +1483,9 @@ private WorkProcessor buildResult(IntIterator groupIds) { pageBuilder.declarePosition(); int groupId = groupIds.nextInt(); groupKeyBuffer.appendValuesTo(groupId, pageBuilder); - for (int i = 0; i < aggregators.size(); i++) { + for (int i = 0; i < valueAccumulators.length; i++) { BlockBuilder output = pageBuilder.getBlockBuilder(groupKeyType.length + i); - aggregators.get(i).writeResultTo(groupId, output); + valueAccumulators[i].writeResultTo(groupId, output); } } @@ -282,7 +1493,6 @@ private WorkProcessor buildResult(IntIterator groupIds) { }); } - @Override public void close() { } @@ -299,7 +1509,9 @@ public long estimateSize() { size += chunk.estimateSize(); } } else { - size += aggregators.stream().mapToLong(Aggregator::estimateSize).sum(); + for (int i = 0; i < aggregators.size(); i++) { + size += valueAccumulators[i].estimateSize(); + } } return size; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/AggregateUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/AggregateUtils.java index 25d90b0c3..9e0aa5a25 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/AggregateUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/AggregateUtils.java @@ -16,92 +16,42 @@ package com.alibaba.polardbx.executor.operator.util; -import com.alibaba.polardbx.common.datatype.Decimal; import com.alibaba.polardbx.common.properties.ConnectionParams; -import com.alibaba.polardbx.common.properties.ConnectionProperties; -import com.alibaba.polardbx.executor.accumulator.CheckSumAccumulator; -import com.alibaba.polardbx.executor.accumulator.CheckSumMergeAccumulator; -import com.alibaba.polardbx.executor.calc.Aggregator; -import com.alibaba.polardbx.executor.calc.aggfunctions.Avg; -import com.alibaba.polardbx.executor.calc.aggfunctions.BitAnd; -import com.alibaba.polardbx.executor.calc.aggfunctions.BitOr; -import com.alibaba.polardbx.executor.calc.aggfunctions.BitXor; -import com.alibaba.polardbx.executor.calc.aggfunctions.Byte2ByteMax; -import com.alibaba.polardbx.executor.calc.aggfunctions.Byte2ByteMin; -import com.alibaba.polardbx.executor.calc.aggfunctions.Byte2DecimalAvg; -import com.alibaba.polardbx.executor.calc.aggfunctions.Byte2DecimalSum; -import com.alibaba.polardbx.executor.calc.aggfunctions.Byte2UInt64BitAnd; -import com.alibaba.polardbx.executor.calc.aggfunctions.Byte2UInt64BitOr; -import com.alibaba.polardbx.executor.calc.aggfunctions.Byte2UInt64BitXor; -import com.alibaba.polardbx.executor.calc.aggfunctions.Count; -import com.alibaba.polardbx.executor.calc.aggfunctions.CountRow; -import com.alibaba.polardbx.executor.calc.aggfunctions.CumeDist; -import com.alibaba.polardbx.executor.calc.aggfunctions.Decimal2DecimalAvg; -import com.alibaba.polardbx.executor.calc.aggfunctions.Decimal2DecimalMax; -import com.alibaba.polardbx.executor.calc.aggfunctions.Decimal2DecimalMin; -import com.alibaba.polardbx.executor.calc.aggfunctions.Decimal2DecimalSum; -import com.alibaba.polardbx.executor.calc.aggfunctions.Decimal2UInt64BitAnd; -import com.alibaba.polardbx.executor.calc.aggfunctions.Decimal2UInt64BitOr; -import com.alibaba.polardbx.executor.calc.aggfunctions.Decimal2UInt64BitXor; -import com.alibaba.polardbx.executor.calc.aggfunctions.DenseRank; -import 
com.alibaba.polardbx.executor.calc.aggfunctions.Double2DoubleAvg; -import com.alibaba.polardbx.executor.calc.aggfunctions.Double2DoubleMax; -import com.alibaba.polardbx.executor.calc.aggfunctions.Double2DoubleMin; -import com.alibaba.polardbx.executor.calc.aggfunctions.Double2DoubleSum; -import com.alibaba.polardbx.executor.calc.aggfunctions.FirstValue; -import com.alibaba.polardbx.executor.calc.aggfunctions.Float2DoubleAvg; -import com.alibaba.polardbx.executor.calc.aggfunctions.Float2DoubleSum; -import com.alibaba.polardbx.executor.calc.aggfunctions.Float2FloatMax; -import com.alibaba.polardbx.executor.calc.aggfunctions.Float2FloatMin; -import com.alibaba.polardbx.executor.calc.aggfunctions.GroupConcat; -import com.alibaba.polardbx.executor.calc.aggfunctions.Int2DecimalAvg; -import com.alibaba.polardbx.executor.calc.aggfunctions.Int2DecimalSum; -import com.alibaba.polardbx.executor.calc.aggfunctions.Int2IntMax; -import com.alibaba.polardbx.executor.calc.aggfunctions.Int2IntMin; -import com.alibaba.polardbx.executor.calc.aggfunctions.Int2UInt64BitAnd; -import com.alibaba.polardbx.executor.calc.aggfunctions.Int2UInt64BitOr; -import com.alibaba.polardbx.executor.calc.aggfunctions.Int2UInt64BitXor; -import com.alibaba.polardbx.executor.calc.aggfunctions.InternalFirstValue; -import com.alibaba.polardbx.executor.calc.aggfunctions.Lag; -import com.alibaba.polardbx.executor.calc.aggfunctions.LastValue; -import com.alibaba.polardbx.executor.calc.aggfunctions.Lead; -import com.alibaba.polardbx.executor.calc.aggfunctions.Long2DecimalAvg; -import com.alibaba.polardbx.executor.calc.aggfunctions.Long2DecimalSum; -import com.alibaba.polardbx.executor.calc.aggfunctions.Long2LongMax; -import com.alibaba.polardbx.executor.calc.aggfunctions.Long2LongMin; -import com.alibaba.polardbx.executor.calc.aggfunctions.Long2LongSum0; -import com.alibaba.polardbx.executor.calc.aggfunctions.Long2UInt64BitAnd; -import com.alibaba.polardbx.executor.calc.aggfunctions.Long2UInt64BitOr; -import com.alibaba.polardbx.executor.calc.aggfunctions.Long2UInt64BitXor; -import com.alibaba.polardbx.executor.calc.aggfunctions.Max; -import com.alibaba.polardbx.executor.calc.aggfunctions.Min; -import com.alibaba.polardbx.executor.calc.aggfunctions.NThValue; -import com.alibaba.polardbx.executor.calc.aggfunctions.NTile; -import com.alibaba.polardbx.executor.calc.aggfunctions.OtherType2UInt64BitAnd; -import com.alibaba.polardbx.executor.calc.aggfunctions.OtherType2UInt64BitOr; -import com.alibaba.polardbx.executor.calc.aggfunctions.OtherType2UInt64BitXor; -import com.alibaba.polardbx.executor.calc.aggfunctions.PercentRank; -import com.alibaba.polardbx.executor.calc.aggfunctions.Rank; -import com.alibaba.polardbx.executor.calc.aggfunctions.RowNumber; -import com.alibaba.polardbx.executor.calc.aggfunctions.Short2DecimalAvg; -import com.alibaba.polardbx.executor.calc.aggfunctions.Short2DecimalSum; -import com.alibaba.polardbx.executor.calc.aggfunctions.Short2ShortMax; -import com.alibaba.polardbx.executor.calc.aggfunctions.Short2ShortMin; -import com.alibaba.polardbx.executor.calc.aggfunctions.Short2UInt64BitAnd; -import com.alibaba.polardbx.executor.calc.aggfunctions.Short2UInt64BitOr; -import com.alibaba.polardbx.executor.calc.aggfunctions.Short2UInt64BitXor; -import com.alibaba.polardbx.executor.calc.aggfunctions.SingleValue; -import com.alibaba.polardbx.executor.calc.aggfunctions.SpecificType2DecimalAvg; -import com.alibaba.polardbx.executor.calc.aggfunctions.SpecificType2DoubleAvgV2; -import 
com.alibaba.polardbx.executor.calc.aggfunctions.Sum; -import com.alibaba.polardbx.executor.calc.aggfunctions.Sum0; -import com.alibaba.polardbx.executor.calc.aggfunctions.WrapedLong2WarpedLongMax; -import com.alibaba.polardbx.executor.calc.aggfunctions.WrapedLong2WarpedLongMin; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.AvgV2; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.BitAnd; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.BitOr; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.BitXor; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.CheckSum; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.CheckSumMerge; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.CheckSumV2; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.CheckSumV2Merge; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.CountV2; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.CumeDist; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.DenseRank; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.FinalHyperLoglog; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.FirstValue; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.GroupConcat; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.HyperLoglog; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.InternalFirstValue; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.Lag; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.LastValue; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.Lead; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.MaxV2; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.MinV2; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.NThValue; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.NTile; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.PartialHyperLoglog; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.PercentRank; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.Rank; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.RowNumber; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.SingleValue; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.Sum0; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.SumV2; import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; +import com.alibaba.polardbx.optimizer.memory.MemoryManager; import org.apache.calcite.rel.core.AggregateCall; import org.apache.calcite.rel.core.GroupConcatAggregateCall; import org.apache.calcite.rel.core.WindowAggregateCall; @@ -109,116 +59,56 @@ import org.apache.calcite.util.ImmutableBitSet; import org.apache.commons.lang3.ArrayUtils; -import java.sql.Date; -import 
java.sql.Time; -import java.sql.Timestamp; import java.util.ArrayList; import java.util.Arrays; import java.util.List; /** * Abstract AggHandler + * + * @author Eric Fu */ public abstract class AggregateUtils { public static final int MAX_HASH_TABLE_SIZE = 131064; public static final int MIN_HASH_TABLE_SIZE = 1024; - public static List convertAggregators(List inputTypes, - List aggOutputTypes, - List aggCallList, + public static List convertAggregators(List aggCallList, ExecutionContext executionContext, MemoryAllocatorCtx memoryAllocator) { List aggList = new ArrayList<>(aggCallList.size()); for (int i = 0, n = aggCallList.size(); i < n; i++) { AggregateCall call = aggCallList.get(i); - SqlKind function = call.getAggregation().getKind(); - if (SqlKind.WINDOW_FUNCTION.contains(function)) { - aggList.add(convertWindowFunction(call)); - continue; - } boolean isDistinct = call.isDistinct(); + // int precision = call.getType().getPrecision(); + // int scale = call.getType().getScale(); + // check is call.isDistinct() + SqlKind function = call.getAggregation().getKind(); int filterArg = call.filterArg; Integer index = -1; if (call.getArgList() != null && call.getArgList().size() > 0) { index = call.getArgList().get(0); } - - DataType inputType = index >= 0 ? inputTypes.get(index) : null; - Class inputTypeClass = inputType == null ? null : inputType.getDataClass(); - DataType outputType = aggOutputTypes.get(i); - Class outputTypeClass = outputType == null ? null : outputType.getDataClass(); - switch (function) { case AVG: { - if (outputTypeClass == Decimal.class) { - if (inputTypeClass == Decimal.class) { - aggList.add(new Decimal2DecimalAvg(index, isDistinct, inputType, outputType, filterArg)); - } else if (inputTypeClass == Byte.class) { - aggList.add(new Byte2DecimalAvg(index, isDistinct, inputType, outputType, filterArg)); - } else if (inputTypeClass == Short.class) { - aggList.add(new Short2DecimalAvg(index, isDistinct, inputType, outputType, filterArg)); - } else if (inputTypeClass == Integer.class) { - aggList.add(new Int2DecimalAvg(index, isDistinct, inputType, outputType, filterArg)); - } else if (inputTypeClass == Long.class) { - aggList.add(new Long2DecimalAvg(index, isDistinct, inputType, outputType, filterArg)); - } else { - aggList.add(new Avg(index, isDistinct, outputType, filterArg)); - } - } else if (outputTypeClass == Double.class) { - if (inputTypeClass == Double.class) { - aggList.add(new Double2DoubleAvg(index, isDistinct, inputType, outputType, filterArg)); - } else if (inputTypeClass == Float.class) { - aggList.add(new Float2DoubleAvg(index, isDistinct, inputType, outputType, filterArg)); - } else { - aggList.add(new Avg(index, isDistinct, outputType, filterArg)); - } - } else { - aggList.add(new Avg(index, isDistinct, outputType, filterArg)); - } + aggList.add(new AvgV2(index, isDistinct, memoryAllocator, filterArg)); break; } case SUM: { - if (outputTypeClass == Decimal.class) { - if (inputTypeClass == Decimal.class) { - aggList.add(new Decimal2DecimalSum(index, isDistinct, inputType, outputType, filterArg)); - } else if (inputTypeClass == Byte.class) { - aggList.add(new Byte2DecimalSum(index, isDistinct, inputType, outputType, filterArg)); - } else if (inputTypeClass == Short.class) { - aggList.add(new Short2DecimalSum(index, isDistinct, inputType, outputType, filterArg)); - } else if (inputTypeClass == Integer.class) { - aggList.add(new Int2DecimalSum(index, isDistinct, inputType, outputType, filterArg)); - } else if (inputTypeClass == Long.class) { - aggList.add(new 
Long2DecimalSum(index, isDistinct, inputType, outputType, filterArg)); - } else { - aggList.add(new Sum(index, isDistinct, outputType, filterArg)); - } - } else if (outputTypeClass == Double.class && inputTypeClass == Double.class) { - aggList.add(new Double2DoubleSum(index, isDistinct, inputType, outputType, filterArg)); - } else if (outputTypeClass == Double.class && inputTypeClass == Float.class) { - aggList.add(new Float2DoubleSum(index, isDistinct, inputType, outputType, filterArg)); - } else { - aggList.add(new Sum(index, isDistinct, outputType, filterArg)); - } + aggList.add(new SumV2(index, isDistinct, memoryAllocator, filterArg)); break; } case SUM0: { - if (inputTypeClass == Long.class && outputTypeClass == Long.class) { - aggList.add(new Long2LongSum0(index, isDistinct, inputType, outputType, filterArg)); - } else { - aggList.add(new Sum0(index, isDistinct, outputType, filterArg)); - } + aggList.add(new Sum0(index, isDistinct, memoryAllocator, filterArg)); break; } case COUNT: { int[] countIdx = call.getArgList().isEmpty() ? ArrayUtils.EMPTY_INT_ARRAY : Arrays - .stream(call.getArgList().toArray(new Integer[0])).mapToInt(Integer::valueOf).toArray(); - if (countIdx.length == 0 || (countIdx[0] >= inputTypes.size())) { - aggList.add(new CountRow(new int[0], isDistinct, filterArg)); - } else { - aggList.add(new Count(countIdx, isDistinct, filterArg)); - } + .stream(call.getArgList().toArray(new Integer[0])) + .mapToInt(Integer::valueOf) + .toArray(); + aggList.add(new CountV2(countIdx, isDistinct, memoryAllocator, filterArg)); break; } case SINGLE_VALUE: { @@ -226,109 +116,23 @@ public static List convertAggregators(List inputTypes, break; } case MAX: { - if (inputTypeClass == Byte.class && outputTypeClass == Byte.class) { - aggList.add(new Byte2ByteMax(index, inputType, outputType, filterArg)); - } else if (inputTypeClass == Short.class && outputTypeClass == Short.class) { - aggList.add(new Short2ShortMax(index, inputType, outputType, filterArg)); - } else if (inputTypeClass == Integer.class && outputTypeClass == Integer.class) { - aggList.add(new Int2IntMax(index, inputType, outputType, filterArg)); - } else if (inputTypeClass == Long.class && outputTypeClass == Long.class) { - aggList.add(new Long2LongMax(index, inputType, outputType, filterArg)); - } else if (inputTypeClass == Decimal.class && outputTypeClass == Decimal.class) { - aggList.add(new Decimal2DecimalMax(index, inputType, outputType, filterArg)); - } else if (inputTypeClass == Float.class && outputTypeClass == Float.class) { - aggList.add(new Float2FloatMax(index, inputType, outputType, filterArg)); - } else if (inputTypeClass == Double.class && outputTypeClass == Double.class) { - aggList.add(new Double2DoubleMax(index, inputType, outputType, filterArg)); - } else if ((inputTypeClass == outputTypeClass) && (inputTypeClass == Date.class - || inputTypeClass == Time.class || inputTypeClass == Timestamp.class)) { - aggList.add(new WrapedLong2WarpedLongMax(index, inputType, outputType, filterArg)); - } else { - aggList.add(new Max(index, inputType, outputType, filterArg)); - } + aggList.add(new MaxV2(index, filterArg)); break; } case MIN: { - if (inputTypeClass == Byte.class && outputTypeClass == Byte.class) { - aggList.add(new Byte2ByteMin(index, inputType, outputType, filterArg)); - } else if (inputTypeClass == Short.class && outputTypeClass == Short.class) { - aggList.add(new Short2ShortMin(index, inputType, outputType, filterArg)); - } else if (inputTypeClass == Integer.class && outputTypeClass == Integer.class) { - 
aggList.add(new Int2IntMin(index, inputType, outputType, filterArg)); - } else if (inputTypeClass == Long.class && outputTypeClass == Long.class) { - aggList.add(new Long2LongMin(index, inputType, outputType, filterArg)); - } else if (inputTypeClass == Decimal.class && outputTypeClass == Decimal.class) { - aggList.add(new Decimal2DecimalMin(index, inputType, outputType, filterArg)); - } else if (inputTypeClass == Float.class && outputTypeClass == Float.class) { - aggList.add(new Float2FloatMin(index, inputType, outputType, filterArg)); - } else if (inputTypeClass == Double.class && outputTypeClass == Double.class) { - aggList.add(new Double2DoubleMin(index, inputType, outputType, filterArg)); - } else if ((inputTypeClass == outputTypeClass) && (inputTypeClass == Date.class - || inputTypeClass == Time.class || inputTypeClass == Timestamp.class)) { - aggList.add(new WrapedLong2WarpedLongMin(index, inputType, outputType, filterArg)); - } else { - aggList.add(new Min(index, inputType, outputType, filterArg)); - } + aggList.add(new MinV2(index, filterArg)); break; } case BIT_OR: { - if (outputType == DataTypes.ULongType) { - if (inputTypeClass == Byte.class) { - aggList.add(new Byte2UInt64BitOr(index, inputType, filterArg)); - } else if (inputTypeClass == Short.class) { - aggList.add(new Short2UInt64BitOr(index, inputType, filterArg)); - } else if (inputTypeClass == Integer.class) { - aggList.add(new Int2UInt64BitOr(index, inputType, filterArg)); - } else if (inputTypeClass == Long.class) { - aggList.add(new Long2UInt64BitOr(index, inputType, filterArg)); - } else if (inputTypeClass == Decimal.class) { - aggList.add(new Decimal2UInt64BitOr(index, inputType, filterArg)); - } else { - aggList.add(new OtherType2UInt64BitOr(index, inputType, filterArg)); - } - } else { - aggList.add(new BitOr(index, inputType, outputType, filterArg)); - } + aggList.add(new BitOr(index, filterArg)); break; } - case BIT_XOR: { - if (outputType == DataTypes.ULongType) { - if (inputTypeClass == Byte.class) { - aggList.add(new Byte2UInt64BitXor(index, inputType, filterArg)); - } else if (inputTypeClass == Short.class) { - aggList.add(new Short2UInt64BitXor(index, inputType, filterArg)); - } else if (inputTypeClass == Integer.class) { - aggList.add(new Int2UInt64BitXor(index, inputType, filterArg)); - } else if (inputTypeClass == Long.class) { - aggList.add(new Long2UInt64BitXor(index, inputType, filterArg)); - } else if (inputTypeClass == Decimal.class) { - aggList.add(new Decimal2UInt64BitXor(index, inputType, filterArg)); - } else { - aggList.add(new OtherType2UInt64BitXor(index, inputType, filterArg)); - } - } else { - aggList.add(new BitXor(index, inputType, outputType, filterArg)); - } + case BIT_AND: { + aggList.add(new BitAnd(index, filterArg)); break; } - case BIT_AND: { - if (outputType == DataTypes.ULongType) { - if (inputTypeClass == Byte.class) { - aggList.add(new Byte2UInt64BitAnd(index, inputType, filterArg)); - } else if (inputTypeClass == Short.class) { - aggList.add(new Short2UInt64BitAnd(index, inputType, filterArg)); - } else if (inputTypeClass == Integer.class) { - aggList.add(new Int2UInt64BitAnd(index, inputType, filterArg)); - } else if (inputTypeClass == Long.class) { - aggList.add(new Long2UInt64BitAnd(index, inputType, filterArg)); - } else if (inputTypeClass == Decimal.class) { - aggList.add(new Decimal2UInt64BitAnd(index, inputType, filterArg)); - } else { - aggList.add(new OtherType2UInt64BitAnd(index, inputType, filterArg)); - } - } else { - aggList.add(new BitAnd(index, inputType, 
outputType, filterArg)); - } + case BIT_XOR: { + aggList.add(new BitXor(index, filterArg)); break; } case GROUP_CONCAT: { @@ -349,89 +153,114 @@ public static List convertAggregators(List inputTypes, groupConcatMaxLen, executionContext.getEncoding(), memoryAllocator, - filterArg, - outputType)); + filterArg)); break; } case __FIRST_VALUE: { - aggList.add(new InternalFirstValue(index, outputType, filterArg, executionContext)); + aggList.add(new InternalFirstValue(index, filterArg)); + break; + } + case ROW_NUMBER: { + aggList.add(new RowNumber()); + break; + } + case RANK: { + aggList.add(new Rank(call.getArgList().stream().mapToInt(Integer::valueOf).toArray(), filterArg)); + break; + } + case DENSE_RANK: { + aggList.add(new DenseRank(call.getArgList().stream().mapToInt(Integer::valueOf).toArray(), filterArg)); + break; + } + case PERCENT_RANK: { + aggList + .add(new PercentRank(call.getArgList().stream().mapToInt(Integer::valueOf).toArray(), filterArg)); + break; + } + case HYPER_LOGLOG: { + aggList.add( + new HyperLoglog(call.getArgList().stream().mapToInt(Integer::valueOf).toArray(), filterArg)); + break; + } + case PARTIAL_HYPER_LOGLOG: { + aggList.add( + new PartialHyperLoglog(call.getArgList().stream().mapToInt(Integer::valueOf).toArray(), filterArg)); + break; + } + case FINAL_HYPER_LOGLOG: { + aggList.add( + new FinalHyperLoglog(call.getArgList().stream().mapToInt(Integer::valueOf).toArray(), filterArg)); break; } case CHECK_SUM: { - aggList.add(new CheckSumAccumulator(GroupConcat.toIntArray(call.getArgList()), outputType, filterArg, - inputTypes)); + aggList.add(new CheckSum(call.getArgList().stream().mapToInt(Integer::valueOf).toArray(), filterArg)); break; } case CHECK_SUM_MERGE: { aggList.add( - new CheckSumMergeAccumulator(index, outputType, filterArg, inputTypes)); + new CheckSumMerge(call.getArgList().stream().mapToInt(Integer::valueOf).toArray(), filterArg)); + break; + } + case CHECK_SUM_V2: { + aggList.add(new CheckSumV2(call.getArgList().stream().mapToInt(Integer::valueOf).toArray(), filterArg)); + break; + } + case CHECK_SUM_V2_MERGE: { + aggList.add( + new CheckSumV2Merge(call.getArgList().stream().mapToInt(Integer::valueOf).toArray(), filterArg)); + break; + } + case CUME_DIST: { + aggList.add(new CumeDist(call.getArgList().stream().mapToInt(Integer::valueOf).toArray(), filterArg)); + break; + } + case FIRST_VALUE: { + aggList.add(new FirstValue(index, filterArg)); + break; + } + case LAST_VALUE: { + aggList.add(new LastValue(index, filterArg)); + break; + } + case NTH_VALUE: { + WindowAggregateCall windowAggregateCall = (WindowAggregateCall) call; + aggList.add(new NThValue(windowAggregateCall.getArgList().get(0), + windowAggregateCall.getNTHValueOffset(call.getArgList().get(1)), filterArg)); + break; + } + case NTILE: { + WindowAggregateCall windowAggregateCall = (WindowAggregateCall) call; + aggList.add(new NTile(windowAggregateCall.getNTileOffset(index), filterArg)); + break; + } + case LAG: + case LEAD: { + WindowAggregateCall windowAggregateCall = (WindowAggregateCall) call; + int lagLeadOffset = 1; + if (call.getArgList().size() > 1) { + lagLeadOffset = windowAggregateCall.getLagLeadOffset(call.getArgList().get(1)); + } + String defaultValue = null; + if (call.getArgList().size() > 2) { + defaultValue = windowAggregateCall.getLagLeadDefaultValue(call.getArgList().get(2)); + } + if (function == SqlKind.LAG) { + aggList.add(new Lag(index, lagLeadOffset, + defaultValue, filterArg)); + } else { + aggList.add(new Lead(index, lagLeadOffset, + defaultValue, 
filterArg)); + } break; } default: throw new UnsupportedOperationException( "Unsupported agg function to convert:" + function.name()); - } } return aggList; } - public static Aggregator convertWindowFunction(AggregateCall call) { - SqlKind function = call.getAggregation().getKind(); - int filterArg = call.filterArg; - switch (function) { - case ROW_NUMBER: { - return new RowNumber(filterArg); - } - case RANK: { - return new Rank(call.getArgList().stream().mapToInt(Integer::valueOf).toArray(), filterArg); - } - case DENSE_RANK: { - return new DenseRank(call.getArgList().stream().mapToInt(Integer::valueOf).toArray(), filterArg); - } - case PERCENT_RANK: { - return new PercentRank(call.getArgList().stream().mapToInt(Integer::valueOf).toArray(), filterArg); - } case CUME_DIST: { - return new CumeDist(call.getArgList().stream().mapToInt(Integer::valueOf).toArray(), filterArg); - } - case FIRST_VALUE: { - return new FirstValue(call.getArgList().get(0), filterArg); - } - case LAST_VALUE: { - return new LastValue(call.getArgList().get(0), filterArg); - } - case NTH_VALUE: { - WindowAggregateCall windowAggregateCall = (WindowAggregateCall) call; - return new NThValue(windowAggregateCall.getArgList().get(0), - windowAggregateCall.getNTHValueOffset(call.getArgList().get(1)), filterArg); - } - case NTILE: { - WindowAggregateCall windowAggregateCall = (WindowAggregateCall) call; - return new NTile(windowAggregateCall.getNTileOffset(call.getArgList().get(0)), filterArg); - } - case LAG: - case LEAD: { - WindowAggregateCall windowAggregateCall = (WindowAggregateCall) call; - int lagLeadOffset = 1; - if (call.getArgList().size() > 1) { - lagLeadOffset = windowAggregateCall.getLagLeadOffset(call.getArgList().get(1)); - } - String defaultValue = null; - if (call.getArgList().size() > 2) { - defaultValue = windowAggregateCall.getLagLeadDefaultValue(call.getArgList().get(2)); - } - if (function == SqlKind.LAG) { - return new Lag(call.getArgList().get(0), lagLeadOffset, defaultValue, filterArg); - } else { - return new Lead(call.getArgList().get(0), lagLeadOffset, defaultValue, filterArg); - } - } - default: - throw new UnsupportedOperationException( - "Unsupported window function to convert:" + function.name()); - } - } - public static int[] convertBitSet(ImmutableBitSet gp) { List list = gp.asList(); int[] groups = new int[list.size()]; @@ -462,8 +291,14 @@ public static DataType[] collectDataTypes(List columns, int start, int } public static int estimateHashTableSize(int expectedOutputRowCount, ExecutionContext context) { - int maxHashTableSize = - MAX_HASH_TABLE_SIZE * context.getParamManager().getInt(ConnectionParams.AGG_MAX_HASH_TABLE_FACTOR); + + int maxHashTableSize; + int hashTableFactor = context.getParamManager().getInt(ConnectionParams.AGG_MAX_HASH_TABLE_FACTOR); + if (hashTableFactor > 1) { + maxHashTableSize = MAX_HASH_TABLE_SIZE * hashTableFactor; + } else { + maxHashTableSize = MAX_HASH_TABLE_SIZE * MemoryManager.getInstance().getAggHashTableFactor(); + } int minHashTableSize = MIN_HASH_TABLE_SIZE / context.getParamManager().getInt(ConnectionParams.AGG_MIN_HASH_TABLE_FACTOR); if (expectedOutputRowCount > maxHashTableSize) { @@ -473,8 +308,4 @@ public static int estimateHashTableSize(int expectedOutputRowCount, ExecutionCon } return expectedOutputRowCount; } - public static boolean supportSpill(Aggregator aggregator) { - return !(aggregator instanceof Avg) && !(aggregator instanceof SpecificType2DecimalAvg) - && !(aggregator instanceof SpecificType2DoubleAvgV2) && !(aggregator instanceof 
GroupConcat); - } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/AntiJoinResultIterator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/AntiJoinResultIterator.java new file mode 100644 index 000000000..1e3bec710 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/AntiJoinResultIterator.java @@ -0,0 +1,95 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.util; + +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.Chunk; + +import java.util.List; + +public class AntiJoinResultIterator { + private List matchedPosition; + + private ChunksIndex buildChunk; + + BlockBuilder[] blockBuilders; + + private final int chunkLimit; + + private volatile int buildPosition; + + /** + * exclude + */ + private final int endOffset; + + public AntiJoinResultIterator(List matchedPosition, ChunksIndex buildChunk, BlockBuilder[] blockBuilders, + int chunkLimit, int startOffset, int endOffset) { + this.matchedPosition = matchedPosition; + this.buildChunk = buildChunk; + this.chunkLimit = chunkLimit; + this.buildPosition = startOffset; + this.blockBuilders = blockBuilders; + this.endOffset = endOffset; + } + + public Chunk nextChunk() { + if (buildPosition >= endOffset) { + return null; + } + + while (buildPosition < endOffset) { + buildAntiJoinRow(buildChunk, matchedPosition.get(buildPosition), blockBuilders); + buildPosition++; + // check buffered data is full + if (currentPosition() >= chunkLimit) { + return buildChunkAndReset(blockBuilders); + } + } + return buildChunkAndReset(blockBuilders); + } + + int currentPosition() { + return blockBuilders[0].getPositionCount(); + } + + protected static void buildAntiJoinRow(ChunksIndex inputChunk, int position, BlockBuilder[] blockBuilders) { + // inner side only + long chunkIdAndPos = inputChunk.getAddress(position); + for (int i = 0; i < blockBuilders.length; i++) { + inputChunk.getChunk(SyntheticAddress.decodeIndex(chunkIdAndPos)).getBlock(i) + .writePositionTo(SyntheticAddress.decodeOffset(chunkIdAndPos), blockBuilders[i]); + } + } + + protected static Chunk buildChunkAndReset(BlockBuilder[] blockBuilders) { + Block[] blocks = new Block[blockBuilders.length]; + for (int i = 0; i < blockBuilders.length; i++) { + blocks[i] = blockBuilders[i].build(); + } + for (int i = 0; i < blockBuilders.length; i++) { + blockBuilders[i] = blockBuilders[i].newBlockBuilder(); + } + return new Chunk(blocks); + } + + public boolean finished() { + return buildPosition >= endOffset; + } +} + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/AtomicIntegerArray.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/AtomicIntegerArray.java new file mode 100644 index 
000000000..8cb0e9b29 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/AtomicIntegerArray.java @@ -0,0 +1,374 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.util; + +import java.lang.reflect.Field; +import java.util.Arrays; +import java.util.function.IntUnaryOperator; +import java.util.function.IntBinaryOperator; + +import sun.misc.Unsafe; + +/** + * An {@code int} array in which elements may be updated atomically. + * See the {@link java.util.concurrent.atomic} package + * specification for description of the properties of atomic + * variables. + * + * @author Doug Lea + * @since 1.5 + */ +public class AtomicIntegerArray implements java.io.Serializable { + private static final long serialVersionUID = 2862133569453604235L; + + private static Unsafe unsafe; + private static int base; + private static int shift; + private final int[] array; + + static { + try { + // fetch theUnsafe object + Field field = Unsafe.class.getDeclaredField("theUnsafe"); + field.setAccessible(true); + unsafe = (Unsafe) field.get(null); + if (unsafe == null) { + throw new RuntimeException("Unsafe access not available"); + } + + base = unsafe.arrayBaseOffset(int[].class); + + int scale = unsafe.arrayIndexScale(int[].class); + if ((scale & (scale - 1)) != 0) { + throw new Error("data type scale not a power of two"); + } + shift = 31 - Integer.numberOfLeadingZeros(scale); + } catch (Throwable t) { + throw new RuntimeException(t); + } + } + + private long checkedByteOffset(int i) { + if (i < 0 || i >= array.length) { + throw new IndexOutOfBoundsException("index " + i); + } + + return byteOffset(i); + } + + private static long byteOffset(int i) { + return ((long) i << shift) + base; + } + + public AtomicIntegerArray(int length) { + array = new int[length]; + } + + /** + * Creates a new AtomicIntegerArray of the given length, with all + * elements initially zero. + * + * @param length the length of the array + */ + public AtomicIntegerArray(int length, int initialValue) { + int[] keys = new int[length]; + Arrays.fill(keys, initialValue); + array = keys; + } + + /** + * Creates a new AtomicIntegerArray with the same length as, and + * all elements copied from, the given array. + * + * @param array the array to copy elements from + * @throws NullPointerException if array is null + */ + public AtomicIntegerArray(int[] array) { + // Visibility guaranteed by final field guarantees + this.array = array.clone(); + } + + /** + * Returns the length of the array. + * + * @return the length of the array + */ + public final int length() { + return array.length; + } + + /** + * Gets the current value at position {@code i}. 
+ * + * @param i the index + * @return the current value + */ + public final int get(int i) { + return getRaw(checkedByteOffset(i)); + } + + private int getRaw(long offset) { + return unsafe.getIntVolatile(array, offset); + } + + /** + * Sets the element at position {@code i} to the given value. + * + * @param i the index + * @param newValue the new value + */ + public final void set(int i, int newValue) { + unsafe.putIntVolatile(array, checkedByteOffset(i), newValue); + } + + /** + * Eventually sets the element at position {@code i} to the given value. + * + * @param i the index + * @param newValue the new value + * @since 1.6 + */ + public final void lazySet(int i, int newValue) { + unsafe.putOrderedInt(array, checkedByteOffset(i), newValue); + } + + /** + * Atomically sets the element at position {@code i} to the given + * value and returns the old value. + * + * @param i the index + * @param newValue the new value + * @return the previous value + */ + public final int getAndSet(int i, int newValue) { + return unsafe.getAndSetInt(array, checkedByteOffset(i), newValue); + } + + /** + * Atomically sets the element at position {@code i} to the given + * updated value if the current value {@code ==} the expected value. + * + * @param i the index + * @param expect the expected value + * @param update the new value + * @return {@code true} if successful. False return indicates that + * the actual value was not equal to the expected value. + */ + public final boolean compareAndSet(int i, int expect, int update) { + return compareAndSetRaw(checkedByteOffset(i), expect, update); + } + + private boolean compareAndSetRaw(long offset, int expect, int update) { + return unsafe.compareAndSwapInt(array, offset, expect, update); + } + + /** + * Atomically sets the element at position {@code i} to the given + * updated value if the current value {@code ==} the expected value. + * + *
May fail + * spuriously and does not provide ordering guarantees, so is + * only rarely an appropriate alternative to {@code compareAndSet}. + * + * @param i the index + * @param expect the expected value + * @param update the new value + * @return {@code true} if successful + */ + public final boolean weakCompareAndSet(int i, int expect, int update) { + return compareAndSet(i, expect, update); + } + + /** + * Atomically increments by one the element at index {@code i}. + * + * @param i the index + * @return the previous value + */ + public final int getAndIncrement(int i) { + return getAndAdd(i, 1); + } + + /** + * Atomically decrements by one the element at index {@code i}. + * + * @param i the index + * @return the previous value + */ + public final int getAndDecrement(int i) { + return getAndAdd(i, -1); + } + + /** + * Atomically adds the given value to the element at index {@code i}. + * + * @param i the index + * @param delta the value to add + * @return the previous value + */ + public final int getAndAdd(int i, int delta) { + return unsafe.getAndAddInt(array, checkedByteOffset(i), delta); + } + + /** + * Atomically increments by one the element at index {@code i}. + * + * @param i the index + * @return the updated value + */ + public final int incrementAndGet(int i) { + return getAndAdd(i, 1) + 1; + } + + /** + * Atomically decrements by one the element at index {@code i}. + * + * @param i the index + * @return the updated value + */ + public final int decrementAndGet(int i) { + return getAndAdd(i, -1) - 1; + } + + /** + * Atomically adds the given value to the element at index {@code i}. + * + * @param i the index + * @param delta the value to add + * @return the updated value + */ + public final int addAndGet(int i, int delta) { + return getAndAdd(i, delta) + delta; + } + + /** + * Atomically updates the element at index {@code i} with the results + * of applying the given function, returning the previous value. The + * function should be side-effect-free, since it may be re-applied + * when attempted updates fail due to contention among threads. + * + * @param i the index + * @param updateFunction a side-effect-free function + * @return the previous value + * @since 1.8 + */ + public final int getAndUpdate(int i, IntUnaryOperator updateFunction) { + long offset = checkedByteOffset(i); + int prev, next; + do { + prev = getRaw(offset); + next = updateFunction.applyAsInt(prev); + } while (!compareAndSetRaw(offset, prev, next)); + return prev; + } + + /** + * Atomically updates the element at index {@code i} with the results + * of applying the given function, returning the updated value. The + * function should be side-effect-free, since it may be re-applied + * when attempted updates fail due to contention among threads. + * + * @param i the index + * @param updateFunction a side-effect-free function + * @return the updated value + * @since 1.8 + */ + public final int updateAndGet(int i, IntUnaryOperator updateFunction) { + long offset = checkedByteOffset(i); + int prev, next; + do { + prev = getRaw(offset); + next = updateFunction.applyAsInt(prev); + } while (!compareAndSetRaw(offset, prev, next)); + return next; + } + + /** + * Atomically updates the element at index {@code i} with the + * results of applying the given function to the current and + * given values, returning the previous value. The function should + * be side-effect-free, since it may be re-applied when attempted + * updates fail due to contention among threads. 
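Updates are retried in a CAS loop until they succeed.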
The function is + * applied with the current value at index {@code i} as its first + * argument, and the given update as the second argument. + * + * @param i the index + * @param x the update value + * @param accumulatorFunction a side-effect-free function of two arguments + * @return the previous value + * @since 1.8 + */ + public final int getAndAccumulate(int i, int x, + IntBinaryOperator accumulatorFunction) { + long offset = checkedByteOffset(i); + int prev, next; + do { + prev = getRaw(offset); + next = accumulatorFunction.applyAsInt(prev, x); + } while (!compareAndSetRaw(offset, prev, next)); + return prev; + } + + /** + * Atomically updates the element at index {@code i} with the + * results of applying the given function to the current and + * given values, returning the updated value. The function should + * be side-effect-free, since it may be re-applied when attempted + * updates fail due to contention among threads. The function is + * applied with the current value at index {@code i} as its first + * argument, and the given update as the second argument. + * + * @param i the index + * @param x the update value + * @param accumulatorFunction a side-effect-free function of two arguments + * @return the updated value + * @since 1.8 + */ + public final int accumulateAndGet(int i, int x, + IntBinaryOperator accumulatorFunction) { + long offset = checkedByteOffset(i); + int prev, next; + do { + prev = getRaw(offset); + next = accumulatorFunction.applyAsInt(prev, x); + } while (!compareAndSetRaw(offset, prev, next)); + return next; + } + + /** + * Returns the String representation of the current values of array. + * + * @return the String representation of the current values of array + */ + public String toString() { + int iMax = array.length - 1; + if (iMax == -1) { + return "[]"; + } + + StringBuilder b = new StringBuilder(); + b.append('['); + for (int i = 0; ; i++) { + b.append(getRaw(byteOffset(i))); + if (i == iMax) { + return b.append(']').toString(); + } + b.append(',').append(' '); + } + } + +} \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/BatchBlockWriter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/BatchBlockWriter.java new file mode 100644 index 000000000..6a7753bba --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/BatchBlockWriter.java @@ -0,0 +1,1413 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.operator.util; + +import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.datatype.DecimalConverter; +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.FastDecimalUtils; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.type.MySQLStandardFieldType; +import com.alibaba.polardbx.common.utils.time.MySQLTimeTypeUtil; +import com.alibaba.polardbx.common.utils.time.core.MysqlDateTime; +import com.alibaba.polardbx.common.utils.time.core.OriginalDate; +import com.alibaba.polardbx.common.utils.time.core.TimeStorage; +import com.alibaba.polardbx.common.utils.time.parser.StringTimeParser; +import com.alibaba.polardbx.common.utils.timezone.InternalTimeZone; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.BlockBuilders; +import com.alibaba.polardbx.executor.chunk.DateBlock; +import com.alibaba.polardbx.executor.chunk.DecimalBlock; +import com.alibaba.polardbx.executor.chunk.IntegerBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; +import com.alibaba.polardbx.executor.chunk.SegmentedDecimalBlock; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.optimizer.core.datatype.DecimalType; +import com.google.common.base.Preconditions; +import io.airlift.slice.DynamicSliceOutput; +import io.airlift.slice.Slice; +import io.airlift.slice.SliceOutput; + +import java.sql.Date; +import java.sql.Types; +import java.util.Optional; + +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DECIMAL_MEMORY_SIZE; +import static com.alibaba.polardbx.executor.chunk.SegmentedDecimalBlock.DecimalBlockState.DECIMAL_128; +import static com.alibaba.polardbx.executor.chunk.SegmentedDecimalBlock.DecimalBlockState.DECIMAL_64; +import static com.alibaba.polardbx.executor.chunk.SegmentedDecimalBlock.DecimalBlockState.UNSET_STATE; + +public interface BatchBlockWriter { + static BlockBuilder create(DataType type, ExecutionContext context, int chunkSize, ObjectPools objectPools) { + if (objectPools == null) { + return create(type, context, chunkSize); + } + MySQLStandardFieldType fieldType = type.fieldType(); + int chunkLimit = context.getParamManager().getInt(ConnectionParams.CHUNK_SIZE); + switch (fieldType) { + case MYSQL_TYPE_LONGLONG: + if (type.isUnsigned()) { + // for bigint unsigned + return BlockBuilders.create(type, context, context.getBlockBuilderCapacity(), objectPools); + } else { + // for bigint + return new BatchLongBlockBuilder(chunkSize, chunkLimit, objectPools.getLongArrayPool()); + } + case MYSQL_TYPE_LONG: + if (type.isUnsigned()) { + // for int unsigned + return new BatchLongBlockBuilder(chunkSize, chunkLimit, objectPools.getLongArrayPool()); + } else { + // for int + return new BatchIntegerBlockBuilder(chunkSize, chunkLimit, objectPools.getIntArrayPool()); + } + + case MYSQL_TYPE_DATE: + case MYSQL_TYPE_NEWDATE: + return new BatchDateBlockBuilder(chunkSize, chunkLimit, objectPools.getLongArrayPool()); + case MYSQL_TYPE_DECIMAL: + case MYSQL_TYPE_NEWDECIMAL: + return new BatchDecimalBlockBuilder(chunkSize, type); + default: + return BlockBuilders.create(type, context, context.getBlockBuilderCapacity(), objectPools); + } + } + + static 
BlockBuilder create(DataType type, ExecutionContext context, int chunkSize) { + MySQLStandardFieldType fieldType = type.fieldType(); + switch (fieldType) { + case MYSQL_TYPE_LONGLONG: + if (type.isUnsigned()) { + // for bigint unsigned + return BlockBuilders.create(type, context, context.getBlockBuilderCapacity()); + } else { + // for bigint + return new BatchLongBlockBuilder(chunkSize); + } + case MYSQL_TYPE_LONG: + if (type.isUnsigned()) { + // for int unsigned + return new BatchLongBlockBuilder(chunkSize); + } else { + // for int + return new BatchIntegerBlockBuilder(chunkSize); + } + case MYSQL_TYPE_DATE: + case MYSQL_TYPE_NEWDATE: + return new BatchDateBlockBuilder(chunkSize); + case MYSQL_TYPE_DECIMAL: + case MYSQL_TYPE_NEWDECIMAL: + return new BatchDecimalBlockBuilder(chunkSize, type); + default: + return BlockBuilders.create(type, context, context.getBlockBuilderCapacity()); + } + } + + void copyBlock(Block sourceBlock, int[] positions, int offsetInPositionArray, int positionOffset, + int positionCount); + + default void copyBlock(Block sourceBlock, int[] positions, int positionOffset, int positionCount) { + copyBlock(sourceBlock, positions, 0, positionOffset, positionCount); + } + + default void copyBlock(Block sourceBlock, int[] positions, int positionCount) { + copyBlock(sourceBlock, positions, 0, positionCount); + } + + void copyBlock(Block sourceBlock, int positionCount); + + class BatchIntegerBlockBuilder extends AbstractBatchBlockBuilder implements BatchBlockWriter { + private int[] values; + private DriverObjectPool objectPool; + private int chunkLimit; + + public BatchIntegerBlockBuilder(int capacity, int chunkLimit, DriverObjectPool intArrayPool) { + super(capacity); + this.objectPool = intArrayPool; + this.chunkLimit = chunkLimit; + int[] result = intArrayPool.poll(); + if (result == null || result.length < capacity) { + result = new int[capacity]; + } + this.values = result; + } + + public BatchIntegerBlockBuilder(int capacity) { + super(capacity); + this.values = new int[capacity]; + } + + @Override + public void copyBlock(Block sourceBlock, int[] positions, int offsetInPositionArray, int positionOffset, + int positionCount) { + IntegerBlock block = sourceBlock.cast(IntegerBlock.class); + int[] selection = block.getSelection(); + int[] intArray = block.intArray(); + + int nullArrayIndex = currentIndex; + if (selection != null) { + for (int i = 0; i < positionCount; i++) { + int j = selection[positions[i + offsetInPositionArray] + positionOffset]; + values[currentIndex++] = intArray[j]; + } + + if (block.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = block.nulls(); + for (int i = 0; i < positionCount; i++) { + int j = selection[positions[i + offsetInPositionArray] + positionOffset]; + containsNull |= (valueIsNull[nullArrayIndex++] = nullArray[j]); + } + } + + } else { + for (int i = 0; i < positionCount; i++) { + values[currentIndex++] = intArray[positions[i + offsetInPositionArray] + positionOffset]; + } + + if (block.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = block.nulls(); + for (int i = 0; i < positionCount; i++) { + containsNull |= (valueIsNull[nullArrayIndex++] = + nullArray[positions[i + offsetInPositionArray] + positionOffset]); + } + } + } + } + + @Override + public void copyBlock(Block sourceBlock, int positionCount) { + IntegerBlock block = sourceBlock.cast(IntegerBlock.class); + int[] selection = block.getSelection(); + int[] intArray = block.intArray(); + + int nullArrayIndex = currentIndex; + if (selection != null) { + for 
(int i = 0; i < positionCount; i++) { + int j = selection[i]; + values[currentIndex++] = intArray[j]; + } + + if (block.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = block.nulls(); + for (int i = 0; i < positionCount; i++) { + int j = selection[i]; + containsNull |= (valueIsNull[nullArrayIndex++] = nullArray[j]); + } + } + + } else { + for (int i = 0; i < positionCount; i++) { + values[currentIndex++] = intArray[i]; + } + + if (block.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = block.nulls(); + for (int i = 0; i < positionCount; i++) { + containsNull |= (valueIsNull[nullArrayIndex++] = nullArray[i]); + } + } + } + } + + @Override + public void writeInt(int value) { + values[currentIndex] = value; + if (valueIsNull != null) { + valueIsNull[currentIndex] = false; + } + currentIndex++; + } + + @Override + public int getInt(int position) { + checkReadablePosition(position); + return values[position]; + } + + @Override + public Object getObject(int position) { + return isNull(position) ? null : getInt(position); + } + + @Override + public void writeObject(Object value) { + if (value == null) { + appendNull(); + return; + } + Preconditions.checkArgument(value instanceof Integer); + writeInt((Integer) value); + } + + @Override + public Block build() { + Block result = new IntegerBlock(0, getPositionCount(), mayHaveNull() ? valueIsNull : null, values); + if (objectPool != null) { + result.setRecycler(objectPool.getRecycler(chunkLimit)); + } + return result; + } + + @Override + public void appendNull() { + appendNullInternal(); + values[currentIndex - 1] = 0; + } + + @Override + public BlockBuilder newBlockBuilder() { + if (objectPool != null) { + return new BatchIntegerBlockBuilder(getCapacity(), chunkLimit, objectPool); + } else { + return new BatchIntegerBlockBuilder(getCapacity()); + } + } + + @Override + public BlockBuilder newBlockBuilder(ObjectPools objectPools, int chunkLimit) { + return new BatchIntegerBlockBuilder(getCapacity(), chunkLimit, objectPools.getIntArrayPool()); + } + + @Override + public int hashCode(int position) { + if (isNull(position)) { + return 0; + } + return values[position]; + } + } + + class BatchLongBlockBuilder extends AbstractBatchBlockBuilder implements BatchBlockWriter { + private long[] values; + private DriverObjectPool objectPool; + private int chunkLimit; + + public BatchLongBlockBuilder(int initialCapacity) { + super(initialCapacity); + this.values = new long[initialCapacity]; + } + + public BatchLongBlockBuilder(int capacity, int chunkLimit, DriverObjectPool longArrayPool) { + super(capacity); + this.objectPool = longArrayPool; + this.chunkLimit = chunkLimit; + + long[] result = longArrayPool.poll(); + if (result == null || result.length < capacity) { + result = new long[capacity]; + } + this.values = result; + } + + @Override + public void copyBlock(Block sourceBlock, int[] positions, int offsetInPositionArray, int positionOffset, + int positionCount) { + LongBlock block = sourceBlock.cast(LongBlock.class); + int[] selection = block.getSelection(); + long[] longArray = block.longArray(); + + int nullArrayIndex = currentIndex; + if (selection != null) { + for (int i = 0; i < positionCount; i++) { + int j = selection[positions[i + offsetInPositionArray] + positionOffset]; + values[currentIndex++] = longArray[j]; + } + + if (block.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = block.nulls(); + for (int i = 0; i < positionCount; i++) { + int j = selection[positions[i + offsetInPositionArray] + positionOffset]; + 
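// copy each source null flag and fold it into the builder's containsNull +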
containsNull |= (valueIsNull[nullArrayIndex++] = nullArray[j]); + } + } + + } else { + for (int i = 0; i < positionCount; i++) { + values[currentIndex++] = longArray[positions[i + offsetInPositionArray] + positionOffset]; + } + + if (block.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = block.nulls(); + for (int i = 0; i < positionCount; i++) { + containsNull |= (valueIsNull[nullArrayIndex++] = + nullArray[positions[i + offsetInPositionArray] + positionOffset]); + } + } + } + } + + @Override + public void copyBlock(Block sourceBlock, int positionCount) { + LongBlock block = sourceBlock.cast(LongBlock.class); + int[] selection = block.getSelection(); + long[] longArray = block.longArray(); + + int nullArrayIndex = currentIndex; + if (selection != null) { + for (int i = 0; i < positionCount; i++) { + int j = selection[i]; + values[currentIndex++] = longArray[j]; + } + + if (block.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = block.nulls(); + for (int i = 0; i < positionCount; i++) { + int j = selection[i]; + containsNull |= (valueIsNull[nullArrayIndex++] = nullArray[j]); + } + } + + } else { + for (int i = 0; i < positionCount; i++) { + values[currentIndex++] = longArray[i]; + } + + if (block.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = block.nulls(); + for (int i = 0; i < positionCount; i++) { + containsNull |= (valueIsNull[nullArrayIndex++] = nullArray[i]); + } + } + } + } + + @Override + public void writeLong(long value) { + values[currentIndex] = value; + if (valueIsNull != null) { + valueIsNull[currentIndex] = false; + } + currentIndex++; + } + + @Override + public long getLong(int position) { + checkReadablePosition(position); + return values[position]; + } + + @Override + public Object getObject(int position) { + return isNull(position) ? null : getLong(position); + } + + @Override + public void writeObject(Object value) { + if (value == null) { + appendNull(); + return; + } + Preconditions.checkArgument(value instanceof Long); + writeLong((Long) value); + } + + @Override + public Block build() { + Block block = new LongBlock(0, getPositionCount(), mayHaveNull() ? 
valueIsNull : null, values); + if (objectPool != null) { + block.setRecycler(objectPool.getRecycler(chunkLimit)); + } + return block; + } + + @Override + public void appendNull() { + appendNullInternal(); + values[currentIndex - 1] = 0L; + } + + @Override + public BlockBuilder newBlockBuilder() { + if (objectPool != null) { + return new BatchLongBlockBuilder(getCapacity(), chunkLimit, objectPool); + } else { + return new BatchLongBlockBuilder(getCapacity()); + } + } + + @Override + public BlockBuilder newBlockBuilder(ObjectPools objectPools, int chunkLimit) { + return new BatchLongBlockBuilder(getCapacity(), chunkLimit, objectPools.getLongArrayPool()); + } + + @Override + public int hashCode(int position) { + if (isNull(position)) { + return 0; + } + return Long.hashCode(values[position]); + } + } + + class BatchDateBlockBuilder extends AbstractBatchBlockBuilder implements BatchBlockWriter { + long[] packed; + DriverObjectPool objectPool; + int chunkLimit; + + public BatchDateBlockBuilder(int initialCapacity) { + super(initialCapacity); + packed = new long[initialCapacity]; + } + + public BatchDateBlockBuilder(int capacity, int chunkLimit, DriverObjectPool longArrayPool) { + super(capacity); + this.objectPool = longArrayPool; + this.chunkLimit = chunkLimit; + long[] result = longArrayPool.poll(); + if (result == null || result.length < capacity) { + result = new long[capacity]; + } + this.packed = result; + } + + @Override + public void copyBlock(Block sourceBlock, int[] positions, int offsetInPositionArray, int positionOffset, + int positionCount) { + DateBlock block = sourceBlock.cast(DateBlock.class); + int[] selection = block.getSelection(); + long[] longArray = block.getPacked(); + + int nullArrayIndex = currentIndex; + if (selection != null) { + for (int i = 0; i < positionCount; i++) { + int j = selection[positions[i + offsetInPositionArray] + positionOffset]; + packed[currentIndex++] = longArray[j]; + } + + if (block.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = block.nulls(); + for (int i = 0; i < positionCount; i++) { + int j = selection[positions[i + offsetInPositionArray] + positionOffset]; + containsNull |= (valueIsNull[nullArrayIndex++] = nullArray[j]); + } + } + + } else { + for (int i = 0; i < positionCount; i++) { + packed[currentIndex++] = longArray[positions[i + offsetInPositionArray] + positionOffset]; + } + + if (block.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = block.nulls(); + for (int i = 0; i < positionCount; i++) { + containsNull |= (valueIsNull[nullArrayIndex++] = + nullArray[positions[i + offsetInPositionArray] + positionOffset]); + } + } + } + } + + @Override + public void copyBlock(Block sourceBlock, int positionCount) { + DateBlock block = sourceBlock.cast(DateBlock.class); + int[] selection = block.getSelection(); + long[] longArray = block.getPacked(); + + int nullArrayIndex = currentIndex; + if (selection != null) { + for (int i = 0; i < positionCount; i++) { + int j = selection[i]; + packed[currentIndex++] = longArray[j]; + } + + if (block.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = block.nulls(); + for (int i = 0; i < positionCount; i++) { + int j = selection[i]; + containsNull |= (valueIsNull[nullArrayIndex++] = nullArray[j]); + } + } + + } else { + for (int i = 0; i < positionCount; i++) { + packed[currentIndex++] = longArray[i]; + } + + if (block.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = block.nulls(); + for (int i = 0; i < positionCount; i++) { + containsNull |= 
(valueIsNull[nullArrayIndex++] = nullArray[i]); + } + } + } + } + + @Override + public void writeString(String value) { + if (value == null) { + appendNull(); + return; + } + writeByteArray(value.getBytes()); + } + + @Override + public void writeByteArray(byte[] value) { + if (value == null) { + appendNull(); + return; + } + MysqlDateTime t = StringTimeParser.parseString( + value, + Types.DATE); + writeMysqlDatetime(t); + } + + public void writePackedLong(long value) { + packed[currentIndex] = value; + if (valueIsNull != null) { + valueIsNull[currentIndex] = false; + } + currentIndex++; + } + + public void writeMysqlDatetime(MysqlDateTime t) { + if (t == null) { + appendNull(); + return; + } + long l = TimeStorage.writeDate(t); + + packed[currentIndex] = l; + if (valueIsNull != null) { + valueIsNull[currentIndex] = false; + } + currentIndex++; + } + + @Override + public void writeDate(Date value) { + // round to scale. + Date date = DataTypes.DateType.convertFrom(value); + + // pack to long value + MysqlDateTime t = Optional.ofNullable(date) + .map(MySQLTimeTypeUtil::toMysqlDate) + .orElse(null); + + writeMysqlDatetime(t); + } + + @Override + public void writeDatetimeRawLong(long val) { + packed[currentIndex] = val; + if (valueIsNull != null) { + valueIsNull[currentIndex] = false; + } + currentIndex++; + } + + @Override + public Date getDate(int position) { + checkReadablePosition(position); + + // unpack the long value to original date object. + final long packedLong = packed[position]; + MysqlDateTime t = TimeStorage.readDate(packedLong); + t.setTimezone(InternalTimeZone.DEFAULT_TIME_ZONE); + + // we assume the time read from packed long value is valid. + Date date = new OriginalDate(t); + return date; + } + + @Override + public Object getObject(int position) { + return isNull(position) ? null : getDate(position); + } + + @Override + public void writeObject(Object value) { + if (value == null) { + appendNull(); + return; + } + Preconditions.checkArgument(value instanceof Date); + writeDate((Date) value); + } + + @Override + public Block build() { + Block block = new DateBlock(0, getPositionCount(), mayHaveNull() ? valueIsNull : null, packed, + DataTypes.DateType, InternalTimeZone.DEFAULT_TIME_ZONE); + if (objectPool != null) { + block.setRecycler(objectPool.getRecycler(chunkLimit)); + } + return block; + } + + @Override + public void appendNull() { + appendNullInternal(); + packed[currentIndex - 1] = 0L; + } + + @Override + public BlockBuilder newBlockBuilder() { + if (objectPool != null) { + return new BatchDateBlockBuilder(getCapacity(), chunkLimit, objectPool); + } else { + return new BatchDateBlockBuilder(getCapacity()); + } + } + + @Override + public BlockBuilder newBlockBuilder(ObjectPools objectPools, int chunkLimit) { + return new BatchDateBlockBuilder(getCapacity(), chunkLimit, objectPools.getLongArrayPool()); + } + + @Override + public int hashCode(int position) { + if (isNull(position)) { + return 0; + } + return Long.hashCode(packed[position]); + } + + public long getPackedLong(int position) { + checkReadablePosition(position); + return packed[position]; + } + } + + class BatchDecimalBlockBuilder extends AbstractBatchBlockBuilder + implements BatchBlockWriter, SegmentedDecimalBlock { + + private final int scale; + SliceOutput sliceOutput; + long[] decimal64List; + long[] decimal128HighList; + DecimalType decimalType; + // collect state of decimal values. 
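+ // Starts as UNSET_STATE; the first value written decides DECIMAL_64 or DECIMAL_128, and convertToNormalDecimal() falls back to slice-based storage.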
+ SegmentedDecimalBlock.DecimalBlockState state; + private DecimalStructure decimalBuffer; + private DecimalStructure decimalResult; + + public BatchDecimalBlockBuilder(int capacity, DataType type) { + super(capacity); + this.decimalType = (DecimalType) type; + this.scale = decimalType.getScale(); + this.state = UNSET_STATE; + } + + @Override + public void copyBlock(Block sourceBlock, int[] positions, int offsetInPositionArray, int positionOffset, + int positionCount) { + DecimalBlock block = sourceBlock.cast(DecimalBlock.class); + + if (isDecimal64()) { + copyToDecimal64(block, positions, offsetInPositionArray, positionOffset, positionCount); + return; + } + + if (isDecimal128()) { + copyToDecimal128(block, positions, offsetInPositionArray, positionOffset, positionCount); + return; + } + + // copy to a normal decimal block + Slice output = DecimalStructure.allocateDecimalSlice(); + for (int i = 0; i < positionCount; i++) { + block.writePositionTo(positions[i], this, output); + } + } + + private void copyToDecimal64(DecimalBlock sourceBlock, int[] positions, int offsetInPositionArray, + int positionOffset, int positionCount) { + if (this.isUnset()) { + initDecimal64List(); + } + if (sourceBlock.isDecimal64()) { + copyDec64ToDec64(sourceBlock, positions, offsetInPositionArray, positionOffset, positionCount); + return; + } + if (sourceBlock.isDecimal128()) { + copyDec128ToDec64(sourceBlock, positions, offsetInPositionArray, positionOffset, positionCount); + return; + } + + // copy from a normal decimal block + Slice output = sourceBlock.allocCachedSlice(); + for (int i = 0; i < positionCount; i++) { + sourceBlock.writePositionTo(positions[i], this, output); + } + } + + private void copyToDecimal128(DecimalBlock sourceBlock, int[] positions, int offsetInPositionArray, + int positionOffset, int positionCount) { + if (this.isUnset()) { + initDecimal128List(); + } + if (sourceBlock.isDecimal64()) { + copyDec64ToDec128(sourceBlock, positions, offsetInPositionArray, positionOffset, positionCount); + return; + } + if (sourceBlock.isDecimal128()) { + copyDec128ToDec128(sourceBlock, positions, offsetInPositionArray, positionOffset, positionCount); + return; + } + + // copy from a normal decimal block + Slice output = sourceBlock.allocCachedSlice(); + for (int i = 0; i < positionCount; i++) { + sourceBlock.writePositionTo(positions[i], this, output); + } + } + + private void copyDec64ToDec64(DecimalBlock sourceBlock, int[] positions, int offsetInPositionArray, + int positionOffset, int positionCount) { + int[] selection = sourceBlock.getSelection(); + long[] longArray = sourceBlock.getDecimal64Values(); + int nullArrayIndex = currentIndex; + if (selection != null) { + for (int i = 0; i < positionCount; i++) { + int j = selection[positions[i + offsetInPositionArray] + positionOffset]; + decimal64List[currentIndex++] = longArray[j]; + } + + if (sourceBlock.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = sourceBlock.nulls(); + for (int i = 0; i < positionCount; i++) { + int j = selection[positions[i + offsetInPositionArray] + positionOffset]; + containsNull |= (valueIsNull[nullArrayIndex++] = nullArray[j]); + } + } + + } else { + for (int i = 0; i < positionCount; i++) { + decimal64List[currentIndex++] = longArray[positions[i + offsetInPositionArray] + positionOffset]; + } + + if (sourceBlock.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = sourceBlock.nulls(); + for (int i = 0; i < positionCount; i++) { + containsNull |= (valueIsNull[nullArrayIndex++] = + nullArray[positions[i + 
offsetInPositionArray] + positionOffset]); + } + } + } + } + + private void copyDec64ToDec128(DecimalBlock sourceBlock, int[] positions, int offsetInPositionArray, + int positionOffset, int positionCount) { + int[] selection = sourceBlock.getSelection(); + long[] longArray = sourceBlock.getDecimal64Values(); + int nullArrayIndex = currentIndex; + if (selection != null) { + for (int i = 0; i < positionCount; i++) { + int j = selection[positions[i + offsetInPositionArray] + positionOffset]; + decimal64List[currentIndex] = longArray[j]; + decimal128HighList[currentIndex] = decimal64List[currentIndex] >= 0 ? 0 : -1; + currentIndex++; + } + + if (sourceBlock.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = sourceBlock.nulls(); + for (int i = 0; i < positionCount; i++) { + int j = selection[positions[i + offsetInPositionArray] + positionOffset]; + containsNull |= (valueIsNull[nullArrayIndex++] = nullArray[j]); + } + } + + } else { + for (int i = 0; i < positionCount; i++) { + decimal64List[currentIndex] = longArray[positions[i + offsetInPositionArray] + positionOffset]; + decimal128HighList[currentIndex] = decimal64List[currentIndex] >= 0 ? 0 : -1; + currentIndex++; + } + + if (sourceBlock.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = sourceBlock.nulls(); + for (int i = 0; i < positionCount; i++) { + containsNull |= (valueIsNull[nullArrayIndex++] = + nullArray[positions[i + offsetInPositionArray] + positionOffset]); + } + } + } + } + + private void copyDec128ToDec64(DecimalBlock sourceBlock, int[] positions, int offsetInPositionArray, + int positionOffset, int positionCount) { + this.initDecimal128List(); + copyDec128ToDec128(sourceBlock, positions, offsetInPositionArray, positionOffset, positionCount); + } + + private void copyDec128ToDec128(DecimalBlock sourceBlock, int[] positions, int offsetInPositionArray, + int positionOffset, int positionCount) { + int[] selection = sourceBlock.getSelection(); + long[] srcDecimal128Low = sourceBlock.getDecimal128LowValues(); + long[] srcDecimal128High = sourceBlock.getDecimal128HighValues(); + int nullArrayIndex = currentIndex; + if (selection != null) { + for (int i = 0; i < positionCount; i++) { + int j = selection[positions[i + offsetInPositionArray] + positionOffset]; + decimal64List[currentIndex] = srcDecimal128Low[j]; + decimal128HighList[currentIndex] = srcDecimal128High[j]; + currentIndex++; + } + + if (sourceBlock.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = sourceBlock.nulls(); + for (int i = 0; i < positionCount; i++) { + int j = selection[positions[i + offsetInPositionArray] + positionOffset]; + containsNull |= (valueIsNull[nullArrayIndex++] = nullArray[j]); + } + } + + } else { + for (int i = 0; i < positionCount; i++) { + decimal64List[currentIndex] = + srcDecimal128Low[positions[i + offsetInPositionArray] + positionOffset]; + decimal128HighList[currentIndex] = + srcDecimal128High[positions[i + offsetInPositionArray] + positionOffset]; + currentIndex++; + } + + if (sourceBlock.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = sourceBlock.nulls(); + for (int i = 0; i < positionCount; i++) { + containsNull |= (valueIsNull[nullArrayIndex++] = + nullArray[positions[i + offsetInPositionArray] + positionOffset]); + } + } + } + } + + @Override + public void copyBlock(Block sourceBlock, int positionCount) { + DecimalBlock block = sourceBlock.cast(DecimalBlock.class); + + if (isDecimal64()) { + copyToDecimal64(block, positionCount); + return; + } + + if (isDecimal128()) { + copyToDecimal128(block, 
positionCount); + return; + } + + // copy to a normal decimal block + Slice output = DecimalStructure.allocateDecimalSlice(); + for (int i = 0; i < positionCount; i++) { + block.writePositionTo(i, this, output); + } + } + + private void copyToDecimal64(DecimalBlock sourceBlock, int positionCount) { + if (this.isUnset()) { + // keep in sync with the positions-based variant: allocate decimal64List before writing + initDecimal64List(); + } + if (sourceBlock.isDecimal64()) { + copyDec64ToDec64(sourceBlock, positionCount); + return; + } + if (sourceBlock.isDecimal128()) { + copyDec128ToDec64(sourceBlock, positionCount); + return; + } + + // copy from a normal decimal block + Slice output = DecimalStructure.allocateDecimalSlice(); + for (int i = 0; i < positionCount; i++) { + sourceBlock.writePositionTo(i, this, output); + } + } + + private void copyToDecimal128(DecimalBlock sourceBlock, int positionCount) { + if (this.isUnset()) { + initDecimal128List(); + } + if (sourceBlock.isDecimal64()) { + copyDec64ToDec128(sourceBlock, positionCount); + return; + } + if (sourceBlock.isDecimal128()) { + copyDec128ToDec128(sourceBlock, positionCount); + return; + } + + // copy from a normal decimal block + Slice output = DecimalStructure.allocateDecimalSlice(); + for (int i = 0; i < positionCount; i++) { + sourceBlock.writePositionTo(i, this, output); + } + } + + private void copyDec64ToDec64(DecimalBlock sourceBlock, int positionCount) { + int[] selection = sourceBlock.getSelection(); + long[] longArray = sourceBlock.getDecimal64Values(); + int nullArrayIndex = currentIndex; + if (selection != null) { + for (int i = 0; i < positionCount; i++) { + int j = selection[i]; + decimal64List[currentIndex++] = longArray[j]; + } + + if (sourceBlock.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = sourceBlock.nulls(); + for (int i = 0; i < positionCount; i++) { + int j = selection[i]; + containsNull |= (valueIsNull[nullArrayIndex++] = nullArray[j]); + } + } + + } else { + for (int i = 0; i < positionCount; i++) { + decimal64List[currentIndex++] = longArray[i]; + } + + if (sourceBlock.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = sourceBlock.nulls(); + for (int i = 0; i < positionCount; i++) { + containsNull |= (valueIsNull[nullArrayIndex++] = nullArray[i]); + } + } + } + } + + private void copyDec64ToDec128(DecimalBlock sourceBlock, int positionCount) { + int[] selection = sourceBlock.getSelection(); + long[] longArray = sourceBlock.getDecimal64Values(); + int nullArrayIndex = currentIndex; + if (selection != null) { + for (int i = 0; i < positionCount; i++) { + int j = selection[i]; + decimal64List[currentIndex] = longArray[j]; + decimal128HighList[currentIndex] = longArray[j] >= 0 ? 0 : -1; + currentIndex++; + } + + if (sourceBlock.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = sourceBlock.nulls(); + for (int i = 0; i < positionCount; i++) { + int j = selection[i]; + containsNull |= (valueIsNull[nullArrayIndex++] = nullArray[j]); + } + } + + } else { + for (int i = 0; i < positionCount; i++) { + decimal64List[currentIndex] = longArray[i]; + decimal128HighList[currentIndex] = longArray[i] >= 0 ? 0 : -1; + currentIndex++; + } + + if (sourceBlock.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = sourceBlock.nulls(); + for (int i = 0; i < positionCount; i++) { + containsNull |= (valueIsNull[nullArrayIndex++] = nullArray[i]); + } + } + } + } + + private void copyDec128ToDec64(DecimalBlock sourceBlock, int positionCount) { + this.initDecimal128List(); + copyDec128ToDec128(sourceBlock, positionCount); + } + + private void copyDec128ToDec128(DecimalBlock sourceBlock, int positionCount) { + int[] selection = sourceBlock.getSelection(); + long[] srcDecimal128Low = sourceBlock.getDecimal128LowValues(); + long[] srcDecimal128High = sourceBlock.getDecimal128HighValues(); + int nullArrayIndex = currentIndex; + if (selection != null) { + for (int i = 0; i < positionCount; i++) { + int j = selection[i]; + decimal64List[currentIndex] = srcDecimal128Low[j]; + decimal128HighList[currentIndex] = srcDecimal128High[j]; + currentIndex++; + } + + if (sourceBlock.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = sourceBlock.nulls(); + for (int i = 0; i < positionCount; i++) { + int j = selection[i]; + containsNull |= (valueIsNull[nullArrayIndex++] = nullArray[j]); + } + } + + } else { + for (int i = 0; i < positionCount; i++) { + decimal64List[currentIndex] = srcDecimal128Low[i]; + decimal128HighList[currentIndex] = srcDecimal128High[i]; + currentIndex++; + } + + if (sourceBlock.mayHaveNull()) { + allocateNulls(); + boolean[] nullArray = sourceBlock.nulls(); + for (int i = 0; i < positionCount; i++) { + containsNull |= (valueIsNull[nullArrayIndex++] = nullArray[i]); + } + } + } + } + + private void initSliceOutput() { + if (this.sliceOutput == null) { + this.sliceOutput = new DynamicSliceOutput(initialCapacity * DECIMAL_MEMORY_SIZE); + } + } + + private void initDecimal64List() { + if (this.decimal64List == null) { + this.decimal64List = new long[initialCapacity]; + } + this.state = DECIMAL_64; + } + + private void initDecimal128List() { + if (this.decimal64List == null) { + this.decimal64List = new long[initialCapacity]; + } + if (this.decimal128HighList == null) { + this.decimal128HighList = new long[initialCapacity]; + } + this.state = DECIMAL_128; + } + + @Override + public void writeDecimal(Decimal value) { + convertToNormalDecimal(); + if (valueIsNull != null) { + valueIsNull[currentIndex] = false; + } + currentIndex++; + sliceOutput.writeBytes(value.getMemorySegment()); + + updateDecimalInfo(value.getDecimalStructure()); + } + + @Override + public void writeLong(long value) { + if (state.isUnset()) { + initDecimal64List(); + state = DECIMAL_64; + } else if (!state.isDecimal64()) { + writeDecimal(new Decimal(value, decimalType.getScale())); + return; + } + + if (valueIsNull != null) { + valueIsNull[currentIndex] = false; + } + decimal64List[currentIndex] = value; + currentIndex++; + } + + public void writeDecimal128(long low, long high) { + if (state.isUnset()) { + initDecimal128List(); + state = DECIMAL_128; + } else if (state.isDecimal64()) { + // convert decimal64 to decimal128 + initDecimal128List(); + state = DECIMAL_128; + for (int i = 0; i < currentIndex; i++) { + if (decimal64List != null && decimal64List[i] < 0) { + decimal128HighList[i] = -1; + } else { + decimal128HighList[i] = 0; + } + } + } else if (!state.isDecimal128()) { + // normal decimal + DecimalStructure buffer = getDecimalBuffer(); + DecimalStructure result = getDecimalResult(); + FastDecimalUtils.setDecimal128WithScale(buffer, result, low, high, scale); + writeDecimal(new Decimal(result)); + return; + } + +
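// state is DECIMAL_128 here: decimal64List holds the low 64 bits, decimal128HighList the high 64 bits +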
if (valueIsNull != null) { + valueIsNull[currentIndex] = false; + } + decimal64List[currentIndex] = low; + decimal128HighList[currentIndex] = high; + currentIndex++; + } + + @Override + public void writeByteArray(byte[] value) { + writeByteArray(value, 0, value.length); + } + + @Override + public void writeByteArray(byte[] value, int offset, int length) { + DecimalStructure d = new DecimalStructure(); + DecimalConverter.parseString(value, offset, length, d, false); + writeDecimal(new Decimal(d)); + } + + @Override + public void appendNull() { + appendNullInternal(); + if (isUnset()) { + initDecimal64List(); + } + if (isDecimal64()) { + decimal64List[currentIndex - 1] = 0L; + } else if (isDecimal128()) { + decimal64List[currentIndex - 1] = 0L; + decimal128HighList[currentIndex - 1] = 0L; + } else { + // If null value, just skip 64-bytes + sliceOutput.skipBytes(DECIMAL_MEMORY_SIZE); + } + } + + @Override + public Decimal getDecimal(int position) { + checkReadablePosition(position); + if (state.isDecimal64()) { + return new Decimal(getLong(position), scale); + } + if (state.isDecimal128()) { + DecimalStructure buffer = getDecimalBuffer(); + DecimalStructure result = getDecimalResult(); + long low = decimal64List[position]; + long high = decimal128HighList[position]; + FastDecimalUtils.setDecimal128WithScale(buffer, result, low, high, scale); + return new Decimal(result); + } + Slice segment = sliceOutput.slice().slice(position * DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE); + return new Decimal(segment); + } + + @Override + public long getLong(int position) { + checkDecimal64StoreType(); + checkReadablePosition(position); + return decimal64List[position]; + } + + @Override + public Object getObject(int position) { + return isNull(position) ? null : getDecimal(position); + } + + @Override + public void writeObject(Object value) { + if (value == null) { + appendNull(); + return; + } + checkNormalDecimalType(); + Preconditions.checkArgument(value instanceof Decimal); + writeDecimal((Decimal) value); + } + + @Override + public Block build() { + if (isDecimal64()) { + return new DecimalBlock(decimalType, getPositionCount(), mayHaveNull(), + mayHaveNull() ? valueIsNull : null, decimal64List); + } + if (isDecimal128()) { + return DecimalBlock.buildDecimal128Block(decimalType, getPositionCount(), mayHaveNull(), + mayHaveNull() ? valueIsNull : null, decimal64List, decimal128HighList); + } + + return new DecimalBlock(decimalType, getPositionCount(), mayHaveNull() ? 
valueIsNull : null, + sliceOutput.slice(), state); + } + + @Override + public BlockBuilder newBlockBuilder() { + return new BatchDecimalBlockBuilder(getCapacity(), decimalType); + } + + @Override + public int hashCode(int position) { + if (isNull(position)) { + return 0; + } + return getDecimal(position).hashCode(); + } + + @Override + public Slice segmentUncheckedAt(int position) { + return sliceOutput.slice().slice(position * DECIMAL_MEMORY_SIZE, DECIMAL_MEMORY_SIZE); + } + + private void updateDecimalInfo(DecimalStructure d) { + DecimalBlock.DecimalBlockState elementState = DecimalBlock.DecimalBlockState.stateOf(d); + this.state = this.state.merge(elementState); + } + + public DecimalBlock.DecimalBlockState getState() { + return this.state; + } + + public DataType getDecimalType() { + return decimalType; + } + + @Override + public boolean isDecimal64() { + return state.isDecimal64() || (state.isUnset() && decimalType.isDecimal64()); + } + + @Override + public boolean isDecimal128() { + return state.isDecimal128() || (state.isUnset() && decimalType.isDecimal128()); + } + + @Override + public long getDecimal128Low(int position) { + if (isDecimal128()) { + return decimal64List[position]; + } else { + throw new UnsupportedOperationException( + "Cannot get decimal128Low from DecimalBlock with state: " + state); + } + } + + @Override + public long getDecimal128High(int position) { + if (isDecimal128()) { + return decimal128HighList[position]; + } else { + throw new UnsupportedOperationException( + "Cannot get decimal128High from DecimalBlock with state: " + state); + } + } + + public boolean isNormal() { + return state.isNormal(); + } + + public void convertToNormalDecimal() { + initSliceOutput(); + + if (isNormal()) { + return; + } + + // in unset, decimal64 or decimal128 state + if (decimal64List != null && currentIndex > 0) { + if (state.isDecimal64()) { + state = UNSET_STATE; + DecimalStructure tmpBuffer = getDecimalBuffer(); + DecimalStructure resultBuffer = getDecimalResult(); + // DECIMAL_64 values may already have been written + for (int pos = 0; pos < currentIndex; pos++) { + if (!isNull(pos)) { + long decimal64 = decimal64List[pos]; + FastDecimalUtils.setLongWithScale(tmpBuffer, resultBuffer, decimal64, scale); + sliceOutput.writeBytes(resultBuffer.getDecimalMemorySegment()); + updateDecimalInfo(resultBuffer); + } else { + sliceOutput.skipBytes(DECIMAL_MEMORY_SIZE); + } + } + + decimal64List = null; + } else if (state.isDecimal128()) { + Preconditions.checkArgument(decimal64List.length == decimal128HighList.length, + "Decimal128 lowBits count does not match highBits count"); + state = UNSET_STATE; + DecimalStructure tmpBuffer = getDecimalBuffer(); + DecimalStructure resultBuffer = getDecimalResult(); + for (int pos = 0; pos < currentIndex; pos++) { + if (!isNull(pos)) { + long lowBits = decimal64List[pos]; + long highBits = decimal128HighList[pos]; + FastDecimalUtils.setDecimal128WithScale(tmpBuffer, resultBuffer, lowBits, highBits, scale); + sliceOutput.writeBytes(resultBuffer.getDecimalMemorySegment()); + updateDecimalInfo(resultBuffer); + } else { + sliceOutput.skipBytes(DECIMAL_MEMORY_SIZE); + } + } + + decimal64List = null; + decimal128HighList = null; + } + } + } + + private void checkNormalDecimalType() { + if (state.isDecimal64()) { + throw new AssertionError("DECIMAL_64 store type is inconsistent when writing a Decimal"); + } + } + + private void checkDecimal64StoreType() { + if (state.isUnset()) { + state = DECIMAL_64; + } else if (state != DECIMAL_64) { + throw new AssertionError("Unmatched DECIMAL_64 type: " 
+ state); + } + } + + public boolean canWriteDecimal64() { + return state.isUnset() || state.isDecimal64(); + } + + public boolean isUnset() { + return state.isUnset(); + } + + public boolean isSimple() { + return state.isSimple(); + } + + public void setContainsNull(boolean containsNull) { + this.containsNull = containsNull; + } + + protected DecimalStructure getDecimalBuffer() { + if (decimalBuffer == null) { + this.decimalBuffer = new DecimalStructure(); + } + return decimalBuffer; + } + + protected DecimalStructure getDecimalResult() { + if (decimalResult == null) { + this.decimalResult = new DecimalStructure(); + } + return decimalResult; + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/BloomFilterProduce.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/BloomFilterProduce.java index 0c1de8809..f74daa5b0 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/BloomFilterProduce.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/BloomFilterProduce.java @@ -16,19 +16,19 @@ package com.alibaba.polardbx.executor.operator.util; +import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.bloomfilter.BitSet; +import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilter; +import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; -import com.alibaba.polardbx.executor.operator.util.minmaxfilter.MinMaxFilter; -import com.google.common.net.HttpHeaders; -import com.google.common.net.MediaType; -import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.mpp.deploy.ServiceProvider; import com.alibaba.polardbx.executor.mpp.execution.QueryManager; -import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilter; -import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; +import com.alibaba.polardbx.executor.operator.util.minmaxfilter.MinMaxFilter; +import com.google.common.net.HttpHeaders; +import com.google.common.net.MediaType; import io.airlift.http.client.HttpClient; import io.airlift.http.client.HttpUriBuilder; import io.airlift.http.client.JsonBodyGenerator; @@ -78,7 +78,8 @@ private BloomFilterProduce(List> bloomfilterId, List } public static BloomFilterProduce create(List> bloomfilterId, List> hashKeys, - List bloomFilters, List> minMaxFilters, HttpClient client, URI uri, + List bloomFilters, List> minMaxFilters, + HttpClient client, URI uri, String query) { if (bloomFilters.isEmpty()) { throw new IllegalArgumentException("Empty BloomFilterList in BloomFilterProduce"); @@ -125,7 +126,8 @@ public List convertBloomFilterInfo() { } bloomFilterInfos.add( new BloomFilterInfo(id, bloomFilters.get(i).getBitmap(), bloomFilters.get(i).getNumHashFunctions(), - bloomFilters.get(i).getHashMethodInfo(), minMaxFilters.get(i).stream().map(x -> x.toMinMaxFilterInfo()).collect(Collectors.toList()))); + bloomFilters.get(i).getHashMethodInfo(), + minMaxFilters.get(i).stream().map(x -> x.toMinMaxFilterInfo()).collect(Collectors.toList()))); } } return bloomFilterInfos; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ChunkRowOpenHashMap.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ChunkRowOpenHashMap.java index dc2f9a6f3..ac278b20e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ChunkRowOpenHashMap.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ChunkRowOpenHashMap.java @@ -16,8 +16,8 @@ package com.alibaba.polardbx.executor.operator.util; -import com.google.common.base.Preconditions; import com.alibaba.polardbx.executor.chunk.Chunk; +import com.google.common.base.Preconditions; import it.unimi.dsi.fastutil.Hash; import it.unimi.dsi.fastutil.HashCommon; import org.openjdk.jol.info.ClassLayout; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ChunksIndex.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ChunksIndex.java index 3c0669909..2cb3cdeb9 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ChunksIndex.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ChunksIndex.java @@ -45,11 +45,70 @@ public ChunksIndex() { offsets.add(0); } + // type-specific + protected TypedListHandle typedListHandle; + protected TypedList[] typedLists = null; + protected int dataTypeSize = 0; + + public void merge(List chunksIndexList) { + for (int i = 0; i < chunksIndexList.size(); i++) { + ChunksIndex other = chunksIndexList.get(i); + if (other.getPositionCount() == 0) { + continue; + } + + // merge chunks + chunks.addAll(other.chunks); + + // merge offsets + final int currentPositionCount = getPositionCount(); + for (int j = 1; j < other.offsets.size(); j++) { + offsets.add(other.offsets.getInt(j) + currentPositionCount); + } + } + } + + public void setTypedHashTable(TypedListHandle typedListHandle) { + this.typedListHandle = typedListHandle; + } + + public long estimateTypedListSizeInBytes() { + if (typedListHandle != null) { + final int positionCount = getPositionCount(); + return typedListHandle.estimatedSize(positionCount); + } + return 0L; + } + + public void openTypedHashTable() { + if (typedListHandle != null) { + int positionCount = getPositionCount(); + this.typedLists = typedListHandle.getTypedLists(positionCount); + this.dataTypeSize = typedLists.length; + } + } + + public void addChunkToTypedList(int chunkId) { + if (typedListHandle != null) { + Chunk chunk = getChunk(chunkId); + int sourceIndex = getChunkOffset(chunkId); + typedListHandle.consume(chunk, sourceIndex); + } + } + public void addChunk(Chunk chunk) { chunks.add(chunk); offsets.add(getPositionCount() + chunk.getPositionCount()); } + public long getLong(int col, int position) { + return typedLists[col].getLong(position); + } + + public int getInt(int col, int position) { + return typedLists[col].getInt(position); + } + public final long getAddress(int position) { int chunkId = upperBound(offsets, position) - 1; int offset = offsets.getInt(chunkId); @@ -77,6 +136,21 @@ public void writePositionTo(int columnIndex, int position, BlockBuilder blockBui block.writePositionTo(SyntheticAddress.decodeOffset(address), blockBuilder); } + public void writePositionTo(int chunkId, int positionInChunk, int columnIndex, BlockBuilder blockBuilder) { + Block block = getChunk(chunkId).getBlock(columnIndex); + block.writePositionTo(positionInChunk, blockBuilder); + } + + public void getAddress(int[] positions, int[] chunkIds, int[] positionsInChunk, int positionCount) { + for (int i = 0; i < positionCount; i++) { + int position = 
positions[i]; + int chunkId = upperBound(offsets, position) - 1; + int positionInChunk = position - offsets.getInt(chunkId); + chunkIds[i] = chunkId; + positionsInChunk[i] = positionInChunk; + } + } + public Chunk.ChunkRow rowAt(int position) { long address = getAddress(position); Chunk chunk = getChunk(SyntheticAddress.decodeIndex(address)); @@ -161,4 +235,12 @@ protected Chunk computeNext() { } }; } + + public synchronized void close() { + if (typedLists != null) { + for (TypedList typedList : typedLists) { + typedList.close(); + } + } + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ConcurrentRawDirectHashTable.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ConcurrentRawDirectHashTable.java new file mode 100644 index 000000000..70254a80b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ConcurrentRawDirectHashTable.java @@ -0,0 +1,106 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.util; + +import com.google.common.base.Preconditions; +import it.unimi.dsi.fastutil.Hash; +import org.openjdk.jol.info.ClassLayout; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.atomic.AtomicIntegerArray; + +/** + * A concurrent hash table implementation for marking positions + */ +public class ConcurrentRawDirectHashTable implements Hash { + + private static final long INSTANCE_SIZE = ClassLayout.parseClass(ConcurrentRawDirectHashTable.class).instanceSize(); + + public static final int NOT_EXISTS = -1; + + public static final int EXISTS = 1; + + /** + * The array of keys (buckets) + */ + private final AtomicIntegerArray keys; + /** + * The current table size. 
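+ * Fixed at construction time; this table never grows or rehashes.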
+ */ + private final int n; + + public ConcurrentRawDirectHashTable(int size) { + Preconditions.checkArgument(size >= 0, "The number of elements must be non-negative"); + + this.n = size; + + int[] keys = new int[n]; + Arrays.fill(keys, NOT_EXISTS); + this.keys = new AtomicIntegerArray(keys); + } + + /** + * Mark assigned position + * + * @return true if this slot was empty, false otherwise + */ + public boolean markAndGet(int pos) { + if (!keys.compareAndSet(pos, NOT_EXISTS, EXISTS)) { + return false; + } else { + return true; + } + } + + public void rawMark(int pos) { + keys.set(pos, EXISTS); + } + + /** + * Whether this position has been set yet + * + * @return return true if this position has been set + */ + public boolean hasSet(int pos) { + return keys.get(pos) == EXISTS; + } + + public long estimateSize() { + return INSTANCE_SIZE + keys.length() * Integer.BYTES; + } + + public int size() { + return keys.length(); + } + + public List getNotMarkedPosition() { + List notMarkedPosition = new ArrayList<>(); + int length = keys.length(); + for (int pos = 0; pos < length; ++pos) { + if (keys.get(pos) == NOT_EXISTS) { + notMarkedPosition.add(pos); + } + } + return notMarkedPosition; + } + + public static long estimatedSizeInBytes(int size) { + return INSTANCE_SIZE + size * Integer.BYTES; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ConcurrentRawHashTable.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ConcurrentRawHashTable.java index a20703bb1..58e4370f2 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ConcurrentRawHashTable.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ConcurrentRawHashTable.java @@ -21,9 +21,6 @@ import it.unimi.dsi.fastutil.HashCommon; import org.openjdk.jol.info.ClassLayout; -import java.util.Arrays; -import java.util.concurrent.atomic.AtomicIntegerArray; - /** * An concurrent hash table implementation mapping a hash code to an integer value * @@ -54,17 +51,14 @@ public ConcurrentRawHashTable(int size, float loadFactor) { this.n = HashCommon.arraySize(size, loadFactor); this.mask = n - 1; - - int[] keys = new int[n]; - Arrays.fill(keys, NOT_EXISTS); - this.keys = new AtomicIntegerArray(keys); + this.keys = new AtomicIntegerArray(n, NOT_EXISTS); } public ConcurrentRawHashTable(int size) { this(size, selectLoadFactor(size)); } - private static float selectLoadFactor(int size) { + public static float selectLoadFactor(int size) { if (size >= 100_000_000) { // more than 100M records return DEFAULT_LOAD_FACTOR; } else if (size >= 10_000_000) { // more than 10M records @@ -115,7 +109,18 @@ public int get(int hash) { return keys.get(h); } - public long estimateSize() { + public long estimateSizeInBytes() { return INSTANCE_SIZE + keys.length() * Integer.BYTES; } + + public static long estimateSizeInBytes(int size) { + long lengthOfHashTable = HashCommon.arraySize( + size, ConcurrentRawHashTable.selectLoadFactor(size)); + + return INSTANCE_SIZE + lengthOfHashTable * Integer.BYTES; + } + + public int size() { + return n; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/DefaultTypedBuffer.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/DefaultTypedBuffer.java new file mode 100644 index 000000000..ec70ba7b5 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/DefaultTypedBuffer.java @@ -0,0 
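A usage sketch for ConcurrentRawDirectHashTable; the matched-row-marking scenario and the `buildRowCount`/`position` variables are assumptions, since the patch does not show a call site. One slot is reserved per build-side row, probe threads mark matches lock-free via CAS, and the unmarked remainder can be read out afterwards:

    ConcurrentRawDirectHashTable marked = new ConcurrentRawDirectHashTable(buildRowCount);

    // probe side, possibly many threads: CAS-mark each matched build row;
    // markAndGet is true only for the thread that marked the slot first
    boolean firstToMark = marked.markAndGet(position);

    // after all probes: positions never marked, e.g. for outer/anti join output
    List<Integer> notMatched = marked.getNotMarkedPosition();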
+1,197 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.util; + +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.chunk.ChunkBuilder; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; + +import java.util.ArrayList; +import java.util.BitSet; +import java.util.List; + +/** + * Appendable buffer for arbitrary data types + * + * @author Eric Fu + * @see ChunksIndex + */ +public class DefaultTypedBuffer implements TypedBuffer { + + private BlockBuilder[] blockBuilders; + private final int chunkSize; + + private int currentSize; + private final List chunks = new ArrayList<>(); + private long estimateSize = 0; + private ExecutionContext context; + + DefaultTypedBuffer(BlockBuilder[] blockBuilders, int chunkSize, ExecutionContext context) { + this.blockBuilders = blockBuilders; + this.chunkSize = chunkSize; + this.context = context; + } + + @Override + public boolean equals(int position, Chunk otherChunk, int otherPosition) { + final int chunkId = chunkIndexOf(position); + final int offset = offsetOf(position); + + if (chunkId < chunks.size()) { + // Just compare both chunks + Chunk chunk = chunks.get(chunkId); + return chunk.equals(offset, otherChunk, otherPosition); + } else { + // Compare the block builders with given chunk (block by block) + assert chunkId == chunks.size(); + for (int i = 0; i < blockBuilders.length; i++) { + if (!otherChunk.getBlock(i).equals(otherPosition, blockBuilders[i], offset)) { + return false; + } + } + return true; + } + } + + @Override + public void appendRow(Object array, int nullPosition, int positionCount) { + throw new UnsupportedOperationException(); + } + + @Override + public void appendRow(Chunk chunk, int position) { + // Check fulfilled before appending + if (currentSize == chunkSize) { + Chunk buildingChunk = getBuildingChunk(); + chunks.add(buildingChunk); + estimateSize += buildingChunk.estimateSize(); + for (int i = 0; i < blockBuilders.length; i++) { + blockBuilders[i] = blockBuilders[i].newBlockBuilder(); + currentSize = 0; + } + } + + for (int i = 0; i < blockBuilders.length; i++) { + chunk.getBlock(i).writePositionTo(position, blockBuilders[i]); + } + currentSize++; + } + + @Override + public List buildChunks() { + ArrayList allChunks = new ArrayList<>(this.chunks); + if (currentSize > 0) { + allChunks.add(getBuildingChunk()); + } + return allChunks; + } + + private int chunkIndexOf(int position) { + return position / chunkSize; + } + + private int offsetOf(int position) { + return position % chunkSize; + } + + @Override + public void appendValuesTo(int position, ChunkBuilder chunkBuilder) { + final int chunkId = chunkIndexOf(position); + final int offset = offsetOf(position); + if (chunkId < chunks.size()) { + // Just compare both chunks + Chunk chunk = 
chunks.get(chunkId); + for (int i = 0; i < chunk.getBlockCount(); ++i) { + final Block block = chunk.getBlock(i); + chunkBuilder.appendTo(block, i, offset); + } + } else { + // Compare the block builders with given chunk (block by block) + assert chunkId == chunks.size(); + for (int i = 0; i < blockBuilders.length; i++) { + Block block = blockBuilders[i].build(); + chunkBuilder.appendTo(block, i, offset); + } + } + } + + private Chunk getBuildingChunk() { + Block[] blocks = new Block[blockBuilders.length]; + for (int i = 0; i < blockBuilders.length; i++) { + blocks[i] = blockBuilders[i].build(); + } + return new Chunk(currentSize, blocks); + } + + @Override + public long estimateSize() { + return estimateSize; + } + + @Override + public void appendInteger(int col, int value) { + // Check fulfilled before appending + if (currentSize == chunkSize) { + Chunk buildingChunk = getBuildingChunk(); + chunks.add(buildingChunk); + estimateSize += buildingChunk.estimateSize(); + for (int i = 0; i < blockBuilders.length; i++) { + blockBuilders[i] = blockBuilders[i].newBlockBuilder(); + currentSize = 0; + } + } + + blockBuilders[col].writeInt(value); + currentSize++; + } + + @Override + public void appendLong(int col, long value) { + // Check fulfilled before appending + if (currentSize == chunkSize) { + Chunk buildingChunk = getBuildingChunk(); + chunks.add(buildingChunk); + estimateSize += buildingChunk.estimateSize(); + for (int i = 0; i < blockBuilders.length; i++) { + blockBuilders[i] = blockBuilders[i].newBlockBuilder(); + currentSize = 0; + } + } + + blockBuilders[col].writeLong(value); + currentSize++; + } + + @Override + public void appendNull(int col) { + // Check fulfilled before appending + if (currentSize == chunkSize) { + Chunk buildingChunk = getBuildingChunk(); + chunks.add(buildingChunk); + estimateSize += buildingChunk.estimateSize(); + for (int i = 0; i < blockBuilders.length; i++) { + blockBuilders[i] = blockBuilders[i].newBlockBuilder(); + currentSize = 0; + } + } + + blockBuilders[col].appendNull(); + currentSize++; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/DriverIntArrayPool.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/DriverIntArrayPool.java new file mode 100644 index 000000000..eceab7a1d --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/DriverIntArrayPool.java @@ -0,0 +1,72 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
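A short sketch of the DefaultTypedBuffer life cycle, assuming `types`, `context`, and an input `chunk` are in scope: rows are appended one position at a time, a chunk is sealed automatically every chunkSize rows, and buildChunks() returns the sealed chunks plus the partially filled tail.

    TypedBuffer buffer = TypedBuffer.create(types, 1024, context); // chunkSize = 1024
    for (int pos = 0; pos < chunk.getPositionCount(); pos++) {
        buffer.appendRow(chunk, pos);
    }
    List<Chunk> built = buffer.buildChunks(); // logical position p lives at
                                              // (p / 1024, p % 1024)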
+ */ + +package com.alibaba.polardbx.executor.operator.util; + +import java.text.MessageFormat; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.atomic.AtomicBoolean; + +public class DriverIntArrayPool implements DriverObjectPool { + private LinkedBlockingQueue queue; + private AtomicBoolean isCleared; + private long recycleTimes; + private long reuseTimes; + + public DriverIntArrayPool() { + this.queue = new LinkedBlockingQueue<>(); + this.isCleared = new AtomicBoolean(false); + this.recycleTimes = 0L; + this.reuseTimes = 0L; + } + + @Override + public void add(int[] object) { + queue.add(object); + recycleTimes++; + } + + @Override + public int[] poll() { + int[] res = queue.poll(); + if (res != null) { + reuseTimes++; + } + return res; + } + + @Override + public Recycler getRecycler(final int chunkLimit) { + return (int[] object) -> { + if (!isCleared.get() && object != null && object.length >= chunkLimit) { + add(object); + } + }; + } + + @Override + public void clear() { + if (isCleared.compareAndSet(false, true)) { + queue.clear(); + } + } + + @Override + public String report() { + return MessageFormat.format("int array object pool, recycleTimes = {0}, reuseTimes = {1}", recycleTimes, + reuseTimes); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/DriverLongArrayPool.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/DriverLongArrayPool.java new file mode 100644 index 000000000..add3af458 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/DriverLongArrayPool.java @@ -0,0 +1,72 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.operator.util; + +import java.text.MessageFormat; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.atomic.AtomicBoolean; + +public class DriverLongArrayPool implements DriverObjectPool { + private LinkedBlockingQueue queue; + private AtomicBoolean isCleared; + private long recycleTimes; + private long reuseTimes; + + public DriverLongArrayPool() { + this.queue = new LinkedBlockingQueue<>(); + this.isCleared = new AtomicBoolean(false); + this.recycleTimes = 0L; + this.reuseTimes = 0L; + } + + @Override + public void add(long[] object) { + queue.add(object); + recycleTimes++; + } + + @Override + public long[] poll() { + long[] res = queue.poll(); + if (res != null) { + reuseTimes++; + } + return res; + } + + @Override + public Recycler getRecycler(int chunkLimit) { + return (long[] object) -> { + if (!isCleared.get() && object != null && object.length >= chunkLimit) { + add(object); + } + }; + } + + @Override + public void clear() { + if (isCleared.compareAndSet(false, true)) { + queue.clear(); + } + } + + @Override + public String report() { + return MessageFormat.format("long array object pool, recycleTimes = {0}, reuseTimes = {1}", recycleTimes, + reuseTimes); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/DriverObjectPool.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/DriverObjectPool.java new file mode 100644 index 000000000..b06483544 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/DriverObjectPool.java @@ -0,0 +1,66 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.util; + +/** + * A object pool in executor level + * + * @param type of object. + */ +public interface DriverObjectPool { + static DriverObjectPool createIntArrayPool() { + return new DriverIntArrayPool(); + } + + static DriverObjectPool createLongArrayPool() { + return new DriverLongArrayPool(); + } + + /** + * Add object into pool. + */ + void add(T object); + + /** + * Poll the object from the pool + */ + T poll(); + + /** + * Get the recycler to recycle object back into the pool. + */ + Recycler getRecycler(int chunkLimit); + + /** + * Clear the pool. + */ + void clear(); + + /** + * Get the message of this pool. + */ + String report(); + + /** + * To recycle the object back into pool. + * + * @param type of the object. 
+ */ + interface Recycler { + void recycle(T object); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ElementaryChunksIndex.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ElementaryChunksIndex.java index cf3dac454..e69de29bb 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ElementaryChunksIndex.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ElementaryChunksIndex.java @@ -1,209 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.executor.operator.util; - -import com.google.common.collect.AbstractIterator; -import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.executor.chunk.BlockBuilder; -import com.alibaba.polardbx.executor.chunk.Chunk; -import it.unimi.dsi.fastutil.ints.IntArrayList; -import org.openjdk.jol.info.ClassLayout; - -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; - -/** - * Index of several chunks - * - * @author xiaoyinng - */ -public final class ElementaryChunksIndex { - - private static final long INSTANCE_SIZE = ClassLayout.parseClass(ElementaryChunksIndex.class).instanceSize(); - - private final List chunks; - private int chunkLimit; - private IntArrayList offsetList = new IntArrayList(); - private final IntArrayList positionIndexList; - private int[] offsets; - private int[] positionIndex = new int[0]; - private Chunk.ChunkRow[] chunkRows; - - public ElementaryChunksIndex(int initSize, int chunkLimit) { - chunks = new ArrayList<>(initSize); - this.chunkLimit = chunkLimit; - positionIndexList = new IntArrayList(initSize * this.chunkLimit); - } - - public ElementaryChunksIndex(int chunkLimit) { - chunks = new ArrayList<>(); - positionIndexList = new IntArrayList(chunkLimit); - this.chunkLimit = chunkLimit; - } - - public void addChunk(Chunk chunk) { - final int k = chunks.size(); - offsetList.add(getPositionCount() + chunk.getPositionCount()); - for (int i = 0; i < chunk.getPositionCount(); i++) { - positionIndexList.add(k); - } - chunks.add(chunk); - build(); - } - - public void build() { - offsets = offsetList.elements(); - positionIndex = positionIndexList.elements(); - } - - public void buildRow() { - if (chunkRows == null) { - chunkRows = new Chunk.ChunkRow[positionIndexList.size()]; - } - } - - public Object getObject(int columnIndex, int position) { - final int chunkAt = positionIndex[position]; - final Chunk chunk = chunks.get(chunkAt); - Block block = chunk.getBlock(columnIndex); - if (chunkAt == 0) { - return block.getObject(position); - } - final int preChunkAt = chunkAt - 1; - int offset = offsets[preChunkAt]; - return block.getObject(position - offset); - } - - public int getChunkOffset(int index) { - if (index == 0) { - return 0; - } - return offsets[index - 1]; - } - - public void writePositionTo(int columnIndex, int position, 
BlockBuilder blockBuilder) { - final int chunkAt = positionIndex[position]; - final Chunk chunk = chunks.get(chunkAt); - Block block = chunk.getBlock(columnIndex); - final int preChunkAt = chunkAt - 1; - if (chunkAt == 0) { - block.writePositionTo(position, blockBuilder); - return; - } - int offset = offsets[preChunkAt]; - block.writePositionTo(position - offset, blockBuilder); - } - - public Chunk.ChunkRow rowAt(int position) { - - Chunk.ChunkRow chunkRow = chunkRows[position]; - if (chunkRow != null) { - return chunkRow; - } - final int chunkAt = positionIndex[position]; - final Chunk chunk = chunks.get(chunkAt); - if (chunkAt == 0) { - chunkRow = chunk.rowAt(position); - chunkRows[position] = chunkRow; - return chunkRow; - } - final int preChunkAt = chunkAt - 1; - int offset = offsets[preChunkAt]; - chunkRow = chunk.rowAt(position - offset); - chunkRows[position] = chunkRow; - return chunkRow; - } - - public int getPositionCount() { - return positionIndexList.size(); - } - - public int getChunkCount() { - return chunks.size(); - } - - public boolean isEmpty() { - return chunks.isEmpty(); - } - - public Chunk getChunk(int index) { - return chunks.get(index); - } - - /** - * Estimate the memory size except the contained chunks - */ - public long estimateSelfSize() { - return INSTANCE_SIZE + positionIndex.length; - } - - public int hashCode(int position) { - final int chunkAt = positionIndex[position]; - Chunk chunk = chunks.get(chunkAt); - if (chunkAt == 0) { - return chunk.hashCode(position); - } - final int preChunkAt = chunkAt - 1; - int offset = offsets[preChunkAt]; - return chunk.hashCode(position - offset); - } - - public boolean equals(int position, Chunk otherChunk, int otherPosition) { - final int chunkAt = positionIndex[position]; - Chunk chunk = chunks.get(chunkAt); - if (chunkAt == 0) { - return chunk.equals(position, otherChunk, otherPosition); - } - final int preChunkAt = chunkAt - 1; - int offset = offsets[preChunkAt]; - return chunk.equals(position - offset, otherChunk, otherPosition); - } - - public boolean equals(int position1, int position2) { - final int chunkAt1 = positionIndex[position1]; - final int preChunkAt1 = chunkAt1 - 1; - int offset1 = 0; - if (chunkAt1 != 0) { - offset1 = offsets[preChunkAt1]; - } - final int chunkAt2 = positionIndex[position2]; - final int preChunkAt2 = chunkAt2 - 1; - int offset2 = 0; - if (chunkAt2 != 0) { - offset2 = offsets[preChunkAt2]; - } - Chunk chunk1 = chunks.get(chunkAt1); - Chunk chunk2 = chunks.get(chunkAt2); - - return chunk1.equals(position1 - offset1, chunk2, position2 - offset2); - } - - public Iterator getChunksAndDeleteAfterRead() { - return new AbstractIterator() { - private int chunkCounter; - - @Override - protected Chunk computeNext() { - if (chunkCounter == getChunkCount()) { - return endOfData(); - } - return getChunk(chunkCounter++); - } - }; - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ExternalSorter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ExternalSorter.java index 20253dfba..811a2e639 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ExternalSorter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ExternalSorter.java @@ -16,8 +16,6 @@ package com.alibaba.polardbx.executor.operator.util; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.ListenableFuture; import 
com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.chunk.Chunk; @@ -30,6 +28,8 @@ import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.memory.OperatorMemoryAllocatorCtx; import com.alibaba.polardbx.optimizer.spill.SpillMonitor; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListenableFuture; import java.util.Iterator; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/GroupOpenHashMap.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/GroupOpenHashMap.java index a3c35b1de..f984cb990 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/GroupOpenHashMap.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/GroupOpenHashMap.java @@ -16,43 +16,35 @@ package com.alibaba.polardbx.executor.operator.util; -import com.alibaba.polardbx.common.properties.ConnectionParams; -import com.alibaba.polardbx.optimizer.core.datatype.SliceType; -import com.google.common.base.Preconditions; import com.alibaba.polardbx.common.utils.memory.ObjectSizeUtils; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.google.common.base.Preconditions; import it.unimi.dsi.fastutil.Hash; import it.unimi.dsi.fastutil.HashCommon; -import it.unimi.dsi.fastutil.ints.Int2IntArrayMap; -import it.unimi.dsi.fastutil.ints.Int2IntOpenHashMap; import java.util.Arrays; import java.util.List; class GroupOpenHashMap implements GroupHashMap, Hash { - private static final int NOT_EXISTS = -1; + protected static final int NOT_EXISTS = -1; - final int expectedSize; + protected final int expectedSize; - final int chunkSize; + protected final int chunkSize; protected final DataType[] groupKeyType; protected TypedBuffer groupKeyBuffer; - private int groupCount; - - private boolean useMap; - - protected Int2IntOpenHashMap map; + protected int groupCount; /** * The array of keys (buckets) */ - protected int[] keys; + private int[] keys; /** * The mask for wrapping a position counter */ @@ -74,6 +66,8 @@ class GroupOpenHashMap implements GroupHashMap, Hash { */ private int maxFill; + protected float loadFactor; + protected ExecutionContext context; public GroupOpenHashMap(DataType[] groupKeyType, int expectedSize, int chunkSize, ExecutionContext context) { @@ -86,22 +80,16 @@ public GroupOpenHashMap(DataType[] groupKeyType, int expectedSize, float loadFac "Load factor must be greater than 0 and smaller than or equal to 1"); Preconditions.checkArgument(expectedSize >= 0, "The expected number of elements must be non-negative"); + this.loadFactor = loadFactor; this.f = loadFactor; this.n = HashCommon.arraySize(expectedSize, loadFactor); this.mask = n - 1; this.maxFill = HashCommon.maxFill(n, loadFactor); this.size = 0; - this.useMap = context.getParamManager().getBoolean(ConnectionParams.ENABLE_UNIQUE_HASH_KEY); - - if (useMap) { - this.map = new Int2IntOpenHashMap(expectedSize, loadFactor); - map.defaultReturnValue(NOT_EXISTS); - } else { - int[] keys = new int[n]; - Arrays.fill(keys, NOT_EXISTS); - this.keys = keys; - } + int[] keys = new int[n]; + Arrays.fill(keys, NOT_EXISTS); + this.keys = keys; this.groupKeyType = groupKeyType; this.groupKeyBuffer = 
TypedBuffer.create(groupKeyType, chunkSize, context); @@ -114,32 +102,6 @@ public GroupOpenHashMap(DataType[] groupKeyType, int expectedSize, float loadFac * @param groupId if groupId == -1 means need to generate a new groupid */ int innerPut(Chunk chunk, int position, int groupId) { - if (useMap) { - return doInnerPutMap(chunk, position, groupId); - } else { - return doInnerPutArray(chunk, position, groupId); - } - } - - private int doInnerPutMap(Chunk chunk, int position, int groupId) { - int uniqueKey = chunk.hashCode(position); - - int value; - if ((value = map.get(uniqueKey)) != NOT_EXISTS) { - return value; - } - - if (groupId == -1) { - groupId = appendGroup(chunk, position); - } - - // otherwise, insert this position - map.put(uniqueKey, groupId); - - return groupId; - } - - private int doInnerPutArray(Chunk chunk, int position, int groupId) { int h = HashCommon.mix(chunk.hashCode(position)) & mask; int k = keys[h]; @@ -168,7 +130,7 @@ private int doInnerPutArray(Chunk chunk, int position, int groupId) { return groupId; } - private void rehash() { + protected void rehash() { this.n *= 2; this.mask = n - 1; this.maxFill = HashCommon.maxFill(n, this.f); @@ -196,7 +158,6 @@ List buildGroupChunks() { // set null to deallocate memory this.keys = null; - this.map = null; this.groupKeyBuffer = null; return chunks; @@ -209,9 +170,7 @@ boolean noGroupBy() { @Override public long estimateSize() { long size = 0L; - if (useMap && map != null) { - size += map.size() * Integer.BYTES * 2; - } else if (!useMap && keys != null) { + if (keys != null) { size += keys.length * ObjectSizeUtils.SIZE_INTEGER; } if (groupKeyBuffer != null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/HashAggResultIterator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/HashAggResultIterator.java index b997a6383..82399d1e6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/HashAggResultIterator.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/HashAggResultIterator.java @@ -16,9 +16,9 @@ package com.alibaba.polardbx.executor.operator.util; -import com.google.common.base.Preconditions; import com.alibaba.polardbx.executor.chunk.Block; import com.alibaba.polardbx.executor.chunk.Chunk; +import com.google.common.base.Preconditions; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/HashGroupJoinResultIterator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/HashGroupJoinResultIterator.java index c440afc8e..559d98883 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/HashGroupJoinResultIterator.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/HashGroupJoinResultIterator.java @@ -16,9 +16,9 @@ package com.alibaba.polardbx.executor.operator.util; -import com.google.common.base.Preconditions; import com.alibaba.polardbx.executor.chunk.Block; import com.alibaba.polardbx.executor.chunk.Chunk; +import com.google.common.base.Preconditions; import java.util.BitSet; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/HashWindowAggResultIterator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/HashWindowAggResultIterator.java index 811724872..bc8b4bc3c 100644 --- 
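With the Int2IntOpenHashMap fast path removed, innerPut always takes the open-addressing route. Paraphrased (the retained body is only partially visible in this hunk, so this is a sketch rather than a verbatim copy), the probe loop behaves like this:

    int h = HashCommon.mix(chunk.hashCode(position)) & mask;
    while (keys[h] != NOT_EXISTS) {
        if (groupKeyBuffer.equals(keys[h], chunk, position)) {
            return keys[h];        // key already present: reuse its group id
        }
        h = (h + 1) & mask;        // linear probing to the next bucket
    }
    if (groupId == -1) {
        groupId = appendGroup(chunk, position);  // materialize the new group key
    }
    keys[h] = groupId;             // claim the empty bucket
    return groupId;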
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/HashWindowAggResultIterator.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/HashWindowAggResultIterator.java @@ -20,6 +20,7 @@ import com.alibaba.polardbx.executor.chunk.BlockBuilder; import com.alibaba.polardbx.executor.chunk.Chunk; import com.google.common.base.Preconditions; +import it.unimi.dsi.fastutil.ints.IntArrayList; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; @@ -31,14 +32,14 @@ public class HashWindowAggResultIterator implements AggResultIterator { private final int inputChunkSize; private final List inputChunks; - private final List groupIds; + private final List groupIds; private BlockBuilder[] valueBlockBuilders; private final int chunkSize; public HashWindowAggResultIterator(List valueChunks, - List inputChunks, List groupIds, + List inputChunks, List groupIds, BlockBuilder[] blockBuilders, int chunkSize) { Preconditions.checkArgument(groupIds.size() == inputChunks.size(), "size of input chunk should be same with group id list"); @@ -67,12 +68,14 @@ public Chunk nextChunk() { results[i] = inputChunk.getBlock(i); } - int[] groupId = groupIds.get(index); + IntArrayList groupId = groupIds.get(index); for (int pos = 0; pos < inputChunk.getPositionCount(); ++pos) { for (int i = 0; i < valueBlockCount; ++i) { - valueChunks.get(groupId[pos] / chunkSize).getBlock(i) - .writePositionTo(groupId[pos] % chunkSize, valueBlockBuilders[i]); + int groupIdOfPos = groupId.getInt(pos); + + valueChunks.get(groupIdOfPos / chunkSize).getBlock(i) + .writePositionTo(groupIdOfPos % chunkSize, valueBlockBuilders[i]); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/HashWindowOpenHashMap.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/HashWindowOpenHashMap.java index bbe9e2e55..0b5831f27 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/HashWindowOpenHashMap.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/HashWindowOpenHashMap.java @@ -16,22 +16,26 @@ package com.alibaba.polardbx.executor.operator.util; -import com.alibaba.polardbx.executor.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.memory.OperatorMemoryAllocatorCtx; +import com.google.common.base.Preconditions; +import it.unimi.dsi.fastutil.ints.IntArrayList; import java.util.ArrayList; import java.util.List; public class HashWindowOpenHashMap extends AggOpenHashMap { private List inputChunks = new ArrayList<>(); - private List groupIds = new ArrayList<>(); + private List groupIds = new ArrayList<>(); public HashWindowOpenHashMap(DataType[] groupKeyType, List aggregators, DataType[] aggValueType, - DataType[] inputType, int expectedSize, int chunkSize, ExecutionContext context) { + DataType[] inputType, int expectedSize, int chunkSize, ExecutionContext context, + OperatorMemoryAllocatorCtx memoryAllocator) { super(groupKeyType, aggregators, aggValueType, inputType, expectedSize, DEFAULT_LOAD_FACTOR, chunkSize, - context); + context, memoryAllocator); } @Override @@ -41,10 +45,12 @@ public HashWindowAggResultIterator buildChunks() { } @Override - public int[] putChunk(Chunk keyChunk, Chunk 
inputChunk) { + public void putChunk(Chunk keyChunk, Chunk inputChunk, IntArrayList groupIdResult) { inputChunks.add(inputChunk); - int[] groupId = super.putChunk(keyChunk, inputChunk); - groupIds.add(groupId); - return groupId; + IntArrayList result = new IntArrayList(); + super.putChunk(keyChunk, inputChunk, result); + Preconditions.checkArgument(result.size() == inputChunk.getPositionCount(), + "length of group id not equal to length of input chunk"); + groupIds.add(result); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/MemSortor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/MemSortor.java index 5b0d67941..8f4d174b3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/MemSortor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/MemSortor.java @@ -16,9 +16,6 @@ package com.alibaba.polardbx.executor.operator.util; -import com.google.common.base.Preconditions; -import com.google.common.collect.AbstractIterator; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.chunk.Block; import com.alibaba.polardbx.executor.chunk.BlockBuilder; import com.alibaba.polardbx.executor.chunk.BlockBuilders; @@ -29,6 +26,9 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; +import com.google.common.base.Preconditions; +import com.google.common.collect.AbstractIterator; +import com.google.common.util.concurrent.ListenableFuture; import it.unimi.dsi.fastutil.ints.AbstractIntComparator; import it.unimi.dsi.fastutil.ints.IntArrays; import it.unimi.dsi.fastutil.ints.IntComparator; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/MergeSortedChunks.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/MergeSortedChunks.java index 9de23b53c..da1be52b7 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/MergeSortedChunks.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/MergeSortedChunks.java @@ -27,8 +27,8 @@ import java.util.List; import java.util.function.BiPredicate; -import static com.google.common.collect.ImmutableList.toImmutableList; import static com.alibaba.polardbx.executor.mpp.operator.WorkProcessor.mergeSorted; +import static com.google.common.collect.ImmutableList.toImmutableList; import static java.util.Objects.requireNonNull; public class MergeSortedChunks { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ObjectPools.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ObjectPools.java new file mode 100644 index 000000000..07d4bda0c --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/ObjectPools.java @@ -0,0 +1,57 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
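A sketch of the revised putChunk contract, with `hashMap`, `keyChunk`, and `inputChunk` assumed in scope: group ids are now collected into a caller-supplied IntArrayList, one id per input row, instead of being returned as a freshly allocated int[].

    IntArrayList groupIds = new IntArrayList();
    hashMap.putChunk(keyChunk, inputChunk, groupIds);

    assert groupIds.size() == inputChunk.getPositionCount();
    int firstRowGroup = groupIds.getInt(0); // primitive read, no boxing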
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.util; + +/** + * Manage different types of object pool. + */ +public interface ObjectPools { + static ObjectPools create() { + return new ObjectPoolsImpl(); + } + + DriverObjectPool getIntArrayPool(); + + DriverObjectPool getLongArrayPool(); + + void clear(); + + class ObjectPoolsImpl implements ObjectPools { + // object pool + private DriverObjectPool intArrayPool; + private DriverObjectPool longArrayPool; + + ObjectPoolsImpl() { + this.intArrayPool = DriverObjectPool.createIntArrayPool(); + this.longArrayPool = DriverObjectPool.createLongArrayPool(); + } + + public DriverObjectPool getIntArrayPool() { + return intArrayPool; + } + + public DriverObjectPool getLongArrayPool() { + return longArrayPool; + } + + @Override + public void clear() { + intArrayPool.clear(); + longArrayPool.clear(); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/Sorter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/Sorter.java index e5f97cc99..a9f6fa7e2 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/Sorter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/Sorter.java @@ -16,11 +16,11 @@ package com.alibaba.polardbx.executor.operator.util; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.utils.OrderByOption; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.memory.MemoryAllocatorCtx; +import com.google.common.util.concurrent.ListenableFuture; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/SpillableAggHashMap.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/SpillableAggHashMap.java index 13fe746fa..574486140 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/SpillableAggHashMap.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/SpillableAggHashMap.java @@ -16,14 +16,6 @@ package com.alibaba.polardbx.executor.operator.util; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.executor.calc.AbstractAggregator; -import com.alibaba.polardbx.executor.calc.aggfunctions.CountRow; -import com.alibaba.polardbx.executor.calc.aggfunctions.Count; -import com.alibaba.polardbx.executor.calc.aggfunctions.Long2LongSum0; -import com.google.common.collect.AbstractIterator; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.chunk.BlockBuilder; @@ -39,8 +31,14 @@ import com.alibaba.polardbx.executor.utils.OrderByOption; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import 
com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.executor.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.expression.calc.Aggregator; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.CountV2; +import com.alibaba.polardbx.optimizer.core.expression.calc.aggfunctions.SumV2; import com.alibaba.polardbx.optimizer.memory.OperatorMemoryAllocatorCtx; +import com.google.common.collect.AbstractIterator; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ListenableFuture; +import it.unimi.dsi.fastutil.ints.IntArrayList; import org.apache.calcite.rel.RelFieldCollation; import java.util.ArrayList; @@ -97,7 +95,7 @@ public SpillableAggHashMap(DataType[] groupKeyType, List aggregators this.memoryAllocator = memoryAllocator; aggHashMap = new AggOpenHashMap(groupKeyType, aggregators, aggValueType, inputType, - expectedSize, chunkSize, context); + expectedSize, chunkSize, context, memoryAllocator); this.spillerFactory = spillerFactory; this.spillTypes = new ArrayList<>(); blockBuilders = new BlockBuilder[groupKeyType.length + aggValueType.length]; @@ -112,9 +110,9 @@ public SpillableAggHashMap(DataType[] groupKeyType, List aggregators } @Override - public int[] putChunk(Chunk keyChunk, Chunk inputChunk) { + public void putChunk(Chunk keyChunk, Chunk inputChunk, IntArrayList groupIdResult) { checkState(spillInProgress.isDone()); - return aggHashMap.putChunk(keyChunk, inputChunk); + aggHashMap.putChunk(keyChunk, inputChunk, groupIdResult); } @Override @@ -182,14 +180,13 @@ private List getGlobalAggregators() { List aggList = new ArrayList<>(aggregators.size()); int groupKeySize = groupKeyType.length; for (Aggregator aggCall : aggregators) { - if (aggCall instanceof Count || aggCall instanceof CountRow) { - aggList.add( - new Long2LongSum0(groupKeySize + aggList.size(), ((AbstractAggregator) aggCall).isDistinct(), - DataTypes.LongType, DataTypes.DecimalType, - ((AbstractAggregator) aggCall).getFilterArg())); + Aggregator newAgg = aggCall.getNew(); + if (newAgg instanceof CountV2) { + aggList.add(new SumV2(groupKeySize + aggList.size(), newAgg.isDistinct(), memoryAllocator, + newAgg.getFilterArg())); } else { - ((AbstractAggregator) aggCall).setAggIndexInChunk(new int[] {groupKeySize + aggList.size()}); - aggList.add(aggCall); + newAgg.setAggTargetIndexes(new int[] {groupKeySize + aggList.size()}); + aggList.add(newAgg); } } return aggList; @@ -212,7 +209,7 @@ private ListenableFuture spillToDisk() { aggHashMap.close(); } aggHashMap = new AggOpenHashMap(groupKeyType, aggregators, aggValueType, inputType, - expectedSize, chunkSize, context); + expectedSize, chunkSize, context, memoryAllocator); }; return spillInProgress; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/SpillableChunkIterator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/SpillableChunkIterator.java index 8c8b89791..fa4b69396 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/SpillableChunkIterator.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/SpillableChunkIterator.java @@ -16,9 +16,9 @@ package com.alibaba.polardbx.executor.operator.util; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.operator.spill.Spiller; +import com.google.common.util.concurrent.ListenableFuture; import 
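The getGlobalAggregators rewrite encodes a standard two-phase aggregation rule: when spilled partial results are merged, a COUNT over the input becomes a SUM over the partial-count column. A worked toy illustration of why:

    // two spilled runs hold COUNT partials for the same group key:
    long[] partialCounts = {3L, 2L};

    long globalCount = 0L;
    for (long partial : partialCounts) {
        globalCount += partial;   // SUM of partials = 5, the true global COUNT
    }
    // re-applying COUNT to the partial rows would instead yield 2 (wrong)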
java.util.Iterator; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/SpilledTopNHeap.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/SpilledTopNHeap.java index b67e29128..4bbc6165e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/SpilledTopNHeap.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/SpilledTopNHeap.java @@ -16,10 +16,6 @@ package com.alibaba.polardbx.executor.operator.util; -import com.google.common.collect.AbstractIterator; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Ordering; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.chunk.Chunk; @@ -32,6 +28,10 @@ import com.alibaba.polardbx.optimizer.core.row.Row; import com.alibaba.polardbx.optimizer.memory.OperatorMemoryAllocatorCtx; import com.alibaba.polardbx.optimizer.spill.SpillMonitor; +import com.google.common.collect.AbstractIterator; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Ordering; +import com.google.common.util.concurrent.ListenableFuture; import it.unimi.dsi.fastutil.ints.IntArrayFIFOQueue; import it.unimi.dsi.fastutil.objects.ObjectHeapPriorityQueue; import org.openjdk.jol.info.ClassLayout; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/TypedBuffer.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/TypedBuffer.java index 9739ae8b2..a95964e32 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/TypedBuffer.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/TypedBuffer.java @@ -16,7 +16,7 @@ package com.alibaba.polardbx.executor.operator.util; -import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.common.type.MySQLStandardFieldType; import com.alibaba.polardbx.executor.chunk.BlockBuilder; import com.alibaba.polardbx.executor.chunk.BlockBuilders; import com.alibaba.polardbx.executor.chunk.Chunk; @@ -24,122 +24,52 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import java.util.ArrayList; +import java.util.BitSet; import java.util.List; -/** - * Appendable buffer for arbitrary data types - * - * @author Eric Fu - * @see ChunksIndex - */ -public class TypedBuffer { - - private BlockBuilder[] blockBuilders; - private final int chunkSize; +public interface TypedBuffer { + void appendRow(T array, int nullPosition, int positionCount); - private int currentSize; - private final List chunks = new ArrayList<>(); - private long estimateSize = 0; - private ExecutionContext context; - - private TypedBuffer(BlockBuilder[] blockBuilders, int chunkSize, ExecutionContext context) { - this.blockBuilders = blockBuilders; - this.chunkSize = chunkSize; - this.context = context; + default void appendRow(T array, int positionCount) { + appendRow(array, -1, positionCount); } - public static TypedBuffer create(DataType[] dataTypes, int chunkSize, ExecutionContext context) { - BlockBuilder[] blockBuilders = new BlockBuilder[dataTypes.length]; - for (int i = 0; i < dataTypes.length; i++) { - blockBuilders[i] = BlockBuilders.create(dataTypes[i], context); - } - return new TypedBuffer(blockBuilders, 
chunkSize, context); - } + void appendRow(Chunk chunk, int position); - public boolean equals(int position, Chunk otherChunk, int otherPosition) { - final int chunkId = chunkIndexOf(position); - final int offset = offsetOf(position); + List buildChunks(); - if (chunkId < chunks.size()) { - // Just compare both chunks - Chunk chunk = chunks.get(chunkId); - return chunk.equals(offset, otherChunk, otherPosition); - } else { - // Compare the block builders with given chunk (block by block) - assert chunkId == chunks.size(); - for (int i = 0; i < blockBuilders.length; i++) { - if (!otherChunk.getBlock(i).equals(otherPosition, blockBuilders[i], offset)) { - return false; - } - } - return true; - } - } + boolean equals(int position, Chunk otherChunk, int otherPosition); - public void appendRow(Chunk chunk, int position) { - // Check fulfilled before appending - if (currentSize == chunkSize) { - Chunk buildingChunk = getBuildingChunk(); - chunks.add(buildingChunk); - estimateSize += buildingChunk.estimateSize(); - for (int i = 0; i < blockBuilders.length; i++) { - blockBuilders[i] = blockBuilders[i].newBlockBuilder(); - currentSize = 0; - } - } + void appendValuesTo(int position, ChunkBuilder chunkBuilder); - for (int i = 0; i < blockBuilders.length; i++) { - chunk.getBlock(i).writePositionTo(position, blockBuilders[i]); - } - currentSize++; - } + long estimateSize(); - public List buildChunks() { - ArrayList allChunks = new ArrayList<>(this.chunks); - if (currentSize > 0) { - allChunks.add(getBuildingChunk()); - } - return allChunks; - } + // type-specific interface + void appendInteger(int col, int value); - private int chunkIndexOf(int position) { - return position / chunkSize; - } + void appendLong(int col, long value); - private int offsetOf(int position) { - return position % chunkSize; - } + void appendNull(int col); - public void appendValuesTo(int position, ChunkBuilder chunkBuilder) { - final int chunkId = chunkIndexOf(position); - final int offset = offsetOf(position); - if (chunkId < chunks.size()) { - // Just compare both chunks - Chunk chunk = chunks.get(chunkId); - for (int i = 0; i < chunk.getBlockCount(); ++i) { - final Block block = chunk.getBlock(i); - chunkBuilder.appendTo(block, i, offset); - } - } else { - // Compare the block builders with given chunk (block by block) - assert chunkId == chunks.size(); - for (int i = 0; i < blockBuilders.length; i++) { - Block block = blockBuilders[i].build(); - chunkBuilder.appendTo(block, i, offset); - } + static TypedBuffer create(DataType[] dataTypes, int chunkSize, ExecutionContext context) { + BlockBuilder[] blockBuilders = new BlockBuilder[dataTypes.length]; + for (int i = 0; i < dataTypes.length; i++) { + blockBuilders[i] = BlockBuilders.create(dataTypes[i], context); } + return new DefaultTypedBuffer(blockBuilders, chunkSize, context); } - private Chunk getBuildingChunk() { - Block[] blocks = new Block[blockBuilders.length]; - for (int i = 0; i < blockBuilders.length; i++) { - blocks[i] = blockBuilders[i].build(); + static TypedBuffer createTypeSpecific(DataType dataType, int chunkSize, ExecutionContext context) { + MySQLStandardFieldType fieldType = dataType.fieldType(); + switch (fieldType) { + case MYSQL_TYPE_LONGLONG: + return TypedBuffers.createLong(chunkSize, context); + case MYSQL_TYPE_LONG: + return TypedBuffers.createInt(chunkSize, context); + default: { + // fall back + return create(new DataType[] {dataType}, chunkSize, context); + } } - return new Chunk(currentSize, blocks); - } - - public long estimateSize() { - return 
estimateSize; } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/TypedBuffers.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/TypedBuffers.java new file mode 100644 index 000000000..59e4bf831 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/TypedBuffers.java @@ -0,0 +1,223 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.util; + +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockBuilder; +import com.alibaba.polardbx.executor.chunk.BlockBuilders; +import com.alibaba.polardbx.executor.chunk.Chunk; +import com.alibaba.polardbx.executor.chunk.ChunkBuilder; +import com.alibaba.polardbx.executor.chunk.IntegerBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +import java.util.ArrayList; +import java.util.BitSet; +import java.util.List; + +public class TypedBuffers { + public static TypedBuffer createLong(int chunkSize, ExecutionContext context) { + return new LongTypedBuffer(chunkSize, context); + } + + public static TypedBuffer createInt(int chunkSize, ExecutionContext context) { + return new IntegerTypedBuffer(chunkSize, context); + } + + private static abstract class AbstractTypeSpecificBuffer implements TypedBuffer { + protected final int chunkSize; + protected final ExecutionContext context; + protected int currentSize; + protected final List chunks = new ArrayList<>(); + protected long estimateSize = 0; + protected Block randomAccessBlock; + + AbstractTypeSpecificBuffer(int chunkSize, ExecutionContext context) { + this.chunkSize = chunkSize; + this.context = context; + } + + protected abstract void doAppendRow(T array, int nullPosition, int sourceIndex, int positionCount); + + @Override + public void appendInteger(int col, int value) { + throw new UnsupportedOperationException(); + } + + @Override + public void appendLong(int col, long value) { + throw new UnsupportedOperationException(); + } + + @Override + public void appendNull(int col) { + throw new UnsupportedOperationException(); + } + + @Override + public void appendRow(T array, int nullPosition, int positionCount) { + if (currentSize + positionCount < chunkSize) { + doAppendRow(array, nullPosition, 0, positionCount); + } else { + int firstCopySize = chunkSize - currentSize; + + doAppendRow(array, nullPosition, 0, firstCopySize); + doAppendRow(array, nullPosition, firstCopySize, positionCount - firstCopySize); + } + } + + @Override + public void appendRow(Chunk chunk, int position) { + throw new UnsupportedOperationException(); + } + + @Override + public List buildChunks() { + ArrayList allChunks = new ArrayList<>(this.chunks); + if 
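A sketch of the dispatch added in createTypeSpecific, assuming `dataType` and `context` are in scope: BIGINT and INT keys get the primitive buffers from TypedBuffers, and anything else falls back to the generic block-builder implementation.

    TypedBuffer keyBuffer = TypedBuffer.createTypeSpecific(dataType, 1024, context);
    // MYSQL_TYPE_LONGLONG -> TypedBuffers.createLong(1024, context)
    // MYSQL_TYPE_LONG     -> TypedBuffers.createInt(1024, context)
    // anything else       -> TypedBuffer.create(new DataType[] {dataType}, 1024, context)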
(currentSize > 0) { + allChunks.add(getBuildingChunk()); + } + return allChunks; + } + + @Override + public boolean equals(int position, Chunk otherChunk, int otherPosition) { + final int chunkId = chunkIndexOf(position); + final int offset = offsetOf(position); + + if (chunkId < chunks.size()) { + // Just compare both chunks + Chunk chunk = chunks.get(chunkId); + return chunk.equals(offset, otherChunk, otherPosition); + } else { + // Compare the block builders with given chunk (block by block) + assert chunkId == chunks.size(); + return otherChunk.getBlock(0).equals(otherPosition, randomAccessBlock, offset); + } + } + + @Override + public void appendValuesTo(int position, ChunkBuilder chunkBuilder) { + final int chunkId = chunkIndexOf(position); + final int offset = offsetOf(position); + if (chunkId < chunks.size()) { + // Just compare both chunks + Chunk chunk = chunks.get(chunkId); + for (int i = 0; i < chunk.getBlockCount(); ++i) { + final Block block = chunk.getBlock(i); + chunkBuilder.appendTo(block, i, offset); + } + } else { + // Compare the block builders with given chunk (block by block) + assert chunkId == chunks.size(); + + ((RandomAccessBlock) randomAccessBlock).resize(currentSize); + chunkBuilder.appendTo(randomAccessBlock, 0, offset); + } + } + + @Override + public long estimateSize() { + return estimateSize; + } + + protected Chunk getBuildingChunk() { + Block[] blocks = new Block[] {randomAccessBlock}; + ((RandomAccessBlock) randomAccessBlock).resize(currentSize); + return new Chunk(currentSize, blocks); + } + + protected int chunkIndexOf(int position) { + return position / chunkSize; + } + + protected int offsetOf(int position) { + return position % chunkSize; + } + } + + private static class LongTypedBuffer extends AbstractTypeSpecificBuffer { + LongTypedBuffer(int chunkSize, ExecutionContext context) { + super(chunkSize, context); + this.randomAccessBlock = new LongBlock(DataTypes.LongType, chunkSize); + } + + @Override + protected void doAppendRow(long[] array, int nullPosition, int sourceIndex, int positionCount) { + // Check fulfilled before appending + if (currentSize == chunkSize) { + Chunk buildingChunk = getBuildingChunk(); + chunks.add(buildingChunk); + estimateSize += buildingChunk.estimateSize(); + + // reset long block + randomAccessBlock = new LongBlock(DataTypes.LongType, chunkSize); + currentSize = 0; + } + + // copy value array + long[] targetValueArray = ((LongBlock) randomAccessBlock).longArray(); + for (int position = 0; position < positionCount; position++) { + targetValueArray[currentSize + position] = array[sourceIndex + position]; + } + + // copy null array + if (nullPosition >= 0) { + boolean[] targetNullArray = ((LongBlock) randomAccessBlock).nulls(); + targetNullArray[currentSize + nullPosition] = true; + } + currentSize += positionCount; + } + + } + + private static class IntegerTypedBuffer extends AbstractTypeSpecificBuffer { + IntegerTypedBuffer(int chunkSize, ExecutionContext context) { + super(chunkSize, context); + this.randomAccessBlock = new IntegerBlock(DataTypes.IntegerType, chunkSize); + } + + @Override + protected void doAppendRow(int[] array, int nullPosition, int sourceIndex, int positionCount) { + // Check fulfilled before appending + if (currentSize == chunkSize) { + Chunk buildingChunk = getBuildingChunk(); + chunks.add(buildingChunk); + estimateSize += buildingChunk.estimateSize(); + + // reset long block + randomAccessBlock = new IntegerBlock(DataTypes.IntegerType, chunkSize); + currentSize = 0; + } + + // copy value array + 
int[] targetValueArray = ((IntegerBlock) randomAccessBlock).intArray(); + for (int position = 0; position < positionCount; position++) { + targetValueArray[currentSize + position] = array[sourceIndex + position]; + } + + // copy null array + if (nullPosition >= 0) { + boolean[] targetNullArray = ((IntegerBlock) randomAccessBlock).nulls(); + targetNullArray[currentSize + nullPosition] = true; + } + currentSize += positionCount; + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/TypedList.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/TypedList.java new file mode 100644 index 000000000..fd8badbbb --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/TypedList.java @@ -0,0 +1,123 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.util; + +import com.alibaba.polardbx.common.utils.memory.SizeOf; + +public interface TypedList { + int INITIAL_SIZE = 1024; + + static TypedList createLong(int fixedSize) { + return new LongTypedList(fixedSize); + } + + static TypedList createInt(int fixedSize) { + return new IntTypedList(fixedSize); + } + + default void setInt(int index, int value) { + throw new UnsupportedOperationException(); + } + + default void setLong(int index, long value) { + throw new UnsupportedOperationException(); + } + + default void setIntArray(int sourceIndex, int[] fromArray, int startIndex, int endIndex) { + throw new UnsupportedOperationException(); + } + + default void setLongArray(int sourceIndex, long[] fromArray, int startIndex, int endIndex) { + throw new UnsupportedOperationException(); + } + + default long getLong(int position) { + throw new UnsupportedOperationException(); + } + + default int getInt(int position) { + throw new UnsupportedOperationException(); + } + + default void close() { + throw new UnsupportedOperationException(); + } + + class LongTypedList implements TypedList { + long[] array; + + public LongTypedList(int fixedSize) { + this.array = new long[fixedSize]; + } + + @Override + public void setLong(int index, long value) { + array[index] = value; + } + + @Override + public void setLongArray(int sourceIndex, long[] fromArray, int startIndex, int endIndex) { + System.arraycopy(fromArray, startIndex, array, sourceIndex, endIndex - startIndex); + } + + @Override + public long getLong(int position) { + return array[position]; + } + + @Override + public void close() { + array = null; + } + + public static long estimatedSizeInBytes(int fixedSize) { + return SizeOf.sizeOfLongArray(fixedSize); + } + } + + class IntTypedList implements TypedList { + int[] array; + + public IntTypedList(int fixedSize) { + array = new int[fixedSize]; + } + + @Override + public void setInt(int index, int value) { + array[index] = value; + } + + @Override + public void setIntArray(int sourceIndex, int[] fromArray, int startIndex, int 
endIndex) { + System.arraycopy(fromArray, startIndex, array, sourceIndex, endIndex - startIndex); + } + + @Override + public int getInt(int position) { + return array[position]; + } + + @Override + public void close() { + array = null; + } + + public static long estimatedSizeInBytes(int fixedSize) { + return SizeOf.sizeOfIntArray(fixedSize); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/TypedListHandle.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/TypedListHandle.java new file mode 100644 index 000000000..6fd4e9e4f --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/TypedListHandle.java @@ -0,0 +1,33 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.operator.util; + +import com.alibaba.polardbx.executor.chunk.Chunk; + +public interface TypedListHandle { + long SERIALIZED_MASK = ((long) 0x7fffffff) << 1 | 1; + + long estimatedSize(int fixedSize); + + TypedList[] getTypedLists(int fixedSize); + + void consume(Chunk chunk, int sourceIndex); + + static long serialize(int a, int b) { + return ((long) a << 32) | (b & SERIALIZED_MASK); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/VectorUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/VectorUtils.java index 1fdd1ec06..5bcd705f5 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/VectorUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/VectorUtils.java @@ -32,6 +32,12 @@ public static int[] addInt(int[] a, int[] b) { return results; } + public static void addInt(int[] results, int[] a, int[] b, int len) { + for (int i = 0; i < len; i++) { + results[i] = a[i] + b[i]; + } + } + public static int[] multiplyInt(int[] a, int b) { final int len = a.length; int[] results = new int[len]; @@ -41,4 +47,10 @@ public static int[] multiplyInt(int[] a, int b) { return results; } + public static void multiplyInt(int[] results, int[] a, int b, int len) { + for (int i = 0; i < len; i++) { + results[i] = a[i] * b; + } + } + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/bloomfilter/BloomFilterConsume.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/bloomfilter/BloomFilterConsume.java index 61ec08447..12bbd1301 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/bloomfilter/BloomFilterConsume.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/bloomfilter/BloomFilterConsume.java @@ -16,10 +16,10 @@ package com.alibaba.polardbx.executor.operator.util.bloomfilter; -import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import 
com.alibaba.polardbx.common.utils.bloomfilter.BloomFilter; import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.operator.util.minmaxfilter.MinMaxFilter; import com.google.common.util.concurrent.SettableFuture; @@ -64,7 +64,8 @@ public synchronized void setBloomFilter(BloomFilterInfo filterInfo) { if (filterInfo.getData() != null) { BloomFilter bloomFilter = filterInfo.toBloomFilter(); this.bloomFilter = bloomFilter; - this.minMaxFilterList = filterInfo.getMinMaxFilterInfoList().stream().map(x -> MinMaxFilter.from(x)).collect(Collectors.toList()); + this.minMaxFilterList = filterInfo.getMinMaxFilterInfoList().stream().map(x -> MinMaxFilter.from(x)) + .collect(Collectors.toList()); this.future.set(filterInfo); } else { this.future.set(null); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/bloomfilter/BloomFilterConsumeFilter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/bloomfilter/BloomFilterConsumeFilter.java index 5d0bf167a..e328f8eab 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/bloomfilter/BloomFilterConsumeFilter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/bloomfilter/BloomFilterConsumeFilter.java @@ -16,9 +16,9 @@ package com.alibaba.polardbx.executor.operator.util.bloomfilter; -import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilter; import com.alibaba.polardbx.common.utils.hash.IStreamingHasher; +import com.alibaba.polardbx.executor.chunk.Chunk; /** * @author bairui.lrj diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/bloomfilter/BloomFilterExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/bloomfilter/BloomFilterExpression.java index 08b879aa7..34269278d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/bloomfilter/BloomFilterExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/bloomfilter/BloomFilterExpression.java @@ -16,11 +16,11 @@ package com.alibaba.polardbx.executor.operator.util.bloomfilter; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; +import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; -import com.alibaba.polardbx.common.utils.bloomfilter.BloomFilterInfo; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; import java.util.ArrayList; import java.util.HashMap; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/BlackHoleMinMaxFilter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/BlackHoleMinMaxFilter.java index 1357b96c6..1ad5c612e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/BlackHoleMinMaxFilter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/BlackHoleMinMaxFilter.java @@ -21,7 +21,6 @@ /** * @author chenzilin - * @date 2021/12/8 15:01 */ public class 
BlackHoleMinMaxFilter extends MinMaxFilter { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/DateMinMaxFilter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/DateMinMaxFilter.java index 3a953c12a..7ee10975b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/DateMinMaxFilter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/DateMinMaxFilter.java @@ -25,7 +25,6 @@ /** * @author chenzilin - * @date 2021/12/13 19:15 */ public class DateMinMaxFilter extends MinMaxFilter { private Long min; @@ -61,8 +60,8 @@ public void put(Block block, int pos) { if (!block.isNull(pos)) { MysqlDateTime mysqlDateTime = DataTypeUtil.toMySQLDatetimeByFlags( - block.getDate(pos), - TimeParserFlags.FLAG_TIME_NO_ZERO_DATE); + block.getDate(pos), + TimeParserFlags.FLAG_TIME_NO_ZERO_DATE); long num = TimeStorage.writeTimestamp(mysqlDateTime); @@ -78,9 +77,9 @@ public void put(Block block, int pos) { @Override public MinMaxFilterInfo toMinMaxFilterInfo() { return new MinMaxFilterInfo( - MinMaxFilterInfo.TYPE.LONG, - min == null ? null : min.longValue(), - max == null ? null : max.longValue(), null, null, null, null, null, null); + MinMaxFilterInfo.TYPE.LONG, + min == null ? null : min.longValue(), + max == null ? null : max.longValue(), null, null, null, null, null, null); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/DoubleMinMaxFilter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/DoubleMinMaxFilter.java index cf3d20d66..8a31b2710 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/DoubleMinMaxFilter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/DoubleMinMaxFilter.java @@ -21,11 +21,11 @@ /** * @author chenzilin - * @date 2021/12/14 17:45 */ public class DoubleMinMaxFilter extends MinMaxFilter { Double min; Double max; + public DoubleMinMaxFilter() { } @@ -67,9 +67,9 @@ public void put(Block block, int pos) { @Override public MinMaxFilterInfo toMinMaxFilterInfo() { return new MinMaxFilterInfo( - MinMaxFilterInfo.TYPE.DOUBLE, - null, null, null, null, min == null ? null : min.doubleValue(), - max == null ? null : max.doubleValue(), null, null); + MinMaxFilterInfo.TYPE.DOUBLE, + null, null, null, null, min == null ? null : min.doubleValue(), + max == null ? 
null : max.doubleValue(), null, null); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/FloatMinMaxFilter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/FloatMinMaxFilter.java index 4dc14a9a5..4920e3d48 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/FloatMinMaxFilter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/FloatMinMaxFilter.java @@ -21,11 +21,11 @@ /** * @author chenzilin - * @date 2021/12/14 18:20 */ public class FloatMinMaxFilter extends MinMaxFilter { Float min; Float max; + public FloatMinMaxFilter() { } @@ -67,9 +67,9 @@ public void put(Block block, int pos) { @Override public MinMaxFilterInfo toMinMaxFilterInfo() { return new MinMaxFilterInfo( - MinMaxFilterInfo.TYPE.FLOAT, - null, null, null, null, null, null, min == null ? null : min.floatValue(), - max == null ? null : max.floatValue()); + MinMaxFilterInfo.TYPE.FLOAT, + null, null, null, null, null, null, min == null ? null : min.floatValue(), + max == null ? null : max.floatValue()); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/IntegerMinMaxFilter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/IntegerMinMaxFilter.java index 3b7ba45d5..459355f97 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/IntegerMinMaxFilter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/IntegerMinMaxFilter.java @@ -18,15 +18,14 @@ import com.alibaba.polardbx.common.utils.bloomfilter.MinMaxFilterInfo; import com.alibaba.polardbx.executor.chunk.Block; -import com.alibaba.polardbx.optimizer.core.function.calc.scalar.filter.In; /** * @author chenzilin - * @date 2021/12/8 14:49 */ public class IntegerMinMaxFilter extends MinMaxFilter { Integer min; Integer max; + public IntegerMinMaxFilter() { } @@ -68,9 +67,9 @@ public void put(Block block, int pos) { @Override public MinMaxFilterInfo toMinMaxFilterInfo() { return new MinMaxFilterInfo( - MinMaxFilterInfo.TYPE.INTEGER, - min == null ? null : min.longValue(), - max == null ? null : max.longValue(), null, null, null, null, null, null); + MinMaxFilterInfo.TYPE.INTEGER, + min == null ? null : min.longValue(), + max == null ? null : max.longValue(), null, null, null, null, null, null); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/LongMinMaxFilter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/LongMinMaxFilter.java index 7a60a4197..bdc5b233f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/LongMinMaxFilter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/LongMinMaxFilter.java @@ -21,7 +21,6 @@ /** * @author chenzilin - * @date 2021/12/8 14:40 */ public class LongMinMaxFilter extends MinMaxFilter { private Long min; @@ -68,9 +67,9 @@ public void put(Block block, int pos) { @Override public MinMaxFilterInfo toMinMaxFilterInfo() { return new MinMaxFilterInfo( - MinMaxFilterInfo.TYPE.LONG, - min == null ? null : min.longValue(), - max == null ? null : max.longValue(), null, null, null, null, null, null); + MinMaxFilterInfo.TYPE.LONG, + min == null ? 
null : min.longValue(), + max == null ? null : max.longValue(), null, null, null, null, null, null); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/MinMaxFilter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/MinMaxFilter.java index 0edad71d6..c8e5dc8da 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/MinMaxFilter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/MinMaxFilter.java @@ -26,10 +26,8 @@ import java.sql.Date; import java.sql.Timestamp; - /** * @author chenzilin - * @date 2021/12/8 14:33 */ public class MinMaxFilter { public static MinMaxFilter create(DataType type) { @@ -59,42 +57,42 @@ public static MinMaxFilter create(DataType type) { public static MinMaxFilter from(MinMaxFilterInfo minMaxFilterInfo) { switch (minMaxFilterInfo.getType()) { - case NULL: - return new BlackHoleMinMaxFilter(); - case INTEGER: - return new IntegerMinMaxFilter( - minMaxFilterInfo.getMinNumber() == null ? null : minMaxFilterInfo.getMinNumber().intValue(), - minMaxFilterInfo.getMaxNumber() == null ? null : minMaxFilterInfo.getMaxNumber().intValue()); - case LONG: - return new LongMinMaxFilter( - minMaxFilterInfo.getMinNumber() == null ? null : minMaxFilterInfo.getMinNumber().longValue(), - minMaxFilterInfo.getMaxNumber() == null ? null : minMaxFilterInfo.getMaxNumber().longValue()); - case STRING: - return new StringMinMaxFilter( - minMaxFilterInfo.getMinString() == null ? null : minMaxFilterInfo.getMinString(), - minMaxFilterInfo.getMaxString() == null ? null : minMaxFilterInfo.getMaxString()); - case TIMESTAMP: - return new TimestampMinMaxFilter( - minMaxFilterInfo.getMinNumber() == null ? null : minMaxFilterInfo.getMinNumber().longValue(), - minMaxFilterInfo.getMaxNumber() == null ? null : minMaxFilterInfo.getMaxNumber().longValue()); - case DATE: - return new DateMinMaxFilter( - minMaxFilterInfo.getMinNumber() == null ? null : minMaxFilterInfo.getMinNumber().longValue(), - minMaxFilterInfo.getMaxNumber() == null ? null : minMaxFilterInfo.getMaxNumber().longValue()); - case DECIMAL: - return new DecimalMinMaxFilter( - minMaxFilterInfo.getMinString() == null ? null : Decimal.fromString(minMaxFilterInfo.getMinString()), - minMaxFilterInfo.getMaxString() == null ? null : Decimal.fromString(minMaxFilterInfo.getMaxString())); - case DOUBLE: - return new DoubleMinMaxFilter( - minMaxFilterInfo.getMinDouble() == null ? null : minMaxFilterInfo.getMinDouble(), - minMaxFilterInfo.getMaxDouble() == null ? null : minMaxFilterInfo.getMaxDouble()); - case FLOAT: - return new FloatMinMaxFilter( - minMaxFilterInfo.getMinFloat() == null ? null : minMaxFilterInfo.getMinFloat(), - minMaxFilterInfo.getMaxFloat() == null ? null : minMaxFilterInfo.getMaxFloat()); - default: - throw new UnsupportedOperationException(); + case NULL: + return new BlackHoleMinMaxFilter(); + case INTEGER: + return new IntegerMinMaxFilter( + minMaxFilterInfo.getMinNumber() == null ? null : minMaxFilterInfo.getMinNumber().intValue(), + minMaxFilterInfo.getMaxNumber() == null ? null : minMaxFilterInfo.getMaxNumber().intValue()); + case LONG: + return new LongMinMaxFilter( + minMaxFilterInfo.getMinNumber() == null ? null : minMaxFilterInfo.getMinNumber().longValue(), + minMaxFilterInfo.getMaxNumber() == null ? 
null : minMaxFilterInfo.getMaxNumber().longValue()); + case STRING: + return new StringMinMaxFilter( + minMaxFilterInfo.getMinString() == null ? null : minMaxFilterInfo.getMinString(), + minMaxFilterInfo.getMaxString() == null ? null : minMaxFilterInfo.getMaxString()); + case TIMESTAMP: + return new TimestampMinMaxFilter( + minMaxFilterInfo.getMinNumber() == null ? null : minMaxFilterInfo.getMinNumber().longValue(), + minMaxFilterInfo.getMaxNumber() == null ? null : minMaxFilterInfo.getMaxNumber().longValue()); + case DATE: + return new DateMinMaxFilter( + minMaxFilterInfo.getMinNumber() == null ? null : minMaxFilterInfo.getMinNumber().longValue(), + minMaxFilterInfo.getMaxNumber() == null ? null : minMaxFilterInfo.getMaxNumber().longValue()); + case DECIMAL: + return new DecimalMinMaxFilter( + minMaxFilterInfo.getMinString() == null ? null : Decimal.fromString(minMaxFilterInfo.getMinString()), + minMaxFilterInfo.getMaxString() == null ? null : Decimal.fromString(minMaxFilterInfo.getMaxString())); + case DOUBLE: + return new DoubleMinMaxFilter( + minMaxFilterInfo.getMinDouble() == null ? null : minMaxFilterInfo.getMinDouble(), + minMaxFilterInfo.getMaxDouble() == null ? null : minMaxFilterInfo.getMaxDouble()); + case FLOAT: + return new FloatMinMaxFilter( + minMaxFilterInfo.getMinFloat() == null ? null : minMaxFilterInfo.getMinFloat(), + minMaxFilterInfo.getMaxFloat() == null ? null : minMaxFilterInfo.getMaxFloat()); + default: + throw new UnsupportedOperationException(); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/StringMinMaxFilter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/StringMinMaxFilter.java index af2ec8ead..3c2b99bd8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/StringMinMaxFilter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/StringMinMaxFilter.java @@ -22,7 +22,6 @@ /** * @author chenzilin - * @date 2021/12/10 15:33 */ public class StringMinMaxFilter extends MinMaxFilter { @@ -57,7 +56,7 @@ public void setMax(String max) { @Override public void put(Block block, int pos) { if (!block.isNull(pos)) { - String str = ((SliceBlock) block).getRegion(pos).toStringUtf8(); + String str = (block.cast(SliceBlock.class)).getRegion(pos).toStringUtf8(); if (min == null || str.compareTo(min) < 0) { min = str; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/TimestampMinMaxFilter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/TimestampMinMaxFilter.java index d6ea69025..b0a05593f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/TimestampMinMaxFilter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/operator/util/minmaxfilter/TimestampMinMaxFilter.java @@ -27,13 +27,9 @@ import com.alibaba.polardbx.executor.chunk.TimestampBlock; import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; import com.alibaba.polardbx.rpc.result.XResultUtil; -import org.apache.orc.sarg.SearchArgument; - -import java.sql.Timestamp; /** * @author chenzilin - * @date 2021/12/13 16:57 */ public class TimestampMinMaxFilter extends MinMaxFilter { private Long min; @@ -67,16 +63,18 @@ public void setMax(Long max) { @Override public void put(Block block, int pos) { if (!block.isNull(pos)) { - OriginalTimestamp 
originalTimestamp = ((TimestampBlock)block).getTimestamp(pos); + OriginalTimestamp originalTimestamp = ((TimestampBlock) block).getTimestamp(pos); MysqlDateTime mysqlDateTime = DataTypeUtil.toMySQLDatetimeByFlags( - originalTimestamp, - TimeParserFlags.FLAG_TIME_FUZZY_DATE); + originalTimestamp, + TimeParserFlags.FLAG_TIME_FUZZY_DATE); if (mysqlDateTime == null) { return; } TimeParseStatus timeParseStatus = new TimeParseStatus(); - MySQLTimeVal timeVal = MySQLTimeConverter.convertDatetimeToTimestampWithoutCheck(mysqlDateTime, timeParseStatus, originalTimestamp.getMysqlDateTime().getTimezone().toZoneId()); + MySQLTimeVal timeVal = + MySQLTimeConverter.convertDatetimeToTimestampWithoutCheck(mysqlDateTime, timeParseStatus, + originalTimestamp.getMysqlDateTime().getTimezone().toZoneId()); if (timeVal == null) { return; } @@ -94,9 +92,9 @@ public void put(Block block, int pos) { @Override public MinMaxFilterInfo toMinMaxFilterInfo() { return new MinMaxFilterInfo( - MinMaxFilterInfo.TYPE.LONG, - min == null ? null : min.longValue(), - max == null ? null : max.longValue(), null, null, null, null, null, null); + MinMaxFilterInfo.TYPE.LONG, + min == null ? null : min.longValue(), + max == null ? null : max.longValue(), null, null, null, null, null, null); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/AlterTableGroupUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/AlterTableGroupUtils.java index e9bd32c79..caba1309c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/AlterTableGroupUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/AlterTableGroupUtils.java @@ -25,7 +25,6 @@ import com.alibaba.polardbx.executor.ExecutorHelper; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.ddl.job.validator.JoinGroupValidator; -import com.alibaba.polardbx.gms.partition.TablePartRecordInfoContext; import com.alibaba.polardbx.gms.partition.TablePartitionRecord; import com.alibaba.polardbx.gms.tablegroup.JoinGroupInfoRecord; import com.alibaba.polardbx.gms.tablegroup.JoinGroupUtils; @@ -108,7 +107,6 @@ import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlLiteral; import org.apache.calcite.sql.SqlNode; -import org.apache.calcite.sql.SqlNodeList; import org.apache.calcite.sql.SqlNumericLiteral; import org.apache.calcite.sql.SqlPartition; import org.apache.calcite.sql.SqlPartitionValue; @@ -249,7 +247,7 @@ public static void alterTableGroupSplitPartitionCheck(SqlAlterTableSplitPartitio boolean alterTableOnly = !(sqlAlterTableSplitPartition instanceof SqlAlterTableGroupSplitPartition); String tgSchema = tableGroupConfig.getTableGroupRecord().getSchema(); final SchemaManager schemaManager = OptimizerContext.getContext(tgSchema).getLatestSchemaManager(); - String tableInCurrentGroup = tableGroupConfig.getAllTables().get(0).getLogTbRec().tableName; + String tableInCurrentGroup = tableGroupConfig.getAllTables().get(0); TableMeta tableMeta = schemaManager.getTable(tableInCurrentGroup); TableGroupRecord tableGroupRecord = tableGroupConfig.getTableGroupRecord(); String tgName = tableGroupRecord.getTg_name(); @@ -678,8 +676,11 @@ private static PartitionField calculateSplitAtVal(TableGroupConfig tableGroupCon SqlLiteral constLiteral = ((SqlLiteral) sqlAlter.getAtValue()); String constStr = constLiteral.getValueAs(String.class); atVal.store(constStr, new CharType()); - } else 
if (strategy != PartitionStrategy.HASH && strategy != PartitionStrategy.KEY && GeneralUtil.isEmpty( - sqlAlter.getNewPartitions())) { + } else if (strategy != PartitionStrategy.HASH + && strategy != PartitionStrategy.KEY + && strategy != PartitionStrategy.DIRECT_HASH + && strategy != PartitionStrategy.CO_HASH + && GeneralUtil.isEmpty(sqlAlter.getNewPartitions())) { throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT, "Missing the new partitions spec"); } if (GeneralUtil.isNotEmpty(sqlAlter.getNewPartitions())) { @@ -710,7 +711,7 @@ public static void alterTableGroupMergePartitionCheck(SqlAlterTableMergePartitio String schemaName = tableGroupConfig.getTableGroupRecord().getSchema(); final SchemaManager schemaManager = executionContext.getSchemaManager(schemaName); - String tableInCurrentGroup = tableGroupConfig.getAllTables().get(0).getLogTbRec().tableName; + String tableInCurrentGroup = tableGroupConfig.getAllTables().get(0); TableMeta tableMeta = schemaManager.getTable(tableInCurrentGroup); PartitionInfo partitionInfo = tableMeta.getPartitionInfo(); @@ -827,7 +828,7 @@ public static void alterTableGroupReorgPartitionCheck(SqlAlterTableReorgPartitio String schemaName = tableGroupConfig.getTableGroupRecord().getSchema(); final SchemaManager schemaManager = executionContext.getSchemaManager(schemaName); - String firstTableName = tableGroupConfig.getAllTables().get(0).getTableName(); + String firstTableName = tableGroupConfig.getAllTables().get(0); PartitionInfo partitionInfo = schemaManager.getTable(firstTableName).getPartitionInfo(); PartitionByDefinition partByDef = partitionInfo.getPartitionBy(); @@ -1372,7 +1373,7 @@ public static void alterTableGroupMovePartitionCheck(SqlAlterTableMovePartition } String tgSchema = tableGroupConfig.getTableGroupRecord().getSchema(); - String firstTableName = tableGroupConfig.getAllTables().get(0).getTableName(); + String firstTableName = tableGroupConfig.getAllTables().get(0); PartitionInfo partitionInfo = OptimizerContext.getContext(tgSchema).getPartitionInfoManager().getPartitionInfo(firstTableName); @@ -1424,8 +1425,8 @@ public static void alterTableGroupMovePartitionCheck(SqlAlterTableMovePartition private static void alterTableGroupExtractPartitionCheck(SqlAlterTableGroup sqlAlterTableGroup, TableGroupConfig tableGroupConfig, ExecutionContext executionContext) { - String schemaName = tableGroupConfig.getAllTables().get(0).getLogTbRec().tableSchema; - String tableInCurrentGroup = tableGroupConfig.getAllTables().get(0).getLogTbRec().tableName; + String schemaName = tableGroupConfig.getTableGroupRecord().getSchema(); + String tableInCurrentGroup = tableGroupConfig.getAllTables().get(0); final SchemaManager schemaManager = executionContext.getSchemaManager(schemaName); @@ -1464,6 +1465,7 @@ public static void alterTableExtractPartitionCheck(String schemaName, String log "can't execute the extract partition command for single/broadcast tables"); } if (partitionInfo.getPartitionBy().getStrategy() != PartitionStrategy.HASH + && partitionInfo.getPartitionBy().getStrategy() != PartitionStrategy.CO_HASH && partitionInfo.getPartitionBy().getStrategy() != PartitionStrategy.KEY && partitionInfo.getPartitionBy().getStrategy() != PartitionStrategy.LIST && partitionInfo.getPartitionBy().getStrategy() != PartitionStrategy.LIST_COLUMNS) { @@ -1483,8 +1485,8 @@ private static void alterTableGroupSplitPartitionByHotValueCheck(SqlAlterTableGr boolean modifyNonTemplateSubPartition = subPartitionSplit && 
sqlAlterTableSplitPartitionByHotValue.getModifyPartitionName() != null; - String schemaName = tableGroupConfig.getAllTables().get(0).getLogTbRec().tableSchema; - String tableInCurrentGroup = tableGroupConfig.getAllTables().get(0).getLogTbRec().tableName; + String schemaName = tableGroupConfig.getTableGroupRecord().getSchema(); + String tableInCurrentGroup = tableGroupConfig.getAllTables().get(0); final SchemaManager schemaManager = executionContext.getSchemaManager(schemaName); @@ -1623,7 +1625,7 @@ public static void alterTableGroupDropPartitionCheck(SqlAlterTableDropPartition String schemaName = tableGroupConfig.getTableGroupRecord().getSchema(); final SchemaManager schemaManager = executionContext.getSchemaManager(schemaName); - String tableInCurrentGroup = tableGroupConfig.getAllTables().get(0).getTableName(); + String tableInCurrentGroup = tableGroupConfig.getAllTables().get(0); PartitionInfo partitionInfo = schemaManager.getTable(tableInCurrentGroup).getPartitionInfo(); PartitionByDefinition partByDef = partitionInfo.getPartitionBy(); @@ -1712,17 +1714,17 @@ public static void alterTableGroupDropPartitionCheck(SqlAlterTableDropPartition "Don't allow to drop all the partitions"); } - for (TablePartRecordInfoContext record : tableGroupConfig.getAllTables()) { - TableMeta tbMeta = schemaManager.getTable(record.getTableName()); - if (tbMeta.withGsi()) { + for (String tableName : tableGroupConfig.getAllTables()) { + TableMeta tbMeta = schemaManager.getTable(tableName); + if (tbMeta.withGsi() && !tbMeta.withCci()) { throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT, String.format("it's not support to drop partition/subpartition when table[%s] with GSI", - record.getTableName())); + tableName)); } - if (tbMeta.isGsi()) { + if (tbMeta.isGsi() && !tbMeta.isColumnar()) { throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_DROP_PARTITION, String.format("it's not support to drop global index[%s]'s partition/subpartition", - record.getTableName())); + tableName)); } } } @@ -1733,7 +1735,7 @@ public static void alterTableGroupTruncatePartitionCheck( String schemaName = tableGroupConfig.getTableGroupRecord().getSchema(); final SchemaManager schemaManager = executionContext.getSchemaManager(schemaName); - String tableInCurrentGroup = tableGroupConfig.getAllTables().get(0).getLogTbRec().tableName; + String tableInCurrentGroup = tableGroupConfig.getAllTables().get(0); PartitionInfo partitionInfo = schemaManager.getTable(tableInCurrentGroup).getPartitionInfo(); PartitionByDefinition subPartByDef = partitionInfo.getPartitionBy().getSubPartitionBy(); @@ -1744,17 +1746,17 @@ public static void alterTableGroupTruncatePartitionCheck( "Don't allow to drop subpartitions from one-level partitioned table"); } - for (TablePartRecordInfoContext record : tableGroupConfig.getAllTables()) { - TableMeta tbMeta = schemaManager.getTable(record.getTableName()); + for (String tableName : tableGroupConfig.getAllTables()) { + TableMeta tbMeta = schemaManager.getTable(tableName); if (tbMeta.withGsi()) { throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT, String.format("it's not support to truncate partition/subpartition when table[%s] with GSI", - record.getTableName())); + tableName)); } if (tbMeta.isGsi()) { throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_DROP_PARTITION, String.format("it's not support to truncate global index[%s]'s partition/subpartition", - record.getTableName())); + tableName)); } } } @@ -1764,12 +1766,12 @@ private static void 
alterTableModifyListPartitionValuesCheck(SqlAlterTableGroup SqlAlterTableModifyPartitionValues sqlModifyListPartitionValues, ExecutionContext executionContext) { - TablePartitionRecord tablePartitionRecord = tableGroupConfig.getAllTables().get(0).getLogTbRec(); + TableGroupRecord tableGroupRecord = tableGroupConfig.getTableGroupRecord(); SchemaManager schemaManager = - OptimizerContext.getContext(tablePartitionRecord.getTableSchema()).getLatestSchemaManager(); + OptimizerContext.getContext(tableGroupRecord.getSchema()).getLatestSchemaManager(); PartitionInfo partitionInfo = - OptimizerContext.getContext(tablePartitionRecord.getTableSchema()).getPartitionInfoManager() - .getPartitionInfo(tablePartitionRecord.getTableName()); + OptimizerContext.getContext(tableGroupRecord.getSchema()).getPartitionInfoManager() + .getPartitionInfo(tableGroupConfig.getAllTables().get(0)); SqlPartition partSpecAst = sqlModifyListPartitionValues.getPartition(); SqlSubPartition subpartSpecAst = null; @@ -1902,15 +1904,15 @@ private static void alterTableModifyListPartitionValuesCheck(SqlAlterTableGroup "the number of drop values should less than the number of values contain by partition[%s]", targetPartNameStr)); } - for (TablePartRecordInfoContext record : tableGroupConfig.getAllTables()) { - TableMeta tbMeta = schemaManager.getTable(record.getTableName()); + for (String tableName : tableGroupConfig.getAllTables()) { + TableMeta tbMeta = schemaManager.getTable(tableName); if (tbMeta.withGsi()) { throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT, - String.format("it's not support to drop value when table[%s] with GSI", record.getTableName())); + String.format("it's not support to drop value when table[%s] with GSI", tableName)); } if (tbMeta.isGsi()) { throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_MODIFY_PARTITION_DROP_VALUE, - String.format("it's not support to drop value for global index[%s]", record.getTableName())); + String.format("it's not support to drop value for global index[%s]", tableName)); } } } @@ -1923,7 +1925,7 @@ public static void alterTableGroupRenamePartitionCheck( final String schemaName = tableGroupConfig.getTableGroupRecord().getSchema(); final SchemaManager schemaManager = executionContext.getSchemaManager(schemaName); - String firstTableInCurrentGroup = tableGroupConfig.getAllTables().get(0).getLogTbRec().tableName; + String firstTableInCurrentGroup = tableGroupConfig.getAllTables().get(0); TableMeta tableMeta = schemaManager.getTable(firstTableInCurrentGroup); Set oldPartitionNames = new HashSet<>(); @@ -2049,7 +2051,7 @@ private static void alterTableGroupAddPartitionCheck(TableGroupConfig tableGroup String tgSchema = tableGroupConfig.getTableGroupRecord().getSchema(); final SchemaManager schemaManager = OptimizerContext.getContext(tgSchema).getLatestSchemaManager(); String tgName = tableGroupConfig.getTableGroupRecord().getTg_name(); - String firstTableInCurrentGroup = tableGroupConfig.getAllTables().get(0).getLogTbRec().tableName; + String firstTableInCurrentGroup = tableGroupConfig.getAllTables().get(0); PartitionInfo partitionInfo = schemaManager.getTable(firstTableInCurrentGroup).getPartitionInfo(); if (partitionInfo.isSingleTable() || partitionInfo.isBroadcastTable()) { throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT, @@ -2136,7 +2138,7 @@ public static void mergeTableGroupCheck(MergeTableGroupPreparedData preparedData throw new TddlRuntimeException(ErrorCode.ERR_TABLE_GROUP_IS_EMPTY, "it not allow to merge tables into empty 
tablegroup"); } - String firstTable = tableGroupConfig.getAllTables().get(0).getTableName(); + String firstTable = tableGroupConfig.getAllTables().get(0); PartitionInfo partitionInfo = partitionInfoManager.getPartitionInfo(firstTable); TableMeta tableMeta = ec.getSchemaManager(preparedData.getSchemaName()).getTable(firstTable); if (tableMeta.isGsi()) { @@ -2157,7 +2159,7 @@ public static void mergeTableGroupCheck(MergeTableGroupPreparedData preparedData String.format("The joinGroup of tableGroup:[%s] is not match with the joinGroup of tableGroup[%s]", tableGroup, targetTableGroup); if (GeneralUtil.isNotEmpty(tableGroupConfig.getAllTables())) { - String sourceTable = tableGroupConfig.getAllTables().get(0).getTableName(); + String sourceTable = tableGroupConfig.getAllTables().get(0); PartitionInfo sourcePartitionInfo = partitionInfoManager.getPartitionInfo(sourceTable); if (!sourcePartitionInfo.equals(partitionInfo)) { throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_INVALID_PARAMS, @@ -2197,7 +2199,7 @@ public static void alterTableGroupAddTableCheck(AlterTableGroupAddTablePreparedD } PartitionInfo partitionInfo = null; if (GeneralUtil.isNotEmpty(tableGroupConfig.getAllTables())) { - String firstTable = tableGroupConfig.getAllTables().get(0).getTableName(); + String firstTable = tableGroupConfig.getAllTables().get(0); partitionInfo = partitionInfoManager.getPartitionInfo(firstTable); } else { partitionInfo = partitionInfoManager.getPartitionInfo(preparedData.getReferenceTable()); @@ -2327,7 +2329,7 @@ public static PartitionInfo getPartitionInfo(String tableGroupName, String schem "can't modify the tablegroup:" + tableGroupName + " when it's empty"); } - String tableInCurrentGroup = tableGroupConfig.getAllTables().get(0).getLogTbRec().tableName; + String tableInCurrentGroup = tableGroupConfig.getAllTables().get(0); TableMeta tableMeta = schemaManager.getTable(tableInCurrentGroup); PartitionInfo partitionInfo = tableMeta.getPartitionInfo(); @@ -2350,7 +2352,7 @@ public static PartitionInfo getPartitionInfo(String tableGroupName, String schem public static String convertExtractPartitionToSplitPartitionSql( LogicalAlterTableExtractPartition extractPartitionPlan, boolean isAlterTable, ExecutionContext executionContext) { - final String SPLIT_SQL = isAlterTable ? "ALTER TABLE {0} SPLIT INTO {1} PARTITIONS 1 BY HOT VALUE({2});" : + final String SPLIT_SQL = isAlterTable ? "ALTER TABLE {0} SPLIT INTO {1} PARTITIONS 1 BY HOT VALUE({2})" : "ALTER TABLEGROUP {0} SPLIT INTO {1} PARTITIONS 1 BY HOT VALUE({2})"; PartitionInfo partitionInfo; SqlAlterTableExtractPartition sqlAlterTableExtractPartition; @@ -2468,8 +2470,8 @@ public static String convertExtractToSplitSqlForSubpartition(LogicalAlterTableEx */ public static String convertExtractListRelToSplitListSql(LogicalAlterTableExtractPartition extractPartitionPlan, boolean isAlterTable, ExecutionContext executionContext) { - final String SPLIT_SQL = isAlterTable ? "ALTER TABLE {0} split PARTITION {1} into ({2});" : - "ALTER TABLEGROUP {0} split PARTITION {1} into ({2});"; + final String SPLIT_SQL = isAlterTable ? 
"ALTER TABLE {0} split PARTITION {1} into ({2})" : + "ALTER TABLEGROUP {0} split PARTITION {1} into ({2})"; final String PARTITION_DEF = "PARTITION {0} VALUES IN({1})"; PartitionInfo partitionInfo; SqlAlterTableExtractPartition sqlAlterTableExtractPartition; @@ -2789,8 +2791,8 @@ public static String convertExtractListToSplitListForSubpartition( */ public static String convertAddListRelToSplitListSql(LogicalAlterTableAddPartition addPartitionPlan, boolean isAlterTable, ExecutionContext executionContext) { - final String SPLIT_TABLEGROUP_SQL = isAlterTable ? "ALTER TABLE {0} split PARTITION {1} into ({2});" : - "ALTER TABLEGROUP {0} split PARTITION {1} into ({2});"; + final String SPLIT_TABLEGROUP_SQL = isAlterTable ? "ALTER TABLE {0} split PARTITION {1} into ({2})" : + "ALTER TABLEGROUP {0} split PARTITION {1} into ({2})"; PartitionInfo partitionInfo; SqlAlterTableAddPartition sqlAlterTableAddPartition; @@ -2922,24 +2924,76 @@ public static String convertAddListRelToSplitListSql(LogicalAlterTableAddPartiti } } - List newPartitionsStr = new ArrayList<>(); - for (SqlPartition newSqlPartition : newPartitions) { - newPartitionsStr.add(newSqlPartition.toString()); + boolean useSubPart = partitionInfo.getPartitionBy().getSubPartitionBy() != null; + boolean useSubPartTemp = false; + if (useSubPart) { + useSubPartTemp = partitionInfo.getPartitionBy().getSubPartitionBy().isUseSubPartTemplate(); } - PartitionSpec tobePrintDefaultSpec = defaultSpec.copy(); - tobePrintDefaultSpec.setLogical(false); - newPartitionsStr.add( - tobePrintDefaultSpec.toString() - ); - String finalNewpartsExpr = String.join(", ", newPartitionsStr); - return MessageFormat.format(SPLIT_TABLEGROUP_SQL, objectName, defaultSpec.getName(), finalNewpartsExpr); + if (!useSubPart || useSubPartTemp) { + /** + * Only for 1st-level-part of list + */ + List newPartitionsStr = new ArrayList<>(); + for (SqlPartition newSqlPartition : newPartitions) { + newPartitionsStr.add(newSqlPartition.toString()); + } + + PartitionSpec tobePrintDefaultSpec = defaultSpec.copy(); + tobePrintDefaultSpec.setLogical(false); + newPartitionsStr.add( + tobePrintDefaultSpec.toString() + ); + String finalNewPartsExpr = String.join(", ", newPartitionsStr); + String finalSplitSql = + MessageFormat.format(SPLIT_TABLEGROUP_SQL, objectName, defaultSpec.getName(), finalNewPartsExpr); + return finalSplitSql; + } else { + /** + * For 1st-level-part with specifying 2nd-level-part of list + */ + + /** + * Here only handle the add partition operations + * of non-template-subpartition list/list_columns containing default partition or + * non-template-subpartition range/range_columns containing maxvalue partition + */ + + List newPartitionsStr = new ArrayList<>(); + for (SqlPartition newSqlPartition : newPartitions) { + newPartitionsStr.add(newSqlPartition.toString()); + } + + PartitionSpec tobePrintDefaultSpec = defaultSpec.copy(); + newPartitionsStr.add( + tobePrintDefaultSpec.toString() + ); + + String finalNewpartsExpr = String.join(", ", newPartitionsStr); + String finalSplitSql = + MessageFormat.format(SPLIT_TABLEGROUP_SQL, objectName, defaultSpec.getName(), finalNewpartsExpr); + return finalSplitSql; + } + +// List newPartitionsStr = new ArrayList<>(); +// for (SqlPartition newSqlPartition : newPartitions) { +// newPartitionsStr.add(newSqlPartition.toString()); +// } +// +// PartitionSpec tobePrintDefaultSpec = defaultSpec.copy(); +// tobePrintDefaultSpec.setLogical(false); +// newPartitionsStr.add( +// tobePrintDefaultSpec.toString() +// ); +// String 
finalNewpartsExpr = String.join(", ", newPartitionsStr); +// String finalSplitSql = MessageFormat.format(SPLIT_TABLEGROUP_SQL, objectName, defaultSpec.getName(), finalNewpartsExpr); +// return finalSplitSql; } public static String convertAddListRelToSplitListSqlForSubPartition(LogicalAlterTableAddPartition addPartitionPlan, boolean isAlterTable, ExecutionContext executionContext) { - final String SPLIT_SQL = isAlterTable ? "alter table {0} split subpartition {1} into ({2});" : + final String SPLIT_SQL = isAlterTable ? "alter table {0} split subpartition {1} into ({2})" : "alter tablegroup {0} split subpartition {1} into ({2})"; PartitionInfo partitionInfo; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/BackfillExecutor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/BackfillExecutor.java index 681ab5a1e..180a9860e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/BackfillExecutor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/BackfillExecutor.java @@ -17,7 +17,6 @@ package com.alibaba.polardbx.executor.partitionmanagement; import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; -import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.jdbc.ParameterContext; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.utils.GeneralUtil; @@ -28,12 +27,10 @@ import com.alibaba.polardbx.executor.backfill.Extractor; import com.alibaba.polardbx.executor.backfill.Loader; import com.alibaba.polardbx.executor.cursor.Cursor; -import com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils; import com.alibaba.polardbx.executor.partitionmanagement.backfill.AlterTableGroupExtractor; import com.alibaba.polardbx.executor.partitionmanagement.backfill.AlterTableGroupLoader; import com.alibaba.polardbx.executor.scaleout.backfill.ChangeSetExecutor; import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.google.common.collect.Sets; import org.apache.calcite.rel.RelNode; import java.util.HashMap; @@ -68,6 +65,7 @@ public int backfill(String schemaName, final long speedMin = baseEc.getParamManager().getLong(ConnectionParams.SCALEOUT_BACKFILL_SPEED_MIN); final long speedLimit = baseEc.getParamManager().getLong(ConnectionParams.SCALEOUT_BACKFILL_SPEED_LIMITATION); final long parallelism = baseEc.getParamManager().getLong(ConnectionParams.SCALEOUT_BACKFILL_PARALLELISM); + final boolean useBinary = baseEc.getParamManager().getBoolean(ConnectionParams.BACKFILL_USING_BINARY); if (null == baseEc.getServerVariables()) { baseEc.setServerVariables(new HashMap<>()); @@ -91,12 +89,12 @@ public int backfill(String schemaName, Extractor extractor; if (useChangeSet) { extractor = ChangeSetExecutor - .create(schemaName, tableName, tableName, batchSize, speedMin, speedLimit, parallelism, sourcePhyTables, - baseEc); + .create(schemaName, tableName, tableName, batchSize, speedMin, speedLimit, parallelism, useBinary, + null, sourcePhyTables, baseEc); } else { extractor = AlterTableGroupExtractor - .create(schemaName, tableName, tableName, batchSize, speedMin, speedLimit, parallelism, sourcePhyTables, - baseEc); + .create(schemaName, tableName, tableName, batchSize, speedMin, speedLimit, parallelism, useBinary, + sourcePhyTables, baseEc); } final Loader loader = AlterTableGroupLoader diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/LocalPartitionManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/LocalPartitionManager.java index 1ea88719f..32d00314c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/LocalPartitionManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/LocalPartitionManager.java @@ -22,8 +22,8 @@ import com.alibaba.polardbx.common.utils.time.parser.StringTimeParser; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.TableMeta; -import com.alibaba.polardbx.optimizer.partition.common.LocalPartitionDefinitionInfo; import com.alibaba.polardbx.optimizer.partition.PartitionInfoUtil; +import com.alibaba.polardbx.optimizer.partition.common.LocalPartitionDefinitionInfo; import com.alibaba.polardbx.repo.mysql.checktable.CheckTableUtil; import com.alibaba.polardbx.repo.mysql.checktable.LocalPartitionDescription; import com.alibaba.polardbx.repo.mysql.checktable.TableDescription; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/backfill/AlterTableGroupExtractor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/backfill/AlterTableGroupExtractor.java index 9e1361e66..5e4f03585 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/backfill/AlterTableGroupExtractor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/backfill/AlterTableGroupExtractor.java @@ -39,25 +39,25 @@ protected AlterTableGroupExtractor(String schemaName, String sourceTableName, St long speedMin, long speedLimit, long parallelism, + boolean useBinary, PhyTableOperation planSelectWithMax, PhyTableOperation planSelectWithMin, PhyTableOperation planSelectWithMinAndMax, PhyTableOperation planSelectMaxPk, PhyTableOperation planSelectSample, - PhyTableOperation planSelectMinAndMaxSample, List primaryKeysId, Map> sourcePhyTables) { - super(schemaName, sourceTableName, targetTableName, batchSize, speedMin, speedLimit, parallelism, - planSelectWithMax, planSelectWithMin, planSelectWithMinAndMax, planSelectMaxPk, - planSelectSample, planSelectMinAndMaxSample, primaryKeysId); + super(schemaName, sourceTableName, targetTableName, batchSize, speedMin, speedLimit, parallelism, useBinary, + null, planSelectWithMax, planSelectWithMin, planSelectWithMinAndMax, planSelectMaxPk, + planSelectSample, primaryKeysId); this.sourcePhyTables = sourcePhyTables; } public static Extractor create(String schemaName, String sourceTableName, String targetTableName, long batchSize, - long speedMin, long speedLimit, long parallelism, + long speedMin, long speedLimit, long parallelism, boolean useBinary, Map> sourcePhyTables, ExecutionContext ec) { - final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, ec); + final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, useBinary, ec); ExtractorInfo info = Extractor.buildExtractorInfo(ec, schemaName, sourceTableName, targetTableName, true); @@ -70,6 +70,7 @@ public static Extractor create(String schemaName, String sourceTableName, String speedMin, speedLimit, parallelism, + useBinary, builder.buildSelectForBackfill(info.getSourceTableMeta(), info.getTargetTableColumns(), info.getPrimaryKeys(), false, true, lockMode), @@ -82,10 +83,7 @@ public static Extractor 
create(String schemaName, String sourceTableName, String true, true, lockMode), builder.buildSelectMaxPkForBackfill(info.getSourceTableMeta(), info.getPrimaryKeys()), - builder.buildSqlSelectForSample(info.getSourceTableMeta(), info.getPrimaryKeys(), info.getPrimaryKeys(), - false, false), - builder.buildSqlSelectForSample(info.getSourceTableMeta(), info.getPrimaryKeys(), info.getPrimaryKeys(), - true, true), + builder.buildSqlSelectForSample(info.getSourceTableMeta(), info.getPrimaryKeys()), info.getPrimaryKeysId(), sourcePhyTables); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/backfill/AlterTableGroupLoader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/backfill/AlterTableGroupLoader.java index 7da60a470..0726b4711 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/backfill/AlterTableGroupLoader.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/backfill/AlterTableGroupLoader.java @@ -16,13 +16,14 @@ package com.alibaba.polardbx.executor.partitionmanagement.backfill; +import com.alibaba.polardbx.common.jdbc.ParameterContext; import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.executor.backfill.Extractor; import com.alibaba.polardbx.executor.backfill.Loader; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.gsi.InsertIndexExecutor; import com.alibaba.polardbx.executor.gsi.PhysicalPlanBuilder; import com.alibaba.polardbx.optimizer.OptimizerContext; -import com.alibaba.polardbx.optimizer.config.table.GlobalIndexMeta; import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.planner.ExecutionPlan; @@ -44,12 +45,15 @@ import org.apache.calcite.sql.parser.SqlParserPos; import org.apache.commons.lang.StringUtils; +import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Set; import java.util.function.BiFunction; import java.util.stream.Collectors; +import static org.apache.calcite.sql.fun.SqlStdOperatorTable.EQUALS; + /** * Created by luoyanxin. 
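* When the backfill-returning optimization is usable (see canUseBackfillReturning), this loader passes the * primary-key column list as backfillReturning and derives the affected-row count from the returning result * set via getReturningAffectRows instead of from the plain insert count.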
* @@ -64,9 +68,10 @@ protected AlterTableGroupLoader(String schemaName, String tableName, SqlInsert i int[] checkerParamMapping, BiFunction, ExecutionContext, List> executeFunc, Map> physicalTableGroupMap, - boolean mirrorCopy) { + boolean mirrorCopy, + String backfillReturning) { super(schemaName, tableName, insert, insertIgnore, checkerPlan, checkerPkMapping, checkerParamMapping, - executeFunc, mirrorCopy); + executeFunc, mirrorCopy, backfillReturning); this.physicalTableGroupMap = physicalTableGroupMap; } @@ -76,6 +81,8 @@ public static Loader create(String schemaName, String primaryTable, String index Map> physicalTableGroupMap, boolean mirrorCopy) { final OptimizerContext optimizerContext = OptimizerContext.getContext(schemaName); + boolean canUseReturning = canUseBackfillReturning(ec, schemaName); + // Construct target table final SqlNode targetTableParam = BuildPlanUtils.buildTargetTable(); @@ -127,7 +134,8 @@ public static Loader create(String schemaName, String primaryTable, String index final TddlRuleManager tddlRuleManager = optimizerContext.getRuleManager(); final Set filterColumns = Sets.newTreeSet(String::compareToIgnoreCase); final Set primaryKeys = Sets.newTreeSet(String::compareToIgnoreCase); - primaryKeys.addAll(GlobalIndexMeta.getPrimaryKeys(primaryTableMeta)); + final List pkList = Extractor.getPrimaryKeys(primaryTableMeta, ec); + primaryKeys.addAll(pkList); filterColumns.addAll(primaryKeys); filterColumns.addAll(tddlRuleManager.getSharedColumns(primaryTable)); filterColumns.addAll(tddlRuleManager.getSharedColumns(indexTable)); @@ -163,7 +171,8 @@ public static Loader create(String schemaName, String primaryTable, String index checkerParamMapping, executeFunc, physicalTableGroupMap, - mirrorCopy); + mirrorCopy, + canUseReturning ? String.join(",", pkList) : null); } @Override @@ -178,8 +187,11 @@ public int executeInsert(SqlInsert sqlInsert, String schemaName, String tableNam targetGroup = groupPair.getValue(); } } - return InsertIndexExecutor + List> returningRes = new ArrayList<>(); + int affectRows = InsertIndexExecutor .backfillIntoPartitionedTable(null, sqlInsert, tableMeta, schemaName, executionContext, executeFunc, false, - newPartInfo, targetGroup, phyTableName, this.mirrorCopy); + newPartInfo, targetGroup, phyTableName, this.mirrorCopy, usingBackfillReturning, returningRes); + + return usingBackfillReturning ? 
getReturningAffectRows(returningRes, executionContext) : affectRows; } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/corrector/AlterTableGroupChecker.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/corrector/AlterTableGroupChecker.java index acb2b6489..1f297d455 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/corrector/AlterTableGroupChecker.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/corrector/AlterTableGroupChecker.java @@ -50,7 +50,9 @@ public AlterTableGroupChecker(String schemaName, String tableName, String indexN TableMeta gsiTableMeta, long batchSize, long speedMin, long speedLimit, - long parallelism, SqlSelect.LockMode primaryLock, + long parallelism, + boolean useBinary, + SqlSelect.LockMode primaryLock, SqlSelect.LockMode gsiLock, PhyTableOperation planSelectWithMaxPrimary, PhyTableOperation planSelectWithMaxGsi, @@ -64,7 +66,7 @@ public AlterTableGroupChecker(String schemaName, String tableName, String indexN Map> sourceTargetTables, Map> targetTargetTables) { super(schemaName, tableName, indexName, primaryTableMeta, gsiTableMeta, batchSize, speedMin, speedLimit, - parallelism, + parallelism, useBinary, primaryLock, gsiLock, planSelectWithMaxPrimary, planSelectWithMaxGsi, planSelectWithMinAndMaxPrimary, planSelectWithMinAndMaxGsi, planSelectWithInTemplate, planSelectWithIn, planSelectMaxPk, indexColumns, primaryKeysId, rowComparator); @@ -74,8 +76,8 @@ public AlterTableGroupChecker(String schemaName, String tableName, String indexN } public static Checker create(String schemaName, String tableName, String indexName, long batchSize, long speedMin, - long speedLimit, - long parallelism, SqlSelect.LockMode primaryLock, SqlSelect.LockMode gsiLock, + long speedLimit, long parallelism, boolean useBinary, + SqlSelect.LockMode primaryLock, SqlSelect.LockMode gsiLock, ExecutionContext ec, Map> sourceTargetTables, Map> targetTargetTables) { @@ -84,7 +86,7 @@ public static Checker create(String schemaName, String tableName, String indexNa final TableMeta indexTableMeta = sm.getTable(indexName); Extractor.ExtractorInfo info = Extractor.buildExtractorInfo(ec, schemaName, tableName, indexName, false); - final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, ec); + final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, useBinary, ec); final Pair selectWithIn = builder .buildSelectWithInForChecker(info.getSourceTableMeta(), info.getTargetTableColumns(), info.getPrimaryKeys(), @@ -115,6 +117,7 @@ public static Checker create(String schemaName, String tableName, String indexNa speedMin, speedLimit, parallelism, + useBinary, primaryLock, gsiLock, builder.buildSelectForBackfill(info.getSourceTableMeta(), info.getTargetTableColumns(), diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/fastchecker/AlterTableGroupFastChecker.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/fastchecker/AlterTableGroupFastChecker.java index d10fe1d76..c732d271a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/fastchecker/AlterTableGroupFastChecker.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/fastchecker/AlterTableGroupFastChecker.java @@ -17,7 +17,6 @@ package com.alibaba.polardbx.executor.partitionmanagement.fastchecker; 
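// Fast consistency check for table group changes: compares source and destination physical tables // using hashcheck plans (full-table, upper-bound, lower-bound and double-bound variants) together // with the idle-select and sampling plans built by PhysicalPlanBuilder.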
import com.alibaba.polardbx.common.properties.ConnectionParams; -import com.alibaba.polardbx.executor.backfill.Extractor; import com.alibaba.polardbx.executor.fastchecker.FastChecker; import com.alibaba.polardbx.executor.gsi.PhysicalPlanBuilder; import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; @@ -25,9 +24,7 @@ import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.PhyTableOperation; -import com.alibaba.polardbx.statistics.SQLRecorderLogger; -import java.text.MessageFormat; import java.util.List; import java.util.Map; import java.util.Set; @@ -44,7 +41,6 @@ public AlterTableGroupFastChecker(String schemaName, String srcLogicalTableName, Map> dstPhyDbAndTables, List srcColumns, List dstColumns, List srcPks, List dstPks, - long parallelism, int lockTimeOut, PhyTableOperation planSelectHashCheckSrc, PhyTableOperation planSelectHashCheckWithUpperBoundSrc, PhyTableOperation planSelectHashCheckWithLowerBoundSrc, @@ -57,10 +53,9 @@ public AlterTableGroupFastChecker(String schemaName, String srcLogicalTableName, PhyTableOperation planIdleSelectDst, PhyTableOperation planSelectSampleSrc, PhyTableOperation planSelectSampleDst) { - super(schemaName, schemaName, srcLogicalTableName, dstLogicalTableName, null, srcPhyDbAndTables, + super(schemaName, schemaName, srcLogicalTableName, dstLogicalTableName, srcPhyDbAndTables, dstPhyDbAndTables, srcColumns, dstColumns, srcPks, dstPks, - parallelism, lockTimeOut, planSelectHashCheckSrc, planSelectHashCheckWithUpperBoundSrc, planSelectHashCheckWithLowerBoundSrc, @@ -76,7 +71,7 @@ public AlterTableGroupFastChecker(String schemaName, String srcLogicalTableName, } public static FastChecker create(String schemaName, String tableName, Map> srcPhyDbAndTables, - Map> dstPhyDbAndTables, long parallelism, + Map> dstPhyDbAndTables, ExecutionContext ec) { // Build select plan final SchemaManager sm = ec.getSchemaManager(schemaName); @@ -89,17 +84,14 @@ public static FastChecker create(String schemaName, String tableName, Map baseTablePks = FastChecker.getorderedPrimaryKeys(baseTableMeta, ec); + final List baseTablePks = FastChecker.getorderedPrimaryKeys(baseTableMeta); final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, ec); - final int lockTimeOut = ec.getParamManager().getInt(ConnectionParams.FASTCHECKER_LOCK_TIMEOUT); - return new AlterTableGroupFastChecker( schemaName, tableName, tableName, srcPhyDbAndTables, dstPhyDbAndTables, baseTableColumns, baseTableColumns, baseTablePks, baseTablePks, - parallelism, lockTimeOut, builder.buildSelectHashCheckForChecker(baseTableMeta, baseTableColumns, baseTablePks, false, false), builder.buildSelectHashCheckForChecker(baseTableMeta, baseTableColumns, baseTablePks, false, true), builder.buildSelectHashCheckForChecker(baseTableMeta, baseTableColumns, baseTablePks, true, false), @@ -113,8 +105,8 @@ public static FastChecker create(String schemaName, String tableName, Map sourceTargetGroup, + String dstLogicalTableName, Map> srcPhyDbAndTables, Map> dstPhyDbAndTables, List srcColumns, List dstColumns, List srcPks, List dstPks, - long parallelism, int lockTimeOut, PhyTableOperation planSelectHashCheckSrc, PhyTableOperation planSelectHashCheckWithUpperBoundSrc, PhyTableOperation planSelectHashCheckWithLowerBoundSrc, @@ -65,8 +63,8 @@ public LogicalTableDataMigrationFastChecker(String srcSchemaName, String dstSche PhyTableOperation planIdleSelectDst, PhyTableOperation planSelectSampleSrc, 
PhyTableOperation planSelectSampleDst) { - super(srcSchemaName, dstSchemaName, srcLogicalTableName, dstLogicalTableName, sourceTargetGroup, - srcPhyDbAndTables, dstPhyDbAndTables, srcColumns, dstColumns, srcPks, dstPks, parallelism, lockTimeOut, + super(srcSchemaName, dstSchemaName, srcLogicalTableName, dstLogicalTableName, + srcPhyDbAndTables, dstPhyDbAndTables, srcColumns, dstColumns, srcPks, dstPks, planSelectHashCheckSrc, planSelectHashCheckWithUpperBoundSrc, planSelectHashCheckWithLowerBoundSrc, planSelectHashCheckWithLowerUpperBoundSrc, planSelectHashCheckDst, planSelectHashCheckWithUpperBoundDst, planSelectHashCheckWithLowerBoundDst, planSelectHashCheckWithLowerUpperBoundDst, planIdleSelectSrc, @@ -78,7 +76,7 @@ public LogicalTableDataMigrationFastChecker(String srcSchemaName, String dstSche * dstTable: gsi table */ public static List create(String logicalSchemaSrc, String logicalSchemaDst, String logicalTableSrc, - String logicalTableDst, long parallelism, ExecutionContext ec) { + String logicalTableDst, ExecutionContext ec) { final SchemaManager srcSm = OptimizerContext.getContext(logicalSchemaSrc).getLatestSchemaManager(); final TableMeta tableMetaSrc = srcSm.getTable(logicalTableSrc); final SchemaManager dstSm = OptimizerContext.getContext(logicalSchemaDst).getLatestSchemaManager(); @@ -92,13 +90,7 @@ public static List create(String logicalSchemaSrc, String logicalSc tableMetaDst.getAllColumns().stream().map(ColumnMeta::getName).collect(Collectors.toList()); // IMPORTANT: when building planSelectSampleSrc and planSelectSampleDst, the primary keys passed in must keep their original order! - final List pks = FastChecker.getorderedPrimaryKeys(tableMetaDst, ec); - - if (parallelism <= 0) { - parallelism = Math.max(BackFillThreadPool.getInstance().getCorePoolSize() / 2, 1); - } - - final int lockTimeOut = ec.getParamManager().getInt(ConnectionParams.FASTCHECKER_LOCK_TIMEOUT); + final List pks = FastChecker.getorderedPrimaryKeys(tableMetaDst); final PhysicalPlanBuilder srcBuilder = new PhysicalPlanBuilder(logicalSchemaSrc, ec); final PhysicalPlanBuilder dstBuilder = new PhysicalPlanBuilder(logicalSchemaDst, ec); @@ -123,9 +115,8 @@ public static List create(String logicalSchemaSrc, String logicalSc for (String dstTb : dstTbs) { FastChecker fc = new LogicalTableDataMigrationFastChecker(logicalSchemaSrc, logicalSchemaDst, logicalTableSrc, - logicalTableDst, null, ImmutableMap.of(srcDb, ImmutableSet.of(srcTb)), - ImmutableMap.of(dstDb, ImmutableSet.of(dstTb)), allColumns, allColumns, pks, pks, parallelism, - lockTimeOut, + logicalTableDst, ImmutableMap.of(srcDb, ImmutableSet.of(srcTb)), + ImmutableMap.of(dstDb, ImmutableSet.of(dstTb)), allColumns, allColumns, pks, pks, srcBuilder.buildSelectHashCheckForChecker(tableMetaSrc, allColumns, pks, false, false), srcBuilder.buildSelectHashCheckForChecker(tableMetaSrc, allColumns, pks, false, true), @@ -140,8 +131,8 @@ public static List create(String logicalSchemaSrc, String logicalSc srcBuilder.buildIdleSelectForChecker(tableMetaSrc, allColumns), dstBuilder.buildIdleSelectForChecker(tableMetaDst, allColumns), - srcBuilder.buildSqlSelectForSample(tableMetaSrc, pks, pks, false, false), - dstBuilder.buildSqlSelectForSample(tableMetaDst, pks, pks, false, false)); + srcBuilder.buildSqlSelectForSample(tableMetaSrc, pks), + dstBuilder.buildSqlSelectForSample(tableMetaDst, pks)); fastCheckers.add(fc); } @@ -149,8 +140,7 @@ public static List create(String logicalSchemaSrc, String logicalSc } else { return ImmutableList.of( new LogicalTableDataMigrationFastChecker(logicalSchemaSrc, logicalSchemaDst, 
logicalTableSrc, - logicalTableDst, null, srcPhyDbAndTbs, dstPhyDbAndTbs, allColumns, allColumns, pks, pks, - parallelism, lockTimeOut, + logicalTableDst, srcPhyDbAndTbs, dstPhyDbAndTbs, allColumns, allColumns, pks, pks, srcBuilder.buildSelectHashCheckForChecker(tableMetaSrc, allColumns, pks, false, false), srcBuilder.buildSelectHashCheckForChecker(tableMetaSrc, allColumns, pks, false, true), @@ -165,8 +155,8 @@ public static List create(String logicalSchemaSrc, String logicalSc srcBuilder.buildIdleSelectForChecker(tableMetaSrc, allColumns), dstBuilder.buildIdleSelectForChecker(tableMetaDst, allColumns), - srcBuilder.buildSqlSelectForSample(tableMetaSrc, pks, pks, false, false), - dstBuilder.buildSqlSelectForSample(tableMetaDst, pks, pks, false, false))); + srcBuilder.buildSqlSelectForSample(tableMetaSrc, pks), + dstBuilder.buildSqlSelectForSample(tableMetaDst, pks))); } } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/rebalance/RebalanceDdlPlanManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/rebalance/RebalanceDdlPlanManager.java index 1e53ef614..6d078bdbb 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/rebalance/RebalanceDdlPlanManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionmanagement/rebalance/RebalanceDdlPlanManager.java @@ -29,8 +29,10 @@ import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlJobManager; import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlPlanAccessorDelegate; import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlPlanManager; +import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlHelper; import com.alibaba.polardbx.executor.ddl.newengine.utils.TaskHelper; import com.alibaba.polardbx.executor.gsi.GsiBackfillManager; +import com.alibaba.polardbx.gms.config.impl.InstConfUtil; import com.alibaba.polardbx.gms.metadb.misc.DdlEngineRecord; import com.alibaba.polardbx.gms.metadb.misc.DdlEngineTaskRecord; import com.alibaba.polardbx.gms.scheduler.DdlPlanRecord; @@ -59,9 +61,16 @@ public RebalanceDdlPlanManager() { public void process(final DdlPlanRecord ddlPlanRecord) { switch (DdlPlanState.valueOf(ddlPlanRecord.getState())) { case INIT: + if (!InstConfUtil.isInRebalanceMaintenanceTimeWindow()) { + break; + } onInit(ddlPlanRecord); break; case EXECUTING: + if (!InstConfUtil.isInRebalanceMaintenanceTimeWindow()) { + terminateRebalanceJob(ddlPlanRecord); + break; + } onExecuting(ddlPlanRecord); break; case SUCCESS: @@ -70,6 +79,12 @@ public void process(final DdlPlanRecord ddlPlanRecord) { case TERMINATED: onTerminated(ddlPlanRecord); break; + case PAUSE_ON_NON_MAINTENANCE_WINDOW: + if (!InstConfUtil.isInRebalanceMaintenanceTimeWindow()) { + onPauseInNonMaintenanceWindow(ddlPlanRecord); + break; + } + onExecuting(ddlPlanRecord); } } @@ -87,7 +102,7 @@ protected void onInit(final DdlPlanRecord ddlPlanRecord) { }); } - protected void onExecuting(final DdlPlanRecord ddlPlanRecord) { + public void onExecuting(final DdlPlanRecord ddlPlanRecord) { final long jobId = ddlPlanRecord.getJobId(); DdlEngineRecord ddlEngineRecord = ddlJobManager.fetchRecordByJobId(jobId); if (ddlEngineRecord == null) { @@ -122,6 +137,19 @@ protected void onTerminated(final DdlPlanRecord ddlPlanRecord) { //do nothing } + protected void onPauseInNonMaintenanceWindow(final DdlPlanRecord ddlPlanRecord) { + //double check the job state + final long jobId = ddlPlanRecord.getJobId(); + DdlEngineRecord 
ddlEngineRecord = ddlJobManager.fetchRecordByJobId(jobId); + if (ddlEngineRecord == null) { + return; + } + if (DdlState.valueOf(ddlEngineRecord.state) == DdlState.RUNNING) { + LOGGER.info(String.format("try to cancel rebalance job %d", jobId)); + terminateRebalanceJob(ddlPlanRecord); + } + } + /*****************************************************************************************/ /** @@ -146,7 +174,7 @@ protected void onDdlJobRollbackCompleted(long ddlPlanId, long originJobId) { Pair pair = getBackFillCount(jobId, true); CostEstimableDdlTask.CostInfo formerCostInfo = TaskHelper.decodeCostInfo(record.getExtras()); CostEstimableDdlTask.CostInfo costInfo = - CostEstimableDdlTask.createCostInfo(pair.getKey(), pair.getValue()); + CostEstimableDdlTask.createCostInfo(pair.getKey(), pair.getValue(), null); CostEstimableDdlTask.CostInfo newCostInfo = CostEstimableDdlTask.CostInfo.combine(formerCostInfo, costInfo); ddlPlanManager.updateCostInfo(ddlPlanId, newCostInfo); @@ -195,7 +223,7 @@ protected Boolean invoke() { Pair pair = getBackFillCount(jobId, true); CostEstimableDdlTask.CostInfo formerCostInfo = TaskHelper.decodeCostInfo(record.getExtras()); CostEstimableDdlTask.CostInfo costInfo = - CostEstimableDdlTask.createCostInfo(pair.getKey(), pair.getValue()); + CostEstimableDdlTask.createCostInfo(pair.getKey(), pair.getValue(), null); CostEstimableDdlTask.CostInfo newCostInfo = CostEstimableDdlTask.CostInfo.combine(formerCostInfo, costInfo); ddlPlanAccessor.updateExtra(ddlPlanId, TaskHelper.encodeCostInfo(newCostInfo)); @@ -302,4 +330,21 @@ private Pair getBackFillCount(long jobId, boolean archive) { return Pair.of(successRowCount, totalRowCount); } + public void terminateRebalanceJob(DdlPlanRecord ddlPlanRecord) { + if (ddlPlanRecord.getJobId() <= 0) { + LOGGER.info("rebalance job is not scheduled yet, no need to terminate it"); + return; + } + + String rebalanceSql = + "/*+TDDL:CMD_EXTRA(CANCEL_REBALANCE_JOB_DUE_MAINTENANCE=true)*/ cancel ddl " + ddlPlanRecord.getJobId(); + + SQLRecorderLogger.ddlEngineLogger.info( + String.format( + "submit terminate rebalance job because it is outside the maintenance window, schemaName:[%s], ddlSql:[%s]", + ddlPlanRecord.getTableSchema(), rebalanceSql)); + + DdlHelper.getServerConfigManager().executeBackgroundSql(rebalanceSql, ddlPlanRecord.getTableSchema(), null); + } + } \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/PartitionHeatCollector.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/PartitionHeatCollector.java index f75f4846a..849713315 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/PartitionHeatCollector.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/PartitionHeatCollector.java @@ -16,24 +16,23 @@ package com.alibaba.polardbx.executor.partitionvisualizer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.partitionvisualizer.model.PartitionHeatInfo; import com.alibaba.polardbx.executor.partitionvisualizer.model.VisualAxis; -import com.alibaba.polardbx.executor.partitionvisualizer.model.VisualTypeConstants; import com.alibaba.polardbx.executor.partitionvisualizer.model.VisualLayer; - +import 
com.alibaba.polardbx.executor.partitionvisualizer.model.VisualTypeConstants; import org.apache.commons.collections.CollectionUtils; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + /** * Collects partition heat information + * * @author ximing.yd - * @date 2021/12/20 11:17 AM */ public class PartitionHeatCollector { @@ -85,5 +84,4 @@ private VisualAxis convertToVisualAxis(List partitionHeatInfo return axis; } - } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/PartitionVisualController.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/PartitionVisualController.java index a469d92f3..0a0af1255 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/PartitionVisualController.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/PartitionVisualController.java @@ -16,32 +16,30 @@ package com.alibaba.polardbx.executor.partitionvisualizer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; - import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.partitionvisualizer.model.LabelPartition; import com.alibaba.polardbx.executor.partitionvisualizer.model.VisualAxis; -import com.alibaba.polardbx.executor.partitionvisualizer.model.VisualTypeConstants; import com.alibaba.polardbx.executor.partitionvisualizer.model.VisualLayer; import com.alibaba.polardbx.executor.partitionvisualizer.model.VisualPlane; import com.alibaba.polardbx.executor.partitionvisualizer.model.VisualResponse; - +import com.alibaba.polardbx.executor.partitionvisualizer.model.VisualTypeConstants; import org.apache.commons.collections.CollectionUtils; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + /** * Queries partition heat * Assembles the data required by the frontend * * @author ximing.yd - * @date 2021/12/20 11:18 AM */ public class PartitionVisualController { @@ -51,7 +49,6 @@ public class PartitionVisualController { private final PartitionsStatService partitionsStatService = new PartitionsStatService(); - private final int MAX_RESPONSE_BOUNDS_SIZE = 3200; public VisualResponse getVisualResponse(String timeRange, String type) { @@ -188,8 +185,10 @@ private VisualResponse convertToVisualResponse(VisualPlane plane, String type) { //get new table rows to refresh PartitionsStatService.BOUND_META_MAP partitionsStatService.queryPartitionsStat(false); - List> sortBounds = getSortBounds(visualAxes, PartitionsStatService.BOUND_META_MAP, isDnView); - visualResponse.setBoundAxis(convertToLabelPartitions(sortBounds, PartitionsStatService.BOUND_META_MAP, isDnView)); + List> sortBounds = + getSortBounds(visualAxes, PartitionsStatService.BOUND_META_MAP, isDnView); + visualResponse.setBoundAxis( + convertToLabelPartitions(sortBounds, PartitionsStatService.BOUND_META_MAP, isDnView)); visualResponse.setDataMap(convertToDataMap(visualAxes, sortBounds, type, isDnView)); return visualResponse; } @@ -198,7 +197,8 @@ private boolean getIsDnView(String type) { return VisualTypeConstants.TYPE_WITH_DN_OPTIONS.contains(type); } - private List> getSortBounds(List visualAxes, Map> pairMap, boolean 
isDnView) { + private List> getSortBounds(List visualAxes, + Map> pairMap, boolean isDnView) { //Sort the bounds alphabetically so that all partitions of the same logical table stay adjacent Map boundsMap = new HashMap<>(); for (int i = 0; i < visualAxes.size(); i++) { @@ -231,7 +231,8 @@ private List convertToLabelPartitions(List pair = pairMap.get(originBound); String storageInstId = (pair == null || pair.getValue() == null) ? "-" : pair.getValue(); @@ -289,14 +290,14 @@ private Map>> convertToDataMap(List visualAx private String convertToOriginType(String type) { switch (type) { - case VisualTypeConstants.READ_ROWS_WITH_DN: - return VisualTypeConstants.READ_ROWS; - case VisualTypeConstants.WRITTEN_ROWS_WITH_DN: - return VisualTypeConstants.WRITTEN_ROWS; - case VisualTypeConstants.READ_WRITTEN_ROWS_WITH_DN: - return VisualTypeConstants.READ_WRITTEN_ROWS; - default: - return type; + case VisualTypeConstants.READ_ROWS_WITH_DN: + return VisualTypeConstants.READ_ROWS; + case VisualTypeConstants.WRITTEN_ROWS_WITH_DN: + return VisualTypeConstants.WRITTEN_ROWS; + case VisualTypeConstants.READ_WRITTEN_ROWS_WITH_DN: + return VisualTypeConstants.READ_WRITTEN_ROWS; + default: + return type; } } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/PartitionsStatService.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/PartitionsStatService.java index 1f04136f9..83f9e45c8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/PartitionsStatService.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/PartitionsStatService.java @@ -22,6 +22,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.TreeSet; import java.util.stream.Collectors; @@ -38,17 +39,30 @@ import com.alibaba.polardbx.gms.partition.TablePartitionRecord; import com.alibaba.polardbx.gms.tablegroup.PartitionGroupRecord; import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; +import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.optimizer.partition.PartitionInfo; +import com.alibaba.polardbx.optimizer.partition.PartitionInfoManager; +import com.alibaba.polardbx.optimizer.partition.PartitionSpec; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang3.StringUtils; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; +import java.util.stream.Collectors; + /** * Service that collects raw partition statistics * * @author ximing.yd - * @date 2022/3/1 11:41 AM */ public class PartitionsStatService { @@ -56,7 +70,8 @@ public class PartitionsStatService { private final Integer MAX_PARTITIONS_NAME_LENGTH = 50; - public static Map> BOUND_META_MAP = new HashMap<>(); + public static Map> BOUND_META_MAP = + new HashMap<>(); public static Map PHY_TABLE_ROWS_MAP = new HashMap<>(); @@ -64,7 +79,6 @@ public class PartitionsStatService { public static boolean HAS_CHANGED_COLLECTION_ONLY_PARAM = false; - public List queryPartitionsStat(boolean usePhyTableRowsCache) { Set schemaNames = new TreeSet<>(String::compareToIgnoreCase); schemaNames.addAll(StatsUtils.getDistinctSchemaNames()); @@ -73,7 +87,8 @@ public List queryPartitionsStat(boolean usePhyTableRowsCache) Map> 
storageInstIdGroupNames = new HashMap<>(); // get all phy tables(partitions) info from all DNs - Map/** statics **/>> phyDbTablesInfoForHeatmap = new HashMap<>(); + Map/** statics **/>> + phyDbTablesInfoForHeatmap = new HashMap<>(); Integer maxScan = getPartitionsHeatmapCollectionMaxScan(); Integer maxSingleLogicSchemaCount = getPartitionsHeatmapCollectionMaxSingleLogicSchemaCount(); @@ -131,14 +146,20 @@ public List queryPartitionsStat(boolean usePhyTableRowsCache) List partitionHeatInfos = new ArrayList<>(); List tableRowList = new ArrayList<>(); + for (TableGroupConfig tableGroupConfig : tableGroupConfigs) { if (tableGroupConfig.getTableCount() == 0) { logger.warn("tableGroupConfig is null"); continue; } String schemaName = tableGroupConfig.getTableGroupRecord().schema; + + OptimizerContext oc = + Objects.requireNonNull(OptimizerContext.getContext(schemaName), schemaName + " does not exist"); + PartitionInfoManager pm = oc.getPartitionInfoManager(); + Set indexTableNames = new TreeSet<>(); - if (!MapUtils.isEmpty(onlySchemaTables)){ + if (!MapUtils.isEmpty(onlySchemaTables)) { Set anyTables = onlySchemaTables.get(""); if (!CollectionUtils.isEmpty(anyTables)) { indexTableNames.addAll(anyTables); @@ -150,7 +171,8 @@ public List queryPartitionsStat(boolean usePhyTableRowsCache) } Map>> tablesStatInfoForHeatmap = - StatsUtils.queryTableGroupStatsForHeatmap(tableGroupConfig, indexTableNames, null, phyDbTablesInfoForHeatmap); + StatsUtils.queryTableGroupStatsForHeatmap(tableGroupConfig, indexTableNames, null, + phyDbTablesInfoForHeatmap); if (MapUtils.isEmpty(tablesStatInfoForHeatmap)) { continue; @@ -161,70 +183,75 @@ public List queryPartitionsStat(boolean usePhyTableRowsCache) partitionPyhDbMap.putAll(partitionGroupRecords.stream().collect(Collectors.toMap( PartitionGroupRecord::getPartition_name, PartitionGroupRecord::getPhy_db))); } - for (TablePartRecordInfoContext context : tableGroupConfig.getAllTables()) { - String logicalTableName = context.getTableName().toLowerCase(); + for (String logicalTableName : tableGroupConfig.getAllTables()) { + logicalTableName = logicalTableName.toLowerCase(); if (!StatsUtils.isFilterTable(indexTableNames, null, logicalTableName)) { continue; } Map> tableStatInfoForHeatmap = - tablesStatInfoForHeatmap.get(context.getLogTbRec().tableName.toLowerCase()); + tablesStatInfoForHeatmap.get(logicalTableName); if (tableStatInfoForHeatmap == null) { logger.warn(String.format("table meta tableStatInfo is null: schemaName:%s, tableName:%s", - schemaName, context.getTableName())); + schemaName, logicalTableName)); continue; } + PartitionInfo partitionInfo = pm.getPartitionInfo(logicalTableName); - List tablePartitionRecords = - context.getPartitionRecList().stream().filter( - o -> (o.partLevel != TablePartitionRecord.PARTITION_LEVEL_LOGICAL_TABLE)).collect( - Collectors.toList()); - for (int i = 0; i < tablePartitionRecords.size(); i++) { - TablePartitionRecord record = tablePartitionRecords.get(i); - List tableStatRowForHeatmap = tableStatInfoForHeatmap.get(record.phyTable.toLowerCase()); + List partitionSpecs = partitionInfo.getPartitionBy().getPhysicalPartitions(); + for (int i = 0; i < partitionSpecs.size(); i++) { + PartitionSpec record = partitionSpecs.get(i); + List tableStatRowForHeatmap = + tableStatInfoForHeatmap.get(record.getLocation().getPhyTableName().toLowerCase()); if (CollectionUtils.isEmpty(tableStatRowForHeatmap)) { - logger.warn(String.format("physical table meta tableStatRow is null: schemaName:%s, tableName:%s, phyTable:%s", - schemaName, 
record.tableName, record.phyTable)); + logger.warn(String.format( + "physical table meta tableStatRow is null: schemaName:%s, tableName:%s, phyTable:%s", + schemaName, logicalTableName, record.getLocation().getPhyTableName())); continue; } long tableRow = DataTypes.LongType.convertFrom(tableStatRowForHeatmap.get(2)); - String phyTableRowsKey = StatsUtils.getPhyTableRowsKey(((String) tableStatRowForHeatmap.get(1)).toLowerCase(), - ((String) tableStatRowForHeatmap.get(0)).toLowerCase()); + String phyTableRowsKey = + StatsUtils.getPhyTableRowsKey(((String) tableStatRowForHeatmap.get(1)).toLowerCase(), + ((String) tableStatRowForHeatmap.get(0)).toLowerCase()); Long tableRowCache = PHY_TABLE_ROWS_MAP.get(phyTableRowsKey); if (tableRowCache != null) { tableRow = tableRowCache; } tableRowList.add(tableRow); - String phyDb = partitionPyhDbMap.get(record.partName); + String phyDb = partitionPyhDbMap.get(record.getName()); Pair pair = storageInstIdGroupNames.get(phyDb); String storageInstId = pair.getKey(); partitionsNum++; - String tableKey = VisualConvertUtil.genTableKey(schemaName, record.tableName); + String tableKey = VisualConvertUtil.genTableKey(schemaName, record.getLocation().getPhyTableName()); storageInstIdSet.add(storageInstId); partitionsNumMap.merge(tableKey, 1, Integer::sum); - PartitionHeatInfo partitionHeatInfo = generatePartitionHeatInfo(schemaName, record, storageInstId, + PartitionHeatInfo partitionHeatInfo = generatePartitionHeatInfo(schemaName, logicalTableName, + record.getName(), storageInstId, i, tableRow, tableStatRowForHeatmap); partitionHeatInfos.add(partitionHeatInfo); } } } - List mergedPartitionHeatInfos = mergePartitionHeatInfo(partitionHeatInfos, tableRowList, partitionsNum, storageInstIdSet.size(), partitionsNumMap); + List mergedPartitionHeatInfos = + mergePartitionHeatInfo(partitionHeatInfos, tableRowList, partitionsNum, storageInstIdSet.size(), + partitionsNumMap); BOUND_META_MAP = getBoundRowsMap(mergedPartitionHeatInfos); return mergedPartitionHeatInfos; } - private PartitionHeatInfo generatePartitionHeatInfo(String schemaName, TablePartitionRecord record, + private PartitionHeatInfo generatePartitionHeatInfo(String schemaName, String logicalTableName, + String partitionName, String storageInstId, int index, long tableRow, - List tableStatRow){ + List tableStatRow) { PartitionHeatInfo partitionHeatInfo = new PartitionHeatInfo(); partitionHeatInfo.setSchemaName(schemaName); - partitionHeatInfo.setLogicalTable(record.tableName); - partitionHeatInfo.setPartitionName(record.partName); + partitionHeatInfo.setLogicalTable(logicalTableName); + partitionHeatInfo.setPartitionName(partitionName); partitionHeatInfo.setStorageInstId(storageInstId); partitionHeatInfo.setPartitionSeq(index); partitionHeatInfo.setTableRows(tableRow); @@ -237,7 +264,8 @@ private PartitionHeatInfo generatePartitionHeatInfo(String schemaName, TablePart Object rowInsertedObj = DataTypes.ULongType.convertFrom(tableStatRow.get(4)); Object rowUpdatedobj = DataTypes.ULongType.convertFrom(tableStatRow.get(5)); rowRead = VisualConvertUtil.getObjLong(rowReadObj); - rowWritten = VisualConvertUtil.sumRows(VisualConvertUtil.getObjLong(rowInsertedObj), VisualConvertUtil.getObjLong(rowUpdatedobj)); + rowWritten = VisualConvertUtil.sumRows(VisualConvertUtil.getObjLong(rowInsertedObj), + VisualConvertUtil.getObjLong(rowUpdatedobj)); ReadWritten = VisualConvertUtil.sumRows(rowRead, rowWritten); } @@ -256,18 +284,17 @@ private Map> getBoundRowsMap(List String bound = VisualConvertUtil.generateBound(info); Long 
tableRows = info.getTableRows() == null ? 0L : info.getTableRows(); String storageInstId = info.getStorageInstId(); - Pair pair = new Pair<>(tableRows, storageInstId); + Pair pair = new Pair<>(tableRows, storageInstId); map.put(bound, pair); } return map; } - public List mergePartitionHeatInfo(List originPartitionHeatInfos, - List tableRowList, - Integer partitionsNum, - Integer storageInstIdNum, - Map partitionsNumMap) { + List tableRowList, + Integer partitionsNum, + Integer storageInstIdNum, + Map partitionsNumMap) { Integer maxMergeNum = getPartitionsHeatmapCollectionMaxMergeNum(); if (CollectionUtils.isEmpty(originPartitionHeatInfos) || partitionsNum <= maxMergeNum) { @@ -304,7 +331,7 @@ public List mergePartitionHeatInfo(List or mergeTablePartitionHeatInfosByPercentile(tablePartitionHeatInfos, lastKey, storageInstIdNum, minMergeRatio, percentile50, percentile90, partitionsNumMap, maxMergeNum) : mergeTablePartitionHeatInfosByAdjacent(tablePartitionHeatInfos, lastKey, - storageInstIdNum, minMergeRatio, partitionsNumMap, maxMergeNum); + storageInstIdNum, minMergeRatio, partitionsNumMap, maxMergeNum); lastKey = key; tablePartitionHeatInfos = new ArrayList<>(); @@ -331,19 +358,19 @@ private Long percentile(List ascSortedDatas, double percentile) { * Partitions whose data volume is at or above the 50th percentile and below the 90th percentile are merged by compression ratio */ private List mergeTablePartitionHeatInfosByPercentile(List partitionHeatInfos, - String key, - Integer storageInstIdNum, - int minMergeRatio, - Long percentile50, - Long percentile90, - Map partitionsNumMap, - Integer maxMergeNum) { + String key, + Integer storageInstIdNum, + int minMergeRatio, + Long percentile50, + Long percentile90, + Map partitionsNumMap, + Integer maxMergeNum) { Integer partNum = partitionsNumMap.get(key); if (partNum == null || partNum <= storageInstIdNum) { return partitionHeatInfos; } - int mergeRatio = (int) Math.ceil((double)(minMergeRatio * 10 - 1) / 4); + int mergeRatio = (int) Math.ceil((double) (minMergeRatio * 10 - 1) / 4); mergeRatio = checkMaxPartitionsNumMergeRatio(mergeRatio, partNum, storageInstIdNum, maxMergeNum); Map> groupInfosMapG90 = new HashMap<>(); @@ -388,7 +415,8 @@ private List mergeTablePartitionHeatInfosByPercentile(List mergedPartitionHeatInfos = new ArrayList(); mergedPartitionHeatInfos.addAll(groupMergedPartitionHeatInfos(groupInfosMapG90, 1)); mergedPartitionHeatInfos.addAll(groupMergedPartitionHeatInfos(groupInfosMapG50L90, mergeRatio)); - mergedPartitionHeatInfos.addAll(groupMergedPartitionHeatInfos(groupInfosMapL50, ((int) Math.ceil((double)partNum / (double)storageInstIdNum) + 1))); + mergedPartitionHeatInfos.addAll(groupMergedPartitionHeatInfos(groupInfosMapL50, + ((int) Math.ceil((double) partNum / (double) storageInstIdNum) + 1))); return mergedPartitionHeatInfos; } @@ -396,14 +424,15 @@ private List mergeTablePartitionHeatInfosByPercentile(List= maxMergeNum) { //Merge all logical partitions on one DN into a single partition - mergeRatio = ((int) Math.ceil((double)partNum / (double)storageInstIdNum)) + 1; + mergeRatio = ((int) Math.ceil((double) partNum / (double) storageInstIdNum)) + 1; } if (partNum < maxMergeNum && partNum >= (maxMergeNum / 2)) { //Merge all logical partitions on one DN into two partitions - mergeRatio = (int) Math.ceil((double)partNum / (double)(storageInstIdNum * 2)); + mergeRatio = (int) Math.ceil((double) partNum / (double) (storageInstIdNum * 2)); } return mergeRatio; } @@ -440,8 +469,8 @@ private List mergeTablePartitionHeatInfosByAdjacent(List groupMergedPartitionHeatInfos(Map> groupInfosMap, - int mergeRatio) { + private List groupMergedPartitionHeatInfos(Map> groupInfosMap, + int mergeRatio) { List 
mergedPartitionHeatInfos = new ArrayList<>(); int seq = 0; for (Map.Entry> entry : groupInfosMap.entrySet()) { @@ -558,7 +587,7 @@ private HashMap> getPartitionsHeatmapCollectionOnly() { schema = singleSchemaTables[0]; tables = singleSchemaTables[1]; String[] tableArrs = tables.split("&"); - Set tableSet = new TreeSet<>(String::compareToIgnoreCase);; + Set tableSet = new TreeSet<>(String::compareToIgnoreCase); for (String table : tableArrs) { if (!StringUtils.isEmpty(table)) { tableSet.add(table); @@ -603,13 +633,15 @@ private int getPartitionsHeatmapCollectionMaxMergeNum() { } return Integer.parseInt(val); } catch (Exception e) { - logger.error(String.format("parse param:[PARTITIONS_HEATMAP_COLLECTION_MAX_MERGE_NUM=%s] error", defaultVal), e); + logger.error( + String.format("parse param:[PARTITIONS_HEATMAP_COLLECTION_MAX_MERGE_NUM=%s] error", defaultVal), e); return defaultVal; } } private int getPartitionsHeatmapCollectionMaxSingleLogicSchemaCount() { - int defaultVal = Integer.parseInt(ConnectionParams.PARTITIONS_HEATMAP_COLLECTION_MAX_SINGLE_LOGIC_SCHEMA_COUNT.getDefault()); + int defaultVal = + Integer.parseInt(ConnectionParams.PARTITIONS_HEATMAP_COLLECTION_MAX_SINGLE_LOGIC_SCHEMA_COUNT.getDefault()); try { String val = MetaDbInstConfigManager.getInstance().getInstProperty( ConnectionProperties.PARTITIONS_HEATMAP_COLLECTION_MAX_SINGLE_LOGIC_SCHEMA_COUNT); @@ -618,7 +650,8 @@ private int getPartitionsHeatmapCollectionMaxSingleLogicSchemaCount() { } return Integer.parseInt(val); } catch (Exception e) { - logger.error(String.format("parse param:[PARTITIONS_HEATMAP_COLLECTION_MAX_MERGE_NUM=%s] error", defaultVal), e); + logger.error( + String.format("parse param:[PARTITIONS_HEATMAP_COLLECTION_MAX_SINGLE_LOGIC_SCHEMA_COUNT=%s] error", defaultVal), e); return defaultVal; } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/VisualCompressUtil.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/VisualCompressUtil.java index 65666276d..1e8616d46 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/VisualCompressUtil.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/VisualCompressUtil.java @@ -16,19 +16,17 @@ package com.alibaba.polardbx.executor.partitionvisualizer; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; + import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; -import java.io.IOException; +import java.util.Base64; import java.util.zip.GZIPInputStream; import java.util.zip.GZIPOutputStream; -import java.util.Base64; -import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.common.utils.logger.LoggerFactory; - /** * @author ximing.yd - * @date 2022/2/8 3:20 PM */ public class VisualCompressUtil { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/VisualConvertUtil.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/VisualConvertUtil.java index 602f2c33f..a3303d4f9 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/VisualConvertUtil.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/VisualConvertUtil.java @@ -16,24 +16,22 @@ package com.alibaba.polardbx.executor.partitionvisualizer; -import java.util.Map; - import 
com.alibaba.polardbx.common.datatype.UInt64; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.partitionvisualizer.model.PartitionHeatInfo; - import io.airlift.slice.Slice; +import java.util.Map; + /** * @author ximing.yd - * @date 2022/4/14 3:29 PM */ public class VisualConvertUtil { private static final Logger logger = LoggerFactory.getLogger(VisualConvertUtil.class); - public static String generateBound(PartitionHeatInfo pInfo){ + public static String generateBound(PartitionHeatInfo pInfo) { if (pInfo == null) { logger.warn("pInfo is null"); return "-,-,-,-"; @@ -43,7 +41,7 @@ public static String generateBound(PartitionHeatInfo pInfo){ } //Zero-pad so that bounds stay in order when sorted alphabetically - public static String fillZero(Integer originNum){ + public static String fillZero(Integer originNum) { if (originNum == null) { logger.warn("originNum is null"); return "-"; @@ -60,58 +58,55 @@ public static String fillZero(Integer originNum){ return originNum.toString(); } - public static String generatePartitionHeatInfoKey(PartitionHeatInfo pInfo) { return String.format("%s,%s,%s", pInfo.getSchemaName(), pInfo.getLogicalTable(), pInfo.getPartitionName()); } - - public static String getObjString(String key, Map objMap){ + public static String getObjString(String key, Map objMap) { if (objMap.get(key) instanceof Slice) { - return ((Slice)objMap.get(key)).toStringUtf8(); + return ((Slice) objMap.get(key)).toStringUtf8(); } if (objMap.get(key) instanceof String) { - return ((String)objMap.get(key)); + return ((String) objMap.get(key)); } return ""; } - public static Integer getObjInteger(String key, Map objMap){ + public static Integer getObjInteger(String key, Map objMap) { if (objMap.get(key) instanceof UInt64) { - return ((UInt64)objMap.get(key)).intValue(); + return ((UInt64) objMap.get(key)).intValue(); } if (objMap.get(key) instanceof Number) { - return ((Number)objMap.get(key)).intValue(); + return ((Number) objMap.get(key)).intValue(); } if (objMap.get(key) instanceof Integer) { - return ((Integer)objMap.get(key)); + return ((Integer) objMap.get(key)); } return 0; } - public static Long getObjLong(String key, Map objMap){ + public static Long getObjLong(String key, Map objMap) { if (objMap.get(key) instanceof UInt64) { - return ((UInt64)objMap.get(key)).longValue(); + return ((UInt64) objMap.get(key)).longValue(); } if (objMap.get(key) instanceof Number) { - return ((Number)objMap.get(key)).longValue(); + return ((Number) objMap.get(key)).longValue(); } if (objMap.get(key) instanceof Long) { - return ((Long)objMap.get(key)); + return ((Long) objMap.get(key)); } return 0L; } - public static Long getObjLong(Object obj) { if (obj instanceof UInt64) { - return ((UInt64)obj).longValue(); + return ((UInt64) obj).longValue(); } if (obj instanceof Number) { - return ((Number)obj).longValue(); + return ((Number) obj).longValue(); } if (obj instanceof Long) { - return ((Long)obj); + return ((Long) obj); } return 0L; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/VisualLayerService.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/VisualLayerService.java index e90d0c43c..ce5a932dc 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/VisualLayerService.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/VisualLayerService.java @@ -16,33 +16,31 @@ package 
com.alibaba.polardbx.executor.partitionvisualizer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; - import com.alibaba.fastjson.JSON; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.partitionvisualizer.model.VisualAxis; import com.alibaba.polardbx.executor.partitionvisualizer.model.VisualAxisModel; -import com.alibaba.polardbx.executor.partitionvisualizer.model.VisualTypeConstants; import com.alibaba.polardbx.executor.partitionvisualizer.model.VisualLayer; import com.alibaba.polardbx.executor.partitionvisualizer.model.VisualLayerConfig; - +import com.alibaba.polardbx.executor.partitionvisualizer.model.VisualTypeConstants; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang.StringUtils; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; + /** * Operations on the multi-layer ring data structure * * @author ximing.yd - * @date 2021/12/20 3:39 PM */ public class VisualLayerService { @@ -115,13 +113,15 @@ public void fillVisualLayer(List visualLayers) { int start = 0; if (models.size() > layer.getLength()) { //If the element count exceeds this layer's total size, the data is abnormal; this happens rarely, so apply fault tolerance here - logger.warn(String.format("fillVisualLayer models.size:%s bigger layer.length:%s", models.size(), layer.getLength())); + logger.warn(String.format("fillVisualLayer models.size:%s bigger than layer.length:%s", models.size(), + layer.getLength())); start = models.size() - layer.getLength(); } for (int i = start; i < models.size(); i++) { VisualAxisModel model = models.get(i); VisualLayer targetLayer = visualLayers.get(model.getLayerNum()); - addElementToRing(targetLayer.getRingAxis(), layer.getTail(), axisJsonConvertToVisualAxis(model.getAxisJson())); + addElementToRing(targetLayer.getRingAxis(), layer.getTail(), + axisJsonConvertToVisualAxis(model.getAxisJson())); addElementToRing(targetLayer.getRingTimestamp(), layer.getTail(), model.getTimestamp()); targetLayer.setEmpty(false); if (i == start) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/VisualModelService.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/VisualModelService.java index 194eb8b2e..1b08ac532 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/VisualModelService.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/VisualModelService.java @@ -16,9 +16,6 @@ package com.alibaba.polardbx.executor.partitionvisualizer; -import java.sql.Connection; -import java.util.*; - import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.executor.partitionvisualizer.model.PartitionHeatInfo; import com.alibaba.polardbx.executor.partitionvisualizer.model.VisualAxisModel; @@ -26,14 +23,20 @@ import com.alibaba.polardbx.gms.heatmap.PartitionsHeatmapRecord; import com.alibaba.polardbx.gms.util.MetaDbLogUtil; import com.alibaba.polardbx.gms.util.MetaDbUtil; - import org.apache.commons.collections.CollectionUtils; +import java.sql.Connection; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import 
java.util.Map; +import java.util.Set; + /** * Operations on the raw heat data * * @author ximing.yd - * @date 2021/12/23 8:34 PM */ public class VisualModelService { @@ -109,7 +112,6 @@ public int getLayerVisualAxesCount() { } } - private Integer convertToCount(List> rs) { Integer count = 0; try { @@ -171,7 +173,8 @@ private PartitionHeatInfo subtractPartitionHeatInfo(PartitionHeatInfo newPartiti resultInfo.setPartitionSeq(oldPartitionHeatInfo.getPartitionSeq()); resultInfo.setRowsRead(newPartitionHeatInfo.getRowsRead() - oldPartitionHeatInfo.getRowsRead()); resultInfo.setRowsWritten(newPartitionHeatInfo.getRowsWritten() - oldPartitionHeatInfo.getRowsWritten()); - resultInfo.setRowsReadWritten(newPartitionHeatInfo.getRowsReadWritten() - oldPartitionHeatInfo.getRowsReadWritten()); + resultInfo.setRowsReadWritten( + newPartitionHeatInfo.getRowsReadWritten() - oldPartitionHeatInfo.getRowsReadWritten()); return resultInfo; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/LabelPartition.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/LabelPartition.java index e5fddae27..ddc5acf24 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/LabelPartition.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/LabelPartition.java @@ -16,14 +16,13 @@ package com.alibaba.polardbx.executor.partitionvisualizer.model; +import lombok.Data; + import java.io.Serializable; import java.util.List; -import lombok.Data; - /** * @author ximing.yd - * @date 2021/12/20 11:15 AM */ @Data public class LabelPartition implements Serializable { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/PartitionHeatInfo.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/PartitionHeatInfo.java index 1afc6401c..e8a5eb38c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/PartitionHeatInfo.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/PartitionHeatInfo.java @@ -16,13 +16,12 @@ package com.alibaba.polardbx.executor.partitionvisualizer.model; -import java.io.Serializable; - import lombok.Data; +import java.io.Serializable; + /** * @author ximing.yd - * @date 2021/12/20 11:13 AM */ @Data public class PartitionHeatInfo implements Serializable { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualAxis.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualAxis.java index e818eb116..2fc8f03af 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualAxis.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualAxis.java @@ -16,19 +16,18 @@ package com.alibaba.polardbx.executor.partitionvisualizer.model; +import lombok.Data; + import java.io.Serializable; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; -import lombok.Data; - /** * The vertical axis of the heatmap * * @author ximing.yd - * @date 2021/12/20 11:11 AM */ @Data public class VisualAxis implements Serializable { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualAxisModel.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualAxisModel.java index e1ae900aa..927898090 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualAxisModel.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualAxisModel.java @@ -16,14 +16,14 @@ package com.alibaba.polardbx.executor.partitionvisualizer.model; -import java.io.Serializable; - import lombok.Data; +import java.io.Serializable; + /** * Vertical-axis data stored in the database; convertible to and from VisualAxis + * * @author ximing.yd - * @date 2021/12/20 11:14 AM */ @Data public class VisualAxisModel implements Serializable { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualLayer.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualLayer.java index 720cc5329..1fdeca6b2 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualLayer.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualLayer.java @@ -16,14 +16,13 @@ package com.alibaba.polardbx.executor.partitionvisualizer.model; +import lombok.Data; + import java.io.Serializable; import java.util.List; -import lombok.Data; - /** * @author ximing.yd - * @date 2021/12/20 11:13 AM */ @Data public class VisualLayer implements Serializable { @@ -44,5 +43,4 @@ public class VisualLayer implements Serializable { private VisualLayer next; - } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualLayerConfig.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualLayerConfig.java index e0d1953b4..3887d844a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualLayerConfig.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualLayerConfig.java @@ -16,13 +16,12 @@ package com.alibaba.polardbx.executor.partitionvisualizer.model; -import java.io.Serializable; - import lombok.Data; +import java.io.Serializable; + /** * @author ximing.yd - * @date 2021/12/20 4:03 PM */ @Data public class VisualLayerConfig implements Serializable { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualPlane.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualPlane.java index f4dee433e..f7498e98b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualPlane.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualPlane.java @@ -16,14 +16,13 @@ package com.alibaba.polardbx.executor.partitionvisualizer.model; +import lombok.Data; + import java.io.Serializable; import java.util.List; -import lombok.Data; - /** * @author ximing.yd - * @date 2021/12/20 11:15 AM */ @Data public class VisualPlane implements Serializable { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualResponse.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualResponse.java index 87688e4e0..e970a0bfc 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualResponse.java +++ 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualResponse.java @@ -16,15 +16,14 @@ package com.alibaba.polardbx.executor.partitionvisualizer.model; +import lombok.Data; + import java.io.Serializable; import java.util.List; import java.util.Map; -import lombok.Data; - /** * @author ximing.yd - * @date 2021/12/20 11:12 AM */ @Data public class VisualResponse implements Serializable { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualTypeConstants.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualTypeConstants.java index 06767f860..47d29f0c6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualTypeConstants.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/partitionvisualizer/model/VisualTypeConstants.java @@ -21,7 +21,6 @@ /** * @author ximing.yd - * @date 2021/12/23 10:58 AM */ public class VisualTypeConstants { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/physicalbackfill/PhysicalBackfillExecutor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/physicalbackfill/PhysicalBackfillExecutor.java new file mode 100644 index 000000000..d38fbb7f9 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/physicalbackfill/PhysicalBackfillExecutor.java @@ -0,0 +1,71 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.physicalbackfill; + +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.executor.backfill.BatchConsumer; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.mysql.cj.polarx.protobuf.PolarxPhysicalBackfill; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +@Deprecated +public class PhysicalBackfillExecutor { + + public PhysicalBackfillExecutor() { + } + + public int backfill(String schemaName, + String tableName, + Map> sourcePhyTables, + Map> targetPhyTables, + Map sourceTargetGroup, + boolean isBroadcast, + ExecutionContext ec) { + final long batchSize = ec.getParamManager().getLong(ConnectionParams.PHYSICAL_BACKFILL_BATCH_SIZE); + final long minUpdateBatch = + ec.getParamManager().getLong(ConnectionParams.PHYSICAL_BACKFILL_MIN_SUCCESS_BATCH_UPDATE); + final long parallelism = ec.getParamManager().getLong(ConnectionParams.PHYSICAL_BACKFILL_PARALLELISM); + + if (null == ec.getServerVariables()) { + ec.setServerVariables(new HashMap<>()); + } + PhysicalBackfillExtractor extractor = + new PhysicalBackfillExtractor(schemaName, tableName, sourcePhyTables, targetPhyTables, sourceTargetGroup, + isBroadcast, + batchSize, + parallelism, + minUpdateBatch); + physicalBackfillLoader loader = new physicalBackfillLoader(schemaName, tableName); + extractor.doExtract(ec, new BatchConsumer() { + @Override + public void consume(Pair targetDbAndGroup, + Pair targetFileAndDir, + List> targetHosts, + Pair userInfo, + PolarxPhysicalBackfill.TransferFileDataOperator transferFileData) { + loader.applyBatch(targetDbAndGroup, targetFileAndDir, targetHosts, userInfo, transferFileData, ec); + } + }); + return 0; + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/physicalbackfill/PhysicalBackfillExtractor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/physicalbackfill/PhysicalBackfillExtractor.java new file mode 100644 index 000000000..8ac278b9b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/physicalbackfill/PhysicalBackfillExtractor.java @@ -0,0 +1,1044 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.physicalbackfill; + +import com.alibaba.polardbx.common.async.AsyncTask; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.executor.backfill.BatchConsumer; +import com.alibaba.polardbx.executor.common.ExecutorContext; +import com.alibaba.polardbx.executor.ddl.newengine.cross.CrossEngineValidator; +import com.alibaba.polardbx.executor.ddl.workqueue.BackFillThreadPool; +import com.alibaba.polardbx.executor.ddl.workqueue.PriorityFIFOTask; +import com.alibaba.polardbx.executor.spi.ITransactionManager; +import com.alibaba.polardbx.gms.partition.PhysicalBackfillDetailInfoFieldJSON; +import com.alibaba.polardbx.gms.topology.DbGroupInfoRecord; +import com.alibaba.polardbx.gms.topology.DbInfoManager; +import com.alibaba.polardbx.gms.topology.DbTopologyManager; +import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.config.table.ScaleOutPlanUtil; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.rpc.client.XSession; +import com.alibaba.polardbx.rpc.pool.XConnection; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; +import com.mysql.cj.polarx.protobuf.PolarxPhysicalBackfill; +import org.jetbrains.annotations.NotNull; + +import java.sql.SQLException; +import java.text.DecimalFormat; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.BitSet; +import java.util.Calendar; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import static com.alibaba.polardbx.common.TddlConstants.LONG_ENOUGH_TIMEOUT_FOR_DDL_ON_XPROTO_CONN; + +/** + * Created by luoyanxin. 
+ * + * @author luoyanxin + */ +@Deprecated +public class PhysicalBackfillExtractor { + + private long lastUpdateTime = 0L; + protected final String schemaName; + protected final String logicalTableName; + protected final long batchSize; + protected volatile long curSpeedLimit; + protected final long parallelism; + protected final long minUpdateBatch; + protected final PhysicalBackfillManager backfillManager; + protected final Map<String, Set<String>> sourcePhyTables; + protected final Map<String, Set<String>> targetPhyTables; + protected final Map<String, String> sourceTargetGroup; + protected final boolean isBroadcast; + protected final boolean newPartitionDb; + + protected final ITransactionManager tm; + + protected final PhysicalBackfillReporter reporter; + private final Object lock = new Object(); + private final Map<String, String> groupStorageInsts = new TreeMap<>(String::compareToIgnoreCase); + private final Map<String, Pair<String, String>> storageInstAndUserInfos = new ConcurrentHashMap<>(); + // key: target DN id, value: leader/follower host info + private final Map<String, List<Pair<String, Integer>>> cacheTargetHostInfo = + new TreeMap<>(String::compareToIgnoreCase); + + protected PhysicalBackfillExtractor(String schemaName, String logicalTableName, + Map<String, Set<String>> sourcePhyTables, + Map<String, Set<String>> targetPhyTables, + Map<String, String> sourceTargetGroup, + boolean isBroadcast, + long batchSize, long parallelism, + long minUpdateBatch) { + this.schemaName = schemaName; + this.logicalTableName = logicalTableName; + this.sourcePhyTables = sourcePhyTables; + this.targetPhyTables = targetPhyTables; + this.sourceTargetGroup = sourceTargetGroup; + this.isBroadcast = isBroadcast; + this.batchSize = batchSize; + this.parallelism = parallelism; + this.minUpdateBatch = minUpdateBatch; + + this.tm = ExecutorContext.getContext(schemaName).getTransactionManager(); + this.backfillManager = new PhysicalBackfillManager(schemaName); + this.reporter = new PhysicalBackfillReporter(backfillManager); + this.curSpeedLimit = OptimizerContext.getContext(schemaName).getParamManager() .getLong(ConnectionParams.PHYSICAL_BACKFILL_SPEED_LIMIT); + this.newPartitionDb = DbInfoManager.getInstance().isNewPartitionDb(schemaName); + if (!newPartitionDb && GeneralUtil.isEmpty(sourceTargetGroup)) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + "missing source-target group mapping entry"); + } + } + + /** + * Load the latest position mark for one physical file, inserting an initial record first if none exists. + * + * @param ec execution context carrying the id of the parent DDL job + * @return the backfill bean loaded from the system table + */ + public PhysicalBackfillManager.BackfillBean loadBackfillMeta(final ExecutionContext ec, final String dbIndex, + final String phyTable, + final String physicalPartition, + final String sourceGroup, final String targetGroup, + final Pair<String, String> srcFileAndDir, + final Pair<String, String> targetFileAndDir, + final long totalBatch, final long batchSize, + final long offset, final long lsn, + final Pair<String, Integer> sourceHost, + final List<Pair<String, Integer>> targetHosts) { + Long backfillId = ec.getBackfillId(); + + // Init position mark with upper bound + final PhysicalBackfillManager.BackfillObjectRecord initBfo = + initUpperBound(backfillId, schemaName, logicalTableName, dbIndex, phyTable, physicalPartition, sourceGroup, + targetGroup, srcFileAndDir, targetFileAndDir, totalBatch, batchSize, offset, lsn, sourceHost, + targetHosts); + + // Insert ignore + backfillManager.initBackfillMeta(backfillId, initBfo); + + // Load from system table + PhysicalBackfillManager.BackfillBean backfillBean = + this.reporter.loadBackfillMeta(backfillId, schemaName, dbIndex, phyTable, physicalPartition); + + SQLRecorderLogger.ddlLogger.info( + String.format("loadBackfillMeta for backfillId %d: %s", backfillId, this.reporter.getBackfillBean()));
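+ // The insert above is INSERT IGNORE, so the bean loaded here is either the freshly created INIT record or the record persisted by an earlier attempt; a restarted job therefore resumes from the stored progress instead of starting again at offset 0.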
return backfillBean; + } + + public void insertBackfillMeta(final Long backfillId, final String dbIndex, + final String phyTable, + final String physicalPartition, + final String sourceGroup, final String targetGroup, + final Pair<String, String> srcFileAndDir, + final Pair<String, String> targetFileAndDir, + final long totalBatch, final long batchSize, + final long offset, final long lsn, + final Pair<String, Integer> sourceHost, + final List<Pair<String, Integer>> targetHosts) { + + // Init position mark with upper bound + final PhysicalBackfillManager.BackfillObjectRecord initBfo = + initUpperBound(backfillId, schemaName, logicalTableName, dbIndex, phyTable, physicalPartition, sourceGroup, + targetGroup, srcFileAndDir, targetFileAndDir, totalBatch, batchSize, offset, lsn, sourceHost, + targetHosts); + + // Insert ignore + backfillManager.initBackfillMeta(backfillId, initBfo); + } + + private PhysicalBackfillManager.BackfillObjectRecord initUpperBound(final long ddlJobId, final String schemaName, + final String tableName, final String dbIndex, + final String phyTable, + final String physicalPartition, + final String sourceGroup, + final String targetGroup, + final Pair<String, String> srcFileAndDir, + final Pair<String, String> targetFileAndDir, + final long totalBatch, final long batchSize, + final long offset, final long lsn, + final Pair<String, Integer> sourceHost, + final List<Pair<String, Integer>> targetHosts) { + PhysicalBackfillManager.BackfillObjectRecord obj = + getBackfillObjectRecords(ddlJobId, schemaName, tableName, dbIndex, phyTable, physicalPartition, sourceGroup, + targetGroup, srcFileAndDir, targetFileAndDir, totalBatch, batchSize, offset, lsn); + PhysicalBackfillDetailInfoFieldJSON json = new PhysicalBackfillDetailInfoFieldJSON(); + json.setTargetHostAndPorts(targetHosts); + json.setSourceHostAndPort(sourceHost); + obj.setDetailInfo(PhysicalBackfillDetailInfoFieldJSON.toJson(json)); + return obj; + } + + @NotNull + protected PhysicalBackfillManager.BackfillObjectRecord getBackfillObjectRecords(final long ddlJobId, + final String schemaName, + final String tableName, + final String physicalDb, + final String phyTable, + final String physicalPartition, + final String sourceGroup, + final String targetGroup, + final Pair<String, String> srcFileAndDir, + final Pair<String, String> targetFileAndDir, + final long totalBatch, + final long batchSize, + final long offset, + final long lsn) { + return new PhysicalBackfillManager.BackfillObjectRecord(ddlJobId, schemaName, tableName, schemaName, + tableName, physicalDb, phyTable, physicalPartition, sourceGroup, targetGroup, srcFileAndDir, + targetFileAndDir, totalBatch, batchSize, offset, lsn); + } + + public void doExtract(ExecutionContext ec, BatchConsumer batchConsumer) { + PhysicalBackfillUtils.checkInterrupted(ec, null); + groupStorageInsts.clear(); + storageInstAndUserInfos.clear(); + Map<String, String> physicalTableGroupMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); + AtomicBoolean alreadyWaitLsn = new AtomicBoolean(true); + + List<Pair<String, Pair<String, String>>> tablesInfo = new ArrayList<>(); + if (isBroadcast) { + for (Map.Entry<String, Set<String>> sourceEntry : sourcePhyTables.entrySet()) { + for (String sourcePhyTb : GeneralUtil.emptyIfNull(sourceEntry.getValue())) { + for (Map.Entry<String, Set<String>> targetEntry : targetPhyTables.entrySet()) { + assert GeneralUtil.emptyIfNull(targetEntry.getValue()).size() == 1; + for (String targetPhyTb : GeneralUtil.emptyIfNull(targetEntry.getValue())) { + tablesInfo.add(Pair.of(sourcePhyTb, Pair.of(sourceEntry.getKey(), targetEntry.getKey()))); + } + } + } + } + } else if (!newPartitionDb) { + for (Map.Entry<String, Set<String>> sourceEntry : sourcePhyTables.entrySet()) { + String targetGroup = sourceTargetGroup.get(sourceEntry.getKey()); + for
(String sourcePhyTb : GeneralUtil.emptyIfNull(sourceEntry.getValue())) { + tablesInfo.add(Pair.of(sourcePhyTb, Pair.of(sourceEntry.getKey(), targetGroup))); + } + } + } else { + for (Map.Entry<String, Set<String>> sourceEntry : sourcePhyTables.entrySet()) { + for (String sourcePhyTb : GeneralUtil.emptyIfNull(sourceEntry.getValue())) { + for (Map.Entry<String, Set<String>> targetEntry : targetPhyTables.entrySet()) { + if (targetEntry.getValue().contains(sourcePhyTb)) { + tablesInfo.add(Pair.of(sourcePhyTb, Pair.of(sourceEntry.getKey(), targetEntry.getKey()))); + } + } + } + } + } + + for (Pair<String, Pair<String, String>> tableInfo : tablesInfo) { + String physicalTableName = tableInfo.getKey(); + String sourceGroupName = tableInfo.getValue().getKey(); + String targetGroupName = tableInfo.getValue().getValue(); + groupStorageInsts.putIfAbsent(sourceGroupName, + DbTopologyManager.getStorageInstIdByGroupName(schemaName, sourceGroupName)); + groupStorageInsts.putIfAbsent(targetGroupName, + DbTopologyManager.getStorageInstIdByGroupName(schemaName, targetGroupName)); + + DbGroupInfoRecord srcDbGroupInfoRecord = ScaleOutPlanUtil.getDbGroupInfoByGroupName(sourceGroupName); + DbGroupInfoRecord tarDbGroupInfoRecord = ScaleOutPlanUtil.getDbGroupInfoByGroupName(targetGroupName); + + assert srcDbGroupInfoRecord != null; + assert tarDbGroupInfoRecord != null; + + Pair<String, String> srcDbAndGroup = + Pair.of(srcDbGroupInfoRecord.phyDbName.toLowerCase(), srcDbGroupInfoRecord.groupName); + Pair<String, String> targetDbAndGroup = + Pair.of(tarDbGroupInfoRecord.phyDbName.toLowerCase(), tarDbGroupInfoRecord.groupName); + + List<String> phyPartNames = + PhysicalBackfillUtils.getPhysicalPartitionNames(schemaName, srcDbAndGroup.getValue(), + srcDbAndGroup.getKey(), + physicalTableName); + + foreachPhysicalFile(ec, srcDbAndGroup, targetDbAndGroup, physicalTableName.toLowerCase(), phyPartNames, + targetGroupName, alreadyWaitLsn, batchConsumer); + } + } + + public void foreachPhysicalFile(final ExecutionContext ec, final Pair<String, String> srcDbAndGroup, + final Pair<String, String> targetDbAndGroup, final String phyTable, + final List<String> physicalPartNames, final String targetGroupName, + final AtomicBoolean alreadyWaitLsn, final BatchConsumer consumer) { + + // 1. FLUSH TABLES t1 FOR EXPORT on the source + // 2. copy the ibd file to a temp directory + // 3. UNLOCK TABLES + // 4. upsert the file info to GMS + // 5. copy the temp file to the target DN + // 6. delete the temp ibd file + + String msg = "begin to backfill the ibd file for table[" + srcDbAndGroup.getKey() + ":" + phyTable + "]"; + SQLRecorderLogger.ddlLogger.info(msg); + + DbGroupInfoRecord tarDbGroupInfoRecord = ScaleOutPlanUtil.getDbGroupInfoByGroupName(targetGroupName); + + boolean hasNoPhyPart = GeneralUtil.isEmpty(physicalPartNames); + if (hasNoPhyPart) { + physicalPartNames.add(""); + } + String sourceStorageInstId = groupStorageInsts.get(srcDbAndGroup.getValue()); + String targetStorageInstId = groupStorageInsts.get(targetDbAndGroup.getValue()); + Pair<String, String> userInfo = storageInstAndUserInfos.computeIfAbsent(sourceStorageInstId, + key -> PhysicalBackfillUtils.getUserPasswd(sourceStorageInstId)); + + boolean healthyCheck = + ec.getParamManager().getBoolean(ConnectionParams.PHYSICAL_BACKFILL_STORAGE_HEALTHY_CHECK); + + for (String physicalPartition : physicalPartNames) { + List<Pair<Long, Long>> offsetAndSize = new ArrayList<>(); + + final Pair<String, String> targetFileAndDir; + + PhysicalBackfillManager.BackfillBean initBean = + reporter.loadBackfillMeta(ec.getBackfillId(), schemaName, srcDbAndGroup.getKey(), phyTable, + physicalPartition); + Pair<String, String> srcFileAndDir = null; + Long lsn = 0L; + Pair<String, Integer> sourceHost = null; + final Pair<String, String> tempFileAndDir; + List<Pair<String, Integer>> targetHosts = null;
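+ // A missing or INIT-state record means this physical file was never fully snapshotted, so it is cloned from the source DN from scratch; any other state resumes from the file names, hosts and batch bitset persisted in the metadb.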
+ if (initBean.isEmpty() || initBean.isInit()) { + final Boolean copyIbdFromFollower = + ec.getParamManager().getBoolean(ConnectionParams.PHYSICAL_BACKFILL_FROM_FOLLOWER); + + if (copyIbdFromFollower) { + sourceHost = PhysicalBackfillUtils.getMySQLOneFollowerIpAndPort(sourceStorageInstId); + } else { + sourceHost = PhysicalBackfillUtils.getMySQLLeaderIpAndPort(sourceStorageInstId); + } + // cache and recheck the target hosts per DN: within one backfill task the target host list must stay stable, even if an HA switch happens + targetHosts = cacheTargetHostInfo.computeIfAbsent(targetStorageInstId, + key -> PhysicalBackfillUtils.getMySQLServerNodeIpAndPorts(targetStorageInstId, healthyCheck)); + Map<String, String> targetGroupAndStorageIdMap = new HashMap<>(); + Map<String, String> sourceGroupAndStorageIdMap = new HashMap<>(); + + targetGroupAndStorageIdMap.put(targetDbAndGroup.getValue(), targetStorageInstId); + sourceGroupAndStorageIdMap.put(srcDbAndGroup.getValue(), sourceStorageInstId); + + if (!alreadyWaitLsn.get()) { + + // wait for the target DN to finish its DDL + Map<String, Long> groupAndLsnMap = + PhysicalBackfillUtils.waitLsn(schemaName, targetGroupAndStorageIdMap, false, ec); + // wait for the source follower to replay the DML binlog + PhysicalBackfillUtils.waitLsn(schemaName, sourceGroupAndStorageIdMap, false, ec); + lsn = groupAndLsnMap.get(targetDbAndGroup.getValue()); + alreadyWaitLsn.set(true); + } + Map<String, Pair<String, String>> srcFileAndDirs = + PhysicalBackfillUtils.getSourceTableInfo(userInfo, srcDbAndGroup.getKey(), phyTable, + physicalPartNames, + hasNoPhyPart, sourceHost); + if (!initBean.isEmpty()) { + for (String phyPart : physicalPartNames) { + PhysicalBackfillManager.BackfillBean backfillBean = + reporter.loadBackfillMeta(ec.getBackfillId(), schemaName, srcDbAndGroup.getKey(), phyTable, + phyPart); + assert backfillBean.isInit(); + PhysicalBackfillManager.BackfillObjectBean bean = backfillBean.backfillObject; + try { + PhysicalBackfillDetailInfoFieldJSON detailInfoFieldJSON = bean.detailInfo; + PhysicalBackfillUtils.deleteInnodbDataFile(schemaName, bean.sourceGroupName, + bean.physicalDb, + detailInfoFieldJSON.getSourceHostAndPort().getKey(), + detailInfoFieldJSON.getSourceHostAndPort().getValue(), + PhysicalBackfillUtils.convertToCfgFileName(bean.sourceDirName, + PhysicalBackfillUtils.CFG), true, ec); + PhysicalBackfillUtils.deleteInnodbDataFile(schemaName, bean.sourceGroupName, + bean.physicalDb, + detailInfoFieldJSON.getSourceHostAndPort().getKey(), + detailInfoFieldJSON.getSourceHostAndPort().getValue(), bean.sourceDirName, true, ec); + } catch (Exception ex) { + // ignore: stale temp files are cleaned up again later + try { + SQLRecorderLogger.ddlLogger.info(ex.toString()); + } catch (Exception e) { + // ignore logging failures + } + } + backfillManager.deleteById(bean.id); + } + } + for (Map.Entry<String, Pair<String, String>> entry : srcFileAndDirs.entrySet()) { + Pair<String, String> partSrcFileAndDir = entry.getValue(); + String tmpDir = partSrcFileAndDir.getValue() + PhysicalBackfillUtils.TEMP_FILE_POSTFIX; + String tmpFile = partSrcFileAndDir.getKey(); + Pair<String, String> partTempFileAndDir = Pair.of(tmpFile, tmpDir); + + String partTargetFile = partSrcFileAndDir.getKey().substring(srcDbAndGroup.getKey().length()); + partTargetFile = tarDbGroupInfoRecord.phyDbName.toLowerCase() + partTargetFile; + + String partTargetDir = partSrcFileAndDir.getValue() + .substring(PhysicalBackfillUtils.IDB_DIR_PREFIX.length() + srcDbAndGroup.getKey().length()); + partTargetDir = PhysicalBackfillUtils.IDB_DIR_PREFIX + tarDbGroupInfoRecord.phyDbName.toLowerCase() + + partTargetDir; + + Pair<String, String> partTargetFileAndDir = new Pair<>(partTargetFile, partTargetDir); + + insertBackfillMeta(ec.getBackfillId(), srcDbAndGroup.getKey(), phyTable,
entry.getKey(), + srcDbAndGroup.getValue(), + targetDbAndGroup.getValue(), partTempFileAndDir, partTargetFileAndDir, 0, + batchSize, 0, lsn, sourceHost, targetHosts); + + } + cloneInnodbDataFile(ec, srcDbAndGroup, srcFileAndDirs, phyTable, physicalPartNames, hasNoPhyPart, + sourceHost); + for (Map.Entry> entry : srcFileAndDirs.entrySet()) { + + PhysicalBackfillUtils.getTempIbdFileInfo(userInfo, sourceHost, srcDbAndGroup, phyTable, + entry.getKey(), entry.getValue(), + batchSize, false, offsetAndSize); + + PhysicalBackfillManager.BackfillBean backfillBean = + reporter.loadBackfillMeta(ec.getBackfillId(), schemaName, srcDbAndGroup.getKey(), phyTable, + entry.getKey()); + assert backfillBean.isInit(); + + backfillManager.updateStatusAndTotalBatch(backfillBean.backfillObject.id, offsetAndSize.size()); + } + if (!hasNoPhyPart) { + srcFileAndDir = srcFileAndDirs.get(physicalPartition); + } else { + srcFileAndDir = srcFileAndDirs.entrySet().iterator().next().getValue(); + } + tempFileAndDir = + PhysicalBackfillUtils.getTempIbdFileInfo(userInfo, sourceHost, srcDbAndGroup, phyTable, + physicalPartition, srcFileAndDir, batchSize, + false, offsetAndSize); + + String partTargetFile = srcFileAndDir.getKey().substring(srcDbAndGroup.getKey().length()); + partTargetFile = tarDbGroupInfoRecord.phyDbName.toLowerCase() + partTargetFile; + + String partTargetDir = + srcFileAndDir.getValue() + .substring(PhysicalBackfillUtils.IDB_DIR_PREFIX.length() + srcDbAndGroup.getKey().length()); + partTargetDir = + PhysicalBackfillUtils.IDB_DIR_PREFIX + tarDbGroupInfoRecord.phyDbName.toLowerCase() + partTargetDir; + + targetFileAndDir = new Pair<>(partTargetFile, partTargetDir); + + } else { + if (initBean.isSuccess()) { + continue; + } + sourceHost = initBean.backfillObject.detailInfo.getSourceHostAndPort(); + srcFileAndDir = Pair.of(initBean.backfillObject.sourceFileName, initBean.backfillObject.sourceDirName); + tempFileAndDir = srcFileAndDir; + targetFileAndDir = + Pair.of(initBean.backfillObject.targetFileName, initBean.backfillObject.targetDirName); + //update the offsetAndSize + PhysicalBackfillUtils.getTempIbdFileInfo(userInfo, sourceHost, srcDbAndGroup, phyTable, + physicalPartition, srcFileAndDir, batchSize, + true, offsetAndSize); + } + + BitSet bitSet; + long[] bitSetPosMark = null; + PhysicalBackfillManager.BackfillBean backfillBean = + loadBackfillMeta(ec, srcDbAndGroup.getKey(), phyTable, physicalPartition, srcDbAndGroup.getValue(), + targetDbAndGroup.getValue(), tempFileAndDir, targetFileAndDir, offsetAndSize.size(), batchSize, 0, + lsn, sourceHost, targetHosts); + + assert !backfillBean.isInit(); + + PhysicalBackfillDetailInfoFieldJSON detailInfo = backfillBean.backfillObject.detailInfo; + if (backfillBean.isSuccess()) { + return; + } else { + if (detailInfo != null) { + bitSetPosMark = detailInfo.getBitSet(); + } else { + detailInfo = new PhysicalBackfillDetailInfoFieldJSON(); + } + } + + List futures = new ArrayList<>(16); + AtomicReference excep = new AtomicReference<>(null); + final AtomicInteger successBatch = new AtomicInteger(0); + final List> targetHost = detailInfo.getTargetHostAndPorts(); + final Pair sourceHostIpAndPort = detailInfo.getSourceHostAndPort(); + final AtomicReference interrupted = new AtomicReference<>(false); + + // copy the .cfg file before .ibd file + String srcFileName = srcFileAndDir.getKey(); + String srcDir; + if (initBean.isEmpty()) { + srcDir = PhysicalBackfillUtils.convertToCfgFileName( + srcFileAndDir.getValue() + PhysicalBackfillUtils.TEMP_FILE_POSTFIX, 
PhysicalBackfillUtils.CFG); + } else { + srcDir = + PhysicalBackfillUtils.convertToCfgFileName(srcFileAndDir.getValue(), PhysicalBackfillUtils.CFG); + } + + String tarFileName = targetFileAndDir.getKey(); + String tarDir = + PhysicalBackfillUtils.convertToCfgFileName(targetFileAndDir.getValue(), PhysicalBackfillUtils.CFG); + copyCfgFile(Pair.of(srcFileName, srcDir), srcDbAndGroup, sourceHostIpAndPort, + Pair.of(tarFileName, tarDir), targetDbAndGroup, targetHost, consumer); + + if (bitSetPosMark == null || bitSetPosMark.length == 0) { + bitSet = new BitSet(offsetAndSize.size()); + } else { + bitSet = BitSet.valueOf(bitSetPosMark); + } + + long fileSize = 0l; + if (offsetAndSize.size() > 0) { + Pair lastBatch = offsetAndSize.get(offsetAndSize.size() - 1); + fileSize = lastBatch.getKey() + lastBatch.getValue(); + } + fallocateIbdFile(ec, targetFileAndDir, targetDbAndGroup, targetHost, phyTable, "", fileSize); + + if (parallelism <= 0 || parallelism >= BackFillThreadPool.getInstance().getCorePoolSize()) { + // Full queued. + offsetAndSize.forEach(v -> { + int index = (int) (v.getKey() / batchSize); + if (!bitSet.get(index)) { + FutureTask task = new FutureTask<>( + () -> foreachPhyFileBatch(srcDbAndGroup, targetDbAndGroup, tempFileAndDir, targetFileAndDir, + phyTable, v, bitSet, batchSize, successBatch, minUpdateBatch, sourceHostIpAndPort, + targetHost, consumer, ec, interrupted), null); + futures.add(task); + BackFillThreadPool.getInstance() + .executeWithContext(task, PriorityFIFOTask.TaskPriority.GSI_BACKFILL_TASK); + } + }); + } else { + + // Use a bounded blocking queue to control the parallelism. + BlockingQueue blockingQueue = new ArrayBlockingQueue<>((int) parallelism); + + offsetAndSize.forEach(v -> { + int index = (int) (v.getKey() / batchSize); + if (!bitSet.get(index)) { + if (CrossEngineValidator.isJobInterrupted(ec) || Thread.currentThread().isInterrupted() + || interrupted.get()) { + long jobId = ec.getDdlJobId(); + excep.set(new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, + "The job '" + jobId + "' has been cancelled")); + interrupted.set(true); + return; + } else { + try { + blockingQueue.put(new Object()); + } catch (Exception e) { + excep.set(e); + interrupted.set(true); + } + } + if (null == excep.get() && !interrupted.get()) { + FutureTask task = new FutureTask<>(() -> { + try { + foreachPhyFileBatch(srcDbAndGroup, targetDbAndGroup, tempFileAndDir, + targetFileAndDir, phyTable, v, bitSet, batchSize, successBatch, minUpdateBatch, + sourceHostIpAndPort, targetHost, consumer, ec, interrupted); + } finally { + // Poll in finally to prevent dead lock on putting blockingQueue. + blockingQueue.poll(); + } + return null; + }); + futures.add(task); + BackFillThreadPool.getInstance() + .executeWithContext(task, PriorityFIFOTask.TaskPriority.GSI_BACKFILL_TASK); + } + } + }); + } + + if (excep.get() != null) { + // Interrupt all. 
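+ // Cancellation is best-effort: every batch that completed is recorded in the bitset, so a batch whose task is cancelled mid-flight is simply copied again on the next run.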
+ futures.forEach(f -> { + try { + f.cancel(true); + } catch (Throwable ignore) { + } + }); + } + + for (Future future : futures) { + try { + future.get(); + } catch (Exception e) { + futures.forEach(f -> { + try { + f.cancel(true); + } catch (Throwable ignore) { + } + }); + if (null == excep.get()) { + excep.set(e); + } + // mark the whole file as interrupted so queued tasks bail out early + interrupted.set(true); + } + } + + PhysicalBackfillManager.BackfillBean bfb = reporter.getBackfillBean(); + PhysicalBackfillManager.BackfillObjectRecord bor = new PhysicalBackfillManager.BackfillObjectRecord(); + bor.setJobId(bfb.backfillObject.jobId); + bor.setSuccessBatchCount(bfb.backfillObject.successBatchCount + successBatch.get()); + bor.setExtra(bfb.backfillObject.extra); + bor.setPhysicalDb(bfb.backfillObject.physicalDb); + bor.setPhysicalTable(bfb.backfillObject.physicalTable); + bor.setPhysicalPartition(bfb.backfillObject.physicalPartition); + bor.setEndTime(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(Calendar.getInstance().getTime())); + + if (excep.get() != null) { + detailInfo.setMsg(excep.get().toString()); + detailInfo.setBitSet(bitSet.toLongArray()); + bor.setDetailInfo(PhysicalBackfillDetailInfoFieldJSON.toJson(detailInfo)); + bor.setStatus((int) PhysicalBackfillManager.BackfillStatus.FAILED.getValue()); + + reporter.updateBackfillObject(bor); + throw GeneralUtil.nestedException(excep.get()); + } + bfb.backfillObject.detailInfo.setBitSet(null); + bor.setStatus((int) PhysicalBackfillManager.BackfillStatus.SUCCESS.getValue()); + bor.setDetailInfo(PhysicalBackfillDetailInfoFieldJSON.toJson(bfb.backfillObject.detailInfo)); + bor.setSuccessBatchCount(offsetAndSize.size()); + + Pair<String, Integer> ipPortPair = bfb.backfillObject.detailInfo.getSourceHostAndPort(); + + PhysicalBackfillUtils.deleteInnodbDataFile(schemaName, srcDbAndGroup.getValue(), srcDbAndGroup.getKey(), + ipPortPair.getKey(), ipPortPair.getValue(), + PhysicalBackfillUtils.convertToCfgFileName(tempFileAndDir.getValue(), PhysicalBackfillUtils.CFG), false, + ec); + + PhysicalBackfillUtils.deleteInnodbDataFile(schemaName, srcDbAndGroup.getValue(), srcDbAndGroup.getKey(), + ipPortPair.getKey(), ipPortPair.getValue(), tempFileAndDir.getValue(), false, ec); + // after all batches of this physical file have finished + reporter.updateBackfillObject(bor); + } + msg = "finished backfilling the ibd file for table[" + srcDbAndGroup.getKey() + ":" + phyTable + "]"; + SQLRecorderLogger.ddlLogger.info(msg); + } + + private void foreachPhyFileBatch(final Pair<String, String> srcDbAndGroup, + final Pair<String, String> targetDbAndGroup, + final Pair<String, String> srcFileAndDir, + final Pair<String, String> targetFileAndDir, + final String physicalTableName, + final Pair<Long, Long> offsetAndSize, + final BitSet bitSet, + long batchSize, + final AtomicInteger successBatch, + final long minUpdateBatch, + final Pair<String, Integer> sourceHost, + final List<Pair<String, Integer>> targetHost, + final BatchConsumer consumer, + final ExecutionContext ec, + final AtomicReference<Boolean> interrupted) { + String sourceStorageInstId = groupStorageInsts.get(srcDbAndGroup.getValue()); + String targetStorageInstId = groupStorageInsts.get(targetDbAndGroup.getValue()); + + PolarxPhysicalBackfill.TransferFileDataOperator transferFileData = null; + + Pair<String, String> srcUserInfo = storageInstAndUserInfos.computeIfAbsent(sourceStorageInstId, + key -> PhysicalBackfillUtils.getUserPasswd(sourceStorageInstId)); + // use the target DN's credentials, cached under the target storage inst id + Pair<String, String> tarUserInfo = storageInstAndUserInfos.computeIfAbsent(targetStorageInstId, + key -> PhysicalBackfillUtils.getUserPasswd(targetStorageInstId)); + + boolean success = false; + int tryTime = 1; + DecimalFormat df = new DecimalFormat("#.0");
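+ // The speed limit is re-read for every batch so PHYSICAL_BACKFILL_SPEED_LIMIT can be tuned while a backfill is running; the shared rate limiter is adjusted as soon as the value changes.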
+ Long speedLimit = OptimizerContext.getContext(schemaName).getParamManager() + .getLong(ConnectionParams.PHYSICAL_BACKFILL_SPEED_LIMIT); + if (speedLimit.longValue() != PhysicalBackfillUtils.getRateLimiter().getCurSpeedLimiter()) { + this.curSpeedLimit = speedLimit; + if (speedLimit > 0) { + double curSpeed = PhysicalBackfillUtils.getRateLimiter().getRate() / 1024; + PhysicalBackfillUtils.getRateLimiter().setRate(speedLimit.longValue()); + String msg = + "changing the maximum speed limit from " + df.format(curSpeed) + "KB/s to " + + df.format(PhysicalBackfillUtils.getRateLimiter().getRate() / 1024) + + "KB/s"; + SQLRecorderLogger.ddlLogger.info(msg); + } + } + do { + // check whether the DDL job is still allowed to run + PhysicalBackfillUtils.checkInterrupted(ec, interrupted); + if (this.curSpeedLimit > 0) { + PhysicalBackfillUtils.getRateLimiter().acquire(offsetAndSize.getValue().intValue()); + } + try ( + XConnection conn = (XConnection) (PhysicalBackfillUtils.getXConnectionForStorage(srcDbAndGroup.getKey(), + sourceHost.getKey(), sourceHost.getValue(), srcUserInfo.getKey(), srcUserInfo.getValue(), -1))) { + PolarxPhysicalBackfill.TransferFileDataOperator.Builder builder = + PolarxPhysicalBackfill.TransferFileDataOperator.newBuilder(); + + builder.setOperatorType(PolarxPhysicalBackfill.TransferFileDataOperator.Type.GET_DATA_FROM_SRC_IBD); + PolarxPhysicalBackfill.FileInfo.Builder fileInfoBuilder = PolarxPhysicalBackfill.FileInfo.newBuilder(); + fileInfoBuilder.setFileName(srcFileAndDir.getKey()); + fileInfoBuilder.setTempFile(false); + fileInfoBuilder.setDirectory(srcFileAndDir.getValue()); + fileInfoBuilder.setPartitionName(""); + builder.setFileInfo(fileInfoBuilder.build()); + builder.setBufferLen(offsetAndSize.getValue()); + builder.setOffset(offsetAndSize.getKey()); + transferFileData = conn.execReadBufferFromFile(builder); + success = true; + } catch (Exception ex) { + if (tryTime >= PhysicalBackfillUtils.MAX_RETRY) { + throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, ex); + } + tryTime++; + } + } while (!success); + consumer.consume(targetDbAndGroup, targetFileAndDir, targetHost, tarUserInfo, + transferFileData); + synchronized (lock) { + if (lastUpdateTime == 0) { + lastUpdateTime = System.currentTimeMillis(); + } + bitSet.set((int) (transferFileData.getOffset() / batchSize)); + int curSuccessBatch = successBatch.incrementAndGet(); + if (curSuccessBatch >= minUpdateBatch) { + long curTime = System.currentTimeMillis(); + // persist the progress to the metadb + PhysicalBackfillDetailInfoFieldJSON detailInfo = new PhysicalBackfillDetailInfoFieldJSON(); + detailInfo.setBitSet(bitSet.toLongArray()); + detailInfo.setMsg(""); + + PhysicalBackfillManager.BackfillBean bfb = reporter.getBackfillBean(); + PhysicalBackfillManager.BackfillObjectRecord bor = + new PhysicalBackfillManager.BackfillObjectRecord(); + + detailInfo.setSourceHostAndPort(bfb.backfillObject.detailInfo.sourceHostAndPort); + detailInfo.setTargetHostAndPorts(bfb.backfillObject.detailInfo.targetHostAndPorts); + + bor.setJobId(bfb.backfillObject.jobId); + bor.setSuccessBatchCount(bfb.backfillObject.successBatchCount + successBatch.get()); + bor.setExtra(bfb.backfillObject.extra); + bor.setPhysicalDb(bfb.backfillObject.physicalDb); + bor.setPhysicalTable(bfb.backfillObject.physicalTable); + bor.setPhysicalPartition(bfb.backfillObject.physicalPartition); + bor.setEndTime(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(Calendar.getInstance().getTime())); + bor.setStatus((int) PhysicalBackfillManager.BackfillStatus.RUNNING.getValue());
bor.setDetailInfo(PhysicalBackfillDetailInfoFieldJSON.toJson(detailInfo)); + + reporter.updateBackfillObject(bor); + reporter.loadBackfillMeta(bor.getJobId(), bor.getTableSchema(), bor.getPhysicalDb(), + bor.getPhysicalTable(), + bor.getPhysicalPartition()); + + successBatch.set(0); + + double speed = (curSuccessBatch * batchSize) * 1000.0 / Math.max(1, curTime - lastUpdateTime) / 1024; + + // TODO: compute the speed over a window of batches instead of since the last checkpoint + String msg = "already wrote " + curSuccessBatch + " batches successfully for " + srcFileAndDir.getValue() + + " speed:" + df.format(speed) + "KB/s, maximum speed limit:" + + df.format(PhysicalBackfillUtils.getRateLimiter().getRate() / 1024) + "KB/s"; + SQLRecorderLogger.ddlLogger.info(msg); + lastUpdateTime = System.currentTimeMillis(); + } + } + } + + private void cloneInnodbDataFile(final ExecutionContext ec, final Pair<String, String> dbAndGroup, + final Map<String, Pair<String, String>> srcFileAndDirs, String phyTableName, + List<String> phyPartNames, boolean hasNoPhyPart, + Pair<String, Integer> sourceIpAndPort) { + + String sourceStorageInstId = groupStorageInsts.get(dbAndGroup.getValue()); + + String msg = "begin to clone the files for table:" + phyTableName; + SQLRecorderLogger.ddlLogger.info(msg); + XConnection conn = null; + Pair<String, String> userInfo = storageInstAndUserInfos.computeIfAbsent(sourceStorageInstId, + key -> PhysicalBackfillUtils.getUserPasswd(sourceStorageInstId)); + boolean success = false; + int tryTime = 1; + StringBuilder copyFileInfo = null; + AtomicReference<Boolean> finished = new AtomicReference<>(false); + do { + try { + copyFileInfo = new StringBuilder(); + conn = (XConnection) (PhysicalBackfillUtils.getXConnectionForStorage(dbAndGroup.getKey(), + sourceIpAndPort.getKey(), sourceIpAndPort.getValue(), userInfo.getKey(), userInfo.getValue(), -1)); + conn.setNetworkTimeoutNanos(LONG_ENOUGH_TIMEOUT_FOR_DDL_ON_XPROTO_CONN * 1000000L); + conn.execQuery(String.format(PhysicalBackfillUtils.FLUSH_TABLE_SQL_TEMPLATE, phyTableName)); + PolarxPhysicalBackfill.FileManageOperator.Builder builder = + PolarxPhysicalBackfill.FileManageOperator.newBuilder(); + + PolarxPhysicalBackfill.TableInfo.Builder tableInfoBuilder = + PolarxPhysicalBackfill.TableInfo.newBuilder(); + tableInfoBuilder.setTableSchema(dbAndGroup.getKey()); + tableInfoBuilder.setTableName(phyTableName); + tableInfoBuilder.setPartitioned(!hasNoPhyPart); + int i = 0; + for (Map.Entry<String, Pair<String, String>> entry : srcFileAndDirs.entrySet()) { + Pair<String, String> srcFileAndDir = entry.getValue(); + + boolean handlerIbdFile = false; + do { + PhysicalBackfillUtils.checkInterrupted(ec, null); + PolarxPhysicalBackfill.FileInfo.Builder srcFileInfoBuilder = + PolarxPhysicalBackfill.FileInfo.newBuilder(); + + String fileName = srcFileAndDir.getKey(); + String directory = srcFileAndDir.getValue(); + + if (!handlerIbdFile) { + directory = + PhysicalBackfillUtils.convertToCfgFileName(directory, PhysicalBackfillUtils.CFG); + } + + srcFileInfoBuilder.setFileName(fileName); + srcFileInfoBuilder.setDirectory(directory); + srcFileInfoBuilder.setPartitionName(entry.getKey()); + + PolarxPhysicalBackfill.FileInfo.Builder tmpFileInfoBuilder = + PolarxPhysicalBackfill.FileInfo.newBuilder(); + tmpFileInfoBuilder.setFileName(fileName); + tmpFileInfoBuilder.setDirectory(directory + PhysicalBackfillUtils.TEMP_FILE_POSTFIX); + tmpFileInfoBuilder.setPartitionName(entry.getKey()); + + tableInfoBuilder.addFileInfo(srcFileInfoBuilder.build()); + tableInfoBuilder.addFileInfo(tmpFileInfoBuilder.build()); + if (i > 0) { + copyFileInfo.append(", "); + } + copyFileInfo.append(directory); + i++; + if (handlerIbdFile) { + break; + } else
{ + handlerIbdFile = true; + } + } while (true); + } + builder.setTableInfo(tableInfoBuilder.build()); + + builder.setOperatorType(PolarxPhysicalBackfill.FileManageOperator.Type.COPY_IBD_TO_TEMP_DIR_IN_SRC); + + Thread parentThread = Thread.currentThread(); + XSession session = conn.getSession(); + finished.set(false); + FutureTask<Void> task = new FutureTask<>(() -> { + do { + if (finished.get()) { + break; + } + if (session == null) { + SQLRecorderLogger.ddlLogger.info("exeCloneFile session was terminated"); + break; + } + if (parentThread.isInterrupted() || CrossEngineValidator.isJobInterrupted(ec)) { + SQLRecorderLogger.ddlLogger.info( + String.format("exeCloneFile session was cancelled, sessionId:%d", session.getSessionId())); + session.cancel(); + break; + } + try { + Thread.sleep(100); + } catch (Exception e) { + // ignore + } + } while (true); + }, null); + Future<?> futureTask = + ec.getExecutorService().submit(ec.getSchemaName(), ec.getTraceId(), AsyncTask.build(task)); + + conn.exeCloneFile(builder); + + finished.set(true); + try { + futureTask.get(); + } catch (Exception ex) { + try { + futureTask.cancel(true); + } catch (Throwable ignore) { + } + } + msg = String.format("already cloned the files[%s] for table %s", copyFileInfo, phyTableName); + SQLRecorderLogger.ddlLogger.info(msg); + success = true; + } catch (Exception ex) { + msg = String.format("failed to clone files:%s, [ip:%s,port:%s,db:%s]", copyFileInfo.toString(), + sourceIpAndPort.getKey(), sourceIpAndPort.getValue().toString(), dbAndGroup.getKey()); + if (ex.toString() != null) { + msg += " " + ex; + } + SQLRecorderLogger.ddlLogger.info(msg); + if (tryTime > PhysicalBackfillUtils.MAX_RETRY) { + throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, ex); + } + PhysicalBackfillUtils.checkInterrupted(ec, null); + tryTime++; + } finally { + try { + finished.set(true); + if (conn != null && !conn.isClosed()) { + try { + conn.execQuery(PhysicalBackfillUtils.UNLOCK_TABLE); + } catch (SQLException e) { + msg = "failed to clone files:" + copyFileInfo.toString() + " " + e; + SQLRecorderLogger.ddlLogger.info(msg); + throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, e); + } + } + } catch (SQLException ex) { + msg = "failed to clone files:" + copyFileInfo.toString() + " " + ex; + SQLRecorderLogger.ddlLogger.info(msg); + throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, ex); + } + } + } while (!success); + } + + private void fallocateIbdFile(final ExecutionContext ec, final Pair<String, String> targetFileAndDir, + final Pair<String, String> tarDbAndGroup, + final List<Pair<String, Integer>> targetHosts, String physicalTableName, + String phyPartitionName, long fileSize) { + String msg = "begin to fallocate ibd file:" + targetFileAndDir.getValue(); + SQLRecorderLogger.ddlLogger.info(msg); + + String tarStorageInstId = groupStorageInsts.get(tarDbAndGroup.getValue()); + + PolarxPhysicalBackfill.GetFileInfoOperator getFileInfoOperator = null; + Pair<String, String> tempFileAndDir = null; + Pair<String, String> userInfo = storageInstAndUserInfos.computeIfAbsent(tarStorageInstId, + key -> PhysicalBackfillUtils.getUserPasswd(tarStorageInstId)); + + for (Pair<String, Integer> targetHost : targetHosts) { + boolean success = false; + int tryTime = 1; + do { + PhysicalBackfillUtils.checkInterrupted(ec, null); + try (XConnection conn = (XConnection) (PhysicalBackfillUtils.getXConnectionForStorage( + tarDbAndGroup.getKey(), + targetHost.getKey(), targetHost.getValue(), userInfo.getKey(), userInfo.getValue(), -1))) {
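+ // The FALLOCATE_IBD operator runs against every target host (leader and followers) so the target ibd file exists at its full size before random-offset batch writes arrive.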
+ PolarxPhysicalBackfill.FileManageOperator.Builder builder = + PolarxPhysicalBackfill.FileManageOperator.newBuilder(); + + builder.setOperatorType(PolarxPhysicalBackfill.FileManageOperator.Type.FALLOCATE_IBD); + PolarxPhysicalBackfill.TableInfo.Builder tableInfoBuilder = + PolarxPhysicalBackfill.TableInfo.newBuilder(); + tableInfoBuilder.setTableSchema(tarDbAndGroup.getKey()); + tableInfoBuilder.setTableName(physicalTableName); + tableInfoBuilder.setPartitioned(false); + + PolarxPhysicalBackfill.FileInfo.Builder fileInfoBuilder = + PolarxPhysicalBackfill.FileInfo.newBuilder(); + fileInfoBuilder.setTempFile(false); + fileInfoBuilder.setFileName(targetFileAndDir.getKey()); + fileInfoBuilder.setPartitionName(phyPartitionName); + fileInfoBuilder.setDirectory(targetFileAndDir.getValue()); + fileInfoBuilder.setDataSize(fileSize); + + tableInfoBuilder.addFileInfo(fileInfoBuilder.build()); + builder.setTableInfo(tableInfoBuilder.build()); + + conn.execFallocateIbdFile(builder); + success = true; + } catch (Exception ex) { + SQLRecorderLogger.ddlLogger.info(ex.toString()); + if (tryTime > PhysicalBackfillUtils.MAX_RETRY) { + throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, ex); + } + PhysicalBackfillUtils.checkInterrupted(ec, null); + tryTime++; + } + } while (!success); + } + msg = "already fallocated the ibd file:" + targetFileAndDir.getValue(); + SQLRecorderLogger.ddlLogger.info(msg); + } + + private void copyCfgFile(final Pair<String, String> srcFileAndDir, final Pair<String, String> srcDbAndGroup, + final Pair<String, Integer> sourceHostIpAndPort, + final Pair<String, String> targetFileAndDir, final Pair<String, String> tarDbAndGroup, + final List<Pair<String, Integer>> targetHosts, BatchConsumer consumer) { + + String sourceStorageInstId = groupStorageInsts.get(srcDbAndGroup.getValue()); + String targetStorageInstId = groupStorageInsts.get(tarDbAndGroup.getValue()); + + PolarxPhysicalBackfill.TransferFileDataOperator transferFileData = null; + + Pair<String, String> srcUserInfo = storageInstAndUserInfos.computeIfAbsent(sourceStorageInstId, + key -> PhysicalBackfillUtils.getUserPasswd(sourceStorageInstId)); + Pair<String, String> tarUserInfo = storageInstAndUserInfos.computeIfAbsent(targetStorageInstId, + key -> PhysicalBackfillUtils.getUserPasswd(targetStorageInstId)); + + long offset = 0L; + do { + boolean success = false; + int tryTime = 0; + do { + try (XConnection conn = (XConnection) (PhysicalBackfillUtils.getXConnectionForStorage( + srcDbAndGroup.getKey(), + sourceHostIpAndPort.getKey(), sourceHostIpAndPort.getValue(), srcUserInfo.getKey(), + srcUserInfo.getValue(), -1))) { + + PolarxPhysicalBackfill.TransferFileDataOperator.Builder builder = + PolarxPhysicalBackfill.TransferFileDataOperator.newBuilder(); + + builder.setOperatorType(PolarxPhysicalBackfill.TransferFileDataOperator.Type.GET_DATA_FROM_SRC_IBD); + PolarxPhysicalBackfill.FileInfo.Builder fileInfoBuilder = + PolarxPhysicalBackfill.FileInfo.newBuilder(); + fileInfoBuilder.setFileName(srcFileAndDir.getKey()); + fileInfoBuilder.setTempFile(false); + fileInfoBuilder.setDirectory(srcFileAndDir.getValue()); + fileInfoBuilder.setPartitionName(""); + builder.setFileInfo(fileInfoBuilder.build()); + builder.setBufferLen(batchSize); + builder.setOffset(offset); + transferFileData = conn.execReadBufferFromFile(builder); + success = true; + } catch (Exception ex) { + if (tryTime >= PhysicalBackfillUtils.MAX_RETRY) { + throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, ex); + } + tryTime++; + } + } while (!success); + consumer.consume(tarDbAndGroup, targetFileAndDir, targetHosts, tarUserInfo, + transferFileData); + if
(transferFileData.getBufferLen() < batchSize) { + return; + } + offset += transferFileData.getBufferLen(); + } while (true); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/physicalbackfill/PhysicalBackfillManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/physicalbackfill/PhysicalBackfillManager.java new file mode 100644 index 000000000..3be0b4885 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/physicalbackfill/PhysicalBackfillManager.java @@ -0,0 +1,1494 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.physicalbackfill; + +import com.alibaba.druid.util.JdbcUtils; +import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.jdbc.ParameterContext; +import com.alibaba.polardbx.common.jdbc.ParameterMethod; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.config.ConfigDataMode; +import com.alibaba.polardbx.executor.gsi.GsiBackfillManager; +import com.alibaba.polardbx.gms.metadb.GmsSystemTables; +import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; +import com.alibaba.polardbx.gms.metadb.record.SystemTableRecord; +import com.alibaba.polardbx.gms.partition.PhysicalBackfillDetailInfoFieldJSON; +import com.alibaba.polardbx.gms.util.MetaDbUtil; +import com.alibaba.polardbx.optimizer.config.table.GsiUtils.Consumer; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; +import com.google.common.base.Joiner; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import org.apache.calcite.linq4j.Ord; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; +import org.jetbrains.annotations.NotNull; +import org.joda.time.DateTime; + +import javax.sql.DataSource; +import java.math.BigDecimal; +import java.math.RoundingMode; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.sql.Wrapper; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +/** + * @author luoyanxin + */ +public class PhysicalBackfillManager { + + private static final 
String SYSTABLE_BACKFILL_OBJECTS = GmsSystemTables.PHYSICAL_BACKFILL_OBJECTS; + + private final DataSource dataSource; + private final String schema; + + public PhysicalBackfillManager(String schema) { + this.schema = schema; + this.dataSource = MetaDbDataSource.getInstance().getDataSource(); + } + + public String getSchema() { + return this.schema; + } + + public DataSource getDataSource() { + return this.dataSource; + } + + public void initBackfillMeta(Long backfillId, BackfillObjectRecord initBackfillObject) { + insertBackfillMeta(backfillId, initBackfillObject, true); + } + + public BackfillBean loadBackfillMeta(long backfillId, String schemaName, String phyDb, String physicalTable, + String phyPartition) { + List<BackfillObjectRecord> bfoList = + queryBackfillObject(backfillId, schemaName, phyDb, physicalTable, phyPartition); + if (CollectionUtils.isEmpty(bfoList)) { + return BackfillBean.EMPTY; + } + return BackfillBean.create(bfoList); + } + + public List<BackfillBean> loadBackfillMeta(long backfillId, String tableSchema, String logicalTable) { + List<BackfillObjectRecord> bfoList = queryBackfillObject(backfillId, tableSchema, logicalTable); + if (CollectionUtils.isEmpty(bfoList)) { + return ImmutableList.of(BackfillBean.EMPTY); + } + List<BackfillBean> result = new ArrayList<>(); + for (BackfillObjectRecord bor : bfoList) { + result.add(BackfillBean.create(ImmutableList.of(bor))); + } + return result; + } + + // TODO: fetch all file sizes and calculate the overall progress + private Integer computeProgress(BackfillObjectBean bfo, ParameterContext param) { + try { + final Object arg = param.getArgs()[1]; + final DataType type = DataTypeUtil.getTypeOfObject(arg); + + if (DataTypeUtil.isNumberSqlType(type) || DataTypeUtil + .anyMatchSemantically((DataType) param.getArgs()[2], DataTypes.ULongType)) { + final BigDecimal current = DataTypes.DecimalType.convertFrom(arg).toBigDecimal(); + final BigDecimal max = DataTypes.DecimalType.convertFrom(bfo.totalBatch).toBigDecimal(); + + return current.divide(max, 4, RoundingMode.HALF_UP).multiply(BigDecimal.valueOf(100L)).intValue(); + } + } catch (Exception e) { + // Ignore exception + } + + return 0; + } + + public void deleteByBackfillId(Long backfillId) { + + Map<Integer, ParameterContext> params = new HashMap<>(); + params.put(1, new ParameterContext(ParameterMethod.setLong, new Object[] {1, backfillId})); + + wrapWithTransaction(dataSource, (conn) -> { + try { + update(SQL_DELETE_BY_JOB_ID, Lists.newArrayList(params), conn); + } catch (SQLException e) { + throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_EXECUTE, + e, + "delete physical backfill meta failed!"); + } + }); + } + + public void deleteById(Long id) { + + Map<Integer, ParameterContext> params = new HashMap<>(); + params.put(1, new ParameterContext(ParameterMethod.setLong, new Object[] {1, id})); + + wrapWithTransaction(dataSource, (conn) -> { + try { + update(SQL_DELETE_BY_ID, Lists.newArrayList(params), conn); + } catch (SQLException e) { + throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_EXECUTE, + e, + "delete physical backfill meta failed!"); + } + }); + } + + public void updateStatusAndTotalBatch(Long id, long totalBatch) { + Map<Integer, ParameterContext> params = new HashMap<>(); + params.put(1, + new ParameterContext(ParameterMethod.setInt, new Object[] {1, (int) BackfillStatus.RUNNING.getValue()})); + params.put(2, new ParameterContext(ParameterMethod.setLong, new Object[] {2, totalBatch})); + params.put(3, new ParameterContext(ParameterMethod.setLong, new Object[] {3, id})); + + wrapWithTransaction(dataSource, (conn) -> { + try { + update(SQL_UPDATE_BACKFILL_STATUS_BATCH, Lists.newArrayList(params),
conn); + } catch (SQLException e) { + throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_EXECUTE, + e, + "update physical backfill meta failed!"); + } + }); + } + + public static boolean deleteAll(String schemaName, Connection conn) { + PreparedStatement ps = null; + try { + ps = conn.prepareStatement(SQL_CLEAN_ALL); + ps.setString(1, schemaName.toLowerCase()); + ps.executeUpdate(); + return true; + } catch (SQLException e) { + throw new TddlNestableRuntimeException(e); + } finally { + JdbcUtils.close(ps); + } + } + + public List queryBackfillObject(long backfillId, String schemaName, String tableName) { + return queryByJobIdSchTb(SQL_SELECT_BACKFILL_OBJECT_SCH_TB, backfillId, schemaName, tableName, + BackfillObjectRecord.ORM); + } + + // ~ Basic data access methods + // ------------------------------------------------------------------------------------------ + + private void insertBackfillMeta(Long backfillId, + BackfillObjectRecord backfillObjectRecord, + boolean insertIgnore) { + wrapWithTransaction(dataSource, + (conn) -> { + try { + BackfillBean backfillBean = + loadBackfillMeta(backfillId, backfillObjectRecord.tableSchema, backfillObjectRecord.physicalDb, + backfillObjectRecord.physicalTable, backfillObjectRecord.physicalPartition); + if (backfillBean == BackfillBean.EMPTY) { + //do nothing + } else if (backfillBean.isSuccess()) { + if (isSameTask(backfillObjectRecord, backfillBean)) { + return; + } else { + deleteByBackfillId(backfillId); + } + } else { + if (isSameTask(backfillObjectRecord, backfillBean)) { + return; + } else { + throw new TddlNestableRuntimeException( + "does not allow concurrent backfill job on a logical table"); + } + } + batchInsert(insertIgnore ? SQL_INSERT_IGNORE_BACKFILL_OBJECT : SQL_INSERT_BACKFILL_OBJECT, + ImmutableList.of(backfillObjectRecord), + conn); + } catch (SQLException e) { + throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_EXECUTE, + e, + "add GSI backfill meta failed!"); + } + }); + } + + private boolean isSameTask(BackfillObjectRecord backfillObjectRecord, BackfillBean backfillBean) { + if (backfillObjectRecord == null) { + return false; + } + return StringUtils.equalsIgnoreCase(backfillBean.indexSchema, backfillObjectRecord.indexSchema) + && StringUtils.equalsIgnoreCase(backfillBean.indexName, backfillObjectRecord.indexName); + } + + private List queryBackfillObject(long backfillId, String schemaName, String phyDb, + String physicalTable, + String phyPartition) { + return queryByJobId(SQL_SELECT_BACKFILL_OBJECT, backfillId, schemaName, phyDb, physicalTable, phyPartition, + BackfillObjectRecord.ORM); + } + + public void updateBackfillObjectBean(List backfillObject) { + final List backfillObjectRecords = + backfillObject.stream().map(bfo -> { + return new PhysicalBackfillManager.BackfillObjectRecord(bfo.id, + bfo.jobId, + bfo.tableSchema, + bfo.tableName, + bfo.indexSchema, + bfo.indexName, + bfo.physicalDb, + bfo.physicalTable, + bfo.physicalPartition, + bfo.sourceGroupName, + bfo.targetGroupName, + bfo.sourceFileName, + bfo.sourceDirName, + bfo.targetFileName, + bfo.targetDirName, + (int) bfo.status.value, + PhysicalBackfillDetailInfoFieldJSON.toJson(bfo.detailInfo), + bfo.totalBatch, + bfo.batchSize, + bfo.offset, + bfo.successBatchCount, + bfo.startTime, + new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(Calendar.getInstance().getTime()), + bfo.extra, + bfo.lsn); + }) + .collect(Collectors.toList()); + + updateBackfillObject(backfillObjectRecords); + } + + public void updateBackfillObject(List 
backfillObjectRecords) { + final List> params = backfillObjectRecords.stream() + .map(bfo -> (Map) ImmutableMap.builder() + .put(1, new ParameterContext(ParameterMethod.setInt, new Object[] {1, bfo.status})) + .put(2, new ParameterContext(ParameterMethod.setString, new Object[] {2, bfo.detailInfo})) + .put(3, new ParameterContext(ParameterMethod.setLong, new Object[] {3, bfo.successBatchCount})) + .put(4, new ParameterContext(ParameterMethod.setString, new Object[] {4, bfo.endTime})) + .put(5, new ParameterContext(ParameterMethod.setString, new Object[] {5, bfo.extra})) + .put(6, new ParameterContext(ParameterMethod.setLong, new Object[] {6, bfo.jobId})) + .put(7, new ParameterContext(ParameterMethod.setString, new Object[] {7, bfo.physicalDb})) + .put(8, new ParameterContext(ParameterMethod.setString, new Object[] {8, bfo.physicalTable})) + .put(9, new ParameterContext(ParameterMethod.setString, new Object[] {9, bfo.physicalPartition})) + .build()) + .collect(ArrayList::new, ArrayList::add, ArrayList::addAll); + + wrapWithTransaction(dataSource, (conn) -> { + try { + update(SQL_UPDATE_BACKFILL_PROGRESS, params, conn); + } catch (SQLException e) { + throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_EXECUTE, + e, + "update import table backfill meta failed!"); + } + }); + } + + public List queryBackFillAggInfoById(List backFillIdList) { + if (CollectionUtils.isEmpty(backFillIdList)) { + return new ArrayList<>(); + } + try (Connection connection = dataSource.getConnection()) { + String ids = Joiner.on(",").join(backFillIdList); + String sql = String.format(SQL_SELECT_BACKFILL_VIEW_BY_ID, ids); + return MetaDbUtil.query(sql, PhysicalBackfillManager.BackFillAggInfo.class, connection); + } catch (Exception e) { + throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_EXECUTE, + e, "queryBackFillAggInfo failed!"); + } + } + + private > List queryByJobId(String sql, long backfillId, String schemaName, String phyDb, + String physicalTable, + String phyPartition, R orm) { + try (Connection connection = dataSource.getConnection()) { + return query(sql, + ImmutableMap.of(1, new ParameterContext(ParameterMethod.setLong, new Object[] {1, backfillId}), + 2, new ParameterContext(ParameterMethod.setString, new Object[] {2, schemaName}), + 3, new ParameterContext(ParameterMethod.setString, new Object[] {3, phyDb}), + 4, new ParameterContext(ParameterMethod.setString, new Object[] {4, physicalTable}), + 5, new ParameterContext(ParameterMethod.setString, new Object[] {5, phyPartition})), + connection, + orm); + } catch (SQLException e) { + throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_EXECUTE, + e, + "query import table backfill meta failed!"); + } + } + + private > List queryByJobIdSchTb(String sql, long backfillId, String schemaName, + String tableName, R orm) { + try (Connection connection = dataSource.getConnection()) { + return query(sql, + ImmutableMap.of(1, new ParameterContext(ParameterMethod.setLong, new Object[] {1, backfillId}), + 2, new ParameterContext(ParameterMethod.setString, new Object[] {2, schemaName}), + 3, new ParameterContext(ParameterMethod.setString, new Object[] {3, tableName})), + connection, + orm); + } catch (SQLException e) { + throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_EXECUTE, + e, + "query import table backfill meta failed!"); + } + } + + private > List query(String sql, R orm) { + try (Connection connection = dataSource.getConnection()) { + return query(sql, + connection, + orm); + } catch 
(SQLException e) { + throw new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_EXECUTE, + e, + "query import table backfill meta failed!"); + } + } + + private static void wrapWithTransaction(DataSource dataSource, Consumer call) { + com.alibaba.polardbx.optimizer.config.table.GsiUtils.wrapWithTransaction(dataSource, call, + (e) -> new TddlRuntimeException(ErrorCode.ERR_GLOBAL_SECONDARY_INDEX_EXECUTE, e, + "get connection for GSI backfill meta failed!")); + } + + private static final String SQL_INSERT_BACKFILL_OBJECT = "INSERT INTO " + + SYSTABLE_BACKFILL_OBJECTS + + "(JOB_ID,TABLE_SCHEMA,TABLE_NAME,INDEX_SCHEMA,INDEX_NAME,PHYSICAL_DB,PHYSICAL_TABLE," + + "PHYSICAL_PARTITION,SOURCE_GROUP_NAME,TARGET_GROUP_NAME,SOURCE_FILE_NAME,SOURCE_DIR_NAME,TARGET_FILE_NAME,TARGET_DIR_NAME," + + "STATUS,DETAIL_INFO,TOTAL_BATCH,BATCH_SIZE,OFFSET,SUCCESS_BATCH_COUNT,START_TIME,END_TIME,EXTRA,LSN) " + + "VALUES(?, ? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ?, ?, ?, ?, ?, ?, ?, ?, ?)"; + + private static final String SQL_INSERT_IGNORE_BACKFILL_OBJECT = "INSERT IGNORE INTO " + + SYSTABLE_BACKFILL_OBJECTS + + "(JOB_ID,TABLE_SCHEMA,TABLE_NAME,INDEX_SCHEMA,INDEX_NAME,PHYSICAL_DB,PHYSICAL_TABLE," + + "PHYSICAL_PARTITION,SOURCE_GROUP_NAME,TARGET_GROUP_NAME,SOURCE_FILE_NAME,SOURCE_DIR_NAME,TARGET_FILE_NAME,TARGET_DIR_NAME," + + "STATUS,DETAIL_INFO,TOTAL_BATCH,BATCH_SIZE,OFFSET,SUCCESS_BATCH_COUNT,START_TIME,END_TIME,EXTRA,LSN) " + + "VALUES(?, ? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? , ?, ?, ?, ?, ?, ?, ?, ?, ?)"; + + private static final String SQL_SELECT_BACKFILL_OBJECT = + "SELECT ID,JOB_ID,TABLE_SCHEMA,TABLE_NAME,INDEX_SCHEMA,INDEX_NAME,PHYSICAL_DB,PHYSICAL_TABLE,PHYSICAL_PARTITION,SOURCE_GROUP_NAME,TARGET_GROUP_NAME,SOURCE_FILE_NAME,SOURCE_DIR_NAME,TARGET_FILE_NAME,TARGET_DIR_NAME,STATUS,DETAIL_INFO,TOTAL_BATCH,BATCH_SIZE,OFFSET,SUCCESS_BATCH_COUNT,START_TIME,END_TIME,EXTRA,LSN FROM " + + SYSTABLE_BACKFILL_OBJECTS + + " WHERE JOB_ID = ? AND TABLE_SCHEMA = ? AND PHYSICAL_DB = ? AND PHYSICAL_TABLE = ? AND PHYSICAL_PARTITION = ?"; + + private static final String SQL_SELECT_BACKFILL_OBJECT_SCH_TB = + "SELECT ID,JOB_ID,TABLE_SCHEMA,TABLE_NAME,INDEX_SCHEMA,INDEX_NAME,PHYSICAL_DB,PHYSICAL_TABLE,PHYSICAL_PARTITION,SOURCE_GROUP_NAME,TARGET_GROUP_NAME,SOURCE_FILE_NAME,SOURCE_DIR_NAME,TARGET_FILE_NAME,TARGET_DIR_NAME,STATUS,DETAIL_INFO,TOTAL_BATCH,BATCH_SIZE,OFFSET,SUCCESS_BATCH_COUNT,START_TIME,END_TIME,EXTRA,LSN FROM " + + SYSTABLE_BACKFILL_OBJECTS + + " WHERE JOB_ID = ? AND TABLE_SCHEMA = ? AND TABLE_NAME = ?"; + + private static final String SQL_UPDATE_BACKFILL_PROGRESS = "UPDATE " + + SYSTABLE_BACKFILL_OBJECTS + + " SET STATUS = ?, DETAIL_INFO = ?, SUCCESS_BATCH_COUNT = ?, END_TIME=?, EXTRA = ?" + + " WHERE JOB_ID = ? AND PHYSICAL_DB = ? AND PHYSICAL_TABLE = ? AND PHYSICAL_PARTITION = ? 
"; + + private static final String SQL_SELECT_BACKFILL_VIEW_BY_ID = + "select job_id,table_schema,table_name,min(`status`) as min_status,max(`status`) as max_status,sum(success_batch_count * batch_size) as success_buffer_size, min(start_time) as start_time,max(end_time) as end_time, sum(timestampdiff(second, start_time, end_time)) as duration from " + + SYSTABLE_BACKFILL_OBJECTS + " where job_id in (%s) group by job_id"; + + private static final String SQL_SELECT_BACKFILL_BY_ID = + "select ID,JOB_ID,TABLE_SCHEMA,TABLE_NAME,INDEX_SCHEMA,INDEX_NAME,PHYSICAL_DB,PHYSICAL_TABLE,PHYSICAL_PARTITION,SOURCE_GROUP_NAME,TARGET_GROUP_NAME,SOURCE_FILE_NAME,SOURCE_DIR_NAME,TARGET_FILE_NAME,TARGET_DIR_NAME,STATUS,DETAIL_INFO,TOTAL_BATCH,BATCH_SIZE,OFFSET,SUCCESS_BATCH_COUNT,START_TIME,END_TIME,EXTRA,LSN from " + + SYSTABLE_BACKFILL_OBJECTS + " where job_id in (%s)"; + + private static final String SQL_DELETE_BY_JOB_ID = "DELETE FROM " + + SYSTABLE_BACKFILL_OBJECTS + + " WHERE JOB_ID = ?"; + + private static final String SQL_DELETE_BY_ID = "DELETE FROM " + + SYSTABLE_BACKFILL_OBJECTS + + " WHERE ID = ?"; + + private static final String SQL_UPDATE_BACKFILL_STATUS_BATCH = "UPDATE " + + SYSTABLE_BACKFILL_OBJECTS + + " SET STATUS = ?, TOTAL_BATCH = ? " + + " WHERE ID = ? "; + + private static final String SQL_CLEAN_ALL = "DELETE FROM " + SYSTABLE_BACKFILL_OBJECTS + " WHERE TABLE_SCHEMA = ?"; + + private List query(String sql, Map params, Connection connection, Orm orm) + throws SQLException { + try (PreparedStatement ps = connection.prepareStatement(sql)) { + ParameterMethod.setParameters(ps, params); + + final ResultSet rs = ps.executeQuery(); + + final List result = new ArrayList<>(); + while (rs.next()) { + result.add(orm.convert(rs)); + } + + return result; + } + } + + private List query(String sql, Connection connection, Orm orm) + throws SQLException { + try (PreparedStatement ps = connection.prepareStatement(sql)) { + final ResultSet rs = ps.executeQuery(); + + final List result = new ArrayList<>(); + while (rs.next()) { + result.add(orm.convert(rs)); + } + + return result; + } + } + + protected void update(String sql, List> params, Connection connection) + throws SQLException { + final int batchSize = 512; + for (int i = 0; i < params.size(); i += batchSize) { + try (PreparedStatement ps = connection.prepareStatement(sql)) { + for (int j = 0; j < batchSize && i + j < params.size(); j++) { + Map batch = params.get(i + j); + ParameterMethod.setParameters(ps, batch); + ps.addBatch(); + } + + ps.executeBatch(); + } + } + } + + private void batchInsert(String sql, List params, Connection connection) throws SQLException { + update(sql, + params.stream().map(Orm::params).collect(ArrayList::new, ArrayList::add, ArrayList::addAll), + connection); + } + + private Map stringParamRow(String... 
values) { + final Map result = new HashMap<>(); + Ord.zip(values).forEach(ord -> result.put(ord.i + 1, new ParameterContext(ParameterMethod.setString, + new Object[] {ord.i + 1, ord.e}))); + + return result; + } + + @NotNull + public BackfillObjectRecord getBackfillObjectRecord(final long ddlJobId, + final String schemaName, + final String tableName, + final String physicalDb, + final String phyTable, + final String physicalPartition, + final String sourceGroup, + final String targetGroup, + final Pair srcFileAndDir, + final Pair targetFileAndDir, + final long totalBatch, + final long batchSize, + final long offset, + final long lsn) { + return new BackfillObjectRecord(ddlJobId, schemaName, tableName, schemaName, + tableName, physicalDb, phyTable, physicalPartition, sourceGroup, targetGroup, srcFileAndDir, + targetFileAndDir, totalBatch, batchSize, offset, lsn); + } + + public BackfillObjectRecord initUpperBound(final long ddlJobId, final String schemaName, + final String tableName, final String dbIndex, + final String phyTable, + final String physicalPartition, + final String sourceGroup, + final String targetGroup, + final Pair srcFileAndDir, + final Pair targetFileAndDir, + final long totalBatch, final long batchSize, + final long offset, final long lsn, + final Pair sourceHost, + final List> targetHosts) { + BackfillObjectRecord obj = + getBackfillObjectRecord(ddlJobId, schemaName, tableName, dbIndex, phyTable, physicalPartition, sourceGroup, + targetGroup, srcFileAndDir, targetFileAndDir, totalBatch, batchSize, offset, lsn); + PhysicalBackfillDetailInfoFieldJSON json = new PhysicalBackfillDetailInfoFieldJSON(); + json.setTargetHostAndPorts(targetHosts); + json.setSourceHostAndPort(sourceHost); + obj.setDetailInfo(PhysicalBackfillDetailInfoFieldJSON.toJson(json)); + return obj; + } + + public void insertBackfillMeta(final String schemaName, final String logicalTableName, + final Long backfillId, final String dbIndex, + final String phyTable, + final String physicalPartition, + final String sourceGroup, final String targetGroup, + final Pair srcFileAndDir, + final Pair targetFileAndDir, + final long totalBatch, final long batchSize, + final long offset, final long lsn, + final Pair sourceHost, + final List> targetHosts) { + // Init position mark with upper bound + final BackfillObjectRecord initBfo = + initUpperBound(backfillId, schemaName, logicalTableName, dbIndex, phyTable, physicalPartition, sourceGroup, + targetGroup, srcFileAndDir, targetFileAndDir, totalBatch, batchSize, offset, lsn, sourceHost, + targetHosts); + + // Insert ignore + initBackfillMeta(backfillId, initBfo); + } + + // ~ Data model + // --------------------------------------------------------------------------------------------------------- + + public static class BackfillBean { + + public static final BackfillBean EMPTY = new BackfillBean(); + + public final long jobId; + public final String tableSchema; + public final String tableName; + public final String indexSchema; + public final String indexName; + public final BackfillObjectBean backfillObject; + + private BackfillBean() { + this.jobId = -1; + this.tableSchema = null; + this.tableName = null; + this.indexSchema = null; + this.indexName = null; + this.backfillObject = null; + } + + public BackfillBean(long jobId, String tableSchema, String tableName, String indexSchema, + String indexName, BackfillObjectBean backfillObject) { + this.jobId = jobId; + this.tableSchema = tableSchema; + this.tableName = tableName; + this.indexSchema = indexSchema; + 
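+            /*
+             * Illustrative sketch (helper variable names here are examples, not part
+             * of this class): a BackfillBean wraps the single BACKFILL_OBJECTS row
+             * selected by (job_id, table_schema, physical_db, physical_table,
+             * physical_partition):
+             *
+             *   PhysicalBackfillManager mgr = new PhysicalBackfillManager(schemaName);
+             *   PhysicalBackfillManager.BackfillBean bean =
+             *       mgr.loadBackfillMeta(backfillId, schema, phyDb, phyTable, phyPartition);
+             *   if (bean.isInit()) {
+             *       // freshly inserted meta: the physical copy has not started yet
+             *   }
+             */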
this.indexName = indexName; + this.backfillObject = backfillObject; + } + + public static BackfillBean create(List backfillObjectRecords) { + if (GeneralUtil.isEmpty(backfillObjectRecords)) { + return BackfillBean.EMPTY; + } + assert backfillObjectRecords.size() == 1; + BackfillObjectRecord firstObj = backfillObjectRecords.get(0); + return new BackfillBean(firstObj.jobId, firstObj.tableSchema, firstObj.tableName, firstObj.indexSchema, + firstObj.indexName, BackfillObjectBean.create(firstObj)); + } + + public boolean isEmpty() { + return jobId < 0; + } + + public boolean isSuccess() { + if (backfillObject.status != BackfillStatus.SUCCESS) { + return false; + } + return true; + } + + public boolean isInit() { + if (backfillObject.status != BackfillStatus.INIT) { + return false; + } + return true; + } + + @Override + public String toString() { + return "BackfillBean{" + + "jobId=" + jobId + + ", tableSchema='" + tableSchema + '\'' + + ", tableName='" + tableName + '\'' + + ", indexSchema='" + indexSchema + '\'' + + ", indexName='" + indexName + '\'' + + ", backfillObject=" + backfillObject + + '}'; + } + } + + public static class BackfillObjectKey { + + public final String indexSchema; + public final String indexName; + public final String physicalDb; + public final String physicalTable; + public final String physicalPartition; + + public BackfillObjectKey(String indexSchema, String indexName, String physicalDb, String physicalTable, + String physicalPartition) { + this.indexSchema = indexSchema; + this.indexName = indexName; + this.physicalDb = physicalDb; + this.physicalTable = physicalTable; + this.physicalPartition = physicalPartition; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof BackfillObjectKey)) { + return false; + } + BackfillObjectKey that = (BackfillObjectKey) o; + return StringUtils.equalsIgnoreCase(indexSchema, that.indexSchema) && StringUtils.equalsIgnoreCase( + indexName, that.indexName) + && StringUtils.equalsIgnoreCase(physicalDb, that.physicalDb) && StringUtils.equalsIgnoreCase( + physicalTable, that.physicalTable) + && StringUtils.equalsIgnoreCase(physicalPartition, that.physicalPartition); + } + + @Override + public int hashCode() { + return Objects.hash(indexSchema, indexName, physicalDb, physicalTable, physicalPartition); + } + + @Override + public String toString() { + return "BackfillObjectKey{" + + "indexSchema='" + indexSchema + '\'' + + ", indexName='" + indexName + '\'' + + ", physicalDb='" + physicalDb + '\'' + + ", physicalTable='" + physicalTable + '\'' + + ", physicalPartition='" + physicalPartition + '\'' + + '}'; + } + } + + public static class BackfillObjectBean { + + public final long id; + public final long jobId; + public final String tableSchema; + public final String tableName; + public final String indexSchema; + public final String indexName; + public final String physicalDb; + public final String physicalTable; + public final String physicalPartition; + public final String sourceGroupName; + public final String targetGroupName; + public final String sourceFileName; + public final String sourceDirName; + public final String targetFileName; + public final String targetDirName; + public final BackfillStatus status; + public final PhysicalBackfillDetailInfoFieldJSON detailInfo; + public final long totalBatch; + public final long batchSize; + public final long offset; + public final long successBatchCount; + + public final String startTime; + public final String endTime; + public final 
String extra; + public final long lsn; + + public Integer progress; + + private BackfillObjectBean() { + this.id = -1; + this.jobId = -1; + this.tableSchema = null; + this.tableName = null; + this.indexSchema = null; + this.indexName = null; + this.physicalDb = null; + this.physicalTable = null; + this.physicalPartition = null; + this.sourceGroupName = null; + this.targetGroupName = null; + this.sourceFileName = null; + this.sourceDirName = null; + this.targetFileName = null; + this.targetDirName = null; + this.status = null; + this.detailInfo = null; + this.totalBatch = -1; + this.batchSize = -1; + this.offset = -1; + this.successBatchCount = -1; + this.startTime = null; + this.endTime = null; + this.extra = null; + this.progress = 0; + this.lsn = -1l; + } + + public BackfillObjectBean(long id, long jobId, String tableSchema, String tableName, String indexSchema, + String indexName, String physicalDb, String physicalTable, String physicalPartition, + String sourceGroupName, String targetGroupName, String sourceFileName, + String sourceDirName, + String targetFileName, String targetDirName, BackfillStatus status, + PhysicalBackfillDetailInfoFieldJSON detailInfo, + long totalBatch, long batchSize, long offset, + long successBatchCount, String startTime, String endTime, + String extra, long lsn, Integer progress) { + this.id = id; + this.jobId = jobId; + this.tableSchema = tableSchema; + this.tableName = tableName; + this.indexSchema = indexSchema; + this.indexName = indexName; + this.physicalDb = physicalDb; + this.physicalTable = physicalTable; + this.physicalPartition = physicalPartition; + this.sourceGroupName = sourceGroupName; + this.targetGroupName = targetGroupName; + this.sourceFileName = sourceFileName; + this.sourceDirName = sourceDirName; + this.targetFileName = targetFileName; + this.targetDirName = targetDirName; + this.status = status; + this.detailInfo = detailInfo; + this.totalBatch = totalBatch; + this.batchSize = batchSize; + this.offset = offset; + this.successBatchCount = successBatchCount; + this.startTime = startTime; + this.endTime = endTime; + this.extra = extra; + this.lsn = lsn; + this.progress = progress; + } + + public static BackfillObjectBean create(BackfillObjectRecord bfoRecord) { + final Long successBatchCount = bfoRecord.getSuccessBatchCount(); + final Long totalBatchCount = bfoRecord.getTotalBatch(); + + Integer progress = 0; + if (totalBatchCount > 0) { + try { + final BigDecimal max = new BigDecimal(totalBatchCount); + final BigDecimal suc = new BigDecimal(Math.max(0, successBatchCount)); + + progress = suc.divide(max, 4, RoundingMode.HALF_UP).multiply(BigDecimal.valueOf(100L)).intValue(); + + } catch (Exception e) { + + } + } + + return new BackfillObjectBean(bfoRecord.id, + bfoRecord.jobId, + bfoRecord.tableSchema, + bfoRecord.tableName, + bfoRecord.indexSchema, + bfoRecord.indexName, + bfoRecord.physicalDb, + bfoRecord.physicalTable, + bfoRecord.physicalPartition, + bfoRecord.sourceGroupName, + bfoRecord.targetGroupName, + bfoRecord.sourceFileName, + bfoRecord.sourceDirName, + bfoRecord.targetFileName, + bfoRecord.targetDirName, + BackfillStatus.of(bfoRecord.status), + PhysicalBackfillDetailInfoFieldJSON.fromJson(bfoRecord.detailInfo), + bfoRecord.totalBatch, + bfoRecord.batchSize, + bfoRecord.offset, + bfoRecord.successBatchCount, + bfoRecord.startTime, + bfoRecord.endTime, + bfoRecord.extra, + bfoRecord.lsn, + progress); + } + + public BackfillObjectKey key() { + return new BackfillObjectKey(indexSchema, indexName, physicalDb, physicalTable, 
physicalPartition);
+        }
+
+        public Integer getProgress() {
+            return progress;
+        }
+
+        public void setProgress(Integer progress) {
+            this.progress = progress;
+        }
+
+        @Override
+        public String toString() {
+            return "BackfillObjectBean{" +
+                "id=" + id +
+                ", jobId=" + jobId +
+                ", tableSchema='" + tableSchema + '\'' +
+                ", tableName='" + tableName + '\'' +
+                ", indexSchema='" + indexSchema + '\'' +
+                ", indexName='" + indexName + '\'' +
+                ", physicalDb='" + physicalDb + '\'' +
+                ", physicalTable='" + physicalTable + '\'' +
+                ", physicalPartition='" + physicalPartition + '\'' +
+                ", sourceFileName='" + sourceFileName + '\'' +
+                ", sourceDirName='" + sourceDirName + '\'' +
+                ", targetFileName='" + targetFileName + '\'' +
+                ", targetDirName='" + targetDirName + '\'' +
+                ", status=" + status +
+                ", detailInfo='" + detailInfo + '\'' +
+                ", totalBatch=" + totalBatch +
+                ", batchSize=" + batchSize +
+                ", offset=" + offset +
+                ", successBatchCount=" + successBatchCount +
+                ", startTime='" + startTime + '\'' +
+                ", endTime='" + endTime + '\'' +
+                ", extra=" + extra +
+                ", lsn=" + lsn +
+                ", progress=" + progress +
+                '}';
+        }
+    }
+
+    public enum BackfillStatus {
+        INIT(0), RUNNING(1), SUCCESS(2), FAILED(3);
+
+        private long value;
+
+        BackfillStatus(long value) {
+            this.value = value;
+        }
+
+        public long getValue() {
+            return value;
+        }
+
+        public static BackfillStatus of(long value) {
+            switch ((int) value) {
+            case 0:
+                return INIT;
+            case 1:
+                return RUNNING;
+            case 2:
+                return SUCCESS;
+            case 3:
+                return FAILED;
+            default:
+                throw new IllegalArgumentException("Unsupported BackfillStatus value " + value);
+            }
+        }
+
+        public static String display(long value) {
+            switch ((int) value) {
+            case 0:
+                return INIT.name();
+            case 1:
+                return RUNNING.name();
+            case 2:
+                return SUCCESS.name();
+            case 3:
+                return FAILED.name();
+            default:
+                return "UNKNOWN";
+            }
+        }
+
+        public boolean is(EnumSet<BackfillStatus> set) {
+            return set.contains(this);
+        }
+
+        public static final EnumSet<BackfillStatus> UNFINISHED = EnumSet.of(INIT, RUNNING, FAILED);
+    }
+
+    public interface Orm<T> {
+
+        T convert(ResultSet resultSet) throws SQLException;
+
+        Map<Integer, ParameterContext> params();
+    }
+
+    private static abstract class AbstractBackfillBean implements Wrapper {
+
+        @Override
+        @SuppressWarnings("unchecked")
+        public <T> T unwrap(Class<T> iface) throws SQLException {
+            if (isWrapperFor(iface)) {
+                return (T) this;
+            } else {
+                throw new SQLException("not a wrapper for " + iface);
+            }
+        }
+    }
+
+    public static class BackFillAggInfo implements SystemTableRecord {
+
+        private long backFillId;
+        private String tableSchema;
+        private String tableName;
+        private long status;
+        private long successBufferSize;
+        private Timestamp startTime;
+        private Timestamp endTime;
+        private long duration;
+
+        @Override
+        public PhysicalBackfillManager.BackFillAggInfo fill(ResultSet resultSet) throws SQLException {
+            this.backFillId = resultSet.getLong("JOB_ID");
+            this.tableSchema = resultSet.getString("TABLE_SCHEMA");
+            this.tableName = resultSet.getString("TABLE_NAME");
+            int minStatus = resultSet.getInt("MIN_STATUS");
+            int maxStatus = resultSet.getInt("MAX_STATUS");
+            if (minStatus == maxStatus) {
+                this.status = minStatus;
+            } else if (BackfillStatus.of(maxStatus) == BackfillStatus.FAILED) {
+                this.status = (int) BackfillStatus.FAILED.getValue();
+            } else {
+                this.status = (int) BackfillStatus.RUNNING.getValue();
+            }
+
+            this.successBufferSize = resultSet.getLong("SUCCESS_BUFFER_SIZE");
+            this.startTime = resultSet.getTimestamp("START_TIME");
+            this.endTime =
resultSet.getTimestamp("END_TIME"); + this.duration = resultSet.getLong("DURATION"); + return this; + } + + public long getBackFillId() { + return backFillId; + } + + public void setBackFillId(long backFillId) { + this.backFillId = backFillId; + } + + public String getTableSchema() { + return tableSchema; + } + + public void setTableSchema(String tableSchema) { + this.tableSchema = tableSchema; + } + + public String getTableName() { + return tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + public long getStatus() { + return status; + } + + public void setStatus(long status) { + this.status = status; + } + + public long getSuccessBufferSize() { + return successBufferSize; + } + + public void setSuccessBufferSize(long successBufferSize) { + this.successBufferSize = successBufferSize; + } + + public Timestamp getStartTime() { + return startTime; + } + + public void setStartTime(Timestamp startTime) { + this.startTime = startTime; + } + + public Timestamp getEndTime() { + return endTime; + } + + public void setEndTime(Timestamp endTime) { + this.endTime = endTime; + } + + public long getDuration() { + return duration; + } + + public void setDuration(long duration) { + this.duration = duration; + } + } + + public static class BackfillObjectRecord extends AbstractBackfillBean implements Orm { + + public static BackfillObjectRecord ORM = new BackfillObjectRecord(); + + private long id; + private long jobId; + private String tableSchema; + private String tableName; + private String indexSchema; + private String indexName; + private String physicalDb; + private String physicalTable; + private String physicalPartition; + private String sourceGroupName; + private String targetGroupName; + private String sourceFileName; + private String sourceDirName; + private String targetFileName; + private String targetDirName; + private int status; + private String detailInfo; + private long totalBatch; + private long batchSize; + private long offset; + private long successBatchCount; + private String startTime; + private String endTime; + private String extra; + private long lsn; + + public BackfillObjectRecord() { + this.id = -1; + this.jobId = -1; + this.tableSchema = null; + this.tableName = null; + this.indexSchema = null; + this.indexName = null; + this.physicalDb = null; + this.physicalTable = null; + this.physicalPartition = null; + this.sourceGroupName = null; + this.targetGroupName = null; + this.sourceFileName = null; + this.sourceDirName = null; + this.targetFileName = null; + this.targetDirName = null; + this.status = -1; + this.detailInfo = null; + this.totalBatch = -1; + this.batchSize = -1; + this.offset = -1; + this.successBatchCount = -1; + this.startTime = null; + this.endTime = null; + this.extra = null; + this.lsn = -1l; + + } + + public BackfillObjectRecord(long jobId, String tableSchema, String tableName, String indexSchema, + String indexName, String physicalDb, String physicalTable, + String physicalPartition, + String sourceGroupName, + String targetGroupName, + Pair srcFileAndDir, + Pair targetFileAndDir, + long totalBatch, + long batchSize, + long offset, + long lsn) { + this.id = id; + this.jobId = jobId; + this.tableSchema = tableSchema; + this.tableName = tableName; + this.indexSchema = indexSchema; + this.indexName = indexName; + this.physicalDb = physicalDb; + this.physicalTable = physicalTable; + this.physicalPartition = physicalPartition; + this.sourceGroupName = sourceGroupName; + this.targetGroupName = targetGroupName; + 
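+            /*
+             * The remaining fields take their INIT-state defaults: file and directory
+             * names are unpacked from the (fileName, dirName) pairs, STATUS starts as
+             * BackfillStatus.INIT, and START_TIME/END_TIME are both stamped with the
+             * creation time (END_TIME is refreshed by subsequent progress updates).
+             */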
this.sourceFileName = srcFileAndDir.getKey(); + this.sourceDirName = srcFileAndDir.getValue(); + this.targetFileName = targetFileAndDir.getKey(); + this.targetDirName = targetFileAndDir.getValue(); + this.status = (int) BackfillStatus.INIT.getValue(); + this.detailInfo = ""; + this.totalBatch = totalBatch; + this.batchSize = batchSize; + this.offset = offset; + this.successBatchCount = 0l; + this.startTime = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(Calendar.getInstance().getTime()); + this.endTime = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(Calendar.getInstance().getTime()); + this.extra = ""; + this.lsn = lsn; + } + + public BackfillObjectRecord(long id, long jobId, String tableSchema, String tableName, String indexSchema, + String indexName, String physicalDb, String physicalTable, + String physicalPartition, String sourceGroupName, String targetGroupName, + String sourceFileName, String sourceDirName, + String targetFileName, String targetDirName, int status, String detailInfo, + long totalBatch, long batchSize, long offset, long successBatchCount, + String startTime, + String endTime, String extra, long lsn) { + this.id = id; + this.jobId = jobId; + this.tableSchema = tableSchema; + this.tableName = tableName; + this.indexSchema = indexSchema; + this.indexName = indexName; + this.physicalDb = physicalDb; + this.physicalTable = physicalTable; + this.physicalPartition = physicalPartition; + this.sourceGroupName = sourceGroupName; + this.targetGroupName = targetGroupName; + this.sourceFileName = sourceFileName; + this.sourceDirName = sourceDirName; + this.targetFileName = targetFileName; + this.targetDirName = targetDirName; + this.status = status; + this.detailInfo = detailInfo; + this.totalBatch = totalBatch; + this.batchSize = batchSize; + this.offset = offset; + this.successBatchCount = successBatchCount; + this.startTime = startTime; + this.endTime = endTime; + this.extra = extra; + this.lsn = lsn; + } + + public BackfillObjectRecord copy() { + BackfillObjectRecord result = new BackfillObjectRecord(); + result.id = this.id; + result.jobId = this.jobId; + result.tableSchema = this.tableSchema; + result.tableName = this.tableName; + result.indexSchema = this.indexSchema; + result.indexName = this.indexName; + result.physicalDb = this.physicalDb; + result.physicalTable = this.physicalTable; + result.physicalPartition = this.physicalPartition; + result.sourceGroupName = this.sourceGroupName; + result.targetGroupName = this.targetGroupName; + result.sourceFileName = this.sourceFileName; + result.sourceDirName = this.sourceDirName; + result.targetFileName = this.targetFileName; + result.targetDirName = this.targetDirName; + result.status = this.status; + result.detailInfo = this.detailInfo; + result.totalBatch = this.totalBatch; + result.batchSize = this.batchSize; + result.offset = this.offset; + result.successBatchCount = this.successBatchCount; + result.startTime = this.startTime; + result.endTime = this.endTime; + result.extra = this.extra; + result.lsn = this.lsn; + return result; + } + + @Override + public BackfillObjectRecord convert(ResultSet resultSet) throws SQLException { + final long id = resultSet.getLong("ID"); + final long jobId = resultSet.getLong("JOB_ID"); + final String tableSchema = resultSet.getString("TABLE_SCHEMA"); + final String tableName = resultSet.getString("TABLE_NAME"); + final String indexSchema = resultSet.getString("INDEX_SCHEMA"); + final String indexName = resultSet.getString("INDEX_NAME"); + final String physicalDb = 
resultSet.getString("PHYSICAL_DB"); + final String physicalTable = resultSet.getString("PHYSICAL_TABLE"); + final String physicalPartition = resultSet.getString("PHYSICAL_PARTITION"); + final String sourceGroupName = resultSet.getString("SOURCE_GROUP_NAME"); + final String targetGroupName = resultSet.getString("TARGET_GROUP_NAME"); + final String sourceFileName = resultSet.getString("SOURCE_FILE_NAME"); + final String sourceDirName = resultSet.getString("SOURCE_DIR_NAME"); + final String targetFileName = resultSet.getString("TARGET_FILE_NAME"); + final String targetDirName = resultSet.getString("TARGET_DIR_NAME"); + final int status = resultSet.getInt("STATUS"); + final String detailInfo = resultSet.getString("DETAIL_INFO"); + final long totalBatch = resultSet.getLong("TOTAL_BATCH"); + final long batchSize = resultSet.getLong("BATCH_SIZE"); + final long offset = resultSet.getLong("offset"); + final long successBatchCount = resultSet.getLong("SUCCESS_BATCH_COUNT"); + final String startTime = resultSet.getString("START_TIME"); + final String endTime = resultSet.getString("END_TIME"); + final String extra = resultSet.getString("EXTRA"); + final Long lsn = resultSet.getLong("LSN"); + + return new BackfillObjectRecord(id, + jobId, + tableSchema, + tableName, + indexSchema, + indexName, + physicalDb, + physicalTable, + physicalPartition, + sourceGroupName, + targetGroupName, + sourceFileName, + sourceDirName, + targetFileName, + targetDirName, + status, + detailInfo, + totalBatch, + batchSize, + offset, + successBatchCount, + startTime, + endTime, + extra, + lsn); + } + + @Override + public Map params() { + final Map params = new HashMap<>(); + int i = 1; + params.put(i, new ParameterContext(ParameterMethod.setLong, new Object[] {i++, this.jobId})); + params.put(i, new ParameterContext(ParameterMethod.setString, new Object[] {i++, this.tableSchema})); + params.put(i, new ParameterContext(ParameterMethod.setString, new Object[] {i++, this.tableName})); + params.put(i, new ParameterContext(ParameterMethod.setString, new Object[] {i++, this.indexSchema})); + params.put(i, new ParameterContext(ParameterMethod.setString, new Object[] {i++, this.indexName})); + params.put(i, new ParameterContext(ParameterMethod.setString, new Object[] {i++, this.physicalDb})); + params.put(i, new ParameterContext(ParameterMethod.setString, new Object[] {i++, this.physicalTable})); + params.put(i, new ParameterContext(ParameterMethod.setString, new Object[] {i++, this.physicalPartition})); + params.put(i, new ParameterContext(ParameterMethod.setString, new Object[] {i++, this.sourceGroupName})); + params.put(i, new ParameterContext(ParameterMethod.setString, new Object[] {i++, this.targetGroupName})); + params.put(i, new ParameterContext(ParameterMethod.setString, new Object[] {i++, this.sourceFileName})); + params.put(i, new ParameterContext(ParameterMethod.setString, new Object[] {i++, this.sourceDirName})); + params.put(i, new ParameterContext(ParameterMethod.setString, new Object[] {i++, this.targetFileName})); + params.put(i, new ParameterContext(ParameterMethod.setString, new Object[] {i++, this.targetDirName})); + params.put(i, new ParameterContext(ParameterMethod.setInt, new Object[] {i++, this.status})); + params.put(i, new ParameterContext(ParameterMethod.setString, new Object[] {i++, this.detailInfo})); + params.put(i, new ParameterContext(ParameterMethod.setLong, new Object[] {i++, this.totalBatch})); + params.put(i, new ParameterContext(ParameterMethod.setLong, new Object[] {i++, this.batchSize})); + 
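+            /*
+             * The key of each map entry equals the 1-based JDBC slot recorded inside
+             * the ParameterContext, so ParameterMethod.setParameters(ps, params) can
+             * replay them against the PreparedStatement in the exact order of the 24
+             * placeholders of SQL_INSERT_BACKFILL_OBJECT, e.g. ps.setLong(1, jobId),
+             * ps.setString(2, tableSchema), and so on.
+             */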
params.put(i, new ParameterContext(ParameterMethod.setLong, new Object[] {i++, this.offset})); + params.put(i, new ParameterContext(ParameterMethod.setLong, new Object[] {i++, this.successBatchCount})); + params.put(i, new ParameterContext(ParameterMethod.setString, new Object[] {i++, this.startTime})); + params.put(i, new ParameterContext(ParameterMethod.setString, new Object[] {i++, this.endTime})); + params.put(i, new ParameterContext(ParameterMethod.setString, new Object[] {i++, this.extra})); + params.put(i, new ParameterContext(ParameterMethod.setLong, new Object[] {i++, this.lsn})); + return params; + } + + @Override + public boolean isWrapperFor(Class iface) { + return BackfillObjectRecord.class.isAssignableFrom(iface); + } + + public long getId() { + return id; + } + + public void setId(long id) { + this.id = id; + } + + public long getJobId() { + return jobId; + } + + public void setJobId(long jobId) { + this.jobId = jobId; + } + + public String getTableSchema() { + return tableSchema; + } + + public void setTableSchema(String tableSchema) { + this.tableSchema = tableSchema; + } + + public String getTableName() { + return tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + public String getIndexSchema() { + return indexSchema; + } + + public void setIndexSchema(String indexSchema) { + this.indexSchema = indexSchema; + } + + public String getIndexName() { + return indexName; + } + + public void setIndexName(String indexName) { + this.indexName = indexName; + } + + public String getPhysicalDb() { + return physicalDb; + } + + public void setPhysicalDb(String physicalDb) { + this.physicalDb = physicalDb; + } + + public String getPhysicalTable() { + return physicalTable; + } + + public void setPhysicalTable(String physicalTable) { + this.physicalTable = physicalTable; + } + + public String getPhysicalPartition() { + return physicalPartition; + } + + public void setPhysicalPartition(String physicalPartition) { + this.physicalPartition = physicalPartition; + } + + public String getSourceGroupName() { + return sourceGroupName; + } + + public void setSourceGroupName(String sourceGroupName) { + this.sourceGroupName = sourceGroupName; + } + + public String getTargetGroupName() { + return targetGroupName; + } + + public void setTargetGroupName(String targetGroupName) { + this.targetGroupName = targetGroupName; + } + + public String getSourceFileName() { + return sourceFileName; + } + + public void setSourceFileName(String sourceFileName) { + this.sourceFileName = sourceFileName; + } + + public String getSourceDirName() { + return sourceDirName; + } + + public void setSourceDirName(String sourceDirName) { + this.sourceDirName = sourceDirName; + } + + public String getTargetFileName() { + return targetFileName; + } + + public void setTargetFileName(String targetFileName) { + this.targetFileName = targetFileName; + } + + public String getTargetDirName() { + return targetDirName; + } + + public void setTargetDirName(String targetDirName) { + this.targetDirName = targetDirName; + } + + public int getStatus() { + return status; + } + + public void setStatus(int status) { + this.status = status; + } + + public String getDetailInfo() { + return detailInfo; + } + + public void setDetailInfo(String detailInfo) { + this.detailInfo = detailInfo; + } + + public long getTotalBatch() { + return totalBatch; + } + + public void setTotalBatch(long totalBatch) { + this.totalBatch = totalBatch; + } + + public long getBatchSize() { + return batchSize; + } + + 
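+        /*
+         * Usage sketch (variable names illustrative): the record doubles as its own
+         * ORM. The stateless ORM prototype maps rows read back from the system table,
+         * while params() serializes a record for the INSERT statements:
+         *
+         *   List<BackfillObjectRecord> records =
+         *       query(SQL_SELECT_BACKFILL_OBJECT, params, connection, BackfillObjectRecord.ORM);
+         */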
public void setBatchSize(long batchSize) { + this.batchSize = batchSize; + } + + public long getOffset() { + return offset; + } + + public void setOffset(long offset) { + this.offset = offset; + } + + public long getSuccessBatchCount() { + return successBatchCount; + } + + public void setSuccessBatchCount(long successBatchCount) { + this.successBatchCount = successBatchCount; + } + + public String getStartTime() { + return startTime; + } + + public void setStartTime(String startTime) { + this.startTime = startTime; + } + + public String getEndTime() { + return endTime; + } + + public void setEndTime(String endTime) { + this.endTime = endTime; + } + + public String getExtra() { + return extra; + } + + public void setExtra(String extra) { + this.extra = extra; + } + + public long getLsn() { + return lsn; + } + + public void setLsn(long lsn) { + this.lsn = lsn; + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/physicalbackfill/PhysicalBackfillReporter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/physicalbackfill/PhysicalBackfillReporter.java new file mode 100644 index 000000000..d937a11b4 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/physicalbackfill/PhysicalBackfillReporter.java @@ -0,0 +1,59 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.executor.physicalbackfill;
+
+import com.google.common.collect.ImmutableList;
+
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class PhysicalBackfillReporter {
+    private final PhysicalBackfillManager backfillManager;
+
+    /**
+     * Extractor position mark
+     */
+    private PhysicalBackfillManager.BackfillBean backfillBean;
+
+    public PhysicalBackfillReporter(PhysicalBackfillManager backfillManager) {
+        this.backfillManager = backfillManager;
+    }
+
+    public PhysicalBackfillManager getBackfillManager() {
+        return backfillManager;
+    }
+
+    public PhysicalBackfillManager.BackfillBean loadBackfillMeta(long backfillId, String schemaName, String phyDb,
+                                                                 String physicalTable,
+                                                                 String phyPartition) {
+        backfillBean = backfillManager.loadBackfillMeta(backfillId, schemaName, phyDb, physicalTable, phyPartition);
+        return backfillBean;
+    }
+
+    public PhysicalBackfillManager.BackfillBean getBackfillBean() {
+        return backfillBean;
+    }
+
+    public void updateBackfillInfo(PhysicalBackfillManager.BackfillObjectBean backfillObject) {
+        backfillManager.updateBackfillObjectBean(ImmutableList.of(backfillObject));
+    }
+
+    public void updateBackfillObject(PhysicalBackfillManager.BackfillObjectRecord backfillObjectRecord) {
+        backfillManager.updateBackfillObject(ImmutableList.of(backfillObjectRecord));
+    }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/physicalbackfill/PhysicalBackfillUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/physicalbackfill/PhysicalBackfillUtils.java
new file mode 100644
index 000000000..0a331d9b1
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/physicalbackfill/PhysicalBackfillUtils.java
@@ -0,0 +1,937 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.alibaba.polardbx.executor.physicalbackfill; + +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.jdbc.BytesSql; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.AddressUtils; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.executor.common.ExecutorContext; +import com.alibaba.polardbx.executor.common.TopologyHandler; +import com.alibaba.polardbx.executor.ddl.newengine.cross.CrossEngineValidator; +import com.alibaba.polardbx.executor.spi.IGroupExecutor; +import com.alibaba.polardbx.executor.utils.GroupingFetchLSN; +import com.alibaba.polardbx.gms.ha.HaSwitchParams; +import com.alibaba.polardbx.gms.ha.impl.StorageHaManager; +import com.alibaba.polardbx.gms.ha.impl.StorageInstHaContext; +import com.alibaba.polardbx.gms.ha.impl.StorageNodeHaInfo; +import com.alibaba.polardbx.gms.ha.impl.StorageRole; +import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; +import com.alibaba.polardbx.gms.metadb.misc.DdlEngineTaskAccessor; +import com.alibaba.polardbx.gms.node.StorageStatus; +import com.alibaba.polardbx.gms.node.StorageStatusManager; +import com.alibaba.polardbx.gms.partition.PhysicalBackfillDetailInfoFieldJSON; +import com.alibaba.polardbx.gms.tablegroup.TableGroupLocation; +import com.alibaba.polardbx.gms.topology.DbGroupInfoRecord; +import com.alibaba.polardbx.gms.topology.DbInfoManager; +import com.alibaba.polardbx.gms.topology.DbInfoRecord; +import com.alibaba.polardbx.gms.topology.DbTopologyManager; +import com.alibaba.polardbx.gms.topology.GroupDetailInfoExRecord; +import com.alibaba.polardbx.gms.topology.ServerInstIdManager; +import com.alibaba.polardbx.gms.topology.StorageInfoRecord; +import com.alibaba.polardbx.gms.util.PasswdUtil; +import com.alibaba.polardbx.group.jdbc.TGroupDataSource; +import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.config.table.ScaleOutPlanUtil; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.dialect.DbType; +import com.alibaba.polardbx.optimizer.core.planner.SqlConverter; +import com.alibaba.polardbx.optimizer.core.rel.PhyTableOpBuildParams; +import com.alibaba.polardbx.optimizer.core.rel.PhyTableOperation; +import com.alibaba.polardbx.optimizer.core.rel.PhyTableOperationFactory; +import com.alibaba.polardbx.rpc.compatible.XDataSource; +import com.alibaba.polardbx.rpc.pool.XConnection; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.RateLimiter; +import com.mysql.cj.polarx.protobuf.PolarxPhysicalBackfill; +import io.grpc.netty.shaded.io.netty.util.internal.StringUtil; +import org.apache.calcite.plan.RelOptCluster; +import org.apache.calcite.plan.RelOptUtil; +import org.apache.calcite.plan.RelTraitSet; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.rel.type.RelDataTypeSystem; +import org.apache.calcite.sql.SqlKind; +import org.apache.calcite.sql.SqlSelect; +import org.apache.calcite.sql.type.SqlTypeFactoryImpl; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.text.DecimalFormat; 
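+// The helpers below cover the supporting plumbing of physical backfill: a
+// process-wide pool of per-DN XDataSource handles (keyed by host, port and
+// user@db), leader/follower endpoint lookup through the HA manager, fetching
+// the leader's latest LSN and waiting for replicas via "SET read_lsn",
+// topology health checks against metadb, a network throughput probe between
+// two DNs, and cleanup of temporary .ibd/.cfg/.cfp files over the polarx
+// physical-backfill protocol.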
+import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.TreeSet; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; + +/** + * Created by luoyanxin. + * + * @author luoyanxin + */ +public class PhysicalBackfillUtils { + public final static int MAX_RETRY = 3; + private static final Map dataSourcePool = new ConcurrentHashMap<>(); + private static final String CHECK_TABLE = "select 1 from %s limit 1"; + private static final String SELECT_PHY_PARTITION_NAMES = + "SELECT partition_name FROM INFORMATION_SCHEMA.PARTITIONS WHERE TABLE_NAME = '%s' and table_schema='%s' and partition_name is not null"; + private static final String TABLESPACE_IS_DISCARD = "Tablespace has been discarded"; + private static final String TEST_SPEED_SRC_DIR = "/tmp/test_speed_out.idb"; + private static final String TEST_SPEED_TAR_DIR = "/tmp/test_speed_in.idb"; + + public static final String IBD = "ibd"; + public static final String CFG = "cfg"; + + public static final String CFP = "cfp"; + public final static String FLUSH_TABLE_SQL_TEMPLATE = "FLUSH TABLES %s FOR EXPORT"; + public final static String UNLOCK_TABLE = "UNLOCK TABLES"; + public final static String TEMP_FILE_POSTFIX = ".TEMPFILE"; + public final static String IDB_DIR_PREFIX = "./"; + public final static int miniBatchForeachThread = 10; + public final static String SQL_LOG_BIN = "sql_log_bin"; + + // for 8032 the in-paramater of dbms_xa.advance_gcn_no_flush is unsigned + private final static long INIT_TSO = 0l; + private final static PhysicalBackfillRateLimiter rateLimiter = new PhysicalBackfillRateLimiter(); + + public static XDataSource initializeDataSource(String host, int port, String username, String password, + String defaultDB, String name) { + synchronized (dataSourcePool) { + final XDataSource clientPool = + dataSourcePool + .computeIfAbsent(digest(host, port, username + "@" + defaultDB), + key -> { + //todo for log + SQLRecorderLogger.ddlLogger.info( + String.format("host:%s,port:%d,u:%s,mask:%s,defaultDB:%s,name:%s", host, + port, username, password.substring(0, Math.min(3, password.length())) + "****", + defaultDB, name)); + + return new XDataSource(host, port, username, password, defaultDB, name); + }); + return clientPool; + } + } + + public static void destroyDataSources() { + if (dataSourcePool.isEmpty()) { + return; + } + boolean existPhysicalBackfillTask = true; + try (Connection metaDbConn = MetaDbDataSource.getInstance().getConnection()) { + DdlEngineTaskAccessor accessor = new DdlEngineTaskAccessor(); + accessor.setConnection(metaDbConn); + existPhysicalBackfillTask = accessor.existPhysicalBackfillTask(); + } catch (Exception ex) { + SQLRecorderLogger.ddlLogger.info("destroy physicalBackfillDataSources fail:" + ex); + } + if (existPhysicalBackfillTask) { + return; + } + synchronized (dataSourcePool) { + existPhysicalBackfillTask = true; + //double check + if (dataSourcePool.isEmpty()) { + return; + } + try (Connection metaDbConn = MetaDbDataSource.getInstance().getConnection()) { + DdlEngineTaskAccessor accessor = new DdlEngineTaskAccessor(); + accessor.setConnection(metaDbConn); + existPhysicalBackfillTask = accessor.existPhysicalBackfillTask(); + } catch (Exception ex) { + SQLRecorderLogger.ddlLogger.info("destroy physicalBackfillDataSources fail:" + ex); + } + if (!existPhysicalBackfillTask) { + String msg = 
"begin to destroy physicalBackfillDataSources"; + SQLRecorderLogger.ddlLogger.info(msg); + for (Map.Entry entry : dataSourcePool.entrySet()) { + entry.getValue().close(); + } + dataSourcePool.clear(); + } else { + SQLRecorderLogger.ddlLogger.info("ignore destroy physicalBackfillDataSources due to task is running"); + } + } + } + + public static String digest(String host, int port, String username) { + return username + "@" + host + ":" + port; + } + + public static PhyTableOperation generateTableOperation(String schemaName, String logicalName, String dbIndex, + String phyTableName, String sqlTemplate, + ExecutionContext ec) { + final RelOptCluster cluster = SqlConverter.getInstance(schemaName, ec).createRelOptCluster(); + RelTraitSet traitSet = RelTraitSet.createEmpty(); + RelDataTypeFactory typeFactory = new SqlTypeFactoryImpl(RelDataTypeSystem.DEFAULT); + RelDataType rowType = RelOptUtil.createDmlRowType(SqlKind.OTHER_DDL, typeFactory); + + PhyTableOpBuildParams buildParams = new PhyTableOpBuildParams(); + buildParams.setSchemaName(schemaName); + buildParams.setLogTables(ImmutableList.of(logicalName)); + buildParams.setGroupName(dbIndex); + buildParams.setPhyTables(ImmutableList.of(ImmutableList.of(phyTableName))); + buildParams.setSqlKind(SqlKind.OTHER_DDL); + buildParams.setLockMode(SqlSelect.LockMode.UNDEF); + + buildParams.setLogicalPlan(null); + buildParams.setCluster(cluster); + buildParams.setTraitSet(traitSet); + buildParams.setRowType(rowType); + buildParams.setCursorMeta(null); + + buildParams.setBytesSql(BytesSql.getBytesSql(sqlTemplate)); + buildParams.setDbType(DbType.MYSQL); + return PhyTableOperationFactory.getInstance().buildPhyTblOpByParams(buildParams); + } + + /* + * type = 0; delete source and target files + * type = 1; delete source files only + * type = 2; delete target files only + * */ + public static void rollbackCopyIbd(Long backfillId, String tableSchema, String tableName, int type, + ExecutionContext ec) { + PhysicalBackfillManager physicalBackfillManager = new PhysicalBackfillManager(tableSchema); + List backfillObjectRecords = + physicalBackfillManager.queryBackfillObject(backfillId, tableSchema, tableName); + for (PhysicalBackfillManager.BackfillObjectRecord record : GeneralUtil.emptyIfNull(backfillObjectRecords)) { + PhysicalBackfillDetailInfoFieldJSON detailInfoFieldJSON = + PhysicalBackfillDetailInfoFieldJSON.fromJson(record.getDetailInfo()); + if (detailInfoFieldJSON.getSourceHostAndPort() != null) { + if (type != 2) { + deleteInnodbDataFiles(tableSchema, detailInfoFieldJSON.getSourceHostAndPort(), + record.getSourceDirName(), record.getSourceGroupName(), record.getPhysicalDb(), true, ec); + } + + if (type != 1) { + DbGroupInfoRecord tarDbGroupInfoRecord = + ScaleOutPlanUtil.getDbGroupInfoByGroupName(record.getTargetGroupName()); + + Pair targetDbAndGroup = + Pair.of(tarDbGroupInfoRecord.phyDbName.toLowerCase(), tarDbGroupInfoRecord.groupName); + for (Pair pair : GeneralUtil.emptyIfNull( + detailInfoFieldJSON.getTargetHostAndPorts())) { + deleteInnodbDataFiles(tableSchema, pair, record.getTargetDirName(), record.getTargetGroupName(), + targetDbAndGroup.getKey(), true, ec); + } + } + } + } + } + + public static void deleteInnodbDataFiles(String schemaName, Pair hostInfo, String dir, + String groupName, String physicalDb, + boolean couldIgnore, + ExecutionContext ec) { + PhysicalBackfillUtils.deleteInnodbDataFile(schemaName, groupName, physicalDb, + hostInfo.getKey(), + hostInfo.getValue(), + PhysicalBackfillUtils.convertToCfgFileName(dir, CFG), 
couldIgnore, ec); + PhysicalBackfillUtils.deleteInnodbDataFile(schemaName, groupName, physicalDb, + hostInfo.getKey(), + hostInfo.getValue(), + PhysicalBackfillUtils.convertToCfgFileName(dir, CFP), couldIgnore, ec); + PhysicalBackfillUtils.deleteInnodbDataFile(schemaName, groupName, physicalDb, + hostInfo.getKey(), + hostInfo.getValue(), dir, true, ec); + } + + public static void deleteInnodbDataFile(String schemaName, String groupName, String phyDb, String host, int port, + String tempFilePath, boolean couldIgnore, ExecutionContext ec) { + + String msg = "begin to delete the idb file " + tempFilePath + " in " + host + ":" + port + " group" + groupName; + SQLRecorderLogger.ddlLogger.info(msg); + String storageInstId = DbTopologyManager.getStorageInstIdByGroupName(schemaName, groupName); + Pair userInfo = getUserPasswd(storageInstId); + boolean success = false; + int tryTime = 1; + boolean ignore = false; + + boolean healthyCheck = + ec.getParamManager().getBoolean(ConnectionParams.PHYSICAL_BACKFILL_STORAGE_HEALTHY_CHECK); + + do { + try (XConnection conn = (XConnection) (PhysicalBackfillUtils.getXConnectionForStorage(phyDb, host, port, + userInfo.getKey(), userInfo.getValue(), -1))) { + PolarxPhysicalBackfill.FileManageOperator.Builder builder = + PolarxPhysicalBackfill.FileManageOperator.newBuilder(); + + PolarxPhysicalBackfill.TableInfo.Builder tableInfoBuilder = + PolarxPhysicalBackfill.TableInfo.newBuilder(); + tableInfoBuilder.setTableSchema(""); + tableInfoBuilder.setTableName(""); + tableInfoBuilder.setPartitioned(false); + + PolarxPhysicalBackfill.FileInfo.Builder fileInfoBuilder = PolarxPhysicalBackfill.FileInfo.newBuilder(); + fileInfoBuilder.setTempFile(true); + fileInfoBuilder.setFileName(""); + fileInfoBuilder.setDirectory(tempFilePath); + fileInfoBuilder.setPartitionName(""); + + tableInfoBuilder.addFileInfo(fileInfoBuilder.build()); + + builder.setTableInfo(tableInfoBuilder); + builder.setOperatorType(PolarxPhysicalBackfill.FileManageOperator.Type.DELETE_IBD_FROM_TEMP_DIR_IN_SRC); + + conn.execDeleteTempIbdFile(builder); + success = true; + } catch (Exception ex) { + if (tryTime > MAX_RETRY) { + if (couldIgnore && ex != null && ex.toString() != null + && ex.toString().indexOf("connect fail") != -1) { + List> hostsIpAndPort = + PhysicalBackfillUtils.getMySQLServerNodeIpAndPorts(storageInstId, healthyCheck); + Optional> targetHostOpt = + hostsIpAndPort.stream().filter(o -> o.getKey().equalsIgnoreCase(host) + && o.getValue().intValue() == port).findFirst(); + if (!targetHostOpt.isPresent()) { + //maybe backup in other host + ignore = true; + break; + } + } + throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, ex); + } + try { + Thread.sleep(50); + } catch (Exception e) { + ///ignore + } + tryTime++; + } + } while (!success); + if (ignore) { + msg = "ignore delete the idb file " + tempFilePath + " in " + host + ":" + port + + " because host is not exist now"; + } else { + msg = "already delete the idb file " + tempFilePath + " in " + host + ":" + port; + } + SQLRecorderLogger.ddlLogger.info(msg); + } + + public static Connection getXConnectionForStorage(String schema, String host, int port, String user, String passwd, + int socketTimeout) throws SQLException { + XDataSource dataSource = initializeDataSource(host, port, user, passwd, schema, "importTableDataSource"); + //dataSource.setDefaultQueryTimeoutMillis(); + Connection conn = dataSource.getConnection(); + return conn; + } + + public static List> getMySQLServerNodeIpAndPorts(String storageInstId, + boolean 
healthCheck) { + HaSwitchParams haSwitchParams = StorageHaManager.getInstance().getStorageHaSwitchParams(storageInstId); + if (haSwitchParams == null) { + throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, + String.format("no found the storage inst for %s", storageInstId)); + } + List> hostInfos = new ArrayList<>(haSwitchParams.storageHaInfoMap.size()); + List> loggerList = new ArrayList<>(); + List> learnerList = new ArrayList<>(); + //not xport + List> masterNodesNormPortList = new ArrayList<>(); + //not xport + List> learnerNodesNormPortList = new ArrayList<>(); + + for (StorageNodeHaInfo haInfo : haSwitchParams.storageHaInfoMap.values()) { + Pair nodeIpPort = AddressUtils.getIpPortPairByAddrStr(haInfo.getAddr()); + String ip = nodeIpPort.getKey(); + int xport = haInfo.getXPort(); + if (haInfo.getRole() == StorageRole.LOGGER) { + loggerList.add(Pair.of(ip, xport)); + masterNodesNormPortList.add(nodeIpPort); + } else if (haInfo.getRole() == StorageRole.LEARNER) { + learnerList.add(Pair.of(ip, xport)); + learnerNodesNormPortList.add(nodeIpPort); + } else { + hostInfos.add(Pair.of(ip, xport)); + masterNodesNormPortList.add(nodeIpPort); + } + } + if (healthCheck && !nodeHealthCheck(storageInstId, masterNodesNormPortList)) { + throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, + String.format("storage %s is not healthy", storageInstId)); + } + if (healthCheck) { + List storageSlaveInfos = + ServerInstIdManager.getInstance().getSlaveStorageInfosByMasterStorageInstId(storageInstId); + Set slaveNodeSet = new HashSet<>(); + Set learnerNodeSet = new HashSet<>(); + storageSlaveInfos.stream().filter(o -> o.isVip == 0).forEach(o -> + slaveNodeSet.add(o.ip.toLowerCase() + "@" + o.port)); + learnerNodesNormPortList.stream() + .forEach(o -> learnerNodeSet.add(o.getKey().toLowerCase() + "@" + o.getValue())); + Set slaveIds = new TreeSet<>(String::compareToIgnoreCase); + storageSlaveInfos.stream().forEach(o -> slaveIds.add(o.storageInstId)); + if (learnerNodeSet.size() < slaveNodeSet.size()) { + SQLRecorderLogger.ddlLogger.info( + String.format("learner from HA context:%s", learnerNodeSet)); + SQLRecorderLogger.ddlLogger.info( + String.format("slave from Metadb:%s", slaveNodeSet)); + throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, + String.format("storage %s is not healthy", storageInstId)); + } + for (String slaveIpAndPort : slaveNodeSet) { + if (!learnerNodeSet.contains(slaveIpAndPort)) { + SQLRecorderLogger.ddlLogger.info( + String.format("slave host %s from Metadb is not exist in HA context", + slaveIpAndPort)); + throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, + String.format("storage %s is not healthy", storageInstId)); + } + } + } + + hostInfos.addAll(learnerList); + return hostInfos; + } + + public static boolean nodeHealthCheck(String storageInstId, + List> masterNodesList) { + Map storageInstHaCtxCache = + StorageHaManager.getInstance().getStorageHaCtxCache(); + List nodesInfoFromMetadb = + storageInstHaCtxCache.get(storageInstId).getStorageNodeInfos().values().stream().filter(o -> o.isVip != 1) + .collect( + Collectors.toList()); + if (nodesInfoFromMetadb.size() != masterNodesList.size()) { + SQLRecorderLogger.ddlLogger.info( + String.format("storage from HA context:%s", masterNodesList)); + SQLRecorderLogger.ddlLogger.info( + String.format("storage from Metadb:%s", + nodesInfoFromMetadb.stream().map(o -> o.getHostPort()).collect( + Collectors.toList()))); + return false; + } else { + Set nodeInfoSet = new HashSet<>(); + masterNodesList.stream().forEach(o -> 
nodeInfoSet.add(o.getKey() + "@" + o.getValue())); + for (StorageInfoRecord storageInfoRecord : nodesInfoFromMetadb) { + if (!nodeInfoSet.contains(storageInfoRecord.ip.toLowerCase() + "@" + storageInfoRecord.port)) { + SQLRecorderLogger.ddlLogger.info( + String.format("storage %s from Metadb is not exist in HA context", + storageInfoRecord.ip.toLowerCase() + "@" + storageInfoRecord.port)); + return false; + } + } + } + return true; + } + + public static Pair getMySQLLeaderIpAndPort(String storageInstId) { + HaSwitchParams haSwitchParams = StorageHaManager.getInstance().getStorageHaSwitchParams(storageInstId); + if (haSwitchParams == null) { + throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, + String.format("no found the storage inst for %s", storageInstId)); + } + Pair nodeIpPort = AddressUtils.getIpPortPairByAddrStr(haSwitchParams.curAvailableAddr); + String ip = nodeIpPort.getKey(); + int xport = haSwitchParams.xport; + return Pair.of(ip, xport); + } + + // if only has leader, return leader + public static Pair getMySQLOneFollowerIpAndPort(String storageInstId) { + HaSwitchParams haSwitchParams = StorageHaManager.getInstance().getStorageHaSwitchParams(storageInstId); + if (haSwitchParams == null) { + throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, + String.format("no found the storage inst for %s", storageInstId)); + } + Pair nodeIpPort = AddressUtils.getIpPortPairByAddrStr(haSwitchParams.curAvailableAddr); + String ip = nodeIpPort.getKey(); + int xport = haSwitchParams.xport; + Pair leaderIpPort = new Pair<>(ip, xport); + StorageNodeHaInfo followerNodeHaInfo = null; + List> HostInfos = new ArrayList<>(haSwitchParams.storageHaInfoMap.size()); + for (StorageNodeHaInfo haInfo : haSwitchParams.storageHaInfoMap.values()) { + if (haInfo.getRole() == StorageRole.FOLLOWER) { + followerNodeHaInfo = haInfo; + break; + } + } + if (followerNodeHaInfo != null) { + Pair follerNodeIpPort = AddressUtils.getIpPortPairByAddrStr(followerNodeHaInfo.getAddr()); + return new Pair<>(follerNodeIpPort.getKey(), followerNodeHaInfo.getXPort()); + } else { + return leaderIpPort; + } + } + + public static Long getLeaderCurrentLatestLsn(String schemaName, String group) { + TopologyHandler topologyHandler = ExecutorContext.getContext(schemaName).getTopologyHandler(); + IGroupExecutor srcGroupExecutor = topologyHandler.get(group); + if (srcGroupExecutor == null) { + throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, String.format("invalid group:%s", group)); + } + + TGroupDataSource groupDataSource = (TGroupDataSource) srcGroupExecutor.getDataSource(); + try { + Long lsn = + GroupingFetchLSN.getInstance().groupingLsn(groupDataSource.getOneAtomDs(true).getDnId(), INIT_TSO); + String lsnDetail = + String.format("the latest lsn in dn.group:[%s.%s] is %s", groupDataSource.getOneAtomDs(true).getDnId(), + group, lsn.toString()); + SQLRecorderLogger.ddlLogger.info(lsnDetail); + return lsn; + } catch (Exception e) { + String errMsg = String.format("fail to get the latest lsn in group:[%s.%s]", + schemaName, group); + SQLRecorderLogger.ddlLogger.info(errMsg); + throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, e, errMsg); + } + } + + public static Map waitLsn(String schemaName, Map groupAndStorageIdMap, + boolean rollback, + ExecutionContext ec) { + Map groupAndLsnMap = new HashMap<>(); + boolean healthyCheck = + ec.getParamManager().getBoolean(ConnectionParams.PHYSICAL_BACKFILL_STORAGE_HEALTHY_CHECK); + for (Map.Entry entry : groupAndStorageIdMap.entrySet()) { + Long masterLsn = 
getLeaderCurrentLatestLsn(schemaName, entry.getKey());
+            groupAndLsnMap.put(entry.getKey(), masterLsn);
+            DbGroupInfoRecord dbGroupInfoRecord = ScaleOutPlanUtil.getDbGroupInfoByGroupName(entry.getKey());
+            List<Pair<String, Integer>> allNodes =
+                getMySQLServerNodeIpAndPorts(entry.getValue(), healthyCheck);
+            for (Pair<String, Integer> pair : allNodes) {
+                Pair<String, String> userInfo = getUserPasswd(entry.getValue());
+                boolean success = false;
+                long retryTime = 0;
+                do {
+                    if (!rollback && (CrossEngineValidator.isJobInterrupted(ec) || Thread.currentThread()
+                        .isInterrupted())) {
+                        long jobId = ec.getDdlJobId();
+                        throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR,
+                            "The job '" + jobId + "' has been cancelled");
+                    }
+                    long maxRetry = OptimizerContext.getContext(schemaName).getParamManager().getLong(
+                        ConnectionParams.PHYSICAL_BACKFILL_MAX_RETRY_WAIT_FOLLOWER_TO_LSN);
+                    String cmd = String.format("SET read_lsn = %d", masterLsn);
+                    try (Connection conn = getXConnectionForStorage(dbGroupInfoRecord.phyDbName, pair.getKey(),
+                        pair.getValue(), userInfo.getKey(), userInfo.getValue(), -1)) {
+                        try (Statement stmt = conn.createStatement()) {
+                            SQLRecorderLogger.ddlLogger.info(cmd);
+                            stmt.execute(cmd);
+                        }
+                        success = true;
+                    } catch (SQLException ex) {
+                        if (retryTime++ > maxRetry) {
+                            throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, ex,
+                                String.format("wait to sync to lsn:[%d] fail in node:{%s,%d}, after %d retry",
+                                    masterLsn, pair.getKey(),
+                                    pair.getValue(), retryTime));
+                        }
+                        SQLRecorderLogger.ddlLogger.info(
+                            "fail to execute:" + cmd + " in " + pair.getKey() + ":" + pair.getValue() + " for schema:"
+                                + schemaName + " group:" + entry.getKey() + " " + ex);
+                    }
+                } while (!success);
+            }
+
+        }
+        return groupAndLsnMap;
+    }
+
+    public static long getTheMaxSlaveLatency() {
+        long maxLatency;
+        int retry = 3;
+        int sleep_interval = 3;
+        do {
+            maxLatency = -1;
+            retry--;
+            Map<String, StorageStatus> statusMap = StorageStatusManager.getInstance().getStorageStatus();
+            for (Map.Entry<String, StorageStatus> entry : statusMap.entrySet()) {
+                if (maxLatency < entry.getValue().getDelaySecond()) {
+                    maxLatency = entry.getValue().getDelaySecond();
+                }
+                if (maxLatency == Integer.MAX_VALUE) {
+                    try {
+                        Thread.sleep(sleep_interval * 1000L);
+                    } catch (InterruptedException e) {
+                        //pass
+                    }
+                    break;
+                }
+            }
+        } while (retry > 0 && maxLatency == Integer.MAX_VALUE);
+        return maxLatency;
+    }
+
+    public static double netWorkSpeedTest(ExecutionContext ec) {
+
+        List<DbInfoRecord> dbInfoList = DbInfoManager.getInstance().getDbInfoList();
+        List<String> schemaList = dbInfoList.stream()
+            .filter(DbInfoRecord::isUserDb).map(x -> x.dbName).collect(Collectors.toList());
+
+        boolean enableSpeedTest = ec.getParamManager().getBoolean(ConnectionParams.PHYSICAL_BACKFILL_SPEED_TEST);
+
+        if (GeneralUtil.isEmpty(schemaList) || !enableSpeedTest) {
+            return 0.00;
+        }
+        // write one batch first, then loop over read and write to measure the speed
+        long batchSize = ec.getParamManager().getLong(ConnectionParams.PHYSICAL_BACKFILL_BATCH_SIZE);
+        long maxTestTime = ec.getParamManager().getLong(ConnectionParams.PHYSICAL_BACKFILL_NET_SPEED_TEST_TIME);
+        byte[] buffer = new byte[(int) batchSize];
+        physicalBackfillLoader physicalBackfillLoader = new physicalBackfillLoader(ec.getSchemaName(), "");
+
+        PolarxPhysicalBackfill.TransferFileDataOperator.Builder builder =
+            PolarxPhysicalBackfill.TransferFileDataOperator.newBuilder();
+
+        builder.setOperatorType(PolarxPhysicalBackfill.TransferFileDataOperator.Type.PUT_DATA_TO_TAR_IBD);
+        PolarxPhysicalBackfill.FileInfo.Builder fileInfoBuilder = PolarxPhysicalBackfill.FileInfo.newBuilder();
+        fileInfoBuilder.setFileName(TEST_SPEED_SRC_DIR);
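+        /*
+         * Speed-test layout: the zero-filled buffer above (PHYSICAL_BACKFILL_BATCH_SIZE
+         * bytes) is first PUT into the scratch file /tmp/test_speed_out.idb on the
+         * source DN; the loop further below then repeatedly reads the block back
+         * (GET_DATA_FROM_SRC_IBD) and applies it to /tmp/test_speed_in.idb on the
+         * target DN until PHYSICAL_BACKFILL_NET_SPEED_TEST_TIME elapses; the averaged
+         * throughput in KB/s is the method's return value.
+         */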
+ fileInfoBuilder.setTempFile(false);
+ fileInfoBuilder.setDirectory(TEST_SPEED_SRC_DIR);
+ fileInfoBuilder.setPartitionName("");
+ builder.setFileInfo(fileInfoBuilder.build());
+ builder.setBuffer(com.google.protobuf.ByteString.copyFrom(buffer));
+ builder.setBufferLen(batchSize);
+ builder.setOffset(0);
+
+ List<GroupDetailInfoExRecord> groupDetailInfoExRecords =
+ TableGroupLocation.getOrderedGroupList(schemaList.get(0));
+ GroupDetailInfoExRecord srcGroupDetailInfo = groupDetailInfoExRecords.get(0);
+ Optional<GroupDetailInfoExRecord> optTarGroupDetailInfo =
+ groupDetailInfoExRecords.stream()
+ .filter(o -> !o.storageInstId.equalsIgnoreCase(srcGroupDetailInfo.storageInstId))
+ .findFirst();
+ GroupDetailInfoExRecord tarGroupDetailInfo =
+ optTarGroupDetailInfo.isPresent() ? optTarGroupDetailInfo.get() : srcGroupDetailInfo;
+
+ Map<String, String> groupStorageInsts = new HashMap<>();
+ Map<String, Pair<String, String>> storageInstAndUserInfos = new HashMap<>();
+
+ storageInstAndUserInfos.put(srcGroupDetailInfo.storageInstId,
+ PhysicalBackfillUtils.getUserPasswd(srcGroupDetailInfo.storageInstId));
+
+ storageInstAndUserInfos.computeIfAbsent(tarGroupDetailInfo.storageInstId,
+ key -> PhysicalBackfillUtils.getUserPasswd(tarGroupDetailInfo.storageInstId));
+
+ groupStorageInsts.put(srcGroupDetailInfo.groupName, srcGroupDetailInfo.storageInstId);
+ groupStorageInsts.put(tarGroupDetailInfo.groupName, tarGroupDetailInfo.storageInstId);
+
+ Pair<String, String> srcUserAndPwd = storageInstAndUserInfos.get(srcGroupDetailInfo.storageInstId);
+ Pair<String, Integer> sourceHost =
+ PhysicalBackfillUtils.getMySQLOneFollowerIpAndPort(srcGroupDetailInfo.storageInstId);
+
+ physicalBackfillLoader.applyBatch(Pair.of(srcGroupDetailInfo.phyDbName, srcGroupDetailInfo.groupName),
+ Pair.of(TEST_SPEED_SRC_DIR, TEST_SPEED_SRC_DIR),
+ Lists.newArrayList(sourceHost),
+ storageInstAndUserInfos.get(srcGroupDetailInfo.getStorageInstId()), builder.build(), ec);
+
+ long totalTransferDataSize = 0L;
+ long testTime = 0;
+ long startTime = System.currentTimeMillis();
+ boolean healthyCheck =
+ ec.getParamManager().getBoolean(ConnectionParams.PHYSICAL_BACKFILL_STORAGE_HEALTHY_CHECK);
+ do {
+ PolarxPhysicalBackfill.TransferFileDataOperator transferFileData;
+ try (XConnection conn = (XConnection) (PhysicalBackfillUtils.getXConnectionForStorage(
+ srcGroupDetailInfo.getPhyDbName(),
+ sourceHost.getKey(), sourceHost.getValue(), srcUserAndPwd.getKey(), srcUserAndPwd.getValue(), -1))) {
+ PolarxPhysicalBackfill.TransferFileDataOperator.Builder readBuilder =
+ PolarxPhysicalBackfill.TransferFileDataOperator.newBuilder();
+
+ readBuilder.setOperatorType(PolarxPhysicalBackfill.TransferFileDataOperator.Type.GET_DATA_FROM_SRC_IBD);
+ PolarxPhysicalBackfill.FileInfo.Builder srcFileInfoBuilder =
+ PolarxPhysicalBackfill.FileInfo.newBuilder();
+ srcFileInfoBuilder.setFileName(TEST_SPEED_SRC_DIR);
+ srcFileInfoBuilder.setTempFile(false);
+ srcFileInfoBuilder.setDirectory(TEST_SPEED_SRC_DIR);
+ srcFileInfoBuilder.setPartitionName("");
+ readBuilder.setFileInfo(srcFileInfoBuilder.build());
+ readBuilder.setBufferLen(batchSize);
+ readBuilder.setOffset(0);
+ transferFileData = conn.execReadBufferFromFile(readBuilder);
+ } catch (Exception ex) {
+ testTime = System.currentTimeMillis() - startTime;
+ if (testTime > maxTestTime) {
+ break;
+ }
+ continue;
+ }
+ physicalBackfillLoader.applyBatch(Pair.of(tarGroupDetailInfo.phyDbName, tarGroupDetailInfo.groupName),
+ Pair.of(TEST_SPEED_TAR_DIR, TEST_SPEED_TAR_DIR),
+ PhysicalBackfillUtils.getMySQLServerNodeIpAndPorts(tarGroupDetailInfo.storageInstId, healthyCheck),
+ storageInstAndUserInfos.get(tarGroupDetailInfo.getStorageInstId()), transferFileData, ec);
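+ // accumulate the bytes moved in this round trip and re-check the elapsed time before looping again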
+ totalTransferDataSize += transferFileData.getBufferLen();
+ testTime = System.currentTimeMillis() - startTime;
+ } while (testTime < maxTestTime);
+
+ // bytes per millisecond divided by 1.024 gives KB per second; use floating-point division
+ // and guard against a zero-length test window
+ double speed = testTime <= 0 ? 0.00 : totalTransferDataSize / (double) testTime / 1.024; // KB/second
+ DecimalFormat df = new DecimalFormat("#.00");
+ return Double.parseDouble(df.format(speed));
+
+ }
+
+ public static boolean checkTableSpace(String schemaName, String group, String phyTableName) {
+ boolean tableSpaceExists = true;
+ TopologyHandler topologyHandler = ExecutorContext.getContext(schemaName).getTopologyHandler();
+ IGroupExecutor executor = topologyHandler.get(group);
+ if (executor == null) {
+ throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, String.format("invalid group:%s", group));
+ }
+
+ TGroupDataSource groupDataSource = (TGroupDataSource) executor.getDataSource();
+ try (Connection connection = groupDataSource.getConnection(); Statement stmt = connection.createStatement()) {
+ ResultSet rs = stmt.executeQuery(String.format(CHECK_TABLE, phyTableName));
+ rs.next();
+
+ } catch (SQLException ex) {
+ if (ex.getMessage() != null && ex.getMessage().contains(TABLESPACE_IS_DISCARD)) {
+ tableSpaceExists = false;
+ }
+ // ignore other errors: this check is best-effort
+ }
+ return tableSpaceExists;
+ }
+
+ public static List<String> getPhysicalPartitionNames(String schemaName, String group, String phyDb,
+ String phyTableName) {
+ List<String> phyPartNames = new ArrayList<>();
+ TopologyHandler topologyHandler = ExecutorContext.getContext(schemaName).getTopologyHandler();
+ IGroupExecutor executor = topologyHandler.get(group);
+ if (executor == null) {
+ throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, String.format("invalid group:%s", group));
+ }
+
+ TGroupDataSource groupDataSource = (TGroupDataSource) executor.getDataSource();
+ try (Connection connection = groupDataSource.getConnection(); Statement stmt = connection.createStatement()) {
+ ResultSet rs = stmt.executeQuery(String.format(SELECT_PHY_PARTITION_NAMES, phyTableName, phyDb));
+ while (rs.next()) {
+ phyPartNames.add(rs.getString("partition_name"));
+ }
+
+ } catch (SQLException ex) {
+ throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, ex,
+ String.format("failed to fetch the physical partition info for [%s.%s]",
+ phyDb, phyTableName));
+ }
+ return phyPartNames;
+ }
+
+ public static Pair<String, String> getUserPasswd(String storageInstId) {
+ HaSwitchParams haSwitchParams = StorageHaManager.getInstance().getStorageHaSwitchParams(storageInstId);
+ if (haSwitchParams == null) {
+ throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC,
+ String.format("storage inst not found for %s", storageInstId));
+ }
+ String user = haSwitchParams.userName;
+ String passwdEnc = haSwitchParams.passwdEnc;
+ String passwd = PasswdUtil.decrypt(passwdEnc);
+ return Pair.of(user, passwd);
+ }
+
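+ /**
+ * Look up the source table's innodb data files on the source node. The result maps each physical
+ * partition name (apparently the empty string for a non-partitioned table) to a pair of
+ * (file name, directory), as reported by the CHECK_SRC_FILE_EXISTENCE operator.
+ */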
+ physicalTableName + ":]'s innodb data file"; + SQLRecorderLogger.ddlLogger.info(msg); + + PolarxPhysicalBackfill.GetFileInfoOperator getFileInfoOperator = null; + Map> srcFileAndDirs = new HashMap<>(); + boolean success = false; + int tryTime = 1; + boolean isPartitioned = !hasNoPhyPart; + do { + try (XConnection conn = (XConnection) (getXConnectionForStorage(phyDbName, + sourceIpAndPort.getKey(), sourceIpAndPort.getValue(), userInfo.getKey(), userInfo.getValue(), -1))) { + PolarxPhysicalBackfill.GetFileInfoOperator.Builder builder = + PolarxPhysicalBackfill.GetFileInfoOperator.newBuilder(); + + builder.setOperatorType(PolarxPhysicalBackfill.GetFileInfoOperator.Type.CHECK_SRC_FILE_EXISTENCE); + PolarxPhysicalBackfill.TableInfo.Builder tableInfoBuilder = + PolarxPhysicalBackfill.TableInfo.newBuilder(); + tableInfoBuilder.setTableSchema(phyDbName); + tableInfoBuilder.setTableName(physicalTableName); + tableInfoBuilder.setPartitioned(isPartitioned); + if (isPartitioned) { + tableInfoBuilder.addAllPhysicalPartitionName(phyPartNames); + } + builder.setTableInfo(tableInfoBuilder.build()); + getFileInfoOperator = conn.execCheckFileExistence(builder); + for (PolarxPhysicalBackfill.FileInfo fileInfo : getFileInfoOperator.getTableInfo().getFileInfoList()) { + Pair srcFileAndDir = Pair.of(fileInfo.getFileName(), fileInfo.getDirectory()); + srcFileAndDirs.put(fileInfo.getPartitionName(), srcFileAndDir); + } + + success = true; + } catch (SQLException ex) { + if (tryTime > MAX_RETRY) { + throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, ex); + } + tryTime++; + } + } while (!success); + msg = "already get the source table[" + phyDbName + "." + physicalTableName + ":]'s innodb data file"; + SQLRecorderLogger.ddlLogger.info(msg); + return srcFileAndDirs; + } + + public static Pair getTempIbdFileInfo(Pair userInfo, + Pair sourceHost, + Pair srcDbAndGroup, + String physicalTableName, + String phyPartitionName, + Pair srcFileAndDir, + long batchSize, boolean fullTempDir, + List> offsetAndSize) { + + String tempIbdDir = + fullTempDir ? 
+ String tempIbdDir = fullTempDir ?
+ srcFileAndDir.getValue() : srcFileAndDir.getValue() + PhysicalBackfillUtils.TEMP_FILE_POSTFIX;
+ String msg = "begin to get the temp ibd file:" + tempIbdDir;
+ SQLRecorderLogger.ddlLogger.info(msg);
+
+ PolarxPhysicalBackfill.GetFileInfoOperator getFileInfoOperator = null;
+ Pair<String, String> tempFileAndDir = null;
+
+ boolean success = false;
+ int tryTime = 1;
+ do {
+ try (
+ XConnection conn = (XConnection) (PhysicalBackfillUtils.getXConnectionForStorage(srcDbAndGroup.getKey(),
+ sourceHost.getKey(), sourceHost.getValue(), userInfo.getKey(), userInfo.getValue(), -1))) {
+ PolarxPhysicalBackfill.GetFileInfoOperator.Builder builder =
+ PolarxPhysicalBackfill.GetFileInfoOperator.newBuilder();
+
+ builder.setOperatorType(PolarxPhysicalBackfill.GetFileInfoOperator.Type.CHECK_SRC_FILE_EXISTENCE);
+ PolarxPhysicalBackfill.TableInfo.Builder tableInfoBuilder =
+ PolarxPhysicalBackfill.TableInfo.newBuilder();
+ tableInfoBuilder.setTableSchema(srcDbAndGroup.getKey());
+ tableInfoBuilder.setTableName(physicalTableName);
+ tableInfoBuilder.setPartitioned(false);
+
+ PolarxPhysicalBackfill.FileInfo.Builder fileInfoBuilder = PolarxPhysicalBackfill.FileInfo.newBuilder();
+ fileInfoBuilder.setTempFile(true);
+ fileInfoBuilder.setFileName(srcFileAndDir.getKey());
+ fileInfoBuilder.setPartitionName(phyPartitionName);
+ fileInfoBuilder.setDirectory(tempIbdDir);
+
+ tableInfoBuilder.addFileInfo(fileInfoBuilder.build());
+
+ builder.setTableInfo(tableInfoBuilder.build());
+ getFileInfoOperator = conn.execCheckFileExistence(builder);
+ PolarxPhysicalBackfill.FileInfo fileInfo = getFileInfoOperator.getTableInfo().getFileInfo(0);
+ tempFileAndDir = Pair.of(fileInfo.getFileName(), fileInfo.getDirectory());
+ long fileSize = fileInfo.getDataSize();
+ long offset = 0;
+ offsetAndSize.clear();
+ do {
+ long bufferSize = Math.min(fileSize - offset, batchSize);
+ offsetAndSize.add(new Pair<>(offset, bufferSize));
+ offset += bufferSize;
+ } while (offset < fileSize);
+ success = true;
+ } catch (Exception ex) {
+ SQLRecorderLogger.ddlLogger.info(ex.toString());
+ if (tryTime > PhysicalBackfillUtils.MAX_RETRY) {
+ throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, ex);
+ }
+ tryTime++;
+ offsetAndSize.clear();
+ }
+ } while (!success);
+ msg = "already got the temp ibd file:" + tempIbdDir;
+ SQLRecorderLogger.ddlLogger.info(msg);
+ return tempFileAndDir;
+ }
+
+ public static String convertToCfgFileName(String ibdFileName, String fileExtension) {
+ assert !StringUtil.isNullOrEmpty(ibdFileName);
+ int ibdIndex = ibdFileName.lastIndexOf(IBD);
+ int tempIndex = ibdFileName.lastIndexOf(TEMP_FILE_POSTFIX);
+ assert ibdIndex != -1;
+ if (tempIndex != -1) {
+ return ibdFileName.substring(0, ibdIndex) + fileExtension + TEMP_FILE_POSTFIX;
+ } else {
+ return ibdFileName.substring(0, ibdIndex) + fileExtension;
+ }
+ }
+
+ public static void checkInterrupted(ExecutionContext ec, AtomicReference<Boolean> interrupted) {
+ // Check DDL is ongoing.
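+ // Cancellation can come from three places: the DDL job state in GMS, the worker thread's
+ // interrupt flag, or an optional shared flag owned by the caller.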
+ if (CrossEngineValidator.isJobInterrupted(ec) || Thread.currentThread().isInterrupted()
+ || (interrupted != null && interrupted.get())) {
+ long jobId = ec.getDdlJobId();
+ throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR,
+ "The job '" + jobId + "' has been cancelled");
+ }
+ }
+
+ public static boolean isSupportForPhysicalBackfill(String schemaName, ExecutionContext ec) {
+ String defaultIndex = ec.getSchemaManager(schemaName).getTddlRuleManager().getDefaultDbIndex(null);
+ final TGroupDataSource groupDataSource =
+ (TGroupDataSource) ExecutorContext.getContext(schemaName).getTopologyExecutor()
+ .getGroupExecutor(defaultIndex).getDataSource();
+
+ try (Connection conn = groupDataSource.getConnection().getRealConnection()) {
+ return ec.getParamManager().getBoolean(ConnectionParams.PHYSICAL_BACKFILL_ENABLE)
+ && ExecutorContext.getContext(schemaName).getStorageInfoManager()
+ .supportXOptForPhysicalBackfill() && conn.isWrapperFor(XConnection.class) && conn.unwrap(
+ XConnection.class).isXRPC();
+ } catch (Exception ex) {
+ SQLRecorderLogger.ddlLogger.info("isSupportForPhysicalBackfill=false");
+ SQLRecorderLogger.ddlLogger.info(ex.toString());
+ return false;
+ }
+ }
+
+ public static PhysicalBackfillRateLimiter getRateLimiter() {
+ return rateLimiter;
+ }
+
+ public static class PhysicalBackfillRateLimiter {
+ // default rate limit: 250MB/s
+ private static long curSpeedLimiter = 250 * 1024 * 1024L;
+ private static final RateLimiter rateLimiter = RateLimiter.create(curSpeedLimiter);
+
+ public PhysicalBackfillRateLimiter() {
+ }
+
+ public double acquire(int permits) {
+ return rateLimiter.acquire(permits);
+ }
+
+ public void setRate(long permitsPerSecond) {
+ rateLimiter.setRate(permitsPerSecond);
+ curSpeedLimiter = permitsPerSecond;
+ }
+
+ public double getRate() {
+ return rateLimiter.getRate();
+ }
+
+ public long getCurSpeedLimiter() {
+ return curSpeedLimiter;
+ }
+ }
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/physicalbackfill/physicalBackfillLoader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/physicalbackfill/physicalBackfillLoader.java
new file mode 100644
index 000000000..032a35c69
--- /dev/null
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/physicalbackfill/physicalBackfillLoader.java
@@ -0,0 +1,120 @@
+/*
+ * Copyright [2013-2021], Alibaba Group Holding Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.physicalbackfill;
+
+import com.alibaba.polardbx.common.async.AsyncTask;
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.common.utils.GeneralUtil;
+import com.alibaba.polardbx.common.utils.Pair;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.rpc.pool.XConnection;
+import com.mysql.cj.polarx.protobuf.PolarxPhysicalBackfill;
+
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Future;
+import java.util.concurrent.FutureTask;
+
+/**
+ * Write batches of physical file data to the target ibd file on every target host
+ */
+public class physicalBackfillLoader {
+
+ private final String schemaName;
+ private final String tableName;
+ private final static int MAX_RETRY = 3;
+
+ public physicalBackfillLoader(String schemaName, String tableName) {
+ this.schemaName = schemaName;
+ this.tableName = tableName;
+ }
+
+ public void applyBatch(final Pair<String, String> targetDbAndGroup, final Pair<String, String> targetFileAndDir,
+ final List<Pair<String, Integer>> targetHosts,
+ final Pair<String, String> userInfo,
+ final PolarxPhysicalBackfill.TransferFileDataOperator transferFileData,
+ final ExecutionContext ec) {
+ if (GeneralUtil.isEmpty(targetHosts)) {
+ throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE,
+ String.format("invalid target address for group:[%s]", targetDbAndGroup.getValue()));
+ }
+ PolarxPhysicalBackfill.TransferFileDataOperator.Builder builder =
+ PolarxPhysicalBackfill.TransferFileDataOperator.newBuilder();
+
+ builder.setOperatorType(PolarxPhysicalBackfill.TransferFileDataOperator.Type.PUT_DATA_TO_TAR_IBD);
+ PolarxPhysicalBackfill.FileInfo.Builder fileInfoBuilder = PolarxPhysicalBackfill.FileInfo.newBuilder();
+ fileInfoBuilder.setFileName(targetFileAndDir.getKey());
+ fileInfoBuilder.setTempFile(false);
+ fileInfoBuilder.setDirectory(targetFileAndDir.getValue());
+ fileInfoBuilder.setPartitionName("");
+ builder.setFileInfo(fileInfoBuilder.build());
+ builder.setBufferLen(transferFileData.getBufferLen());
+ builder.setOffset(transferFileData.getOffset());
+ builder.setBuffer(transferFileData.getBuffer());
+ List<FutureTask<Void>> futures = new ArrayList<>(targetHosts.size());
+
+ targetHosts.forEach(v -> {
+ FutureTask<Void> task = new FutureTask<>(() -> {
+ boolean success = false;
+ int tryTime = 1;
+ do {
+ try (XConnection conn = (XConnection) (PhysicalBackfillUtils.getXConnectionForStorage(
+ targetDbAndGroup.getKey(), v.getKey(), v.getValue(), userInfo.getKey(), userInfo.getValue(),
+ -1))) {
+ long writeSize = conn.execTransferFile(builder);
+ if (writeSize != transferFileData.getBufferLen()) {
+ throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE,
+ "the length of the buffer written to the target file differs from the length read from the source file");
+ }
+ success = true;
+ } catch (SQLException ex) {
+ if (tryTime > MAX_RETRY) {
+ throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, ex);
+ }
+ tryTime++;
+ }
+ } while (!success);
+ }, null);
+ futures.add(task);
+ ec.getExecutorService().submit(ec.getSchemaName(), ec.getTraceId(), AsyncTask.build(task));
+ });
+ waitApplyFinish(futures);
+
+ }
+
+ private void waitApplyFinish(List<FutureTask<Void>> futures) {
+ for (Future<Void> future : futures) {
+ try {
+ future.get();
+ } catch (Exception e) {
+ futures.forEach(f -> {
+ try {
+ f.cancel(true);
+ } catch (Throwable ignore) {
+ }
+ });
+
+ throw GeneralUtil.nestedException(e);
+ }
+ }
+
+ futures.clear();
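+ // at this point every target host has acknowledged the batch; the list is cleared,
+ // presumably so the same loader instance can be reused for the next batch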
+ } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/AbstractPl.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/AbstractPl.java index 3512973e2..8f9799787 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/AbstractPl.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/AbstractPl.java @@ -128,11 +128,10 @@ public void close() { memoryPool.destroy(); } // make sure all cursor has been closed - blockStmtToCursors.values().stream().map(t -> t.values().stream().map(cur -> { + blockStmtToCursors.values().forEach(t -> t.values().forEach(cur -> { if (cur != null) { - return cur.close(new ArrayList<>()); + cur.close(new ArrayList<>()); } - return null; })); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/PLUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/PLUtils.java index 52545ef38..b155252b2 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/PLUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/PLUtils.java @@ -17,21 +17,12 @@ package com.alibaba.polardbx.executor.pl; import com.alibaba.polardbx.common.datatype.Decimal; -import com.alibaba.polardbx.common.exception.TddlRuntimeException; -import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.druid.sql.SQLUtils; import com.alibaba.polardbx.druid.sql.ast.SQLName; import com.alibaba.polardbx.druid.sql.ast.SQLStatement; import com.alibaba.polardbx.druid.sql.ast.expr.SQLCharExpr; import com.alibaba.polardbx.druid.sql.ast.expr.SQLPropertyExpr; import com.alibaba.polardbx.druid.sql.ast.statement.SQLCreateFunctionStatement; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLSelectItem; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLSelectQuery; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLSelectQueryBlock; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLSelectStatement; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLUnionQuery; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLValuesQuery; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLValuesTableSource; import com.alibaba.polardbx.druid.sql.parser.SQLParserUtils; import com.alibaba.polardbx.druid.sql.parser.SQLStatementParser; import com.alibaba.polardbx.druid.util.JdbcConstants; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/PlCacheCursor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/PlCacheCursor.java index 41b43ff8c..0aae7eba6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/PlCacheCursor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/PlCacheCursor.java @@ -42,7 +42,7 @@ public void openSpillFile() { } // if spill happened, read the spill file if (spiller != null) { - getIterator(); + iterator = getIterator(); } openSpillFile = true; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/ProcedureManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/ProcedureManager.java index d99434edf..4aff57836 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/ProcedureManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/ProcedureManager.java @@ -24,13 +24,12 @@ import com.alibaba.polardbx.druid.sql.ast.statement.SQLCreateProcedureStatement; import 
com.alibaba.polardbx.executor.ddl.job.task.basic.pl.PlConstants; import com.alibaba.polardbx.executor.ddl.job.task.basic.pl.accessor.ProcedureAccessor; -import com.alibaba.polardbx.gms.metadb.pl.procedure.ProcedureMetaRecord; import com.alibaba.polardbx.gms.metadb.pl.procedure.ProcedureDefinitionRecord; +import com.alibaba.polardbx.gms.metadb.pl.procedure.ProcedureMetaRecord; import com.alibaba.polardbx.gms.util.MetaDbUtil; import com.alibaba.polardbx.optimizer.memory.MemoryManager; import com.alibaba.polardbx.optimizer.memory.MemorySetting; import com.alibaba.polardbx.optimizer.parse.FastsqlUtils; -import com.google.common.collect.ImmutableMap; import java.sql.Connection; import java.sql.SQLException; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/StatementKind.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/StatementKind.java index 4d8ec11fb..172a7728d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/StatementKind.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/StatementKind.java @@ -25,7 +25,6 @@ import com.alibaba.polardbx.druid.sql.ast.statement.SQLLoopStatement; import com.alibaba.polardbx.druid.sql.ast.statement.SQLOpenStatement; import com.alibaba.polardbx.druid.sql.ast.statement.SQLSetStatement; -import com.alibaba.polardbx.druid.sql.ast.statement.SQLShowStatement; import com.alibaba.polardbx.druid.sql.ast.statement.SQLUseStatement; import com.alibaba.polardbx.druid.sql.ast.statement.SQLWhileStatement; import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.clause.MySqlCaseStatement; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/UdfUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/UdfUtils.java index 37e92c136..d179deb88 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/UdfUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/pl/UdfUtils.java @@ -17,18 +17,17 @@ package com.alibaba.polardbx.executor.pl; import com.alibaba.polardbx.druid.sql.SQLUtils; -import com.alibaba.polardbx.druid.sql.ast.SQLDataType; import com.alibaba.polardbx.druid.sql.ast.SQLParameter; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLBlockStatement; import com.alibaba.polardbx.druid.sql.ast.statement.SQLCreateFunctionStatement; +import com.alibaba.polardbx.druid.sql.visitor.VisitorFeature; import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; -import com.alibaba.polardbx.optimizer.core.datatype.type.BasicTypeBuilders; import com.alibaba.polardbx.optimizer.core.TddlOperatorTable; import com.alibaba.polardbx.optimizer.core.TddlRelDataTypeSystemImpl; import com.alibaba.polardbx.optimizer.parse.FastsqlUtils; import com.google.common.collect.HashMultimap; import com.google.common.collect.Multimap; import org.apache.calcite.rel.type.RelDataType; -import org.apache.calcite.rel.type.RelDataTypeSystem; import org.apache.calcite.schema.Function; import org.apache.calcite.schema.impl.TypeKnownScalarFunction; import org.apache.calcite.sql.SqlIdentifier; @@ -94,4 +93,25 @@ public static void unregisterSqlUdf(String functionName) { // disable type coercion SqlStdOperatorTable.instance().disableTypeCoercion(functionName, SqlSyntax.FUNCTION); } + + public static String removeFuncBody(String createFunctionContent) { + SQLCreateFunctionStatement + statement = (SQLCreateFunctionStatement) FastsqlUtils.parseSql(createFunctionContent).get(0); + statement.setBlock(new 
SQLBlockStatement()); + return statement.toString(VisitorFeature.OutputPlOnlyDefinition); + } + + public static void validateContent(String content) { + String createFunction = UdfUtils.removeFuncBody(content); + // validate parser + SQLCreateFunctionStatement + statement = (SQLCreateFunctionStatement) FastsqlUtils.parseSql(createFunction).get(0); + // validate input types + List inputParams = statement.getParameters(); + for (SQLParameter param : inputParams) { + DataTypeUtil.createBasicSqlType(TddlRelDataTypeSystemImpl.getInstance(), param.getDataType()); + } + // validate return types + DataTypeUtil.createBasicSqlType(TddlRelDataTypeSystemImpl.getInstance(), statement.getReturnDataType()); + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/planmanagement/BaselineSyncController.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/planmanagement/BaselineSyncController.java index e5c2db9b7..9c0f80e83 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/planmanagement/BaselineSyncController.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/planmanagement/BaselineSyncController.java @@ -16,13 +16,13 @@ package com.alibaba.polardbx.executor.planmanagement; -import com.alibaba.polardbx.gms.scheduler.ScheduledJobExecutorType; import com.alibaba.polardbx.executor.scheduler.ScheduledJobsManager; import com.alibaba.polardbx.executor.sync.BaselineUpdateSyncAction; import com.alibaba.polardbx.executor.sync.DeleteBaselineSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.scheduler.ScheduledJobExecutorType; import com.alibaba.polardbx.gms.scheduler.ScheduledJobsRecord; -import com.alibaba.polardbx.gms.topology.SystemDbHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.planmanager.BaselineInfo; import com.alibaba.polardbx.optimizer.planmanager.IBaselineSyncController; import com.alibaba.polardbx.optimizer.planmanager.PlanInfo; @@ -40,7 +40,7 @@ public void updateBaselineSync(String schemaName, BaselineInfo baselineInfo) { List baselineJson = Lists.newArrayList(); baselineJson.add(BaselineInfo.serializeToJson(baselineInfo, false)); baselineMap.put(schemaName, baselineJson); - SyncManagerHelper.syncWithDefaultDB(new BaselineUpdateSyncAction(baselineMap)); + SyncManagerHelper.syncWithDefaultDB(new BaselineUpdateSyncAction(baselineMap), SyncScope.CURRENT_ONLY); } @Override @@ -49,7 +49,7 @@ public void deleteBaseline(String schemaName, BaselineInfo baselineInfo) { new DeleteBaselineSyncAction( schemaName, baselineInfo.getParameterSql()), - schemaName); + schemaName, SyncScope.CURRENT_ONLY); } @Override @@ -59,7 +59,7 @@ public void deletePlan(String schemaName, BaselineInfo baselineInfo, PlanInfo pl schemaName, baselineInfo.getParameterSql(), planInfo.getId()), - schemaName); + schemaName, SyncScope.CURRENT_ONLY); } @Override diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/rplchecker/LogicalTableHashCalculator.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/rplchecker/LogicalTableHashCalculator.java new file mode 100644 index 000000000..bb02b4787 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/rplchecker/LogicalTableHashCalculator.java @@ -0,0 +1,267 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with 
the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.alibaba.polardbx.executor.rplchecker;
+
+import com.alibaba.polardbx.common.OrderInvariantHasher;
+import com.alibaba.polardbx.common.exception.TddlRuntimeException;
+import com.alibaba.polardbx.common.exception.code.ErrorCode;
+import com.alibaba.polardbx.common.jdbc.ParameterContext;
+import com.alibaba.polardbx.common.jdbc.ParameterMethod;
+import com.alibaba.polardbx.executor.ExecutorHelper;
+import com.alibaba.polardbx.executor.cursor.Cursor;
+import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor;
+import com.alibaba.polardbx.executor.fastchecker.FastChecker;
+import com.alibaba.polardbx.executor.gsi.GsiUtils;
+import com.alibaba.polardbx.executor.gsi.PhysicalPlanBuilder;
+import com.alibaba.polardbx.executor.gsi.utils.Transformer;
+import com.alibaba.polardbx.optimizer.config.table.ColumnMeta;
+import com.alibaba.polardbx.optimizer.config.table.SchemaManager;
+import com.alibaba.polardbx.optimizer.config.table.TableMeta;
+import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.alibaba.polardbx.optimizer.core.datatype.DataType;
+import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil;
+import com.alibaba.polardbx.optimizer.core.datatype.DataTypes;
+import com.alibaba.polardbx.optimizer.core.rel.PhyTableOpBuildParams;
+import com.alibaba.polardbx.optimizer.core.rel.PhyTableOperation;
+import com.alibaba.polardbx.optimizer.core.rel.PhyTableOperationFactory;
+import com.alibaba.polardbx.optimizer.core.row.Row;
+import com.alibaba.polardbx.optimizer.utils.PlannerUtils;
+import com.google.common.collect.ImmutableList;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * @author yudong
+ * @since 2023/8/29 12:19
+ **/
+public class LogicalTableHashCalculator {
+ private final String schemaName;
+ private final String tableName;
+ private final Map<Integer, ParameterContext> boundParameters;
+ private final ExecutionContext ec;
+ private final PhyTableOperation planSelectHashChecker;
+ private final boolean withLowerBound;
+ private final boolean withUpperBound;
+
+ public LogicalTableHashCalculator(String schemaName, String tableName, List<String> checkColumnList,
+ List<Object> lowerBounds, List<Object> upperBounds, ExecutionContext ec) {
+ this.schemaName = schemaName;
+ this.tableName = tableName;
+ this.ec = ec;
+ this.withLowerBound = withBound(lowerBounds);
+ this.withUpperBound = withBound(upperBounds);
+ this.boundParameters = prepareParameterContext(lowerBounds, upperBounds);
+ this.planSelectHashChecker = preparePlanSelectHashChecker(schemaName, tableName, checkColumnList);
+ }
+
+ /**
+ * The only public entry point: calculate the hash value of the logical table.
+ */
+ public Long calculateHash() {
+ // step 1. build the physical plans
+ List<PhyTableOperation> plans = buildPhysicalPlan();
+
+ // step 2. execute the physical plans
+ List<Long> hashValues = new ArrayList<>();
+ for (PhyTableOperation plan : plans) {
+ Long hash = executePhysicalPlan(plan);
+ if (hash != null) {
+ hashValues.add(hash);
+ }
+ }
+ // an empty hash value list means the specified range contains no data
+ if (hashValues.isEmpty()) {
+ return 0L;
+ }
+
+ // step 3. combine the hash values of all physical tables
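+ // (OrderInvariantHasher is, going by its name, insensitive to the order in which values are
+ // added, so the result does not depend on the enumeration order of the physical tables.)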
+ final OrderInvariantHasher calculator = new OrderInvariantHasher();
+ for (Long elem : hashValues) {
+ calculator.add(elem);
+ }
+ return calculator.getResult();
+ }
+
+ /**
+ * Build all the physical plans; each physical table gets one plan of its own.
+ */
+ private List<PhyTableOperation> buildPhysicalPlan() {
+ List<PhyTableOperation> result = new ArrayList<>();
+ final Map<String, Set<String>> phyTables = GsiUtils.getPhyTables(schemaName, tableName);
+ for (Map.Entry<String, Set<String>> entry : phyTables.entrySet()) {
+ String phyDb = entry.getKey();
+ for (String phyTb : entry.getValue()) {
+ result.add(buildPhysicalPlanHelper(phyDb, phyTb));
+ }
+ }
+ return result;
+ }
+
+ /**
+ * Build the physical plan for one physical table; the plan is the hashcheck function pushed down
+ * to the DN for execution.
+ */
+ private PhyTableOperation buildPhysicalPlanHelper(String phyDb, String phyTb) {
+ Map<Integer, ParameterContext> planParams = new HashMap<>();
+ // Physical table is 1st parameter
+ planParams.put(1, PlannerUtils.buildParameterContextForTableName(phyTb, 1));
+
+ PhyTableOpBuildParams opBuildParams = new PhyTableOpBuildParams();
+ opBuildParams.setGroupName(phyDb);
+ opBuildParams.setPhyTables(ImmutableList.of(ImmutableList.of(phyTb)));
+
+ // parameters for where (DNF)
+ int beginParamIndex = 2;
+ final int pkNumber;
+ if (withLowerBound || withUpperBound) {
+ pkNumber = boundParameters.size() / ((withLowerBound ? 1 : 0) + (withUpperBound ? 1 : 0));
+ } else {
+ pkNumber = boundParameters.size();
+ }
+
+ if (withLowerBound) {
+ for (int i = 0; i < pkNumber; ++i) {
+ for (int j = 0; j <= i; ++j) {
+ planParams.put(beginParamIndex,
+ new ParameterContext(boundParameters.get(j).getParameterMethod(),
+ new Object[] {beginParamIndex, boundParameters.get(j).getArgs()[1]}));
+ beginParamIndex++;
+ }
+ }
+ }
+ if (withUpperBound) {
+ final int base = withLowerBound ? pkNumber : 0;
+ for (int i = 0; i < pkNumber; ++i) {
+ for (int j = 0; j <= i; ++j) {
+ planParams.put(beginParamIndex,
+ new ParameterContext(boundParameters.get(base + j).getParameterMethod(),
+ new Object[] {beginParamIndex, boundParameters.get(base + j).getArgs()[1]}));
+ beginParamIndex++;
+ }
+ }
+ }
+
+ opBuildParams.setDynamicParams(planParams);
+ return PhyTableOperationFactory.getInstance()
+ .buildPhyTableOperationByPhyOp(planSelectHashChecker, opBuildParams);
+ }
+
+ /**
+ * Execute one physical plan; each plan computes the hash value of a single physical table.
+ */
+ private Long executePhysicalPlan(PhyTableOperation plan) {
+ // TODO: retry on exception
+ Long result = null;
+ Cursor cursor = null;
+ try {
+ cursor = ExecutorHelper.executeByCursor(plan, ec, false);
+ Row row;
+ if (cursor != null && (row = cursor.next()) != null) {
+ result = (Long) row.getObject(0);
+ }
+ } finally {
+ if (cursor != null) {
+ cursor.close(new ArrayList<>());
+ }
+ }
+ return result;
+ }
+
+ private Map<Integer, ParameterContext> prepareParameterContext(List<Object> lowerBounds, List<Object> upperBounds) {
+ Map<Integer, ParameterContext> result = new HashMap<>();
+ Cursor cursor = convertToCursor(lowerBounds, upperBounds);
+ List<Map<Integer, ParameterContext>> maps;
+ try {
+ maps = Transformer.convertUpperBoundWithDefault(cursor, false, (columnMeta, i) -> {
+ // Generate default parameter context for upper bound of empty source table
+ ParameterMethod defaultMethod = ParameterMethod.setString;
+ Object defaultValue = "0";
+ final DataType columnType = columnMeta.getDataType();
+ if (DataTypeUtil.anyMatchSemantically(columnType, DataTypes.DateType, DataTypes.TimestampType,
+ DataTypes.DatetimeType, DataTypes.TimeType, DataTypes.YearType)) {
+ // For time data type, use number zero as upper bound
+ defaultMethod = ParameterMethod.setLong;
+ defaultValue = 0L;
+ }
+ return new ParameterContext(defaultMethod, new Object[] {i, defaultValue});
+ });
+ } finally {
+ cursor.close(new ArrayList<>());
+ }
+
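+ // flatten the converted rows into one positional map: lower-bound values first, then
+ // upper-bound values, matching how buildPhysicalPlanHelper indexes boundParameters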
+ int i = 0;
+ for (Map<Integer, ParameterContext> pmap : maps) {
+ for (ParameterContext pc : pmap.values()) {
+ result.put(i++, pc);
+ }
+ }
+ return result;
+ }
+
+ private Cursor convertToCursor(List<Object> lowerBounds, List<Object> upperBounds) {
+ ArrayResultCursor result = new ArrayResultCursor(tableName);
+ final SchemaManager sm = ec.getSchemaManager(schemaName);
+ final TableMeta baseTableMeta = sm.getTable(tableName);
+ List<ColumnMeta> pkMetaList = new ArrayList<>(baseTableMeta.getPrimaryKey());
+ for (ColumnMeta pkMeta : pkMetaList) {
+ result.addColumn(pkMeta);
+ }
+ if (withLowerBound) {
+ result.addRow(convertBoundList(lowerBounds, pkMetaList));
+ }
+ if (withUpperBound) {
+ result.addRow(convertBoundList(upperBounds, pkMetaList));
+ }
+ return result;
+ }
+
+ private Object[] convertBoundList(List<Object> boundList, List<ColumnMeta> metaList) {
+ if (boundList.size() != metaList.size()) {
+ throw new TddlRuntimeException(ErrorCode.ERR_CDC_GENERIC,
+ "bound list size:" + boundList.size() + " not equal to meta list size: " + metaList.size());
+ }
+ Object[] result = new Object[boundList.size()];
+ for (int i = 0; i < boundList.size(); i++) {
+ ColumnMeta meta = metaList.get(i);
+ result[i] = meta.getDataType().convertFrom(boundList.get(i));
+ }
+
+ return result;
+ }
+
+ private PhyTableOperation preparePlanSelectHashChecker(String schemaName, String tableName,
+ List<String> checkColumnList) {
+ final SchemaManager sm = ec.getSchemaManager(schemaName);
+ final TableMeta baseTableMeta = sm.getTable(tableName);
+ if (baseTableMeta == null) {
+ throw new TddlRuntimeException(ErrorCode.ERR_CDC_GENERIC,
+ "failed to get table meta for table:" + schemaName + "." + tableName);
+ }
+ final List<String> baseTablePks = FastChecker.getorderedPrimaryKeys(baseTableMeta);
+ final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, ec);
+ return builder.buildSelectHashCheckForChecker(baseTableMeta, checkColumnList, baseTablePks, withLowerBound,
+ withUpperBound);
+ }
+
+ private boolean withBound(List<Object> boundValues) {
+ return boundValues != null && !boundValues.isEmpty();
+ }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/backfill/BackfillExecutor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/backfill/BackfillExecutor.java
index e1fe62a72..15a0c17e5 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/backfill/BackfillExecutor.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/backfill/BackfillExecutor.java
@@ -17,7 +17,6 @@
 package com.alibaba.polardbx.executor.scaleout.backfill;
 import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException;
-import com.alibaba.polardbx.common.exception.TddlRuntimeException;
 import com.alibaba.polardbx.common.jdbc.ParameterContext;
 import com.alibaba.polardbx.common.properties.ConnectionParams;
 import com.alibaba.polardbx.common.utils.Pair;
@@ -27,8 +26,6 @@
 import com.alibaba.polardbx.executor.backfill.Extractor;
 import com.alibaba.polardbx.executor.backfill.Loader;
 import com.alibaba.polardbx.executor.cursor.Cursor;
-import com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils;
-import com.alibaba.polardbx.executor.partitionmanagement.backfill.AlterTableGroupExtractor;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
 import org.apache.calcite.rel.RelNode;
@@ -59,6 +56,7 @@ public int backfill(String schemaName, String tableName, ExecutionContext baseEc
 final long speedMin = baseEc.getParamManager().getLong(ConnectionParams.SCALEOUT_BACKFILL_SPEED_MIN);
 final long
speedLimit = baseEc.getParamManager().getLong(ConnectionParams.SCALEOUT_BACKFILL_SPEED_LIMITATION); final long parallelism = baseEc.getParamManager().getLong(ConnectionParams.SCALEOUT_BACKFILL_PARALLELISM); + final boolean useBinary = baseEc.getParamManager().getBoolean(ConnectionParams.BACKFILL_USING_BINARY); if (null == baseEc.getServerVariables()) { baseEc.setServerVariables(new HashMap<>()); @@ -68,11 +66,11 @@ public int backfill(String schemaName, String tableName, ExecutionContext baseEc Extractor extractor; if (isChangeSet) { extractor = ChangeSetExecutor - .create(schemaName, tableName, tableName, batchSize, speedMin, speedLimit, parallelism, sourcePhyTables, - baseEc); + .create(schemaName, tableName, tableName, batchSize, speedMin, speedLimit, parallelism, useBinary, + null, sourcePhyTables, baseEc); } else { extractor = MoveTableExtractor - .create(schemaName, tableName, batchSize, speedMin, speedLimit, parallelism, sourcePhyTables, + .create(schemaName, tableName, batchSize, speedMin, speedLimit, parallelism, useBinary, sourcePhyTables, baseEc); } final Loader loader = diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/backfill/ChangeSetExecutor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/backfill/ChangeSetExecutor.java index 32448d992..011604d2c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/backfill/ChangeSetExecutor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/backfill/ChangeSetExecutor.java @@ -55,7 +55,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.BiConsumer; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -74,20 +73,19 @@ public class ChangeSetExecutor extends Extractor { private final PhyTableOperation planSelect; protected ChangeSetExecutor(String schemaName, String sourceTableName, String targetTableName, long batchSize, - long speedMin, - long speedLimit, long parallelism, + long speedMin, long speedLimit, long parallelism, boolean useBinary, + List modifyStringColumns, PhyTableOperation planSelectWithMax, PhyTableOperation planSelectWithMin, PhyTableOperation planSelectWithMinAndMax, PhyTableOperation planSelect, PhyTableOperation planSelectMaxPk, PhyTableOperation planSelectSample, - PhyTableOperation planSelectMinAndMaxSample, List primaryKeysId, Map> sourcePhyTables) { - super(schemaName, sourceTableName, targetTableName, batchSize, speedMin, speedLimit, parallelism, - planSelectWithMax, planSelectWithMin, planSelectWithMinAndMax, planSelectMaxPk, - planSelectSample, planSelectMinAndMaxSample, primaryKeysId); + super(schemaName, sourceTableName, targetTableName, batchSize, speedMin, speedLimit, parallelism, useBinary, + modifyStringColumns, planSelectWithMax, planSelectWithMin, planSelectWithMinAndMax, planSelectMaxPk, + planSelectSample, primaryKeysId); this.sourcePhyTables = sourcePhyTables; this.planSelect = planSelect; } @@ -310,8 +308,12 @@ public void buildBatchParamAndLoad(Cursor cursor, String dbIndex, String phyTabl final Map params = new HashMap<>(columns.size()); for (int i = 0; i < columns.size(); i++) { + ColumnMeta columnMeta = columns.get(i); + String colName = columnMeta.getName(); + boolean canConvert = + useBinary && (notConvertColumns == null || !notConvertColumns.contains(colName)); - final ParameterContext parameterContext = buildColumnParam(row, i); + final 
ParameterContext parameterContext = buildColumnParam(row, i, canConvert); params.put(i + 1, parameterContext); } @@ -401,10 +403,10 @@ public void statsAddBackFillRows(ExecutionContext ec, List> sourcePhyTables, + long speedMin, long speedLimit, long parallelism, boolean useBinary, + List modifyStringColumns, Map> sourcePhyTables, ExecutionContext ec) { - final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, ec); + final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, useBinary, modifyStringColumns, ec); ExtractorInfo info = Extractor.buildExtractorInfo(ec, schemaName, sourceTableName, targetTableName, false); @@ -417,6 +419,8 @@ public static Extractor create(String schemaName, String sourceTableName, String speedMin, speedLimit, parallelism, + useBinary, + modifyStringColumns, builder.buildSelectForBackfillNotLimit(info.getSourceTableMeta(), info.getTargetTableColumns(), info.getPrimaryKeys(), false, @@ -438,10 +442,7 @@ public static Extractor create(String schemaName, String sourceTableName, String false, lockMode), builder.buildSelectMaxPkForBackfill(info.getSourceTableMeta(), info.getPrimaryKeys()), - builder.buildSqlSelectForSample(info.getSourceTableMeta(), info.getPrimaryKeys(), info.getPrimaryKeys(), - false, false), - builder.buildSqlSelectForSample(info.getSourceTableMeta(), info.getPrimaryKeys(), info.getPrimaryKeys(), - true, true), + builder.buildSqlSelectForSample(info.getSourceTableMeta(), info.getPrimaryKeys()), info.getPrimaryKeysId(), sourcePhyTables); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/backfill/MoveTableExtractor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/backfill/MoveTableExtractor.java index 297f49c83..3ef086b86 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/backfill/MoveTableExtractor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/backfill/MoveTableExtractor.java @@ -39,28 +39,28 @@ protected MoveTableExtractor(String schemaName, String sourceTableName, String t long speedMin, long speedLimit, long parallelism, + boolean useBinary, PhyTableOperation planSelectWithMax, PhyTableOperation planSelectWithMin, PhyTableOperation planSelectWithMinAndMax, PhyTableOperation planSelectMaxPk, PhyTableOperation planSelectSample, - PhyTableOperation planSelectMinAndMaxSample, List primaryKeysId, Map> sourcePhyTables) { - super(schemaName, sourceTableName, targetTableName, batchSize, speedMin, speedLimit, parallelism, - planSelectWithMax, planSelectWithMin, planSelectWithMinAndMax, planSelectMaxPk, - planSelectSample, planSelectMinAndMaxSample, primaryKeysId); + super(schemaName, sourceTableName, targetTableName, batchSize, speedMin, speedLimit, parallelism, useBinary, + null, planSelectWithMax, planSelectWithMin, planSelectWithMinAndMax, planSelectMaxPk, + planSelectSample, primaryKeysId); this.sourcePhyTables = sourcePhyTables; } public static Extractor create(String schemaName, String sourceTableName, long batchSize, - long speedMin, long speedLimit, long parallelism, + long speedMin, long speedLimit, long parallelism, boolean useBinary, Map> sourcePhyTables, ExecutionContext ec) { ExtractorInfo info = Extractor.buildExtractorInfo(ec, schemaName, sourceTableName, sourceTableName, true); - final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, ec); + final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, useBinary, ec); SqlSelect.LockMode lockMode = 
SqlSelect.LockMode.SHARED_LOCK; @@ -71,6 +71,7 @@ public static Extractor create(String schemaName, String sourceTableName, long b speedMin, speedLimit, parallelism, + useBinary, builder.buildSelectForBackfill(info.getSourceTableMeta(), info.getTargetTableColumns(), info.getPrimaryKeys(), false, true, lockMode), @@ -81,10 +82,7 @@ public static Extractor create(String schemaName, String sourceTableName, long b info.getPrimaryKeys(), true, true, lockMode), builder.buildSelectMaxPkForBackfill(info.getSourceTableMeta(), info.getPrimaryKeys()), - builder.buildSqlSelectForSample(info.getSourceTableMeta(), info.getPrimaryKeys(), info.getPrimaryKeys(), - false, false), - builder.buildSqlSelectForSample(info.getSourceTableMeta(), info.getPrimaryKeys(), info.getPrimaryKeys(), - true, true), + builder.buildSqlSelectForSample(info.getSourceTableMeta(), info.getPrimaryKeys()), info.getPrimaryKeysId(), sourcePhyTables); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/backfill/MoveTableLoader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/backfill/MoveTableLoader.java index 6f07f2cd0..ce115ab69 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/backfill/MoveTableLoader.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/backfill/MoveTableLoader.java @@ -16,6 +16,7 @@ package com.alibaba.polardbx.executor.scaleout.backfill; +import com.alibaba.polardbx.executor.backfill.Extractor; import com.alibaba.polardbx.executor.backfill.Loader; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.gsi.InsertIndexExecutor; @@ -62,7 +63,7 @@ protected MoveTableLoader(String schemaName, String tableName, SqlInsert insert, BiFunction, ExecutionContext, List> executeFunc, Map sourceTargetGroupMap) { super(schemaName, tableName, insert, insertIgnore, checkerPlan, checkerPkMapping, checkerParamMapping, - executeFunc, true); + executeFunc, true, null); this.sourceTargetGroupMap = sourceTargetGroupMap; } @@ -122,7 +123,7 @@ public static Loader create(String schemaName, String primaryTable, String index final TddlRuleManager tddlRuleManager = optimizerContext.getRuleManager(); final Set filterColumns = Sets.newTreeSet(String::compareToIgnoreCase); final Set primaryKeys = Sets.newTreeSet(String::compareToIgnoreCase); - primaryKeys.addAll(GlobalIndexMeta.getPrimaryKeys(primaryTableMeta)); + primaryKeys.addAll(Extractor.getPrimaryKeys(primaryTableMeta, ec)); filterColumns.addAll(primaryKeys); filterColumns.addAll(tddlRuleManager.getSharedColumns(primaryTable)); filterColumns.addAll(tddlRuleManager.getSharedColumns(indexTable)); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/corrector/MoveTableChecker.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/corrector/MoveTableChecker.java index c2b305bb0..7e72065fc 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/corrector/MoveTableChecker.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/corrector/MoveTableChecker.java @@ -51,7 +51,9 @@ public MoveTableChecker(String schemaName, String tableName, String indexName, TableMeta gsiTableMeta, long batchSize, long speedMin, long speedLimit, - long parallelism, SqlSelect.LockMode primaryLock, + long parallelism, + boolean useBinary, + SqlSelect.LockMode primaryLock, SqlSelect.LockMode gsiLock, PhyTableOperation planSelectWithMaxPrimary, 
PhyTableOperation planSelectWithMaxGsi, @@ -66,7 +68,7 @@ public MoveTableChecker(String schemaName, String tableName, String indexName, Map> targetTargetTables, Map sourceTargetGroupMap) { super(schemaName, tableName, indexName, primaryTableMeta, gsiTableMeta, batchSize, speedMin, speedLimit, - parallelism, + parallelism, useBinary, primaryLock, gsiLock, planSelectWithMaxPrimary, planSelectWithMaxGsi, planSelectWithMinAndMaxPrimary, planSelectWithMinAndMaxGsi, planSelectWithInTemplate, planSelectWithIn, planSelectMaxPk, indexColumns, primaryKeysId, rowComparator); @@ -76,8 +78,8 @@ public MoveTableChecker(String schemaName, String tableName, String indexName, } public static Checker create(String schemaName, String tableName, String indexName, long batchSize, long speedMin, - long speedLimit, - long parallelism, SqlSelect.LockMode primaryLock, SqlSelect.LockMode gsiLock, + long speedLimit, long parallelism, boolean useBinary, + SqlSelect.LockMode primaryLock, SqlSelect.LockMode gsiLock, ExecutionContext ec, Map> sourceTargetTables, Map> targetTargetTables, @@ -95,7 +97,7 @@ public static Checker create(String schemaName, String tableName, String indexNa Extractor.ExtractorInfo info = Extractor.buildExtractorInfo(ec, schemaName, tableName, indexName, false); - final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, ec); + final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, useBinary, ec); final Pair selectWithIn = builder .buildSelectWithInForChecker(baseTableMeta, info.getTargetTableColumns(), info.getPrimaryKeys(), @@ -126,6 +128,7 @@ public static Checker create(String schemaName, String tableName, String indexNa speedMin, speedLimit, parallelism, + useBinary, primaryLock, gsiLock, builder.buildSelectForBackfill(info.getSourceTableMeta(), info.getTargetTableColumns(), diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/fastchecker/MoveTableFastChecker.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/fastchecker/MoveTableFastChecker.java index 0296f4027..20bb70158 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/fastchecker/MoveTableFastChecker.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scaleout/fastchecker/MoveTableFastChecker.java @@ -19,10 +19,9 @@ import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionParams; -import com.alibaba.polardbx.executor.backfill.Extractor; +import com.alibaba.polardbx.executor.ddl.workqueue.BackFillThreadPool; import com.alibaba.polardbx.executor.fastchecker.FastChecker; import com.alibaba.polardbx.executor.gsi.PhysicalPlanBuilder; -import com.alibaba.polardbx.executor.ddl.workqueue.BackFillThreadPool; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; import com.alibaba.polardbx.optimizer.config.table.SchemaManager; @@ -44,7 +43,7 @@ public class MoveTableFastChecker extends FastChecker { public MoveTableFastChecker(String schemaName, String srcLogicalTableName, String dstLogicalTableName, Map> srcPhyDbAndTables, Map> dstPhyDbAndTables, List srcColumns, List dstColumns, List srcPks, - List dstPks, long parallelism, int lockTimeOut, + List dstPks, PhyTableOperation planSelectHashCheckSrc, PhyTableOperation planSelectHashCheckWithUpperBoundSrc, PhyTableOperation planSelectHashCheckWithLowerBoundSrc, @@ 
-55,17 +54,17 @@ public MoveTableFastChecker(String schemaName, String srcLogicalTableName, Strin
 PhyTableOperation planSelectHashCheckWithLowerUpperBoundDst,
 PhyTableOperation planIdleSelectSrc, PhyTableOperation planIdleSelectDst,
 PhyTableOperation planSelectSampleSrc, PhyTableOperation planSelectSampleDst) {
- super(schemaName, schemaName, srcLogicalTableName, dstLogicalTableName, null, srcPhyDbAndTables,
- dstPhyDbAndTables, srcColumns, dstColumns, srcPks, dstPks, parallelism, lockTimeOut, planSelectHashCheckSrc,
+ super(schemaName, schemaName, srcLogicalTableName, dstLogicalTableName, srcPhyDbAndTables,
+ dstPhyDbAndTables, srcColumns, dstColumns, srcPks, dstPks, planSelectHashCheckSrc,
 planSelectHashCheckWithUpperBoundSrc, planSelectHashCheckWithLowerBoundSrc,
 planSelectHashCheckWithLowerUpperBoundSrc, planSelectHashCheckDst, planSelectHashCheckWithUpperBoundDst,
 planSelectHashCheckWithLowerBoundDst, planSelectHashCheckWithLowerUpperBoundDst, planIdleSelectSrc,
 planIdleSelectDst, planSelectSampleSrc, planSelectSampleDst);
 }
- public static FastChecker create(String schemaName, String tableName, Map sourceTargetGroup,
+ public static FastChecker create(String schemaName, String tableName,
 Map> srcPhyDbAndTables,
- Map> dstPhyDbAndTables, long parallelism,
+ Map> dstPhyDbAndTables,
 ExecutionContext ec) {
 final SchemaManager sm = OptimizerContext.getContext(schemaName).getLatestSchemaManager();
 final TableMeta tableMeta = sm.getTable(tableName);
@@ -78,18 +77,12 @@ public static FastChecker create(String schemaName, String tableName, Map pks = FastChecker.getorderedPrimaryKeys(tableMeta, ec);
-
- if (parallelism <= 0) {
- parallelism = Math.max(BackFillThreadPool.getInstance().getCorePoolSize() / 2, 1);
- }
-
- final int lockTimeOut = ec.getParamManager().getInt(ConnectionParams.FASTCHECKER_LOCK_TIMEOUT);
+ final List pks = FastChecker.getorderedPrimaryKeys(tableMeta);
 final PhysicalPlanBuilder builder = new PhysicalPlanBuilder(schemaName, ec);
 return new MoveTableFastChecker(schemaName, tableName, tableName, srcPhyDbAndTables, dstPhyDbAndTables,
- allColumns, allColumns, pks, pks, parallelism, lockTimeOut,
+ allColumns, allColumns, pks, pks,
 builder.buildSelectHashCheckForChecker(tableMeta, allColumns, pks, false, false),
 builder.buildSelectHashCheckForChecker(tableMeta, allColumns, pks, false, true),
 builder.buildSelectHashCheckForChecker(tableMeta, allColumns, pks, true, false),
@@ -103,7 +96,7 @@ public static FastChecker create(String schemaName, String tableName, Map
>>> results = SyncManagerHelper.sync(new FetchRunningScheduleJobsSyncAction(),
- TddlConstants.INFORMATION_SCHEMA);
+ TddlConstants.INFORMATION_SCHEMA, SyncScope.ALL);
 Map> executingJobs = merge(results);
 if (!hasLeadership()) {
 return -1;
@@ -369,6 +374,10 @@ protected Integer invoke() {
 * try safe exit
 */
 SchedulerExecutor esj = SchedulerExecutor.createSchedulerExecutor(job);
+ // might be an expired task
+ if (esj == null) {
+ continue;
+ }
 if (esj.needInterrupted().getKey() && esj.safeExit()) {
 // mark as failed
 ScheduledJobsManager.updateState(scheduleId, fireTime, INTERRUPTED,
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/ScheduledJobsTrigger.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/ScheduledJobsTrigger.java
index 02249bce1..998ba5750 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/ScheduledJobsTrigger.java
+++ 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/ScheduledJobsTrigger.java @@ -29,11 +29,11 @@ public interface ScheduledJobsTrigger { static ScheduledJobsTrigger restoreTrigger(ScheduledJobsRecord record, ScheduledJobsAccessor scheduledJobsAccessor, - FiredScheduledJobsAccessor firedScheduledJobsScanner){ - if(record==null || StringUtils.isEmpty(record.getScheduleType())){ + FiredScheduledJobsAccessor firedScheduledJobsScanner) { + if (record == null || StringUtils.isEmpty(record.getScheduleType())) { return null; } - if(StringUtils.equalsIgnoreCase(record.getScheduleType(), SchedulerType.QUARTZ_CRON.name())){ + if (StringUtils.equalsIgnoreCase(record.getScheduleType(), SchedulerType.QUARTZ_CRON.name())) { return new DefaultQuartzCronTrigger(record, scheduledJobsAccessor, firedScheduledJobsScanner); } @@ -42,19 +42,16 @@ static ScheduledJobsTrigger restoreTrigger(ScheduledJobsRecord record, /** * Get the next fire time of the scheduled job. This method is stateful: its result is derived from the previous fire time. - * @return */ Optional getNextFireTime(); /** * Fire the scheduled job; this updates getNextFireTime. - * @return */ boolean fire(); /** * Fire the scheduled job once immediately, without affecting getNextFireTime. - * @return */ boolean fireOnceNow(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/AutoSplitTableGroupScheduledJob.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/AutoSplitTableGroupScheduledJob.java index 48f6fd5a3..058ed9dd7 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/AutoSplitTableGroupScheduledJob.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/AutoSplitTableGroupScheduledJob.java @@ -40,7 +40,10 @@ import java.util.Locale; import java.util.Map; -import static com.alibaba.polardbx.common.scheduler.FiredScheduledJobState.*; +import static com.alibaba.polardbx.common.scheduler.FiredScheduledJobState.FAILED; +import static com.alibaba.polardbx.common.scheduler.FiredScheduledJobState.QUEUED; +import static com.alibaba.polardbx.common.scheduler.FiredScheduledJobState.RUNNING; +import static com.alibaba.polardbx.common.scheduler.FiredScheduledJobState.SUCCESS; import static com.cronutils.model.CronType.QUARTZ; /** diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/GsiStatisticScheduledJob.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/GsiStatisticScheduledJob.java index 01ab64ec3..7c5a23f87 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/GsiStatisticScheduledJob.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/GsiStatisticScheduledJob.java @@ -24,6 +24,7 @@ import com.alibaba.polardbx.executor.sync.GsiStatisticsSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.gms.scheduler.ExecutableScheduledJob; +import com.alibaba.polardbx.gms.sync.SyncScope; import java.time.ZonedDateTime; @@ -86,7 +87,8 @@ public boolean execute() { protected void persistGsiStatistics() { SyncManagerHelper.sync( - new GsiStatisticsSyncAction(null, null, null, GsiStatisticsSyncAction.WRITE_BACK_ALL_SCHEMA)); + new GsiStatisticsSyncAction(null, null, null, GsiStatisticsSyncAction.WRITE_BACK_ALL_SCHEMA), + SyncScope.ALL); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/OptimizerAlertScheduledJob.java
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/OptimizerAlertScheduledJob.java index 9c05e3cc6..9d544129d 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/OptimizerAlertScheduledJob.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/OptimizerAlertScheduledJob.java @@ -25,6 +25,7 @@ import com.alibaba.polardbx.executor.sync.OptimizerAlertScheduleSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.gms.scheduler.ExecutableScheduledJob; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.google.common.collect.Sets; import org.apache.commons.collections.CollectionUtils; @@ -70,7 +71,7 @@ public boolean execute() { } else { // enable OPTIMIZER_ALERT List>> results = SyncManagerHelper.syncWithDefaultDB( - new OptimizerAlertScheduleSyncAction()); + new OptimizerAlertScheduleSyncAction(), SyncScope.CURRENT_ONLY); StringBuilder sb = new StringBuilder(); long countSum = 0L; Set alertSets = Sets.newHashSet(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/PartitionVisualizerScheduledJob.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/PartitionVisualizerScheduledJob.java index 35f30451e..0914810ff 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/PartitionVisualizerScheduledJob.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/PartitionVisualizerScheduledJob.java @@ -16,8 +16,6 @@ package com.alibaba.polardbx.executor.scheduler.executor; -import java.time.ZonedDateTime; - import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.properties.ConnectionProperties; import com.alibaba.polardbx.common.utils.logger.Logger; @@ -26,9 +24,10 @@ import com.alibaba.polardbx.executor.scheduler.ScheduledJobsManager; import com.alibaba.polardbx.gms.config.impl.MetaDbInstConfigManager; import com.alibaba.polardbx.gms.scheduler.ExecutableScheduledJob; - import org.apache.commons.lang3.StringUtils; +import java.time.ZonedDateTime; + import static com.alibaba.polardbx.common.scheduler.FiredScheduledJobState.FAILED; import static com.alibaba.polardbx.common.scheduler.FiredScheduledJobState.QUEUED; import static com.alibaba.polardbx.common.scheduler.FiredScheduledJobState.SUCCESS; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/PurgeOssFileScheduledJob.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/PurgeOssFileScheduledJob.java index 5a687dd74..903cb2bdd 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/PurgeOssFileScheduledJob.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/PurgeOssFileScheduledJob.java @@ -29,9 +29,6 @@ import com.alibaba.polardbx.gms.engine.FileSystemGroup; import com.alibaba.polardbx.gms.engine.FileSystemManager; import com.alibaba.polardbx.gms.scheduler.ExecutableScheduledJob; -import com.alibaba.polardbx.optimizer.config.server.DefaultServerConfigManager; -import com.alibaba.polardbx.optimizer.config.server.IServerConfigManager; -import com.alibaba.polardbx.optimizer.utils.OptimizerHelper; import org.apache.commons.lang3.StringUtils; import java.time.LocalDateTime; diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/ScheduleJobStarter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/ScheduleJobStarter.java index a7b9a465f..180ed0213 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/ScheduleJobStarter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/ScheduleJobStarter.java @@ -16,6 +16,8 @@ package com.alibaba.polardbx.executor.scheduler.executor; +import com.alibaba.polardbx.common.eventlogger.EventLogger; +import com.alibaba.polardbx.common.eventlogger.EventType; import com.alibaba.polardbx.common.scheduler.SchedulePolicy; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; @@ -96,7 +98,7 @@ protected Integer invoke() { logger.warn("Scheduled Job For STATISTIC_HLL_SKETCH Has Exist"); return 0; } - return scheduledJobsAccessor.insert(scheduledJobsRecord); + return scheduledJobsAccessor.insertIgnoreFail(scheduledJobsRecord); } }.execute(); logger.info(String.format("Init %s Success %s", scheduledJobsRecord.getExecutorType(), count)); @@ -214,13 +216,14 @@ protected Integer invoke() { logger.warn("Scheduled Job For OPTIMIZER_ALERT Has Exist"); return 0; } - return scheduledJobsAccessor.insert(scheduledJobsRecord); + return scheduledJobsAccessor.insertIgnoreFail(scheduledJobsRecord); } }.execute(); logger.info(String.format("Init %s Success %s", scheduledJobsRecord.getExecutorType(), count)); } private static void initCleanLogTableJob() { + // Delete old clean job. String tableSchema = VisualConstants.VISUAL_SCHEMA_NAME; String tableName = VisualConstants.DUAL_TABLE_NAME; String cronExpr = "0 45 * * * ? "; @@ -239,11 +242,40 @@ private static void initCleanLogTableJob() { protected Integer invoke() { List list = scheduledJobsAccessor.queryByExecutorType(scheduledJobsRecord.getExecutorType()); + if (list.isEmpty()) { + logger.warn("Scheduled Job For CLEAN_LOG_TABLE does not exist. Skip deletion."); + return 0; + } + return scheduledJobsAccessor.deleteById(list.get(0).getScheduleId()); + } + }.execute(); + logger.info(String.format("Delete %s Success %s", scheduledJobsRecord.getExecutorType(), count)); + if (count > 1) { + EventLogger.log(EventType.TRX_LOG_ERR, "Delete old CLEAN_LOG_TABLE."); + } + + // Init new clean job. + cronExpr = "0 0/1 * * * ? 
"; + timeZone = "+08:00"; + ScheduledJobsRecord scheduledJobsRecordV2 = ScheduledJobsManager.createQuartzCronJob( + tableSchema, + null, + tableName, + ScheduledJobExecutorType.CLEAN_LOG_TABLE_V2, + cronExpr, + timeZone, + SchedulePolicy.SKIP + ); + count = new ScheduledJobsAccessorDelegate() { + @Override + protected Integer invoke() { + List list = + scheduledJobsAccessor.queryByExecutorType(scheduledJobsRecordV2.getExecutorType()); if (list.size() > 0) { - logger.warn("Scheduled Job For CLEAN_LOG_TABLE Has Exist"); + logger.warn("Scheduled Job For CLEAN_LOG_TABLE_V2 Has Exist"); return 0; } - return scheduledJobsAccessor.insertIgnoreFail(scheduledJobsRecord); + return scheduledJobsAccessor.insertIgnoreFail(scheduledJobsRecordV2); } }.execute(); logger.info(String.format("Init %s Success %s", scheduledJobsRecord.getExecutorType(), count)); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/SchedulerExecutor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/SchedulerExecutor.java index 73269d1cf..dd0d940a4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/SchedulerExecutor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/SchedulerExecutor.java @@ -24,6 +24,7 @@ import com.alibaba.polardbx.executor.scheduler.executor.spm.SPMBaseLineSyncScheduledJob; import com.alibaba.polardbx.executor.scheduler.executor.statistic.StatisticRowCountCollectionScheduledJob; import com.alibaba.polardbx.executor.scheduler.executor.statistic.StatisticSampleCollectionScheduledJob; +import com.alibaba.polardbx.executor.scheduler.executor.trx.CleanLogTableScheduledJob; import com.alibaba.polardbx.gms.config.impl.InstConfUtil; import com.alibaba.polardbx.gms.scheduler.ExecutableScheduledJob; import com.alibaba.polardbx.optimizer.config.server.DefaultServerConfigManager; @@ -31,6 +32,9 @@ import com.alibaba.polardbx.optimizer.utils.OptimizerHelper; import org.apache.commons.lang3.StringUtils; +import java.util.List; +import java.util.Map; + public abstract class SchedulerExecutor { public static SchedulerExecutor createSchedulerExecutor(ExecutableScheduledJob job) { @@ -87,7 +91,7 @@ public static SchedulerExecutor createSchedulerExecutor(ExecutableScheduledJob j } if (StringUtils.equalsIgnoreCase(job.getExecutorType(), - ScheduledJobExecutorType.CLEAN_LOG_TABLE.name())) { + ScheduledJobExecutorType.CLEAN_LOG_TABLE_V2.name())) { return new CleanLogTableScheduledJob(job); } return null; @@ -98,6 +102,11 @@ public void executeBackgroundSql(String sql, String schemaName, InternalTimeZone serverConfigManager.executeBackgroundSql(sql, schemaName, timeZone); } + public List> executeInternalQuery(String sql, String schemaName, InternalTimeZone timeZone) { + IServerConfigManager serverConfigManager = getServerConfigManager(); + return serverConfigManager.executeQuerySql(sql, schemaName, timeZone); + } + public IServerConfigManager getServerConfigManager() { IServerConfigManager serverConfigManager = OptimizerHelper.getServerConfigManager(); if (serverConfigManager == null) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/spm/SPMBaseLineSyncScheduledJob.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/spm/SPMBaseLineSyncScheduledJob.java index f79fbbb82..dad58149c 100644 --- 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/spm/SPMBaseLineSyncScheduledJob.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/spm/SPMBaseLineSyncScheduledJob.java @@ -23,18 +23,22 @@ import com.alibaba.polardbx.executor.scheduler.ScheduledJobsManager; import com.alibaba.polardbx.executor.scheduler.executor.SchedulerExecutor; import com.alibaba.polardbx.executor.sync.BaselineLoadSyncAction; -import com.alibaba.polardbx.executor.sync.BaselineQuerySyncAction; +import com.alibaba.polardbx.executor.sync.BaselineQueryAllSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.gms.config.impl.InstConfUtil; +import com.alibaba.polardbx.gms.metadb.table.BaselineInfoAccessor; import com.alibaba.polardbx.gms.module.LogLevel; +import com.alibaba.polardbx.gms.module.LogPattern; import com.alibaba.polardbx.gms.module.Module; import com.alibaba.polardbx.gms.module.ModuleLogInfo; import com.alibaba.polardbx.gms.scheduler.ExecutableScheduledJob; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.optimizer.planmanager.BaselineInfo; import com.alibaba.polardbx.optimizer.planmanager.PlanManager; -import com.alibaba.polardbx.optimizer.planmanager.PolarDbXSystemTableBaselineInfo; +import com.google.common.collect.Maps; import com.google.common.collect.Sets; +import org.apache.commons.lang.StringUtils; import java.time.ZonedDateTime; import java.util.List; @@ -111,19 +115,54 @@ public boolean execute() { // do the job logger.info("plan manager async load data"); // merge&prune baseline from cluster - Map> fullBaseline = queryBaselineFromCluster(); + Map>> fullBaseline = queryBaselineFromCluster(); // persist baseline - for (Map.Entry> e : fullBaseline.entrySet()) { - String schema = e.getKey(); - e.getValue().values().forEach(b -> PolarDbXSystemTableBaselineInfo.persist(schema, b)); - if (e.getValue().size() != 0) { - remark.append(schema).append(":").append(e.getValue().size()).append(";"); + try (BaselineInfoAccessor baselineInfoAccessor = new BaselineInfoAccessor(true)) { + // for each inst + for (String instId : fullBaseline.keySet()) { + Map> instBaseline = fullBaseline.get(instId); + StringBuilder logStr = new StringBuilder(instId + " "); + // for each schema + for (Map.Entry> e : instBaseline.entrySet()) { + String schema = e.getKey(); + + // for each baseline + for (BaselineInfo baselineInfo : e.getValue().values()) { + + boolean persistPlanStats = false; + if (InstConfUtil.isInMaintenanceTimeWindow()) { + persistPlanStats = true; + } + + baselineInfoAccessor.persist(schema, + baselineInfo.buildBaselineRecord(schema, instId), + baselineInfo.buildPlanRecord(schema, instId), + persistPlanStats); + + } + logStr.append(schema).append(":").append(e.getValue().size()).append(" "); + if (e.getValue().size() != 0) { + remark.append(schema).append(":").append(e.getValue().size()).append(";"); + } + } + ModuleLogInfo.getInstance() + .logRecord(Module.SPM, LogPattern.PROCESS_END, + new String[] {"spm merge baseline", logStr.toString()}, + LogLevel.NORMAL); } + } catch (Exception e) { + ModuleLogInfo.getInstance() + .logRecord( + Module.SPM, + UNEXPECTED, + new String[] {BASELINE_SYNC + "," + fireTime, e.getMessage()}, + CRITICAL, + e); } // sync merged baseline to cluster - syncBaseLineInfoAndPlanInfo(); + SyncManagerHelper.syncWithDefaultDB(new BaselineLoadSyncAction(), SyncScope.ALL); 
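+ // BaselineLoadSyncAction with SyncScope.ALL asks every node to reload the persisted baselines, so the merged result takes effect cluster-wide.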
ModuleLogInfo.getInstance() .logRecord( Module.SPM, @@ -147,21 +186,34 @@ public boolean execute() { } } - private Map> queryBaselineFromCluster() { - List>> results = SyncManagerHelper.syncWithDefaultDB(new BaselineQuerySyncAction()); + private Map>> queryBaselineFromCluster() { + List>> results = SyncManagerHelper.syncWithDefaultDB(new BaselineQueryAllSyncAction(), + SyncScope.ALL); - Map> current = PlanManager.getInstance().getBaselineMap(); + Map>> instSchemaSqlBaselineMap = Maps.newConcurrentMap(); // Node for (List> nodeRows : results) { if (nodeRows == null) { continue; } Map row = nodeRows.get(0); + if (!row.containsKey("inst_id")) { + // some nodes might not have updated to this version yet + continue; + } + String instId = (String) row.get("inst_id"); String baselines = (String) row.get("baselines"); Map> temp = PlanManager.getBaselineFromJson(baselines); - mergeBaseline(current, temp); + + if (instSchemaSqlBaselineMap.containsKey(instId)) { + Map> current = instSchemaSqlBaselineMap.get(instId); + mergeBaseline(current, temp); + } else { + instSchemaSqlBaselineMap.put(instId, temp); + } + } - return current; + return instSchemaSqlBaselineMap; } /** @@ -246,6 +298,5 @@ private void errorExit(long scheduleId, long fireTime, String remark, String err } private void syncBaseLineInfoAndPlanInfo() { - SyncManagerHelper.syncWithDefaultDB(new BaselineLoadSyncAction()); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/statistic/StatisticHllScheduledJob.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/statistic/StatisticHllScheduledJob.java index b1b832fdd..0f77778ed 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/statistic/StatisticHllScheduledJob.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/statistic/StatisticHllScheduledJob.java @@ -17,6 +17,8 @@ package com.alibaba.polardbx.executor.scheduler.executor.statistic; import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.properties.ConnectionProperties; import com.alibaba.polardbx.common.utils.GeneralUtil; @@ -32,9 +34,12 @@ import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.gms.topology.SystemDbHelper; import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticManager; +import com.alibaba.polardbx.optimizer.optimizeralert.OptimizerAlertUtil; import com.google.common.collect.Lists; import org.apache.commons.lang.StringUtils; +import org.glassfish.jersey.internal.guava.Sets; import java.time.ZonedDateTime; import java.util.ArrayList; @@ -70,6 +75,7 @@ public class StatisticHllScheduledJob extends SchedulerExecutor { private final ExecutableScheduledJob executableScheduledJob; + private boolean fromScheduleJob = true; public StatisticHllScheduledJob(final ExecutableScheduledJob executableScheduledJob) { this.executableScheduledJob = executableScheduledJob; @@ -83,6 +89,12 @@ public boolean execute() { String remark = ""; try { // check conf + // test code + boolean interruptedTest = InstConfUtil.getBool(ConnectionParams.ALERT_STATISTIC_INTERRUPT); + if 
(interruptedTest) { + throw new TddlRuntimeException(ErrorCode.ERR_STATISTIC_JOB_INTERRUPTED, + "statistic job is interrupted by alert test"); + } boolean enableStatisticBackground = InstConfUtil.getBool(ConnectionParams.ENABLE_BACKGROUND_STATISTIC_COLLECTION); if (!enableStatisticBackground) { @@ -99,18 +111,21 @@ public boolean execute() { return succeedExit(scheduleId, fireTime, remark); } - //mark as RUNNING - boolean casSuccess = - ScheduledJobsManager.casStateWithStartTime(scheduleId, fireTime, QUEUED, RUNNING, startTime); - if (!casSuccess) { - ModuleLogInfo.getInstance() - .logRecord( - Module.SCHEDULE_JOB, - STATE_CHANGE_FAIL, - new String[] {STATISTIC_HLL_SKETCH + "," + fireTime, QUEUED.name(), RUNNING.name()}, - WARNING); - return false; + if (fromScheduleJob) { + //mark as RUNNING + boolean casSuccess = + ScheduledJobsManager.casStateWithStartTime(scheduleId, fireTime, QUEUED, RUNNING, startTime); + if (!casSuccess) { + ModuleLogInfo.getInstance() + .logRecord( + Module.SCHEDULE_JOB, + STATE_CHANGE_FAIL, + new String[] {STATISTIC_HLL_SKETCH + "," + fireTime, QUEUED.name(), RUNNING.name()}, + WARNING); + return false; + } } + List schemas = DbInfoManager.getInstance().getDbList(); ModuleLogInfo.getInstance() .logRecord( @@ -130,8 +145,15 @@ public boolean execute() { if (SystemDbHelper.isDBBuildIn(schema)) { continue; } + if (!OptimizerContext.getActiveSchemaNames().contains(schema)) { + continue; + } - Set logicalTableSet = StatisticManager.getInstance().getTableNamesCollected(schema); + Set logicalTableSet = Sets.newHashSet(); + for (TableMeta tableMeta : OptimizerContext.getContext(schema).getLatestSchemaManager() + .getAllUserTables()) { + logicalTableSet.add(tableMeta.getTableName().toLowerCase()); + } long start = System.currentTimeMillis(); List toRemoveList = Lists.newLinkedList(); for (String logicalTableName : logicalTableSet) { @@ -265,18 +287,32 @@ private boolean testSketchPointCheck() { } private boolean succeedExit(long scheduleId, long fireTime, String remark) { + if (!fromScheduleJob) { + return true; + } long finishTime = System.currentTimeMillis() / 1000; //mark as SUCCESS return ScheduledJobsManager.casStateWithFinishTime(scheduleId, fireTime, RUNNING, SUCCESS, finishTime, remark); } private void errorExit(long scheduleId, long fireTime, String error) { - //mark as fail - ScheduledJobsManager.updateState(scheduleId, fireTime, FAILED, null, error); + if (fromScheduleJob) { + //mark as fail + ScheduledJobsManager.updateState(scheduleId, fireTime, FAILED, null, error); + } + + OptimizerAlertUtil.statisticErrorAlert(); } @Override public Pair needInterrupted() { + if (!fromScheduleJob) { + return Pair.of(false, "not from schedule job"); + } return ExecUtils.needSketchInterrupted(); } + + public void setFromScheduleJob(boolean fromScheduleJob) { + this.fromScheduleJob = fromScheduleJob; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/statistic/StatisticRowCountCollectionScheduledJob.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/statistic/StatisticRowCountCollectionScheduledJob.java index 565aabe3b..400b80467 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/statistic/StatisticRowCountCollectionScheduledJob.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/statistic/StatisticRowCountCollectionScheduledJob.java @@ -29,7 +29,6 @@ import com.alibaba.polardbx.gms.module.ModuleLogInfo; import 
com.alibaba.polardbx.gms.scheduler.ExecutableScheduledJob; import com.alibaba.polardbx.gms.tablegroup.TableGroupLocation; -import com.alibaba.polardbx.gms.topology.DbTopologyManager; import com.alibaba.polardbx.gms.topology.GroupDetailInfoExRecord; import com.alibaba.polardbx.gms.topology.SystemDbHelper; import com.alibaba.polardbx.optimizer.OptimizerContext; @@ -43,8 +42,6 @@ import com.google.common.collect.Maps; import com.google.common.collect.Sets; -import java.sql.ResultSet; -import java.sql.SQLException; import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.HashMap; @@ -64,6 +61,6 @@ import static com.alibaba.polardbx.executor.gms.util.StatisticUtils.sumRowCount; import static com.alibaba.polardbx.executor.gms.util.StatisticUtils.isFileStore; import static com.alibaba.polardbx.executor.gms.util.StatisticUtils.getFileStoreStatistic; import static com.alibaba.polardbx.executor.utils.failpoint.FailPointKey.FP_INJECT_IGNORE_INTERRUPTED_TO_STATISTIC_SCHEDULE_JOB; import static com.alibaba.polardbx.gms.module.LogLevel.CRITICAL; import static com.alibaba.polardbx.gms.module.LogLevel.NORMAL; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/statistic/StatisticSampleCollectionScheduledJob.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/statistic/StatisticSampleCollectionScheduledJob.java index 527ed6e49..34ae3f554 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/statistic/StatisticSampleCollectionScheduledJob.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/statistic/StatisticSampleCollectionScheduledJob.java @@ -17,15 +17,17 @@ package com.alibaba.polardbx.executor.scheduler.executor.statistic; import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.properties.ConnectionProperties; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.common.utils.timezone.InternalTimeZone; +import com.alibaba.polardbx.common.utils.timezone.TimeZoneUtils; import com.alibaba.polardbx.executor.gms.util.StatisticUtils; import com.alibaba.polardbx.executor.scheduler.ScheduledJobsManager; import com.alibaba.polardbx.executor.scheduler.executor.SchedulerExecutor; -import com.alibaba.polardbx.executor.sync.SyncManagerHelper; -import com.alibaba.polardbx.executor.sync.UpdateStatisticSyncAction; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.gms.config.impl.InstConfUtil; import com.alibaba.polardbx.gms.module.Module; @@ -34,39 +36,36 @@ import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.gms.topology.SystemDbHelper; import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticManager; import com.alibaba.polardbx.optimizer.optimizeralert.OptimizerAlertUtil; import com.google.common.collect.Lists; +import org.apache.commons.lang.StringUtils; 
+import org.glassfish.jersey.internal.guava.Sets; import org.apache.commons.collections.CollectionUtils; import java.time.ZonedDateTime; import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Set; -import java.util.stream.Collectors; import static com.alibaba.polardbx.common.scheduler.FiredScheduledJobState.FAILED; import static com.alibaba.polardbx.common.scheduler.FiredScheduledJobState.QUEUED; import static com.alibaba.polardbx.common.scheduler.FiredScheduledJobState.RUNNING; import static com.alibaba.polardbx.common.scheduler.FiredScheduledJobState.SUCCESS; -import static com.alibaba.polardbx.executor.gms.util.StatisticUtils.collectRowCount; -import static com.alibaba.polardbx.executor.gms.util.StatisticUtils.sampleTable; -import static com.alibaba.polardbx.executor.gms.util.StatisticUtils.sketchTable; import static com.alibaba.polardbx.executor.utils.failpoint.FailPointKey.FP_INJECT_IGNORE_INTERRUPTED_TO_STATISTIC_SCHEDULE_JOB; import static com.alibaba.polardbx.gms.module.LogLevel.CRITICAL; import static com.alibaba.polardbx.gms.module.LogLevel.NORMAL; import static com.alibaba.polardbx.gms.module.LogLevel.WARNING; import static com.alibaba.polardbx.gms.module.LogPattern.INTERRUPTED; import static com.alibaba.polardbx.gms.module.LogPattern.NOT_ENABLED; -import static com.alibaba.polardbx.gms.module.LogPattern.PROCESSING; import static com.alibaba.polardbx.gms.module.LogPattern.PROCESS_END; import static com.alibaba.polardbx.gms.module.LogPattern.PROCESS_START; -import static com.alibaba.polardbx.gms.module.LogPattern.REMOVE; import static com.alibaba.polardbx.gms.module.LogPattern.STATE_CHANGE_FAIL; import static com.alibaba.polardbx.gms.module.LogPattern.UNEXPECTED; import static com.alibaba.polardbx.gms.scheduler.ScheduledJobExecutorType.STATISTIC_SAMPLE_SKETCH; -import static com.alibaba.polardbx.optimizer.config.table.statistic.StatisticUtils.DEFAULT_SAMPLE_SIZE; -import static com.alibaba.polardbx.optimizer.config.table.statistic.StatisticUtils.getColumnMetas; /** * statistic sample job @@ -74,10 +73,12 @@ * @author fangwu */ public class StatisticSampleCollectionScheduledJob extends SchedulerExecutor { - - public static final int DATA_MAX_LEN = 128; + public static final String CHECK_SQL = "select count(distinct schema_name, table_name, column_name, table_rows, " + + "ndv, ndv_source, topn, histogram, sample_rate) as count from information_schema.statistics_data"; + public static final String GROUP_BY = " group by host"; private final ExecutableScheduledJob executableScheduledJob; + private boolean fromScheduleJob = true; public StatisticSampleCollectionScheduledJob(final ExecutableScheduledJob executableScheduledJob) { this.executableScheduledJob = executableScheduledJob; @@ -89,8 +90,15 @@ public boolean execute() { long fireTime = executableScheduledJob.getFireTime(); long startTime = ZonedDateTime.now().toEpochSecond(); String remark = ""; + List schemas = null; try { // check conf + // test code + boolean interruptedTest = InstConfUtil.getBool(ConnectionParams.ALERT_STATISTIC_INTERRUPT); + if (interruptedTest) { + throw new TddlRuntimeException(ErrorCode.ERR_STATISTIC_JOB_INTERRUPTED, + "statistic job is interrupted by alert test"); + } boolean enableStatisticBackground = InstConfUtil.getBool(ConnectionParams.ENABLE_BACKGROUND_STATISTIC_COLLECTION); if (!enableStatisticBackground) { @@ -107,19 +115,22 @@ public boolean execute() { return succeedExit(scheduleId, fireTime, remark); } - //mark as RUNNING - 
boolean casSuccess = - ScheduledJobsManager.casStateWithStartTime(scheduleId, fireTime, QUEUED, RUNNING, startTime); - if (!casSuccess) { - ModuleLogInfo.getInstance() - .logRecord( - Module.SCHEDULE_JOB, - STATE_CHANGE_FAIL, - new String[] {STATISTIC_SAMPLE_SKETCH + "," + fireTime, QUEUED.name(), RUNNING.name()}, - WARNING); - return false; + if (fromScheduleJob) { + //mark as RUNNING + boolean casSuccess = + ScheduledJobsManager.casStateWithStartTime(scheduleId, fireTime, QUEUED, RUNNING, startTime); + if (!casSuccess && fromScheduleJob) { + ModuleLogInfo.getInstance() + .logRecord( + Module.SCHEDULE_JOB, + STATE_CHANGE_FAIL, + new String[] {STATISTIC_SAMPLE_SKETCH + "," + fireTime, QUEUED.name(), RUNNING.name()}, + WARNING); + return false; + } } - List schemas = DbInfoManager.getInstance().getDbList(); + + schemas = DbInfoManager.getInstance().getDbList(); ModuleLogInfo.getInstance() .logRecord( Module.STATISTICS, @@ -129,105 +140,102 @@ public boolean execute() { "schemas:" + schemas }, NORMAL); - List criticalExceptions = new ArrayList<>(); - for (String schema : schemas) { - if (SystemDbHelper.isDBBuildIn(schema)) { - continue; - } + boolean statisticInconsistentTest = InstConfUtil.getBool(ConnectionParams.ALERT_STATISTIC_INCONSISTENT); + + if (!statisticInconsistentTest) { + for (String schema : schemas) { + if (SystemDbHelper.isDBBuildIn(schema)) { + continue; + } + if (!OptimizerContext.getActiveSchemaNames().contains(schema)) { + continue; + } + + Set logicalTableSet = Sets.newHashSet(); + for (TableMeta tableMeta : OptimizerContext.getContext(schema).getLatestSchemaManager() + .getAllUserTables()) { + logicalTableSet.add(tableMeta.getTableName().toLowerCase()); + } + long start = System.currentTimeMillis(); + List toRemoveList = Lists.newLinkedList(); + for (String logicalTableName : logicalTableSet) { + try { + // check table if exists + OptimizerContext optimizerContext = OptimizerContext.getContext(schema); + if (optimizerContext == null || + optimizerContext.getLatestSchemaManager().getTableWithNull(logicalTableName) == null) { + if (logicalTableName != null) { + toRemoveList.add(logicalTableName); + } + continue; + } - Set logicalTableSet = StatisticManager.getInstance().getTableNamesCollected(schema); - long start = System.currentTimeMillis(); - List toRemoveList = Lists.newLinkedList(); - for (String logicalTableName : logicalTableSet) { - try { - // check table if exists - if (OptimizerContext.getContext(schema).getLatestSchemaManager() - .getTableWithNull(logicalTableName) == null) { - if (logicalTableName != null) { - toRemoveList.add(logicalTableName); + // skip oss table sample + if (StatisticUtils.isFileStore(schema, logicalTableName)) { + continue; } - continue; - } - // skip oss table sample - if (StatisticUtils.isFileStore(schema, logicalTableName)) { - continue; - } + Pair pair = needInterrupted(); + if (pair.getKey()) { + ModuleLogInfo.getInstance() + .logRecord( + Module.STATISTICS, + INTERRUPTED, + new String[] { + STATISTIC_SAMPLE_SKETCH + "," + fireTime, + pair.getValue() + }, + NORMAL); + return succeedExit(scheduleId, fireTime, "being interrupted"); + } + long startPerTable = System.currentTimeMillis(); + StatisticManager.CacheLine c = + StatisticManager.getInstance().getCacheLine(schema, logicalTableName); + if (c.hasExpireForCollection() || testSamplePointCheck()) { + // sample + StatisticUtils.sampleOneTable(schema, logicalTableName); + OptimizerAlertUtil.statisticsAlert(schema, logicalTableName, + StatisticManager.getInstance().getCacheLine(schema, 
logicalTableName)); + } - Pair pair = needInterrupted(); - if (pair.getKey()) { + long endPerTable = System.currentTimeMillis(); ModuleLogInfo.getInstance() .logRecord( Module.STATISTICS, - INTERRUPTED, + PROCESS_END, new String[] { - STATISTIC_SAMPLE_SKETCH + "," + fireTime, - pair.getValue() + "auto analyze " + STATISTIC_SAMPLE_SKETCH + "," + schema + "," + + logicalTableName, + " consuming " + (endPerTable - startPerTable) / 1000.0 + " seconds" }, NORMAL); - return succeedExit(scheduleId, fireTime, "being interrupted"); + } catch (Throwable t) { + criticalExceptions.add(new TddlNestableRuntimeException( + String.format("%s.%s failed to finish sample job", schema, logicalTableName), t)); } - long startPerTable = System.currentTimeMillis(); - StatisticManager.CacheLine c = - StatisticManager.getInstance().getCacheLine(schema, logicalTableName); - if (c.hasExpire() || testSamplePointCheck()) { - // sample - collectRowCount(schema, logicalTableName); - sampleTable(schema, logicalTableName); - // check cache line status to make sure sample process was succ - StatisticManager.CacheLine - cacheLine = StatisticManager.getInstance().getCacheLine(schema, logicalTableName); - if ((cacheLine.getHistogramMap() == null || cacheLine.getHistogramMap().isEmpty()) - && CollectionUtils.isEmpty(cacheLine.getTopNColumns())) { - continue; - } - // persist - StatisticUtils.persistStatistic(schema, logicalTableName, true); - // sync other nodes - SyncManagerHelper.syncWithDefaultDB( - new UpdateStatisticSyncAction( - schema, - logicalTableName, - cacheLine - )); - OptimizerAlertUtil.statisticsAlert(schema, logicalTableName, - StatisticManager.getInstance().getCacheLine(schema, logicalTableName)); - } - - long endPerTable = System.currentTimeMillis(); - ModuleLogInfo.getInstance() - .logRecord( - Module.STATISTICS, - PROCESS_END, - new String[] { - "auto analyze " + STATISTIC_SAMPLE_SKETCH + "," + schema + "," + logicalTableName, - " consuming " + (endPerTable - startPerTable) / 1000.0 + " seconds" - }, - NORMAL); - } catch (Throwable t) { - criticalExceptions.add(new TddlNestableRuntimeException( - String.format("%s.%s failed to finish sample job", schema, logicalTableName), t)); } - } - // remove table statistic info if not exists - StatisticManager.getInstance().removeLogicalTableList(schema, toRemoveList); + // remove table statistic info if not exists + StatisticManager.getInstance().removeLogicalTableList(schema, toRemoveList); - long end = System.currentTimeMillis(); - ModuleLogInfo.getInstance() - .logRecord( - Module.STATISTICS, - PROCESS_END, - new String[] { - "auto analyze " + STATISTIC_SAMPLE_SKETCH + "," + schema + ",table size " - + logicalTableSet.size(), - " consuming " + (end - start) / 1000.0 + " seconds" - }, - NORMAL); + long end = System.currentTimeMillis(); + ModuleLogInfo.getInstance() + .logRecord( + Module.STATISTICS, + PROCESS_END, + new String[] { + "auto analyze " + STATISTIC_SAMPLE_SKETCH + "," + schema + ",table size " + + logicalTableSet.size(), + " consuming " + (end - start) / 1000.0 + " seconds" + }, + NORMAL); + } } + if (!criticalExceptions.isEmpty()) { throw GeneralUtil.mergeException(criticalExceptions); } + checkStatisticConsistent(); return succeedExit(scheduleId, fireTime, remark); } catch (Throwable t) { ModuleLogInfo.getInstance() @@ -245,6 +253,60 @@ public boolean execute() { } } + private void checkStatisticConsistent() { + // check if the statistics information is complete + // use information_schema.statistics_data to get full statistic info + // check if 
statistic is consistent between memory and metadb + final String timeZoneStr = executableScheduledJob.getTimeZone(); + final InternalTimeZone timeZone = TimeZoneUtils.convertFromMySqlTZ(timeZoneStr); + + List> rs = + executeInternalQuery(CHECK_SQL, SystemDbHelper.INFO_SCHEMA_DB_NAME, timeZone); + Long count = (Long) rs.get(0).get("count"); + if (count == null) { + ModuleLogInfo.getInstance().logRecord(Module.STATISTICS, UNEXPECTED, new String[] { + "Statistic inconsistent check", + "count is null" + }, CRITICAL); + // comparing against a null count below would throw NPE, so stop the check here + return; + } + rs = executeInternalQuery(CHECK_SQL + GROUP_BY, SystemDbHelper.INFO_SCHEMA_DB_NAME, timeZone); + for (Map map : rs) { + Long curCount = (Long) map.get("count"); + if (curCount == null || + curCount.longValue() != count.longValue()) { + ModuleLogInfo.getInstance().logRecord(Module.STATISTICS, UNEXPECTED, new String[] { + "Statistic inconsistent check", + "count mismatch:" + count + "," + curCount + }, CRITICAL); + OptimizerAlertUtil.statisticInconsistentAlert(); + break; + } + } + ModuleLogInfo.getInstance().logRecord(Module.STATISTICS, PROCESS_END, new String[] { + "Statistic inconsistent check", + "count:" + count + }, NORMAL); + } + + public Set getTableName(String schema) { + if (StringUtils.isEmpty(schema)) { + return Collections.EMPTY_SET; + } + if (SystemDbHelper.isDBBuildIn(schema)) { + return Collections.EMPTY_SET; + } + OptimizerContext oc = OptimizerContext.getContext(schema); + if (oc == null) { + // schema might not exist anymore + return Collections.EMPTY_SET; + } + if (!DbInfoManager.getInstance().isNewPartitionDb(schema)) { + return oc.getRuleManager().mergeTableRule(Collections.EMPTY_LIST); + } else { + return oc.getPartitionInfoManager().getPartitionTables(); + } + } + private int testSampleTime = 1; private boolean testSamplePointCheck() { @@ -275,18 +337,28 @@ private boolean testSketchPointCheck() { } private boolean succeedExit(long scheduleId, long fireTime, String remark) { + if (!fromScheduleJob) { + return true; + } long finishTime = System.currentTimeMillis() / 1000; //mark as SUCCESS return ScheduledJobsManager.casStateWithFinishTime(scheduleId, fireTime, RUNNING, SUCCESS, finishTime, remark); } private void errorExit(long scheduleId, long fireTime, String error) { - //mark as fail - ScheduledJobsManager.updateState(scheduleId, fireTime, FAILED, null, error); + if (fromScheduleJob) { + //mark as fail + ScheduledJobsManager.updateState(scheduleId, fireTime, FAILED, null, error); + } + // alert + OptimizerAlertUtil.statisticErrorAlert(); } @Override public Pair needInterrupted() { + if (!fromScheduleJob) { + return Pair.of(false, "not from schedule job"); + } if (FailPoint.isKeyEnable(FP_INJECT_IGNORE_INTERRUPTED_TO_STATISTIC_SCHEDULE_JOB)) { return Pair.of(false, "fail point"); } @@ -297,4 +369,8 @@ public Pair needInterrupted() { } return Pair.of(!inMaintenanceWindow(), "maintenance window"); } + + public void setFromScheduleJob(boolean fromScheduleJob) { + this.fromScheduleJob = fromScheduleJob; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/trx/CleanLogTableScheduledJob.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/trx/CleanLogTableScheduledJob.java index 24b32f7c1..afc74cf5e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/trx/CleanLogTableScheduledJob.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/trx/CleanLogTableScheduledJob.java @@ -16,23 +16,11 @@ package 
com.alibaba.polardbx.executor.scheduler.executor.trx; -import com.alibaba.polardbx.common.IdGenerator; -import com.alibaba.polardbx.common.async.AsyncTask; -import com.alibaba.polardbx.common.constants.SystemTables; -import com.alibaba.polardbx.common.exception.TddlRuntimeException; -import com.alibaba.polardbx.common.exception.code.ErrorCode; -import com.alibaba.polardbx.common.properties.ConnectionParams; -import com.alibaba.polardbx.common.utils.AsyncUtils; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.common.utils.logger.MDC; -import com.alibaba.polardbx.executor.common.ExecutorContext; import com.alibaba.polardbx.executor.scheduler.ScheduledJobsManager; import com.alibaba.polardbx.executor.scheduler.executor.SchedulerExecutor; -import com.alibaba.polardbx.executor.spi.ITopologyExecutor; -import com.alibaba.polardbx.gms.config.impl.InstConfUtil; -import com.alibaba.polardbx.gms.ha.impl.StorageHaManager; -import com.alibaba.polardbx.gms.ha.impl.StorageInstHaContext; import com.alibaba.polardbx.gms.module.Module; import com.alibaba.polardbx.gms.module.ModuleLogInfo; import com.alibaba.polardbx.gms.scheduler.ExecutableScheduledJob; @@ -43,13 +31,7 @@ import java.sql.SQLException; import java.sql.Statement; import java.time.ZonedDateTime; -import java.util.ArrayList; -import java.util.List; import java.util.Map; -import java.util.Set; -import java.util.concurrent.Future; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; import static com.alibaba.polardbx.common.scheduler.FiredScheduledJobState.FAILED; import static com.alibaba.polardbx.common.scheduler.FiredScheduledJobState.QUEUED; @@ -59,7 +41,7 @@ import static com.alibaba.polardbx.gms.module.LogLevel.WARNING; import static com.alibaba.polardbx.gms.module.LogPattern.STATE_CHANGE_FAIL; import static com.alibaba.polardbx.gms.module.LogPattern.UNEXPECTED; -import static com.alibaba.polardbx.gms.scheduler.ScheduledJobExecutorType.CLEAN_LOG_TABLE; +import static com.alibaba.polardbx.gms.scheduler.ScheduledJobExecutorType.CLEAN_LOG_TABLE_V2; import static com.alibaba.polardbx.gms.topology.SystemDbHelper.DEFAULT_DB_NAME; /** @@ -80,9 +62,8 @@ public boolean execute() { long fireTime = executableScheduledJob.getFireTime(); long startTime = ZonedDateTime.now().toEpochSecond(); - final Map savedMdcContext = MDC.getCopyOfContextMap(); + StringBuilder remark = new StringBuilder(); try { - MDC.put(MDC.MDC_KEY_APP, DEFAULT_DB_NAME); // Mark as RUNNING. boolean casSuccess = ScheduledJobsManager.casStateWithStartTime(scheduleId, fireTime, QUEUED, RUNNING, startTime); @@ -91,39 +72,45 @@ public boolean execute() { .logRecord( Module.SCHEDULE_JOB, STATE_CHANGE_FAIL, - new String[] {CLEAN_LOG_TABLE + "," + fireTime, QUEUED.name(), RUNNING.name()}, + new String[] {CLEAN_LOG_TABLE_V2 + "," + fireTime, QUEUED.name(), RUNNING.name()}, WARNING); return false; } - final int purgeInterval = InstConfUtil.getInt(ConnectionParams.PURGE_TRANS_INTERVAL); - final int purgeBefore = InstConfUtil.getInt(ConnectionParams.PURGE_TRANS_BEFORE); - - int purgeCount = CleanLogTableTask.run(purgeBefore, purgeInterval * 2); + final Map savedMdcContext = MDC.getCopyOfContextMap(); + long purge = 0; + try { + MDC.put(MDC.MDC_KEY_APP, DEFAULT_DB_NAME); + purge = CleanLogTableTask.run(false, remark); + } finally { + MDC.setContextMap(savedMdcContext); + } long finishTime = System.currentTimeMillis() / 1000; - // Mark as SUCCESS. 
- String remark = "Log clean task done. Before: " + purgeBefore + ", Next: " + purgeInterval * 2 - + ", Purge trans: " + purgeCount; + remark.append("Cost time ") + .append(finishTime - startTime) + .append("s. ") + .append("Purged rows ") + .append(purge) + .append("."); return ScheduledJobsManager - .casStateWithFinishTime(scheduleId, fireTime, RUNNING, SUCCESS, finishTime, remark); + .casStateWithFinishTime(scheduleId, fireTime, RUNNING, SUCCESS, finishTime, remark.toString()); } catch (Throwable t) { ModuleLogInfo.getInstance() .logRecord( Module.TRX, UNEXPECTED, new String[] { - CLEAN_LOG_TABLE + "," + fireTime, + CLEAN_LOG_TABLE_V2 + "," + fireTime, t.getMessage() }, CRITICAL, t ); - String remark = "Clean log table task error: " + t.getMessage(); - ScheduledJobsManager.updateState(scheduleId, fireTime, FAILED, remark, t.getMessage()); + remark.append("Clean log table task error: ") + .append(t.getMessage()); + ScheduledJobsManager.updateState(scheduleId, fireTime, FAILED, remark.toString(), t.getMessage()); return false; - } finally { - MDC.setContextMap(savedMdcContext); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/trx/CleanLogTableTask.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/trx/CleanLogTableTask.java index 6e1653206..33ca52901 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/trx/CleanLogTableTask.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/scheduler/executor/trx/CleanLogTableTask.java @@ -16,135 +16,481 @@ package com.alibaba.polardbx.executor.scheduler.executor.trx; -import com.alibaba.polardbx.common.IdGenerator; import com.alibaba.polardbx.common.async.AsyncTask; import com.alibaba.polardbx.common.constants.SystemTables; +import com.alibaba.polardbx.common.eventlogger.EventLogger; +import com.alibaba.polardbx.common.eventlogger.EventType; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.properties.DynamicConfig; +import com.alibaba.polardbx.common.trx.TrxLogTableConstants; import com.alibaba.polardbx.common.utils.AsyncUtils; -import com.alibaba.polardbx.common.utils.LockUtil; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.druid.util.lang.Consumer; import com.alibaba.polardbx.executor.common.ExecutorContext; import com.alibaba.polardbx.executor.spi.ITopologyExecutor; +import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.gms.ha.impl.StorageHaManager; import com.alibaba.polardbx.gms.ha.impl.StorageInstHaContext; +import com.alibaba.polardbx.gms.metadb.trx.TrxLogStatusAccessor; +import com.alibaba.polardbx.gms.metadb.trx.TrxLogStatusRecord; import com.alibaba.polardbx.gms.topology.DbTopologyManager; +import com.alibaba.polardbx.gms.util.MetaDbUtil; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.Future; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; 
+import static com.alibaba.polardbx.common.trx.TrxLogTableConstants.CREATE_GLOBAL_TX_TABLE_V2; +import static com.alibaba.polardbx.common.trx.TrxLogTableConstants.CREATE_GLOBAL_TX_TABLE_V2_TMP; +import static com.alibaba.polardbx.common.trx.TrxLogTableConstants.DROP_GLOBAL_TX_TABLE_V2_ARCHIVE; +import static com.alibaba.polardbx.common.trx.TrxLogTableConstants.DROP_GLOBAL_TX_TABLE_V2_TMP; +import static com.alibaba.polardbx.common.trx.TrxLogTableConstants.FORCE_RENAME_GLOBAL_TX_TABLE_V2; +import static com.alibaba.polardbx.common.trx.TrxLogTableConstants.SELECT_MAX_TX_ID_IN_ARCHIVE; +import static com.alibaba.polardbx.common.trx.TrxLogTableConstants.SELECT_TABLE_ROWS_V2; +import static com.alibaba.polardbx.common.trx.TrxLogTableConstants.SET_DISTRIBUTED_TRX_ID; +import static com.alibaba.polardbx.common.trx.TrxLogTableConstants.SHOW_ALL_GLOBAL_TX_TABLE_V2; +import static com.alibaba.polardbx.common.trx.TrxLogTableConstants.SWITCH_GLOBAL_TX_TABLE_V2; +import static com.alibaba.polardbx.common.utils.LockUtil.wrapWithLockWaitTimeout; +import static com.alibaba.polardbx.executor.utils.ExecUtils.scanRecoveredTrans; +import static com.alibaba.polardbx.executor.utils.failpoint.FailPointKey.FP_TRX_LOG_TB_FAILED_BEFORE_CREATE_TMP; +import static com.alibaba.polardbx.executor.utils.failpoint.FailPointKey.FP_TRX_LOG_TB_FAILED_BEFORE_DROP_TABLE; +import static com.alibaba.polardbx.executor.utils.failpoint.FailPointKey.FP_TRX_LOG_TB_FAILED_BEFORE_SWITCH_TABLE; +import static com.alibaba.polardbx.executor.utils.failpoint.FailPointKey.FP_TRX_LOG_TB_FAILED_DURING_CREATE_TMP; +import static com.alibaba.polardbx.executor.utils.failpoint.FailPointKey.FP_TRX_LOG_TB_FAILED_DURING_DROP_TABLE; +import static com.alibaba.polardbx.executor.utils.failpoint.FailPointKey.FP_TRX_LOG_TB_FAILED_DURING_SWITCH_TABLE; +import static com.alibaba.polardbx.gms.topology.SystemDbHelper.CDC_DB_NAME; import static com.alibaba.polardbx.gms.topology.SystemDbHelper.DEFAULT_DB_NAME; /** - * @author yaozhlili + * @author yaozhili */ public class CleanLogTableTask { private static final Logger logger = LoggerFactory.getLogger(CleanLogTableTask.class); - private static final String GLOBAL_ASYNC_COMMIT_TX_LOG_TABLE = SystemTables.POLARDBX_ASYNC_COMMIT_TX_LOG_TABLE; - private static final String GLOBAL_ASYNC_COMMIT_TX_LOG_DATABASE = "mysql"; - private static final String GLOBAL_TX_TABLE_MAX_PARTITION = "p_unlimited"; - - private static final String GLOBAL_TX_TABLE_GET_PARTITIONS_V2 = - "SELECT `PARTITION_NAME`, `PARTITION_DESCRIPTION`, `TABLE_ROWS` FROM INFORMATION_SCHEMA.PARTITIONS\n" - + "WHERE TABLE_NAME = '" + GLOBAL_ASYNC_COMMIT_TX_LOG_TABLE + "'\n" - + "AND TABLE_SCHEMA = '" + GLOBAL_ASYNC_COMMIT_TX_LOG_DATABASE + "'"; - - private static final String ALTER_GLOBAL_TX_TABLE_ADD_PARTITION_V2 = - "ALTER TABLE `" + GLOBAL_ASYNC_COMMIT_TX_LOG_DATABASE + "`.`" + GLOBAL_ASYNC_COMMIT_TX_LOG_TABLE + "` \n" - + "REORGANIZE PARTITION `" + GLOBAL_TX_TABLE_MAX_PARTITION + "` INTO \n" - + "(PARTITION `%s` VALUES LESS THAN (%d), PARTITION `" + GLOBAL_TX_TABLE_MAX_PARTITION - + "` VALUES LESS THAN MAXVALUE)"; - - private static final String ALTER_GLOBAL_TX_TABLE_DROP_PARTITION_PREFIX_V2 = - "ALTER TABLE `" + GLOBAL_ASYNC_COMMIT_TX_LOG_DATABASE + "`.`" + GLOBAL_ASYNC_COMMIT_TX_LOG_TABLE + "` \n" - + "DROP PARTITION "; - - /** - * Purge trans log created before {@param beforeSeconds}, and split the last partition into - * [current_max, {@param nextSeconds}) and [{@param nextSeconds}, unlimited) if necessary. 
- * - * @return approximate number of purged trans log - */ - public static int run(int beforeSeconds, int nextSeconds) { - final long nowTimeMillis = System.currentTimeMillis(); - final long beforeTimeMillis = nowTimeMillis - beforeSeconds * 1000L; - final long beforeTxid = IdGenerator.assembleId(beforeTimeMillis, 0, 0); - final long nextTimeMillis = nowTimeMillis + nextSeconds * 1000L; - final long nextTxid = IdGenerator.assembleId(nextTimeMillis, 0, 0); - - Set dnIds = - StorageHaManager.getInstance().getMasterStorageList().stream().filter(s -> !s.isMetaDb()) - .map(StorageInstHaContext::getStorageInstId).collect(Collectors.toSet()); - List futures = new ArrayList<>(); - ITopologyExecutor executor = ExecutorContext.getContext(DEFAULT_DB_NAME).getTopologyExecutor(); - AtomicInteger purgeCount = new AtomicInteger(0); - for (String dnId : dnIds) { - futures.add(executor.getExecutorService().submit(DEFAULT_DB_NAME, null, AsyncTask.build(() -> { - try (Connection conn = DbTopologyManager.getConnectionForStorage(dnId)) { - LockUtil.wrapWithLockWaitTimeout(conn, 60, - stmt -> purgeCount.addAndGet(rotateV2(stmt, beforeTxid, nextTxid))); - } catch (SQLException e) { - logger.error("Log clean task failed.", e); + public static long run(boolean force, StringBuilder remark) throws SQLException { + Set dnIds = new HashSet<>(); + Set addresses = new HashSet<>(); + for (StorageInstHaContext ctx : StorageHaManager.getInstance().getMasterStorageList()) { + // Filter same host:port. + if (addresses.add(ctx.getCurrAvailableNodeAddr())) { + dnIds.add(ctx.getStorageInstId()); + } + } + + TrxLogStatusAccessor accessor = new TrxLogStatusAccessor(); + AtomicLong purged = new AtomicLong(0); + boolean updateRemark = true; + try (Connection connection = MetaDbUtil.getConnection()) { + accessor.setConnection(connection); + while (true) { + try { + accessor.begin(); + List records = accessor.getRecord(true); + if (records.isEmpty()) { + remark.append("Not found meta, try to init. "); + // Init. + accessor.insertInitialValue(); + accessor.commit(); + + EventLogger.log(EventType.TRX_INFO, "Init trx log v2 meta succeed."); + + // Re-open trx. + accessor.begin(); + records = accessor.getRecord(true); + remark.append("Init meta success. "); + } + + if (records.size() != 1) { + String err = "Number of trx-log-status record != 1, but is " + records.size(); + EventLogger.log(EventType.TRX_LOG_ERR, err); + throw new TddlRuntimeException(ErrorCode.ERR_TRANS_LOG, err); + } + + int status = records.get(0).status; + + remark.append("Current status: ").append(status).append(". "); + + long before = System.currentTimeMillis(); + switch (status) { + case 0: + long lastUpdate = records.get(0).gmtModified.getTime(); + long currentTime = records.get(0).now.getTime(); + long minuteToMillis = DynamicConfig.getInstance().getTrxLogCleanInterval() * 60 * 1000; + if (!force && lastUpdate + minuteToMillis > currentTime) { + updateRemark = false; + remark.append("Not in expected time, wait for next round. "); + return -1; + } + + remark.append("Create tmp table. "); + + if (FailPoint.isKeyEnable(FP_TRX_LOG_TB_FAILED_BEFORE_CREATE_TMP)) { + throw new TddlRuntimeException(ErrorCode.ERR_ASSERT_FAIL, + "Fail point FP_TRX_LOG_TB_FAILED_BEFORE_CREATE_TMP"); + } + + createTmpTable(dnIds); + + if (FailPoint.isKeyEnable(FP_TRX_LOG_TB_FAILED_DURING_CREATE_TMP)) { + throw new TddlRuntimeException(ErrorCode.ERR_ASSERT_FAIL, + "Fail point FP_TRX_LOG_TB_FAILED_DURING_CREATE_TMP"); + } + + break; + case 1: + remark.append("Switch tmp table. 
"); + + if (FailPoint.isKeyEnable(FP_TRX_LOG_TB_FAILED_BEFORE_SWITCH_TABLE)) { + throw new TddlRuntimeException(ErrorCode.ERR_ASSERT_FAIL, + "Fail point FP_TRX_LOG_TB_FAILED_BEFORE_SWITCH_TABLE"); + } + + switchTables(dnIds, remark); + + if (FailPoint.isKeyEnable(FP_TRX_LOG_TB_FAILED_DURING_SWITCH_TABLE)) { + throw new TddlRuntimeException(ErrorCode.ERR_ASSERT_FAIL, + "Fail point FP_TRX_LOG_TB_FAILED_DURING_SWITCH_TABLE"); + } + + break; + case 2: + remark.append("Drop archive table. "); + + if (FailPoint.isKeyEnable(FP_TRX_LOG_TB_FAILED_BEFORE_DROP_TABLE)) { + throw new TddlRuntimeException(ErrorCode.ERR_ASSERT_FAIL, + "Fail point FP_TRX_LOG_TB_FAILED_BEFORE_DROP_TABLE"); + } + + dropTable(dnIds, remark, purged); + + if (FailPoint.isKeyEnable(FP_TRX_LOG_TB_FAILED_DURING_DROP_TABLE)) { + throw new TddlRuntimeException(ErrorCode.ERR_ASSERT_FAIL, + "Fail point FP_TRX_LOG_TB_FAILED_DURING_DROP_TABLE"); + } + + break; + default: + // Found unexpected status code. Reset it to 0. + accessor.updateStatus(0); + accessor.commit(); + throw new TddlRuntimeException(ErrorCode.ERR_TRANS_LOG, "Unknown trx log status " + status); + } + + remark.append("Done, cost ") + .append(System.currentTimeMillis() - before) + .append("ms. "); + + accessor.updateStatus((status + 1) % 3); + accessor.commit(); + + if (status == 2) { + remark.append("Finish! "); + // Done. + break; + } + } finally { + accessor.rollback(); } - }))); + } + + } catch (Throwable t) { + remark.append("Error occurs, ") + .append(t.getMessage()) + .append(". "); + logger.error("Clean trx log table v2 failed.", t); + throw t; + } finally { + if (updateRemark) { + try (Connection connection = MetaDbUtil.getConnection()) { + accessor.setConnection(connection); + accessor.updateRemark(remark.toString()); + } + } } + return purged.get(); + } + + private static void createTmpTable(Set dnIds) { + ConcurrentLinkedQueue exceptions = new ConcurrentLinkedQueue<>(); + AtomicBoolean lock = new AtomicBoolean(false); + Collection futures = forEachDn(dnIds, (dnId) -> { + try (Connection conn = DbTopologyManager.getConnectionForStorage(dnId); + Statement stmt = conn.createStatement()) { + wrapWithLockWaitTimeout(conn, 3, () -> { + try { + if (FailPoint.isKeyEnable(FP_TRX_LOG_TB_FAILED_DURING_CREATE_TMP) + // Only one DN can create tmp table successfully. + && !lock.compareAndSet(false, true)) { + throw new TddlRuntimeException(ErrorCode.ERR_ASSERT_FAIL, + "Fail point FP_TRX_LOG_TB_FAILED_DURING_CREATE_TMP"); + } + + stmt.execute(DROP_GLOBAL_TX_TABLE_V2_TMP); + stmt.execute(CREATE_GLOBAL_TX_TABLE_V2_TMP); + } catch (Exception e) { + EventLogger.log(EventType.TRX_LOG_ERR, + "Error during creating tmp table, caused by " + e.getMessage()); + exceptions.offer(e); + } + }); + } catch (Exception e) { + exceptions.offer(e); + } + }); AsyncUtils.waitAll(futures); - return purgeCount.get(); + + if (!exceptions.isEmpty()) { + exceptions.forEach(logger::error); + throw new TddlRuntimeException(ErrorCode.ERR_TRANS_LOG, + "create tmp table failed " + exceptions.peek().getMessage()); + } } - private static int rotateV2(Statement stmt, long beforeTxid, long nextTxid) { - try { - int dropped = 0; - ArrayList partitionsWillDrop = new ArrayList<>(); - long txidUpperBound = Long.MIN_VALUE; - try (ResultSet rs = stmt.executeQuery(GLOBAL_TX_TABLE_GET_PARTITIONS_V2)) { + private static void switchTables(Set dnIds, StringBuilder remark) { + /* + Normal procedure: + A -> {A, tmp} -> {B, A} -> A + In normal situation, we may face 2 cases: + 1. A, tmp (rename A to B, tmp to A) + 2. 
A, B (do nothing)
+        The following cases are unexpected, and should not happen,
+        but we still take them into consideration for safety.
+        3. A (do nothing)
+        4. A, B, tmp (do nothing, B will be dropped in the next step)
+        5. B, tmp (rename tmp to A)
+        6. tmp (rename tmp to A)
+        7. B (create A)
+        8. null (create A)
+         */
+        ConcurrentLinkedQueue<Exception> exceptions = new ConcurrentLinkedQueue<>();
+        AtomicBoolean lock = new AtomicBoolean(false);
+        Collection<Future> futures = forEachDn(dnIds, (dnId) -> execWithLockWaitTimeout(dnId, exceptions, (stmt) -> {
+            try {
+                ResultSet rs = stmt.executeQuery(SHOW_ALL_GLOBAL_TX_TABLE_V2);
+                boolean existsA = false, existsB = false, existsTmp = false;
                 while (rs.next()) {
-                    final String partitionName = rs.getString(1);
-                    if (partitionName == null) {
-                        throw new TddlRuntimeException(ErrorCode.ERR_TRANS_LOG,
-                            "Rotate global tx log on non-partitioned table");
+                    String tableName = String.format("`mysql`.`%s`", rs.getString(1));
+                    if (SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE
+                        .equalsIgnoreCase(tableName)) {
+                        existsA = true;
+                        remark.append("Exists A. ");
+                    } else if (SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE_ARCHIVE
+                        .equalsIgnoreCase(tableName)) {
+                        existsB = true;
+                        remark.append("Exists B. ");
+                    } else if (SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE_TMP
+                        .equalsIgnoreCase(tableName)) {
+                        existsTmp = true;
+                        remark.append("Exists tmp. ");
                     }
-                    final String partitionDescText = rs.getString(2);
-                    if ("MAXVALUE".equalsIgnoreCase(partitionDescText)) {
-                        continue;
+                }
+
+                if (existsA && existsTmp && !existsB) {
+                    // Case 1.
+                    if (FailPoint.isKeyEnable(FP_TRX_LOG_TB_FAILED_DURING_SWITCH_TABLE)
+                        // Only one DN switch table successfully.
+                        && !lock.compareAndSet(false, true)) {
+                        throw new TddlRuntimeException(ErrorCode.ERR_ASSERT_FAIL,
+                            "Fail point FP_TRX_LOG_TB_FAILED_DURING_SWITCH_TABLE");
                     }
+
+                    stmt.execute(SWITCH_GLOBAL_TX_TABLE_V2);
+                } else if (existsA) {
+                    // Case 2, 3, 4: do nothing.
+                    EventLogger.log(EventType.TRX_LOG_ERR,
+                        "Found unexpected log table status: (not exists tmp) or (exists A, B, tmp)");
+                    remark.append("Do nothing: (not exists tmp) or (exists A, B, tmp). ");
+                } else if (existsTmp) {
+                    // Case 5 or Case 6.
+                    EventLogger.log(EventType.TRX_LOG_ERR,
+                        "Found unexpected log table status: (B, tmp) or (tmp), force rename tmp to A");
+                    remark.append("Only rename tmp to A: (B, tmp) or (tmp). ");
+                    stmt.execute(FORCE_RENAME_GLOBAL_TX_TABLE_V2);
+                } else {
+                    // Case 7 or Case 8.
+                    EventLogger.log(EventType.TRX_LOG_ERR,
+                        "Found unexpected log table status: (B) or (none), force create A");
+                    remark.append("Create A: (B) or (none). ");
+                    stmt.execute(CREATE_GLOBAL_TX_TABLE_V2);
+                }
+            } catch (Exception e) {
+                EventLogger.log(EventType.TRX_LOG_ERR,
+                    "Error during checking log table, caused by " + e.getMessage());
+                exceptions.offer(e);
+            }
+        }));
+        AsyncUtils.waitAll(futures);
+
+        if (!exceptions.isEmpty()) {
+            exceptions.forEach(remark::append);
+            throw new TddlRuntimeException(ErrorCode.ERR_TRANS_LOG, "switch tmp table failed " + exceptions.peek());
+        }
+    }
+
+    private static void dropTable(Set<String> dnIds, StringBuilder remark, AtomicLong purged) {
+        // Check support async commit variables.
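+        // If the DN supports async commit, the archive-table scan below runs with a recover
+        // timestamp, so prepared-but-uncommitted trx log records are visible as well.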
+ boolean dn57 = ExecutorContext.getContext(CDC_DB_NAME).getStorageInfoManager().supportAsyncCommit(); + // Max time 4 * 5 = 20s + int retry = 0, maxRetry = 4, sleepSecond = 5; + ConcurrentLinkedQueue exceptions = new ConcurrentLinkedQueue<>(); + while (retry < maxRetry) { + ITopologyExecutor executor = ExecutorContext.getContext(DEFAULT_DB_NAME).getTopologyExecutor(); + // Find min trx id for prepared trx. + AtomicLong minTrxId = new AtomicLong(Long.MAX_VALUE); + AtomicLong tmpMaxTrxId = new AtomicLong(Long.MIN_VALUE); + scanRecoveredTrans(dnIds, executor, exceptions, minTrxId, tmpMaxTrxId); + + if (!exceptions.isEmpty()) { + exceptions.forEach(remark::append); + EventLogger.log(EventType.TRX_LOG_ERR, "Drop archive table failed when finding min trx id."); + throw new TddlRuntimeException(ErrorCode.ERR_TRANS_LOG, + "Drop archive table failed when finding min trx id. " + exceptions.peek()); + } + + // Find max trx id in archive table. + AtomicLong maxTrxId = new AtomicLong(Long.MIN_VALUE); + Collection futures = + forEachDn(dnIds, (dnId) -> execWithLockWaitTimeout(dnId, exceptions, (stmt) -> { try { - long maxTxidInPartition = Long.parseLong(partitionDescText); - if (maxTxidInPartition < beforeTxid) { - final long tableRows = rs.getLong(3); - dropped += tableRows; - partitionsWillDrop.add("`" + partitionName + "`"); + stmt.execute("begin"); + if (dn57) { + // Use recover timestamp to see all records including uncommitted ones. + stmt.execute(String.format(SET_DISTRIBUTED_TRX_ID, 0)); + stmt.execute(TrxLogTableConstants.RECOVER_TIMESTAMP_SQL); + } + ResultSet rs = stmt.executeQuery(SELECT_MAX_TX_ID_IN_ARCHIVE); + if (rs.next()) { + long txid = rs.getLong(1); + long tmp = maxTrxId.get(); + while (txid > tmp && !maxTrxId.compareAndSet(tmp, txid)) { + tmp = maxTrxId.get(); + } + } + } catch (SQLException e) { + if (e.getMessage().contains("doesn't exist")) { + // Ignore. Archive table is already dropped. + } else { + EventLogger.log(EventType.TRX_LOG_ERR, + "Error during finding max trx id in archive table, caused by " + e.getMessage()); + exceptions.offer(e); + } + } finally { + try { + stmt.execute("rollback"); + } catch (Throwable ignored) { } - txidUpperBound = Math.max(txidUpperBound, maxTxidInPartition); - } catch (NumberFormatException e) { - throw new TddlRuntimeException(ErrorCode.ERR_TRANS_LOG, - "Invalid partition description for partition " + partitionName); } - } + })); + AsyncUtils.waitAll(futures); + + if (!exceptions.isEmpty()) { + exceptions.forEach(remark::append); + throw new TddlRuntimeException(ErrorCode.ERR_TRANS_LOG, + "Drop archive table failed when finding max trx id. " + exceptions.peek()); } - if (nextTxid > txidUpperBound) { - logger.info("Creating new partition" + "p_" + nextTxid + " on async commit tx log"); - stmt.executeUpdate(String.format(ALTER_GLOBAL_TX_TABLE_ADD_PARTITION_V2, "p_" + nextTxid, nextTxid)); + + if (minTrxId.get() > maxTrxId.get()) { + // Safe to delete archive table. + AtomicBoolean lock = new AtomicBoolean(false); + futures = forEachDn(dnIds, (dnId) -> execWithLockWaitTimeout(dnId, exceptions, (stmt) -> { + try { + ResultSet rs = stmt.executeQuery(SHOW_ALL_GLOBAL_TX_TABLE_V2); + boolean existsB = false; + while (rs.next()) { + String tableName = String.format("`mysql`.`%s`", rs.getString(1)); + if (SystemTables.POLARDBX_GLOBAL_TX_LOG_TABLE_ARCHIVE.equalsIgnoreCase(tableName)) { + existsB = true; + break; + } + } + if (existsB) { + // Get approximate dropped rows. 
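+                            // SELECT_TABLE_ROWS_V2 presumably reads the table's row-count statistic,
+                            // an estimate rather than an exact COUNT(*), so 'purged' is approximate.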
+ rs = stmt.executeQuery(SELECT_TABLE_ROWS_V2); + if (rs.next()) { + purged.addAndGet(rs.getLong(1)); + } + + if (FailPoint.isKeyEnable(FP_TRX_LOG_TB_FAILED_DURING_DROP_TABLE) + // Only one DN drop table successfully. + && !lock.compareAndSet(false, true)) { + throw new TddlRuntimeException(ErrorCode.ERR_ASSERT_FAIL, + "Fail point FP_TRX_LOG_TB_FAILED_DURING_DROP_TABLE"); + } + + // Then, drop table. + stmt.execute(DROP_GLOBAL_TX_TABLE_V2_ARCHIVE); + } + } catch (Exception e) { + exceptions.offer(e); + EventLogger.log(EventType.TRX_LOG_ERR, + "Error during dropping archive table, caused by " + e.getMessage()); + logger.error("Drop archive table failed when dropping archive table.", e); + } + })); + AsyncUtils.waitAll(futures); + + if (!exceptions.isEmpty()) { + exceptions.forEach(remark::append); + throw new TddlRuntimeException(ErrorCode.ERR_TRANS_LOG, + "Drop archive table failed when dropping archive table. " + exceptions.peek()); + } + + // Done. + return; + } else { + remark.append("Found min prepared trx id ") + .append(minTrxId) + .append(" less than max archive trx id ") + .append(maxTrxId) + .append(", retry ") + .append(retry) + .append(". "); } - if (!partitionsWillDrop.isEmpty()) { - String dropSql = ALTER_GLOBAL_TX_TABLE_DROP_PARTITION_PREFIX_V2 + String.join(",", partitionsWillDrop); - logger.info("Purging async commit tx log with ddl " + dropSql.replace("\n", " ")); - stmt.executeUpdate(dropSql); + + try { + Thread.sleep(sleepSecond * 1000); + } catch (InterruptedException e) { + remark.append("Interrupted when waiting for next retry, current retry ") + .append(retry) + .append(". "); + throw new TddlRuntimeException(ErrorCode.ERR_TRANS_LOG, + "Interrupted when waiting for next retry, current retry " + retry); } - return dropped; + retry++; + } + throw new TddlRuntimeException(ErrorCode.ERR_TRANS_LOG, "Max retry exceeds."); + } + + private static Collection forEachDn(Set dnIds, Consumer task) { + List futures = new ArrayList<>(); + ITopologyExecutor executor = ExecutorContext.getContext(DEFAULT_DB_NAME).getTopologyExecutor(); + for (String dnId : dnIds) { + futures.add(executor.getExecutorService().submit(null, null, + AsyncTask.build(() -> task.accept(dnId)))); + } + return futures; + } + + private static void execWithLockWaitTimeout(String dnId, + ConcurrentLinkedQueue exceptions, + Consumer task) { + try (Connection conn = DbTopologyManager.getConnectionForStorage(dnId); + Statement stmt = conn.createStatement()) { + wrapWithLockWaitTimeout(conn, 3, () -> task.accept(stmt)); } catch (SQLException e) { - throw new TddlRuntimeException(ErrorCode.ERR_TRANS_LOG, e, - "Rotate global transaction log with " + beforeTxid + " failed"); + exceptions.offer(e); } } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/shadowtable/ShadowTableUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/shadowtable/ShadowTableUtils.java new file mode 100644 index 000000000..0c38c180b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/shadowtable/ShadowTableUtils.java @@ -0,0 +1,186 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.shadowtable; + +import com.alibaba.polardbx.common.IdGenerator; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlCreateTableStatement; +import com.alibaba.polardbx.druid.util.JdbcConstants; +import com.alibaba.polardbx.executor.common.ExecutorContext; +import com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlData; +import com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils; +import com.alibaba.polardbx.executor.spi.IGroupExecutor; +import com.alibaba.polardbx.executor.utils.StringUtils; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.statistics.SQLRecorderLogger; +import org.apache.calcite.sql.SqlIdentifier; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ThreadLocalRandom; + +import static com.alibaba.polardbx.druid.sql.SQLUtils.parseStatementsWithDefaultFeatures; +import static com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils.queryGroupBypassConnPool; +import static com.alibaba.polardbx.gms.metadb.limit.Limits.MAX_LENGTH_OF_IDENTIFIER_NAME; + +public class ShadowTableUtils { + private final static Logger LOG = SQLRecorderLogger.ddlMetaLogger; + + static String taskName = "ShadowTableCheckBeforeExecute"; + + public static String generateShadowTableName(String logicalTableName, Long id) { + // physical table name generated is t1_m7yv_00074, in which case we have shadow table name like + // __t1_12345678, where the shadow table name is no longer than the physical table name. 
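+        // Worked example: "__t1_12345678" has 13 characters and "t1" has 2, so suffixLength is 11
+        // and the logical name keeps at most MAX_LENGTH_OF_IDENTIFIER_NAME - 11 characters.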
+        final int suffixLength = "__t1_12345678".length() - "t1".length();
+        final int maxLogicalTableNameLength = MAX_LENGTH_OF_IDENTIFIER_NAME - suffixLength;
+        if (logicalTableName.length() > maxLogicalTableNameLength) {
+            logicalTableName = logicalTableName.substring(0, maxLogicalTableNameLength);
+        }
+        String shadowTableName = String.format("__%s_%08d", logicalTableName, id % 100_000_000L);
+        return shadowTableName;
+    }
+
+    private static String randomString(int length) {
+        StringBuilder sb = new StringBuilder();
+        for (int i = 0; i < length; i++) {
+            char ch = (char) (ThreadLocalRandom.current().nextInt('x' - 'a') + 'a');
+            sb.append(ch);
+        }
+        return sb.toString();
+    }
+
+    /**
+     * Create the shadow table on the given group by cloning the physical table's definition.
+     */
+    public static void createShadowTable(ExecutionContext originEc, String schemaName, String logicalTableName,
+                                         String groupName,
+                                         String phyTableName, String shadowTableName) {
+        String createTableSql =
+            generateCreateTableSql(originEc, schemaName, logicalTableName, groupName, phyTableName, shadowTableName);
+        TwoPhaseDdlUtils.executePhyDdlBypassConnPool(originEc, -1L, schemaName, groupName, createTableSql, "",
+            shadowTableName);
+    }
+
+    public static String showCreateShadowTable(ExecutionContext originEc, String schemaName, String logicalTableName,
+                                               String groupName,
+                                               String phyTableName, String shadowTableName) {
+        String showCreateTableSql =
+            String.format("show create table %s", SqlIdentifier.surroundWithBacktick(shadowTableName));
+        List<List<Object>> results =
+            TwoPhaseDdlUtils.queryGroup(originEc, -1L, taskName, schemaName, logicalTableName, groupName,
+                showCreateTableSql);
+        return results.get(0).get(1).toString();
+    }
+
+    // TODO: what if CN HA after alter shadow table
+    public static void alterShadowTable(ExecutionContext originEc, String schemaName, String logicalTableName,
+                                        String groupName,
+                                        String shadowTableName, String alterStmt) {
+        TwoPhaseDdlUtils.executePhyDdlBypassConnPool(originEc, -1L, schemaName, groupName, alterStmt, "",
+            shadowTableName);
+    }
+
+    public static void clearShadowTable(ExecutionContext originEc, String schemaName, String logicalTableName,
+                                        String groupName,
+                                        String shadowTableName) {
+        String dropTableStmt = "DROP TABLE IF EXISTS %s";
+        String sql = String.format(dropTableStmt, SqlIdentifier.surroundWithBacktick(shadowTableName));
+        TwoPhaseDdlUtils.executePhyDdlBypassConnPool(originEc, -1L, schemaName, groupName, sql, "", shadowTableName);
+    }
+
+    public static void initTraceShadowTable(ExecutionContext originEc, String schemaName, String logicalTableName,
+                                            String groupName,
+                                            String shadowTableName, Long id) {
+        String sql =
+            String.format(TwoPhaseDdlUtils.SQL_INIT_TWO_PHASE_DDL, schemaName, String.valueOf(id),
+                StringUtils.quote(shadowTableName));
+        TwoPhaseDdlUtils.updateGroup(originEc, -1L, schemaName, groupName, sql);
+        sql = String.format(TwoPhaseDdlUtils.SQL_TRACE_TWO_PHASE_DDL, schemaName, String.valueOf(id));
+        TwoPhaseDdlUtils.updateGroup(originEc, -1L, schemaName, groupName, sql);
+    }
+
+    public static void finishTraceShadowTable(ExecutionContext originEc, String schemaName, String logicalTableName,
+                                              String groupName,
+                                              String shadowTableName, Long id) {
+        String sql = String.format(TwoPhaseDdlUtils.SQL_FINISH_TWO_PHASE_DDL, schemaName, String.valueOf(id));
+        TwoPhaseDdlUtils.updateGroup(originEc, -1L, schemaName, groupName, sql);
+    }
+
+    public 
static Pair<Boolean, Boolean> fetchTraceTableDdl(ExecutionContext originEc, String schemaName,
+                                                            String logicalTableName,
+                                                            String groupName,
+                                                            String shadowTableName, Long id) {
+        String sql = String.format(TwoPhaseDdlUtils.SQL_PROF_TWO_PHASE_DDL, schemaName, String.valueOf(id));
+        Map<String, Object> result =
+            TwoPhaseDdlUtils.queryGroupBypassConnPool(originEc, -1L, "", schemaName, logicalTableName, groupName, sql)
+                .get(0);
+        Boolean prepareMoment = !result.get("REACHED_PREPARED_MOMENT").toString().startsWith("1970");
+        Boolean commitMoment = !result.get("REACHED_COMMIT_MOMENT").toString().startsWith("1970");
+        return Pair.of(prepareMoment, commitMoment);
+    }
+
+    public static String generateCreateTableSql(ExecutionContext originEc, String schemaName, String logicalTableName,
+                                                String groupName,
+                                                String originalTableName, String targetTableName) {
+        String showCreateTableStmt = "SHOW CREATE TABLE %s";
+        String sql = String.format(showCreateTableStmt, SqlIdentifier.surroundWithBacktick(originalTableName));
+        String createTableSql =
+            TwoPhaseDdlUtils.queryGroup(originEc, -1L, taskName, schemaName, logicalTableName, groupName, sql).get(0)
+                .get(1)
+                .toString();
+        final MySqlCreateTableStatement
+            createTableStmt =
+            (MySqlCreateTableStatement) parseStatementsWithDefaultFeatures(createTableSql, JdbcConstants.MYSQL).get(0)
+                .clone();
+        createTableStmt.setTableName(SqlIdentifier.surroundWithBacktick(targetTableName));
+        // Note: the misspelled setter is the actual Druid AST API name.
+        createTableStmt.setIfNotExiists(true);
+        return createTableStmt.toString();
+    }
+
+}
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/spi/ITransactionManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/spi/ITransactionManager.java
index 299f48abb..78dd61df8 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/spi/ITransactionManager.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/spi/ITransactionManager.java
@@ -60,6 +60,10 @@ default boolean supportAsyncCommit() {
         return false;
     }
 
+    long getMinSnapshotSeq();
+
+    long getColumnarMinSnapshotSeq();
+
     default boolean support2pcOpt() {
         return false;
     }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/entity/PolarDbXSystemTableColumnStatistic.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/entity/PolarDbXSystemTableColumnStatistic.java
index 3b466dd61..9dae61e52 100644 
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/entity/PolarDbXSystemTableColumnStatistic.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/entity/PolarDbXSystemTableColumnStatistic.java @@ -68,7 +68,7 @@ public class PolarDbXSystemTableColumnStatistic implements SystemTableColumnStat private static final String ALTER_TABLE_UTF8MB4 = "ALTER TABLE `" + TABLE_NAME + "` CHARACTER SET = UTF8MB4"; private static final String SELECT_SQL = - "SELECT SCHEMA_NAME, TABLE_NAME, COLUMN_NAME, CARDINALITY, CMSKETCH, HISTOGRAM, TOPN, NULL_COUNT, SAMPLE_RATE, UNIX_TIMESTAMP(GMT_MODIFIED) AS UNIX_TIME FROM `" + "SELECT SCHEMA_NAME, TABLE_NAME, COLUMN_NAME, CARDINALITY, CMSKETCH, HISTOGRAM, TOPN, NULL_COUNT, SAMPLE_RATE, UNIX_TIMESTAMP(GMT_MODIFIED) AS UNIX_TIME, EXTEND_FIELD FROM `" + TABLE_NAME + "` "; private static final String DELETE_TABLE_SQL = "DELETE FROM `" + TABLE_NAME + "` WHERE SCHEMA_NAME = '%s' AND " @@ -86,7 +86,7 @@ public class PolarDbXSystemTableColumnStatistic implements SystemTableColumnStat * select table rows sql, need to concat with values */ private static final String REPLACE_SQL = "REPLACE INTO `" + TABLE_NAME + "` (`SCHEMA_NAME`, `TABLE_NAME`, " - + "`COLUMN_NAME`, `CARDINALITY`, `CMSKETCH`, `HISTOGRAM`, `TOPN`, `NULL_COUNT`, `SAMPLE_RATE`) VALUES "; + + "`COLUMN_NAME`, `CARDINALITY`, `CMSKETCH`, `HISTOGRAM`, `TOPN`, `NULL_COUNT`, `SAMPLE_RATE`, `EXTEND_FIELD`) VALUES "; private static final int batchSize = 30; @@ -272,7 +272,8 @@ public Collection selectAll(long sinceTime) { TopN.deserializeFromJson(rs.getString("TOPN")), rs.getLong("NULL_COUNT"), rs.getFloat("SAMPLE_RATE"), - rs.getLong("UNIX_TIME")); + rs.getLong("UNIX_TIME"), + rs.getString("EXTEND_FIELD")); rows.add(row); } catch (Exception e) { @@ -324,33 +325,34 @@ private boolean innerBatchReplace(final List row } else { sqlBuilder.append(","); } - sqlBuilder.append("(?,?,?,?,?,?,?,?,?)"); + sqlBuilder.append("(?,?,?,?,?,?,?,?,?,?)"); } pps = conn.prepareStatement(sql = sqlBuilder.toString()); for (int k = 0; k < batchCount; k++) { SystemTableColumnStatistic.Row row = rowList.get(k + index - batchCount); - pps.setString(k * 9 + 1, row.getSchema().toLowerCase()); - pps.setString(k * 9 + 2, row.getTableName().toLowerCase()); - pps.setString(k * 9 + 3, row.getColumnName().toLowerCase()); - pps.setLong(k * 9 + 4, row.getCardinality()); + pps.setString(k * 10 + 1, row.getSchema().toLowerCase()); + pps.setString(k * 10 + 2, row.getTableName().toLowerCase()); + pps.setString(k * 10 + 3, row.getColumnName().toLowerCase()); + pps.setLong(k * 10 + 4, row.getCardinality()); String cmSketchString = Base64.encodeBase64String(CountMinSketch.serialize(new CountMinSketch(1, 1, 1))); - pps.setString(k * 9 + 5, cmSketchString); + pps.setString(k * 10 + 5, cmSketchString); String histogramString; if (row.getHistogram() != null) { histogramString = Histogram.serializeToJson(row.getHistogram()); } else { histogramString = Histogram.serializeToJson(new Histogram(1, DataTypes.IntegerType, 1)); } - pps.setString(k * 9 + 6, histogramString); + pps.setString(k * 10 + 6, histogramString); String topN = null; if (row.getTopN() != null) { topN = TopN.serializeToJson(row.getTopN()); } - pps.setString(k * 9 + 7, topN); - pps.setLong(k * 9 + 8, row.getNullCount()); - pps.setFloat(k * 9 + 9, row.getSampleRate()); + pps.setString(k * 10 + 7, topN); + pps.setLong(k * 10 + 8, row.getNullCount()); + pps.setFloat(k * 10 + 9, row.getSampleRate()); + pps.setString(k * 10 + 10, row.getExtendField()); 
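+                        // Ten bind parameters per row now that EXTEND_FIELD is persisted,
+                        // which is why every offset above is k * 10 instead of k * 9.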
} pps.executeUpdate(); pps.close(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/entity/PolarDbXSystemTableNDVSketchStatistic.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/entity/PolarDbXSystemTableNDVSketchStatistic.java index eaf5c5207..6c362d871 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/entity/PolarDbXSystemTableNDVSketchStatistic.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/entity/PolarDbXSystemTableNDVSketchStatistic.java @@ -25,7 +25,6 @@ import com.alibaba.polardbx.gms.util.MetaDbUtil; import com.alibaba.polardbx.optimizer.config.table.statistic.inf.SystemTableNDVSketchStatistic; import com.google.common.collect.Lists; -import com.google.common.collect.Maps; import java.sql.Connection; import java.sql.PreparedStatement; @@ -34,7 +33,6 @@ import java.util.BitSet; import java.util.List; import java.util.Map; -import java.util.Set; import static com.alibaba.polardbx.executor.statistic.ndv.HyperLogLogUtil.HLL_REGBYTES; import static com.alibaba.polardbx.executor.statistic.ndv.HyperLogLogUtil.bitToInt; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/ndv/FlowControl.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/ndv/FlowControl.java index c5518c121..7a9696182 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/ndv/FlowControl.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/ndv/FlowControl.java @@ -20,7 +20,6 @@ import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; public class FlowControl { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/ndv/HyperLogLogUtil.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/ndv/HyperLogLogUtil.java index bbeb6dcc3..aafeb84ff 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/ndv/HyperLogLogUtil.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/ndv/HyperLogLogUtil.java @@ -16,9 +16,8 @@ package com.alibaba.polardbx.executor.statistic.ndv; -import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.optimizer.config.table.statistic.inf.SystemTableNDVSketchStatistic; +import io.airlift.slice.XxHash64; import java.util.BitSet; @@ -26,16 +25,39 @@ public class HyperLogLogUtil { public static int HLL_REGISTERS = 16384; public static int HLL_REGBYTES = HLL_REGISTERS * 6 / 8; + public static int HLL_REGBYTES_DE = HLL_REGBYTES + 1; public static int HLL_BITS = 6; public static int HLL_REGISTER_MAX = ((1 << HLL_BITS) - 1); + public static int HLL_P_MASK = HLL_REGISTERS - 1; public static int HLL_P = 14; public static int HLL_Q = 64 - HLL_P; public static double HLL_ALPHA_INF = 0.721347520444481703680;/* constant for 0.5/ln(2) */ + public static void hllSet(byte[] r, long ele) { + // hllPatLen + long hash = XxHash64.hash(ele); + int index = (int) (hash & HLL_P_MASK); + + hash = hash >>> HLL_P; + hash = hash | (1L << HLL_Q); + int count = 1; + long bit = 1; + while ((hash & bit) == 0) { + count++; + bit <<= 1; + } + + int oldCount = get(r, index); + if (oldCount < count) { + set(r, index, count); + } + } + public static void merge(byte[] r, byte[] tmp) 
{ + BitSet bitSet = BitSet.valueOf(tmp); for (int i = 0; i < HLL_REGISTERS; i++) { int rValue = get(r, i); - int tmpValue = get(tmp, i); + int tmpValue = bitToInt(bitSet, i * 6); if (rValue < tmpValue) { set(r, i, tmpValue); } @@ -45,10 +67,7 @@ public static void merge(byte[] r, byte[] tmp) { private static int get(byte[] a, int pos) { int bytePos = pos * HLL_BITS / 8; int bitRemine = (pos * HLL_BITS) & 7; - if (bytePos + 1 == a.length) { - return (a[bytePos] >> bitRemine) & HLL_REGISTER_MAX; - } - return ((a[bytePos] >> bitRemine) | (a[bytePos + 1] << (8 - bitRemine))) & HLL_REGISTER_MAX; + return (((a[bytePos] & 0xFF) >>> bitRemine) | (a[bytePos + 1] << (8 - bitRemine))) & HLL_REGISTER_MAX; } private static void set(byte[] a, int pos, int val) { @@ -56,9 +75,6 @@ private static void set(byte[] a, int pos, int val) { int bitRemine = (pos * HLL_BITS) & 7; a[bytePos] &= ~(HLL_REGISTER_MAX << bitRemine); a[bytePos] |= (val << bitRemine); - if (bytePos + 1 == a.length) { - return; - } a[bytePos + 1] &= ~(HLL_REGISTER_MAX >> (8 - bitRemine)); a[bytePos + 1] |= (val >> (8 - bitRemine)); } @@ -142,6 +158,19 @@ public static String buildSketchKey(String schemaName, String tableName, String return (schemaName + ":" + tableName + ":" + columnNames).toLowerCase(); } + public static long getCardinality(byte[] bytes) { + if (bytes == null) { + return 0; + } + int[] registers = new int[HLL_REGISTERS]; + BitSet bitSet = BitSet.valueOf(bytes); + for (int i = 0; i * 6 < HLL_REGBYTES * 8; i++) {// cal the reciprocal + int v = bitToInt(bitSet, i * 6); + registers[i] = v; + } + return reckon(buildReghisto(registers)); + } + public static long estimate(byte[][] bytes) { byte[] m = bytes[0]; if (m == null || m.length != HLL_REGBYTES) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/ndv/NDVShardSketch.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/ndv/NDVShardSketch.java index 079af9118..963d4ce6e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/ndv/NDVShardSketch.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/ndv/NDVShardSketch.java @@ -16,12 +16,18 @@ package com.alibaba.polardbx.executor.statistic.ndv; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.jdbc.IDataSource; import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.properties.ConnectionProperties; +import com.alibaba.polardbx.common.utils.AsyncUtils; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.LoggerUtil; import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.common.ExecutorContext; +import com.alibaba.polardbx.executor.ddl.newengine.cross.CrossEngineValidator; import com.alibaba.polardbx.executor.statistic.entity.PolarDbXSystemTableNDVSketchStatistic; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.sync.UpdateStatisticSyncAction; @@ -31,28 +37,36 @@ import com.alibaba.polardbx.gms.module.LogLevel; import com.alibaba.polardbx.gms.module.Module; import com.alibaba.polardbx.gms.module.ModuleLogInfo; -import com.alibaba.polardbx.gms.node.LeaderStatusBridge; +import com.alibaba.polardbx.gms.sync.SyncScope; 
import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.group.jdbc.TGroupDirectConnection; import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticManager; import com.alibaba.polardbx.optimizer.config.table.statistic.inf.SystemTableNDVSketchStatistic; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.exception.TableNotFoundException; import com.alibaba.polardbx.optimizer.exception.TableNotFoundException; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Sets; +import org.eclipse.jetty.util.StringUtil; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.Executor; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; import static com.alibaba.polardbx.common.properties.ConnectionProperties.ENABLE_HLL; import static com.alibaba.polardbx.executor.statistic.ndv.HyperLogLogUtil.HLL_REGISTERS; @@ -66,11 +80,10 @@ import static com.alibaba.polardbx.gms.module.LogPattern.PROCESS_END; import static com.alibaba.polardbx.gms.module.LogPattern.PROCESS_START; import static com.alibaba.polardbx.gms.module.LogPattern.UNEXPECTED; -import static com.alibaba.polardbx.gms.module.LogPattern.UPDATE_NDV_FOR_CHANGED; import static com.alibaba.polardbx.gms.module.LogPattern.UPDATE_NDV_FOR_EXPIRED; public class NDVShardSketch { - private static final Logger logger = LoggerFactory.getLogger("STATISTICS"); + private static final Logger logger = LoggerUtil.statisticsLogger; public static final double MAX_DIFF_VALUE_RATIO = 0.2D; @@ -84,28 +97,34 @@ public class NDVShardSketch { /** * schemaName:table name:columns name */ - private String shardKey; + private final String shardKey; /** * one shard for one physical table */ - private String[] shardParts; + private final String[] shardParts; /** * ndv value from dn statistic view, might not be accurate */ - private long[] dnCardinalityArray; + private final long[] dnCardinalityArray; /** * sketch type: hyper log log only for now */ - private String sketchType; + private final String sketchType; /** * sketch info update time for every shard */ private long[] gmtUpdate; + /** + * sketch info update time, use the max value in gmtUpdate array + * this is a mem cache, -1L represents inactive state + */ + private long lastGmtUpdate = -1L; + /** * sketch info create time for every shard */ @@ -188,7 +207,10 @@ public boolean validityCheck() { } public long lastModifyTime() { - return Arrays.stream(gmtUpdate).max().getAsLong(); + if (lastGmtUpdate == -1L) { + lastGmtUpdate = Arrays.stream(gmtUpdate).max().getAsLong(); + } + return lastGmtUpdate; } /** @@ -244,7 +266,7 @@ shardKey, shardParts[i], new Date(current).toString(), new Date(gmtUpdate[i]).to long cardinalityTmp = getCurrentCardinality(shardKey, shardParts[i]); cardinalityTime += System.currentTimeMillis() - start; start = System.currentTimeMillis(); - byte[] bytes = getCurrentHll(shardKey, shardParts[i], false); + byte[] bytes = getCurrentHll(shardKey, shardParts[i], false, null); if (bytes == 
null) { // null meaning the hll request is stopped by something ModuleLogInfo.getInstance() @@ -452,7 +474,8 @@ public static String[] topologyPartToShard(Map> topology) { * build one ndv sketch */ public static NDVShardSketch buildNDVShardSketch(String schemaName, String tableName, String columnName, - boolean isForce) + boolean isForce, ExecutionContext ec, + ThreadPoolExecutor sketchHllExecutor) throws SQLException { if (!InstConfUtil.getBool(ConnectionParams.ENABLE_HLL)) { // just return @@ -489,37 +512,50 @@ public static NDVShardSketch buildNDVShardSketch(String schemaName, String table long[] gmtUpdate = new long[shardPart.length]; long[] gmtCreated = new long[shardPart.length]; - long sketchTime = 0; - long cardinalityTime = 0; - - // fill cardinality and sketch bytes - for (int i = 0; i < shardPart.length; i++) { - long start = System.currentTimeMillis(); - dnCardinalityArray[i] = getCurrentCardinality(shardKey, shardPart[i]); - long mid = System.currentTimeMillis(); - cardinalityTime += mid - start; - sketchArray[i] = getCurrentHll(shardKey, shardPart[i], isForce); - sketchTime += System.currentTimeMillis() - mid; - if (sketchArray[i] == null) { - gmtUpdate[i] = 0L; - } else { - gmtUpdate[i] = System.currentTimeMillis(); - } - gmtCreated[i] = System.currentTimeMillis(); - } - + AtomicLong sketchTime = new AtomicLong(0); + AtomicLong cardinalityTime = new AtomicLong(0); long cardinality = -1; - /** - * sketch data has null meaning it was interrupted for some reason. - * manual analyze table to force rebuilt it or wait the update job. - */ - if (isSketchDataReady(sketchArray)) { - cardinality = estimate(sketchArray); + boolean columnar = false; + // try columnar sketch first + cardinality = sketchByColumnar(shardKey, shardPart, dnCardinalityArray, sketchArray, gmtCreated, gmtUpdate, + cardinalityTime, sketchTime, ec); + if (cardinality >= 0) { + columnar = true; } - if (cardinality < 0) { - cardinality = 0; + List futures = null; + AtomicBoolean stopped = new AtomicBoolean(false); + if (sketchHllExecutor != null) { + futures = new ArrayList<>(shardPart.length); + } + + // fill cardinality and sketch bytes + for (int i = 0; i < shardPart.length; i++) { + if (sketchHllExecutor == null) { + sketchOnePart(shardKey, shardPart, dnCardinalityArray, sketchArray, gmtCreated, gmtUpdate, + cardinalityTime, sketchTime, isForce, ec, i, stopped); + } else { + final int partIdx = i; + Future future = sketchHllExecutor.submit( + () -> sketchOnePart(shardKey, shardPart, dnCardinalityArray, sketchArray, gmtCreated, gmtUpdate, + cardinalityTime, sketchTime, isForce, ec, partIdx, stopped)); + futures.add(future); + } + } + if (futures != null) { + AsyncUtils.waitAll(futures); + } + /** + * sketch data has null meaning it was interrupted for some reason. + * manual analyze table to force rebuilt it or wait the update job. 
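+             * Once every shard sketch is present, isSketchDataReady passes and the merged
+             * registers are estimated into the final cardinality below.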
+ */ + if (isSketchDataReady(sketchArray)) { + cardinality = estimate(sketchArray); + } + if (cardinality < 0) { + cardinality = 0; + } } ModuleLogInfo.getInstance() @@ -540,18 +576,129 @@ public static NDVShardSketch buildNDVShardSketch(String schemaName, String table ndvShardSketch.setCardinality(cardinality); // persist - PolarDbXSystemTableNDVSketchStatistic.getInstance().batchReplace(ndvShardSketch.serialize(sketchArray)); + PolarDbXSystemTableNDVSketchStatistic.getInstance() + .batchReplace(ndvShardSketch.serialize(sketchArray, columnar)); // sync other nodes SyncManagerHelper.syncWithDefaultDB( new UpdateStatisticSyncAction( schemaName, tableName, - null)); + null), + SyncScope.ALL); return ndvShardSketch; } + private static long sketchByColumnar(String shardKey, String[] shardPart, + long[] dnCardinalityArray, byte[][] sketchArray, + long[] gmtCreated, long[] gmtUpdate, + AtomicLong cardinalityTime, AtomicLong sketchTime, ExecutionContext ec) { + String hint = genColumnarHllHint(ec); + if (StringUtil.isEmpty(hint)) { + return -1; + } + String[] shardKeys = shardKey.split(":"); + String schemaName = shardKeys[0]; + String tableName = shardKeys[1]; + String columnsName = shardKeys[2]; + TableMeta tm = OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTableWithNull(tableName); + if (tm == null) { + return -1; + } + // must be a table with columnar indexes + if (GeneralUtil.isEmpty(tm.getColumnarIndexPublished())) { + return -1; + } + long cardinality = -1; + // must visit columnar indexes + long start = System.currentTimeMillis(); + try (Connection connection = ExecutorContext.getContext(schemaName).getInnerConnectionManager() + .getConnection(schemaName); + Statement stmt = connection.createStatement()) { + String sql = hint + String.format(HYPER_LOG_LOG_SQL, columnsName, tableName); + logger.info(sql); + ResultSet rs = stmt.executeQuery(sql); + if (rs.next()) { + cardinality = rs.getLong(1); + } + while (rs.next()) { + } + } catch (Exception e) { + logger.error("Failed to get hll on columnar", e); + return -1; + } + logger.info(String.format("get hll for %s.%s.%s using columnar", schemaName, tableName, columnsName)); + sketchTime.getAndAdd(System.currentTimeMillis() - start); + for (int i = 0; i < shardPart.length; i++) { + dnCardinalityArray[i] = cardinality; + sketchArray[i] = null; + gmtCreated[i] = start; + gmtUpdate[i] = start; + } + return cardinality; + } + + public static String genColumnarHllHint(ExecutionContext ec) { + boolean isNdv = (ec == null) ? + InstConfUtil.getBool(ConnectionParams.ENABLE_NDV_USE_COLUMNAR) : + ec.getParamManager().getBoolean(ConnectionParams.ENABLE_NDV_USE_COLUMNAR); + + boolean isMppNdv = (ec == null) ? 
+ InstConfUtil.getBool(ConnectionParams.ENABLE_MPP_NDV_USE_COLUMNAR) : + ec.getParamManager().getBoolean(ConnectionParams.ENABLE_MPP_NDV_USE_COLUMNAR); + if (!(isNdv || isMppNdv)) { + return null; + } + StringBuilder sb = new StringBuilder("/*+TDDL:cmd_extra("); + // disable fast path + sb.append(ConnectionProperties.ENABLE_DIRECT_PLAN).append("=false "); + sb.append(ConnectionProperties.ENABLE_POST_PLANNER).append("=false "); + sb.append(ConnectionProperties.ENABLE_INDEX_SELECTION).append("=false "); + sb.append(ConnectionProperties.ENABLE_SORT_AGG).append("=false "); + + // use columnar optimizer + sb.append(ConnectionProperties.WORKLOAD_TYPE).append("=AP "); + sb.append(ConnectionProperties.ENABLE_COLUMNAR_OPTIMIZER).append("=true "); + + if (isMppNdv) { + // use master mpp + sb.append(ConnectionProperties.ENABLE_HTAP).append("=true "); + sb.append(ConnectionProperties.ENABLE_MASTER_MPP).append("=true "); + } + sb.append(")*/"); + return sb.toString(); + } + + private static void sketchOnePart(String shardKey, String[] shardPart, + long[] dnCardinalityArray, byte[][] sketchArray, + long[] gmtCreated, long[] gmtUpdate, + AtomicLong cardinalityTime, AtomicLong sketchTime, + boolean isForce, ExecutionContext ec, + int idx, AtomicBoolean stopped) { + try { + if (stopped.get()) { + return; + } + long start = System.currentTimeMillis(); + dnCardinalityArray[idx] = getCurrentCardinality(shardKey, shardPart[idx]); + long mid = System.currentTimeMillis(); + cardinalityTime.getAndAdd(mid - start); + sketchArray[idx] = getCurrentHll(shardKey, shardPart[idx], isForce, ec); + sketchTime.getAndAdd(System.currentTimeMillis() - mid); + if (sketchArray[idx] == null) { + gmtUpdate[idx] = 0L; + } else { + gmtUpdate[idx] = System.currentTimeMillis(); + } + gmtCreated[idx] = System.currentTimeMillis(); + } catch (Exception e) { + logger.error("Failed to sketch " + shardKey + " on " + shardPart[idx], e); + stopped.compareAndSet(false, true); + throw e; + } + } + private static boolean isSketchDataReady(byte[][] sketchArray) { return !Arrays.stream(sketchArray).anyMatch(bytes -> bytes == null || bytes.length == 0); } @@ -580,21 +727,27 @@ public static String[] buildShardParts(String schemaName, String tableName) { * @param shardPart one physical table * @param ifForce true meaning from `analyze table`, false meaning from scheduled work */ - private static byte[] getCurrentHll(String shardKey, String shardPart, boolean ifForce) throws SQLException { + private static byte[] getCurrentHll(String shardKey, String shardPart, boolean ifForce, ExecutionContext ec) { String[] shardKeys = shardKey.split(":"); String schemaName = shardKeys[0]; String columnsName = shardKeys[2]; - long startTime = System.currentTimeMillis(); // only one part for now Map> shardPartToTopology = shardPartToTopology(shardPart); - byte[] hllBytes = new byte[12288]; + byte[] hllBytes = null; if (shardPartToTopology.size() > 1) { // should not happen throw new IllegalArgumentException("not support multi shardpart"); } + + if (ec != null && CrossEngineValidator.isJobInterrupted(ec)) { + long jobId = ec.getDdlJobId(); + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, + "The job '" + jobId + "' has been cancelled"); + } + for (Map.Entry> entry : shardPartToTopology.entrySet()) { String nodeName = entry.getKey(); Set physicalTables = entry.getValue(); @@ -606,6 +759,12 @@ private static byte[] getCurrentHll(String shardKey, String shardPart, boolean i } for (String physicalTable : physicalTables) { + if (ec != null && 
CrossEngineValidator.isJobInterrupted(ec)) { + long jobId = ec.getDdlJobId(); + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, + "The job '" + jobId + "' has been cancelled"); + } + // add time check if (!ifForce) { Pair p = needSketchInterrupted(); @@ -694,30 +853,21 @@ private static byte[] getCurrentHll(String shardKey, String shardPart, boolean i return null; } } finally { - if (rs != null) { - try { + try { + if (rs != null) { rs.close(); - } catch (SQLException e) { - e.printStackTrace(); } - } - if (st != null) { - try { + if (st != null) { st.close(); - } catch (SQLException e) { - e.printStackTrace(); } - } - if (c != null) { - try { + if (c != null) { c.close(); - } catch (SQLException e) { - e.printStackTrace(); } + } catch (SQLException e) { + logger.warn(e.getMessage(), e); } } } - } ModuleLogInfo.getInstance() @@ -734,18 +884,19 @@ private static byte[] getCurrentHll(String shardKey, String shardPart, boolean i return hllBytes; } - public SystemTableNDVSketchStatistic.SketchRow[] serialize(byte[][] sketchBytes) { + public SystemTableNDVSketchStatistic.SketchRow[] serialize(byte[][] sketchBytes, boolean columnar) { String[] shardInfo = shardKey.split(":"); String schemaName = shardInfo[0]; String tableName = shardInfo[1]; String columnNames = shardInfo[2]; List rows = Lists.newLinkedList(); for (int i = 0; i < shardParts.length; i++) { - byte[] sketchByte = null; - if (sketchBytes[i] == null) { - continue; - } else { - sketchByte = sketchBytes[i]; + byte[] sketchByte = sketchBytes[i]; + if (sketchByte == null) { + if (!columnar) { + continue; + } + sketchByte = new byte[1]; } SystemTableNDVSketchStatistic.SketchRow sketchRow = new SystemTableNDVSketchStatistic.SketchRow(schemaName, tableName, columnNames, diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/ndv/NDVSketch.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/ndv/NDVSketch.java index 8bc972b72..0414a9836 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/ndv/NDVSketch.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/statistic/ndv/NDVSketch.java @@ -23,10 +23,12 @@ import com.alibaba.polardbx.executor.sync.UpdateStatisticSyncAction; import com.alibaba.polardbx.gms.scheduler.ScheduledJobExecutorType; import com.alibaba.polardbx.gms.scheduler.ScheduledJobsRecord; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticResult; import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticTrace; import com.alibaba.polardbx.optimizer.config.table.statistic.inf.NDVSketchService; import com.alibaba.polardbx.optimizer.config.table.statistic.inf.SystemTableNDVSketchStatistic; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import org.glassfish.jersey.internal.guava.Sets; @@ -35,6 +37,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ThreadPoolExecutor; import static com.alibaba.polardbx.executor.statistic.ndv.HyperLogLogUtil.buildSketchKey; import static com.alibaba.polardbx.optimizer.config.table.statistic.inf.StatisticResultSource.HLL_SKETCH; @@ -187,7 +190,7 @@ public String scheduleJobs() { @Override public boolean sampleColumns(String schema, String logicalTableName) { - return StatisticUtils.sampleColumns(schema, logicalTableName); + return 
StatisticUtils.sampleOneTable(schema, logicalTableName); } @Override @@ -200,13 +203,18 @@ public long modifyTime(String schema, String tableName, String columnNames) { return ndvShardSketch.lastModifyTime(); } + public void cleanCache() { + stringNDVShardSketchMap.clear(); + } + @Override - public void updateAllShardParts(String schema, String tableName, String columnName) throws SQLException { + public void updateAllShardParts(String schema, String tableName, String columnName, ExecutionContext ec, + ThreadPoolExecutor sketchHllExecutor) throws SQLException { String ndvKey = buildSketchKey(schema, tableName, columnName); if (!stringNDVShardSketchMap.containsKey(ndvKey)) { // rebuild sketch NDVShardSketch ndvShardSketch = - NDVShardSketch.buildNDVShardSketch(schema, tableName, columnName, false); + NDVShardSketch.buildNDVShardSketch(schema, tableName, columnName, false, ec, sketchHllExecutor); if (ndvShardSketch != null) { stringNDVShardSketchMap.put(ndvKey, ndvShardSketch); } @@ -221,16 +229,19 @@ public void updateAllShardParts(String schema, String tableName, String columnNa new UpdateStatisticSyncAction( schema, tableName, - null)); + null), + SyncScope.ALL); } } @Override - public void reBuildShardParts(String schema, String tableName, String columnName) throws SQLException { + public void reBuildShardParts(String schema, String tableName, String columnName, ExecutionContext ec, + ThreadPoolExecutor sketchHllExecutor) throws SQLException { // only analyze table would enter here remove(tableName, columnName); String ndvKey = buildSketchKey(schema, tableName, columnName); - NDVShardSketch ndvShardSketch = NDVShardSketch.buildNDVShardSketch(schema, tableName, columnName, true); + NDVShardSketch ndvShardSketch = + NDVShardSketch.buildNDVShardSketch(schema, tableName, columnName, true, ec, sketchHllExecutor); if (ndvShardSketch != null) { stringNDVShardSketchMap.put(ndvKey, ndvShardSketch); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/AlterPartitionCountSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/AlterPartitionCountSyncAction.java index 8d192595a..9fd963754 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/AlterPartitionCountSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/AlterPartitionCountSyncAction.java @@ -20,8 +20,6 @@ import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.cursor.ResultCursor; -import java.util.Map; - public class AlterPartitionCountSyncAction extends RepartitionSyncAction { private static final Logger LOGGER = LoggerFactory.getLogger(AlterPartitionCountSyncAction.class); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/AlterSystemRefreshStorageSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/AlterSystemRefreshStorageSyncAction.java index fa513188c..a75e03524 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/AlterSystemRefreshStorageSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/AlterSystemRefreshStorageSyncAction.java @@ -19,9 +19,6 @@ import com.alibaba.polardbx.executor.cursor.ResultCursor; import com.alibaba.polardbx.gms.ha.impl.StorageHaManager; -import java.util.ArrayList; -import java.util.List; - public class AlterSystemRefreshStorageSyncAction implements ISyncAction { private String dnId; diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/BaselineInvalidatePlanSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/BaselineInvalidatePlanSyncAction.java index 629b753c0..2abb7a6c2 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/BaselineInvalidatePlanSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/BaselineInvalidatePlanSyncAction.java @@ -18,12 +18,10 @@ import com.alibaba.polardbx.executor.cursor.ResultCursor; import com.alibaba.polardbx.gms.module.LogLevel; -import com.alibaba.polardbx.gms.module.LogPattern; import com.alibaba.polardbx.gms.module.Module; import com.alibaba.polardbx.gms.module.ModuleLogInfo; import com.alibaba.polardbx.optimizer.planmanager.PlanManager; -import static com.alibaba.polardbx.gms.module.LogPattern.LOAD_DATA; import static com.alibaba.polardbx.gms.module.LogPattern.PROCESS_START; /** diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/BaselineLoadSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/BaselineLoadSyncAction.java index f296a940e..d7de134d0 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/BaselineLoadSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/BaselineLoadSyncAction.java @@ -17,8 +17,14 @@ package com.alibaba.polardbx.executor.sync; import com.alibaba.polardbx.executor.cursor.ResultCursor; +import com.alibaba.polardbx.gms.module.LogLevel; +import com.alibaba.polardbx.gms.module.Module; +import com.alibaba.polardbx.gms.module.ModuleLogInfo; import com.alibaba.polardbx.optimizer.planmanager.PlanManager; +import static com.alibaba.polardbx.gms.module.LogPattern.PROCESS_END; +import static com.alibaba.polardbx.gms.scheduler.ScheduledJobExecutorType.BASELINE_SYNC; + public class BaselineLoadSyncAction implements ISyncAction { public BaselineLoadSyncAction() { @@ -27,6 +33,12 @@ public BaselineLoadSyncAction() { @Override public ResultCursor sync() { PlanManager.getInstance().forceLoadAll(); + ModuleLogInfo.getInstance() + .logRecord( + Module.SPM, + PROCESS_END, + new String[] {"BaselineLoadSyncAction", ""}, + LogLevel.NORMAL); return null; } } \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/BaselinePersistSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/BaselinePersistSyncAction.java index 9e9b77607..149d1c740 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/BaselinePersistSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/BaselinePersistSyncAction.java @@ -19,6 +19,7 @@ import com.alibaba.polardbx.config.ConfigDataMode; import com.alibaba.polardbx.executor.cursor.ResultCursor; import com.alibaba.polardbx.gms.node.LeaderStatusBridge; +import com.alibaba.polardbx.gms.util.SyncUtil; import com.alibaba.polardbx.optimizer.planmanager.PlanManager; /** @@ -31,7 +32,7 @@ public BaselinePersistSyncAction() { @Override public ResultCursor sync() { - if (ConfigDataMode.isMasterMode() && LeaderStatusBridge.getInstance().hasLeadership()) { + if (SyncUtil.isNodeWithSmallestId()) { PlanManager.getInstance().persistBaseline(); } return null; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/BaselineQueryAllSyncAction.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/BaselineQueryAllSyncAction.java new file mode 100644 index 000000000..21e7c2be9 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/BaselineQueryAllSyncAction.java @@ -0,0 +1,44 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.sync; + +import com.alibaba.polardbx.executor.cursor.ResultCursor; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.gms.topology.ServerInstIdManager; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.optimizer.planmanager.PlanManager; + +public class BaselineQueryAllSyncAction implements ISyncAction { + + public BaselineQueryAllSyncAction() { + } + + @Override + public ResultCursor sync() { + String jsonString = PlanManager.getBaselineAsJson(PlanManager.getInstance().getBaselineMap()); + + ArrayResultCursor result = new ArrayResultCursor("baselines"); + result.addColumn("inst_id", DataTypes.StringType); + result.addColumn("baselines", DataTypes.StringType); + result.initMeta(); + String instId = ServerInstIdManager.getInstance().getInstId(); + + result.addRow(new Object[] {instId, jsonString}); + return result; + } +} + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/ClearFileSystemCacheSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/ClearFileSystemCacheSyncAction.java new file mode 100644 index 000000000..3c5aeeeb4 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/ClearFileSystemCacheSyncAction.java @@ -0,0 +1,96 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.sync; + +import com.alibaba.polardbx.common.Engine; +import com.alibaba.polardbx.common.oss.filesystem.cache.FileMergeCacheConfig; +import com.alibaba.polardbx.common.oss.filesystem.cache.FileMergeCachingFileSystem; +import com.alibaba.polardbx.common.properties.FileConfig; +import com.alibaba.polardbx.executor.cursor.ResultCursor; +import com.alibaba.polardbx.executor.operator.scan.BlockCacheManager; +import com.alibaba.polardbx.gms.engine.FileSystemGroup; +import com.alibaba.polardbx.gms.engine.FileSystemManager; +import org.apache.hadoop.fs.FileSystem; +import org.jetbrains.annotations.Nullable; + +/** + * @author chenzilin + */ +public class ClearFileSystemCacheSyncAction implements ISyncAction { + + private boolean all; + public Engine engine; + + public ClearFileSystemCacheSyncAction() { + } + + public ClearFileSystemCacheSyncAction(@Nullable Engine engine, boolean all) { + this.engine = engine; + this.all = all; + } + + @Override + public ResultCursor sync() { + + if (all) { + clearAllCache(); + // also clear in memory block cache. + BlockCacheManager.getInstance().clear(); + } else { + clearCache(FileSystemManager.getFileSystemGroup(engine)); + } + return null; + } + + private void clearAllCache() { + for (Engine engine : Engine.values()) { + if (Engine.hasCache(engine)) { + // If the engine does not exist, just skip + clearCache(FileSystemManager.getFileSystemGroup(engine, false)); + } + } + } + + private void clearCache(FileSystemGroup fileSystemGroup) { + if (fileSystemGroup != null) { + FileMergeCacheConfig fileMergeCacheConfig = FileConfig.getInstance().getMergeCacheConfig(); + // rebuild cache by new configs. + ((FileMergeCachingFileSystem) fileSystemGroup.getMaster()).getCacheManager() + .rebuildCache(fileMergeCacheConfig); + for (FileSystem slave : fileSystemGroup.getSlaves()) { + ((FileMergeCachingFileSystem) slave).getCacheManager().rebuildCache(fileMergeCacheConfig); + } + } + } + + public boolean isAll() { + return all; + } + + public void setAll(boolean all) { + this.all = all; + } + + public Engine getEngine() { + return engine; + } + + public void setEngine(Engine engine) { + this.engine = engine; + } +} + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/ClearOSSFileSystemSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/ClearOSSFileSystemSyncAction.java deleted file mode 100644 index e02124e27..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/ClearOSSFileSystemSyncAction.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.sync; - -import com.alibaba.polardbx.common.Engine; -import com.alibaba.polardbx.common.oss.filesystem.cache.FileMergeCachingFileSystem; -import com.alibaba.polardbx.common.properties.ConnectionProperties; -import com.alibaba.polardbx.common.utils.GeneralUtil; -import com.alibaba.polardbx.executor.archive.reader.BufferPoolManager; -import com.alibaba.polardbx.executor.cursor.ResultCursor; -import com.alibaba.polardbx.gms.engine.FileSystemGroup; -import com.alibaba.polardbx.gms.engine.FileSystemManager; -import com.alibaba.polardbx.gms.topology.InstConfigAccessor; -import com.alibaba.polardbx.gms.topology.InstConfigRecord; -import com.alibaba.polardbx.gms.util.InstIdUtil; -import com.alibaba.polardbx.gms.util.MetaDbUtil; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import org.apache.hadoop.fs.FileSystem; - -import java.sql.Connection; -import java.sql.SQLException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.function.Consumer; - -/** - * @author chenzilin - * @date 2022/2/21 11:29 - */ -public class ClearOSSFileSystemSyncAction implements ISyncAction { - - public ClearOSSFileSystemSyncAction() { - } - - @Override - public ResultCursor sync() { - FileSystemGroup fileSystemGroup = FileSystemManager.getFileSystemGroup(Engine.OSS); - if (fileSystemGroup != null) { - Map<String, Long> configs = fetchConfig(); - - if (configs == null || configs.isEmpty()) { - // just clear - ((FileMergeCachingFileSystem) fileSystemGroup.getMaster()).getCacheManager().clear(); - for (FileSystem slave : fileSystemGroup.getSlaves()) { - ((FileMergeCachingFileSystem) slave).getCacheManager().clear(); - } - } else { - // rebuild cache by new configs. - ((FileMergeCachingFileSystem) fileSystemGroup.getMaster()).getCacheManager().rebuildCache(configs); - for (FileSystem slave : fileSystemGroup.getSlaves()) { - ((FileMergeCachingFileSystem) slave).getCacheManager().rebuildCache(configs); - } - } - } - return null; - } - - private Map<String, Long> fetchConfig() { - Map<String, Long> results = new HashMap<>(); - try (Connection connection = MetaDbUtil.getConnection()) { - InstConfigAccessor accessor = new InstConfigAccessor(); - accessor.setConnection(connection); - - doFetch(results, ConnectionProperties.OSS_FS_CACHE_TTL, accessor); - doFetch(results, ConnectionProperties.OSS_FS_MAX_CACHED_ENTRIES, accessor); - } catch (SQLException e) { - GeneralUtil.nestedException(e); - } - return results; - } - - private void doFetch(Map<String, Long> results, String connProp, InstConfigAccessor accessor) { - List<InstConfigRecord> records = accessor.queryByParamKey(InstIdUtil.getInstId(), connProp); - Long result; - if (records != null - && !records.isEmpty() - && (result = DataTypes.LongType.convertFrom(records.get(0).paramVal)) != null) { - results.put(connProp, result); - } - } -} - diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/CollectVariableSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/CollectVariableSyncAction.java new file mode 100644 index 000000000..5bca02d3e --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/CollectVariableSyncAction.java @@ -0,0 +1,61 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.sync; + +import com.alibaba.polardbx.common.properties.DynamicConfig; +import com.alibaba.polardbx.executor.cursor.ResultCursor; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.sync.ISyncAction; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +import java.lang.reflect.Field; + +public class CollectVariableSyncAction implements ISyncAction { + // varKey should be the member variable in DynamicConfig, + // not the one in ConnectionProperties. + private String varKey; + + public CollectVariableSyncAction(String varKey) { + this.varKey = varKey; + } + + public String getVarKey() { + return varKey; + } + + public void setVarKey(String varKey) { + this.varKey = varKey; + } + + @Override + public ResultCursor sync() { + ArrayResultCursor result = new ArrayResultCursor("Value"); + result.addColumn("Value", DataTypes.StringType); + + try { + Class clazz = DynamicConfig.class; + Field field = clazz.getDeclaredField(varKey); + field.setAccessible(true); + Object obj = field.get(DynamicConfig.getInstance()); + result.addRow(new Object[] {obj.toString()}); + } catch (Throwable t) { + // ignore, result will be empty. + } + + return result; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/ColumnarMinSnapshotPurgeSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/ColumnarMinSnapshotPurgeSyncAction.java new file mode 100644 index 000000000..e0e0dafc1 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/ColumnarMinSnapshotPurgeSyncAction.java @@ -0,0 +1,45 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.sync; + +import com.alibaba.polardbx.executor.gms.ColumnarManager; +import com.alibaba.polardbx.gms.sync.IGmsSyncAction; + +public class ColumnarMinSnapshotPurgeSyncAction implements IGmsSyncAction { + + private Long minTso; + + public ColumnarMinSnapshotPurgeSyncAction(Long minTso) { + this.minTso = minTso; + } + + @Override + public Object sync() { + if (minTso != null && minTso != Long.MIN_VALUE) { + ColumnarManager.getInstance().purge(minTso); + } + return null; + } + + public Long getMinTso() { + return this.minTso; + } + + public void setMinTso(Long minTso) { + this.minTso = minTso; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/ColumnarSnapshotUpdateSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/ColumnarSnapshotUpdateSyncAction.java new file mode 100644 index 000000000..8a789c26b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/ColumnarSnapshotUpdateSyncAction.java @@ -0,0 +1,46 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.sync; + +import com.alibaba.polardbx.executor.gms.ColumnarManager; +import com.alibaba.polardbx.gms.sync.IGmsSyncAction; + +public class ColumnarSnapshotUpdateSyncAction implements IGmsSyncAction { + + private Long latestTso; + + public ColumnarSnapshotUpdateSyncAction(Long latestTso) { + this.latestTso = latestTso; + } + + @Override + public Object sync() { + if (latestTso != null) { + ColumnarManager.getInstance().setLatestTso(latestTso); + } + + return null; + } + + public Long getLatestTso() { + return this.latestTso; + } + + public void setLatestTso(Long latestTso) { + this.latestTso = latestTso; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/CreateViewSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/CreateViewSyncAction.java index 4edc99e53..85678cfd8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/CreateViewSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/CreateViewSyncAction.java @@ -18,6 +18,7 @@ import com.alibaba.polardbx.executor.cursor.ResultCursor; import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.core.planner.PlanCache; /** * @author dylan @@ -40,6 +41,7 @@ public CreateViewSyncAction(String schemaName, String viewName) { public ResultCursor sync() { if (viewName != null) { OptimizerContext.getContext(schemaName).getViewManager().invalidate(viewName); + PlanCache.getInstance().invalidateByTable(schemaName, viewName); } return null; @@ -53,4 +55,11 @@ public void setViewName(String tableName) { this.viewName = tableName; } + public String getSchemaName() { + return schemaName; + } + + public void setSchemaName(String schemaName) { + this.schemaName = schemaName; + } } \ No newline at 
end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/DeleteBaselineSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/DeleteBaselineSyncAction.java index 1c86a6a10..0f37ae7ce 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/DeleteBaselineSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/DeleteBaselineSyncAction.java @@ -23,9 +23,9 @@ public class DeleteBaselineSyncAction implements ISyncAction { private String schemaName; - private final String parameterSql; + private String parameterSql; - private final Integer planInfoId; + private Integer planInfoId; public DeleteBaselineSyncAction(String schemaName, String parameterSql) { this.schemaName = schemaName; @@ -64,5 +64,13 @@ public String getParameterSql() { public Integer getPlanInfoId() { return planInfoId; } + + public void setParameterSql(String parameterSql) { + this.parameterSql = parameterSql; + } + + public void setPlanInfoId(Integer planInfoId) { + this.planInfoId = planInfoId; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/DropViewSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/DropViewSyncAction.java index 5e3bc21af..526ca16f2 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/DropViewSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/DropViewSyncAction.java @@ -58,4 +58,11 @@ public void setViewNames(List viewNames) { this.viewNames = viewNames; } + public String getSchemaName() { + return schemaName; + } + + public void setSchemaName(String schemaName) { + this.schemaName = schemaName; + } } \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/EncdbMekProvisionSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/EncdbMekProvisionSyncAction.java new file mode 100644 index 000000000..ff4a72026 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/EncdbMekProvisionSyncAction.java @@ -0,0 +1,64 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.sync; + +import com.alibaba.polardbx.common.encdb.EncdbException; +import com.alibaba.polardbx.common.encdb.enums.HashAlgo; +import com.alibaba.polardbx.common.encdb.utils.HashUtil; +import com.alibaba.polardbx.common.utils.encrypt.SecurityUtil; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.cursor.ResultCursor; +import com.alibaba.polardbx.gms.metadb.encdb.EncdbKeyManager; + +import java.security.NoSuchAlgorithmException; +import java.util.Arrays; + +/** + * @author pangzhaoxing + */ +public class EncdbMekProvisionSyncAction implements ISyncAction { + + private static final Logger logger = LoggerFactory.getLogger(EncdbMekProvisionSyncAction.class); + + private byte[] mek; + + public EncdbMekProvisionSyncAction(byte[] mek) { + this.mek = mek; + } + + public byte[] getMek() { + return mek; + } + + public void setMek(byte[] mek) { + this.mek = mek; + } + + @Override + public ResultCursor sync() { + try { + if (!EncdbKeyManager.getInstance().setMek(mek)) { + throw new EncdbException("the mekHash is inconsistent with mek"); + } + } catch (Exception e) { + throw new EncdbException("sync mek failed", e); + } + return null; + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchMetadataLockSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchMetadataLockSyncAction.java index 1c433cd20..a9487b980 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchMetadataLockSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchMetadataLockSyncAction.java @@ -17,7 +17,6 @@ package com.alibaba.polardbx.executor.sync; import com.alibaba.polardbx.common.TddlNode; -import com.alibaba.polardbx.common.jdbc.BytesSql; import com.alibaba.polardbx.druid.sql.parser.ByteString; import com.alibaba.polardbx.executor.cursor.ResultCursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchPartitionHeatmapSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchPartitionHeatmapSyncAction.java index 04fa19ef5..905f65cd7 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchPartitionHeatmapSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchPartitionHeatmapSyncAction.java @@ -27,7 +27,6 @@ /** * @author ximing.yd - * @date 2022/1/25 6:56 下午 */ public class FetchPartitionHeatmapSyncAction implements ISyncAction { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchPlanCacheCapacitySyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchPlanCacheCapacitySyncAction.java index 234ec6c29..faa9b5cc8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchPlanCacheCapacitySyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchPlanCacheCapacitySyncAction.java @@ -19,7 +19,6 @@ import com.alibaba.polardbx.common.TddlNode; import com.alibaba.polardbx.executor.cursor.ResultCursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; -import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import 
com.alibaba.polardbx.optimizer.core.planner.PlanCache; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchPlanCacheSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchPlanCacheSyncAction.java index 5f6dd1a5e..b48c23f0e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchPlanCacheSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchPlanCacheSyncAction.java @@ -20,6 +20,7 @@ import com.alibaba.polardbx.common.TddlNode; import com.alibaba.polardbx.executor.cursor.ResultCursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.gms.topology.SystemDbHelper; import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.planner.ExecutionPlan; @@ -90,6 +91,7 @@ public void setWithParameter(boolean withParameter) { public ResultCursor sync() { ArrayResultCursor result = new ArrayResultCursor("PLAN_CACHE"); result.addColumn("COMPUTE_NODE", DataTypes.StringType); + result.addColumn("SCHEMA_NAME", DataTypes.StringType); result.addColumn("TABLE_NAMES", DataTypes.StringType); result.addColumn("ID", DataTypes.StringType); result.addColumn("HIT_COUNT", DataTypes.LongType); @@ -107,6 +109,11 @@ public ResultCursor sync() { if ((!StringUtil.isEmpty(schemaName)) && (!schemaName.equalsIgnoreCase(cacheKey.getSchema()))) { continue; } + + if (SystemDbHelper.CDC_DB_NAME.equalsIgnoreCase(cacheKey.getSchema())) { + continue; + } + final String plan; if (withPlan) { if (executionPlan == PlaceHolderExecutionPlan.INSTANCE) { @@ -133,6 +140,7 @@ public ResultCursor sync() { result.addRow(new Object[] { TddlNode.getHost() + ":" + TddlNode.getPort(), + cacheKey.getSchema(), cacheKey.getTableMetas().stream().map(TableMeta::getTableName).collect(Collectors.joining(",")), cacheKey.getTemplateId(), executionPlan.getHitCount().longValue(), diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchRunningScheduleJobsSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchRunningScheduleJobsSyncAction.java index 899d4eb9d..29de883ad 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchRunningScheduleJobsSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchRunningScheduleJobsSyncAction.java @@ -21,18 +21,8 @@ import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.scheduler.ScheduledJobsManager; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.optimizer.planmanager.BaselineInfo; -import com.alibaba.polardbx.optimizer.planmanager.PlanInfo; -import com.alibaba.polardbx.optimizer.planmanager.PlanManager; -import com.alibaba.polardbx.optimizer.planmanager.parametric.Point; -import org.apache.calcite.plan.RelOptUtil; -import org.apache.calcite.rel.RelNode; -import org.apache.calcite.sql.SqlExplainFormat; -import org.apache.calcite.sql.SqlExplainLevel; -import java.text.NumberFormat; import java.util.Map; -import java.util.Set; /** * @author fangwu diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchSPMSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchSPMSyncAction.java index 7523cbdda..29beee220 100644 --- 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchSPMSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/FetchSPMSyncAction.java @@ -17,29 +17,18 @@ package com.alibaba.polardbx.executor.sync; import com.alibaba.polardbx.common.TddlNode; -import com.alibaba.polardbx.common.utils.TStringUtil; import com.alibaba.polardbx.executor.cursor.ResultCursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; -import com.alibaba.polardbx.optimizer.OptimizerContext; -import com.alibaba.polardbx.optimizer.PlannerContext; -import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.gms.topology.ServerInstIdManager; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.optimizer.core.planner.ExecutionPlan; -import com.alibaba.polardbx.optimizer.core.planner.PlaceHolderExecutionPlan; -import com.alibaba.polardbx.optimizer.core.planner.PlanCache; import com.alibaba.polardbx.optimizer.planmanager.BaselineInfo; import com.alibaba.polardbx.optimizer.planmanager.PlanInfo; import com.alibaba.polardbx.optimizer.planmanager.PlanManager; -import com.alibaba.polardbx.optimizer.planmanager.parametric.BaseParametricQueryAdvisor; import com.alibaba.polardbx.optimizer.planmanager.parametric.Point; -import com.alibaba.polardbx.optimizer.utils.RelUtils; -import com.alibaba.polardbx.optimizer.utils.RexUtils; import org.apache.calcite.plan.RelOptUtil; import org.apache.calcite.rel.RelNode; -import org.apache.calcite.rel.type.RelDataTypeFieldImpl; import org.apache.calcite.sql.SqlExplainFormat; import org.apache.calcite.sql.SqlExplainLevel; -import org.apache.calcite.sql.type.SqlTypeName; import java.text.NumberFormat; import java.util.Map; @@ -88,7 +77,9 @@ public ResultCursor sync() { PlanManager planManager = PlanManager.getInstance(); Map<String, BaselineInfo> baselineMap = planManager.getBaselineMap(schemaName); - ArrayResultCursor result = new ArrayResultCursor("PLAN_CACHE"); + ArrayResultCursor result = new ArrayResultCursor("SPM"); + result.addColumn("HOST", DataTypes.StringType); + result.addColumn("INST_ID", DataTypes.StringType); result.addColumn("BASELINE_ID", DataTypes.StringType); result.addColumn("SCHEMA_NAME", DataTypes.StringType); result.addColumn("PLAN_ID", DataTypes.LongType); @@ -111,13 +102,16 @@ public ResultCursor sync() { if (baselineMap == null) { return result; } - + String instId = ServerInstIdManager.getInstance().getInstId(); + String host = TddlNode.getHost(); for (Map.Entry<String, BaselineInfo> entry : baselineMap.entrySet()) { String paramSql = entry.getKey(); BaselineInfo baselineInfo = entry.getValue(); Set<Point> points = baselineInfo.getPointSet(); if (baselineInfo.isRebuildAtLoad()) { result.addRow(new Object[] { + host, + instId, baselineInfo.getId(), schemaName, 0, // plan id @@ -150,6 +144,8 @@ Point point = findPoint(points, planInfo.getId()); NumberFormat numberFormat = NumberFormat.getPercentInstance(); result.addRow(new Object[] { + host, + instId, baselineInfo.getId(), schemaName, planInfo.getId(), diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/GlobalAcquireMdlLockInDbSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/GlobalAcquireMdlLockInDbSyncAction.java index 4d0358b95..e7579679f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/GlobalAcquireMdlLockInDbSyncAction.java +++
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/GlobalAcquireMdlLockInDbSyncAction.java @@ -31,7 +31,6 @@ import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.statistics.SQLRecorderLogger; import lombok.Getter; -import lombok.Setter; import java.text.MessageFormat; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/GsiStatisticsSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/GsiStatisticsSyncAction.java index 07de5d39d..4716c34cf 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/GsiStatisticsSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/GsiStatisticsSyncAction.java @@ -16,6 +16,7 @@ package com.alibaba.polardbx.executor.sync; +import com.alibaba.polardbx.config.ConfigDataMode; import com.alibaba.polardbx.executor.cursor.ResultCursor; import com.alibaba.polardbx.executor.common.GsiStatisticsManager; @@ -48,21 +49,22 @@ public GsiStatisticsSyncAction(String schemaName, String gsiName, String newValu @Override public ResultCursor sync() { - GsiStatisticsManager statisticsManager = GsiStatisticsManager.getInstance(); - if (alterKind == QUERY_RECORD || alterKind == INSERT_RECORD || alterKind == DELETE_RECORD) { - statisticsManager.writeBackSchemaLevelGsiStatistics(schemaName); - statisticsManager.reLoadSchemaLevelGsiStatisticsInfoFromMetaDb(schemaName); - } else if (alterKind == DELETE_SCHEMA) { - statisticsManager.removeSchemaLevelRecordFromCache(schemaName); - } else if (alterKind == RENAME_RECORD) { - statisticsManager.renameGsiRecordFromCache(schemaName, gsiName, newValue); - statisticsManager.writeBackSchemaLevelGsiStatistics(schemaName); - statisticsManager.reLoadSchemaLevelGsiStatisticsInfoFromMetaDb(schemaName); - } else if (alterKind == WRITE_BACK_ALL_SCHEMA) { - statisticsManager.writeBackAllGsiStatistics(); - statisticsManager.loadFromMetaDb(); + if (ConfigDataMode.needDNResource()) { + GsiStatisticsManager statisticsManager = GsiStatisticsManager.getInstance(); + if (alterKind == QUERY_RECORD || alterKind == INSERT_RECORD || alterKind == DELETE_RECORD) { + statisticsManager.writeBackSchemaLevelGsiStatistics(schemaName); + statisticsManager.reLoadSchemaLevelGsiStatisticsInfoFromMetaDb(schemaName); + } else if (alterKind == DELETE_SCHEMA) { + statisticsManager.removeSchemaLevelRecordFromCache(schemaName); + } else if (alterKind == RENAME_RECORD) { + statisticsManager.renameGsiRecordFromCache(schemaName, gsiName, newValue); + statisticsManager.writeBackSchemaLevelGsiStatistics(schemaName); + statisticsManager.reLoadSchemaLevelGsiStatisticsInfoFromMetaDb(schemaName); + } else if (alterKind == WRITE_BACK_ALL_SCHEMA) { + statisticsManager.writeBackAllGsiStatistics(); + statisticsManager.loadFromMetaDb(); + } } - return null; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/InspectRuleVersionSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/InspectRuleVersionSyncAction.java index f18d6127c..ea3eed6f7 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/InspectRuleVersionSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/InspectRuleVersionSyncAction.java @@ -62,7 +62,9 @@ public InspectRuleVersionSyncAction(String schemaName) { @Override public ResultCursor sync() { ArrayResultCursor resultCursor = new ArrayResultCursor("RULE_INFO"); + 
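+ // handleGMS() populates the RULE_INFO cursor from GMS server metadata.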
handleGMS(resultCursor); + return resultCursor; } @@ -83,7 +85,7 @@ private void handleGMS(ArrayResultCursor resultCursor) { String isReadOnly = ""; if ((localNode != null && localNode.instType != ServerInfoRecord.INST_TYPE_MASTER && localNode.instType != ServerInfoRecord.INST_TYPE_STANDBY) || - (localNode == null && ConfigDataMode.isSlaveMode())) { + (localNode == null && ConfigDataMode.isReadOnlyMode())) { isReadOnly = "Y"; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/InvalidateBufferPoolSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/InvalidateBufferPoolSyncAction.java index 361a303e7..910e0b957 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/InvalidateBufferPoolSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/InvalidateBufferPoolSyncAction.java @@ -18,15 +18,11 @@ import com.alibaba.polardbx.executor.archive.reader.BufferPoolManager; import com.alibaba.polardbx.executor.cursor.ResultCursor; -import com.alibaba.polardbx.optimizer.OptimizerContext; -import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticManager; -import com.google.common.collect.ImmutableList; /** * @author chenzilin - * @date 2022/2/21 11:29 */ -public class InvalidateBufferPoolSyncAction implements ISyncAction { +public class InvalidateBufferPoolSyncAction implements ISyncAction { private String schemaName = null; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/LocalSyncManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/LocalSyncManager.java deleted file mode 100644 index 1e9550be9..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/LocalSyncManager.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.executor.sync; - -import com.alibaba.polardbx.common.model.lifecycle.AbstractLifecycle; -import com.alibaba.polardbx.common.utils.extension.Activate; -import com.alibaba.polardbx.executor.cursor.ResultCursor; -import com.alibaba.polardbx.executor.utils.ExecUtils; -import com.alibaba.polardbx.gms.sync.IGmsSyncAction; -import com.alibaba.polardbx.gms.sync.ISyncResultHandler; -import com.alibaba.polardbx.gms.sync.SyncScope; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -/** - * Local (in-process) invocation - * - * @author agapple Mar 26, 2015 5:49:03 PM - * @since 5.1.19 - */ -@Activate(order = 1) -public class LocalSyncManager extends AbstractLifecycle implements ISyncManager { - - @Override - public List<List<Map<String, Object>>> sync(IGmsSyncAction action, String schemaName, boolean throwExceptions) { - List<List<Map<String, Object>>> results = new ArrayList<>(1); - results.add(ExecUtils.resultSetToList((ResultCursor) action.sync())); - return results; - } - - @Override - public List<List<Map<String, Object>>> sync(IGmsSyncAction action, String schema, SyncScope scope, - boolean throwExceptions) { - // Don't need sync scope and result handler locally. - return sync(action, schema, throwExceptions); - } - - @Override - public void sync(IGmsSyncAction action, String schema, ISyncResultHandler handler, boolean throwExceptions) { - // Don't need sync scope and result handler locally. - sync(action, schema, throwExceptions); - } - - @Override - public void sync(IGmsSyncAction action, String schema, SyncScope scope, ISyncResultHandler handler, - boolean throwExceptions) { - // Don't need sync scope and result handler locally. - sync(action, schema, throwExceptions); - } - - @Override - public List<Map<String, Object>> sync(IGmsSyncAction action, String schemaName, String serverKey) { - return ExecUtils.resultSetToList((ResultCursor) action.sync()); - } - -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/ReloadSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/ReloadSyncAction.java index 6b04998f0..64fc89c5c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/ReloadSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/ReloadSyncAction.java @@ -17,7 +17,6 @@ package com.alibaba.polardbx.executor.sync; import com.alibaba.polardbx.atom.CacheVariables; -import com.alibaba.polardbx.config.ConfigDataMode; import com.alibaba.polardbx.executor.common.ExecutorContext; import com.alibaba.polardbx.executor.cursor.ResultCursor; import com.alibaba.polardbx.executor.pl.ProcedureManager; @@ -33,6 +32,8 @@ import com.alibaba.polardbx.optimizer.core.expression.JavaFunctionManager; import com.alibaba.polardbx.optimizer.view.SystemTableView; +import java.util.Optional; + public class ReloadSyncAction implements ISyncAction { private ReloadType type; @@ -66,8 +67,6 @@ public ResultCursor sync() { CacheVariables.invalidateAll(); break; case USERS: - // trigger a config refresh - ConfigDataMode.setRefreshConfigTimestamp(System.currentTimeMillis() + 5 * 1000); break; case FILESTORAGE: // reset rate-limiter of oss file system @@ -85,6 +84,12 @@ public ResultCursor sync() { case STATISTICS: StatisticManager.getInstance().clearAndReloadData(); break; + + case COLUMNARMANAGER: + Optional + .ofNullable(ExecutorContext.getContext(schemaName)) + .ifPresent(ExecutorContext::reloadColumnarManager); + break; default: break; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/RemoveColumnStatisticSyncAction.java
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/RemoveColumnStatisticSyncAction.java index c7290472f..401387347 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/RemoveColumnStatisticSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/RemoveColumnStatisticSyncAction.java @@ -17,9 +17,7 @@ package com.alibaba.polardbx.executor.sync; import com.alibaba.polardbx.executor.cursor.ResultCursor; -import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticManager; -import com.alibaba.polardbx.optimizer.config.table.statistic.inf.StatisticService; import java.util.List; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/RemoveTableStatisticSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/RemoveTableStatisticSyncAction.java index e802fc3ec..450f43b85 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/RemoveTableStatisticSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/RemoveTableStatisticSyncAction.java @@ -16,10 +16,9 @@ package com.alibaba.polardbx.executor.sync; -import com.google.common.collect.ImmutableList; import com.alibaba.polardbx.executor.cursor.ResultCursor; -import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticManager; +import com.google.common.collect.ImmutableList; public class RemoveTableStatisticSyncAction implements ISyncAction { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/RequestColumnarSnapshotSeqSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/RequestColumnarSnapshotSeqSyncAction.java new file mode 100644 index 000000000..b3a9aac87 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/RequestColumnarSnapshotSeqSyncAction.java @@ -0,0 +1,55 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.sync; + +import com.alibaba.polardbx.executor.common.ExecutorContext; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.gms.ColumnarManager; +import com.alibaba.polardbx.executor.spi.ITransactionManager; +import com.alibaba.polardbx.gms.sync.IGmsSyncAction; +import com.alibaba.polardbx.gms.topology.SystemDbHelper; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +import java.util.Iterator; +import java.util.Map; + +public class RequestColumnarSnapshotSeqSyncAction implements IGmsSyncAction { + @Override + public Object sync() { + long minSnapshotTime = ColumnarManager.getInstance().latestTso(); + Map<String, ExecutorContext> executorContextMap = ExecutorContext.getExecutorContextMap(); + for (Map.Entry<String, ExecutorContext> entry : executorContextMap.entrySet()) { + if (SystemDbHelper.isDBBuildInExceptCdc(entry.getKey())) { + continue; + } + ITransactionManager manager = entry.getValue().getTransactionManager(); + long minTsoOfCurrentDb = manager.getColumnarMinSnapshotSeq(); + minSnapshotTime = Math.min(minSnapshotTime, minTsoOfCurrentDb); + } + // Notice that if there are no user DBs, minSnapshotTime simply stays at the latest columnar TSO + ArrayResultCursor resultCursor = buildResultCursor(); + resultCursor.addRow(new Object[] {minSnapshotTime}); + return resultCursor; + } + + public static ArrayResultCursor buildResultCursor() { + ArrayResultCursor resultCursor = new ArrayResultCursor("Request Columnar Snapshot"); + resultCursor.addColumn("TSO", DataTypes.LongType); + resultCursor.initMeta(); + return resultCursor; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/StatisticQuerySyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/StatisticQuerySyncAction.java new file mode 100644 index 000000000..da1a7aeec --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/StatisticQuerySyncAction.java @@ -0,0 +1,107 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.alibaba.polardbx.executor.sync; + +import com.alibaba.polardbx.common.TddlNode; +import com.alibaba.polardbx.executor.cursor.ResultCursor; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.gms.util.StatisticUtils; +import com.alibaba.polardbx.gms.topology.SystemDbHelper; +import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; +import com.alibaba.polardbx.optimizer.config.table.statistic.Histogram; +import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticManager; +import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticResult; +import com.alibaba.polardbx.optimizer.config.table.statistic.TopN; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.optimizer.parse.util.Pair; +import com.alibaba.polardbx.optimizer.view.InformationSchemaStatisticsData; +import org.apache.calcite.sql.type.SqlTypeName; + +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static com.alibaba.polardbx.optimizer.config.table.statistic.StatisticUtils.getColumnMetas; + +public class StatisticQuerySyncAction implements ISyncAction { + + public StatisticQuerySyncAction() { + } + + @Override + public ResultCursor sync() { + Map<String, Map<String, StatisticManager.CacheLine>> statisticCache = + StatisticManager.getInstance().getStatisticCache(); + + if (statisticCache == null) { + return null; + } + ArrayResultCursor result = new ArrayResultCursor("statistics_data"); + for (Pair<String, SqlTypeName> pair : InformationSchemaStatisticsData.meta) { + result.addColumn(pair.getKey(), InformationSchemaStatisticsData.transform(pair.getValue())); + } + result.initMeta(); + String host = TddlNode.getHost(); + + for (Map.Entry<String, Map<String, StatisticManager.CacheLine>> entrySchema : statisticCache.entrySet()) { + String schema = entrySchema.getKey(); + if (SystemDbHelper.isDBBuildIn(schema)) { + continue; + } + for (Map.Entry<String, StatisticManager.CacheLine> entryTable : entrySchema.getValue().entrySet()) { + String table = entryTable.getKey(); + + // skip OSS tables because the sample process would do the same + if (StatisticUtils.isFileStore(schema, table)) { + continue; + } + + StatisticManager.CacheLine cacheLine = StatisticManager.getInstance().getCacheLine(schema, table); + List<ColumnMeta> columns = getColumnMetas(false, schema, table); + if (columns == null) { + continue; + } + Map<String, Histogram> histogramMap = cacheLine.getHistogramMap(); + for (ColumnMeta columnMeta : columns) { + String column = columnMeta.getName().toLowerCase(); + StatisticResult statisticResult = + StatisticManager.getInstance().getCardinality(schema, table, column, false, false); + + Histogram histogram = histogramMap == null ? null : histogramMap.get(column); + String topN = cacheLine.getTopN(column) == null ? "" : cacheLine.getTopN(column).manualReading(); + result.addRow(new Object[] { + host, + schema, + table, + column, + cacheLine.getRowCount(), + statisticResult.getLongValue(), + statisticResult.getSource(), + topN == null ? "" : topN, + histogram == null || histogram.getBuckets().size() == 0 ?
+ "" : histogram.manualReading(), + cacheLine.getSampleRate() + }); + } + + } + } + + return result; + } +} + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/SyncManagerHelper.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/SyncManagerHelper.java index 95a165036..14a7daa1e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/SyncManagerHelper.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/SyncManagerHelper.java @@ -42,28 +42,18 @@ public class SyncManagerHelper { instance = ExtensionLoader.load(ISyncManager.class); } - public static List>> sync(IGmsSyncAction action) { - return sync(action, false); + public static List>> sync(IGmsSyncAction action, SyncScope scope) { + return sync(action, scope, false); } - /** - * sync with default db(polardbx) - */ - public static List>> syncWithDefaultDB(IGmsSyncAction action) { - return sync(action, SystemDbHelper.DEFAULT_DB_NAME, false); + public static List>> syncWithDefaultDB(IGmsSyncAction action, SyncScope scope) { + return sync(action, SystemDbHelper.DEFAULT_DB_NAME, scope, false); } - public static List>> sync(IGmsSyncAction action, boolean throwExceptions) { + public static List>> sync(IGmsSyncAction action, SyncScope scope, + boolean throwExceptions) { DdlMetaLogUtil.DDL_META_LOG.info("sync. action:" + JSONObject.toJSONString(action)); - return instance.sync(action, DefaultSchema.getSchemaName(), throwExceptions); - } - - public static List>> sync(IGmsSyncAction action, String schema) { - return sync(action, schema, false); - } - - public static List>> sync(IGmsSyncAction action, String schema, boolean throwExceptions) { - return instance.sync(action, schema, throwExceptions); + return sync(action, DefaultSchema.getSchemaName(), scope, throwExceptions); } public static List>> sync(IGmsSyncAction action, String schema, SyncScope scope) { @@ -75,14 +65,6 @@ public static List>> sync(IGmsSyncAction action, String return instance.sync(action, schema, scope, throwExceptions); } - public static void sync(IGmsSyncAction action, String schema, ISyncResultHandler handler) { - sync(action, schema, handler, false); - } - - public static void sync(IGmsSyncAction action, String schema, ISyncResultHandler handler, boolean throwExceptions) { - instance.sync(action, schema, handler, throwExceptions); - } - public static void sync(IGmsSyncAction action, String schema, SyncScope scope, ISyncResultHandler handler) { sync(action, schema, scope, handler, false); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/UpdateRowCountSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/UpdateRowCountSyncAction.java index a0cb038d5..c69e8c62e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/UpdateRowCountSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/UpdateRowCountSyncAction.java @@ -17,7 +17,6 @@ package com.alibaba.polardbx.executor.sync; import com.alibaba.polardbx.executor.cursor.ResultCursor; -import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticManager; import java.util.HashMap; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/UpdateStatisticSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/UpdateStatisticSyncAction.java index 3f1ee8e19..d9ce38998 100644 --- 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/UpdateStatisticSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/UpdateStatisticSyncAction.java @@ -16,9 +16,9 @@ package com.alibaba.polardbx.executor.sync; +import com.alibaba.polardbx.executor.cursor.ResultCursor; import com.alibaba.polardbx.gms.module.LogLevel; import com.alibaba.polardbx.gms.module.Module; -import com.alibaba.polardbx.executor.cursor.ResultCursor; import com.alibaba.polardbx.gms.module.ModuleLogInfo; import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticManager; import com.alibaba.polardbx.optimizer.core.planner.PlanCache; @@ -78,7 +78,7 @@ public ResultCursor sync() { } // refresh plan cache - PlanManager.getInstance().invalidateTable(schemaName, logicalTableName); + PlanCache.getInstance().invalidateByTable(schemaName, logicalTableName); // reload ndv sketch StatisticManager.getInstance().reloadNDVbyTableName(schemaName, logicalTableName); ModuleLogInfo.getInstance() diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/ddl/RemoteDdlTaskSyncAction.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/ddl/RemoteDdlTaskSyncAction.java index 882e513f1..baca94154 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/ddl/RemoteDdlTaskSyncAction.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/sync/ddl/RemoteDdlTaskSyncAction.java @@ -46,9 +46,8 @@ public RemoteDdlTaskSyncAction(final String schemaName, final long jobId, final } /** - * @return - * SUCCESS: 'TRUE'/'FALSE' - * MSG: 'XXXXXX' + * @return SUCCESS: 'TRUE'/'FALSE' + * MSG: 'XXXXXX' */ @Override public ResultCursor sync() { @@ -62,29 +61,29 @@ public ResultCursor sync() { IServerConfigManager serverConfigManager = OptimizerHelper.getServerConfigManager(); serverConfigManager.remoteExecuteDdlTask(schemaName, jobId, taskId); buildSuccessResult(result); - }catch (Exception e){ + } catch (Exception e) { SQLRecorderLogger.ddlEngineLogger.error("execute/rollback DDL TASK failed", e); buildFailureResult(result, e.getMessage()); } return result; } - private void buildSuccessResult(ArrayResultCursor result){ - result.addRow(new Object[]{ + private void buildSuccessResult(ArrayResultCursor result) { + result.addRow(new Object[] { String.valueOf(Boolean.TRUE), "" }); } - private void buildFailureResult(ArrayResultCursor result, String errMsg){ - result.addRow(new Object[]{ + private void buildFailureResult(ArrayResultCursor result, String errMsg) { + result.addRow(new Object[] { String.valueOf(Boolean.FALSE), errMsg }); } - public static boolean isRemoteDdlTaskSyncActionSuccess(List<Map<String, Object>> result){ - if(CollectionUtils.isEmpty(result)){ + public static boolean isRemoteDdlTaskSyncActionSuccess(List<Map<String, Object>> result) { + if (CollectionUtils.isEmpty(result)) { return false; } return StringUtils.equalsIgnoreCase( @@ -93,8 +92,8 @@ public static boolean isRemoteDdlTaskSyncActionSuccess(List<Map<String, Object>> ); } - public static String getMsgFromResult(List<Map<String, Object>> result){ - if(CollectionUtils.isEmpty(result)){ + public static String getMsgFromResult(List<Map<String, Object>> result) { + if (CollectionUtils.isEmpty(result)) { return ""; } return (String) result.get(0).get(MSG); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/CdcExeUtil.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/CdcExeUtil.java new file mode 100644 index 000000000..b35462a4c --- /dev/null +++
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/CdcExeUtil.java @@ -0,0 +1,62 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.utils; + +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.properties.ParamManager; +import com.alibaba.polardbx.druid.util.StringUtils; +import com.alibaba.polardbx.gms.metadb.cdc.BinlogStreamAccessor; +import com.alibaba.polardbx.gms.metadb.cdc.BinlogStreamRecord; +import com.alibaba.polardbx.gms.util.MetaDbUtil; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; + +import java.sql.Connection; +import java.util.List; + +public class CdcExeUtil { + + public static String tryExtractStreamNameFromUser(ExecutionContext executionContext) { + ParamManager paramManager = executionContext.getParamManager(); + boolean enableExtract = paramManager.getBoolean(ConnectionParams.ENABLE_EXTRACT_STREAM_NAME_FROM_USER); + if (enableExtract) { + String user = executionContext.getPrivilegeContext().getUser(); + return extractStreamNameFromUser(user); + } else { + return ""; + } + } + + private static String extractStreamNameFromUser(String userName) { + try (Connection metaDbConn = MetaDbUtil.getConnection()) { + BinlogStreamAccessor accessor = new BinlogStreamAccessor(); + accessor.setConnection(metaDbConn); + List<BinlogStreamRecord> streamList = accessor.listAllStream(); + return streamList.stream() + .map(BinlogStreamRecord::getStreamName) + .filter(s -> StringUtils.equalsIgnoreCase(userName, s + "_cdc_user")) + .findFirst() + .orElse(""); + } catch (Throwable ex) { + if (ex instanceof TddlRuntimeException) { + throw (TddlRuntimeException) ex; + } + throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, ex, ex.getMessage()); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ConditionUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ConditionUtils.java index c4d5c4c83..b991aaea1 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ConditionUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ConditionUtils.java @@ -17,9 +17,6 @@ package com.alibaba.polardbx.executor.utils; import com.alibaba.polardbx.common.utils.GeneralUtil; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; - -import java.math.BigInteger; /** * Created by chuanqin on 18/3/27.
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/DdlUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/DdlUtils.java index eba0986bc..ca3aca5be 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/DdlUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/DdlUtils.java @@ -18,9 +18,13 @@ import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.ddl.sync.ClearPlanCacheSyncAction; import com.alibaba.polardbx.executor.sync.BaselineInvalidatePlanSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.PhyDdlTableOperation; +import com.alibaba.polardbx.optimizer.utils.ITimestampOracle; public class DdlUtils { @@ -64,7 +68,24 @@ public static void invalidatePlan(PhyDdlTableOperation ddl, String schemaName) { } public static void invalidatePlan(String schema, String table, boolean isForce) { - SyncManagerHelper.syncWithDefaultDB(new BaselineInvalidatePlanSyncAction(schema, table, isForce)); + SyncManagerHelper.syncWithDefaultDB(new BaselineInvalidatePlanSyncAction(schema, table, isForce), + SyncScope.ALL); + } + + public static void invalidatePlanCache(String schema, String table) { + SyncManagerHelper.syncWithDefaultDB(new ClearPlanCacheSyncAction(schema, table), SyncScope.ALL); + } + + /** + * Generate ddl version id + */ + public static long generateVersionId(ExecutionContext ec) { + final ITimestampOracle timestampOracle = + ec.getTransaction().getTransactionManagerUtil().getTimestampOracle(); + if (null == timestampOracle) { + throw new UnsupportedOperationException("Do not support timestamp oracle"); + } + return timestampOracle.nextTimestamp(); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/DrdsToAutoTableCreationSqlUtil.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/DrdsToAutoTableCreationSqlUtil.java index 1894779f0..9aa592849 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/DrdsToAutoTableCreationSqlUtil.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/DrdsToAutoTableCreationSqlUtil.java @@ -19,11 +19,18 @@ import com.alibaba.polardbx.druid.sql.SQLUtils; import com.alibaba.polardbx.druid.sql.ast.SQLDataTypeImpl; import com.alibaba.polardbx.druid.sql.ast.SQLExpr; +import com.alibaba.polardbx.druid.sql.ast.SQLIndexDefinition; import com.alibaba.polardbx.druid.sql.ast.SQLPartition; import com.alibaba.polardbx.druid.sql.ast.SQLPartitionBy; +import com.alibaba.polardbx.druid.sql.ast.SQLPartitionByCoHash; import com.alibaba.polardbx.druid.sql.ast.SQLPartitionByHash; import com.alibaba.polardbx.druid.sql.ast.SQLPartitionByRange; import com.alibaba.polardbx.druid.sql.ast.SQLPartitionValue; +import com.alibaba.polardbx.druid.sql.ast.SQLSubPartition; +import com.alibaba.polardbx.druid.sql.ast.SQLSubPartitionBy; +import com.alibaba.polardbx.druid.sql.ast.SQLSubPartitionByCoHash; +import com.alibaba.polardbx.druid.sql.ast.SQLSubPartitionByHash; +import com.alibaba.polardbx.druid.sql.ast.SQLSubPartitionByRange; import com.alibaba.polardbx.druid.sql.ast.expr.SQLIdentifierExpr; import com.alibaba.polardbx.druid.sql.ast.expr.SQLIntegerExpr; import 
com.alibaba.polardbx.druid.sql.ast.expr.SQLMethodInvokeExpr; @@ -36,6 +43,7 @@ import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.MySqlUnique; import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlCreateTableStatement; import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlPartitionByKey; +import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlSubPartitionByKey; import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlTableIndex; import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.gms.topology.DbInfoRecord; @@ -45,7 +53,6 @@ import java.sql.Types; import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -53,9 +60,11 @@ import java.util.TreeMap; import java.util.TreeSet; import java.util.stream.Collectors; +import java.util.stream.IntStream; import static com.alibaba.polardbx.common.TddlConstants.AUTO_SHARD_KEY_PREFIX; import static com.alibaba.polardbx.gms.metadb.limit.Limits.MAX_LENGTH_OF_IDENTIFIER_NAME; +import static java.lang.Math.max; import static java.lang.Math.min; /** @@ -133,7 +142,6 @@ public static MySqlCreateTableStatement convertDrdsStatementToAutoStatement( MySqlCreateTableStatement autoModeCreateTableStatement = drdsCreateTableStatement.clone(); - eliminateDbPartitionAndTbPartition(autoModeCreateTableStatement); /** * eliminate some local index: * 1. implicit primary column and index @@ -141,6 +149,8 @@ public static MySqlCreateTableStatement convertDrdsStatementToAutoStatement( * */ eliminateImplicitKeyAndAutoShardKey(autoModeCreateTableStatement); eliminateImplicitKeyAndAutoShardKey(drdsCreateTableStatement); + + eliminateDbPartitionAndTbPartition(autoModeCreateTableStatement); //single table if (drdsCreateTableStatement.isSingle() || !drdsCreateTableStatement.isBroadCast() && drdsCreateTableStatement.getDbPartitionBy() == null @@ -163,84 +173,115 @@ public static MySqlCreateTableStatement convertDrdsStatementToAutoStatement( final int tbPartitionNum = (drdsTbPartitions == null) ? DrdsDefaultPartitionNumUtil.getDrdsDefaultTbPartitionNum() : drdsTbPartitions.getNumber().intValue(); - final int drdsPartitionNum = Math.min(dbPartitionNum * tbPartitionNum, maxPartitionsNum); + final int drdsDbPartitionNum = dbPartitionNum; + final int drdsTbPartitionNum = min(tbPartitionNum, maxPartitionsNum / dbPartitionNum); List primaryKey = drdsCreateTableStatement.getPrimaryKeyNames(); - //only dbpartition or only tbpartition + //only dbpartition if (drdsDbPartitionBy != null && drdsTbPartitionBy == null || drdsDbPartitionBy == null && drdsTbPartitionBy != null) { //handle gsi - handleAutoModeGsi(autoModeCreateTableStatement, drdsPartitionNum, maxPartitionColumnNum, + handleAutoModeGsi(autoModeCreateTableStatement, maxPartitionsNum, maxPartitionColumnNum, columnsLengthsInBytes); SQLMethodInvokeExpr drdsPartitionBy = (drdsDbPartitionBy == null) ? drdsTbPartitionBy : drdsDbPartitionBy; + int drdsPartitionNum = (drdsDbPartitionBy == null) ? 
drdsTbPartitionNum : drdsDbPartitionNum; SQLPartitionBy autoPartitionBy = - convertDrdsPartitionByToAutoSQLPartitionBy(drdsPartitionBy, drdsPartitionNum, primaryKey, + convertDrdsDbPartitionByToAutoSQLPartitionBy(drdsPartitionBy, drdsPartitionNum, primaryKey, maxPartitionColumnNum, columnsLengthsInBytes); - if (drdsPartitionBy.getMethodName().equalsIgnoreCase("range_hash")) { - MySqlTableIndex cgsiOnCol2 = - generateCgsiForRangeHash2ndCol(drdsPartitionBy, drdsPartitionNum, primaryKey, - maxPartitionColumnNum, columnsLengthsInBytes); - autoModeCreateTableStatement.getTableElementList().add(cgsiOnCol2); - } + + /** + * dbpartition by range_hash(c1,c2,n) ==> partition by co_hash(substr(c1,-n), substr(c2,-n)), + * so ignore creating gsi for c2 + */ +// if (drdsPartitionBy.getMethodName().equalsIgnoreCase("range_hash")) { +// MySqlTableIndex cgsiOnCol2 = +// generateCgsiForRangeHash2ndCol(drdsPartitionBy, drdsPartitionNum, primaryKey, +// maxPartitionColumnNum, columnsLengthsInBytes); +// autoModeCreateTableStatement.getTableElementList().add(cgsiOnCol2); +// } autoModeCreateTableStatement.setPartitioning(autoPartitionBy); } else if (drdsDbPartitionBy != null && drdsTbPartitionBy != null) { + //contain dbpartition and tbpartition + /** + * The db and tb sharding functions are not distinguished here: as long as the db sharding keys and the tb sharding keys are identical + * (ignoring the numeric arguments inside the sharding functions), only the dbPartition is considered when converting to auto mode. + * */ Set dbShardingKey = new TreeSet<>(String::compareToIgnoreCase); + Set tbShardingKey = new TreeSet<>(String::compareToIgnoreCase); drdsDbPartitionBy.getArguments().forEach( arg -> { if (arg instanceof SQLIdentifierExpr) { - dbShardingKey.add(((SQLIdentifierExpr) arg).normalizedName()); + dbShardingKey.add(((SQLIdentifierExpr) arg).normalizedName().toLowerCase()); + } else if (arg instanceof SQLIntegerExpr) { + //dbShardingKey.add(((SQLIntegerExpr) arg).getNumber().toString().toLowerCase()); } } ); - boolean hasSameShardingKey = false; - for (SQLExpr arg : drdsTbPartitionBy.getArguments()) { - if (arg instanceof SQLIdentifierExpr) { - String shardingKey = ((SQLIdentifierExpr) arg).normalizedName(); - if (dbShardingKey.contains(shardingKey)) { - hasSameShardingKey = true; - break; + drdsTbPartitionBy.getArguments().forEach( + arg -> { + if (arg instanceof SQLIdentifierExpr) { + tbShardingKey.add(((SQLIdentifierExpr) arg).normalizedName().toLowerCase()); + } else if (arg instanceof SQLIntegerExpr) { + //tbShardingKey.add(((SQLIntegerExpr) arg).getNumber().toString().toLowerCase()); } } - } + ); + boolean hasSameShardingKey = dbShardingKey.equals(tbShardingKey); if (hasSameShardingKey) { //handle gsi - handleAutoModeGsi(autoModeCreateTableStatement, drdsPartitionNum, maxPartitionColumnNum, + handleAutoModeGsi(autoModeCreateTableStatement, maxPartitionsNum, maxPartitionColumnNum, columnsLengthsInBytes); SQLPartitionBy autoPartitionBy = - convertDrdsPartitionByToAutoSQLPartitionBy(drdsDbPartitionBy, drdsPartitionNum, primaryKey, + convertDrdsDbPartitionByToAutoSQLPartitionBy(drdsDbPartitionBy, + min(drdsDbPartitionNum * drdsTbPartitionNum, maxPartitionsNum), + primaryKey, maxPartitionColumnNum, columnsLengthsInBytes); - if (drdsDbPartitionBy.getMethodName().equalsIgnoreCase("range_hash")) { - MySqlTableIndex cgsiOnCol2 = - generateCgsiForRangeHash2ndCol(drdsDbPartitionBy, drdsPartitionNum, primaryKey, - maxPartitionColumnNum, columnsLengthsInBytes); - autoModeCreateTableStatement.getTableElementList().add(cgsiOnCol2); - } + /** + * dbpartition by range_hash(c1,c2,n) ==> partition by co_hash(substr(c1,-n), substr(c2,-n)), + * so ignore creating gsi for c2 + */ 
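
The comment above captures why this patch no longer generates a covering GSI on the second RANGE_HASH column: CO_HASH already co-locates rows by the shared suffix of both columns, so one partition function covers both. A minimal sketch of that AST transformation, using only the druid AST classes this file already imports (`buildCoHashFromRangeHash` is a hypothetical helper name, not part of the patch):

```java
// Map DRDS `dbpartition by RANGE_HASH(c1, c2, n)` onto the auto-mode clause
// `partition by CO_HASH(RIGHT(c1, n), RIGHT(c2, n))`.
static SQLPartitionBy buildCoHashFromRangeHash(SQLMethodInvokeExpr rangeHash, int partitionNum) {
    List<SQLExpr> args = rangeHash.getArguments();
    if (args.size() != 3) {
        return null; // RANGE_HASH always takes (col1, col2, suffixLen)
    }
    SQLPartitionByCoHash coHash = new SQLPartitionByCoHash();
    for (int i = 0; i < 2; i++) {
        // RIGHT(col, n) extracts the common suffix both columns are sharded by
        SQLMethodInvokeExpr right = new SQLMethodInvokeExpr("RIGHT");
        right.addArgument(args.get(i).clone());
        right.addArgument(args.get(2).clone());
        coHash.addColumn(right);
    }
    coHash.setPartitionsCount(partitionNum);
    return coHash;
}
```

The same shape is repeated below for the subpartition case (SQLSubPartitionByCoHash); only the container type differs.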
+// if (drdsDbPartitionBy.getMethodName().equalsIgnoreCase("range_hash")) { +// MySqlTableIndex cgsiOnCol2 = +// generateCgsiForRangeHash2ndCol(drdsDbPartitionBy, drdsPartitionNum, primaryKey, +// maxPartitionColumnNum, columnsLengthsInBytes); +// autoModeCreateTableStatement.getTableElementList().add(cgsiOnCol2); +// } autoModeCreateTableStatement.setPartitioning(autoPartitionBy); } else { //convert origin gsi - handleAutoModeGsi(autoModeCreateTableStatement, drdsPartitionNum, maxPartitionColumnNum, + handleAutoModeGsi(autoModeCreateTableStatement, maxPartitionsNum, maxPartitionColumnNum, columnsLengthsInBytes); //handle dbPartitionBy SQLPartitionBy autoPartitionBy = - convertDrdsPartitionByToAutoSQLPartitionBy(drdsDbPartitionBy, drdsPartitionNum, primaryKey, + convertDrdsDbPartitionByToAutoSQLPartitionBy(drdsDbPartitionBy, drdsDbPartitionNum, primaryKey, maxPartitionColumnNum, columnsLengthsInBytes); autoModeCreateTableStatement.setPartitioning(autoPartitionBy); - //add cgsi for dbpartitionBy range hash 2nd col - if (drdsDbPartitionBy.getMethodName().equalsIgnoreCase("range_hash")) { - MySqlTableIndex cgsiOnCol2 = - generateCgsiForRangeHash2ndCol(drdsDbPartitionBy, drdsPartitionNum, primaryKey, - maxPartitionColumnNum, columnsLengthsInBytes); - autoModeCreateTableStatement.getTableElementList().add(cgsiOnCol2); - } + //handle tbPartitionBy + SQLSubPartitionBy autoSubPartitionBy = + convertDrdsTbPartitionByToAutoSQLPartitionBy(drdsTbPartitionBy, drdsTbPartitionNum, primaryKey, + maxPartitionColumnNum, columnsLengthsInBytes); + autoModeCreateTableStatement.getPartitioning().setSubPartitionBy(autoSubPartitionBy); + + /** + * dbpartition by range_hash(c1,c2,n) ==> partition by co_hash(substr(c1,-n), substr(c2,-n)), + * so ignore creating gsi for c2 + */ +// //add cgsi for dbpartitionBy range hash 2nd col +// if (drdsDbPartitionBy.getMethodName().equalsIgnoreCase("range_hash")) { +// MySqlTableIndex cgsiOnCol2 = +// generateCgsiForRangeHash2ndCol(drdsDbPartitionBy, drdsPartitionNum, primaryKey, +// maxPartitionColumnNum, columnsLengthsInBytes); +// autoModeCreateTableStatement.getTableElementList().add(cgsiOnCol2); +// } //handle tbPartitionBy /** @@ -258,7 +299,11 @@ public static MySqlCreateTableStatement convertDrdsStatementToAutoStatement( // convertDrdsPartitionByToAutoSQLPartitionBy(drdsTbPartitionBy, drdsPartitionNum, primaryKey, maxPartitionColumnNum); // gsiOnTbShardingKey.setPartitioning(autoPartitionByOnGsi); // autoModeCreateTableStatement.getTableElementList().add(gsiOnTbShardingKey); -// + + /** + * dbpartition by range_hash(c1,c2,n) ==> partition by co_hash(substr(c1,-n), substr(c2,-n)), + * so ignore creating gsi for c2 + */ // //add cgsi for tbpartitionBy range hash 2nd col // if (drdsTbPartitionBy.getMethodName().equalsIgnoreCase("range_hash")) { // MySqlTableIndex cgsiOnCol2 = @@ -317,6 +362,24 @@ private static void handleDrdsModeSequence(MySqlCreateTableStatement statement) private static void eliminateImplicitKeyAndAutoShardKey(MySqlCreateTableStatement drdsCreateTableStatement) { List tableElementList = drdsCreateTableStatement.getTableElementList(); Iterator iterator = tableElementList.iterator(); + List dbPartitionCols = new ArrayList<>(), tbPartitionCols = new ArrayList<>(); + SQLMethodInvokeExpr dbPartitionBy = (SQLMethodInvokeExpr) drdsCreateTableStatement.getDbPartitionBy(); + SQLMethodInvokeExpr tbPartitionBy = (SQLMethodInvokeExpr) drdsCreateTableStatement.getTablePartitionBy(); + + if (dbPartitionBy != null) { + for (SQLExpr expr : dbPartitionBy.getArguments()) 
{ + if (expr instanceof SQLIdentifierExpr) { + dbPartitionCols.add(((SQLIdentifierExpr) expr).getSimpleName()); + } + } + } + if (tbPartitionBy != null) { + for (SQLExpr expr : tbPartitionBy.getArguments()) { + if (expr instanceof SQLIdentifierExpr) { + tbPartitionCols.add(((SQLIdentifierExpr) expr).getSimpleName()); + } + } + } while (iterator.hasNext()) { SQLTableElement element = iterator.next(); if (element instanceof SQLColumnDefinition) { @@ -342,7 +405,19 @@ private static void eliminateImplicitKeyAndAutoShardKey(MySqlCreateTableStatemen } SQLIdentifierExpr keyName = (SQLIdentifierExpr) mySqlUnique.getIndexDefinition().getName(); if (keyName.getSimpleName().toLowerCase().contains(AUTO_SHARD_KEY_PREFIX.toLowerCase())) { - iterator.remove(); + //Check: if this auto_shard_key does not match the partition columns, it serves as an ordinary index and should not be removed + List autoShardKeyCols = new ArrayList<>(); + SQLIndexDefinition sqlIndexDefinition = mySqlUnique.getIndexDefinition(); + for (SQLSelectOrderByItem item : sqlIndexDefinition.getColumns()) { + if (item.getExpr() instanceof SQLIdentifierExpr) { + autoShardKeyCols.add(((SQLIdentifierExpr) item.getExpr()).getSimpleName()); + } + } + + if (columnsIsSame(dbPartitionCols, autoShardKeyCols) || columnsIsSame(tbPartitionCols, + autoShardKeyCols)) { + iterator.remove(); + } } } else if (element instanceof MySqlKey) { @@ -350,16 +425,40 @@ private static void eliminateImplicitKeyAndAutoShardKey(MySqlCreateTableStatemen MySqlKey mySqlKey = (MySqlKey) element; SQLIdentifierExpr keyName = (SQLIdentifierExpr) mySqlKey.getIndexDefinition().getName(); if (keyName.getSimpleName().toLowerCase().contains(AUTO_SHARD_KEY_PREFIX.toLowerCase())) { - iterator.remove(); + //Check: if this auto_shard_key does not match the partition columns, it serves as an ordinary index and should not be removed + List autoShardKeyCols = new ArrayList<>(); + SQLIndexDefinition sqlIndexDefinition = mySqlKey.getIndexDefinition(); + for (SQLSelectOrderByItem item : sqlIndexDefinition.getColumns()) { + if (item.getExpr() instanceof SQLIdentifierExpr) { + autoShardKeyCols.add(((SQLIdentifierExpr) item.getExpr()).getSimpleName()); + } + } + + if (columnsIsSame(dbPartitionCols, autoShardKeyCols) || columnsIsSame(tbPartitionCols, + autoShardKeyCols)) { + iterator.remove(); + } } } } } - public static SQLPartitionBy convertDrdsPartitionByToAutoSQLPartitionBy(SQLMethodInvokeExpr drdsPartitionBy, - int partitionNum, List primaryKey, - int maxPartitionColumnNum, - Map columnsLengthsInBytes) { + private static boolean columnsIsSame(final List a, final List b) { + if (a.size() != b.size()) { + return false; + } + for (int i = 0; i < a.size(); i++) { + if (a.get(i) == null || !a.get(i).equalsIgnoreCase(b.get(i))) { + return false; + } + } + return true; + } + + public static SQLPartitionBy convertDrdsDbPartitionByToAutoSQLPartitionBy(SQLMethodInvokeExpr drdsPartitionBy, + int partitionNum, List primaryKey, + int maxPartitionColumnNum, + Map columnsLengthsInBytes) { SQLPartitionBy autoSqlPartitionBy = null; /** * For the partition by variants that map to a key partition, append the primary key to the partition-by-key sharding columns, so that it can be used if hot-spot splitting is needed in the future @@ -465,10 +564,7 @@ && validateAutoShardKeyLength(columnsLengthsInBytes, hashKey, pk)) { autoSqlPartitionBy = partitionByKey; autoSqlPartitionBy.setPartitionsCount(partitionNum); } else if (drdsPartitionBy.getMethodName().equalsIgnoreCase("range_hash")) { - final SQLPartitionBy partitionByKey = new MySqlPartitionByKey(); - Set hashKey = new TreeSet<>(String::compareToIgnoreCase); - - //build hash(substr(`col1`)) + final SQLPartitionBy partitionByCoHash = new SQLPartitionByCoHash(); List strHashArguements = drdsPartitionBy.getArguments(); 
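
The primary-key appending described in the javadoc above is guarded by three independent limits before each pk column is added. A condensed form of those guards, as a hypothetical helper (not part of the patch; the real loop additionally calls validateAutoShardKeyLength to check the index's byte length against columnsLengthsInBytes, and normalizes names via SQLUtils.normalize):

```java
import java.util.Set;

class AutoShardKeyGuard {
    // prefix would be TddlConstants.AUTO_SHARD_KEY_PREFIX in the real code
    static boolean canAppendPk(Set<String> hashKey, String pk, String prefix,
                               int maxPartitionColumnNum, int maxIdentifierLen) {
        // the index name MySQL would end up with: <prefix><k1>_<k2>_..._<pk>
        String autoShardKeyName = prefix + String.join("_", hashKey) + "_" + pk;
        return !hashKey.contains(pk)                          // pk not already a sharding column
            && hashKey.size() + 1 <= maxPartitionColumnNum    // auto-mode cap on partition columns
            && autoShardKeyName.length() <= maxIdentifierLen; // MySQL identifier limit (64)
    }
}
```

If any guard fails for a given pk column, that column is simply skipped, which is why only part of the primary key may end up in the partition columns.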
if (strHashArguements.size() != 3) { return null; @@ -477,26 +573,24 @@ && validateAutoShardKeyLength(columnsLengthsInBytes, hashKey, pk)) { SQLIdentifierExpr col1 = (SQLIdentifierExpr) strHashArguements.get(0); SQLIdentifierExpr col2 = (SQLIdentifierExpr) strHashArguements.get(1); SQLIntegerExpr suffixLen = (SQLIntegerExpr) strHashArguements.get(2); - SQLExpr newArg = col1.clone(); - partitionByKey.addColumn(newArg); - hashKey.add(SQLUtils.normalize(((SQLIdentifierExpr) newArg).getSimpleName())); + SQLMethodInvokeExpr rightExpr1 = new SQLMethodInvokeExpr("RIGHT"); + SQLExpr newArgCol1 = col1.clone(); + SQLExpr substrPosiExpr1 = suffixLen.clone(); + rightExpr1.addArgument(newArgCol1); + rightExpr1.addArgument(substrPosiExpr1); + + SQLMethodInvokeExpr rightExpr2 = new SQLMethodInvokeExpr("RIGHT"); + SQLExpr newArgCol2 = col2.clone(); + SQLExpr substrPosiExpr2 = suffixLen.clone(); + rightExpr2.addArgument(newArgCol2); + rightExpr2.addArgument(substrPosiExpr2); + + partitionByCoHash.addColumn(rightExpr1); + partitionByCoHash.addColumn(rightExpr2); + partitionByCoHash.setPartitionsCount(partitionNum); + + autoSqlPartitionBy = partitionByCoHash; - primaryKey.forEach( - pk -> { - String autoShardKeyName = - AUTO_SHARD_KEY_PREFIX + hashKey.stream().collect(Collectors.joining("_")) + "_" + pk; - if (!hashKey.contains(SQLUtils.normalize(pk)) - && hashKey.size() + 1 <= maxPartitionColumnNum - && autoShardKeyName.length() <= MAX_LENGTH_OF_IDENTIFIER_NAME - && validateAutoShardKeyLength(columnsLengthsInBytes, hashKey, pk)) { - SQLExpr pkCol = new SQLIdentifierExpr(SQLUtils.encloseWithUnquote(pk)); - partitionByKey.addColumn(pkCol); - hashKey.add(SQLUtils.normalize(pk)); - } - } - ); - autoSqlPartitionBy = partitionByKey; - autoSqlPartitionBy.setPartitionsCount(partitionNum); } else if (drdsPartitionBy.getMethodName().equalsIgnoreCase("YYYYMM") || drdsPartitionBy.getMethodName() .equalsIgnoreCase("YYYYMM_OPT")) { //convert to hash(to_months(`col`)) @@ -563,7 +657,210 @@ && validateAutoShardKeyLength(columnsLengthsInBytes, hashKey, pk)) { return autoSqlPartitionBy; } - static private void handleAutoModeGsi(MySqlCreateTableStatement autoCreateTableStatement, int gsiPartitionNum, + public static SQLSubPartitionBy convertDrdsTbPartitionByToAutoSQLPartitionBy(SQLMethodInvokeExpr drdsPartitionBy, + int partitionNum, + List primaryKey, + int maxPartitionColumnNum, + Map columnsLengthsInBytes) { + SQLSubPartitionBy autoSqlSubPartitionBy = null; + /** + * For the partition by variants that map to a key partition, append the primary key to the partition-by-key sharding columns, so that it can be used if hot-spot splitting is needed in the future. + * But since auto mode caps the number of partition columns: + * a) if appending the primary key to the partition-by-key sharding columns would exceed the cap on the number of partition columns, + * only part of the primary key columns are appended + * b) if appending the primary key makes the auto-generated auto_shard_key_name exceed the MySQL identifier length limit (64), only part of the primary key columns are appended as well + * c) if appending the primary key makes the composite index auto_sharding_key exceed the MySQL index length limit, only part of the primary key columns are appended as well + * (columnsLengthsInBytes is used to obtain the column lengths and check whether the limit is exceeded) + * */ + if (drdsPartitionBy.getMethodName().equalsIgnoreCase("hash")) { + final SQLSubPartitionBy subPartitionByKey = new MySqlSubPartitionByKey(); + Set hashKey = new TreeSet<>(String::compareToIgnoreCase); + SQLExpr newArg = drdsPartitionBy.getArguments().get(0).clone(); + subPartitionByKey.addColumn(newArg); + hashKey.add(SQLUtils.normalize(((SQLIdentifierExpr) newArg).getSimpleName())); + primaryKey.forEach( + pk -> { + String autoShardKeyName = + AUTO_SHARD_KEY_PREFIX + hashKey.stream().collect(Collectors.joining("_")) + "_" + pk; + if (!hashKey.contains(SQLUtils.normalize(pk)) + && hashKey.size() + 1 <= maxPartitionColumnNum + && autoShardKeyName.length() <= MAX_LENGTH_OF_IDENTIFIER_NAME + && 
validateAutoShardKeyLength(columnsLengthsInBytes, hashKey, pk)) { + SQLExpr pkCol = new SQLIdentifierExpr(SQLUtils.encloseWithUnquote(pk)); + subPartitionByKey.addColumn(pkCol); + hashKey.add(SQLUtils.normalize(pk)); + } + } + ); + autoSqlSubPartitionBy = subPartitionByKey; + autoSqlSubPartitionBy.setSubPartitionsCount(new SQLIntegerExpr(partitionNum)); + } else if (drdsPartitionBy.getMethodName().equalsIgnoreCase("str_hash")) { + final SQLSubPartitionBy subPartitionByKey = new MySqlSubPartitionByKey(); + Set hashKey = new TreeSet<>(String::compareToIgnoreCase); + List strHashArguements = drdsPartitionBy.getArguments(); + if (strHashArguements.isEmpty() || !(strHashArguements.size() == 1 || strHashArguements.size() == 3 + || strHashArguements.size() == 4 || strHashArguements.size() == 5)) { + return null; + } + SQLExpr newArg = strHashArguements.get(0).clone(); + subPartitionByKey.addColumn(newArg); + hashKey.add(SQLUtils.normalize(((SQLIdentifierExpr) newArg).getSimpleName())); + primaryKey.forEach( + pk -> { + String autoShardKeyName = + AUTO_SHARD_KEY_PREFIX + hashKey.stream().collect(Collectors.joining("_")) + "_" + pk; + if (!hashKey.contains(SQLUtils.normalize(pk)) + && hashKey.size() + 1 <= maxPartitionColumnNum + && autoShardKeyName.length() <= MAX_LENGTH_OF_IDENTIFIER_NAME + && validateAutoShardKeyLength(columnsLengthsInBytes, hashKey, pk)) { + SQLExpr pkCol = new SQLIdentifierExpr(SQLUtils.encloseWithUnquote(pk)); + subPartitionByKey.addColumn(pkCol); + hashKey.add(SQLUtils.normalize(pk)); + } + } + ); + autoSqlSubPartitionBy = subPartitionByKey; + autoSqlSubPartitionBy.setSubPartitionsCount(new SQLIntegerExpr(partitionNum)); + } else if (drdsPartitionBy.getMethodName().equalsIgnoreCase("uni_hash")) { + final SQLSubPartitionBy subPartitionByKey = new MySqlSubPartitionByKey(); + Set hashKey = new TreeSet<>(String::compareToIgnoreCase); + SQLExpr newArg = drdsPartitionBy.getArguments().get(0).clone(); + subPartitionByKey.addColumn(newArg); + hashKey.add(SQLUtils.normalize(((SQLIdentifierExpr) newArg).getSimpleName())); + primaryKey.forEach( + pk -> { + String autoShardKeyName = + AUTO_SHARD_KEY_PREFIX + hashKey.stream().collect(Collectors.joining("_")) + "_" + pk; + if (!hashKey.contains(SQLUtils.normalize(pk)) + && hashKey.size() + 1 <= maxPartitionColumnNum + && autoShardKeyName.length() <= MAX_LENGTH_OF_IDENTIFIER_NAME + && validateAutoShardKeyLength(columnsLengthsInBytes, hashKey, pk)) { + SQLExpr pkCol = new SQLIdentifierExpr(SQLUtils.encloseWithUnquote(pk)); + subPartitionByKey.addColumn(pkCol); + hashKey.add(SQLUtils.normalize(pk)); + } + } + ); + autoSqlSubPartitionBy = subPartitionByKey; + autoSqlSubPartitionBy.setSubPartitionsCount(new SQLIntegerExpr(partitionNum)); + } else if (drdsPartitionBy.getMethodName().equalsIgnoreCase("right_shift")) { + final SQLSubPartitionBy subPartitionByKey = new MySqlSubPartitionByKey(); + Set hashKey = new TreeSet<>(String::compareToIgnoreCase); + SQLExpr newArg = drdsPartitionBy.getArguments().get(0).clone(); + subPartitionByKey.addColumn(newArg); + hashKey.add(SQLUtils.normalize(((SQLIdentifierExpr) newArg).getSimpleName())); + + primaryKey.forEach( + pk -> { + String autoShardKeyName = + AUTO_SHARD_KEY_PREFIX + hashKey.stream().collect(Collectors.joining("_")) + "_" + pk; + if (!hashKey.contains(SQLUtils.normalize(pk)) + && hashKey.size() + 1 <= maxPartitionColumnNum + && autoShardKeyName.length() <= MAX_LENGTH_OF_IDENTIFIER_NAME + && validateAutoShardKeyLength(columnsLengthsInBytes, hashKey, pk)) { + SQLExpr pkCol = new 
SQLIdentifierExpr(SQLUtils.encloseWithUnquote(pk)); + subPartitionByKey.addColumn(pkCol); + hashKey.add(SQLUtils.normalize(pk)); + } + } + ); + autoSqlSubPartitionBy = subPartitionByKey; + autoSqlSubPartitionBy.setSubPartitionsCount(new SQLIntegerExpr(partitionNum)); + } else if (drdsPartitionBy.getMethodName().equalsIgnoreCase("range_hash")) { + final SQLSubPartitionBy subPartitionByCoHash = new SQLSubPartitionByCoHash(); + List strHashArguements = drdsPartitionBy.getArguments(); + if (strHashArguements.size() != 3) { + return null; + } + SQLIdentifierExpr col1 = (SQLIdentifierExpr) strHashArguements.get(0); + SQLIdentifierExpr col2 = (SQLIdentifierExpr) strHashArguements.get(1); + SQLIntegerExpr suffixLen = (SQLIntegerExpr) strHashArguements.get(2); + + SQLMethodInvokeExpr rightExpr1 = new SQLMethodInvokeExpr("RIGHT"); + SQLExpr newArgCol1 = col1.clone(); + SQLExpr substrPosiExpr1 = suffixLen.clone(); + rightExpr1.addArgument(newArgCol1); + rightExpr1.addArgument(substrPosiExpr1); + + SQLMethodInvokeExpr rightExpr2 = new SQLMethodInvokeExpr("RIGHT"); + SQLExpr newArgCol2 = col2.clone(); + SQLExpr substrPosiExpr2 = suffixLen.clone(); + rightExpr2.addArgument(newArgCol2); + rightExpr2.addArgument(substrPosiExpr2); + + subPartitionByCoHash.addColumn(rightExpr1); + subPartitionByCoHash.addColumn(rightExpr2); + subPartitionByCoHash.setSubPartitionsCount(new SQLIntegerExpr(partitionNum)); + + autoSqlSubPartitionBy = subPartitionByCoHash; + + } else if (drdsPartitionBy.getMethodName().equalsIgnoreCase("YYYYMM") || drdsPartitionBy.getMethodName() + .equalsIgnoreCase("YYYYMM_OPT")) { + //convert to hash(to_months(`col`)) + autoSqlSubPartitionBy = new SQLSubPartitionByHash(); + SQLMethodInvokeExpr toMonths = new SQLMethodInvokeExpr("TO_MONTHS"); + SQLExpr col = drdsPartitionBy.getArguments().get(0).clone(); + toMonths.addArgument(col); + autoSqlSubPartitionBy.addColumn(toMonths); + autoSqlSubPartitionBy.setSubPartitionsCount(new SQLIntegerExpr(partitionNum)); + } else if (drdsPartitionBy.getMethodName().equalsIgnoreCase("YYYYWEEK") || drdsPartitionBy.getMethodName() + .equalsIgnoreCase("YYYYWEEK_OPT")) { + autoSqlSubPartitionBy = new SQLSubPartitionByHash(); + SQLMethodInvokeExpr toWeeks = new SQLMethodInvokeExpr("TO_WEEKS"); + SQLExpr col = drdsPartitionBy.getArguments().get(0).clone(); + toWeeks.addArgument(col); + autoSqlSubPartitionBy.addColumn(toWeeks); + autoSqlSubPartitionBy.setSubPartitionsCount(new SQLIntegerExpr(partitionNum)); + } else if (drdsPartitionBy.getMethodName().equalsIgnoreCase("YYYYDD") || drdsPartitionBy.getMethodName() + .equalsIgnoreCase("YYYYDD_OPT")) { + autoSqlSubPartitionBy = new SQLSubPartitionByHash(); + SQLMethodInvokeExpr toDays = new SQLMethodInvokeExpr("TO_DAYS"); + SQLExpr col = drdsPartitionBy.getArguments().get(0).clone(); + toDays.addArgument(col); + autoSqlSubPartitionBy.addColumn(toDays); + autoSqlSubPartitionBy.setSubPartitionsCount(new SQLIntegerExpr(partitionNum)); + } else if (drdsPartitionBy.getMethodName().equalsIgnoreCase("MM")) { + //build month(col) + autoSqlSubPartitionBy = new SQLSubPartitionByRange(); + SQLMethodInvokeExpr month = new SQLMethodInvokeExpr("MONTH"); + SQLExpr col = drdsPartitionBy.getArguments().get(0).clone(); + month.addArgument(col); + autoSqlSubPartitionBy.addColumn(month); + //build subpartition definition + generateRangeSubPartitionDefInAutoMode((SQLSubPartitionByRange) autoSqlSubPartitionBy, 12, 12); + } else if (drdsPartitionBy.getMethodName().equalsIgnoreCase("DD")) { + //build dayofmonth(`col`) + autoSqlSubPartitionBy = 
new SQLSubPartitionByRange(); + SQLMethodInvokeExpr dayOfMonth = new SQLMethodInvokeExpr("DAYOFMONTH"); + SQLExpr col = drdsPartitionBy.getArguments().get(0).clone(); + dayOfMonth.addArgument(col); + autoSqlSubPartitionBy.addColumn(dayOfMonth); + + //build subpartition definition + generateRangeSubPartitionDefInAutoMode((SQLSubPartitionByRange) autoSqlSubPartitionBy, 31, 31); + } else if (drdsPartitionBy.getMethodName().equalsIgnoreCase("WEEK")) { + //build dayofweek(`col`) + autoSqlSubPartitionBy = new SQLSubPartitionByRange(); + SQLMethodInvokeExpr dayOfWeek = new SQLMethodInvokeExpr("DAYOFWEEK"); + SQLExpr col = drdsPartitionBy.getArguments().get(0).clone(); + dayOfWeek.addArgument(col); + autoSqlSubPartitionBy.addColumn(dayOfWeek); + //build subpartition definition + generateRangeSubPartitionDefInAutoMode((SQLSubPartitionByRange) autoSqlSubPartitionBy, 7, 7); + } else if (drdsPartitionBy.getMethodName().equalsIgnoreCase("MMDD")) { + //build dayofyear(`col`) + autoSqlSubPartitionBy = new SQLSubPartitionByRange(); + SQLMethodInvokeExpr dayOfYear = new SQLMethodInvokeExpr("DAYOFYEAR"); + SQLExpr col = drdsPartitionBy.getArguments().get(0).clone(); + dayOfYear.addArgument(col); + autoSqlSubPartitionBy.addColumn(dayOfYear); + //build subpartition definition + generateRangeSubPartitionDefInAutoMode((SQLSubPartitionByRange) autoSqlSubPartitionBy, 366, 366); + } + return autoSqlSubPartitionBy; + } + + static private void handleAutoModeGsi(MySqlCreateTableStatement autoCreateTableStatement, int maxPartitionsNum, int maxPartitionColumnNum, Map columnsLengthsInBytes) { List autoTableElementList = autoCreateTableStatement.getTableElementList(); for (int i = 0; i < autoTableElementList.size(); i++) { @@ -572,7 +869,7 @@ static private void handleAutoModeGsi(MySqlCreateTableStatement autoCreateTableS MySqlTableIndex gsi = (MySqlTableIndex) element; if (gsi.isGlobal() || gsi.isClustered()) { SQLTableElement newGsi = - convertDrdsGsiToAutoGsi((MySqlTableIndex) element, gsiPartitionNum, maxPartitionColumnNum, + convertDrdsGsiToAutoGsi((MySqlTableIndex) element, maxPartitionsNum, maxPartitionColumnNum, columnsLengthsInBytes); autoTableElementList.set(i, newGsi); } @@ -580,7 +877,7 @@ static private void handleAutoModeGsi(MySqlCreateTableStatement autoCreateTableS MySqlUnique uniqueIndex = (MySqlUnique) element; if (uniqueIndex.isGlobal() || uniqueIndex.isClustered()) { SQLTableElement newGsi = - convertDrdsUgsiToAutoGsi((MySqlUnique) element, gsiPartitionNum, maxPartitionColumnNum, + convertDrdsUgsiToAutoGsi((MySqlUnique) element, maxPartitionsNum, maxPartitionColumnNum, columnsLengthsInBytes); autoTableElementList.set(i, newGsi); } @@ -648,51 +945,174 @@ static private MySqlTableIndex generateCgsiForRangeHash2ndCol(SQLMethodInvokeExp SQLMethodInvokeExpr drdsPartitionByCopy = drdsPartitionBy.clone(); Collections.swap(drdsPartitionByCopy.getArguments(), 0, 1); SQLPartitionBy partitionByWithSubStrForCol2 = - convertDrdsPartitionByToAutoSQLPartitionBy(drdsPartitionByCopy, partitionNum, primaryKey, + convertDrdsDbPartitionByToAutoSQLPartitionBy(drdsPartitionByCopy, partitionNum, primaryKey, maxPartitionColumnNum, columnsLengthsInBytes); SQLIdentifierExpr newCol = (SQLIdentifierExpr) drdsPartitionByCopy.getArguments().get(0).clone(); return generateCgsi(newCol, partitionByWithSubStrForCol2); } - static private MySqlTableIndex convertDrdsGsiToAutoGsi(MySqlTableIndex drdsGsi, int gsiPartitionNum, + static private MySqlTableIndex convertDrdsGsiToAutoGsi(MySqlTableIndex drdsGsi, int maxPartitionsNum, int 
maxPartitionColumnNum, Map columnsLengthsInBytes) { MySqlTableIndex autoGsi = drdsGsi.clone(); autoGsi.setDbPartitionBy(null); autoGsi.setTablePartitionBy(null); autoGsi.setTablePartitions(null); - SQLMethodInvokeExpr drdsPartitionBy = - ((drdsGsi.getDbPartitionBy() != null) ? - (SQLMethodInvokeExpr) drdsGsi.getDbPartitionBy() - : (SQLMethodInvokeExpr) drdsGsi.getTablePartitionBy()); - if (drdsPartitionBy != null) { - autoGsi.setPartitioning( - convertDrdsPartitionByToAutoSQLPartitionBy(drdsPartitionBy, gsiPartitionNum, new ArrayList<>(), - maxPartitionColumnNum, columnsLengthsInBytes)); + + SQLMethodInvokeExpr drdsDbPartitionBy = (SQLMethodInvokeExpr) drdsGsi.getDbPartitionBy(); + //SQLIntegerExpr drdsDbPartiions = (SQLIntegerExpr) drdsGsi.getDbPartitions(); + SQLMethodInvokeExpr drdsTbPartitionBy = + (SQLMethodInvokeExpr) drdsGsi.getTablePartitionBy(); + final int tbPartitionNum = + (drdsGsi.getTablePartitions() == null) ? DrdsDefaultPartitionNumUtil.getDrdsDefaultTbPartitionNum() : + ((SQLIntegerExpr) drdsGsi.getTablePartitions()).getNumber().intValue(); + final int drdsDbPartitionNum = DrdsDefaultPartitionNumUtil.getDrdsDefaultDbPartitionNum(); + final int drdsTbPartitionNum = min(tbPartitionNum, maxPartitionsNum / drdsDbPartitionNum); + + //only dbPartition by + if (drdsDbPartitionBy != null && drdsTbPartitionBy == null) { + SQLPartitionBy autoPartitionBy = + convertDrdsDbPartitionByToAutoSQLPartitionBy(drdsDbPartitionBy, drdsDbPartitionNum, new ArrayList<>(), + maxPartitionColumnNum, columnsLengthsInBytes); + autoGsi.setPartitioning(autoPartitionBy); return autoGsi; + } else if (drdsDbPartitionBy != null && drdsTbPartitionBy != null) { + //contain dbpartition and tbpartition + /** + * The db and tb sharding functions are not distinguished here: as long as the db sharding keys and the tb sharding keys are identical + * (ignoring the numeric arguments inside the sharding functions), only the dbPartition is considered when converting to auto mode. + * */ + Set dbShardingKey = new TreeSet<>(String::compareToIgnoreCase); + Set tbShardingKey = new TreeSet<>(String::compareToIgnoreCase); + drdsDbPartitionBy.getArguments().forEach( + arg -> { + if (arg instanceof SQLIdentifierExpr) { + dbShardingKey.add(((SQLIdentifierExpr) arg).normalizedName().toLowerCase()); + } else if (arg instanceof SQLIntegerExpr) { + //dbShardingKey.add(((SQLIntegerExpr) arg).getNumber().toString().toLowerCase()); + } + } + ); + drdsTbPartitionBy.getArguments().forEach( + arg -> { + if (arg instanceof SQLIdentifierExpr) { + tbShardingKey.add(((SQLIdentifierExpr) arg).normalizedName().toLowerCase()); + } else if (arg instanceof SQLIntegerExpr) { + //tbShardingKey.add(((SQLIntegerExpr) arg).getNumber().toString().toLowerCase()); + } + } + ); + + boolean hasSameShardingKey = dbShardingKey.equals(tbShardingKey); + if (hasSameShardingKey) { + //only use dbPartition key + SQLPartitionBy autoPartitionBy = + convertDrdsDbPartitionByToAutoSQLPartitionBy(drdsDbPartitionBy, + min(drdsDbPartitionNum * drdsTbPartitionNum, maxPartitionsNum), + new ArrayList<>(), + maxPartitionColumnNum, columnsLengthsInBytes); + autoGsi.setPartitioning(autoPartitionBy); + return autoGsi; + } else { + //dbPartition by and tbPartition + SQLPartitionBy autoPartitionBy = + convertDrdsDbPartitionByToAutoSQLPartitionBy(drdsDbPartitionBy, drdsDbPartitionNum, + new ArrayList<>(), + maxPartitionColumnNum, columnsLengthsInBytes); + SQLSubPartitionBy autoSubPartitionBy = + convertDrdsTbPartitionByToAutoSQLPartitionBy(drdsTbPartitionBy, drdsTbPartitionNum, + new ArrayList<>(), + maxPartitionColumnNum, columnsLengthsInBytes); + + autoPartitionBy.setSubPartitionBy(autoSubPartitionBy); + 
autoGsi.setPartitioning(autoPartitionBy); + return autoGsi; + } } else { + //only tbPartition, impossible return null; } } - static private MySqlUnique convertDrdsUgsiToAutoGsi(MySqlUnique drdsGsi, int gsiPartitionNum, + static private MySqlUnique convertDrdsUgsiToAutoGsi(MySqlUnique drdsGsi, int maxPartitionsNum, int maxPartitionColumnNum, Map columnsLengthsInBytes) { MySqlUnique autoGsi = drdsGsi.clone(); autoGsi.setDbPartitionBy(null); autoGsi.setTablePartitionBy(null); autoGsi.setTablePartitions(null); - SQLMethodInvokeExpr drdsPartitionBy = - ((drdsGsi.getDbPartitionBy() != null) ? - (SQLMethodInvokeExpr) drdsGsi.getDbPartitionBy() - : (SQLMethodInvokeExpr) drdsGsi.getTablePartitionBy()); - if (drdsPartitionBy != null) { - autoGsi.setPartitioning( - convertDrdsPartitionByToAutoSQLPartitionBy(drdsPartitionBy, gsiPartitionNum, new ArrayList<>(), - maxPartitionColumnNum, columnsLengthsInBytes)); + + SQLMethodInvokeExpr drdsDbPartitionBy = (SQLMethodInvokeExpr) drdsGsi.getDbPartitionBy(); + SQLMethodInvokeExpr drdsTbPartitionBy = + (SQLMethodInvokeExpr) drdsGsi.getTablePartitionBy(); + final int tbPartitionNum = + (drdsGsi.getTablePartitions() == null) ? DrdsDefaultPartitionNumUtil.getDrdsDefaultTbPartitionNum() : + ((SQLIntegerExpr) drdsGsi.getTablePartitions()).getNumber().intValue(); + final int drdsDbPartitionNum = DrdsDefaultPartitionNumUtil.getDrdsDefaultDbPartitionNum(); + final int drdsTbPartitionNum = min(tbPartitionNum, maxPartitionsNum / drdsDbPartitionNum); + + //only dbPartition by + if (drdsDbPartitionBy != null && drdsTbPartitionBy == null) { + SQLPartitionBy autoPartitionBy = + convertDrdsDbPartitionByToAutoSQLPartitionBy(drdsDbPartitionBy, drdsDbPartitionNum, new ArrayList<>(), + maxPartitionColumnNum, columnsLengthsInBytes); + autoGsi.setPartitioning(autoPartitionBy); return autoGsi; + } else if (drdsDbPartitionBy != null && drdsTbPartitionBy != null) { + //contain dbpartition and tbpartition + /** + * The db and tb sharding functions are not distinguished here: as long as the db sharding keys and the tb sharding keys are identical + * (ignoring the numeric arguments inside the sharding functions), only the dbPartition is considered when converting to auto mode. + * */ + Set dbShardingKey = new TreeSet<>(String::compareToIgnoreCase); + Set tbShardingKey = new TreeSet<>(String::compareToIgnoreCase); + drdsDbPartitionBy.getArguments().forEach( + arg -> { + if (arg instanceof SQLIdentifierExpr) { + dbShardingKey.add(((SQLIdentifierExpr) arg).normalizedName().toLowerCase()); + } else if (arg instanceof SQLIntegerExpr) { + //dbShardingKey.add(((SQLIntegerExpr) arg).getNumber().toString().toLowerCase()); + } + } + ); + drdsTbPartitionBy.getArguments().forEach( + arg -> { + if (arg instanceof SQLIdentifierExpr) { + tbShardingKey.add(((SQLIdentifierExpr) arg).normalizedName().toLowerCase()); + } else if (arg instanceof SQLIntegerExpr) { + //tbShardingKey.add(((SQLIntegerExpr) arg).getNumber().toString().toLowerCase()); + } + } + ); + + boolean hasSameShardingKey = dbShardingKey.equals(tbShardingKey); + if (hasSameShardingKey) { + //only use dbPartition key + SQLPartitionBy autoPartitionBy = + convertDrdsDbPartitionByToAutoSQLPartitionBy(drdsDbPartitionBy, + min(drdsDbPartitionNum * drdsTbPartitionNum, maxPartitionsNum), + new ArrayList<>(), + maxPartitionColumnNum, columnsLengthsInBytes); + autoGsi.setPartitioning(autoPartitionBy); + return autoGsi; + } else { + //dbPartition by and tbPartition + SQLPartitionBy autoPartitionBy = + convertDrdsDbPartitionByToAutoSQLPartitionBy(drdsDbPartitionBy, drdsDbPartitionNum, + new ArrayList<>(), + maxPartitionColumnNum, columnsLengthsInBytes); + SQLSubPartitionBy autoSubPartitionBy = + 
convertDrdsTbPartitionByToAutoSQLPartitionBy(drdsTbPartitionBy, drdsTbPartitionNum, + new ArrayList<>(), + maxPartitionColumnNum, columnsLengthsInBytes); + + autoPartitionBy.setSubPartitionBy(autoSubPartitionBy); + autoGsi.setPartitioning(autoPartitionBy); + return autoGsi; + } } else { + //only tbPartition, impossible return null; } } @@ -736,6 +1156,45 @@ static private void generateRangePartitionDefInAutoMode(SQLPartitionByRange sqlP sqlPartitionByRange.addPartition(defaultPartition); } + /** + * generate uniform range partition interval + */ + static private void generateRangeSubPartitionDefInAutoMode(SQLSubPartitionByRange sqlSubPartitionByRange, + int needPartitionNum, + int maxPartitionBound) { + /** + * needPartitionNum should not exceed maxPartitionBound. + * e.g. + * in partition by range(month(`col`)), maxPartitionBound = 12 + * */ + needPartitionNum = min(needPartitionNum, maxPartitionBound); + int partitionBoundStep = maxPartitionBound / needPartitionNum; + int partitionBeginBound = (partitionBoundStep == 1 ? partitionBoundStep + 1 : partitionBoundStep); + while (partitionBeginBound < maxPartitionBound) { + SQLSubPartition subPartition = new SQLSubPartition(); + SQLIdentifierExpr pName = new SQLIdentifierExpr("sp" + String.valueOf(partitionBeginBound)); + subPartition.setName(pName); + + SQLPartitionValue sqlPartitionValue = new SQLPartitionValue(SQLPartitionValue.Operator.LessThan); + SQLIntegerExpr item = new SQLIntegerExpr(partitionBeginBound); + sqlPartitionValue.addItem(item); + subPartition.setValues(sqlPartitionValue); + + sqlSubPartitionByRange.getSubPartitionTemplate().add(subPartition); + partitionBeginBound += partitionBoundStep; + } + + //build default(maxvalue) partition + SQLSubPartition defaultSubPartition = new SQLSubPartition(); + SQLIdentifierExpr pName = new SQLIdentifierExpr("spd"); + defaultSubPartition.setName(pName); + SQLPartitionValue sqlSubPartitionValue = new SQLPartitionValue(SQLPartitionValue.Operator.LessThan); + SQLIdentifierExpr item = new SQLIdentifierExpr("MAXVALUE"); + sqlSubPartitionValue.addItem(item); + defaultSubPartition.setValues(sqlSubPartitionValue); + sqlSubPartitionByRange.getSubPartitionTemplate().add(defaultSubPartition); + } + /** * Work out, as far as possible, the maximum size in bytes of each column: 1. 
For char(20) and varchar(10) types, the number in parentheses is the maximum length in characters; the exact number of bytes also depends on the character set diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ExecUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ExecUtils.java index 9d694e342..609aab69f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ExecUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ExecUtils.java @@ -17,15 +17,16 @@ package com.alibaba.polardbx.executor.utils; import com.alibaba.polardbx.common.TddlNode; +import com.alibaba.polardbx.common.async.AsyncTask; import com.alibaba.polardbx.common.constants.SequenceAttribute; import com.alibaba.polardbx.common.exception.NotSupportException; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.jdbc.IConnection; import com.alibaba.polardbx.common.jdbc.IDataSource; +import com.alibaba.polardbx.common.jdbc.ITransactionPolicy; import com.alibaba.polardbx.common.jdbc.MasterSlave; import com.alibaba.polardbx.common.jdbc.ParameterContext; -import com.alibaba.polardbx.common.jdbc.RawString; import com.alibaba.polardbx.common.model.Group; import com.alibaba.polardbx.common.model.RepoInst; import com.alibaba.polardbx.common.properties.ConnectionParams; @@ -33,9 +34,11 @@ import com.alibaba.polardbx.common.properties.DynamicConfig; import com.alibaba.polardbx.common.properties.MetricLevel; import com.alibaba.polardbx.common.properties.ParamManager; +import com.alibaba.polardbx.common.utils.AsyncUtils; import com.alibaba.polardbx.common.utils.ExecutorMode; import com.alibaba.polardbx.common.utils.GeneralUtil; -import com.alibaba.polardbx.common.utils.bloomfilter.FastIntBloomFilter; +import com.alibaba.polardbx.common.utils.TStringUtil; +import com.alibaba.polardbx.common.utils.bloomfilter.ConcurrentIntBloomFilter; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.common.utils.version.InstanceVersion; @@ -53,21 +56,32 @@ import com.alibaba.polardbx.executor.mpp.execution.TaskInfo; import com.alibaba.polardbx.executor.operator.util.ConcurrentRawHashTable; import com.alibaba.polardbx.executor.spi.IGroupExecutor; +import com.alibaba.polardbx.executor.spi.ITopologyExecutor; +import com.alibaba.polardbx.executor.sync.CollectVariableSyncAction; +import com.alibaba.polardbx.executor.spi.ITransactionManager; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; import com.alibaba.polardbx.gms.config.impl.InstConfUtil; +import com.alibaba.polardbx.gms.config.impl.MetaDbInstConfigManager; import com.alibaba.polardbx.gms.ha.impl.StorageHaManager; import com.alibaba.polardbx.gms.ha.impl.StorageInstHaContext; +import com.alibaba.polardbx.gms.metadb.MetaDbConnectionProxy; +import com.alibaba.polardbx.gms.metadb.table.FilesAccessor; +import com.alibaba.polardbx.gms.metadb.table.FilesRecord; import com.alibaba.polardbx.gms.node.GmsNodeManager; import com.alibaba.polardbx.gms.node.InternalNode; import com.alibaba.polardbx.gms.node.InternalNodeManager; import com.alibaba.polardbx.gms.node.Node; import com.alibaba.polardbx.gms.node.NodeStatusManager; import com.alibaba.polardbx.gms.sync.IGmsSyncAction; -import com.alibaba.polardbx.gms.topology.DbGroupInfoManager; +import 
com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.topology.DbInfoManager; +import com.alibaba.polardbx.gms.topology.DbTopologyManager; +import com.alibaba.polardbx.gms.topology.InstConfigAccessor; +import com.alibaba.polardbx.gms.topology.InstConfigRecord; import com.alibaba.polardbx.gms.topology.ServerInstIdManager; import com.alibaba.polardbx.gms.topology.SystemDbHelper; +import com.alibaba.polardbx.gms.util.InstIdUtil; import com.alibaba.polardbx.group.jdbc.DataSourceWrapper; import com.alibaba.polardbx.gms.util.GroupInfoUtil; import com.alibaba.polardbx.gms.util.MetaDbLogUtil; @@ -79,6 +94,7 @@ import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; +import com.alibaba.polardbx.optimizer.core.expression.ISelectable; import com.alibaba.polardbx.optimizer.core.expression.bean.NullValue; import com.alibaba.polardbx.optimizer.core.planner.rule.util.CBOUtil; import com.alibaba.polardbx.optimizer.core.rel.BaseQueryOperation; @@ -96,6 +112,7 @@ import com.alibaba.polardbx.optimizer.core.rel.LogicalView; import com.alibaba.polardbx.optimizer.core.rel.OSSTableScan; import com.alibaba.polardbx.optimizer.core.rel.PhyTableOperation; +import com.alibaba.polardbx.optimizer.core.rel.SemiHashJoin; import com.alibaba.polardbx.optimizer.core.rel.SingleTableOperation; import com.alibaba.polardbx.optimizer.core.rel.UnionOptHelper; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalShow; @@ -105,16 +122,20 @@ import com.alibaba.polardbx.optimizer.partition.PartitionInfo; import com.alibaba.polardbx.optimizer.rule.TddlRuleManager; import com.alibaba.polardbx.optimizer.utils.GroupConnId; +import com.alibaba.polardbx.optimizer.utils.IColumnarTransaction; import com.alibaba.polardbx.optimizer.utils.IDistributedTransaction; +import com.alibaba.polardbx.optimizer.utils.ITransaction; import com.alibaba.polardbx.optimizer.utils.OptimizerUtils; import com.alibaba.polardbx.optimizer.utils.PhyTableOperationUtil; import com.alibaba.polardbx.optimizer.utils.QueryConcurrencyPolicy; import com.alibaba.polardbx.optimizer.utils.RelUtils; +import com.alibaba.polardbx.rpc.pool.XConnection; import com.alibaba.polardbx.sequence.Sequence; import com.alibaba.polardbx.sequence.exception.SequenceException; import com.alibaba.polardbx.sequence.impl.BaseSequence; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; import com.google.common.util.concurrent.ListenableFuture; import it.unimi.dsi.fastutil.HashCommon; @@ -127,11 +148,10 @@ import org.apache.calcite.rel.core.Sort; import org.apache.calcite.rel.logical.LogicalValues; import org.apache.calcite.rel.metadata.RelMetadataQuery; -import org.apache.calcite.rex.RexDynamicParam; -import org.apache.calcite.rex.RexLiteral; import org.apache.calcite.rex.RexNode; import org.apache.calcite.sql.SqlSelect; import org.apache.calcite.util.Pair; +import org.apache.commons.lang3.ArrayUtils; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.weakref.jmx.internal.guava.primitives.Bytes; @@ -139,11 +159,11 @@ import javax.sql.DataSource; import java.nio.charset.StandardCharsets; import java.sql.Connection; -import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; import 
java.sql.Statement; +import java.text.MessageFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -157,17 +177,23 @@ import java.util.Map.Entry; import java.util.Objects; import java.util.Optional; +import java.util.Properties; import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import java.util.function.Function; import java.util.stream.Collectors; +import java.util.stream.IntStream; import static com.alibaba.polardbx.common.properties.ConnectionParams.MASTER_READ_WEIGHT; import static com.alibaba.polardbx.common.utils.thread.ThreadCpuStatUtil.NUM_CORES; +import static com.alibaba.polardbx.executor.gsi.utils.Transformer.buildBatchParam; import static com.alibaba.polardbx.executor.utils.failpoint.FailPointKey.FP_INJECT_IGNORE_INTERRUPTED_TO_STATISTIC_SCHEDULE_JOB; import static io.airlift.concurrent.MoreFutures.getFutureValue; import static io.airlift.concurrent.MoreFutures.tryGetFutureValue; @@ -177,6 +203,10 @@ public class ExecUtils { private static final Logger logger = LoggerFactory.getLogger(ExecUtils.class); private static final int MAX_PARALLELISM = 16; + public static byte[] hintPrefix = "/*DRDS /".getBytes(StandardCharsets.UTF_8); + public static byte[] hintDivision = "/".getBytes(StandardCharsets.UTF_8); + public static byte[] hintEnd = "/ */".getBytes(StandardCharsets.UTF_8); + public static byte[] hintNULL = "null".getBytes(StandardCharsets.UTF_8); /** * get a mapping from instance id (host:port) to a list of group data sources @@ -370,6 +400,20 @@ public static int getPolarDbCores(ParamManager paramManager, boolean master) { return dbParallelism; } + public static boolean needPutIfAbsent(ExecutionContext context, String key) { + boolean ret = true; + if (context.getHintCmds().containsKey(key)) { + ret = false; + } else { + Properties cnProperties = + MetaDbInstConfigManager.getInstance().getCnVariableConfigMap(); + if (cnProperties.containsKey(key)) { + ret = false; + } + } + return ret; + } + public static int getMppPrefetchNumForLogicalView(int num) { //TODO the default prefetch number should be decided by the number of PolarDB instances and their core counts return Math.min(num, NUM_CORES * 4); @@ -393,6 +437,73 @@ public static int getParallelismForLocal(ExecutionContext context) { return parallelism; } + public static int assignPartitionToExecutor(int counter, int allPartition, int partition, int executorSize) { + if (allPartition < executorSize) { + int fullGroup = executorSize / allPartition; + int leftExecutor = executorSize % allPartition; + int selectSize = fullGroup + ((partition < leftExecutor) ? 1 : 0); + int selectSeq = counter % selectSize; + return allPartition * selectSeq + partition; + } else { + return partition % executorSize; + } + } + + /** + * Calculate how many degrees of parallelism each hash table has. + * The hash table number = MIN(allPartition, executorSize) + * + * @param allPartition total number of partitions. + * @param executorSize total degrees of parallelism. + * @return degrees of parallelism each hash table has. 
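
The assignment scheme documented here tiles partitions over executors round-robin: when there are fewer partitions than executors, complete tiles of all partitions are laid out first and the remainder gets a partial tile; otherwise partitions wrap around the executors. A self-contained re-derivation of that layout (the class name is made up for illustration):

```java
import java.util.ArrayList;
import java.util.List;

class PartitionAssignmentDemo {
    static List<Integer> assign(int allPartition, int executorSize) {
        List<Integer> result = new ArrayList<>();
        if (allPartition < executorSize) {
            int fullGroup = executorSize / allPartition;     // complete tiles of all partitions
            for (int g = 0; g < fullGroup; g++) {
                for (int p = 0; p < allPartition; p++) {
                    result.add(p);
                }
            }
            for (int p = 0; p < executorSize % allPartition; p++) {
                result.add(p);                               // partial remainder tile
            }
        } else {
            for (int p = 0; p < allPartition; p++) {
                result.add(p % executorSize);                // more partitions than executors
            }
        }
        return result;
    }

    public static void main(String[] args) {
        // 3 partitions over 8 executors: every executor gets one hash table index
        System.out.println(assign(3, 8)); // [0, 1, 2, 0, 1, 2, 0, 1]
    }
}
```

With 3 partitions and 8 executors, hash table i is thus built by every executor whose assigned value is i, which matches the documented rule that the number of hash tables is MIN(allPartition, executorSize).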
+ */ + public static List assignPartitionToExecutor(int allPartition, int executorSize) { + List assignResult = new ArrayList<>(); + if (allPartition < executorSize) { + int fullGroup = executorSize / allPartition; + List fullPartList = IntStream.range(0, allPartition) + .boxed() + .collect(Collectors.toList()); + IntStream.range(0, fullGroup).forEach(t -> assignResult.addAll(fullPartList)); + int leftExecutor = executorSize % allPartition; + List leftPartList = IntStream.range(0, leftExecutor) + .boxed() + .collect(Collectors.toList()); + assignResult.addAll(leftPartList); + } else { + for (int part = 0; part < allPartition; part++) { + assignResult.add(part % executorSize); + } + } + return assignResult; + } + + /** + * Calculate how many partitions each hash table has. + * The hash table number = MIN(allPartition, executorSize) + * + * @param allPartition total number of partitions. + * @param executorSize total degrees of parallelism. + * @return number of partitions each hash table has. + */ + public static int[] partitionsOfEachBucket(int allPartition, int executorSize) { + final int hashTableNum = Math.min(allPartition, executorSize); + int[] results = new int[hashTableNum]; + + if (allPartition <= executorSize) { + for (int i = 0; i < hashTableNum; i++) { + results[i] = 1; + } + } else { + // hashTableNum = executorSize = parallelism + for (int partIndex = 0; partIndex < allPartition; partIndex++) { + final int hashTableIndex = partIndex % hashTableNum; + results[hashTableIndex]++; + } + } + return results; + } + public static List> resultSetToList(ResultCursor rs) { if (rs == null) { @@ -421,14 +532,6 @@ public static List> resultSetToList(ResultCursor rs) { return results; } - public static boolean allElementsNull(List args) { - boolean allArgsNull = true; - for (Object arg : args) { - allArgsNull = allArgsNull && ExecUtils.isNull(arg); - } - return allArgsNull; - } - public static List> resultSetToList(ResultSet rs) { if (rs == null) { @@ -500,20 +603,6 @@ public static int comp(Object c1, Object c2, DataType type, boolean isAsc) { } } - public static boolean isNull(Object o) { - if (o instanceof com.alibaba.polardbx.optimizer.core.function.calc.scalar.filter.Row.RowValue) { - return ExecUtils.allElementsNull( - ((com.alibaba.polardbx.optimizer.core.function.calc.scalar.filter.Row.RowValue) o).getValues()); - } - if (o == null) { - return true; - } - if (o instanceof NullValue) { - return true; - } - return false; - } - public static void closeStatement(java.sql.Statement stmt) { if (stmt != null) { try { @@ -643,6 +732,27 @@ public static QueryConcurrencyPolicy getQueryConcurrencyPolicy(ExecutionContext return QueryConcurrencyPolicy.SEQUENTIAL; } + public static List> getReturningResultByCursors(List cursors, + boolean isBroadcast) { + List> result = new ArrayList<>(); + try { + for (Cursor cursor : cursors) { + if (isBroadcast) { + result = buildBatchParam(cursor, false); + } else { + result.addAll(buildBatchParam(cursor, false)); + } + } + } finally { + for (Cursor inputCursor : cursors) { + if (inputCursor != null) { + inputCursor.close(new ArrayList<>()); + } + } + } + return result; + } + public static int getAffectRowsByCursors(List cursors, boolean isBroadcast) { int affectRows = 0; try { @@ -886,11 +996,6 @@ public static String buildDRDSTraceComment(ExecutionContext context) { return append.toString(); } - public static byte[] hintPrefix = "/*DRDS /".getBytes(StandardCharsets.UTF_8); - public static byte[] hintDivision = "/".getBytes(StandardCharsets.UTF_8); - public 
static byte[] hintEnd = "/ */".getBytes(StandardCharsets.UTF_8); - public static byte[] hintNULL = "null".getBytes(StandardCharsets.UTF_8); - public static byte[] buildDRDSTraceCommentBytes(ExecutionContext context) { String clientIp = context.getClientIp(); String traceId = context.getTraceId(); @@ -938,9 +1043,70 @@ public boolean exhaustValue() throws SequenceException { return sequence; } + public static void buildOneChunk(Chunk keyChunk, int position, ConcurrentRawHashTable hashTable, + int[] positionLinks, int[] hashCodeResults, int[] intermediates, + int[] blockHashCodes, + ConcurrentIntBloomFilter bloomFilter, int[] ignoreNullBlocks, + int ignoreNullBlocksSize) { + // Calculate hash codes of the whole chunk + keyChunk.hashCodeVector(hashCodeResults, intermediates, blockHashCodes, keyChunk.getPositionCount()); + + if (checkJoinKeysAllNullSafe(keyChunk, ignoreNullBlocks, ignoreNullBlocksSize)) { + // If all keys are not null, we can leave out the null-check procedure + for (int offset = 0; offset < keyChunk.getPositionCount(); offset++, position++) { + int next = hashTable.put(position, hashCodeResults[offset]); + positionLinks[position] = next; + if (bloomFilter != null) { + bloomFilter.putInt(hashCodeResults[offset]); + } + } + } else { + // Otherwise we have to check nullability for each row + for (int offset = 0; offset < keyChunk.getPositionCount(); offset++, position++) { + if (checkJoinKeysNulSafe(keyChunk, offset, ignoreNullBlocks, ignoreNullBlocksSize)) { + int next = hashTable.put(position, hashCodeResults[offset]); + positionLinks[position] = next; + if (bloomFilter != null) { + bloomFilter.putInt(hashCodeResults[offset]); + } + } + } + } + } + + public static void buildOneChunk(Chunk keyChunk, int position, ConcurrentRawHashTable hashTable, + int[] positionLinks, int[] hashCodeResults, int[] intermediates, + int[] blockHashCodes, + ConcurrentIntBloomFilter bloomFilter, List ignoreNullBlocks) { + // Calculate hash codes of the whole chunk + keyChunk.hashCodeVector(hashCodeResults, intermediates, blockHashCodes, keyChunk.getPositionCount()); + + if (checkJoinKeysAllNullSafe(keyChunk, ignoreNullBlocks)) { + // If all keys are not null, we can leave out the null-check procedure + for (int offset = 0; offset < keyChunk.getPositionCount(); offset++, position++) { + int next = hashTable.put(position, hashCodeResults[offset]); + positionLinks[position] = next; + if (bloomFilter != null) { + bloomFilter.putInt(hashCodeResults[offset]); + } + } + } else { + // Otherwise we have to check nullability for each row + for (int offset = 0; offset < keyChunk.getPositionCount(); offset++, position++) { + if (checkJoinKeysNulSafe(keyChunk, offset, ignoreNullBlocks)) { + int next = hashTable.put(position, hashCodeResults[offset]); + positionLinks[position] = next; + if (bloomFilter != null) { + bloomFilter.putInt(hashCodeResults[offset]); + } + } + } + } + } + public static void buildOneChunk(Chunk keyChunk, int position, ConcurrentRawHashTable hashTable, int[] positionLinks, - FastIntBloomFilter bloomFilter, List ignoreNullBlocks) { + ConcurrentIntBloomFilter bloomFilter, List ignoreNullBlocks) { // Calculate hash codes of the whole chunk int[] hashes = keyChunk.hashCodeVector(); @@ -950,7 +1116,7 @@ public static void buildOneChunk(Chunk keyChunk, int position, ConcurrentRawHash int next = hashTable.put(position, hashes[offset]); positionLinks[position] = next; if (bloomFilter != null) { - bloomFilter.put(hashes[offset]); + bloomFilter.putInt(hashes[offset]); } } } else { @@ -960,13 
+1126,31 @@ public static void buildOneChunk(Chunk keyChunk, int position, ConcurrentRawHash int next = hashTable.put(position, hashes[offset]); positionLinks[position] = next; if (bloomFilter != null) { - bloomFilter.put(hashes[offset]); + bloomFilter.putInt(hashes[offset]); } } } } } + public static boolean checkJoinKeysAllNullSafe(Chunk keyChunk, int[] ignoreNullBlocks, int size) { + for (int i = 0; i < size; i++) { + if (keyChunk.getBlock(ignoreNullBlocks[i]).mayHaveNull()) { + return false; + } + } + return true; + } + + public static boolean checkJoinKeysNulSafe(Chunk keyChunk, int offset, int[] ignoreNullBlocks, int size) { + for (int i = 0; i < size; i++) { + if (keyChunk.getBlock(ignoreNullBlocks[i]).isNull(offset)) { + return false; + } + } + return true; + } + public static boolean checkJoinKeysAllNullSafe(Chunk keyChunk, List ignoreNullBlocks) { for (int i : ignoreNullBlocks) { if (keyChunk.getBlock(i).mayHaveNull()) { @@ -1332,14 +1516,37 @@ public static boolean isPowerOfTwo(int val) { public static int partition(int hashCode, int partitionCount, boolean isPowerOfTwo) { if (isPowerOfTwo) { -// hashCode = HashCommon.mix(hashCode) >>> (Integer.numberOfLeadingZeros(partitionCount) + 1); hashCode = HashCommon.murmurHash3(hashCode); return hashCode & (partitionCount - 1); } else { hashCode = HashCommon.murmurHash3(hashCode) & Integer.MAX_VALUE; //ensure positive return hashCode % partitionCount; } -// return Math.abs(HashCommon.murmurHash3(hashCode)) % partitionCount; + } + + public static int partitionUnderPairWise(long hashCode, int partitionCount, int fullPartCount, + boolean isFullPartPowerOfTwo) { + if (isFullPartPowerOfTwo) { + return (int) ((hashCode & (fullPartCount - 1)) % partitionCount); + } else { + return (int) (((hashCode & Long.MAX_VALUE) % fullPartCount) % partitionCount); + } + } + + public static int calcStoragePartNum(long hashCode, int fullPartCount, boolean isFullPartPowerOfTwo) { + if (isFullPartPowerOfTwo) { + return (int) (hashCode & (fullPartCount - 1)); + } else { + return (int) ((hashCode & Long.MAX_VALUE) % fullPartCount); + } + } + + public static int directPartition(long hashCode, int partitionCount, boolean isPowerOfTwo) { + if (isPowerOfTwo) { + return (int) (hashCode & (partitionCount - 1)); + } else { + return (int) ((hashCode & Long.MAX_VALUE) % partitionCount); + } } /** @@ -1547,8 +1754,14 @@ public static long getMaxRowCount(RelNode node, ExecutionContext context) { Map params = context.getParams().getCurrentParameter(); if (((Sort) node).fetch != null) { outputCount = CBOUtil.getRexParam(sort.fetch, params); + long offset = 0; if (sort.offset != null) { - outputCount += CBOUtil.getRexParam(sort.offset, params); + offset = CBOUtil.getRexParam(sort.offset, params); + } + if (outputCount == Long.MAX_VALUE || offset == Long.MAX_VALUE) { + outputCount = Long.MAX_VALUE; + } else { + outputCount = outputCount + offset; } } } @@ -1641,7 +1854,7 @@ public static void getRootFailedTask(Optional stageInfo, AtomicRefere t -> { if (t.getTaskStatus().getState().isException()) { if (fail.get() != null) { - if (fail.get().getStats().getEndTime().getMillis() > t.getStats().getEndTime() + if (fail.get().getTaskStats().getEndTime().getMillis() > t.getTaskStats().getEndTime() .getMillis()) { fail.set(t); } @@ -1678,12 +1891,22 @@ public static boolean existMppOnlyInstanceNode() { return nodes != null && nodes.size() > 0; } + public static int getActiveNodeCount() { + if (ServiceProvider.getInstance().getServer() == null) { + return 1; + } + InternalNodeManager 
nodeManager = ServiceProvider.getInstance().getServer().getNodeManager(); + return Math.max(nodeManager.getAllNodes().getActiveNodes().size(), 1); + } + public static boolean convertBuildSide(Join join) { boolean convertBuildSide = false; if (join instanceof HashJoin) { convertBuildSide = ((HashJoin) join).isOuterBuild(); } else if (join instanceof HashGroupJoin) { convertBuildSide = true; + } else if (join instanceof SemiHashJoin) { + convertBuildSide = ((SemiHashJoin) join).isOuterBuild(); } return convertBuildSide; } @@ -1723,7 +1946,7 @@ public static NodeStatusManager getStatusManager(String schema) { public static void syncNodeStatus(String schema) { try { IGmsSyncAction action = new RefreshNodeSyncAction(schema); - SyncManagerHelper.sync(action, schema); + SyncManagerHelper.sync(action, schema, SyncScope.ALL); } catch (Exception e) { logger.warn("node sync error", e); } @@ -1964,47 +2187,236 @@ public static boolean useParameterDelegate(ExecutionContext context) { } /** - * used for information schema phy table name match + * version: select @@polardbx_engine_version as version + * releaseDate: select @@polardbx_release_date as release_date + * + * @return {Version}-{ReleaseDate} + */ + public static String getDnPolardbVersion() throws Exception { + String sql = MetaDbUtil.POLARDB_VERSION_SQL; + Set allDnId = ExecUtils.getAllDnStorageId(); + if (allDnId.isEmpty()) { + throw new SQLException("Failed to get DN datasource"); + } + String dnId = allDnId.iterator().next(); + try (Connection conn = DbTopologyManager.getConnectionForStorage(dnId); + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery(sql)) { + String dnPolardbxVersion = null; + String dnReleaseDate = null; + if (rs.next()) { + dnPolardbxVersion = rs.getString(1); + dnReleaseDate = rs.getString(2); + } + return String.format("%s-%s", dnPolardbxVersion, dnReleaseDate); + } + } + + /** + * Scan each DN, and find all prepared trx, and record the max trx id and min trx id of these trx. + * + * @param dnIds [in] Data nodes to scan. + * @param executor [in] Executor to get connection of DN. + * @param exceptions [out] Exceptions during scan. + * @param minId [out] Min id of all prepared trx. + * @param maxId [out] Max id of all prepared trx. 
*/ - public static void handleTableNameParams(Object obj, Map params, - Set indexTableNames) { - if (obj instanceof RexDynamicParam) { - String tableName = String.valueOf(params.get(((RexDynamicParam) obj).getIndex() + 1).getValue()); - indexTableNames.add(tableName.toLowerCase()); - } else if (obj instanceof RexLiteral) { - String tableName = ((RexLiteral) obj).getValueAs(String.class); - indexTableNames.add(tableName.toLowerCase()); - } else if (obj instanceof RawString) { - for (Object o : ((RawString) obj).getObjList()) { - assert !(o instanceof List); - indexTableNames.add(o.toString().toLowerCase()); - } - } - } - - public static String getDnPolardbVersion() { - String dnPolardbVersion = null; - if (ExecutorContext.getContext(SystemDbHelper.CDC_DB_NAME) != null) { - TopologyHandler topologyHandler = - ExecutorContext.getContext(SystemDbHelper.CDC_DB_NAME).getTopologyHandler(); - if (topologyHandler.getMatrix().getGroups().isEmpty()) { - return dnPolardbVersion; - } - Group group = topologyHandler.getMatrix().getGroups().get(0); - String groupName = group.getName(); - IGroupExecutor groupExecutor = topologyHandler.get(groupName); - DataSource dataSource = groupExecutor.getDataSource(); - try (Connection conn = dataSource.getConnection(); - Statement stmt = conn.createStatement(); - ResultSet rs = stmt.executeQuery("SELECT @@polardb_version")) { - if (rs.next()) { - dnPolardbVersion = rs.getString(1); + public static void scanRecoveredTrans(Set dnIds, ITopologyExecutor executor, + ConcurrentLinkedQueue exceptions, + AtomicLong minId, AtomicLong maxId) { + List futures = new ArrayList<>(); + // Parallelism is the number of DN. + for (String dnId : dnIds) { + futures.add(executor.getExecutorService().submit(null, null, AsyncTask.build(() -> { + try (Connection conn = DbTopologyManager.getConnectionForStorage(dnId); + Statement stmt = conn.createStatement()) { + if (conn.isWrapperFor(XConnection.class)) { + // Note: XA RECOVER will hold the LOCK_transaction_cache lock, so never block it. + conn.unwrap(XConnection.class).setDefaultTokenKb(Integer.MAX_VALUE); + } + ResultSet rs = stmt.executeQuery("XA RECOVER"); + while (rs.next()) { + long formatId = rs.getLong(1); + int gtridLength = rs.getInt(2); + byte[] data = rs.getBytes(4); + + if (formatId == 1) { + byte[] gtridData = Arrays.copyOfRange(data, 0, gtridLength); + if (checkGtridPrefix(gtridData)) { + int atSymbolIndex = ArrayUtils.indexOf(gtridData, (byte) '@'); + long trxId = Long.parseLong(new String(gtridData, 5, atSymbolIndex - 5), 16); + + // CAS to update min id. + long tmp = minId.get(); + while (trxId < tmp && !minId.compareAndSet(tmp, trxId)) { + tmp = minId.get(); + } + + // CAS to update max id. 
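+                            // Editor's note: same lock-free pattern as the min update
+                            // above; re-read and retry compareAndSet until trxId no
+                            // longer exceeds the recorded maximum or the CAS succeeds.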
+                            tmp = maxId.get();
+                            while (trxId > tmp && !maxId.compareAndSet(tmp, trxId)) {
+                                tmp = maxId.get();
+                            }
+                        }
+                    }
+                }
+            } catch (Exception e) {
+                exceptions.offer(e);
+            }
-        }
+        })));
+        }
+        AsyncUtils.waitAll(futures);
+    }
+
+    /**
+     * Check whether the gtrid begins with the prefix 'drds-'.
+     */
+    public static boolean checkGtridPrefix(byte[] data) {
+        return data.length > 5
+            && data[0] == 'd' && data[1] == 'r' && data[2] == 'd' && data[3] == 's' && data[4] == '-';
+    }
+
+    public static List<FilesRecord> getFilesMetaByNames(List<String> files) {
+        try (final Connection connection = MetaDbUtil.getConnection()) {
+            final FilesAccessor filesAccessor = new FilesAccessor();
+            filesAccessor.setConnection(connection);
+            connection.setAutoCommit(false);
+            try {
+                return filesAccessor.queryFilesByNames(files);
+            } finally {
+                connection.setAutoCommit(true);
+            }
+        } catch (Throwable t) {
+            throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, t);
+        }
+    }
+
+    /**
+     * Wait until a CN global variable in DynamicConfig has taken the expected value
+     * on all CN nodes after a SET GLOBAL.
+     *
+     * @param k class member variable in DynamicConfig, in string format.
+     * @param v expected value after setting global.
+     * @param timeout wait timeout, in seconds.
+     * @return null on success, or an error message on failure.
+     */
+    public static String waitVarChange(String k, String v, long timeout) {
+        logger.warn("Start waiting var changed: " + k + " -> " + v);
+        int retry = 0, waitMilli = 1000;
+        int maxRetry = (int) (timeout * 1000 / waitMilli);
+        boolean success = true;
+        while (retry < maxRetry) {
+            success = true;
+            retry++;
+            try {
+                CollectVariableSyncAction action = new CollectVariableSyncAction(k);
+                List<List<Map<String, Object>>> values = SyncManagerHelper.sync(action, SyncScope.ALL);
+                for (List<Map<String, Object>> value : values) {
+                    String result = value.get(0).get("Value").toString();
+                    if (!v.equalsIgnoreCase(result)) {
+                        success = false;
+                        break;
+                    }
+                }
+                if (success) {
+                    break;
+                } else {
+                    Thread.sleep(waitMilli);
+                }
+            } catch (Throwable t) {
+                return "Error occurs when waiting CN global variable changed: " + t.getMessage();
+            }
+        }
+        logger.warn("Finish waiting var changed: " + k + " -> " + v + ", success: " + success);
+        if (!success) {
+            return "Timeout waiting CN global variable changed.";
+        }
+        return null;
+    }
+
+    /**
+     * @return a columnar read-only transaction whose snapshot uses the given tso.
+     */
+    public static ITransaction createColumnarTransaction(String schema, ExecutionContext ec, long tso) {
+        ITransactionManager manager = ExecutorContext.getContext(schema).getTransactionManager();
+        ITransaction trx =
+            manager.createTransaction(ITransactionPolicy.TransactionClass.COLUMNAR_READ_ONLY_TRANSACTION, ec);
+        if (trx instanceof IColumnarTransaction) {
+            ((IColumnarTransaction) trx).setTsoTimestamp(tso);
+        }
+        return trx;
+    }
+
+    public static Runnable forceAllTrx2PC() throws SQLException, InterruptedException {
+        // Force all transactions to be strict 2PC transactions;
+        // the following configs are expected to be true.
+        String instId = InstIdUtil.getInstId();
+        Set<String> paramKeys = ImmutableSet.of(
+            ConnectionProperties.ENABLE_XA_TSO,
+            ConnectionProperties.ENABLE_AUTO_COMMIT_TSO,
+            ConnectionProperties.FORBID_AUTO_COMMIT_TRX
+        );
+        InstConfigAccessor accessor = new InstConfigAccessor();
+        accessor.setConnection(MetaDbUtil.getConnection());
+        // Original config.
+        List<InstConfigRecord> records = accessor.queryByParamKeys(instId, paramKeys);
+        // Configs that still need to be changed.
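+        // Editor's note: start from all required keys and drop those already set to
+        // "true" in metadb; whatever remains is flipped below and later restored by
+        // the Runnable this method returns.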
+        Set<String> needChangedConfigs = new HashSet<>(paramKeys);
+        for (InstConfigRecord record : records) {
+            if (needChangedConfigs.contains(record.paramKey.toUpperCase())
+                && "true".equalsIgnoreCase(record.paramVal)) {
+                needChangedConfigs.remove(record.paramKey.toUpperCase());
+            }
+        }
-        return dnPolardbVersion;
+        // Records actually changed (null if nothing needed changing).
+        List<InstConfigRecord> changedRecords = null;
+        if (!needChangedConfigs.isEmpty()) {
+            // Change these configs.
+            Properties properties = new Properties();
+            for (String changedRecord : needChangedConfigs) {
+                properties.put(changedRecord, "true");
+            }
+            MetaDbUtil.setGlobal(properties);
+            // Re-read the records we just changed.
+            changedRecords = accessor.queryByParamKeys(instId, needChangedConfigs);
+            waitVarChange("forbidAutoCommitTrx", "true", 5);
+            // TODO: find a better way to drain in-flight transactions.
+            Thread.sleep(1000);
+        }
+
+        final AtomicBoolean recover = new AtomicBoolean(false);
+
+        // Recover these changed configs.
+        final List<InstConfigRecord> changedRecords0 = new ArrayList<>();
+        if (null == changedRecords || changedRecords.isEmpty()) {
+            // No need to recover.
+            recover.set(true);
+        } else {
+            changedRecords0.addAll(changedRecords);
+        }
+        return () -> {
+            if (!recover.compareAndSet(false, true)) {
+                return;
+            }
+            // Restore variables.
+            List<InstConfigRecord> current = accessor.queryByParamKeys(instId, needChangedConfigs);
+            Properties properties = new Properties();
+            for (InstConfigRecord changedRecord : changedRecords0) {
+                for (InstConfigRecord currentRecord : current) {
+                    if (currentRecord.paramKey.equalsIgnoreCase(changedRecord.paramKey)) {
+                        if (currentRecord.gmtModified.compareTo(changedRecord.gmtModified) <= 0) {
+                            // Not modified since we changed it; restore it.
+                            properties.put(changedRecord.paramKey, "false");
+                        }
+                        break;
+                    }
+                }
+            }
+            try {
+                MetaDbUtil.setGlobal(properties);
+            } catch (SQLException e) {
+                throw new RuntimeException(e);
+            }
+        };
+    }
 }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ExplainExecutorUtil.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ExplainExecutorUtil.java
index 01c03cd11..ed3e81067 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ExplainExecutorUtil.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ExplainExecutorUtil.java
@@ -53,7 +53,9 @@
 import com.alibaba.polardbx.optimizer.core.datatype.DataTypes;
 import com.alibaba.polardbx.optimizer.core.planner.ExecutionPlan;
 import com.alibaba.polardbx.optimizer.core.planner.PlanCache;
+import com.alibaba.polardbx.optimizer.core.planner.SqlConverter;
 import com.alibaba.polardbx.optimizer.core.planner.Xplanner.RelXPlanOptimizer;
+import com.alibaba.polardbx.optimizer.core.planner.rule.Xplan.XPlanCalcRule;
 import com.alibaba.polardbx.optimizer.core.rel.BaseTableOperation;
 import com.alibaba.polardbx.optimizer.core.rel.DirectMultiDBTableOperation;
 import com.alibaba.polardbx.optimizer.core.rel.DirectTableOperation;
@@ -85,6 +87,7 @@
 import com.alibaba.polardbx.optimizer.sharding.ConditionExtractor;
 import com.alibaba.polardbx.optimizer.sharding.result.ExtractionResult;
 import com.alibaba.polardbx.optimizer.sharding.result.PlanShardInfo;
+import com.alibaba.polardbx.optimizer.statis.XplanStat;
 import com.alibaba.polardbx.optimizer.utils.ExplainResult;
 import com.alibaba.polardbx.optimizer.utils.ExplainUtils;
 import com.alibaba.polardbx.optimizer.utils.OptimizerUtils;
@@ -102,6 +105,8 @@
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
+import
org.apache.calcite.plan.RelOptCluster; +import org.apache.calcite.plan.RelOptSchema; import org.apache.calcite.plan.RelOptUtil; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.RelShuttle; @@ -118,7 +123,6 @@ import org.apache.calcite.sql.SqlExplainLevel; import org.apache.calcite.sql.SqlInsert; import org.apache.calcite.sql.SqlKind; -import org.apache.calcite.sql.SqlSelect; import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.util.Pair; import org.apache.calcite.util.trace.RuntimeStatisticsSketch; @@ -132,6 +136,8 @@ import java.sql.Timestamp; import java.text.DecimalFormat; import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; @@ -149,6 +155,7 @@ import static com.alibaba.polardbx.optimizer.utils.ExplainResult.isExplainJsonPlan; import static com.alibaba.polardbx.optimizer.utils.ExplainResult.isExplainLogicalView; import static com.alibaba.polardbx.optimizer.utils.ExplainResult.isExplainOptimizer; +import static com.alibaba.polardbx.optimizer.utils.ExplainResult.isExplainPipeline; import static com.alibaba.polardbx.optimizer.utils.ExplainResult.isExplainSharding; import static com.alibaba.polardbx.optimizer.utils.ExplainResult.isExplainSimple; import static com.alibaba.polardbx.optimizer.utils.ExplainResult.isExplainStatistics; @@ -209,6 +216,8 @@ public static ResultCursor explain(ExecutionPlan executionPlan, ExecutionContext return ExplainStatisticsHandler.handleExplainStatistics(executionContext, executionPlan); } else if (isExplainVec(explain)) { return ExplainExecutorUtil.handleExplainVec(executionContext, executionPlan, explain.explainMode); + } else if (isExplainPipeline(explain)) { + return ExplainExecutorUtil.handleExplainPipeline(executionContext, executionPlan); } else if (executionPlan.getPlan() instanceof BaseDdlOperation) { return handleDdl(executionContext, executionPlan); } else { @@ -822,6 +831,55 @@ private static ResultCursor handleExplainDdl(ExecutionContext executionContext, } + private static ResultCursor handleExplainPipeline(ExecutionContext executionContext, ExecutionPlan executionPlan) { + // To collect the runtime driver stats that detected by StageInfo. + Map> driverStatistics = new HashMap<>(); + executionContext.setDriverStatistics(driverStatistics); + + ArrayResultCursor result = new ArrayResultCursor("ExecutionPlan"); + result.addColumn("trace_id", DataTypes.StringType); + result.addColumn("stage-pipeline", DataTypes.StringType); + result.addColumn("node_id", DataTypes.StringType); + result.addColumn("driver_id", DataTypes.StringType); + result.addColumn("running_cost", DataTypes.StringType); + result.addColumn("pending_cost", DataTypes.StringType); + result.addColumn("blocked_cost", DataTypes.StringType); + result.addColumn("open_cost", DataTypes.StringType); + result.addColumn("total_cost", DataTypes.StringType); + result.addColumn("running_count", DataTypes.StringType); + result.addColumn("pending_count", DataTypes.StringType); + result.addColumn("blocked_count", DataTypes.StringType); + result.initMeta(); + + ExecutorHelper.selectExecutorMode(executionPlan.getPlan(), executionContext, true); + if (executionContext.getExecuteMode() == ExecutorMode.MPP) { + // The statement of EXPLAIN PIPELINE is only for MPP mode. + executePlanForExplainAnalyze(executionPlan, executionContext); + + // Sorting all driver information according to their unique id. 
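+            // Editor's note: the comparator below orders rows by stage-pipeline name
+            // (case-insensitive), then by numeric node_id, then by numeric driver_id,
+            // matching the stage-pipeline, node_id and driver_id columns declared above.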
+ List allDriverInfo = new ArrayList<>(); + driverStatistics.values().forEach(allDriverInfo::addAll); + Collections.sort(allDriverInfo, (o1, o2) -> { + int comparison; + if ((comparison = String.CASE_INSENSITIVE_ORDER.compare((String) o1[1], (String) o2[1])) != 0) { + return comparison; + } else if ((comparison = Long.valueOf((String) o1[2]).compareTo(Long.valueOf((String) o2[2]))) != 0) { + return comparison; + } else { + return Long.valueOf((String) o1[3]).compareTo(Long.valueOf((String) o2[3])); + } + } + + ); + + for (Object[] driverInfo : allDriverInfo) { + result.addRow(driverInfo); + } + } + + return result; + } + private static ResultCursor handleExplain(ExecutionContext executionContext, ExecutionPlan executionPlan, ExplainResult.ExplainMode mode) { SqlExplainLevel explainLevel = SqlExplainLevel.EXPPLAN_ATTRIBUTES; @@ -849,7 +907,8 @@ private static ResultCursor handleExplain(ExecutionContext executionContext, Exe if (executionContext.getHintCmds() == null) { executionContext.putAllHintCmds(new HashMap<>()); } - executionContext.getHintCmds().put(ConnectionProperties.MPP_METRIC_LEVEL, 3); + executionContext.getHintCmds() + .put(ConnectionProperties.MPP_METRIC_LEVEL, MetricLevel.OPERATOR.metricLevel); executePlanForExplainAnalyze(executionPlan, executionContext); runtimeStatistic = statistics.toMppSketch(); } else { @@ -1067,56 +1126,65 @@ public RelNode visit(TableScan scan) { boolean metaInit = false; ArrayResultCursor result = new ArrayResultCursor("PhysicalPlan"); if (executionPlan.getPlan() instanceof BaseTableOperation) { - BaseTableOperation operation = (BaseTableOperation) executionPlan.getPlan(); - if (cursorMode && operation.getLockMode() == SqlSelect.LockMode.UNDEF && - operation.getXTemplate() != null && executionContext.getUnOptimizedPlan() != null) { - // handle x_plan - boolean success = explainExecuteXPlan(executionContext.getUnOptimizedPlan(), metaInit, - result, executionContext); - if (success) { - metaInit = true; - } - } - if (!metaInit) { - ResultCursor rc = PlanExecutor.execByExecPlanNodeByOne(executionPlan, executionContext); + ResultCursor rc = PlanExecutor.execByExecPlanNodeByOne(executionPlan, executionContext); + try { + boolean xplanBuilt = false; rc.setCursorMeta(result.getMeta()); - try { - Row row = rc.next(); + Row row = rc.next(); + if (!StringUtils.isEmpty(XplanStat.getXplanIndex(executionContext.getXplanStat()))) { + if (explainExecuteXPlan( + ((BaseTableOperation) executionPlan.getPlan()).getOriginPlan(), + metaInit, + result, executionContext)) { + metaInit = true; + xplanBuilt = true; + while (rc.next() != null) { + // consume all result + } + } + } + if (!xplanBuilt) { initOriginMeta(rc, row, result); metaInit = true; while (row != null) { result.addRow(row.getValues().toArray()); row = rc.next(); } - } catch (Exception e) { - throw GeneralUtil.nestedException(e); - } finally { - rc.close(Lists.newArrayList()); } + } catch (Exception e) { + throw GeneralUtil.nestedException(e); + } finally { + rc.close(Lists.newArrayList()); } } for (LogicalView lv : views) { - if (cursorMode && lv.getXPlan() != null) { - boolean success = explainExecuteXPlan(lv.getPushedRelNode(), metaInit, - result, executionContext); - if (success) { - metaInit = true; - continue; - } - } ExecutionPlan lp = new ExecutionPlan(executionPlan.getAst(), lv, null); ResultCursor rc = PlanExecutor.execByExecPlanNodeByOne(lp, executionContext); - rc.setCursorMeta(result.getMeta()); + try { + rc.setCursorMeta(result.getMeta()); Row row = rc.next(); - if (!metaInit) { - 
initOriginMeta(rc, row, result); - metaInit = true; + boolean xplanBuilt = false; + if (cursorMode && !StringUtils.isEmpty(XplanStat.getXplanIndex(executionContext.getXplanStat()))) { + if (explainExecuteXPlan(lv.getPushedRelNode(), metaInit, + result, executionContext)) { + metaInit = true; + xplanBuilt = true; + while (rc.next() != null) { + // consume all result + } + } } - while (row != null) { - result.addRow(row.getValues().toArray()); - row = rc.next(); + if (!xplanBuilt) { + if (!metaInit) { + initOriginMeta(rc, row, result); + metaInit = true; + } + while (row != null) { + result.addRow(row.getValues().toArray()); + row = rc.next(); + } } } catch (Exception e) { throw GeneralUtil.nestedException(e); @@ -1136,16 +1204,21 @@ public RelNode visit(TableScan scan) { */ private static boolean explainExecuteXPlan(RelNode plan, boolean metaInit, ArrayResultCursor result, ExecutionContext executionContext) { - if (!executionContext.getParamManager().getBoolean(ConnectionParams.CONN_POOL_XPROTO_XPLAN)) { + if (plan == null) { return false; } - RelXPlanOptimizer.IndexFinder indexFinder = new RelXPlanOptimizer.IndexFinder(); - double finalRowCount; - synchronized (plan.getCluster().getMetadataQuery()) { - indexFinder.go(RelXPlanOptimizer.optimize(plan)); - finalRowCount = plan.getCluster().getMetadataQuery().getRowCount(plan); - } - if (!indexFinder.found()) { + SqlConverter sqlConverter = SqlConverter.getInstance( + PlannerContext.getPlannerContext(plan).getSchemaName(), executionContext); + RelOptCluster cluster = sqlConverter.createRelOptCluster(); + RelOptSchema relOptSchema = sqlConverter.getCatalog(); + String serialPlan = PlanManagerUtil.relNodeToJson(plan); + plan = PlanManagerUtil.jsonToRelNode(serialPlan, cluster, relOptSchema); + PlannerContext.getPlannerContext(plan).setParams(executionContext.getParams()); + RelXPlanOptimizer.XplanExplainExecuteVisitor indexFinder = + new RelXPlanOptimizer.XplanExplainExecuteVisitor(executionContext); + indexFinder.go(RelXPlanOptimizer.optimizeFilter(plan)); + XPlanCalcRule.IndexInfo indexInfo = indexFinder.getIndexInfo(); + if (!indexInfo.isFound()) { return false; } // build meta @@ -1174,30 +1247,26 @@ private static boolean explainExecuteXPlan(RelNode plan, boolean metaInit, result.initMeta(); } // add result - int cnt = 0; - String index = indexFinder.getIndex(); + String index = indexInfo.getIndex(); if (StringUtils.isEmpty(index)) { index = "Primary"; } String type = "Primary".equalsIgnoreCase(index) ? "const" : "ref"; - double filtered = finalRowCount / indexFinder.getRowCount() * 100; - if (filtered > 100D) { - filtered = 100D; - } + double filtered = indexInfo.getFinalRowCount() / indexInfo.getRowCount() * 100; result.addRow(new Object[] { - ++cnt, + 1, "SIMPLE", - indexFinder.getTableName(), + indexInfo.getTableName(), null, type, - index, + String.join(",", indexInfo.getCandidateIndexes()), index, 8, null, - indexFinder.getRowCount(), - String.format("%.2f", filtered), - "Using XPlan" + (indexFinder.isUsingWhere() ? ", Using where" : ""), + indexInfo.getRowCount(), + String.format("%.2f", Math.min(filtered, 100D)), + "Using XPlan" + (indexInfo.isUsingWhere() ? 
", Using where" : ""), }); plan.getCluster().invalidateMetadataQuery(); return true; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/GroupKey.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/GroupKey.java index 20964449a..1d511d15f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/GroupKey.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/GroupKey.java @@ -129,7 +129,7 @@ public boolean equals(Object obj) { return true; } - public boolean equalsForUpdate(Object obj) { + public boolean equalsForUpdate(Object obj, boolean checkJsonByStringCompare) { if (this == obj) { return true; } @@ -163,7 +163,7 @@ public boolean equalsForUpdate(Object obj) { } if (thisObject instanceof String && that.groupKeys[i] instanceof String // we should not compare string when type is json - && !(dataType instanceof JsonType)) { + && (checkJsonByStringCompare || !(dataType instanceof JsonType))) { if (!thisObject.equals(that.groupKeys[i])) { return false; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ImportTableTaskManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ImportTableTaskManager.java new file mode 100644 index 000000000..1bf110cd0 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ImportTableTaskManager.java @@ -0,0 +1,59 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.utils; + +import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; + +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Semaphore; + +/** + * Created by zhuqiwei. 
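+ * <p>
+ * Runs tasks with bounded parallelism: a counting semaphore caps the number of
+ * in-flight tasks handed to a fixed-size thread pool, so {@code execute} blocks
+ * once {@code parallelism} tasks are already running.
+ * <p>
+ * Hypothetical usage (editor's sketch; {@code importOneTable} is illustrative):
+ * <pre>{@code
+ * ImportTableTaskManager manager = new ImportTableTaskManager(4);
+ * for (String table : tables) {
+ *     manager.execute(() -> importOneTable(table)); // blocks once 4 are running
+ * }
+ * manager.shutdown();
+ * }</pre>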
+ * + * @author zhuqiwei + */ +public class ImportTableTaskManager { + private final Semaphore semaphore; + private final ExecutorService executor; + + public ImportTableTaskManager(int parallelism) { + this.semaphore = new Semaphore(parallelism); + this.executor = Executors.newFixedThreadPool(parallelism); + } + + public void execute(Runnable task) { + try { + semaphore.acquire(); + executor.execute(() -> { + try { + task.run(); + } finally { + semaphore.release(); + } + }); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new TddlNestableRuntimeException(e); + } + } + + public void shutdown() { + executor.shutdown(); + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/PolarPrivilegeUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/PolarPrivilegeUtils.java index 2ca319225..e7bc08860 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/PolarPrivilegeUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/PolarPrivilegeUtils.java @@ -16,28 +16,39 @@ package com.alibaba.polardbx.executor.utils; -import com.alibaba.polardbx.druid.sql.SQLUtils; -import com.alibaba.polardbx.druid.sql.ast.SqlType; -import com.google.common.collect.Sets; import com.alibaba.polardbx.common.constants.SystemTables; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; -import com.taobao.tddl.common.privilege.PrivilegePoint; +import com.alibaba.polardbx.common.privilege.ColumnPrivilegeVerifyItem; import com.alibaba.polardbx.common.privilege.PrivilegeVerifyItem; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.common.utils.TStringUtil; +import com.alibaba.polardbx.druid.sql.SQLUtils; +import com.alibaba.polardbx.druid.sql.ast.SqlType; +import com.alibaba.polardbx.druid.sql.ast.SQLStatement; import com.alibaba.polardbx.gms.privilege.Permission; import com.alibaba.polardbx.gms.privilege.PermissionCheckContext; +import com.alibaba.polardbx.gms.privilege.PolarAccount; import com.alibaba.polardbx.gms.privilege.PolarPrivManager; import com.alibaba.polardbx.gms.privilege.PolarPrivUtil; import com.alibaba.polardbx.gms.privilege.PrivilegeKind; +import com.alibaba.polardbx.gms.lbac.LBACPrivilegeCheckUtils; import com.alibaba.polardbx.gms.topology.SystemDbHelper; +import com.alibaba.polardbx.lbac.LBACException; import com.alibaba.polardbx.optimizer.config.schema.DefaultDbSchema; -import com.alibaba.polardbx.optimizer.config.schema.MetaDbSchema; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.planner.ExecutionPlan; +import com.alibaba.polardbx.optimizer.parse.FastsqlUtils; import com.alibaba.polardbx.optimizer.parse.privilege.PrivilegeContext; +import com.alibaba.polardbx.optimizer.parse.visitor.DrdsColumnAccessCollector; +import com.alibaba.polardbx.optimizer.planmanager.PlanManagerUtil; +import com.google.common.collect.Sets; +import com.taobao.tddl.common.privilege.PrivilegePoint; import org.apache.commons.lang3.StringUtils; +import java.util.Collections; +import java.util.List; import java.util.Set; /** @@ -50,6 +61,7 @@ public class PolarPrivilegeUtils { static { allowedSqlTypeOfDefaultDb.add(SqlType.SELECT); + allowedSqlTypeOfDefaultDb.add(SqlType.SHOW_CONVERT_TABLE_MODE); } public static void checkPrivilege(ExecutionPlan executionPlan, 
ExecutionContext executionContext) {
@@ -68,12 +80,47 @@ public static void checkPrivilege(ExecutionPlan executionPlan, ExecutionContext
         }
     }
 
+    public static void checkLBACColumnAccess(ExecutionPlan executionPlan, ExecutionContext executionContext) {
+        if (!executionContext.getParamManager().getBoolean(ConnectionParams.ENABLE_LBAC)) {
+            return;
+        }
+
+        // Check whether any of the accessed tables carries a security policy.
+        Set<Pair<String, String>> accessTables =
+            PlanManagerUtil.getTableSetFromAst(executionContext.getFinalPlan().getAst());
+        if (!LBACPrivilegeCheckUtils.isNeedLBACCheck(accessTables, executionContext.getSchemaName())) {
+            return;
+        }
+
+        List<SQLStatement> stmtList = FastsqlUtils.parseSql(executionContext.getSql().toString());
+        SQLStatement stmt = stmtList.get(0);
+        DrdsColumnAccessCollector collector = new DrdsColumnAccessCollector(executionContext.getSchemaName());
+        stmt.accept(collector);
+        PolarAccount account = executionContext.getPrivilegeContext().getPolarUserInfo().getAccount();
+        for (ColumnPrivilegeVerifyItem item : collector.getAccessColumnVerifyItems()) {
+            boolean success = LBACPrivilegeCheckUtils.checkColumnRW(
+                account, item.getDb(), item.getTable(), Collections.singleton(item.getColumn()),
+                item.getPrivilegePoint() == PrivilegePoint.SELECT);
+            if (!success) {
+                throw new LBACException("check lbac privilege failed on column");
+            }
+        }
+
+    }
+
     public static void checkPrivilege(String db, String tb, PrivilegePoint priv, ExecutionContext executionContext) {
         if (executionContext.isPrivilegeMode()) {
             verifyPrivilege(db, tb, priv, executionContext);
         }
     }
 
+    public static void checkInstancePrivilege(PrivilegePoint priv, ExecutionContext executionContext) {
+        if (executionContext.isPrivilegeMode()) {
+            executionContext.getPrivilegeContext().setSchema(null);
+            verifyPrivilege(null, null, priv, executionContext);
+        }
+    }
+
     private static void verifyPrivilege(ExecutionPlan executionPlan, ExecutionContext executionContext) {
         // verify privilege
         if (executionPlan.getPrivilegeVerifyItems() != null) {
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ReloadUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ReloadUtils.java
index abf84b31d..5a53ba07c 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ReloadUtils.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ReloadUtils.java
@@ -25,8 +25,10 @@
 import com.alibaba.polardbx.group.jdbc.TGroupDataSource;
 import com.alibaba.polardbx.group.utils.VariableProxy;
 import com.alibaba.polardbx.optimizer.OptimizerContext;
+import com.alibaba.polardbx.optimizer.config.table.GsiMetaManager;
 import com.alibaba.polardbx.optimizer.config.table.statistic.inf.SystemTableTableStatistic;
 import com.alibaba.polardbx.optimizer.view.SystemTableView;
+import com.alibaba.polardbx.rule.TddlRule;
 
 import javax.sql.DataSource;
 
@@ -46,12 +48,13 @@ public static void reloadDataSources(ExecutorContext executorContext, OptimizerC
         }
         executorContext.getSequenceManager().destroy();
         optimizerContext.getRuleManager().destroy();
-        // TODO yuehan check this
-//        GsiMetaManager.invalidateCache(executorContext.getTopologyHandler().getAppName());
-        SystemTableTableStatistic.invalidateAll();
-//        SystemTableColumnStatistic.invalidateAll();
+        executorContext.getSequenceManager().destroy();
+        optimizerContext.getRuleManager().destroy();
+        GsiMetaManager.invalidateCache(executorContext.getTopologyHandler().getAppName());
         SystemTableView.invalidateAll();
+        TddlRule tddlRule = optimizerContext.getRuleManager().getTddlRule();
+
executorContext.getTopologyHandler().reload(); executorContext.getSequenceManager().init(); @@ -74,6 +77,6 @@ public static void reloadDataSources(ExecutorContext executorContext, OptimizerC } public enum ReloadType { - USERS, SCHEMA, DATASOURCES, FILESTORAGE, PROCEDURES, FUNCTIONS, JAVA_FUNCTIONS, STATISTICS; + USERS, SCHEMA, DATASOURCES, FILESTORAGE, PROCEDURES, FUNCTIONS, JAVA_FUNCTIONS, STATISTICS, COLUMNARMANAGER; } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/RowSet.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/RowSet.java index 17e9fd6be..912c307ae 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/RowSet.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/RowSet.java @@ -66,7 +66,8 @@ public List> distinctRowSetWithoutNull(DistinctWriter writer) { public List> distinctRowSetWithoutNullThenRemoveSameRow(DistinctWriter writer, Mapping setColumnTargetMapping, Mapping setColumnSourceMapping, - List setColumnMetas) { + List setColumnMetas, + boolean checkJsonByStringCompare) { List> distinctRows = distinctRowSetCache.computeIfAbsent(writer, t -> groupByColumns(rows, metas, writer.getGroupingMapping(), true)); this.sameRowCount = 0; @@ -76,7 +77,7 @@ public List> distinctRowSetWithoutNullThenRemoveSameRow(DistinctWri final List sources = Mappings.permute(row, setColumnSourceMapping); final GroupKey targetKey = new GroupKey(targets.toArray(), setColumnMetas); final GroupKey sourceKey = new GroupKey(sources.toArray(), setColumnMetas); - if (!targetKey.equalsForUpdate(sourceKey)) { + if (!targetKey.equalsForUpdate(sourceKey, checkJsonByStringCompare)) { changedValues.add(row); } else { sameRowCount++; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ScalarSubqueryExecHelper.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ScalarSubqueryExecHelper.java index 2dd695789..980eb403e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ScalarSubqueryExecHelper.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/ScalarSubqueryExecHelper.java @@ -24,6 +24,7 @@ /** * Use to dynamic exec scalar subquery + * * @author chenghui.lch */ public class ScalarSubqueryExecHelper implements IScalarSubqueryExecHelper { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/SchemaMetaUtil.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/SchemaMetaUtil.java index 9764bdc4b..bde5b247f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/SchemaMetaUtil.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/SchemaMetaUtil.java @@ -17,6 +17,7 @@ package com.alibaba.polardbx.executor.utils; import com.alibaba.polardbx.executor.common.ExecutorContext; +import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlJobManager; import com.alibaba.polardbx.executor.gsi.CheckerManager; import com.alibaba.polardbx.executor.gsi.GsiBackfillManager; import com.alibaba.polardbx.executor.statistic.entity.PolarDbXSystemTableColumnStatistic; @@ -25,6 +26,8 @@ import com.alibaba.polardbx.gms.listener.impl.MetaDbConfigManager; import com.alibaba.polardbx.gms.listener.impl.MetaDbDataIdBuilder; import com.alibaba.polardbx.gms.metadb.misc.SchemaInfoCleaner; +import com.alibaba.polardbx.gms.metadb.table.BaselineInfoAccessor; +import com.alibaba.polardbx.gms.metadb.table.ColumnarTableStatus; import 
com.alibaba.polardbx.gms.metadb.table.TableInfoManager; import com.alibaba.polardbx.gms.metadb.table.TablesRecord; import com.alibaba.polardbx.gms.scheduler.DdlPlanAccessor; @@ -32,8 +35,6 @@ import com.alibaba.polardbx.gms.tablegroup.TableGroupUtils; import com.alibaba.polardbx.gms.topology.SchemaMetaCleaner; import com.alibaba.polardbx.gms.util.MetaDbLogUtil; -import com.alibaba.polardbx.optimizer.planmanager.PolarDbXSystemTableBaselineInfo; -import com.alibaba.polardbx.optimizer.planmanager.PolarDbXSystemTablePlanInfo; import com.alibaba.polardbx.optimizer.view.PolarDbXSystemTableView; import java.sql.Connection; @@ -59,6 +60,7 @@ public static void cleanupSchemaMeta(String schemaName, Connection metaDbConn) { TableInfoManager tableInfoManager = new TableInfoManager(); SchemaInfoCleaner schemaInfoCleaner = new SchemaInfoCleaner(); DdlPlanAccessor ddlPlanAccessor = new DdlPlanAccessor(); + BaselineInfoAccessor baselineInfoAccessor = new BaselineInfoAccessor(false); try { assert metaDbConn != null; @@ -66,10 +68,13 @@ public static void cleanupSchemaMeta(String schemaName, Connection metaDbConn) { tableInfoManager.setConnection(metaDbConn); schemaInfoCleaner.setConnection(metaDbConn); ddlPlanAccessor.setConnection(metaDbConn); + baselineInfoAccessor.setConnection(metaDbConn); // If the schema has been dropped, then we have to do some cleanup. String tableListDataId = MetaDbDataIdBuilder.getTableListDataId(schemaName); MetaDbConfigManager.getInstance().unregister(tableListDataId, metaDbConn); + String columnarTableListDataId = MetaDbDataIdBuilder.getColumnarTableListDataId(schemaName); + MetaDbConfigManager.getInstance().unregister(columnarTableListDataId, metaDbConn); List records = tableInfoManager.queryTables(schemaName); for (TablesRecord record : records) { @@ -78,17 +83,19 @@ public static void cleanupSchemaMeta(String schemaName, Connection metaDbConn) { MetaDbConfigManager.getInstance().unregister(tableDataId, metaDbConn); } + tableInfoManager.updateColumnarTableStatusBySchema(schemaName, ColumnarTableStatus.DROP.name()); + tableInfoManager.removeAll(schemaName); schemaInfoCleaner.removeAll(schemaName); + DdlJobManager.cleanUpArchiveSchema(schemaName); PolarDbXSystemTableView.deleteAll(schemaName, metaDbConn); new PolarDbXSystemTableLogicalTableStatistic().deleteAll(schemaName, metaDbConn); new PolarDbXSystemTableColumnStatistic().deleteAll(schemaName, metaDbConn); - PolarDbXSystemTableBaselineInfo.deleteAll(schemaName); - PolarDbXSystemTablePlanInfo.deleteAll(schemaName); + baselineInfoAccessor.deleteBySchema(schemaName); GsiBackfillManager.deleteAll(schemaName, metaDbConn); CheckerManager.deleteAll(schemaName, metaDbConn); @@ -102,6 +109,7 @@ public static void cleanupSchemaMeta(String schemaName, Connection metaDbConn) { } finally { tableInfoManager.setConnection(null); schemaInfoCleaner.setConnection(null); + baselineInfoAccessor.setConnection(null); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/StandardToEnterpriseEditionUtil.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/StandardToEnterpriseEditionUtil.java new file mode 100644 index 000000000..61921b093 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/StandardToEnterpriseEditionUtil.java @@ -0,0 +1,422 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.utils; + +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.jdbc.MasterSlave; +import com.alibaba.polardbx.common.model.Group; +import com.alibaba.polardbx.common.utils.AddressUtils; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.druid.sql.ast.SQLCommentHint; +import com.alibaba.polardbx.druid.sql.ast.SQLExpr; +import com.alibaba.polardbx.druid.sql.ast.expr.SQLCharExpr; +import com.alibaba.polardbx.druid.sql.ast.expr.SQLIdentifierExpr; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLAssignItem; +import com.alibaba.polardbx.druid.sql.ast.statement.SQLTableElement; +import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.MySqlPrimaryKey; +import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlCreateTableStatement; +import com.alibaba.polardbx.executor.common.ExecutorContext; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlHelper; +import com.alibaba.polardbx.executor.spi.IGroupExecutor; +import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.gms.ha.HaSwitchParams; +import com.alibaba.polardbx.gms.ha.impl.StorageHaManager; +import com.alibaba.polardbx.gms.locality.LocalityDesc; +import com.alibaba.polardbx.gms.topology.CreateDbInfo; +import com.alibaba.polardbx.gms.topology.DbGroupInfoManager; +import com.alibaba.polardbx.gms.topology.DbGroupInfoRecord; +import com.alibaba.polardbx.gms.topology.DbTopologyManager; +import com.alibaba.polardbx.gms.topology.StorageInfoRecord; +import com.alibaba.polardbx.gms.util.GmsJdbcUtil; +import com.alibaba.polardbx.gms.util.GroupInfoUtil; +import com.alibaba.polardbx.gms.util.JdbcUtil; +import com.alibaba.polardbx.group.jdbc.TGroupDataSource; +import com.alibaba.polardbx.optimizer.PlannerContext; +import com.alibaba.polardbx.optimizer.config.table.SchemaManager; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.planner.ExecutionPlan; +import com.alibaba.polardbx.optimizer.core.planner.Planner; +import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalShow; +import com.alibaba.polardbx.optimizer.core.row.Row; +import com.alibaba.polardbx.optimizer.parse.FastsqlUtils; +import com.alibaba.polardbx.repo.mysql.handler.LogicalShowTablesMyHandler; +import io.grpc.netty.shaded.io.netty.util.internal.StringUtil; +import org.apache.calcite.sql.SqlShowTables; +import org.apache.calcite.sql.parser.SqlParserPos; +import org.apache.commons.collections.list.TreeList; + +import java.math.BigInteger; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import static 
com.alibaba.polardbx.common.constants.SequenceAttribute.AUTO_SEQ_PREFIX;
+
+/**
+ * Created by zhuqiwei.
+ *
+ * @author zhuqiwei
+ */
+public class StandardToEnterpriseEditionUtil {
+
+    protected static Connection buildJdbcConnectionByInstantId(String instantId, String phyDb) {
+        HaSwitchParams haSwitchParams = StorageHaManager.getInstance().getStorageHaSwitchParams(instantId);
+        Pair<String, Integer> ipAndPort = AddressUtils.getIpPortPairByAddrStr(haSwitchParams.curAvailableAddr);
+        String ip = ipAndPort.getKey();
+        Integer port = ipAndPort.getValue();
+        String user = haSwitchParams.userName;
+        String passwdEnc = haSwitchParams.passwdEnc;
+        String storageConnProps = haSwitchParams.storageConnPoolConfig.connProps;
+
+        Map<String, String> connPropsMap = GmsJdbcUtil.getPropertiesMapFromAtomConnProps(storageConnProps);
+        String socketTimeoutStrOfConn = connPropsMap.get("socketTimeout");
+        long socketTimeoutValOfConn = socketTimeoutStrOfConn != null ? Long.valueOf(socketTimeoutStrOfConn) : -1;
+        String connProps = GmsJdbcUtil
+            .getJdbcConnPropsFromPropertiesMap(GmsJdbcUtil.getDefaultConnPropertiesForGroup(socketTimeoutValOfConn));
+
+        return JdbcUtil.buildJdbcConnection(ip, port, phyDb, user, passwdEnc, connProps);
+    }
+
+    // Get a connection to the physical database behind the imported logical database.
+    protected static Connection buildJdbcConnectionOnImportedDatabase(String logicalDatabase) throws SQLException {
+        DbGroupInfoManager dbGroupInfoManager = DbGroupInfoManager.getInstance();
+        List<DbGroupInfoRecord> groupInfoRecords = dbGroupInfoManager.queryGroupInfoBySchema(logicalDatabase);
+        if (groupInfoRecords.size() != 1) {
+            throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, String.format(
+                "database [%s] is not an imported database", logicalDatabase
+            ));
+        }
+
+        String groupName = groupInfoRecords.get(0).groupName;
+        ExecutorContext executorContext = ExecutorContext.getContext(logicalDatabase);
+        if (executorContext != null) {
+            IGroupExecutor groupExecutor = executorContext.getTopologyHandler().get(groupName);
+            if (groupExecutor != null && groupExecutor.getDataSource() instanceof TGroupDataSource) {
+                TGroupDataSource dataSource = (TGroupDataSource) groupExecutor.getDataSource();
+                return dataSource.getConnection(MasterSlave.MASTER_ONLY);
+            }
+        }
+        throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, String.format(
+            "get physical connection failed, database [%s]", logicalDatabase
+        ));
+    }
+
+    public static String queryPhyDbNameByLogicalDbName(String logicalDb) {
+        DbGroupInfoManager dbGroupInfoManager = DbGroupInfoManager.getInstance();
+        List<DbGroupInfoRecord> groupInfoRecords = dbGroupInfoManager.queryGroupInfoBySchema(logicalDb);
+        if (groupInfoRecords.size() != 1) {
+            throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, String.format(
+                "database [%s] is not an imported database", logicalDb
+            ));
+        }
+        return groupInfoRecords.get(0).phyDbName;
+    }
+
+    public static Map<String, String> queryDatabaseSchemata(String instantId, String phyDb) {
+        final String querySql = "select * from information_schema.SCHEMATA where schema_name= '%s'";
+
+        Map<String, String> result = new TreeMap<>(String::compareToIgnoreCase);
+
+        String sql = String.format(querySql, phyDb);
+        try (Connection connection = buildJdbcConnectionByInstantId(instantId, "information_schema");
+            Statement stmt = connection.createStatement();
+            ResultSet rs = stmt.executeQuery(sql)) {
+
+            while (rs.next()) {
+                String schemaName = rs.getString("SCHEMA_NAME");
+                String charset = rs.getString("DEFAULT_CHARACTER_SET_NAME");
+                String collate = rs.getString("DEFAULT_COLLATION_NAME");
+                String encryption = null;
+
+                // only for MySQL 8.0
+                try {
+                    if (rs.findColumn("DEFAULT_ENCRYPTION") > 0) {
+                        encryption = rs.getString("DEFAULT_ENCRYPTION");
+                    }
+                } catch (SQLException e) {
+                    // ignore: the DEFAULT_ENCRYPTION column is absent before MySQL 8.0
+                }
+                result.put("SCHEMA_NAME", schemaName);
+                result.put("DEFAULT_CHARACTER_SET_NAME", charset);
+                result.put("DEFAULT_COLLATION_NAME", collate);
+                result.put("DEFAULT_ENCRYPTION", encryption);
+            }
+        } catch (Exception ex) {
+            throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR,
+                String.format("failed to query information from instance [%s]", instantId));
+        }
+
+        return result;
+    }
+
+    public static Set<String> queryPhysicalTableListFromPhysicalDabatase(String instantId, String phyDb) {
+        final String querySql = "show tables";
+
+        List<String> result = new ArrayList<>();
+        try (Connection conn = buildJdbcConnectionByInstantId(instantId, phyDb);
+            Statement stmt = conn.createStatement();
+            ResultSet rs = stmt.executeQuery(querySql)
+        ) {
+            while (rs.next()) {
+                String phyTableName = rs.getString(1);
+                result.add(phyTableName);
+            }
+        } catch (Exception ex) {
+            throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR,
+                String.format("failed to query tables from instance [%s], phyDb [%s]", instantId, phyDb));
+        }
+
+        Set<String> st = new TreeSet<>(String::compareToIgnoreCase);
+        st.addAll(result.stream().filter(tb -> !tb.startsWith("__drds_")).collect(Collectors.toSet()));
+        return st;
+    }
+
+    public static Set<String> getTableNamesFromLogicalDatabase(String schemaName, ExecutionContext executionContext) {
+        ExecutionContext copiedContext = executionContext.copy();
+        copiedContext.setSchemaName(schemaName);
+        SqlShowTables sqlShowTables =
+            SqlShowTables.create(SqlParserPos.ZERO, false, null, schemaName, null, null, null, null);
+        PlannerContext plannerContext = PlannerContext.fromExecutionContext(copiedContext);
+        ExecutionPlan showTablesPlan = Planner.getInstance().getPlan(sqlShowTables, plannerContext);
+        LogicalShow logicalShowTables = (LogicalShow) showTablesPlan.getPlan();
+
+        IRepository sourceRepo = ExecutorContext
+            .getContext(schemaName)
+            .getTopologyHandler()
+            .getRepositoryHolder()
+            .get(Group.GroupType.MYSQL_JDBC.toString());
+        LogicalShowTablesMyHandler logicalShowTablesMyHandler = new LogicalShowTablesMyHandler(sourceRepo);
+
+        Cursor showTablesCursor =
+            logicalShowTablesMyHandler.handle(logicalShowTables, copiedContext);
+
+        Set<String> tables = new TreeSet<>(String::compareToIgnoreCase);
+        Row showTablesResult = null;
+        while ((showTablesResult = showTablesCursor.next()) != null) {
+            if (showTablesResult.getColNum() >= 1 && showTablesResult.getString(0) != null) {
+                tables.add(showTablesResult.getString(0));
+            } else {
+                throw new TddlRuntimeException(ErrorCode.ERR_INVALID_DDL_PARAMS,
+                    "failed to get table names from the reference database.");
+            }
+        }
+        return tables;
+    }
+
+    public static Set<String> queryTableWhichHasSequence(String schemaName, ExecutionContext executionContext) {
+        final String querySql = "show sequences";
+        List<Map<String, Object>> result = DdlHelper.getServerConfigManager()
+            .executeQuerySql(
+                querySql,
+                schemaName,
+                null
+            );
+
+        Set<String> tables = new TreeSet<>(String::compareToIgnoreCase);
+        for (Map<String, Object> row : result) {
+            String seqName = (String) row.get("NAME");
+            if (!StringUtil.isNullOrEmpty(seqName) && seqName.startsWith(AUTO_SEQ_PREFIX)
+                && seqName.length() > AUTO_SEQ_PREFIX.length()) {
+                String tbName = seqName.substring(AUTO_SEQ_PREFIX.length());
+                tables.add(tbName);
+            }
+        }
+        return tables;
+    }
+
+    public static boolean logicalCheckTable(String logicalTableName, String logicalDatabase) {
+        final String querySql = "check table `%s`";
+        List<Map<String, Object>> result = DdlHelper.getServerConfigManager()
.executeQuerySql( + String.format(querySql, logicalTableName), + logicalDatabase, + null + ); + + for (Map checkResultRow : result) { + String msg_type = (String) checkResultRow.get("MSG_TYPE"); + String msg_text = (String) checkResultRow.get("MSG_TEXT"); + if ("status".equalsIgnoreCase(msg_type) && "OK".equalsIgnoreCase(msg_text)) { + continue; + } else { + return false; + } + } + + return true; + } + + public static String queryCreateTableSql(String instantId, String phyDb, String phyTable) { + final String querySql = "show create table `%s`"; + + String result = null; + try (Connection conn = buildJdbcConnectionByInstantId(instantId, phyDb); + Statement stmt = conn.createStatement(); + ResultSet rs = stmt.executeQuery(String.format(querySql, phyTable)); + ) { + while (rs.next()) { + result = rs.getString(2); + } + } catch (Exception ex) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + String.format("failed to query definition of table [%s]", phyTable)); + } + return result; + } + + public static CreateDbInfo initCreateDbInfo(String logicalDbName, String phyDbName, String charset, String collate, + Boolean encryption, + LocalityDesc locality, + Predicate localityFilter, + int dbType, + long socketTimeout, + boolean ifNotExistTag) { + CreateDbInfo createDbInfo = DbTopologyManager.initCreateDbInfoForImportDatabase( + logicalDbName, charset, collate, encryption, locality, localityFilter, dbType, ifNotExistTag, socketTimeout, + 1 + ); + + //rectify phyDb name on new topology + for (Map.Entry entry : createDbInfo.getGroupPhyDbMap().entrySet()) { + entry.setValue(phyDbName); + } + + return createDbInfo; + } + + public static String buildGroupName(String logicalDbName) { + String groupName = String.format(GroupInfoUtil.GROUP_NAME_FOR_IMPORTED_DATABASE, logicalDbName); + return groupName.toUpperCase(); + } + + public static String normalizePhyTableStructure(String tableName, String createTableSql, String localityExpr) { + MySqlCreateTableStatement createTableStatement = + (MySqlCreateTableStatement) FastsqlUtils.parseSql(createTableSql).get(0); + + //check if contains primary key + boolean findPk = false; + List tableElementList = createTableStatement.getTableElementList(); + for (SQLTableElement element : tableElementList) { + if (element instanceof MySqlPrimaryKey) { + findPk = true; + break; + } + } + if (!findPk) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + String.format("not found primary key in table `%s`, please add primary key manually", tableName)); + } + + //handle local partition + List hints = createTableStatement.getOptionHints(); + Iterator hintIter = hints.iterator(); + while (hintIter.hasNext()) { + SQLCommentHint hint = hintIter.next(); + if (hint.getText() != null && hint.getText().toLowerCase().contains("partition")) { + hintIter.remove(); + } + } + + //check engine innodb + List tableOptions = createTableStatement.getTableOptions(); + for (SQLAssignItem sqlAssignItem : tableOptions) { + SQLExpr target = sqlAssignItem.getTarget(); + SQLExpr value = sqlAssignItem.getValue(); + if (target instanceof SQLIdentifierExpr + && ((SQLIdentifierExpr) target).getName() != null + && ((SQLIdentifierExpr) target).getName().toLowerCase().contains("engine")) { + String engine = ((SQLIdentifierExpr) value).toString(); + if (engine != null && !"MYISAM".equalsIgnoreCase(engine) && !"innodb".equalsIgnoreCase(engine)) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + String.format("unsupported engine [%s]", engine)); + } + } + } + + //single table + 
createTableStatement.setSingle(true); + + //locality + SQLCharExpr sqlCharExpr = new SQLCharExpr(localityExpr); + createTableStatement.setLocality(sqlCharExpr); + + return createTableStatement.toString(); + } + + public static Map querySequenceValuesInPhysicalDatabase(String phyDatabase, + String logicalDatabase) { + final String querySql = + "select table_name, auto_increment from information_schema.TABLES where table_schema = '%s' and table_name not like '__drds_%%'"; + + String sql = String.format(querySql, phyDatabase); + + Map sequences = new TreeMap<>(String::compareToIgnoreCase); + try (Connection conn = buildJdbcConnectionOnImportedDatabase(logicalDatabase); + Statement statement = conn.createStatement(); + ResultSet rs = statement.executeQuery(sql); + ) { + while (rs.next()) { + String tableName = rs.getString(1); + BigInteger seq = (BigInteger) rs.getObject(2); + if (seq == null) { + continue; + } + sequences.put(tableName, seq); + } + } catch (Exception e) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, + String.format("failed to query auto_increment information from [%s]", phyDatabase)); + } + + return sequences; + } + + public static void updateSequence(Map sequences, String logicalDatabase, + Map result) { + final String sql = "alter sequence `%s` start with %s "; + final String autoIncrementPrefix = AUTO_SEQ_PREFIX; + + for (Map.Entry sequence : sequences.entrySet()) { + String tableName = sequence.getKey(); + BigInteger seqVal = sequence.getValue(); + String updateSql = String.format(sql, autoIncrementPrefix + tableName, seqVal); + try { + DdlHelper.getServerConfigManager() + .executeQuerySql( + updateSql, + logicalDatabase, + null + ); + } catch (Exception e) { + result.put(tableName, e.getMessage()); + } + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/StringUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/StringUtils.java index 594182a5d..cf5f5db85 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/StringUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/StringUtils.java @@ -54,4 +54,8 @@ public static String trim(String s, boolean leading, boolean trailing, String sp public static String funcNameToClassName(String funcName) { return funcName.substring(0, 1).toUpperCase() + funcName.substring(1).toLowerCase(); } + + public static String quote(String str) { + return str != null ? 
"'" + str + "'" : null; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/SubqueryApply.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/SubqueryApply.java index 584683e6d..0cc6e44d8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/SubqueryApply.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/SubqueryApply.java @@ -16,10 +16,6 @@ package com.alibaba.polardbx.executor.utils; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.utils.CalciteUtils; -import com.google.common.collect.Lists; -import com.google.common.util.concurrent.ListenableFuture; import com.alibaba.polardbx.common.properties.ConnectionProperties; import com.alibaba.polardbx.common.utils.ExecutorMode; import com.alibaba.polardbx.common.utils.GeneralUtil; @@ -27,15 +23,17 @@ import com.alibaba.polardbx.executor.ExecutorHelper; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.executor.cursor.Cursor; -import com.alibaba.polardbx.executor.cursor.impl.GatherCursor; -import com.alibaba.polardbx.executor.cursor.impl.MultiCursorAdapter; import com.alibaba.polardbx.executor.mpp.deploy.ServiceProvider; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.expression.calc.IExpression; import com.alibaba.polardbx.optimizer.core.rel.LogicalView; import com.alibaba.polardbx.optimizer.core.row.Row; +import com.alibaba.polardbx.optimizer.utils.CalciteUtils; import com.alibaba.polardbx.optimizer.utils.FunctionUtils; import com.alibaba.polardbx.optimizer.utils.RexUtils; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.ListenableFuture; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.RelVisitor; import org.apache.calcite.rel.core.CorrelationId; @@ -171,9 +169,6 @@ public void prepare() { if (useCursorExecutorMode(plan) && !hasApplyInLogicalView) { //不是apply子查询尽量用cursor cursor = ExecutorHelper.executeByCursor(plan, subQueryEc, false); - if (cursor instanceof MultiCursorAdapter) { - cursor = new GatherCursor(((MultiCursorAdapter) cursor).getSubCursors(), subQueryEc); - } } else { localMode = true; cursor = ExecutorHelper.executeLocal(plan, subQueryEc, false, false); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/SubqueryUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/SubqueryUtils.java index 110bf2e9f..7a0d32d27 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/SubqueryUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/SubqueryUtils.java @@ -16,13 +16,13 @@ package com.alibaba.polardbx.executor.utils; -import com.alibaba.polardbx.optimizer.utils.SubQueryDynamicParamUtils; import com.alibaba.polardbx.executor.chunk.Chunk; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.profiler.memory.MemoryStatAttribute; import com.alibaba.polardbx.optimizer.memory.MemoryPool; -import com.alibaba.polardbx.optimizer.memory.QueryMemoryPoolHolder; import com.alibaba.polardbx.optimizer.memory.MemoryType; +import com.alibaba.polardbx.optimizer.memory.QueryMemoryPoolHolder; +import com.alibaba.polardbx.optimizer.utils.SubQueryDynamicParamUtils; import com.alibaba.polardbx.statistics.RuntimeStatHelper; import 
com.alibaba.polardbx.statistics.RuntimeStatistics;
 import org.apache.calcite.rel.RelNode;
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/failpoint/FailPoint.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/failpoint/FailPoint.java
index edb69334f..382c2cdac 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/failpoint/FailPoint.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/failpoint/FailPoint.java
@@ -16,8 +16,8 @@
 package com.alibaba.polardbx.executor.utils.failpoint;
 
-import com.google.common.base.Joiner;
 import com.alibaba.polardbx.optimizer.context.ExecutionContext;
+import com.google.common.base.Joiner;
 import org.apache.commons.lang.math.RandomUtils;
 import org.apache.commons.lang3.StringUtils;
@@ -70,6 +70,16 @@ public static void inject(String key, Runnable runnable) {
         }
     }
 
+    /**
+     * Editor's note: the assert below evaluates {@code supply(runnable)} (running the
+     * runnable for its side effects) only when JVM assertions are enabled with -ea and
+     * both the key and the hint enable this fail point; the resulting AssertionError
+     * is deliberately swallowed.
+     */
+    public static void inject(String key, ExecutionContext executionContext, Runnable runnable) {
+        try {
+            assert
+                !(isKeyEnable(key) && isKeyEnableFromHint(key, executionContext))
+                : supply(runnable);
+        } catch (AssertionError e) {
+            // ignore
+        }
+    }
+
     public static void inject(String key, BiConsumer consumer) {
         try {
             assert
@@ -193,6 +203,18 @@ public static void injectException(String key) {
         });
     }
 
+    public static void injectExceptionFromHint(String key, ExecutionContext executionContext) {
+        injectFromHint(key, executionContext, () -> {
+            throw new RuntimeException("injected failure from " + key);
+        });
+    }
+
+    public static void injectExceptionFromHintWithKeyEnable(String key, ExecutionContext executionContext) {
+        inject(key, executionContext, () -> {
+            throw new RuntimeException("injected failure from " + key);
+        });
+    }
+
     public static void throwException() {
         throw new RuntimeException("injected failure");
     }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/failpoint/FailPointKey.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/failpoint/FailPointKey.java
index 8da408e60..3d66f9396 100644
--- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/failpoint/FailPointKey.java
+++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/failpoint/FailPointKey.java
@@ -98,6 +98,11 @@
      */
     String FP_PHYSICAL_DDL_TIMEOUT = "FP_PHYSICAL_DDL_TIMEOUT";
 
+    /**
+     * changeset catchup task sleep time (ms)
+     */
+    String FP_CATCHUP_TASK_SUSPEND = "FP_CATCHUP_TASK_SUSPEND";
+
     /**
      * set @FP_RANDOM_BACKFILL_EXCEPTION='30';
      * Fails randomly during backfill; the failure probability can be specified.
@@ -290,4 +295,43 @@
      */
     String FP_TTL_PAUSE = "FP_TTL_PAUSE";
 
+    /**
+     * Fail before creating tmp tables at status 0.
+     * No tmp table is created.
+     */
+    String FP_TRX_LOG_TB_FAILED_BEFORE_CREATE_TMP = "FP_TRX_LOG_TB_FAILED_BEFORE_CREATE_TMP";
+
+    /**
+     * Fail during creating tmp tables at status 0.
+     * At least one DN finishes creating tmp table.
+     * Status is still 0.
+     */
+    String FP_TRX_LOG_TB_FAILED_DURING_CREATE_TMP = "FP_TRX_LOG_TB_FAILED_DURING_CREATE_TMP";
+
+    /**
+     * Fail before switching tables at status 1.
+     */
+    String FP_TRX_LOG_TB_FAILED_BEFORE_SWITCH_TABLE = "FP_TRX_LOG_TB_FAILED_BEFORE_SWITCH_TABLE";
+
+    /**
+     * Fail during switching tables at status 1.
+     * At least one DN finishes switching tables.
+     * Status is still 1.
+     */
+    String FP_TRX_LOG_TB_FAILED_DURING_SWITCH_TABLE = "FP_TRX_LOG_TB_FAILED_DURING_SWITCH_TABLE";
+
+    /**
+     * Fail before dropping archive tables at status 2.
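+     * No archive table has been dropped yet; status is still 2.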
+ */ + String FP_TRX_LOG_TB_FAILED_BEFORE_DROP_TABLE = "FP_TRX_LOG_TB_FAILED_BEFORE_DROP_TABLE"; + + /** + * Fail during dropping archive tables at status 2. + * At least one DN finishes dropping archive tables. + * Status is still 2. + */ + String FP_TRX_LOG_TB_FAILED_DURING_DROP_TABLE = "FP_TRX_LOG_TB_FAILED_DURING_DROP_TABLE"; + + String FP_UPDATE_TABLES_VERSION_ERROR = "FP_UPDATE_TABLES_VERSION_ERROR"; + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/transaction/PhyOpTrxConnUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/transaction/PhyOpTrxConnUtils.java index 8fb57fa7f..3415def9c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/transaction/PhyOpTrxConnUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/transaction/PhyOpTrxConnUtils.java @@ -16,7 +16,6 @@ package com.alibaba.polardbx.executor.utils.transaction; -import com.alibaba.polardbx.common.jdbc.ITransactionPolicy; import com.alibaba.polardbx.group.jdbc.TGroupDataSource; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.utils.ITransaction; @@ -24,6 +23,8 @@ import java.sql.Connection; import java.sql.SQLException; +import static com.alibaba.polardbx.common.jdbc.ITransactionPolicy.TransactionClass.SUPPORT_SHARE_READVIEW_TRANSACTION; + /** * @author chenghui.lch */ @@ -37,11 +38,8 @@ public static Connection getConnection( ITransaction.RW rw, ExecutionContext ec, Long grpConnId - ) throws SQLException - { - ITransactionPolicy.TransactionClass tranClass = trans.getTransactionClass(); - - if (tranClass == ITransactionPolicy.TransactionClass.XA || tranClass == ITransactionPolicy.TransactionClass.TSO) { + ) throws SQLException { + if (trans.getTransactionClass().isA(SUPPORT_SHARE_READVIEW_TRANSACTION)) { return trans.getConnection(schemaName, groupName, grpConnId, diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/transaction/TransactionUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/transaction/TransactionUtils.java index 10a4f2bd8..ac7be9fe4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/transaction/TransactionUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/transaction/TransactionUtils.java @@ -21,20 +21,18 @@ import com.alibaba.polardbx.executor.common.ExecutorContext; import com.alibaba.polardbx.executor.sync.ISyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.group.jdbc.DataSourceWrapper; import com.alibaba.polardbx.group.jdbc.TGroupDataSource; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.rule.TddlRuleManager; -import com.google.common.collect.ImmutableList; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; -import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.TreeMap; /** * @author wuzhe */ @@ -78,7 +76,7 @@ public static TrxLookupSet getTrxLookupSet(Collection schemaNames) { } final List>> results = - SyncManagerHelper.sync(fetchAllTransSyncAction, schemaName); + SyncManagerHelper.sync(fetchAllTransSyncAction, schemaName, SyncScope.CURRENT_ONLY); for (final List> result : results) { if (result == null) { diff --git
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/transaction/TrxLookupSet.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/transaction/TrxLookupSet.java index b303dab1b..ff237c3c2 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/transaction/TrxLookupSet.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/utils/transaction/TrxLookupSet.java @@ -117,8 +117,8 @@ public Map getGroupConn2Tran() { * @return Triple(waiting trx, blocking trx, in which group we found these transactions) */ public Triple getWaitingAndBlockingTrx(Collection groupNameList, - long waiting, - long blocking) { + long waiting, + long blocking) { for (final String group : groupNameList) { final Long waitingTrxId = groupConn2Tran.get(new GroupConnPair(group, waiting)); final Long blockingTrxId = groupConn2Tran.get(new GroupConnPair(group, blocking)); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/BenchmarkVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/BenchmarkVectorizedExpression.java index 3e8415772..ddc289b97 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/BenchmarkVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/BenchmarkVectorizedExpression.java @@ -53,7 +53,7 @@ public void eval(EvaluationContext ctx) { } RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); - long[] res = ((LongBlock) outputVectorSlot).longArray(); + long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray(); if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/ExtractVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/ExtractVectorizedExpression.java new file mode 100644 index 000000000..5b82f1bd8 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/ExtractVectorizedExpression.java @@ -0,0 +1,251 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.vectorized; + +import com.alibaba.polardbx.common.utils.time.MySQLTimeConverter; +import com.alibaba.polardbx.common.utils.time.calculator.MySQLIntervalType; +import com.alibaba.polardbx.common.utils.time.core.MysqlDateTime; +import com.alibaba.polardbx.common.utils.time.core.OriginalDate; +import com.alibaba.polardbx.common.utils.time.core.TimeStorage; +import com.alibaba.polardbx.common.utils.time.parser.TimeParserFlags; +import com.alibaba.polardbx.executor.chunk.DateBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; +import com.alibaba.polardbx.executor.chunk.MutableChunk; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.chunk.ReferenceBlock; +import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +import java.sql.Types; + +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Const; +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; + +@ExpressionSignatures( + names = {"EXTRACT"}, + argumentTypes = {"Varchar", "Date"}, + argumentKinds = {Const, Variable} +) +public class ExtractVectorizedExpression extends AbstractVectorizedExpression { + private MySQLIntervalType intervalType; + private boolean isConstOperandNull; + + public ExtractVectorizedExpression(DataType outputDataType, + int outputIndex, VectorizedExpression[] children) { + super(outputDataType, outputIndex, children); + + Object operand0Value = ((LiteralVectorizedExpression) children[0]).getConvertedValue(); + String intervalStr = DataTypes.StringType.convertFrom(operand0Value); + + if (intervalStr != null) { + intervalType = MySQLIntervalType.of(intervalStr); + } + + isConstOperandNull = intervalType == null; + } + + @Override + public void eval(EvaluationContext ctx) { + evalChildren(ctx); + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); + + // output block + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + long[] output = (outputVectorSlot.cast(LongBlock.class)).longArray(); + + // date block + RandomAccessBlock inputVectorSlot = + chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType()); + + // when the interval unit is null + boolean[] outputNulls = outputVectorSlot.nulls(); + if (isConstOperandNull) { + outputVectorSlot.setHasNull(true); + for (int i = 0; i < batchSize; i++) { + outputNulls[i] = true; + } + return; + } + + // handle nulls + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[1].getOutputIndex()); + + if (inputVectorSlot instanceof DateBlock) { + long[] packedLongs = inputVectorSlot.cast(DateBlock.class).getPacked(); + + switch (intervalType) { + case INTERVAL_YEAR: { + // for year + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + long l = packedLongs[j]; + output[j] = (l >> 46) / 13; + } + } else { + for (int i = 0; i < batchSize; i++) { + long l = packedLongs[i]; + output[i] = (l >> 46) / 13; + } + } + } + break; + case INTERVAL_MONTH: { + // for month + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + long l = packedLongs[j]; + output[j] = (l >> 46) % 13; + } 
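The year/month fast paths here lean on the packed datetime layout written by `TimeStorage`: bits 46 and up hold `year * 13 + month`, and the five bits below them hold the day. A small decoding sketch under that assumption, with `packed` standing in for one element of `packedLongs`:

```java
long yearMonth = packed >> 46;          // year * 13 + month
long year = yearMonth / 13;             // e.g. 2024
long month = yearMonth % 13;            // 1..12
long day = (packed >> 41) % (1L << 5);  // 5-bit day field
```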
+ } else { + for (int i = 0; i < batchSize; i++) { + long l = packedLongs[i]; + output[i] = (l >> 46) % 13; + } + } + } + break; + case INTERVAL_DAY: { + // for day + final long modulo = 1L << 5; + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + long l = packedLongs[j]; + output[j] = (l >> 41) % modulo; + } + } else { + for (int i = 0; i < batchSize; i++) { + long l = packedLongs[i]; + output[i] = (l >> 41) % modulo; + } + } + } + break; + default: + // for other interval type, use non-vectorized method. + boolean isDate = MySQLIntervalType.isDate(intervalType); + + // normal processing for datetime value. + MysqlDateTime scratchValue = new MysqlDateTime(); + for (int i = 0; i < batchSize; i++) { + int j = isSelectionInUse ? sel[i] : i; + + // parse date value + long packedLong = packedLongs[j]; + TimeStorage.readDate(packedLong, scratchValue); + + // parse interval by sign and mysql datetime value. + int sign = isDate ? 1 : (scratchValue.isNeg() ? -1 : 1); + long result = doParseInterval(scratchValue, sign); + output[j] = result; + } + } + } else if (inputVectorSlot instanceof ReferenceBlock) { + // for other interval type, use non-vectorized method. + boolean isDate = MySQLIntervalType.isDate(intervalType); + + // normal processing for datetime value. + for (int i = 0; i < batchSize; i++) { + int j = isSelectionInUse ? sel[i] : i; + + // parse date value + Object timeObj = inputVectorSlot.elementAt(j); + MysqlDateTime t; + int sign; + if (isDate) { + t = DataTypeUtil.toMySQLDatetimeByFlags(timeObj, Types.TIMESTAMP, + TimeParserFlags.FLAG_TIME_FUZZY_DATE); + if (t == null) { + outputNulls[j] = true; + continue; + } + sign = 1; + } else { + t = DataTypeUtil.toMySQLDatetime(timeObj, Types.TIME); + if (t == null) { + outputNulls[j] = true; + continue; + } + sign = t.isNeg() ? -1 : 1; + } + + // parse interval by sign and mysql datetime value. 
+ long result = doParseInterval(t, sign); + output[j] = result; + } + } + + } + + private long doParseInterval(MysqlDateTime t, int sign) { + switch (intervalType) { + case INTERVAL_YEAR: + return t.getYear(); + case INTERVAL_YEAR_MONTH: + return t.getYear() * 100L + t.getMonth(); + case INTERVAL_QUARTER: + return (t.getMonth() + 2) / 3; + case INTERVAL_MONTH: + return t.getMonth(); + case INTERVAL_WEEK: { + long[] weekAndYear = MySQLTimeConverter.datetimeToWeek(t, TimeParserFlags.FLAG_WEEK_FIRST_WEEKDAY); + return weekAndYear[0]; + } + case INTERVAL_DAY: + return t.getDay(); + case INTERVAL_DAY_HOUR: + return (t.getDay() * 100L + t.getHour()) * sign; + case INTERVAL_DAY_MINUTE: + return (t.getDay() * 10000L + t.getHour() * 100L + t.getMinute()) * sign; + case INTERVAL_DAY_SECOND: + return (t.getDay() * 1000000L + (t.getHour() * 10000L + t.getMinute() * 100 + t.getSecond())) * sign; + case INTERVAL_HOUR: + return t.getHour() * sign; + case INTERVAL_HOUR_MINUTE: + return (t.getHour() * 100 + t.getMinute()) * sign; + case INTERVAL_HOUR_SECOND: + return (t.getHour() * 10000 + t.getMinute() * 100 + t.getSecond()) * sign; + case INTERVAL_MINUTE: + return t.getMinute() * sign; + case INTERVAL_MINUTE_SECOND: + return (t.getMinute() * 100 + t.getSecond()) * sign; + case INTERVAL_SECOND: + return t.getSecond() * sign; + case INTERVAL_MICROSECOND: + return t.getSecondPart() / 1000L * sign; + case INTERVAL_DAY_MICROSECOND: + return ((t.getDay() * 1000000L + t.getHour() * 10000L + t.getMinute() * 100 + t.getSecond()) * 1000000L + t + .getSecondPart() / 1000L) * sign; + case INTERVAL_HOUR_MICROSECOND: + return ((t.getHour() * 10000L + t.getMinute() * 100 + t.getSecond()) * 1000000L + t.getSecondPart() / 1000L) + * sign; + case INTERVAL_MINUTE_MICROSECOND: + return (((t.getMinute() * 100 + t.getSecond())) * 1000000L + t.getSecondPart() / 1000L) * sign; + case INTERVAL_SECOND_MICROSECOND: + return (t.getSecond() * 1000000L + t.getSecondPart() / 1000L) * sign; + default: + return 0; + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/IfVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/IfVectorizedExpression.java index 615587409..ab9c9f71b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/IfVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/IfVectorizedExpression.java @@ -60,7 +60,7 @@ public void eval(EvaluationContext ctx) { // handle when expression and yield then selections and else selections whenOperator.eval(ctx); - long[] whenVector = ((LongBlock) whenBlock).longArray(); + long[] whenVector = (whenBlock.cast(LongBlock.class)).longArray(); if (selectionInUse) { for (int i = 0; i < batchSize; i++) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/InValuesVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/InValuesVectorizedExpression.java new file mode 100644 index 000000000..194937566 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/InValuesVectorizedExpression.java @@ -0,0 +1,192 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
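For the combined units, `doParseInterval` concatenates the fields in fixed decimal positions, matching MySQL's `EXTRACT` semantics. A worked example from the `INTERVAL_DAY_MINUTE` branch:

```java
// EXTRACT(DAY_MINUTE FROM '2024-01-02 12:30:45')
//   = day * 10000 + hour * 100 + minute
long dayMinute = 2 * 10000L + 12 * 100L + 30;  // 21230
```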
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.vectorized; + +import com.alibaba.polardbx.optimizer.config.table.Field; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.google.common.base.Preconditions; +import it.unimi.dsi.fastutil.ints.IntOpenHashSet; +import it.unimi.dsi.fastutil.longs.LongOpenHashSet; +import org.apache.calcite.rex.RexLiteral; +import org.apache.calcite.rex.RexNode; + +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * Representation of the constant value list of an IN expression + */ +public class InValuesVectorizedExpression extends AbstractVectorizedExpression { + + private final int operandCount; + private final InValueSet inValueSet; + private boolean hasNull; + + public InValuesVectorizedExpression(DataType dataType, List rexLiteralList, int outputIndex) { + super(dataType, outputIndex, new VectorizedExpression[0]); + this.operandCount = rexLiteralList.size() - 1; + this.inValueSet = new InValueSet(dataType, operandCount); + this.hasNull = false; + + for (int operandIndex = 1; operandIndex <= operandCount; operandIndex++) { + Object value = ((RexLiteral) rexLiteralList.get(operandIndex)).getValue3(); + Object convertedValue = dataType.convertFrom(value); + if (convertedValue == null) { + hasNull = true; + } else { + inValueSet.add(convertedValue); + } + } + } + + /** + * @param rexLiteralList operand list; the IN values start from index 1 (index 0 is the probed column expression) + */ + public static InValuesVectorizedExpression from(List rexLiteralList, int outputIndex) { + Preconditions.checkArgument(rexLiteralList.size() > 1, + "Illegal in values, list size: " + rexLiteralList.size()); + RexLiteral rexLiteral = (RexLiteral) rexLiteralList.get(1); + Field field = new Field(rexLiteral.getType()); + return new InValuesVectorizedExpression(field.getDataType(), rexLiteralList, outputIndex); + } + + public int getOperandCount() { + return operandCount; + } + + public InValueSet getInValueSet() { + return inValueSet; + } + + public boolean hasNull() { + return hasNull; + } + + @Override + public void eval(EvaluationContext ctx) { + + } + + public static class InValueSet { + private final DataType dataType; + private final SetType setType; + private Set set = null; + private IntOpenHashSet intHashSet = null; + private LongOpenHashSet longHashSet = null; + + public InValueSet(DataType dataType, int capacity) { + this.dataType = dataType; + this.setType = getSetType(dataType); + switch (setType) { + case INT: + this.intHashSet = new IntOpenHashSet(capacity); + break; + case LONG: + this.longHashSet = new LongOpenHashSet(capacity); + break; + case OTHERS: + this.set = new HashSet<>(capacity); + break; + default: + throw new UnsupportedOperationException("Unsupported in value set type: " + setType); + } + } + + private SetType getSetType(DataType dataType) { + if (dataType == DataTypes.LongType) { + return SetType.LONG; + } + if (dataType == DataTypes.IntegerType) { + return SetType.INT; + } + return SetType.OTHERS; + } + + /** + * Adds a value without type checking; callers must pass a value already converted to this set's data type. + */ + public void add(Object value) { + switch (setType) { + case INT: +
intHashSet.add((int) value); + break; + case LONG: + longHashSet.add((long) value); + break; + case OTHERS: + set.add(value); + break; + default: + throw new UnsupportedOperationException("Unsupported in value set type: " + setType); + } + } + + public boolean contains(int value) { + switch (setType) { + case INT: + return intHashSet.contains(value); + case LONG: + return longHashSet.contains(value); + case OTHERS: + return set.contains(dataType.convertFrom(value)); + default: + throw new UnsupportedOperationException("Unsupported in value set type: " + setType); + } + } + + public boolean contains(long value) { + switch (setType) { + case INT: + if (value >= Integer.MIN_VALUE && value <= Integer.MAX_VALUE) { + return intHashSet.contains((int) value); + } else { + return false; + } + case LONG: + return longHashSet.contains(value); + case OTHERS: + return set.contains(dataType.convertFrom(value)); + default: + throw new UnsupportedOperationException("Unsupported in value set type: " + setType); + } + } + + public boolean contains(Object value) { + if (value == null) { + return false; + } + switch (setType) { + case INT: + return intHashSet.contains(dataType.convertFrom(value)); + case LONG: + return longHashSet.contains(dataType.convertFrom(value)); + case OTHERS: + return set.contains(dataType.convertFrom(value)); + default: + throw new UnsupportedOperationException("Unsupported in value set type: " + setType); + } + } + + enum SetType { + INT, + LONG, + OTHERS + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/LiteralVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/LiteralVectorizedExpression.java index e54f461e1..e15b4b6e6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/LiteralVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/LiteralVectorizedExpression.java @@ -16,6 +16,11 @@ package com.alibaba.polardbx.executor.vectorized; +import com.alibaba.polardbx.common.type.MySQLStandardFieldType; +import com.alibaba.polardbx.executor.chunk.Block; +import com.alibaba.polardbx.executor.chunk.BlockUtils; +import com.alibaba.polardbx.executor.chunk.IntegerBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; import com.alibaba.polardbx.executor.chunk.MutableChunk; import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; import com.alibaba.polardbx.optimizer.config.table.Field; @@ -51,9 +56,19 @@ public static LiteralVectorizedExpression from(RexLiteral rexLiteral, int output public void eval(EvaluationContext ctx) { MutableChunk chunk = ctx.getPreAllocatedChunk(); RandomAccessBlock outputSlot = chunk.slotIn(outputIndex); - int batchSize = chunk.batchSize(); + // lazy allocation + if (outputSlot == null || ((Block) outputSlot).getPositionCount() == 0) { + int positionCount = batchSize; + if (chunk.isSelectionInUse()) { + positionCount = Math.max(positionCount, chunk.selection().length); + } + outputSlot = BlockUtils.createBlock(outputDataType, positionCount); + outputSlot.resize(positionCount); + chunk.setSlotAt(outputSlot, outputIndex); + } + // if value is null, just update the valueIsNull array and hasNull field. 
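`InValueSet` keeps the probe side unboxed for the common integer cases and falls back to a generic `HashSet` otherwise. A hypothetical usage sketch for `x IN (1, 7, 42)` over a BIGINT column:

```java
InValuesVectorizedExpression.InValueSet set =
    new InValuesVectorizedExpression.InValueSet(DataTypes.LongType, 3);
set.add(1L);
set.add(7L);
set.add(42L);
boolean hit = set.contains(42L);            // LongOpenHashSet fast path, no boxing
boolean miss = set.contains((Object) null); // nulls never match
```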
if (value == null) { outputSlot.setHasNull(true); @@ -61,6 +76,42 @@ public void eval(EvaluationContext ctx) { return; } + MySQLStandardFieldType fieldType = outputDataType.fieldType(); + switch (fieldType) { + case MYSQL_TYPE_LONGLONG: + if (convertedValue instanceof Long && outputSlot instanceof LongBlock) { + long[] longArray = ((LongBlock) outputSlot).longArray(); + long longVal = ((Long) convertedValue).longValue(); + if (chunk.isSelectionInUse()) { + int[] selection = chunk.selection(); + for (int i = 0; i < batchSize; i++) { + longArray[selection[i]] = longVal; + } + } else { + Arrays.fill(longArray, longVal); + } + return; + } + break; + case MYSQL_TYPE_LONG: + if (convertedValue instanceof Integer && outputSlot instanceof IntegerBlock) { + int[] intArray = ((IntegerBlock) outputSlot).intArray(); + int intVal = ((Integer) convertedValue).intValue(); + if (chunk.isSelectionInUse()) { + int[] selection = chunk.selection(); + for (int i = 0; i < batchSize; i++) { + intArray[selection[i]] = intVal; + } + } else { + Arrays.fill(intArray, intVal); + } + return; + } + break; + default: + break; + } + if (chunk.isSelectionInUse()) { int[] selection = chunk.selection(); for (int i = 0; i < batchSize; i++) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/SubStrVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/SubStrVectorizedExpression.java new file mode 100644 index 000000000..7a1511f64 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/SubStrVectorizedExpression.java @@ -0,0 +1,204 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
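The new literal fast paths replace per-row writes with a bulk fill when the whole batch is live. A sketch of the effect for a constant BIGINT literal over a four-row batch (values are illustrative):

```java
long[] longArray = new long[4];
java.util.Arrays.fill(longArray, 7L);  // {7, 7, 7, 7} when no selection is in use
// With a selection vector, only the selected positions are written, so
// rows filtered out earlier keep their old (ignored) values.
```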
+ */ + +package com.alibaba.polardbx.executor.vectorized; + +import com.alibaba.polardbx.executor.chunk.MutableChunk; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.chunk.ReferenceBlock; +import com.alibaba.polardbx.executor.chunk.SliceBlock; +import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import io.airlift.slice.Slice; +import io.airlift.slice.Slices; + +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Const; +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; + +@ExpressionSignatures( + names = {"SUBSTRING"}, + argumentTypes = {"Varchar", "Long", "Long"}, + argumentKinds = {Variable, Const, Const} +) +public class SubStrVectorizedExpression extends AbstractVectorizedExpression { + private boolean shouldReturnNull; + private boolean shouldReturnEmpty; + private boolean useNegativeStart; + private int startPos; + private int subStrLen; + + public SubStrVectorizedExpression(DataType outputDataType, + int outputIndex, VectorizedExpression[] children) { + super(outputDataType, outputIndex, children); + + Object operand1Value = ((LiteralVectorizedExpression) children[1]).getConvertedValue(); + Object operand2Value = ((LiteralVectorizedExpression) children[2]).getConvertedValue(); + if (operand1Value == null || operand2Value == null) { + startPos = 0; + subStrLen = 0; + shouldReturnNull = true; + shouldReturnEmpty = false; + } else { + startPos = DataTypes.LongType.convertFrom(operand1Value).intValue(); + + // Assumes that the maximum length of a String is < INT_MAX32 + subStrLen = DataTypes.LongType.convertFrom(operand2Value).intValue(); + + // Negative or zero length, will return empty string. + if (subStrLen <= 0) { + shouldReturnNull = false; + shouldReturnEmpty = true; + } + + // handle start position + // In MySQL: start= ((start < 0) ? res->numchars() + start : start - 1); + if (startPos < 0) { + useNegativeStart = true; + } else if (startPos == 0) { + shouldReturnEmpty = true; + } else { + useNegativeStart = false; + startPos = startPos - 1; + } + } + + } + + @Override + public void eval(EvaluationContext ctx) { + children[0].eval(ctx); + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] selection = chunk.selection(); + + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + RandomAccessBlock leftInputVectorSlot = + chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); + + if (shouldReturnNull) { + boolean[] outputNulls = outputVectorSlot.nulls(); + outputVectorSlot.setHasNull(true); + for (int i = 0; i < batchSize; i++) { + outputNulls[i] = true; + } + return; + } + + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); + + Object[] objectArray = ((ReferenceBlock) outputVectorSlot).objectArray(); + if (shouldReturnEmpty) { + // Directly returning the empty slice. 
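Because both position and length are constants, the constructor folds MySQL's start-position rule (`start = start < 0 ? numchars + start : start - 1`) into `startPos`/`useNegativeStart` once. A standalone sketch of the same normalization, as a hypothetical `String`-based helper that assumes one byte per character:

```java
static String subStr(String s, int start, int len) {
    if (len <= 0 || start == 0) {
        return "";                                          // MySQL yields ''
    }
    int begin = start < 0 ? s.length() + start : start - 1; // 1-based -> 0-based
    if (begin < 0 || begin + 1 > s.length()) {
        return "";                                          // start out of bounds
    }
    return s.substring(begin, begin + Math.min(s.length() - begin, len));
}
// subStr("abcdef", 2, 3) -> "bcd"; subStr("abcdef", -2, 5) -> "ef"
```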
+ if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = selection[i]; + objectArray[j] = Slices.EMPTY_SLICE; + } + } else { + for (int i = 0; i < batchSize; i++) { + objectArray[i] = Slices.EMPTY_SLICE; + } + } + return; + } + + if (leftInputVectorSlot instanceof SliceBlock) { + SliceBlock sliceBlock = (SliceBlock) leftInputVectorSlot; + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = selection[i]; + + Slice slice = sliceBlock.getRegion(j); + Slice result; + + int start = useNegativeStart ? slice.length() + startPos : startPos; + int len = Math.min(slice.length() - start, subStrLen); + if (start < 0 || start + 1 > slice.length()) { + // check start pos out of bound + result = Slices.EMPTY_SLICE; + } else { + result = slice.slice(start, len); + } + + objectArray[j] = result; + } + } else { + for (int i = 0; i < batchSize; i++) { + Slice slice = sliceBlock.getRegion(i); + Slice result; + + int start = useNegativeStart ? slice.length() + startPos : startPos; + int len = Math.min(slice.length() - start, subStrLen); + if (start < 0 || start + 1 > slice.length()) { + // check start pos out of bound + result = Slices.EMPTY_SLICE; + } else { + result = slice.slice(start, len); + } + + objectArray[i] = result; + } + } + } else if (leftInputVectorSlot instanceof ReferenceBlock) { + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = selection[i]; + + Slice slice = ((Slice) leftInputVectorSlot.elementAt(j)); + if (slice == null) { + objectArray[j] = Slices.EMPTY_SLICE; + } else { + Slice result; + + int start = useNegativeStart ? slice.length() + startPos : startPos; + int len = Math.min(slice.length() - start, subStrLen); + if (start < 0 || start + 1 > slice.length()) { + // check start pos out of bound + result = Slices.EMPTY_SLICE; + } else { + result = slice.slice(start, len); + } + + objectArray[j] = result; + + } + } + } else { + for (int i = 0; i < batchSize; i++) { + Slice slice = ((Slice) leftInputVectorSlot.elementAt(i)); + if (slice == null) { + objectArray[i] = Slices.EMPTY_SLICE; + } else { + Slice result; + + int start = useNegativeStart ? 
slice.length() + startPos : startPos; + int len = Math.min(slice.length() - start, subStrLen); + if (start < 0 || start + 1 > slice.length()) { + // check start pos out of bound + result = Slices.EMPTY_SLICE; + } else { + result = slice.slice(start, len); + } + + objectArray[i] = result; + } + } + } + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/VectorizedExpressionUtils.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/VectorizedExpressionUtils.java index 7c0c3b831..3589e8103 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/VectorizedExpressionUtils.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/VectorizedExpressionUtils.java @@ -23,20 +23,11 @@ import com.alibaba.polardbx.executor.chunk.LongBlock; import com.alibaba.polardbx.executor.chunk.MutableChunk; import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; -import com.alibaba.polardbx.executor.vectorized.build.CommonExpressionNode; -import com.alibaba.polardbx.executor.vectorized.EvaluationContext; import com.google.common.base.Preconditions; import org.apache.commons.lang.StringUtils; import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; -import java.util.stream.Stream; public class VectorizedExpressionUtils { /** @@ -199,6 +190,27 @@ public static void setNulls(MutableChunk batch, int outputIndex) { } } + public static int intersect(int[] sel1, int sel1Size, int[] sel2, int sel2Size, int[] result) { + Preconditions.checkArgument(sel1 != null && sel1Size >= 0); + Preconditions.checkArgument(sel2 != null && sel2Size >= 0); + + int sel1Index = 0, sel2Index = 0, resultIndex = 0; + while (sel1Index < sel1Size && sel2Index < sel2Size) { + if (sel1[sel1Index] < sel2[sel2Index]) { + sel1Index++; + } else if (sel1[sel1Index] > sel2[sel2Index]) { + sel2Index++; + } else { + result[resultIndex] = sel2[sel2Index]; + resultIndex++; + sel2Index++; + sel1Index++; + } + } + + return resultIndex; + } + /** * Remove (subtract) members from an array and produce the results into * a difference array. @@ -260,6 +272,10 @@ public static String digest(VectorizedExpression vectorizedExpression) { return builder.toString(); } + public static boolean isConstantExpression(VectorizedExpression vectorizedExpression) { + return VectorizedExpressionConstantFinder.INSTANCE.visit(vectorizedExpression, 0); + } + private static class VectorizedExpressionPrinter { private static String PREFIX = StringUtils.repeat(" ", 3); @@ -317,6 +333,36 @@ private String printTreeNode(VectorizedExpression expression, int level) { } } + private static class VectorizedExpressionConstantFinder { + static final VectorizedExpressionConstantFinder INSTANCE = new VectorizedExpressionConstantFinder(); + + private static final int MAX_LEVEL = 1 << 10; + + public boolean visit(VectorizedExpression expression, int level) { + if (level >= MAX_LEVEL) { + // prevent from stack overflow. + return false; + } + boolean hasNoInputRef = true; + + VectorizedExpression[] children = expression.getChildren(); + + if (children == null || children.length == 0) { + // leaf node. 
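The new `intersect` helper combines two selection vectors in a single merge pass; both inputs must be sorted ascending. For example:

```java
int[] sel1 = {0, 2, 4, 6};
int[] sel2 = {2, 3, 6, 9};
int[] out = new int[Math.min(sel1.length, sel2.length)];
int n = VectorizedExpressionUtils.intersect(sel1, sel1.length, sel2, sel2.length, out);
// n == 2; out[0] == 2, out[1] == 6 -- rows that passed both predicates
```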
+ return !(expression instanceof InputRefVectorizedExpression); + } + + for (int i = 0; i < children.length; i++) { + hasNoInputRef &= visit(children[i], level + 1); + if (!hasNoInputRef) { + // short-circuit + return false; + } + } + return true; + } + } + public static List getInputIndex(VectorizedExpression vectorizedExpression) { List inputIndex = new ArrayList<>(); getInputIndex(vectorizedExpression, inputIndex); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/build/ExpressionRewriter.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/build/ExpressionRewriter.java index b8c75c4a7..ee5bc7a87 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/build/ExpressionRewriter.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/build/ExpressionRewriter.java @@ -42,7 +42,6 @@ import org.apache.calcite.rex.RexUtil; import org.apache.calcite.sql.fun.SqlStdOperatorTable; import org.apache.calcite.sql.type.SqlTypeName; -import org.apache.calcite.sql.type.SqlTypeUtil; import java.util.ArrayList; import java.util.Arrays; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/build/InputRefTypeChecker.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/build/InputRefTypeChecker.java index f8b433fbe..7f56adc1f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/build/InputRefTypeChecker.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/build/InputRefTypeChecker.java @@ -36,6 +36,7 @@ import org.apache.calcite.rex.RexTableInputRef; import org.apache.calcite.rex.RexVisitorImpl; +import java.util.ArrayList; import java.util.List; import java.util.stream.Collectors; @@ -45,9 +46,19 @@ public class InputRefTypeChecker extends RexVisitorImpl { private final List> inputTypes; + /** + * Collect all indexes in inputRefs. + */ + private final List inputRefIndexes; + public InputRefTypeChecker(List> inputTypes) { super(true); this.inputTypes = inputTypes; + this.inputRefIndexes = new ArrayList<>(); + } + + public List getInputRefIndexes() { + return inputRefIndexes; } @Override @@ -58,6 +69,10 @@ public RexNode visitInputRef(RexInputRef inputRef) { } DataType columnType = inputTypes.get(inputRefIndex); DataType inputRefType = DataTypeUtil.calciteToDrdsType(inputRef.getType()); + + // collect indexes in inputRefs. + inputRefIndexes.add(inputRefIndex); + // force the input ref and column to be consistent. 
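With the collector in place, a caller can normalize input-ref types and learn which input columns an expression actually touches in one visit. A sketch, where `inputTypes` and `rexNode` are assumed to come from the projection being compiled:

```java
InputRefTypeChecker checker = new InputRefTypeChecker(inputTypes);
RexNode checked = rexNode.accept(checker);
List<Integer> usedColumns = checker.getInputRefIndexes();  // e.g. [0, 3]
```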
if (!DataTypeUtil.equalsSemantically(columnType, inputRefType)) { // lossy transfer diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/build/Rex2VectorizedExpressionVisitor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/build/Rex2VectorizedExpressionVisitor.java index 867c72366..6e3f62d83 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/build/Rex2VectorizedExpressionVisitor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/build/Rex2VectorizedExpressionVisitor.java @@ -19,11 +19,8 @@ import com.alibaba.polardbx.common.charset.CollationName; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; -import com.alibaba.polardbx.common.jdbc.ParameterContext; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.utils.GeneralUtil; -import com.alibaba.polardbx.common.utils.Pair; -import com.alibaba.polardbx.common.utils.time.calculator.MySQLIntervalType; import com.alibaba.polardbx.executor.chunk.MutableChunk; import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; import com.alibaba.polardbx.executor.vectorized.BenchmarkVectorizedExpression; @@ -31,10 +28,12 @@ import com.alibaba.polardbx.executor.vectorized.CaseVectorizedExpression; import com.alibaba.polardbx.executor.vectorized.CoalesceVectorizedExpression; import com.alibaba.polardbx.executor.vectorized.EvaluationContext; +import com.alibaba.polardbx.executor.vectorized.InValuesVectorizedExpression; import com.alibaba.polardbx.executor.vectorized.InputRefVectorizedExpression; import com.alibaba.polardbx.executor.vectorized.LiteralVectorizedExpression; import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionRegistry; +import com.alibaba.polardbx.executor.vectorized.compare.FastInVectorizedExpression; import com.alibaba.polardbx.executor.vectorized.metadata.ArgumentInfo; import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionConstructor; import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionMode; @@ -72,6 +71,7 @@ import org.apache.calcite.rex.RexVisitorImpl; import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlOperator; +import org.apache.calcite.sql.fun.SqlCastFunction; import org.apache.calcite.sql.fun.SqlRowOperator; import org.apache.calcite.sql.fun.SqlStdOperatorTable; import org.apache.calcite.sql.type.IntervalSqlType; @@ -81,17 +81,14 @@ import org.jetbrains.annotations.NotNull; import java.util.ArrayList; -import java.util.Arrays; +import java.util.BitSet; import java.util.Collections; -import java.util.HashSet; import java.util.IdentityHashMap; import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.Set; import java.util.stream.Collectors; import java.util.stream.IntStream; -import java.util.stream.Stream; import static org.apache.calcite.sql.type.SqlTypeName.BIGINT; import static org.apache.calcite.sql.type.SqlTypeName.BIGINT_UNSIGNED; @@ -107,10 +104,6 @@ * Don't support sub query util now. */ public class Rex2VectorizedExpressionVisitor extends RexVisitorImpl { - private final static RelDataTypeFactory TYPE_FACTORY = - new TddlTypeFactoryImpl(TddlRelDataTypeSystemImpl.getInstance()); - private final static RexBuilder REX_BUILDER = new RexBuilder(TYPE_FACTORY); - /** * Special vectorized expressions. 
*/ @@ -121,11 +114,14 @@ public class Rex2VectorizedExpressionVisitor extends RexVisitorImpl callsInFilterMode = new IdentityHashMap<>(); private final ExecutionContext executionContext; private final List> outputDataTypes = new ArrayList<>(64); + + // collect output index of literal expression + private final List outputIndexOfLiteral = new ArrayList<>(); + private final boolean fallback; private final boolean enableCSE; private int currentOutputIndex; @@ -168,35 +168,6 @@ public Rex2VectorizedExpressionVisitor(ExecutionContext executionContext, int st this.expressionRewriter = new ExpressionRewriter(executionContext); } - private void setAllowConstantFold(boolean allowConstantFold) { - this.allowConstantFold = allowConstantFold; - } - - - - private RexCall rewrite(RexCall call, boolean isScalar) { - return expressionRewriter.rewrite(call, isScalar); - } - - private void registerFilterModeChildren(RexCall call) { - Preconditions.checkNotNull(call); - final RexCall parent = call; - // register the children that should be in filter mode. - if (call.op == TddlOperatorTable.CASE) { - // for case operator, we should set all when condition expressions to filter mode. - final int operandSize = call.getOperands().size(); - IntStream.range(0, operandSize) - .filter(i -> i % 2 == 0 && i != operandSize - 1) - .mapToObj(i -> call.getOperands().get(i)) - .filter(child -> canBindingToCommonFilterExpression(child)) - .forEach(child -> callsInFilterMode.put((RexCall) child, parent)); - } - } - - private boolean isInFilterMode(RexCall call) { - return callsInFilterMode.containsKey(call); - } - private static boolean isSpecialFunction(RexCall call) { SqlKind sqlKind = call.getKind(); return sqlKind == SqlKind.MINUS_PREFIX @@ -210,7 +181,9 @@ private static boolean isSpecialFunction(RexCall call) { */ private static String normalizeFunctionName(RexCall call) { if (call.op == TddlOperatorTable.CAST - || call.op == TddlOperatorTable.CONVERT) { + || call.op == TddlOperatorTable.CONVERT + || call.op instanceof SqlCastFunction + ) { SqlTypeName castToType = call.getType().getSqlTypeName(); String castFunctionName = VECTORIZED_CAST_FUNCTION_NAMES.get(castToType); if (castFunctionName != null) { @@ -246,6 +219,33 @@ static boolean canBindingToCommonFilterExpression(RexNode node) { return false; } + private void setAllowConstantFold(boolean allowConstantFold) { + this.allowConstantFold = allowConstantFold; + } + + private RexCall rewrite(RexCall call, boolean isScalar) { + return expressionRewriter.rewrite(call, isScalar); + } + + private void registerFilterModeChildren(RexCall call) { + Preconditions.checkNotNull(call); + final RexCall parent = call; + // register the children that should be in filter mode. + if (call.op == TddlOperatorTable.CASE) { + // for case operator, we should set all when condition expressions to filter mode. 
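+            // Operand layout: CASE WHEN c1 THEN v1 WHEN c2 THEN v2 ELSE v3 END
+            // arrives as operands [c1, v1, c2, v2, v3], so the even indexes that
+            // are not the last operand (0 and 2 here) are exactly the WHEN conditions.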
+ final int operandSize = call.getOperands().size(); + IntStream.range(0, operandSize) + .filter(i -> i % 2 == 0 && i != operandSize - 1) + .mapToObj(i -> call.getOperands().get(i)) + .filter(child -> canBindingToCommonFilterExpression(child)) + .forEach(child -> callsInFilterMode.put((RexCall) child, parent)); + } + } + + private boolean isInFilterMode(RexCall call) { + return callsInFilterMode.containsKey(call); + } + @NotNull private LiteralVectorizedExpression doConstantFold(RexCall call) { Rex2VectorizedExpressionVisitor constantVisitor = new Rex2VectorizedExpressionVisitor( @@ -282,8 +282,10 @@ private LiteralVectorizedExpression doConstantFold(RexCall call) { @Override public VectorizedExpression visitLiteral(RexLiteral literal) { + int outputIndex = addOutput(DataTypeUtil.calciteToDrdsType(literal.getType())); + outputIndexOfLiteral.add(outputIndex); return LiteralVectorizedExpression - .from(literal, addOutput(DataTypeUtil.calciteToDrdsType(literal.getType()))); + .from(literal, outputIndex); } @Override @@ -305,8 +307,16 @@ public VectorizedExpression visitCall(RexCall call) { return doConstantFold(call); } - if (!fallback && !fallback && !isSpecialFunction(call)) { - Optional expression = createVectorizedExpression(call); + RexCall rewrittenCall = rewrite(call, false); + if (isInConstCall(rewrittenCall)) { + Optional expression = createInVecExpr(rewrittenCall); + if (expression.isPresent()) { + return expression.get(); + } + } + + if (!fallback && !isSpecialFunction(call)) { + Optional expression = createVectorizedExpression(rewrittenCall); if (expression.isPresent()) { return expression.get(); } @@ -316,6 +326,74 @@ public VectorizedExpression visitCall(RexCall call) { return createGeneralVectorizedExpression(call); } + private Optional createInVecExpr(RexCall call) { + try { + ExpressionConstructor constructor = + ExpressionConstructor.of(FastInVectorizedExpression.class); + int outputIndex = -1; + boolean isInFilterMode = isInFilterMode(call); + DataType dataType = getOutputDataType(call); + + VectorizedExpression[] children = new VectorizedExpression[2]; + RexNode rexNode = call.operands.get(0); + RexNode literalNode1 = call.operands.get(1); + children[0] = rexNode.accept(this); + outputIndex = addOutput(DataTypeUtil.calciteToDrdsType(literalNode1.getType())); + children[1] = InValuesVectorizedExpression.from(call.operands, outputIndex); + + if (!isInFilterMode) { + outputIndex = addOutput(dataType); + } + + VectorizedExpression vecExpr = constructor.build(dataType, outputIndex, children); + + if (!isInFilterMode && !DataTypeUtil.equalsSemantically(vecExpr.getOutputDataType(), dataType)) { + throw new IllegalStateException(String + .format("Vectorized expression %s output type %s not equals to rex call type %s!", + vecExpr.getClass().getSimpleName(), vecExpr.getOutputDataType(), dataType)); + } + return Optional.of(vecExpr); + } catch (Exception e) { + throw GeneralUtil.nestedException("Failed to create IN vectorized expression", e); + } + } + + /** + * check all in values are constants and of same datatype + */ + private boolean isInConstCall(RexCall call) { + if (call.op != SqlStdOperatorTable.IN) { + return false; + } + + if (call.operands.size() <= MAX_CODEGEN_IN_NUMS + 1) { + // use old code path + return false; + } + + SqlTypeName typeName = null; + for (int i = 1; i < call.operands.size(); i++) { + RexNode rexNode = call.operands.get(i); + if (!(rexNode instanceof RexLiteral)) { + return false; + } + if (typeName == null) { + typeName = ((RexLiteral) 
rexNode).getTypeName(); + } else { + if (typeName != ((RexLiteral) rexNode).getTypeName()) { + return false; + } + } + } + + if (typeName == SqlTypeName.CHAR || typeName == SqlTypeName.VARCHAR) { + // FastInVectorizedExpression does not support collation compare + boolean compatible = executionContext.getParamManager().getBoolean(ConnectionParams.ENABLE_OSS_COMPATIBLE); + return !compatible; + } + return true; + } + @Override public VectorizedExpression visitFieldAccess(RexFieldAccess fieldAccess) { throw new IllegalArgumentException("Correlated variable not supported in vectorized expression!"); @@ -362,6 +440,7 @@ public AbstractScalarFunction createFunction(RexCall call, List operandTypes = operands.stream() .map(RexNode::getType) @@ -388,10 +467,8 @@ private VectorizedExpression createGeneralVectorizedExpression(RexCall call) { private Optional createVectorizedExpression(RexCall call) { Preconditions.checkNotNull(call); Optional> constructor; - // rewrite the RexNode Tree. - call = rewrite(call, false); - if (SPECIAL_VECTORIZED_EXPRESSION_MAPPING.containsKey(call.op)) { + if (SPECIAL_VECTORIZED_EXPRESSION_MAPPING.containsKey(call.getOperator())) { // special class binding. constructor = Optional.of( ExpressionConstructor.of( @@ -416,53 +493,7 @@ private Optional createVectorizedExpression(RexCall call) call.getOperands().stream().map(node -> node.accept(this)).toArray(VectorizedExpression[]::new); if (!isInFilterMode) { - boolean reused = false; - // reuse output vector - if (executionContext.getParamManager().getBoolean(ConnectionParams.ENABLE_REUSE_VECTOR) - && dataType instanceof DecimalType) { - for (int i = 0; i < children.length; i++) { - RexNode operand = call.getOperands().get(i); - VectorizedExpression child = children[i]; - if (operand instanceof RexCall - && getOutputDataType((RexCall) operand) instanceof DecimalType) { - // decimal call - // / \ - // decimal call other call - outputIndex = child.getOutputIndex(); - reused = true; - break; - } - } - } - - if (!reused) { - outputIndex = addOutput(dataType); - } - - } - - if (!isInFilterMode) { - boolean reused = false; - // reuse output vector - if (executionContext.getParamManager().getBoolean(ConnectionParams.ENABLE_REUSE_VECTOR) - && dataType instanceof DecimalType) { - for (int i = 0; i < children.length; i++) { - RexNode operand = call.getOperands().get(i); - VectorizedExpression child = children[i]; - if (operand instanceof RexCall - && getOutputDataType((RexCall) operand) instanceof DecimalType) { - // decimal call - // / \ - // decimal call other call - outputIndex = child.getOutputIndex(); - reused = true; - break; - } - } - } - if (!reused) { - outputIndex = addOutput(dataType); - } + outputIndex = addOutput(dataType); } try { @@ -596,4 +627,13 @@ private int addOutput(DataType outputDataType) { public List> getOutputDataTypes() { return Collections.unmodifiableList(outputDataTypes); } + + public BitSet getLiteralBitmap() { + BitSet literalBitmap = new BitSet(currentOutputIndex); + for (int i = 0; i < outputIndexOfLiteral.size(); i++) { + literalBitmap.set(outputIndexOfLiteral.get(i)); + } + return literalBitmap; + } + } \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/build/VectorizedExpressionBuilder.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/build/VectorizedExpressionBuilder.java index 3d7d6f34f..cb117c79f 100644 --- 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/build/VectorizedExpressionBuilder.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/build/VectorizedExpressionBuilder.java @@ -85,6 +85,9 @@ public static Pair buildVectorizedExpression MutableChunk preAllocatedChunk = MutableChunk.newBuilder(executionContext.getExecutorChunkLimit()) .addEmptySlots(inputTypes) .addEmptySlots(converter.getOutputDataTypes()) + .addChunkLimit(executionContext.getExecutorChunkLimit()) + .addOutputIndexes(new int[] {expr.getOutputIndex()}) + .addLiteralBitmap(converter.getLiteralBitmap()) .build(); return new Pair<>(expr, preAllocatedChunk); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/AbstractInIntegerColLongConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/AbstractInIntegerColLongConstVectorizedExpression.java index 900e02c9d..cc5811f9f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/AbstractInIntegerColLongConstVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/AbstractInIntegerColLongConstVectorizedExpression.java @@ -25,7 +25,6 @@ import com.alibaba.polardbx.executor.vectorized.LiteralVectorizedExpression; import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.optimizer.core.datatype.SliceType; public abstract class AbstractInIntegerColLongConstVectorizedExpression extends AbstractVectorizedExpression { protected final boolean[] operandIsNulls; @@ -85,7 +84,7 @@ public void eval(EvaluationContext ctx) { RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - long[] output = ((LongBlock) outputVectorSlot).longArray(); + long[] output = outputVectorSlot.cast(LongBlock.class).longArray(); if (anyOperandsNull()) { boolean[] outputNulls = outputVectorSlot.nulls(); @@ -96,7 +95,7 @@ public void eval(EvaluationContext ctx) { return; } - int[] intArray = ((IntegerBlock) leftInputVectorSlot).intArray(); + int[] intArray = leftInputVectorSlot.cast(IntegerBlock.class).intArray(); if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/AbstractInLongColLongConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/AbstractInLongColLongConstVectorizedExpression.java index 6c137f55a..b07ff0096 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/AbstractInLongColLongConstVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/AbstractInLongColLongConstVectorizedExpression.java @@ -16,7 +16,6 @@ package com.alibaba.polardbx.executor.vectorized.compare; -import com.alibaba.polardbx.executor.chunk.IntegerBlock; import com.alibaba.polardbx.executor.chunk.LongBlock; import com.alibaba.polardbx.executor.chunk.MutableChunk; import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; @@ -25,7 +24,6 @@ import com.alibaba.polardbx.executor.vectorized.LiteralVectorizedExpression; import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import 
com.alibaba.polardbx.optimizer.core.datatype.SliceType; public abstract class AbstractInLongColLongConstVectorizedExpression extends AbstractVectorizedExpression { protected final boolean[] operandIsNulls; @@ -85,7 +83,7 @@ public void eval(EvaluationContext ctx) { RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - long[] output = ((LongBlock) outputVectorSlot).longArray(); + long[] output = outputVectorSlot.cast(LongBlock.class).longArray(); if (anyOperandsNull()) { boolean[] outputNulls = outputVectorSlot.nulls(); @@ -96,7 +94,7 @@ public void eval(EvaluationContext ctx) { return; } - long[] longArray = ((LongBlock) leftInputVectorSlot).longArray(); + long[] longArray = leftInputVectorSlot.cast(LongBlock.class).longArray(); if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/AbstractInVarcharColCharConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/AbstractInVarcharColCharConstVectorizedExpression.java index 589607196..44d08c26c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/AbstractInVarcharColCharConstVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/AbstractInVarcharColCharConstVectorizedExpression.java @@ -23,6 +23,10 @@ import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; import com.alibaba.polardbx.executor.chunk.ReferenceBlock; import com.alibaba.polardbx.executor.chunk.SliceBlock; +import com.alibaba.polardbx.executor.chunk.columnar.CommonLazyBlock; +import com.alibaba.polardbx.executor.operator.scan.BlockDictionary; +import com.alibaba.polardbx.executor.operator.scan.impl.DictionaryMapping; +import com.alibaba.polardbx.executor.operator.scan.impl.MultiDictionaryMapping; import com.alibaba.polardbx.executor.vectorized.AbstractVectorizedExpression; import com.alibaba.polardbx.executor.vectorized.EvaluationContext; import com.alibaba.polardbx.executor.vectorized.LiteralVectorizedExpression; @@ -32,7 +36,13 @@ import com.alibaba.polardbx.optimizer.core.datatype.SliceType; import io.airlift.slice.Slice; +import java.util.ArrayList; import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; public abstract class AbstractInVarcharColCharConstVectorizedExpression extends AbstractVectorizedExpression { protected final CollationHandler collationHandler; @@ -41,6 +51,8 @@ public abstract class AbstractInVarcharColCharConstVectorizedExpression extends protected final Slice[] operands; protected Comparable[] operandSortKeys; + protected DictionaryMapping mapping; + public AbstractInVarcharColCharConstVectorizedExpression( int outputIndex, VectorizedExpression[] children) { @@ -52,16 +64,32 @@ public AbstractInVarcharColCharConstVectorizedExpression( this.operandIsNulls = new boolean[operandCount()]; this.operands = new Slice[operandCount()]; + Set dictSet = new HashSet<>(); + List dictList = new ArrayList<>(); for (int operandIndex = 1; operandIndex <= operandCount(); operandIndex++) { Object operand1Value = ((LiteralVectorizedExpression) children[operandIndex]).getConvertedValue(); - if (operand1Value == null) { + if (operand1Value == null) { operandIsNulls[operandIndex - 1] = true; operands[operandIndex - 1] = null; } else { operandIsNulls[operandIndex - 
1] = false; - operands[operandIndex - 1] = sliceType.convertFrom(operand1Value); + Slice operand = sliceType.convertFrom(operand1Value); + operands[operandIndex - 1] = operand; + + // collect dictionary id + if (!dictSet.contains(operand)) { + dictSet.add(operand); + dictList.add(operand); + } } } + + if (!dictSet.isEmpty()) { + mapping = new MultiDictionaryMapping(dictList); + } else { + mapping = null; + } + } abstract int operandCount(); @@ -115,7 +143,7 @@ public void eval(EvaluationContext ctx) { RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - long[] output = ((LongBlock) outputVectorSlot).longArray(); + long[] output = (outputVectorSlot.cast(LongBlock.class)).longArray(); if (anyOperandsNull()) { boolean[] outputNulls = outputVectorSlot.nulls(); @@ -126,30 +154,59 @@ public void eval(EvaluationContext ctx) { VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); } + // Try to use dictionary + BlockDictionary blockDictionary; + if (mapping != null + && !compatible + && (leftInputVectorSlot instanceof SliceBlock || leftInputVectorSlot instanceof CommonLazyBlock) + && (blockDictionary = leftInputVectorSlot.cast(SliceBlock.class).getDictionary()) != null) { + SliceBlock sliceBlock = leftInputVectorSlot.cast(SliceBlock.class); + + int[] reMapping = mapping.merge(blockDictionary); + + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + int dictId = sliceBlock.getDictId(j); + output[j] = (dictId >= 0 && reMapping[dictId] >= 0) + ? LongBlock.TRUE_VALUE + : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + int dictId = sliceBlock.getDictId(i); + output[i] = (dictId >= 0 && reMapping[dictId] >= 0) + ? LongBlock.TRUE_VALUE + : LongBlock.FALSE_VALUE; + } + } + + return; + } if (!compatible && leftInputVectorSlot instanceof SliceBlock) { - SliceBlock sliceBlock = (SliceBlock) leftInputVectorSlot; + SliceBlock sliceBlock = leftInputVectorSlot.cast(SliceBlock.class); // best case. if (operandCount() == 1) { if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { int j = sel[i]; - output[j] = sliceBlock.equals(j, (Slice) operandSortKeys[0]) ; + output[j] = sliceBlock.equals(j, (Slice) operandSortKeys[0]); } } else { for (int i = 0; i < batchSize; i++) { - output[i] = sliceBlock.equals(i, (Slice) operandSortKeys[0]) ; + output[i] = sliceBlock.equals(i, (Slice) operandSortKeys[0]); } } } else if (operandCount() == 2) { if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { int j = sel[i]; - output[j] = sliceBlock.anyMatch(j, (Slice) operandSortKeys[0], (Slice) operandSortKeys[1]) ; + output[j] = sliceBlock.anyMatch(j, (Slice) operandSortKeys[0], (Slice) operandSortKeys[1]); } } else { for (int i = 0; i < batchSize; i++) { - output[i] = sliceBlock.anyMatch(i, (Slice) operandSortKeys[0], (Slice) operandSortKeys[1]) ; + output[i] = sliceBlock.anyMatch(i, (Slice) operandSortKeys[0], (Slice) operandSortKeys[1]); } } } else if (operandCount() == 3) { @@ -180,7 +237,7 @@ public void eval(EvaluationContext ctx) { } else { if (leftInputVectorSlot instanceof SliceBlock) { // normal case. 
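The dictionary branch above reduces the per-row cost from a collation-aware string comparison to two array lookups: `merge` aligns the block's dictionary with the IN list once per block, and each row then probes `reMapping` by dictionary id. A fragment restating that shape (names are taken from the code above; `pos` is a hypothetical row position):

```java
int[] reMapping = mapping.merge(blockDictionary);  // once per block
int dictId = sliceBlock.getDictId(pos);            // per row
boolean matched = dictId >= 0 && reMapping[dictId] >= 0;
```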
- SliceBlock sliceBlock = (SliceBlock) leftInputVectorSlot; + SliceBlock sliceBlock = leftInputVectorSlot.cast(SliceBlock.class); if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/BetweenDateColCharConstCharConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/BetweenDateColCharConstCharConstVectorizedExpression.java index a38db01e9..a7d60bb7e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/BetweenDateColCharConstCharConstVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/BetweenDateColCharConstCharConstVectorizedExpression.java @@ -86,7 +86,7 @@ public void eval(EvaluationContext ctx) { RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - long[] output = ((LongBlock) outputVectorSlot).longArray(); + long[] output = (outputVectorSlot.cast(LongBlock.class)).longArray(); if (operand1IsNull || operand2IsNull) { boolean[] outputNulls = outputVectorSlot.nulls(); @@ -98,7 +98,7 @@ public void eval(EvaluationContext ctx) { } if (leftInputVectorSlot instanceof DateBlock) { - long[] array1 = ((DateBlock) leftInputVectorSlot).getPacked(); + long[] array1 = leftInputVectorSlot.cast(DateBlock.class).getPacked(); if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/BetweenDecimalColDecimalConstDecimalConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/BetweenDecimalColDecimalConstDecimalConstVectorizedExpression.java index 6f7a27915..b753d5d50 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/BetweenDecimalColDecimalConstDecimalConstVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/BetweenDecimalColDecimalConstDecimalConstVectorizedExpression.java @@ -29,15 +29,13 @@ import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import io.airlift.slice.Slice; -import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DECIMAL_MEMORY_SIZE; import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Const; import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; -@ExpressionSignatures(names = {"BETWEEN ASYMMETRIC"}, argumentTypes = {"Decimal", "Decimal", "Decimal"}, argumentKinds = {Variable, Const, Const}) +@ExpressionSignatures(names = {"BETWEEN ASYMMETRIC"}, argumentTypes = {"Decimal", "Decimal", "Decimal"}, + argumentKinds = {Variable, Const, Const}) public class BetweenDecimalColDecimalConstDecimalConstVectorizedExpression extends AbstractVectorizedExpression { private final boolean operand1IsNull; private final Decimal operand1; @@ -51,7 +49,7 @@ public BetweenDecimalColDecimalConstDecimalConstVectorizedExpression( super(DataTypes.LongType, outputIndex, children); Object operand1Value = ((LiteralVectorizedExpression) children[1]).getConvertedValue(); - if 
(operand1Value == null) { + if (operand1Value == null) { operand1IsNull = true; operand1 = Decimal.ZERO; } else { @@ -60,7 +58,7 @@ public BetweenDecimalColDecimalConstDecimalConstVectorizedExpression( } Object operand2Value = ((LiteralVectorizedExpression) children[2]).getConvertedValue(); - if (operand2Value == null) { + if (operand2Value == null) { operand2IsNull = true; operand2 = Decimal.ZERO; } else { @@ -81,7 +79,7 @@ public void eval(EvaluationContext ctx) { RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - long[] output = ((LongBlock) outputVectorSlot).longArray(); + long[] output = (outputVectorSlot.cast(LongBlock.class)).longArray(); DecimalStructure leftDec; DecimalStructure operand1Dec = operand1.getDecimalStructure(); @@ -94,7 +92,7 @@ public void eval(EvaluationContext ctx) { int j = sel[i]; // fetch left decimal value - leftDec = new DecimalStructure(((DecimalBlock) leftInputVectorSlot).getRegion(j)); + leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(j)); boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) >= 0; boolean b2 = FastDecimalUtils.compare(leftDec, operand2Dec) <= 0; @@ -104,7 +102,7 @@ public void eval(EvaluationContext ctx) { } else { for (int i = 0; i < batchSize; i++) { // fetch left decimal value - leftDec = new DecimalStructure(((DecimalBlock) leftInputVectorSlot).getRegion(i)); + leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(i)); boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) >= 0; boolean b2 = FastDecimalUtils.compare(leftDec, operand2Dec) <= 0; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/BetweenIntegerColLongConstLongConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/BetweenIntegerColLongConstLongConstVectorizedExpression.java index f5e447127..100bf957c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/BetweenIntegerColLongConstLongConstVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/BetweenIntegerColLongConstLongConstVectorizedExpression.java @@ -74,7 +74,7 @@ public void eval(EvaluationContext ctx) { RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - long[] output = ((LongBlock) outputVectorSlot).longArray(); + long[] output = outputVectorSlot.cast(LongBlock.class).longArray(); if (operand1IsNull || operand2IsNull) { boolean[] outputNulls = outputVectorSlot.nulls(); @@ -85,7 +85,7 @@ public void eval(EvaluationContext ctx) { return; } - int[] array1 = ((IntegerBlock) leftInputVectorSlot).intArray(); + int[] array1 = leftInputVectorSlot.cast(IntegerBlock.class).intArray(); if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/BetweenLongColLongConstLongConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/BetweenLongColLongConstLongConstVectorizedExpression.java index 88f7f7ba9..8bdaa5921 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/BetweenLongColLongConstLongConstVectorizedExpression.java +++ 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/BetweenLongColLongConstLongConstVectorizedExpression.java @@ -73,7 +73,7 @@ public void eval(EvaluationContext ctx) { RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - long[] output = ((LongBlock) outputVectorSlot).longArray(); + long[] output = outputVectorSlot.cast(LongBlock.class).longArray(); if (operand1IsNull || operand2IsNull) { boolean[] outputNulls = outputVectorSlot.nulls(); @@ -84,7 +84,7 @@ public void eval(EvaluationContext ctx) { return; } - long[] array1 = ((LongBlock) leftInputVectorSlot).longArray(); + long[] array1 = (leftInputVectorSlot.cast(LongBlock.class)).longArray(); if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/EQIntegerColCharConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/EQIntegerColCharConstVectorizedExpression.java new file mode 100644 index 000000000..bf6c50413 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/EQIntegerColCharConstVectorizedExpression.java @@ -0,0 +1,100 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.vectorized.compare; + +import com.alibaba.polardbx.executor.chunk.IntegerBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; +import com.alibaba.polardbx.executor.chunk.MutableChunk; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.operator.scan.impl.SingleDictionaryMapping; +import com.alibaba.polardbx.executor.vectorized.AbstractVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.EvaluationContext; +import com.alibaba.polardbx.executor.vectorized.LiteralVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; +import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Const; +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; + +@ExpressionSignatures(names = {"EQ", "EQUAL", "="}, argumentTypes = {"Integer", "Char"}, + argumentKinds = {Variable, Const}) +public class EQIntegerColCharConstVectorizedExpression extends AbstractVectorizedExpression { + + protected final int operand; + protected final boolean operandIsNull; + + public EQIntegerColCharConstVectorizedExpression( + int outputIndex, VectorizedExpression[] children) { + super(DataTypes.LongType, outputIndex, children); + + Object operand1Value = ((LiteralVectorizedExpression) children[1]).getConvertedValue(); + if (operand1Value == null) { + operandIsNull = true; + operand = 0; + } else { + operandIsNull = false; + operand = DataTypes.IntegerType.convertFrom(operand1Value); + } + } + + @Override + public void eval(EvaluationContext ctx) { + children[0].eval(ctx); + + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); + + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + RandomAccessBlock leftInputVectorSlot = + chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); + + int[] array1 = (leftInputVectorSlot.cast(IntegerBlock.class)).intArray(); + long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray(); + + if (operandIsNull) { + boolean[] outputNulls = outputVectorSlot.nulls(); + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + outputNulls[j] = true; + } + } else { + for (int i = 0; i < batchSize; i++) { + outputNulls[i] = true; + } + } + } else { + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + res[j] = (array1[j] == operand) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + res[i] = (array1[i] == operand) ? 
LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/EQLongColCharConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/EQLongColCharConstVectorizedExpression.java new file mode 100644 index 000000000..e275a0a2e --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/EQLongColCharConstVectorizedExpression.java @@ -0,0 +1,98 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.vectorized.compare; + +import com.alibaba.polardbx.executor.chunk.IntegerBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; +import com.alibaba.polardbx.executor.chunk.MutableChunk; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.vectorized.AbstractVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.EvaluationContext; +import com.alibaba.polardbx.executor.vectorized.LiteralVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; +import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Const; +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; + +@ExpressionSignatures(names = {"EQ", "EQUAL", "="}, argumentTypes = {"Long", "Char"}, + argumentKinds = {Variable, Const}) +public class EQLongColCharConstVectorizedExpression extends AbstractVectorizedExpression { + + protected final long operand; + protected final boolean operandIsNull; + + public EQLongColCharConstVectorizedExpression( + int outputIndex, VectorizedExpression[] children) { + super(DataTypes.LongType, outputIndex, children); + + Object operand1Value = ((LiteralVectorizedExpression) children[1]).getConvertedValue(); + if (operand1Value == null) { + operandIsNull = true; + operand = 0; + } else { + operandIsNull = false; + operand = DataTypes.LongType.convertFrom(operand1Value); + } + } + + @Override + public void eval(EvaluationContext ctx) { + children[0].eval(ctx); + + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); + + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + RandomAccessBlock leftInputVectorSlot = + chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); + + long[] array1 = (leftInputVectorSlot.cast(LongBlock.class)).longArray(); + long[] res = 
(outputVectorSlot.cast(LongBlock.class)).longArray(); + + if (operandIsNull) { + boolean[] outputNulls = outputVectorSlot.nulls(); + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + outputNulls[j] = true; + } + } else { + for (int i = 0; i < batchSize; i++) { + outputNulls[i] = true; + } + } + } else { + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + res[j] = (array1[j] == operand) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + res[i] = (array1[i] == operand) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); + } + } +} \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/EQVarcharColCharConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/EQVarcharColCharConstVectorizedExpression.java index 2a5ac7b47..70a1ec4e6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/EQVarcharColCharConstVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/EQVarcharColCharConstVectorizedExpression.java @@ -23,6 +23,10 @@ import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; import com.alibaba.polardbx.executor.chunk.ReferenceBlock; import com.alibaba.polardbx.executor.chunk.SliceBlock; +import com.alibaba.polardbx.executor.chunk.columnar.CommonLazyBlock; +import com.alibaba.polardbx.executor.operator.scan.BlockDictionary; +import com.alibaba.polardbx.executor.operator.scan.impl.DictionaryMapping; +import com.alibaba.polardbx.executor.operator.scan.impl.SingleDictionaryMapping; import com.alibaba.polardbx.executor.vectorized.AbstractVectorizedExpression; import com.alibaba.polardbx.executor.vectorized.EvaluationContext; import com.alibaba.polardbx.executor.vectorized.LiteralVectorizedExpression; @@ -38,12 +42,14 @@ import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Const; import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; -@ExpressionSignatures(names = {"EQ", "EQUAL", "="}, argumentTypes = {"Varchar", "Char"}, argumentKinds = {Variable, Const}) +@ExpressionSignatures(names = {"EQ", "EQUAL", "="}, argumentTypes = {"Varchar", "Char"}, + argumentKinds = {Variable, Const}) public class EQVarcharColCharConstVectorizedExpression extends AbstractVectorizedExpression { protected final CollationHandler collationHandler; protected final boolean operandIsNull; protected final Slice operand; + protected final DictionaryMapping mapping; public EQVarcharColCharConstVectorizedExpression( int outputIndex, @@ -57,11 +63,14 @@ public EQVarcharColCharConstVectorizedExpression( if (operand1Value == null) { operandIsNull = true; operand = null; + mapping = null; } else { operandIsNull = false; operand = sliceType.convertFrom(operand1Value); - } + // Create dictionary mapping and merge the parameter list + mapping = new SingleDictionaryMapping(operand); + } } @Override @@ -87,7 +96,7 @@ public void eval(EvaluationContext ctx) { RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - long[] output = ((LongBlock) outputVectorSlot).longArray(); + long[] output = (outputVectorSlot.cast(LongBlock.class)).longArray(); if (operandIsNull) { boolean[] 
outputNulls = outputVectorSlot.nulls(); @@ -98,10 +107,53 @@ public void eval(EvaluationContext ctx) { VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); + BlockDictionary blockDictionary; + if ((leftInputVectorSlot instanceof SliceBlock || leftInputVectorSlot instanceof CommonLazyBlock) + && mapping != null + && (blockDictionary = leftInputVectorSlot.cast(SliceBlock.class).getDictionary()) != null) { + // Best case: use dictionary + int[] reMapping = mapping.merge(blockDictionary); + int targetDictId = reMapping[0]; + + if (targetDictId == -1) { + // no matched value. + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + output[j] = LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + output[i] = LongBlock.FALSE_VALUE; + } + } + return; + } + + SliceBlock sliceBlock = leftInputVectorSlot.cast(SliceBlock.class); + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + output[j] = (targetDictId == sliceBlock.getDictId(j)) + ? LongBlock.TRUE_VALUE + : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + output[i] = (targetDictId == sliceBlock.getDictId(i)) + ? LongBlock.TRUE_VALUE + : LongBlock.FALSE_VALUE; + } + } + + return; + } if (!compatible && leftInputVectorSlot instanceof SliceBlock) { // best case. - SliceBlock sliceBlock = (SliceBlock) leftInputVectorSlot; + SliceBlock sliceBlock = leftInputVectorSlot.cast(SliceBlock.class); if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { @@ -110,13 +162,20 @@ public void eval(EvaluationContext ctx) { } } else { for (int i = 0; i < batchSize; i++) { - output[i] = sliceBlock.equals(i, (Slice) operandSortKey); + output[i] = sliceBlock.equals(i, (Slice) operandSortKey); } } } else { if (leftInputVectorSlot instanceof SliceBlock) { // normal case. - SliceBlock sliceBlock = (SliceBlock) leftInputVectorSlot; + SliceBlock sliceBlock = leftInputVectorSlot.cast(SliceBlock.class); + + if (sliceBlock.getDictionary() != null && sliceBlock.getDictIds() != null + && sliceBlock.getDictionary().size() < 100) { + + compareWithDict(sliceBlock, outputVectorSlot, output, batchSize, isSelectionInUse, sel); + return; + } if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { @@ -175,4 +234,39 @@ public void eval(EvaluationContext ctx) { } } + + private void compareWithDict(SliceBlock sliceBlock, RandomAccessBlock outputVectorSlot, + long[] output, int batchSize, boolean isSelectionInUse, int[] sel) { + int operandDictIdx; + for (operandDictIdx = 0; operandDictIdx < sliceBlock.getDictionary().size(); operandDictIdx++) { + if (operand.compareTo(sliceBlock.getDictionary().getValue(operandDictIdx)) == 0) { + break; + } + } + + if (operandDictIdx == sliceBlock.getDictionary().size()) { + // none match + boolean[] outputNulls = outputVectorSlot.nulls(); + outputVectorSlot.setHasNull(true); + Arrays.fill(outputNulls, true); + return; + } + + int[] dictIds = sliceBlock.getDictIds(); + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + output[j] = (operandDictIdx == dictIds[j]) + ? LongBlock.TRUE_VALUE + : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + output[i] = (operandDictIdx == dictIds[i]) + ? 
LongBlock.TRUE_VALUE + : LongBlock.FALSE_VALUE; + } + } + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastBetweenDecimalColCharConstCharConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastBetweenDecimalColCharConstCharConstVectorizedExpression.java new file mode 100644 index 000000000..84ea3cf7e --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastBetweenDecimalColCharConstCharConstVectorizedExpression.java @@ -0,0 +1,196 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.vectorized.compare; + +import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.datatype.DecimalConverter; +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.FastDecimalUtils; +import com.alibaba.polardbx.executor.chunk.DecimalBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; +import com.alibaba.polardbx.executor.chunk.MutableChunk; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.vectorized.AbstractVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.EvaluationContext; +import com.alibaba.polardbx.executor.vectorized.LiteralVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; +import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionPriority; +import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Const; +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; + +@ExpressionSignatures( + names = {"BETWEEN ASYMMETRIC"}, + argumentTypes = {"Decimal", "Char", "Char"}, + argumentKinds = {Variable, Const, Const}, + priority = ExpressionPriority.SPECIAL +) +public class FastBetweenDecimalColCharConstCharConstVectorizedExpression extends AbstractVectorizedExpression { + private final boolean operand1IsNull; + private final Decimal operand1; + private final boolean operand2IsNull; + private final Decimal operand2; + private long operand1Long; + private long operand2Long; + + /** + * When operand1, operand2 and the input block share the same scale + * and all fit the decimal64 representation, + * they can be compared as unscaled long values. + */ + private boolean useCompareWithDecimal64; + + public FastBetweenDecimalColCharConstCharConstVectorizedExpression( + int outputIndex, + VectorizedExpression[] children) { + super(DataTypes.LongType, outputIndex, children); + + Object operand1Value = ((LiteralVectorizedExpression) children[1]).getConvertedValue(); + Object operand2Value = 
((LiteralVectorizedExpression) children[2]).getConvertedValue(); + + this.useCompareWithDecimal64 = false; + // TODO simplify between null and null + if (operand1Value == null) { + this.operand1 = Decimal.ZERO; + this.operand1IsNull = true; + this.operand1Long = 0; + } else { + this.operand1IsNull = false; + this.operand1 = DataTypes.DecimalType.convertFrom(operand1Value); + } + if (operand2Value == null) { + this.operand2 = Decimal.ZERO; + this.operand2IsNull = true; + this.operand2Long = 0; + } else { + this.operand2IsNull = false; + this.operand2 = DataTypes.DecimalType.convertFrom(operand2Value); + } + if (operand1Value == null && operand2Value == null) { + this.useCompareWithDecimal64 = true; + return; + } + + if (!checkSameScale()) { + return; + } + + if (!checkDecimal64Precision()) { + return; + } + + try { + DecimalStructure tmpBuffer = new DecimalStructure(); + this.operand1Long = operand1.unscale(tmpBuffer); + this.operand2Long = operand2.unscale(tmpBuffer); + this.useCompareWithDecimal64 = true; + } catch (Exception e) { + this.useCompareWithDecimal64 = false; + } + } + + private boolean checkDecimal64Precision() { + return DecimalConverter.isDecimal64(operand1.precision()) + && DecimalConverter.isDecimal64(operand2.precision()); + } + + private boolean checkSameScale() { + return operand1.scale() == operand2.scale(); + } + + @Override + public void eval(EvaluationContext ctx) { + children[0].eval(ctx); + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); + + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + DecimalBlock leftInputVectorSlot = + chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()).cast(DecimalBlock.class); + + long[] output = outputVectorSlot.cast(LongBlock.class).longArray(); + + DecimalStructure leftDec; + DecimalStructure operand1Dec = operand1.getDecimalStructure(); + DecimalStructure operand2Dec = operand2.getDecimalStructure(); + + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); + + if (useCompareWithDecimal64) { + boolean sameScale = (operand1.scale() == leftInputVectorSlot.getScale() && + operand2.scale() == leftInputVectorSlot.getScale()); + if (leftInputVectorSlot.isDecimal64() && sameScale) { + compareByDecimal64(batchSize, isSelectionInUse, sel, leftInputVectorSlot, output); + return; + } + } + + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + // fetch left decimal value + leftDec = new DecimalStructure(leftInputVectorSlot.getRegion(j)); + + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) >= 0; + boolean b2 = FastDecimalUtils.compare(leftDec, operand2Dec) <= 0; + + output[j] = b1 && b2 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + leftDec = new DecimalStructure(leftInputVectorSlot.getRegion(i)); + + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) >= 0; + boolean b2 = FastDecimalUtils.compare(leftDec, operand2Dec) <= 0; + + output[i] = b1 && b2 ? 
LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + } + + private void compareByDecimal64(int batchSize, boolean isSelectionInUse, + int[] sel, DecimalBlock leftInputVectorSlot, long[] output) { + + long[] array1 = leftInputVectorSlot.getDecimal64Values(); + + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + boolean b1 = array1[j] >= operand1Long; + boolean b2 = array1[j] <= operand2Long; + + output[j] = b1 && b2 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + + boolean b1 = array1[i] >= operand1Long; + boolean b2 = array1[i] <= operand2Long; + + output[i] = b1 && b2 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastBetweenDecimalColDecimalConstDecimalConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastBetweenDecimalColDecimalConstDecimalConstVectorizedExpression.java new file mode 100644 index 000000000..ce83f2dcd --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastBetweenDecimalColDecimalConstDecimalConstVectorizedExpression.java @@ -0,0 +1,197 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.vectorized.compare; + +import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.datatype.DecimalConverter; +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.FastDecimalUtils; +import com.alibaba.polardbx.executor.chunk.DecimalBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; +import com.alibaba.polardbx.executor.chunk.MutableChunk; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.vectorized.AbstractVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.EvaluationContext; +import com.alibaba.polardbx.executor.vectorized.LiteralVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; +import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionPriority; +import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Const; +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; + +@ExpressionSignatures( + names = {"BETWEEN ASYMMETRIC"}, + argumentTypes = {"Decimal", "Decimal", "Decimal"}, + argumentKinds = {Variable, Const, Const}, + priority = ExpressionPriority.SPECIAL +) +public class FastBetweenDecimalColDecimalConstDecimalConstVectorizedExpression extends AbstractVectorizedExpression { + private final boolean operand1IsNull; + private final Decimal operand1; + private long operand1Long; + + private final boolean operand2IsNull; + private final Decimal operand2; + private long operand2Long; + + /** + * When operand1, operand2 and the input block share the same scale + * and all fit the decimal64 representation, + * they can be compared as unscaled long values. + */ + private boolean useCompareWithDecimal64; + + public FastBetweenDecimalColDecimalConstDecimalConstVectorizedExpression( + int outputIndex, + VectorizedExpression[] children) { + super(DataTypes.LongType, outputIndex, children); + + Object operand1Value = ((LiteralVectorizedExpression) children[1]).getConvertedValue(); + Object operand2Value = ((LiteralVectorizedExpression) children[2]).getConvertedValue(); + + this.useCompareWithDecimal64 = false; + // TODO simplify between null and null + if (operand1Value == null) { + this.operand1 = Decimal.ZERO; + this.operand1IsNull = true; + this.operand1Long = 0; + } else { + this.operand1IsNull = false; + this.operand1 = (Decimal) operand1Value; + } + if (operand2Value == null) { + this.operand2 = Decimal.ZERO; + this.operand2IsNull = true; + this.operand2Long = 0; + } else { + this.operand2IsNull = false; + this.operand2 = (Decimal) operand2Value; + } + if (operand1Value == null && operand2Value == null) { + this.useCompareWithDecimal64 = true; + return; + } + + if (!checkSameScale()) { + return; + } + + if (!checkDecimal64Precision()) { + return; + } + + try { + DecimalStructure tmpBuffer = new DecimalStructure(); + this.operand1Long = operand1.unscale(tmpBuffer); + this.operand2Long = operand2.unscale(tmpBuffer); + this.useCompareWithDecimal64 = true; + } catch (Exception e) { + this.useCompareWithDecimal64 = false; + } + } + + private boolean checkDecimal64Precision() { + return DecimalConverter.isDecimal64(operand1.precision()) + && DecimalConverter.isDecimal64(operand2.precision()); + } 
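// Why the decimal64 fast path in this class is sound, sketched with assumed
// values (scale 2, as in DECIMAL(x, 2); the numbers are illustrative only):
//
//   operand1 = 1.50   -> unscale() -> 150
//   operand2 = 20.00  -> unscale() -> 2000
//   column value 12.34, stored as decimal64 -> unscaled long 1234
//
// When every value carries the same scale, the ordering of the unscaled longs
// matches decimal ordering, so "x BETWEEN 1.50 AND 20.00" reduces to the pure
// long test 150 <= 1234 && 1234 <= 2000, with no DecimalStructure allocation
// per row. The try/catch in the constructor guards unscale(), presumably
// against values that cannot be represented as a long, and falls back to
// useCompareWithDecimal64 = false so eval() takes the generic FastDecimalUtils
// comparison instead.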
+ + private boolean checkSameScale() { + return operand1.scale() == operand2.scale(); + } + + @Override + public void eval(EvaluationContext ctx) { + children[0].eval(ctx); + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); + + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + DecimalBlock leftInputVectorSlot = + chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()).cast(DecimalBlock.class); + + long[] output = outputVectorSlot.cast(LongBlock.class).longArray(); + + DecimalStructure leftDec; + DecimalStructure operand1Dec = operand1.getDecimalStructure(); + DecimalStructure operand2Dec = operand2.getDecimalStructure(); + + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); + + if (useCompareWithDecimal64) { + boolean sameScale = (operand1.scale() == leftInputVectorSlot.getScale() && + operand2.scale() == leftInputVectorSlot.getScale()); + if (leftInputVectorSlot.isDecimal64() && sameScale) { + compareByDecimal64(batchSize, isSelectionInUse, sel, leftInputVectorSlot, output); + return; + } + } + + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + // fetch left decimal value + leftDec = new DecimalStructure(leftInputVectorSlot.getRegion(j)); + + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) >= 0; + boolean b2 = FastDecimalUtils.compare(leftDec, operand2Dec) <= 0; + + output[j] = b1 && b2 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + leftDec = new DecimalStructure(leftInputVectorSlot.getRegion(i)); + + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) >= 0; + boolean b2 = FastDecimalUtils.compare(leftDec, operand2Dec) <= 0; + + output[i] = b1 && b2 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + } + + private void compareByDecimal64(int batchSize, boolean isSelectionInUse, + int[] sel, DecimalBlock leftInputVectorSlot, long[] output) { + + long[] array1 = leftInputVectorSlot.getDecimal64Values(); + + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + boolean b1 = array1[j] >= operand1Long; + boolean b2 = array1[j] <= operand2Long; + + output[j] = b1 && b2 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + + boolean b1 = array1[i] >= operand1Long; + boolean b2 = array1[i] <= operand2Long; + + output[i] = b1 && b2 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastGEDecimalColDecimalConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastGEDecimalColDecimalConstVectorizedExpression.java new file mode 100644 index 000000000..bd5b68e97 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastGEDecimalColDecimalConstVectorizedExpression.java @@ -0,0 +1,187 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.vectorized.compare; + +import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.datatype.DecimalConverter; +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.FastDecimalUtils; +import com.alibaba.polardbx.executor.chunk.DecimalBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; +import com.alibaba.polardbx.executor.chunk.MutableChunk; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.vectorized.AbstractVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.EvaluationContext; +import com.alibaba.polardbx.executor.vectorized.LiteralVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; +import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import io.airlift.slice.Slice; + +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Const; +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; +import static com.alibaba.polardbx.executor.vectorized.metadata.ExpressionPriority.SPECIAL; + +@ExpressionSignatures( + names = {"GE", ">="}, + argumentTypes = {"Decimal", "Decimal"}, + argumentKinds = {Variable, Const}, + priority = SPECIAL +) +public class FastGEDecimalColDecimalConstVectorizedExpression extends AbstractVectorizedExpression { + + private final boolean operand1IsNull; + private final Decimal operand1; + private final boolean useOperand1WithScale; + private final long operand1WithScale; + + public FastGEDecimalColDecimalConstVectorizedExpression(int outputIndex, VectorizedExpression[] children) { + super(DataTypes.LongType, outputIndex, children); + + Object operand1Value = ((LiteralVectorizedExpression) children[1]).getConvertedValue(); + + if (operand1Value == null) { + operand1IsNull = true; + operand1 = Decimal.ZERO; + operand1WithScale = 0; + useOperand1WithScale = true; + return; + } + operand1IsNull = false; + operand1 = (Decimal) operand1Value; + if (operand1.compareTo(Decimal.ZERO) == 0) { + operand1WithScale = 0; + useOperand1WithScale = true; + return; + } + if (!DecimalConverter.isDecimal64(operand1.precision())) { + operand1WithScale = 0; + useOperand1WithScale = false; + } else { + DecimalStructure tmpBuffer = new DecimalStructure(); + operand1WithScale = operand1.unscale(tmpBuffer); + useOperand1WithScale = true; + } + } + + @Override + public void eval(EvaluationContext ctx) { + children[0].eval(ctx); + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); + + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + DecimalBlock leftInputVectorSlot = + chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()) + 
.cast(DecimalBlock.class); + + long[] output = (outputVectorSlot.cast(LongBlock.class)).longArray(); + + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); + boolean useDecimal64Compare = useOperand1WithScale && leftInputVectorSlot.isDecimal64() + && checkSameScale(leftInputVectorSlot); + if (useDecimal64Compare) { + // do Decimal64 compare + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + long leftVal = leftInputVectorSlot.getLong(j); + output[j] = leftVal >= operand1WithScale ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + long leftVal = leftInputVectorSlot.getLong(i); + output[i] = leftVal >= operand1WithScale ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + return; + } + boolean useDecimal128Compare = useOperand1WithScale && leftInputVectorSlot.isDecimal128() + && checkSameScale(leftInputVectorSlot); + if (useDecimal128Compare) { + long[] decimal128Low = leftInputVectorSlot.getDecimal128LowValues(); + long[] decimal128High = leftInputVectorSlot.getDecimal128HighValues(); + long rightLow = operand1WithScale; + long rightHigh = operand1WithScale >= 0 ? 0 : -1; + + // do Decimal128 compare + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + int pos = leftInputVectorSlot.realPositionOf(j); + + long leftLow = decimal128Low[pos]; + long leftHigh = decimal128High[pos]; + + boolean greatEqual = !((leftHigh < rightHigh) || (leftHigh == rightHigh && leftLow < rightLow)); + output[j] = greatEqual ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + int pos = leftInputVectorSlot.realPositionOf(i); + + long leftLow = decimal128Low[pos]; + long leftHigh = decimal128High[pos]; + + boolean greatEqual = !((leftHigh < rightHigh) || (leftHigh == rightHigh && leftLow < rightLow)); + output[i] = greatEqual ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + return; + } + + // do normal decimal compare + DecimalStructure leftDec; + DecimalStructure operand1Dec = operand1.getDecimalStructure(); + Slice cachedSlice = leftInputVectorSlot.allocCachedSlice(); + + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + // fetch left decimal value + leftDec = + new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(j, cachedSlice)); + + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) >= 0; + + output[j] = b1 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + leftDec = + new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(i, cachedSlice)); + + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) >= 0; + + output[i] = b1 ? 
LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + } + + private boolean checkSameScale(DecimalBlock leftInputVectorSlot) { + return leftInputVectorSlot.getScale() == operand1.scale(); + } +} + + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastGEDecimalColLongConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastGEDecimalColLongConstVectorizedExpression.java new file mode 100644 index 000000000..6ddb744ea --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastGEDecimalColLongConstVectorizedExpression.java @@ -0,0 +1,178 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.vectorized.compare; + +import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.DecimalTypeBase; +import com.alibaba.polardbx.common.datatype.FastDecimalUtils; +import com.alibaba.polardbx.common.utils.MathUtils; +import com.alibaba.polardbx.executor.chunk.DecimalBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; +import com.alibaba.polardbx.executor.chunk.MutableChunk; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.vectorized.AbstractVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.EvaluationContext; +import com.alibaba.polardbx.executor.vectorized.LiteralVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; +import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DECIMAL_MEMORY_SIZE; +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Const; +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; +import static com.alibaba.polardbx.executor.vectorized.metadata.ExpressionPriority.SPECIAL; + +@ExpressionSignatures( + names = {"GE", ">="}, + argumentTypes = {"Decimal", "Long"}, + argumentKinds = {Variable, Const}, + priority = SPECIAL +) +public class FastGEDecimalColLongConstVectorizedExpression extends AbstractVectorizedExpression { + + private final boolean operand1IsNull; + private final Decimal operand1; + private final boolean useOperand1WithScale; + private final long operand1WithScale; + + public FastGEDecimalColLongConstVectorizedExpression(int outputIndex, VectorizedExpression[] children) { + super(DataTypes.LongType, outputIndex, children); + + Object operand1Value = ((LiteralVectorizedExpression) children[1]).getConvertedValue(); + if (operand1Value == null) { + operand1IsNull = true; + operand1 = Decimal.ZERO; + 
operand1WithScale = 0; + useOperand1WithScale = true; + } else { + operand1IsNull = false; + operand1 = DataTypes.DecimalType.convertFrom(operand1Value); + long left = (long) operand1Value; + if (left == 0) { + operand1WithScale = 0; + useOperand1WithScale = true; + return; + } + int scale = children[0].getOutputDataType().getScale(); + if (scale < 0 || scale >= DecimalTypeBase.POW_10.length) { + operand1WithScale = 0; + useOperand1WithScale = false; + } else { + long power = DecimalTypeBase.POW_10[scale]; + operand1WithScale = left * power; + useOperand1WithScale = !MathUtils.longMultiplyOverflow(left, power, operand1WithScale); + } + } + } + + @Override + public void eval(EvaluationContext ctx) { + children[0].eval(ctx); + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); + + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + DecimalBlock leftInputVectorSlot = + chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()) + .cast(DecimalBlock.class); + + long[] output = (outputVectorSlot.cast(LongBlock.class)).longArray(); + + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); + if (leftInputVectorSlot.isDecimal64() && useOperand1WithScale) { + // do Decimal64 compare + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + long leftVal = leftInputVectorSlot.getLong(j); + output[j] = leftVal >= operand1WithScale ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + long leftVal = leftInputVectorSlot.getLong(i); + output[i] = leftVal >= operand1WithScale ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + return; + } + + if (leftInputVectorSlot.isDecimal128() && useOperand1WithScale) { + long[] decimal128Low = leftInputVectorSlot.getDecimal128LowValues(); + long[] decimal128High = leftInputVectorSlot.getDecimal128HighValues(); + long rightLow = operand1WithScale; + long rightHigh = operand1WithScale >= 0 ? 0 : -1; + + // do Decimal128 compare + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + int pos = leftInputVectorSlot.realPositionOf(j); + + long leftLow = decimal128Low[pos]; + long leftHigh = decimal128High[pos]; + + boolean greatEqual = !((leftHigh < rightHigh) || (leftHigh == rightHigh && leftLow < rightLow)); + output[j] = greatEqual ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + int pos = leftInputVectorSlot.realPositionOf(i); + + long leftLow = decimal128Low[pos]; + long leftHigh = decimal128High[pos]; + + boolean greatEqual = !((leftHigh < rightHigh) || (leftHigh == rightHigh && leftLow < rightLow)); + output[i] = greatEqual ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + return; + } + + // do normal decimal compare + DecimalStructure leftDec; + DecimalStructure operand1Dec = operand1.getDecimalStructure(); + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + // fetch left decimal value + leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(j)); + + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) >= 0; + + output[j] = b1 ? 
LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(i)); + + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) >= 0; + + output[i] = b1 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + } +} + + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastGTDecimalColDecimalConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastGTDecimalColDecimalConstVectorizedExpression.java new file mode 100644 index 000000000..902ff13df --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastGTDecimalColDecimalConstVectorizedExpression.java @@ -0,0 +1,187 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.vectorized.compare; + +import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.datatype.DecimalConverter; +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.FastDecimalUtils; +import com.alibaba.polardbx.executor.chunk.DecimalBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; +import com.alibaba.polardbx.executor.chunk.MutableChunk; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.vectorized.AbstractVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.EvaluationContext; +import com.alibaba.polardbx.executor.vectorized.LiteralVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; +import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import io.airlift.slice.Slice; + +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Const; +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; +import static com.alibaba.polardbx.executor.vectorized.metadata.ExpressionPriority.SPECIAL; + +@ExpressionSignatures( + names = {"GT", ">"}, + argumentTypes = {"Decimal", "Decimal"}, + argumentKinds = {Variable, Const}, + priority = SPECIAL +) +public class FastGTDecimalColDecimalConstVectorizedExpression extends AbstractVectorizedExpression { + + private final boolean operand1IsNull; + private final Decimal operand1; + private final boolean useOperand1WithScale; + private final long operand1WithScale; + + public FastGTDecimalColDecimalConstVectorizedExpression(int outputIndex, VectorizedExpression[] children) { + super(DataTypes.LongType, outputIndex, children); + + Object operand1Value = ((LiteralVectorizedExpression) children[1]).getConvertedValue(); + + if 
(operand1Value == null) { + operand1IsNull = true; + operand1 = Decimal.ZERO; + operand1WithScale = 0; + useOperand1WithScale = true; + return; + } + operand1IsNull = false; + operand1 = (Decimal) operand1Value; + if (operand1.compareTo(Decimal.ZERO) == 0) { + operand1WithScale = 0; + useOperand1WithScale = true; + return; + } + if (!DecimalConverter.isDecimal64(operand1.precision())) { + operand1WithScale = 0; + useOperand1WithScale = false; + } else { + DecimalStructure tmpBuffer = new DecimalStructure(); + operand1WithScale = operand1.unscale(tmpBuffer); + useOperand1WithScale = true; + } + } + + @Override + public void eval(EvaluationContext ctx) { + children[0].eval(ctx); + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); + + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + DecimalBlock leftInputVectorSlot = + chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()) + .cast(DecimalBlock.class); + + long[] output = (outputVectorSlot.cast(LongBlock.class)).longArray(); + + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); + boolean useDecimal64Compare = useOperand1WithScale && leftInputVectorSlot.isDecimal64() + && checkSameScale(leftInputVectorSlot); + if (useDecimal64Compare) { + // do Decimal64 compare + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + long leftVal = leftInputVectorSlot.getLong(j); + output[j] = leftVal > operand1WithScale ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + long leftVal = leftInputVectorSlot.getLong(i); + output[i] = leftVal > operand1WithScale ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + return; + } + boolean useDecimal128Compare = useOperand1WithScale && leftInputVectorSlot.isDecimal128() + && checkSameScale(leftInputVectorSlot); + if (useDecimal128Compare) { + long[] decimal128Low = leftInputVectorSlot.getDecimal128LowValues(); + long[] decimal128High = leftInputVectorSlot.getDecimal128HighValues(); + long rightLow = operand1WithScale; + long rightHigh = operand1WithScale >= 0 ? 0 : -1; + + // do Decimal128 compare + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + int pos = leftInputVectorSlot.realPositionOf(j); + + long leftLow = decimal128Low[pos]; + long leftHigh = decimal128High[pos]; + + boolean great = (leftHigh > rightHigh) || (leftHigh == rightHigh && leftLow > rightLow); + output[j] = great ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + int pos = leftInputVectorSlot.realPositionOf(i); + + long leftLow = decimal128Low[pos]; + long leftHigh = decimal128High[pos]; + + boolean great = (leftHigh > rightHigh) || (leftHigh == rightHigh && leftLow > rightLow); + output[i] = great ? 
LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + return; + } + + // do normal decimal compare + DecimalStructure leftDec; + DecimalStructure operand1Dec = operand1.getDecimalStructure(); + Slice cachedSlice = leftInputVectorSlot.allocCachedSlice(); + + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + // fetch left decimal value + leftDec = + new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(j, cachedSlice)); + + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) > 0; + + output[j] = b1 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + leftDec = + new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(i, cachedSlice)); + + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) > 0; + + output[i] = b1 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + } + + private boolean checkSameScale(DecimalBlock leftInputVectorSlot) { + return leftInputVectorSlot.getScale() == operand1.scale(); + } +} + + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastGTDecimalColLongConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastGTDecimalColLongConstVectorizedExpression.java new file mode 100644 index 000000000..b1bd1f44d --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastGTDecimalColLongConstVectorizedExpression.java @@ -0,0 +1,177 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.vectorized.compare; + +import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.DecimalTypeBase; +import com.alibaba.polardbx.common.datatype.FastDecimalUtils; +import com.alibaba.polardbx.common.utils.MathUtils; +import com.alibaba.polardbx.executor.chunk.DecimalBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; +import com.alibaba.polardbx.executor.chunk.MutableChunk; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.vectorized.AbstractVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.EvaluationContext; +import com.alibaba.polardbx.executor.vectorized.LiteralVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; +import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Const; +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; +import static com.alibaba.polardbx.executor.vectorized.metadata.ExpressionPriority.SPECIAL; + +@ExpressionSignatures( + names = {"GT", ">"}, + argumentTypes = {"Decimal", "Long"}, + argumentKinds = {Variable, Const}, + priority = SPECIAL +) +public class FastGTDecimalColLongConstVectorizedExpression extends AbstractVectorizedExpression { + + private final boolean operand1IsNull; + private final Decimal operand1; + private final boolean useOperand1WithScale; + private final long operand1WithScale; + + public FastGTDecimalColLongConstVectorizedExpression(int outputIndex, VectorizedExpression[] children) { + super(DataTypes.LongType, outputIndex, children); + + Object operand1Value = ((LiteralVectorizedExpression) children[1]).getConvertedValue(); + if (operand1Value == null) { + operand1IsNull = true; + operand1 = Decimal.ZERO; + operand1WithScale = 0; + useOperand1WithScale = true; + } else { + operand1IsNull = false; + operand1 = DataTypes.DecimalType.convertFrom(operand1Value); + long left = (long) operand1Value; + if (left == 0) { + operand1WithScale = 0; + useOperand1WithScale = true; + return; + } + int scale = children[0].getOutputDataType().getScale(); + if (scale < 0 || scale >= DecimalTypeBase.POW_10.length) { + operand1WithScale = 0; + useOperand1WithScale = false; + } else { + long power = DecimalTypeBase.POW_10[scale]; + operand1WithScale = left * power; + useOperand1WithScale = !MathUtils.longMultiplyOverflow(left, power, operand1WithScale); + } + } + } + + @Override + public void eval(EvaluationContext ctx) { + children[0].eval(ctx); + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); + + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + DecimalBlock leftInputVectorSlot = + chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()) + .cast(DecimalBlock.class); + + long[] output = (outputVectorSlot.cast(LongBlock.class)).longArray(); + + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); + if (leftInputVectorSlot.isDecimal64() && useOperand1WithScale) { + // do Decimal64 compare + if (isSelectionInUse) { + 
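// selection in use: sel[i] gives the physical slot of logical row i + // unscaled decimal64 longs compare exactly against the pre-scaled constant + 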
for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + long leftVal = leftInputVectorSlot.getLong(j); + output[j] = leftVal > operand1WithScale ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + long leftVal = leftInputVectorSlot.getLong(i); + output[i] = leftVal > operand1WithScale ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + return; + } + + if (leftInputVectorSlot.isDecimal128() && useOperand1WithScale) { + long[] decimal128Low = leftInputVectorSlot.getDecimal128LowValues(); + long[] decimal128High = leftInputVectorSlot.getDecimal128HighValues(); + long rightLow = operand1WithScale; + long rightHigh = operand1WithScale >= 0 ? 0 : -1; + + // do Decimal128 compare: high words compare signed, low words compare unsigned + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + int pos = leftInputVectorSlot.realPositionOf(j); + + long leftLow = decimal128Low[pos]; + long leftHigh = decimal128High[pos]; + + boolean great = (leftHigh > rightHigh) || (leftHigh == rightHigh && Long.compareUnsigned(leftLow, rightLow) > 0); + output[j] = great ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + int pos = leftInputVectorSlot.realPositionOf(i); + + long leftLow = decimal128Low[pos]; + long leftHigh = decimal128High[pos]; + + boolean great = (leftHigh > rightHigh) || (leftHigh == rightHigh && Long.compareUnsigned(leftLow, rightLow) > 0); + output[i] = great ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + return; + } + + // do normal decimal compare + DecimalStructure leftDec; + DecimalStructure operand1Dec = operand1.getDecimalStructure(); + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + // fetch left decimal value + leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(j)); + + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) > 0; + + output[j] = b1 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(i)); + + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) > 0; + + output[i] = b1 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + } +} + + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastInVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastInVectorizedExpression.java new file mode 100644 index 000000000..59832b022 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastInVectorizedExpression.java @@ -0,0 +1,139 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.vectorized.compare; + +import com.alibaba.polardbx.executor.chunk.IntegerBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; +import com.alibaba.polardbx.executor.chunk.MutableChunk; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.vectorized.AbstractVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.EvaluationContext; +import com.alibaba.polardbx.executor.vectorized.InValuesVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.google.common.base.Preconditions; + +import java.util.Set; + +public class FastInVectorizedExpression extends AbstractVectorizedExpression { + + private final InValuesVectorizedExpression.InValueSet inValuesSet; + private final boolean operandsHaveNull; + + public FastInVectorizedExpression(DataType dataType, + int outputIndex, + VectorizedExpression[] children) { + super(dataType, outputIndex, children); + Preconditions.checkArgument(children.length == 2, + "Unexpected in vec expression children length: " + children.length); + Preconditions.checkArgument(children[1] instanceof InValuesVectorizedExpression, + "Unexpected in values expression type: " + children[1].getClass().getSimpleName()); + InValuesVectorizedExpression inExpr = (InValuesVectorizedExpression) children[1]; + this.operandsHaveNull = inExpr.hasNull(); + this.inValuesSet = inExpr.getInValueSet(); + } + + @Override + public void eval(EvaluationContext ctx) { + children[0].eval(ctx); + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + + long[] output = outputVectorSlot.cast(LongBlock.class).longArray(); + if (operandsHaveNull) { + boolean[] outputNulls = outputVectorSlot.nulls(); + outputVectorSlot.setHasNull(true); + // the IN value list contains NULL: the result is NULL for every selected row + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + outputNulls[j] = true; + } + } else { + for (int i = 0; i < batchSize; i++) { + outputNulls[i] = true; + } + } + return; + } + + RandomAccessBlock leftInputVectorSlot = + chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); + if (leftInputVectorSlot.isInstanceOf(LongBlock.class)) { + evalLongIn(output, leftInputVectorSlot.cast(LongBlock.class), batchSize, isSelectionInUse, sel); + return; + } + + if (leftInputVectorSlot.isInstanceOf(IntegerBlock.class)) { + evalIntIn(output, leftInputVectorSlot.cast(IntegerBlock.class), batchSize, isSelectionInUse, sel); + return; + } + + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + output[j] = inValuesSet.contains(leftInputVectorSlot.elementAt(j)) ? + LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + output[i] = inValuesSet.contains(leftInputVectorSlot.elementAt(i)) ? + LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + } + + private void evalIntIn(long[] output, IntegerBlock leftInputSlot, + int batchSize, boolean isSelectionInUse, + int[] sel) { + int[] intArray = leftInputSlot.intArray(); + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + output[j] = inValuesSet.contains(intArray[j]) ? + LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + output[i] = inValuesSet.contains(intArray[i]) ? 
+ LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + } + + private void evalLongIn(long[] output, LongBlock leftInputSlot, + int batchSize, boolean isSelectionInUse, + int[] sel) { + long[] longArray = leftInputSlot.longArray(); + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + output[j] = inValuesSet.contains(longArray[j]) ? + LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + output[i] = inValuesSet.contains(longArray[i]) ? + LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + } +} \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastLEDecimalColDecimalConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastLEDecimalColDecimalConstVectorizedExpression.java new file mode 100644 index 000000000..829f01108 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastLEDecimalColDecimalConstVectorizedExpression.java @@ -0,0 +1,187 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.vectorized.compare; + +import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.datatype.DecimalConverter; +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.FastDecimalUtils; +import com.alibaba.polardbx.executor.chunk.DecimalBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; +import com.alibaba.polardbx.executor.chunk.MutableChunk; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.vectorized.AbstractVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.EvaluationContext; +import com.alibaba.polardbx.executor.vectorized.LiteralVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; +import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import io.airlift.slice.Slice; + +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Const; +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; +import static com.alibaba.polardbx.executor.vectorized.metadata.ExpressionPriority.SPECIAL; + +@ExpressionSignatures( + names = {"LE", "<="}, + argumentTypes = {"Decimal", "Decimal"}, + argumentKinds = {Variable, Const}, + priority = SPECIAL +) +public class FastLEDecimalColDecimalConstVectorizedExpression extends AbstractVectorizedExpression { + + private final boolean operand1IsNull; + private final Decimal operand1; + private final boolean useOperand1WithScale; + private final long operand1WithScale; + + public 
FastLEDecimalColDecimalConstVectorizedExpression(int outputIndex, VectorizedExpression[] children) { + super(DataTypes.LongType, outputIndex, children); + + Object operand1Value = ((LiteralVectorizedExpression) children[1]).getConvertedValue(); + + if (operand1Value == null) { + operand1IsNull = true; + operand1 = Decimal.ZERO; + operand1WithScale = 0; + useOperand1WithScale = true; + return; + } + operand1IsNull = false; + operand1 = (Decimal) operand1Value; + if (operand1.compareTo(Decimal.ZERO) == 0) { + operand1WithScale = 0; + useOperand1WithScale = true; + return; + } + if (!DecimalConverter.isDecimal64(operand1.precision())) { + operand1WithScale = 0; + useOperand1WithScale = false; + } else { + DecimalStructure tmpBuffer = new DecimalStructure(); + operand1WithScale = operand1.unscale(tmpBuffer); + useOperand1WithScale = true; + } + } + + @Override + public void eval(EvaluationContext ctx) { + children[0].eval(ctx); + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); + + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + DecimalBlock leftInputVectorSlot = + chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()) + .cast(DecimalBlock.class); + + long[] output = (outputVectorSlot.cast(LongBlock.class)).longArray(); + + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); + boolean useDecimal64Compare = useOperand1WithScale && leftInputVectorSlot.isDecimal64() + && checkSameScale(leftInputVectorSlot); + if (useDecimal64Compare) { + // do Decimal64 compare + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + long leftVal = leftInputVectorSlot.getLong(j); + output[j] = leftVal <= operand1WithScale ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + long leftVal = leftInputVectorSlot.getLong(i); + output[i] = leftVal <= operand1WithScale ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + return; + } + boolean useDecimal128Compare = useOperand1WithScale && leftInputVectorSlot.isDecimal128() + && checkSameScale(leftInputVectorSlot); + if (useDecimal128Compare) { + long[] decimal128Low = leftInputVectorSlot.getDecimal128LowValues(); + long[] decimal128High = leftInputVectorSlot.getDecimal128HighValues(); + long rightLow = operand1WithScale; + long rightHigh = operand1WithScale >= 0 ? 0 : -1; + + // do Decimal128 compare: high words compare signed, low words compare unsigned + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + int pos = leftInputVectorSlot.realPositionOf(j); + + long leftLow = decimal128Low[pos]; + long leftHigh = decimal128High[pos]; + + boolean lessOrEqual = (leftHigh < rightHigh) || (leftHigh == rightHigh && Long.compareUnsigned(leftLow, rightLow) <= 0); + output[j] = lessOrEqual ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + int pos = leftInputVectorSlot.realPositionOf(i); + + long leftLow = decimal128Low[pos]; + long leftHigh = decimal128High[pos]; + + boolean lessOrEqual = (leftHigh < rightHigh) || (leftHigh == rightHigh && Long.compareUnsigned(leftLow, rightLow) <= 0); + output[i] = lessOrEqual ? 
LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + return; + } + + // do normal decimal compare + DecimalStructure leftDec; + DecimalStructure operand1Dec = operand1.getDecimalStructure(); + Slice cachedSlice = leftInputVectorSlot.allocCachedSlice(); + + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + // fetch left decimal value + leftDec = + new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(j, cachedSlice)); + + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) <= 0; + + output[j] = b1 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + leftDec = + new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(i, cachedSlice)); + + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) <= 0; + + output[i] = b1 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + } + + private boolean checkSameScale(DecimalBlock leftInputVectorSlot) { + return leftInputVectorSlot.getScale() == operand1.scale(); + } +} + + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastLEDecimalColLongConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastLEDecimalColLongConstVectorizedExpression.java new file mode 100644 index 000000000..f198deec4 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastLEDecimalColLongConstVectorizedExpression.java @@ -0,0 +1,173 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.vectorized.compare; + +import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.DecimalTypeBase; +import com.alibaba.polardbx.common.datatype.FastDecimalUtils; +import com.alibaba.polardbx.common.utils.MathUtils; +import com.alibaba.polardbx.executor.chunk.DecimalBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; +import com.alibaba.polardbx.executor.chunk.MutableChunk; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.vectorized.AbstractVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.EvaluationContext; +import com.alibaba.polardbx.executor.vectorized.LiteralVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; +import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DECIMAL_MEMORY_SIZE; +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Const; +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; +import static com.alibaba.polardbx.executor.vectorized.metadata.ExpressionPriority.SPECIAL; + +@ExpressionSignatures( + names = {"LE", "<="}, + argumentTypes = {"Decimal", "Long"}, + argumentKinds = {Variable, Const}, + priority = SPECIAL +) +public class FastLEDecimalColLongConstVectorizedExpression extends AbstractVectorizedExpression { + + private final boolean operand1IsNull; + private final Decimal operand1; + private final boolean useOperand1WithScale; + private final long operand1WithScale; + + public FastLEDecimalColLongConstVectorizedExpression(int outputIndex, VectorizedExpression[] children) { + super(DataTypes.LongType, outputIndex, children); + + Object operand1Value = ((LiteralVectorizedExpression) children[1]).getConvertedValue(); + if (operand1Value == null) { + operand1IsNull = true; + operand1 = Decimal.ZERO; + operand1WithScale = 0; + useOperand1WithScale = true; + } else { + operand1IsNull = false; + operand1 = DataTypes.DecimalType.convertFrom(operand1Value); + long left = (long) operand1Value; + if (left == 0) { + operand1WithScale = 0; + useOperand1WithScale = true; + return; + } + int scale = children[0].getOutputDataType().getScale(); + if (scale < 0 || scale >= DecimalTypeBase.POW_10.length) { + operand1WithScale = 0; + useOperand1WithScale = false; + } else { + long power = DecimalTypeBase.POW_10[scale]; + operand1WithScale = left * power; + useOperand1WithScale = !MathUtils.longMultiplyOverflow(left, power, operand1WithScale); + } + } + } + + @Override + public void eval(EvaluationContext ctx) { + children[0].eval(ctx); + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); + + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + DecimalBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()) + .cast(DecimalBlock.class); + + long[] output = (outputVectorSlot.cast(LongBlock.class)).longArray(); + + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); + if 
(leftInputVectorSlot.isDecimal64() && useOperand1WithScale) { + // do Decimal64 compare + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + long leftVal = leftInputVectorSlot.getLong(j); + output[j] = leftVal <= operand1WithScale ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + long leftVal = leftInputVectorSlot.getLong(i); + output[i] = leftVal <= operand1WithScale ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + return; + } + + if (leftInputVectorSlot.isDecimal128() && useOperand1WithScale) { + long[] decimal128Low = leftInputVectorSlot.getDecimal128LowValues(); + long[] decimal128High = leftInputVectorSlot.getDecimal128HighValues(); + long rightLow = operand1WithScale; + long rightHigh = operand1WithScale >= 0 ? 0 : -1; + + // do Decimal128 compare: high words compare signed, low words compare unsigned + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + int pos = leftInputVectorSlot.realPositionOf(j); + + long leftLow = decimal128Low[pos]; + long leftHigh = decimal128High[pos]; + + boolean lessOrEqual = (leftHigh < rightHigh) || (leftHigh == rightHigh && Long.compareUnsigned(leftLow, rightLow) <= 0); + output[j] = lessOrEqual ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + int pos = leftInputVectorSlot.realPositionOf(i); + + long leftLow = decimal128Low[pos]; + long leftHigh = decimal128High[pos]; + + boolean lessOrEqual = (leftHigh < rightHigh) || (leftHigh == rightHigh && Long.compareUnsigned(leftLow, rightLow) <= 0); + output[i] = lessOrEqual ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + return; + } + + // do normal decimal compare + DecimalStructure leftDec; + DecimalStructure operand1Dec = operand1.getDecimalStructure(); + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + // fetch left decimal value + leftDec = new DecimalStructure(leftInputVectorSlot.getRegion(j)); + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) <= 0; + output[j] = b1 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + leftDec = new DecimalStructure(leftInputVectorSlot.getRegion(i)); + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) <= 0; + output[i] = b1 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + } +} + + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastLTDecimalColDecimalConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastLTDecimalColDecimalConstVectorizedExpression.java new file mode 100644 index 000000000..cb1f89582 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastLTDecimalColDecimalConstVectorizedExpression.java @@ -0,0 +1,187 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.vectorized.compare; + +import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.datatype.DecimalConverter; +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.FastDecimalUtils; +import com.alibaba.polardbx.executor.chunk.DecimalBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; +import com.alibaba.polardbx.executor.chunk.MutableChunk; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.vectorized.AbstractVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.EvaluationContext; +import com.alibaba.polardbx.executor.vectorized.LiteralVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; +import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import io.airlift.slice.Slice; + +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Const; +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; +import static com.alibaba.polardbx.executor.vectorized.metadata.ExpressionPriority.SPECIAL; + +@ExpressionSignatures( + names = {"LT", "<"}, + argumentTypes = {"Decimal", "Decimal"}, + argumentKinds = {Variable, Const}, + priority = SPECIAL +) +public class FastLTDecimalColDecimalConstVectorizedExpression extends AbstractVectorizedExpression { + + private final boolean operand1IsNull; + private final Decimal operand1; + private final boolean useOperand1WithScale; + private final long operand1WithScale; + + public FastLTDecimalColDecimalConstVectorizedExpression(int outputIndex, VectorizedExpression[] children) { + super(DataTypes.LongType, outputIndex, children); + + Object operand1Value = ((LiteralVectorizedExpression) children[1]).getConvertedValue(); + + if (operand1Value == null) { + operand1IsNull = true; + operand1 = Decimal.ZERO; + operand1WithScale = 0; + useOperand1WithScale = true; + return; + } + operand1IsNull = false; + operand1 = (Decimal) operand1Value; + if (operand1.compareTo(Decimal.ZERO) == 0) { + operand1WithScale = 0; + useOperand1WithScale = true; + return; + } + if (!DecimalConverter.isDecimal64(operand1.precision())) { + operand1WithScale = 0; + useOperand1WithScale = false; + } else { + DecimalStructure tmpBuffer = new DecimalStructure(); + operand1WithScale = operand1.unscale(tmpBuffer); + useOperand1WithScale = true; + } + } + + @Override + public void eval(EvaluationContext ctx) { + children[0].eval(ctx); + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); + + RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + DecimalBlock leftInputVectorSlot = + chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()) + .cast(DecimalBlock.class); + + long[] output = (outputVectorSlot.cast(LongBlock.class)).longArray(); + + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); + boolean useDecimal64Compare = useOperand1WithScale && leftInputVectorSlot.isDecimal64() + && checkSameScale(leftInputVectorSlot); + if (useDecimal64Compare) { + // do Decimal64 compare + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int 
j = sel[i]; + long leftVal = leftInputVectorSlot.getLong(j); + output[j] = leftVal < operand1WithScale ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + long leftVal = leftInputVectorSlot.getLong(i); + output[i] = leftVal < operand1WithScale ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + return; + } + boolean useDecimal128Compare = useOperand1WithScale && leftInputVectorSlot.isDecimal128() + && checkSameScale(leftInputVectorSlot); + if (useDecimal128Compare) { + long[] decimal128Low = leftInputVectorSlot.getDecimal128LowValues(); + long[] decimal128High = leftInputVectorSlot.getDecimal128HighValues(); + long rightLow = operand1WithScale; + long rightHigh = operand1WithScale >= 0 ? 0 : -1; + + // do Decimal128 compare: high words compare signed, low words compare unsigned + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + int pos = leftInputVectorSlot.realPositionOf(j); + + long leftLow = decimal128Low[pos]; + long leftHigh = decimal128High[pos]; + + boolean less = (leftHigh < rightHigh) || (leftHigh == rightHigh && Long.compareUnsigned(leftLow, rightLow) < 0); + output[j] = less ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + int pos = leftInputVectorSlot.realPositionOf(i); + + long leftLow = decimal128Low[pos]; + long leftHigh = decimal128High[pos]; + + boolean less = (leftHigh < rightHigh) || (leftHigh == rightHigh && Long.compareUnsigned(leftLow, rightLow) < 0); + output[i] = less ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + return; + } + + // do normal decimal compare + DecimalStructure leftDec; + DecimalStructure operand1Dec = operand1.getDecimalStructure(); + Slice cachedSlice = leftInputVectorSlot.allocCachedSlice(); + + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + // fetch left decimal value + leftDec = + new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(j, cachedSlice)); + + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) < 0; + + output[j] = b1 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + leftDec = + new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(i, cachedSlice)); + + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) < 0; + + output[i] = b1 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + } + + private boolean checkSameScale(DecimalBlock leftInputVectorSlot) { + return leftInputVectorSlot.getScale() == operand1.scale(); + } +} + + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastLTDecimalColLongConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastLTDecimalColLongConstVectorizedExpression.java new file mode 100644 index 000000000..8628b1bf8 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/FastLTDecimalColLongConstVectorizedExpression.java @@ -0,0 +1,176 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.vectorized.compare; + +import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.DecimalTypeBase; +import com.alibaba.polardbx.common.datatype.FastDecimalUtils; +import com.alibaba.polardbx.common.utils.MathUtils; +import com.alibaba.polardbx.executor.chunk.DecimalBlock; +import com.alibaba.polardbx.executor.chunk.LongBlock; +import com.alibaba.polardbx.executor.chunk.MutableChunk; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.vectorized.AbstractVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.EvaluationContext; +import com.alibaba.polardbx.executor.vectorized.LiteralVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; +import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; + +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Const; +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; +import static com.alibaba.polardbx.executor.vectorized.metadata.ExpressionPriority.SPECIAL; + +@ExpressionSignatures( + names = {"LT", "<"}, + argumentTypes = {"Decimal", "Long"}, + argumentKinds = {Variable, Const}, + priority = SPECIAL +) +public class FastLTDecimalColLongConstVectorizedExpression extends AbstractVectorizedExpression { + + private final boolean operand1IsNull; + private final Decimal operand1; + private final boolean useOperand1WithScale; + private final long operand1WithScale; + + public FastLTDecimalColLongConstVectorizedExpression(int outputIndex, VectorizedExpression[] children) { + super(DataTypes.LongType, outputIndex, children); + + Object operand1Value = ((LiteralVectorizedExpression) children[1]).getConvertedValue(); + if (operand1Value == null) { + operand1IsNull = true; + operand1 = Decimal.ZERO; + operand1WithScale = 0; + useOperand1WithScale = true; + } else { + operand1IsNull = false; + operand1 = DataTypes.DecimalType.convertFrom(operand1Value); + long left = (long) operand1Value; + if (left == 0) { + operand1WithScale = 0; + useOperand1WithScale = true; + return; + } + int scale = children[0].getOutputDataType().getScale(); + if (scale < 0 || scale >= DecimalTypeBase.POW_10.length) { + operand1WithScale = 0; + useOperand1WithScale = false; + } else { + long power = DecimalTypeBase.POW_10[scale]; + operand1WithScale = left * power; + useOperand1WithScale = !MathUtils.longMultiplyOverflow(left, power, operand1WithScale); + } + } + } + + @Override + public void eval(EvaluationContext ctx) { + children[0].eval(ctx); + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); + 
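+ // fast-path dispatch: decimal64 blocks compare unscaled longs directly, + // decimal128 blocks compare (high, low) word pairs, and everything else + // falls back to FastDecimalUtils.compare on materialized DecimalStructures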
+ RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); + DecimalBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()) + .cast(DecimalBlock.class); + + long[] output = (outputVectorSlot.cast(LongBlock.class)).longArray(); + + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); + if (leftInputVectorSlot.isDecimal64() && useOperand1WithScale) { + // do Decimal64 compare + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + long leftVal = leftInputVectorSlot.getLong(j); + output[j] = leftVal < operand1WithScale ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + long leftVal = leftInputVectorSlot.getLong(i); + output[i] = leftVal < operand1WithScale ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + return; + } + + if (leftInputVectorSlot.isDecimal128() && useOperand1WithScale) { + long[] decimal128Low = leftInputVectorSlot.getDecimal128LowValues(); + long[] decimal128High = leftInputVectorSlot.getDecimal128HighValues(); + long rightLow = operand1WithScale; + long rightHigh = operand1WithScale >= 0 ? 0 : -1; + + // do Decimal128 compare: high words compare signed, low words compare unsigned + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + int pos = leftInputVectorSlot.realPositionOf(j); + + long leftLow = decimal128Low[pos]; + long leftHigh = decimal128High[pos]; + + boolean less = (leftHigh < rightHigh) || (leftHigh == rightHigh && Long.compareUnsigned(leftLow, rightLow) < 0); + output[j] = less ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + int pos = leftInputVectorSlot.realPositionOf(i); + + long leftLow = decimal128Low[pos]; + long leftHigh = decimal128High[pos]; + + boolean less = (leftHigh < rightHigh) || (leftHigh == rightHigh && Long.compareUnsigned(leftLow, rightLow) < 0); + output[i] = less ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + return; + } + + // do normal decimal compare + DecimalStructure leftDec; + DecimalStructure operand1Dec = operand1.getDecimalStructure(); + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + // fetch left decimal value + leftDec = new DecimalStructure(leftInputVectorSlot.getRegion(j)); + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) < 0; + output[j] = b1 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + + // fetch left decimal value + leftDec = new DecimalStructure(leftInputVectorSlot.getRegion(i)); + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) < 0; + output[i] = b1 ? 
LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + } +} + + diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/GTDateColDateColVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/GTDateColDateColVectorizedExpression.java index 0587eb509..68baa4bee 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/GTDateColDateColVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/GTDateColDateColVectorizedExpression.java @@ -54,9 +54,9 @@ public void eval(EvaluationContext ctx) { chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType()); if (leftInputVectorSlot instanceof DateBlock && rightInputVectorSlot instanceof DateBlock) { - long[] array1 = ((DateBlock) leftInputVectorSlot).getPacked(); + long[] array1 = leftInputVectorSlot.cast(DateBlock.class).getPacked(); long[] array2 = ((DateBlock) rightInputVectorSlot).getPacked(); - long[] res = ((LongBlock) outputVectorSlot).longArray(); + long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray(); if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { @@ -72,7 +72,7 @@ public void eval(EvaluationContext ctx) { VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex(), children[1].getOutputIndex()); } else if (leftInputVectorSlot instanceof ReferenceBlock && rightInputVectorSlot instanceof ReferenceBlock) { - long[] res = ((LongBlock) outputVectorSlot).longArray(); + long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray(); if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { @@ -94,8 +94,8 @@ public void eval(EvaluationContext ctx) { VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex(), children[1].getOutputIndex()); } else if (leftInputVectorSlot instanceof DateBlock && rightInputVectorSlot instanceof ReferenceBlock) { - long[] res = ((LongBlock) outputVectorSlot).longArray(); - long[] array1 = ((DateBlock) leftInputVectorSlot).getPacked(); + long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray(); + long[] array1 = leftInputVectorSlot.cast(DateBlock.class).getPacked(); if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { @@ -113,7 +113,7 @@ public void eval(EvaluationContext ctx) { VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex(), children[1].getOutputIndex()); } else if (leftInputVectorSlot instanceof ReferenceBlock && rightInputVectorSlot instanceof DateBlock) { - long[] res = ((LongBlock) outputVectorSlot).longArray(); + long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray(); long[] array2 = ((DateBlock) rightInputVectorSlot).getPacked(); if (isSelectionInUse) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/LTDateColDateColVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/LTDateColDateColVectorizedExpression.java index c4a2a6c90..3a1be29b6 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/LTDateColDateColVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/LTDateColDateColVectorizedExpression.java @@ -16,9 +16,6 @@ package com.alibaba.polardbx.executor.vectorized.compare; -import com.alibaba.polardbx.common.utils.time.core.MysqlDateTime; -import 
com.alibaba.polardbx.common.utils.time.core.OriginalDate; -import com.alibaba.polardbx.common.utils.time.core.TimeStorage; import com.alibaba.polardbx.executor.chunk.DateBlock; import com.alibaba.polardbx.executor.chunk.LongBlock; import com.alibaba.polardbx.executor.chunk.MutableChunk; @@ -57,9 +54,9 @@ public void eval(EvaluationContext ctx) { chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType()); if (leftInputVectorSlot instanceof DateBlock && rightInputVectorSlot instanceof DateBlock) { - long[] array1 = ((DateBlock) leftInputVectorSlot).getPacked(); - long[] array2 = ((DateBlock) rightInputVectorSlot).getPacked(); - long[] res = ((LongBlock) outputVectorSlot).longArray(); + long[] array1 = leftInputVectorSlot.cast(DateBlock.class).getPacked(); + long[] array2 = rightInputVectorSlot.cast(DateBlock.class).getPacked(); + long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray(); if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { @@ -75,7 +72,7 @@ public void eval(EvaluationContext ctx) { VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex(), children[1].getOutputIndex()); } else if (leftInputVectorSlot instanceof ReferenceBlock && rightInputVectorSlot instanceof ReferenceBlock) { - long[] res = ((LongBlock) outputVectorSlot).longArray(); + long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray(); if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { @@ -97,8 +94,8 @@ public void eval(EvaluationContext ctx) { VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex(), children[1].getOutputIndex()); } else if (leftInputVectorSlot instanceof DateBlock && rightInputVectorSlot instanceof ReferenceBlock) { - long[] res = ((LongBlock) outputVectorSlot).longArray(); - long[] array1 = ((DateBlock) leftInputVectorSlot).getPacked(); + long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray(); + long[] array1 = leftInputVectorSlot.cast(DateBlock.class).getPacked(); if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { @@ -116,7 +113,7 @@ public void eval(EvaluationContext ctx) { VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex(), children[1].getOutputIndex()); } else if (leftInputVectorSlot instanceof ReferenceBlock && rightInputVectorSlot instanceof DateBlock) { - long[] res = ((LongBlock) outputVectorSlot).longArray(); + long[] res = (outputVectorSlot.cast(LongBlock.class)).longArray(); long[] array2 = ((DateBlock) rightInputVectorSlot).getPacked(); if (isSelectionInUse) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/LikeVarcharColCharConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/LikeVarcharColCharConstVectorizedExpression.java index b5b6435f6..c05ad0acb 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/LikeVarcharColCharConstVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/LikeVarcharColCharConstVectorizedExpression.java @@ -16,6 +16,7 @@ package com.alibaba.polardbx.executor.vectorized.compare; +import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.optimizer.config.table.charset.CharsetFactory; import com.alibaba.polardbx.optimizer.config.table.collation.CollationHandler; import com.alibaba.polardbx.executor.chunk.LongBlock; @@ -29,6 +30,9 @@ import 
com.alibaba.polardbx.executor.vectorized.VectorizedExpression; import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; +import com.alibaba.polardbx.optimizer.config.table.charset.CharsetFactory; +import com.alibaba.polardbx.optimizer.config.table.charset.CollationHandlers; +import com.alibaba.polardbx.optimizer.config.table.collation.CollationHandler; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.datatype.SliceType; import io.airlift.slice.Slice; @@ -39,10 +43,17 @@ @ExpressionSignatures(names = {"LIKE"}, argumentTypes = {"Varchar", "Char"}, argumentKinds = {Variable, Const}) public class LikeVarcharColCharConstVectorizedExpression extends AbstractVectorizedExpression { - private final CollationHandler collationHandler; - + private final CollationHandler originCollationHandler; + private final CollationHandler latin1CollationHandler = CollationHandlers.COLLATION_HANDLER_LATIN1_BIN; private final boolean operand1IsNull; private final Slice operand1; + /** + * works with ENABLE_OSS_COMPATIBLE=false + */ + private final boolean canUseLatin1Collation; + private boolean isContainsCompare = false; + private byte[] containBytes = null; + private int[] lps = null; public LikeVarcharColCharConstVectorizedExpression( int outputIndex, @@ -50,8 +61,6 @@ public LikeVarcharColCharConstVectorizedExpression( super(DataTypes.LongType, outputIndex, children); SliceType sliceType = (SliceType) children[0].getOutputDataType(); - this.collationHandler = CharsetFactory.DEFAULT_CHARSET_HANDLER.getCollationHandler(); - Object operand1Value = ((LiteralVectorizedExpression) children[1]).getConvertedValue(); if (operand1Value == null) { operand1IsNull = true; @@ -60,11 +69,88 @@ public LikeVarcharColCharConstVectorizedExpression( operand1IsNull = false; operand1 = sliceType.convertFrom(operand1Value); } + // FIXME did not consider collation here + this.originCollationHandler = CharsetFactory.DEFAULT_CHARSET_HANDLER.getCollationHandler(); + + if (sliceType.isLatin1Encoding()) { + canUseLatin1Collation = true; + checkIsContainsCompare(operand1); + } else { + if (isAsciiEncoding(operand1)) { + canUseLatin1Collation = true; + checkIsContainsCompare(operand1); + } else { + canUseLatin1Collation = false; + } + } + } + + public static int[] computeLPSArray(byte[] pattern) { + int[] lps = new int[pattern.length]; + int length = 0; + lps[0] = 0; + int i = 1; + + while (i < pattern.length) { + if (pattern[i] == pattern[length]) { + length++; + lps[i] = length; + i++; + } else { + if (length != 0) { + length = lps[length - 1]; + } else { + lps[i] = length; + i++; + } + } + } + return lps; + } + + private void checkIsContainsCompare(Slice operand) { + if (operand == null || operand.length() < 2) { + return; + } + if (!canUseLatin1Collation) { + return; + } + byte[] bytes = operand.getBytes(); + if (bytes[0] == CollationHandler.WILD_MANY && bytes[bytes.length - 1] == CollationHandler.WILD_MANY) { + for (int i = 1; i < bytes.length - 1; i++) { + if (bytes[i] == CollationHandler.WILD_MANY || bytes[i] == CollationHandler.WILD_ONE) { + // no % _ in the middle + return; + } + } + this.isContainsCompare = true; + this.containBytes = new byte[operand.length() - 2]; + System.arraycopy(bytes, 1, containBytes, 0, operand.length() - 2); + this.lps = computeLPSArray(containBytes); + } + } + + private boolean isAsciiEncoding(Slice operand) { + if (operand == null) { + return true; + } + 
byte[] bytes = operand.getBytes(); + for (byte b : bytes) { + if (b < 0 || b == CollationHandler.WILD_ONE) { + return false; + } + } + return true; } @Override public void eval(EvaluationContext ctx) { children[0].eval(ctx); + + final boolean compatible = + ctx.getExecutionContext().getParamManager().getBoolean(ConnectionParams.ENABLE_OSS_COMPATIBLE); + boolean useLatin1Compare = canUseLatin1Collation && !compatible; + MutableChunk chunk = ctx.getPreAllocatedChunk(); int batchSize = chunk.batchSize(); boolean isSelectionInUse = chunk.isSelectionInUse(); @@ -74,37 +160,53 @@ public void eval(EvaluationContext ctx) { RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - long[] output = ((LongBlock) outputVectorSlot).longArray(); - if (operand1IsNull) { boolean[] outputNulls = outputVectorSlot.nulls(); - outputVectorSlot.setHasNull(true); - for (int i = 0; i < batchSize; i++) { - outputNulls[i] = true; + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + outputNulls[j] = true; + } + } else { + for (int i = 0; i < batchSize; i++) { + outputNulls[i] = true; + } } return; } VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); + long[] output = (outputVectorSlot.cast(LongBlock.class)).longArray(); + + if (useLatin1Compare) { + doLatin1Like(leftInputVectorSlot, isSelectionInUse, sel, batchSize, output); + } else { + doCollationLike(leftInputVectorSlot, isSelectionInUse, sel, batchSize, output); + } + } + + private void doCollationLike(RandomAccessBlock leftInputVectorSlot, boolean isSelectionInUse, int[] sel, + int batchSize, long[] output) { + Slice cachedSlice = new Slice(); if (leftInputVectorSlot instanceof SliceBlock) { - SliceBlock sliceBlock = (SliceBlock) leftInputVectorSlot; + SliceBlock sliceBlock = leftInputVectorSlot.cast(SliceBlock.class); if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { int j = sel[i]; - Slice slice = sliceBlock.getRegion(j); + Slice slice = sliceBlock.getRegion(j, cachedSlice); - output[j] = collationHandler.wildCompare(slice, operand1) + output[j] = originCollationHandler.wildCompare(slice, operand1) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; } } else { for (int i = 0; i < batchSize; i++) { - Slice slice = sliceBlock.getRegion(i); + Slice slice = sliceBlock.getRegion(i, cachedSlice); - output[i] = collationHandler.wildCompare(slice, operand1) + output[i] = originCollationHandler.wildCompare(slice, operand1) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; } } @@ -117,7 +219,113 @@ public void eval(EvaluationContext ctx) { if (lSlice == null) { lSlice = Slices.EMPTY_SLICE; } - output[j] = collationHandler.wildCompare(lSlice, operand1) + output[j] = originCollationHandler.wildCompare(lSlice, operand1) + ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + + Slice lSlice = ((Slice) leftInputVectorSlot.elementAt(i)); + if (lSlice == null) { + lSlice = Slices.EMPTY_SLICE; + } + output[i] = originCollationHandler.wildCompare(lSlice, operand1) + ? 
LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + } + } + + private void doLatin1Like(RandomAccessBlock leftInputVectorSlot, boolean isSelectionInUse, + int[] sel, int batchSize, long[] output) { + if (leftInputVectorSlot instanceof SliceBlock) { + SliceBlock sliceBlock = leftInputVectorSlot.cast(SliceBlock.class); + doLatin1LikeSlice(sliceBlock, isSelectionInUse, sel, batchSize, output); + } else if (leftInputVectorSlot instanceof ReferenceBlock) { + doLatin1LikeReference(leftInputVectorSlot, isSelectionInUse, sel, batchSize, output); + } + } + + private void doLatin1LikeSlice(SliceBlock sliceBlock, boolean isSelectionInUse, int[] sel, int batchSize, + long[] output) { + Slice cachedSlice = new Slice(); + + if (isSelectionInUse) { + if (isContainsCompare && containBytes != null) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + Slice slice = sliceBlock.getRegion(j, cachedSlice); + + output[j] = latin1CollationHandler.containsCompare(slice, containBytes, lps) + ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + Slice slice = sliceBlock.getRegion(j, cachedSlice); + + output[j] = latin1CollationHandler.wildCompare(slice, operand1) + ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + } else { + if (isContainsCompare && containBytes != null) { + for (int i = 0; i < batchSize; i++) { + + Slice slice = sliceBlock.getRegion(i, cachedSlice); + + output[i] = latin1CollationHandler.containsCompare(slice, containBytes, lps) + ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + + Slice slice = sliceBlock.getRegion(i, cachedSlice); + + output[i] = latin1CollationHandler.wildCompare(slice, operand1) + ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + } + } + + private void doLatin1LikeReference(RandomAccessBlock leftInputVectorSlot, boolean isSelectionInUse, int[] sel, + int batchSize, long[] output) { + if (isSelectionInUse) { + if (isContainsCompare && containBytes != null) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + Slice lSlice = ((Slice) leftInputVectorSlot.elementAt(j)); + if (lSlice == null) { + lSlice = Slices.EMPTY_SLICE; + } + output[j] = latin1CollationHandler.containsCompare(lSlice, containBytes, lps) + ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } else { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + Slice lSlice = ((Slice) leftInputVectorSlot.elementAt(j)); + if (lSlice == null) { + lSlice = Slices.EMPTY_SLICE; + } + output[j] = latin1CollationHandler.wildCompare(lSlice, operand1) + ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + } + } + } else { + if (isContainsCompare && containBytes != null) { + for (int i = 0; i < batchSize; i++) { + + Slice lSlice = ((Slice) leftInputVectorSlot.elementAt(i)); + if (lSlice == null) { + lSlice = Slices.EMPTY_SLICE; + } + output[i] = latin1CollationHandler.containsCompare(lSlice, containBytes, lps) ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; } } else { @@ -127,7 +335,7 @@ public void eval(EvaluationContext ctx) { if (lSlice == null) { lSlice = Slices.EMPTY_SLICE; } - output[i] = collationHandler.wildCompare(lSlice, operand1) + output[i] = latin1CollationHandler.wildCompare(lSlice, operand1) ? 
LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/NEVarcharColCharConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/NEVarcharColCharConstVectorizedExpression.java index eca1453f5..444403ce5 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/NEVarcharColCharConstVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/compare/NEVarcharColCharConstVectorizedExpression.java @@ -38,7 +38,8 @@ import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Const; import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; -@ExpressionSignatures(names = {"NE","!=","<>"}, argumentTypes = {"Varchar", "Char"}, argumentKinds = {Variable, Const}) +@ExpressionSignatures(names = {"NE", "!=", "<>"}, argumentTypes = {"Varchar", "Char"}, + argumentKinds = {Variable, Const}) public class NEVarcharColCharConstVectorizedExpression extends AbstractVectorizedExpression { protected final CollationHandler collationHandler; @@ -72,8 +73,7 @@ public void eval(EvaluationContext ctx) { boolean isSelectionInUse = chunk.isSelectionInUse(); int[] sel = chunk.selection(); - final boolean compatible = - ctx.getExecutionContext().getParamManager().getBoolean(ConnectionParams.ENABLE_OSS_COMPATIBLE); + final boolean compatible = ctx.getExecutionContext().isEnableOssCompatible(); Comparable operandSortKey; if (operand == null) { operandSortKey = null; @@ -87,7 +87,7 @@ public void eval(EvaluationContext ctx) { RandomAccessBlock leftInputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - long[] output = ((LongBlock) outputVectorSlot).longArray(); + long[] output = (outputVectorSlot.cast(LongBlock.class)).longArray(); if (operandIsNull) { boolean[] outputNulls = outputVectorSlot.nulls(); @@ -99,7 +99,7 @@ public void eval(EvaluationContext ctx) { VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); if (leftInputVectorSlot instanceof SliceBlock) { - SliceBlock sliceBlock = (SliceBlock) leftInputVectorSlot; + SliceBlock sliceBlock = leftInputVectorSlot.cast(SliceBlock.class); if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastCharToDecimalVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastCharToDecimalVectorizedExpression.java index 01ebd7b4f..8aecffd50 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastCharToDecimalVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastCharToDecimalVectorizedExpression.java @@ -23,7 +23,8 @@ import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; @SuppressWarnings("unused") -@ExpressionSignatures(names = {"CastToDecimal", "ConvertToDecimal"}, argumentTypes = {"Char"}, argumentKinds = {Variable}) +@ExpressionSignatures(names = {"CastToDecimal", "ConvertToDecimal"}, argumentTypes = {"Char"}, + argumentKinds = {Variable}) public class CastCharToDecimalVectorizedExpression extends CastVarcharToDecimalVectorizedExpression { public CastCharToDecimalVectorizedExpression(DataType outputDataType, int outputIndex, diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastDecimalToDecimalVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastDecimalToDecimalVectorizedExpression.java index 7100d8c32..8dc354f4a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastDecimalToDecimalVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastDecimalToDecimalVectorizedExpression.java @@ -27,13 +27,13 @@ import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import io.airlift.slice.Slice; import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DECIMAL_MEMORY_SIZE; import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; @SuppressWarnings("unused") -@ExpressionSignatures(names = {"CastToDecimal", "ConvertToDecimal"}, argumentTypes = {"Decimal"}, argumentKinds = {Variable}) +@ExpressionSignatures(names = {"CastToDecimal", "ConvertToDecimal"}, argumentTypes = {"Decimal"}, + argumentKinds = {Variable}) public class CastDecimalToDecimalVectorizedExpression extends AbstractVectorizedExpression { public CastDecimalToDecimalVectorizedExpression(DataType outputDataType, int outputIndex, @@ -67,8 +67,10 @@ public void eval(EvaluationContext ctx) { int fromIndex = j * DECIMAL_MEMORY_SIZE; // The convert result will directly wrote to decimal memory segment - DecimalStructure fromValue = new DecimalStructure(((DecimalBlock) inputVectorSlot).getRegion(j)); - DecimalStructure toValue = new DecimalStructure(((DecimalBlock) outputVectorSlot).getRegion(j)); + DecimalStructure fromValue = + new DecimalStructure((inputVectorSlot.cast(DecimalBlock.class)).getRegion(j)); + DecimalStructure toValue = + new DecimalStructure((outputVectorSlot.cast(DecimalBlock.class)).getRegion(j)); // do rescale operation DecimalConverter.rescale(fromValue, toValue, precision, scale, false); @@ -78,12 +80,15 @@ public void eval(EvaluationContext ctx) { int fromIndex = i * DECIMAL_MEMORY_SIZE; // The convert result will directly wrote to decimal memory segment - DecimalStructure fromValue = new DecimalStructure(((DecimalBlock) inputVectorSlot).getRegion(i)); - DecimalStructure toValue = new DecimalStructure(((DecimalBlock) outputVectorSlot).getRegion(i)); + DecimalStructure fromValue = + new DecimalStructure((inputVectorSlot.cast(DecimalBlock.class)).getRegion(i)); + DecimalStructure toValue = + new DecimalStructure((outputVectorSlot.cast(DecimalBlock.class)).getRegion(i)); // do rescale operation DecimalConverter.rescale(fromValue, toValue, precision, scale, false); } } + outputVectorSlot.cast(DecimalBlock.class).setFullState(); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastDecimalToSignedVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastDecimalToSignedVectorizedExpression.java index 8d1def7fc..70ac3ee8f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastDecimalToSignedVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastDecimalToSignedVectorizedExpression.java @@ -30,7 +30,6 @@ import 
com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import io.airlift.slice.Slice; import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DECIMAL_MEMORY_SIZE; import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; @@ -57,7 +56,7 @@ public void eval(EvaluationContext ctx) { RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - long[] output = ((LongBlock) outputVectorSlot).longArray(); + long[] output = (outputVectorSlot.cast(LongBlock.class)).longArray(); DecimalStructure tmpDecimal = new DecimalStructure(); @@ -70,7 +69,8 @@ public void eval(EvaluationContext ctx) { int fromIndex = j * DECIMAL_MEMORY_SIZE; // The convert result will directly wrote to decimal memory segment - DecimalStructure fromValue = new DecimalStructure(((DecimalBlock) inputVectorSlot).getRegion(j)); + DecimalStructure fromValue = + new DecimalStructure((inputVectorSlot.cast(DecimalBlock.class)).getRegion(j)); tmpDecimal.reset(); FastDecimalUtils.round(fromValue, tmpDecimal, 0, DecimalRoundMod.HALF_UP); @@ -82,7 +82,8 @@ public void eval(EvaluationContext ctx) { int fromIndex = i * DECIMAL_MEMORY_SIZE; // The convert result will directly wrote to decimal memory segment - DecimalStructure fromValue = new DecimalStructure(((DecimalBlock) inputVectorSlot).getRegion(i)); + DecimalStructure fromValue = + new DecimalStructure((inputVectorSlot.cast(DecimalBlock.class)).getRegion(i)); tmpDecimal.reset(); FastDecimalUtils.round(fromValue, tmpDecimal, 0, DecimalRoundMod.HALF_UP); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastDecimalToUnsignedVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastDecimalToUnsignedVectorizedExpression.java index d85aea8c4..cd6df8e8e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastDecimalToUnsignedVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastDecimalToUnsignedVectorizedExpression.java @@ -30,7 +30,6 @@ import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import io.airlift.slice.Slice; import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DECIMAL_MEMORY_SIZE; import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; @@ -56,7 +55,7 @@ public void eval(EvaluationContext ctx) { RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - long[] output = ((ULongBlock) outputVectorSlot).longArray(); + long[] output = outputVectorSlot.cast(ULongBlock.class).longArray(); DecimalStructure tmpDecimal = new DecimalStructure(); @@ -69,7 +68,8 @@ public void eval(EvaluationContext ctx) { int fromIndex = j * DECIMAL_MEMORY_SIZE; // The convert result will directly wrote to decimal memory segment - DecimalStructure fromValue = new DecimalStructure(((DecimalBlock) inputVectorSlot).getRegion(j)); + DecimalStructure 
fromValue = + new DecimalStructure((inputVectorSlot.cast(DecimalBlock.class)).getRegion(j)); tmpDecimal.reset(); FastDecimalUtils.round(fromValue, tmpDecimal, 0, DecimalRoundMod.HALF_UP); @@ -81,7 +81,8 @@ public void eval(EvaluationContext ctx) { int fromIndex = i * DECIMAL_MEMORY_SIZE; // The convert result will directly wrote to decimal memory segment - DecimalStructure fromValue = new DecimalStructure(((DecimalBlock) inputVectorSlot).getRegion(i)); + DecimalStructure fromValue = + new DecimalStructure((inputVectorSlot.cast(DecimalBlock.class)).getRegion(i)); tmpDecimal.reset(); FastDecimalUtils.round(fromValue, tmpDecimal, 0, DecimalRoundMod.HALF_UP); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastDoubleToDecimalVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastDoubleToDecimalVectorizedExpression.java index 2891c7c3b..f7896b540 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastDoubleToDecimalVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastDoubleToDecimalVectorizedExpression.java @@ -34,10 +34,12 @@ import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; @SuppressWarnings("unused") -@ExpressionSignatures(names = {"CastToDecimal", "ConvertToDecimal"}, argumentTypes = {"Double"}, argumentKinds = {Variable}) +@ExpressionSignatures(names = {"CastToDecimal", "ConvertToDecimal"}, argumentTypes = {"Double"}, + argumentKinds = {Variable}) public class CastDoubleToDecimalVectorizedExpression extends AbstractVectorizedExpression { - public CastDoubleToDecimalVectorizedExpression(DataType outputDataType, int outputIndex, VectorizedExpression[] children) { + public CastDoubleToDecimalVectorizedExpression(DataType outputDataType, int outputIndex, + VectorizedExpression[] children) { super(outputDataType, outputIndex, children); } @@ -53,8 +55,8 @@ public void eval(EvaluationContext ctx) { RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - double[] input = ((DoubleBlock) inputVectorSlot).doubleArray(); - Slice output = ((DecimalBlock) outputVectorSlot).getMemorySegments(); + double[] input = inputVectorSlot.cast(DoubleBlock.class).doubleArray(); + Slice output = (outputVectorSlot.cast(DecimalBlock.class)).getMemorySegments(); DecimalStructure tmpDecimal = new DecimalStructure(); int precision = outputDataType.getPrecision(); @@ -101,5 +103,6 @@ public void eval(EvaluationContext ctx) { DecimalConverter.rescale(tmpDecimal, toValue, precision, scale, isUnsigned); } } + outputVectorSlot.cast(DecimalBlock.class).setFullState(); } } \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastFloatToDecimalVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastFloatToDecimalVectorizedExpression.java index 026fcee90..ccb476eae 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastFloatToDecimalVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastFloatToDecimalVectorizedExpression.java @@ -34,10 +34,12 @@ import static 
com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; @SuppressWarnings("unused") -@ExpressionSignatures(names = {"CastToDecimal", "ConvertToDecimal"}, argumentTypes = {"Float"}, argumentKinds = {Variable}) +@ExpressionSignatures(names = {"CastToDecimal", "ConvertToDecimal"}, argumentTypes = {"Float"}, + argumentKinds = {Variable}) public class CastFloatToDecimalVectorizedExpression extends AbstractVectorizedExpression { - public CastFloatToDecimalVectorizedExpression(DataType outputDataType, int outputIndex, VectorizedExpression[] children) { + public CastFloatToDecimalVectorizedExpression(DataType outputDataType, int outputIndex, + VectorizedExpression[] children) { super(outputDataType, outputIndex, children); } @@ -53,8 +55,8 @@ public void eval(EvaluationContext ctx) { RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - float[] input = ((FloatBlock) inputVectorSlot).floatArray(); - Slice output = ((DecimalBlock) outputVectorSlot).getMemorySegments(); + float[] input = inputVectorSlot.cast(FloatBlock.class).floatArray(); + Slice output = (outputVectorSlot.cast(DecimalBlock.class)).getMemorySegments(); DecimalStructure tmpDecimal = new DecimalStructure(); int precision = outputDataType.getPrecision(); @@ -101,5 +103,6 @@ public void eval(EvaluationContext ctx) { DecimalConverter.rescale(tmpDecimal, toValue, precision, scale, isUnsigned); } } + outputVectorSlot.cast(DecimalBlock.class).setFullState(); } } \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastLongConstToDecimalVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastLongConstToDecimalVectorizedExpression.java new file mode 100644 index 000000000..f23617c34 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastLongConstToDecimalVectorizedExpression.java @@ -0,0 +1,152 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.executor.vectorized.convert; + +import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.datatype.DecimalConverter; +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.DecimalTypeBase; +import com.alibaba.polardbx.common.utils.MathUtils; +import com.alibaba.polardbx.executor.chunk.DecimalBlock; +import com.alibaba.polardbx.executor.chunk.MutableChunk; +import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; +import com.alibaba.polardbx.executor.chunk.ULongBlock; +import com.alibaba.polardbx.executor.vectorized.AbstractVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.EvaluationContext; +import com.alibaba.polardbx.executor.vectorized.LiteralVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; +import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import io.airlift.slice.Slice; + +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DECIMAL_MEMORY_SIZE; +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Const; + +@SuppressWarnings("unused") +@ExpressionSignatures( + names = {"CastToDecimal", "ConvertToDecimal"}, + argumentTypes = {"Long"}, + argumentKinds = {Const}) +public class CastLongConstToDecimalVectorizedExpression extends AbstractVectorizedExpression { + + private final Decimal operand1; + private final boolean operand1IsNull; + private final boolean useOperand1WithScale; + private final long operand1WithScale; + + public CastLongConstToDecimalVectorizedExpression(DataType outputDataType, int outputIndex, + VectorizedExpression[] children) { + super(outputDataType, outputIndex, children); + Object operand1Value = ((LiteralVectorizedExpression) children[0]).getConvertedValue(); + if (operand1Value == null) { + operand1IsNull = true; + operand1 = Decimal.ZERO; + operand1WithScale = 0; + useOperand1WithScale = true; + } else { + operand1IsNull = false; + operand1 = DataTypes.DecimalType.convertFrom(operand1Value); + long left = (long) operand1Value; + if (left == 0) { + operand1WithScale = 0; + useOperand1WithScale = true; + return; + } + int scale = outputDataType.getScale(); + if (scale < 0 || scale >= DecimalTypeBase.POW_10.length) { + operand1WithScale = 0; + useOperand1WithScale = false; + } else { + long power = DecimalTypeBase.POW_10[scale]; + operand1WithScale = left * power; + useOperand1WithScale = !MathUtils.longMultiplyOverflow(left, power, operand1WithScale); + } + } + } + + @Override + public void eval(EvaluationContext ctx) { + super.evalChildren(ctx); + + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); + + DecimalBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType).cast(DecimalBlock.class); + if (operand1IsNull) { + VectorizedExpressionUtils.setNulls(chunk, outputIndex); + return; + } + + if (useOperand1WithScale) { + castOperandToDecimal64(outputVectorSlot, batchSize, isSelectionInUse, sel); + } else { + castOperandToNormalDecimal(outputVectorSlot, batchSize, isSelectionInUse, sel); + } + } + + /** + * Common cases: write long to decimal64 output slot + */ + private void 
castOperandToDecimal64(DecimalBlock outputVectorSlot, int batchSize, boolean isSelectionInUse, + int[] sel) { + long[] decimal64Output = outputVectorSlot.allocateDecimal64(); + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + decimal64Output[j] = operand1WithScale; + } + } else { + for (int i = 0; i < batchSize; i++) { + decimal64Output[i] = operand1WithScale; + } + } + } + + private void castOperandToNormalDecimal(DecimalBlock outputVectorSlot, int batchSize, boolean isSelectionInUse, + int[] sel) { + Slice output = (outputVectorSlot.cast(DecimalBlock.class)).getMemorySegments(); + DecimalStructure tmpDecimal = new DecimalStructure(); + int precision = outputDataType.getPrecision(); + int scale = outputDataType.getScale(); + DecimalConverter.rescale(operand1.getDecimalStructure(), tmpDecimal, precision, scale, false); + + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + int fromIndex = j * DECIMAL_MEMORY_SIZE; + + Slice decimalMemorySegment = output.slice(fromIndex, DECIMAL_MEMORY_SIZE); + DecimalStructure toValue = new DecimalStructure(decimalMemorySegment); + tmpDecimal.copyTo(toValue); + } + } else { + for (int i = 0; i < batchSize; i++) { + int fromIndex = i * DECIMAL_MEMORY_SIZE; + + Slice decimalMemorySegment = output.slice(fromIndex, DECIMAL_MEMORY_SIZE); + DecimalStructure toValue = new DecimalStructure(decimalMemorySegment); + tmpDecimal.copyTo(toValue); + } + } + outputVectorSlot.setFullState(); + } + +} \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastULongToDecimalVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastULongToDecimalVectorizedExpression.java index 1abade76d..26860aa8c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastULongToDecimalVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastULongToDecimalVectorizedExpression.java @@ -34,10 +34,12 @@ import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; @SuppressWarnings("unused") -@ExpressionSignatures(names = {"CastToDecimal", "ConvertToDecimal"}, argumentTypes = {"ULong"}, argumentKinds = {Variable}) +@ExpressionSignatures(names = {"CastToDecimal", "ConvertToDecimal"}, argumentTypes = {"ULong"}, + argumentKinds = {Variable}) public class CastULongToDecimalVectorizedExpression extends AbstractVectorizedExpression { - public CastULongToDecimalVectorizedExpression(DataType outputDataType, int outputIndex, VectorizedExpression[] children) { + public CastULongToDecimalVectorizedExpression(DataType outputDataType, int outputIndex, + VectorizedExpression[] children) { super(outputDataType, outputIndex, children); } @@ -53,8 +55,8 @@ public void eval(EvaluationContext ctx) { RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - long[] input = ((ULongBlock) inputVectorSlot).longArray(); - Slice output = ((DecimalBlock) outputVectorSlot).getMemorySegments(); + long[] input = inputVectorSlot.cast(ULongBlock.class).longArray(); + Slice output = (outputVectorSlot.cast(DecimalBlock.class)).getMemorySegments(); DecimalStructure tmpDecimal = new DecimalStructure(); int precision = outputDataType.getPrecision(); @@ -93,5 +95,6 @@ public void 
eval(EvaluationContext ctx) { DecimalConverter.rescale(tmpDecimal, toValue, precision, scale, isUnsigned); } } + outputVectorSlot.cast(DecimalBlock.class).setFullState(); } } \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastVarcharToDecimalVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastVarcharToDecimalVectorizedExpression.java index 4c348bdf2..d47efe9d8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastVarcharToDecimalVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastVarcharToDecimalVectorizedExpression.java @@ -33,9 +33,11 @@ import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; @SuppressWarnings("unused") -@ExpressionSignatures(names = {"CastToDecimal", "ConvertToDecimal"}, argumentTypes = {"Varchar"}, argumentKinds = {Variable}) +@ExpressionSignatures(names = {"CastToDecimal", "ConvertToDecimal"}, argumentTypes = {"Varchar"}, + argumentKinds = {Variable}) public class CastVarcharToDecimalVectorizedExpression extends AbstractVectorizedExpression { - public CastVarcharToDecimalVectorizedExpression(DataType outputDataType, int outputIndex, VectorizedExpression[] children) { + public CastVarcharToDecimalVectorizedExpression(DataType outputDataType, int outputIndex, + VectorizedExpression[] children) { super(outputDataType, outputIndex, children); } @@ -51,7 +53,7 @@ public void eval(EvaluationContext ctx) { RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - Slice output = ((DecimalBlock) outputVectorSlot).getMemorySegments(); + Slice output = (outputVectorSlot.cast(DecimalBlock.class)).getMemorySegments(); DecimalStructure tmpDecimal = new DecimalStructure(); int precision = outputDataType.getPrecision(); @@ -98,5 +100,6 @@ public void eval(EvaluationContext ctx) { DecimalConverter.rescale(tmpDecimal, toValue, precision, scale, isUnsigned); } } + outputVectorSlot.cast(DecimalBlock.class).setFullState(); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastVarcharToDoubleVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastVarcharToDoubleVectorizedExpression.java index 348d4f2f4..d21e81be7 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastVarcharToDoubleVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastVarcharToDoubleVectorizedExpression.java @@ -52,7 +52,7 @@ public void eval(EvaluationContext ctx) { RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - double[] output = ((DoubleBlock) outputVectorSlot).doubleArray(); + double[] output = outputVectorSlot.cast(DoubleBlock.class).doubleArray(); // handle nulls VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastVarcharToSignedVectorizedExpression.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastVarcharToSignedVectorizedExpression.java index 4eee91a72..59698fd28 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastVarcharToSignedVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastVarcharToSignedVectorizedExpression.java @@ -52,7 +52,7 @@ public void eval(EvaluationContext ctx) { RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - long[] output = ((LongBlock) outputVectorSlot).longArray(); + long[] output = (outputVectorSlot.cast(LongBlock.class)).longArray(); // handle nulls VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastVarcharToUnsignedVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastVarcharToUnsignedVectorizedExpression.java index f37769892..d7060a3d0 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastVarcharToUnsignedVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/convert/CastVarcharToUnsignedVectorizedExpression.java @@ -36,7 +36,7 @@ public class CastVarcharToUnsignedVectorizedExpression extends AbstractVectorizedExpression { public CastVarcharToUnsignedVectorizedExpression(DataType outputDataType, int outputIndex, - VectorizedExpression[] children) { + VectorizedExpression[] children) { super(outputDataType, outputIndex, children); } @@ -52,7 +52,7 @@ public void eval(EvaluationContext ctx) { RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); RandomAccessBlock inputVectorSlot = chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - long[] output = ((ULongBlock) outputVectorSlot).longArray(); + long[] output = outputVectorSlot.cast(ULongBlock.class).longArray(); // handle nulls VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastAddLongConstDecimalColVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastAddLongConstDecimalColVectorizedExpression.java index 3a401e829..1710802cf 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastAddLongConstDecimalColVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastAddLongConstDecimalColVectorizedExpression.java @@ -18,8 +18,10 @@ import com.alibaba.polardbx.common.datatype.DecimalConverter; import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.DecimalTypeBase; import com.alibaba.polardbx.common.datatype.FastDecimalUtils; import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.MathUtils; import com.alibaba.polardbx.executor.chunk.DecimalBlock; import com.alibaba.polardbx.executor.chunk.MutableChunk; import com.alibaba.polardbx.executor.vectorized.AbstractVectorizedExpression; @@ -45,6 +47,8 @@ public class FastAddLongConstDecimalColVectorizedExpression extends 
AbstractVectorizedExpression { private final boolean leftIsNull; private final long left; + private final boolean useLeftWithScale; + private final long leftWithScale; public FastAddLongConstDecimalColVectorizedExpression(int outputIndex, VectorizedExpression[] children) { super(DataTypes.DecimalType, outputIndex, children); @@ -52,9 +56,25 @@ public FastAddLongConstDecimalColVectorizedExpression(int outputIndex, Vectorize if (leftValue == null) { leftIsNull = true; left = (long) 0; + leftWithScale = 0; + useLeftWithScale = true; } else { leftIsNull = false; left = (long) leftValue; + if (left == 0) { + leftWithScale = 0; + useLeftWithScale = true; + return; + } + int scale = children[1].getOutputDataType().getScale(); + if (scale < 0 || scale >= DecimalTypeBase.POW_10.length) { + leftWithScale = 0; + useLeftWithScale = false; + } else { + long power = DecimalTypeBase.POW_10[scale]; + leftWithScale = left * power; + useLeftWithScale = !MathUtils.longMultiplyOverflow(left, power, leftWithScale); + } } } @@ -65,17 +85,31 @@ public void eval(EvaluationContext ctx) { int batchSize = chunk.batchSize(); boolean isSelectionInUse = chunk.isSelectionInUse(); int[] sel = chunk.selection(); - if (leftIsNull) { VectorizedExpressionUtils.setNulls(chunk, outputIndex); return; } - DecimalBlock outputVectorSlot = (DecimalBlock) chunk.slotIn(outputIndex, outputDataType); + DecimalBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType).cast(DecimalBlock.class); DecimalBlock rightInputVectorSlot = - (DecimalBlock) chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType()); + chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType()).cast(DecimalBlock.class); + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[1].getOutputIndex()); + boolean useLeftAsLong = useLeftWithScale + && checkResultScale(rightInputVectorSlot.getScale(), outputVectorSlot.getScale()); - Slice output = outputVectorSlot.getMemorySegments(); + if (rightInputVectorSlot.isDecimal64() && useLeftAsLong) { + boolean success = doDecimal64Add(batchSize, isSelectionInUse, sel, rightInputVectorSlot, outputVectorSlot); + if (success) { + return; + } + } + + if (rightInputVectorSlot.isDecimal128() && useLeftAsLong) { + boolean success = doDecimal128Add(batchSize, isSelectionInUse, sel, rightInputVectorSlot, outputVectorSlot); + if (success) { + return; + } + } DecimalStructure leftDec = new DecimalStructure(); @@ -89,10 +123,9 @@ public void eval(EvaluationContext ctx) { rightInputVectorSlot.collectDecimalInfo(); boolean useFastMethod = !isSelectionInUse && (rightInputVectorSlot.isSimple() && rightInputVectorSlot.getInt2Pos() == -1); - VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[1].getOutputIndex()); if (!useFastMethod || !enableFastVec) { - normalAdd(batchSize, isSelectionInUse, sel, rightInputVectorSlot, output, leftDec); + normalAdd(batchSize, isSelectionInUse, sel, rightInputVectorSlot, outputVectorSlot, leftDec); } else { // a1 + (a2 + b2 * [-9]) // = (a1 + a2) + b2 * [-9] @@ -108,18 +141,143 @@ public void eval(EvaluationContext ctx) { b2 = rightInputVectorSlot.fastFrac(i); sum = a1 + a2; + int carry = (int) (sum / 1000_000_000); if (sum < 1000_000_000) { outputVectorSlot.setAddResult1(i, (int) sum, (int) b2); } else { - outputVectorSlot.setAddResult2(i, 1, (int) (sum - 1000_000_000), (int) b2); + outputVectorSlot.setAddResult2(i, carry, (int) (sum - carry * 1000_000_000), (int) b2); } } + } + } + + private boolean checkResultScale(int scale, int resultScale) { + 
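// The constructor pre-scales the long constant to the right operand's decimal scale (e.g. the constant 3 at scale 2 is kept as 300),
+ // so the decimal64/decimal128 fast paths are only valid when that operand scale equals the result scale.
+ 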
return scale == resultScale; + } + + /** + * @return true when the add completes in the decimal64/decimal128 fast path; false means the caller must fall back to the normal decimal add + */ + private boolean doDecimal64Add(int batchSize, boolean isSelectionInUse, int[] sel, + DecimalBlock rightInputVectorSlot, DecimalBlock outputVectorSlot) { + long[] decimal64Output = outputVectorSlot.allocateDecimal64(); + boolean isOverflowDec64 = false; + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + long rightDec64 = rightInputVectorSlot.getLong(j); + long result = leftWithScale + rightDec64; + + decimal64Output[j] = result; + isOverflowDec64 |= (((left ^ result) & (rightDec64 ^ result)) < 0); + } + } else { + for (int i = 0; i < batchSize; i++) { + long rightDec64 = rightInputVectorSlot.getLong(i); + long result = leftWithScale + rightDec64; + + decimal64Output[i] = result; + isOverflowDec64 |= (((left ^ result) & (rightDec64 ^ result)) < 0); + } + } + if (!isOverflowDec64) { + return true; + } + // long + decimal64 will not overflow decimal128 + outputVectorSlot.allocateDecimal128(); + + long[] outputDec128Lows = outputVectorSlot.getDecimal128LowValues(); + long[] outputDec128Highs = outputVectorSlot.getDecimal128HighValues(); + + long leftHigh = leftWithScale >= 0 ? 0 : -1; + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + long rightDec128Low = rightInputVectorSlot.getLong(j); + long rightDec128High = rightDec128Low >= 0 ? 0 : -1; + long newDec128High = leftHigh + rightDec128High; + long newDec128Low = leftWithScale + rightDec128Low; + // carry out of the unsigned low-word addition: ((a & b) | ((a | b) & ~(a + b))) >>> 63 + long carryOut = ((leftWithScale & rightDec128Low) + | ((leftWithScale | rightDec128Low) & (~newDec128Low))) >>> 63; + long resultHigh = newDec128High + carryOut; + + outputDec128Lows[j] = newDec128Low; + outputDec128Highs[j] = resultHigh; + } + } else { + for (int i = 0; i < batchSize; i++) { + long rightDec128Low = rightInputVectorSlot.getLong(i); + long rightDec128High = rightDec128Low >= 0 ? 0 : -1; + long newDec128High = leftHigh + rightDec128High; + long newDec128Low = leftWithScale + rightDec128Low; + long carryOut = ((leftWithScale & rightDec128Low) + | ((leftWithScale | rightDec128Low) & (~newDec128Low))) >>> 63; + long resultHigh = newDec128High + carryOut; + outputDec128Lows[i] = newDec128Low; + outputDec128Highs[i] = resultHigh; + } } + return true; } - private void normalAdd(int batchSize, boolean isSelectionInUse, int[] sel, DecimalBlock rightInputVectorSlot, - Slice output, DecimalStructure leftDec) { + /** + * @return true when the add completes in the decimal128 fast path; false means the caller must fall back to the normal decimal add + */ + private boolean doDecimal128Add(int batchSize, boolean isSelectionInUse, int[] sel, + DecimalBlock rightInputVectorSlot, DecimalBlock outputVectorSlot) { + outputVectorSlot.allocateDecimal128(); + long[] outputDec128Lows = outputVectorSlot.getDecimal128LowValues(); + long[] outputDec128Highs = outputVectorSlot.getDecimal128HighValues(); + + long leftHigh = leftWithScale >= 0 ? 
0 : -1; + + boolean isOverflow = false; + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + long rightDec128Low = rightInputVectorSlot.getDecimal128Low(j); + long rightDec128High = rightInputVectorSlot.getDecimal128High(j); + + long newDec128High = leftHigh + rightDec128High; + long newDec128Low = leftWithScale + rightDec128Low; + long carryOut = ((leftWithScale & rightDec128Low) + | ((leftWithScale | rightDec128Low) & (~newDec128Low))) >>> 63; + long resultHigh = newDec128High + carryOut; + + outputDec128Lows[j] = newDec128Low; + outputDec128Highs[j] = resultHigh; + isOverflow |= (((newDec128High ^ resultHigh) & (carryOut ^ resultHigh)) < 0); + } + } else { + for (int i = 0; i < batchSize; i++) { + long rightDec128Low = rightInputVectorSlot.getDecimal128Low(i); + long rightDec128High = rightInputVectorSlot.getDecimal128High(i); + long newDec128High = leftHigh + rightDec128High; + long newDec128Low = leftWithScale + rightDec128Low; + long carryOut = ((leftWithScale & rightDec128Low) + | ((leftWithScale | rightDec128Low) & (~newDec128Low))) >>> 63; + long resultHigh = newDec128High + carryOut; + + outputDec128Lows[i] = newDec128Low; + outputDec128Highs[i] = resultHigh; + isOverflow |= (((newDec128High ^ resultHigh) & (carryOut ^ resultHigh)) < 0); + } + } + if (isOverflow) { + outputVectorSlot.deallocateDecimal128(); + } + return !isOverflow; + } + + private void normalAdd(int batchSize, boolean isSelectionInUse, int[] sel, + DecimalBlock rightInputVectorSlot, + DecimalBlock outputVectorSlot, DecimalStructure leftDec) { + Slice output = outputVectorSlot.getMemorySegments(); + DecimalStructure rightDec; if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { @@ -156,5 +314,6 @@ private void normalAdd(int batchSize, boolean isSelectionInUse, int[] sel, Decim FastDecimalUtils.add(leftDec, rightDec, toValue); } } + outputVectorSlot.setFullState(); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastBetweenDecimalColLongConstLongConstVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastBetweenDecimalColLongConstLongConstVectorizedExpression.java index 3a2d53a4b..dcfdf5856 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastBetweenDecimalColLongConstLongConstVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastBetweenDecimalColLongConstLongConstVectorizedExpression.java @@ -17,9 +17,12 @@ package com.alibaba.polardbx.executor.vectorized.math; import com.alibaba.polardbx.common.datatype.Decimal; +import com.alibaba.polardbx.common.datatype.DecimalConverter; import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.DecimalTypeBase; import com.alibaba.polardbx.common.datatype.FastDecimalUtils; import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.MathUtils; import com.alibaba.polardbx.executor.chunk.DecimalBlock; import com.alibaba.polardbx.executor.chunk.LongBlock; import com.alibaba.polardbx.executor.chunk.MutableChunk; @@ -35,6 +38,7 @@ import java.util.Arrays; +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DECIMAL_MEMORY_SIZE; import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Const; import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; @@ -45,39 +49,72 @@ priority = 
ExpressionPriority.SPECIAL ) public class FastBetweenDecimalColLongConstLongConstVectorizedExpression extends AbstractVectorizedExpression { - private final boolean operand1IsNull; - private final Decimal operand1; - private final boolean operand2IsNull; - private final Decimal operand2; - private final long lower; - private final long upper; + private boolean operand1IsNull; + private Decimal operand1; + private boolean useOperand1WithScale; + private long operand1WithScale; - public FastBetweenDecimalColLongConstLongConstVectorizedExpression(int outputIndex, VectorizedExpression[] children) { + private boolean operand2IsNull; + private Decimal operand2; + private boolean useOperand2WithScale; + private long operand2WithScale; + + public FastBetweenDecimalColLongConstLongConstVectorizedExpression(int outputIndex, + VectorizedExpression[] children) { super(DataTypes.LongType, outputIndex, children); Object operand1Value = ((LiteralVectorizedExpression) children[1]).getConvertedValue(); - Object operand2Value = ((LiteralVectorizedExpression) children[2]).getConvertedValue(); if (operand1Value == null) { operand1IsNull = true; operand1 = Decimal.ZERO; - lower = 0; + operand1WithScale = 0; + useOperand1WithScale = true; } else { operand1IsNull = false; operand1 = DataTypes.DecimalType.convertFrom(operand1Value); - lower = DataTypes.LongType.convertFrom(operand1Value); + long left = (long) operand1Value; + if (left == 0) { + // a zero lower bound must not return early here: operand2 below still needs initializing + operand1WithScale = 0; + useOperand1WithScale = true; + } else { + int scale = children[0].getOutputDataType().getScale(); + if (scale < 0 || scale >= DecimalTypeBase.POW_10.length) { + operand1WithScale = 0; + useOperand1WithScale = false; + } else { + long power = DecimalTypeBase.POW_10[scale]; + operand1WithScale = left * power; + useOperand1WithScale = !MathUtils.longMultiplyOverflow(left, power, operand1WithScale); + } + } } + Object operand2Value = ((LiteralVectorizedExpression) children[2]).getConvertedValue(); if (operand2Value == null) { operand2IsNull = true; operand2 = Decimal.ZERO; - upper = 0; + operand2WithScale = 0; + useOperand2WithScale = true; } else { operand2IsNull = false; operand2 = DataTypes.DecimalType.convertFrom(operand2Value); - upper = DataTypes.LongType.convertFrom(operand2Value); + long right = (long) operand2Value; + if (right == 0) { + operand2WithScale = 0; + useOperand2WithScale = true; + return; + } + int scale = children[0].getOutputDataType().getScale(); + if (scale < 0 || scale >= DecimalTypeBase.POW_10.length) { + operand2WithScale = 0; + useOperand2WithScale = false; + } else { + long power = DecimalTypeBase.POW_10[scale]; + operand2WithScale = right * power; + useOperand2WithScale = !MathUtils.longMultiplyOverflow(right, power, operand2WithScale); + } } - } @Override @@ -90,92 +127,58 @@ public void eval(EvaluationContext ctx) { RandomAccessBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType); DecimalBlock leftInputVectorSlot = - (DecimalBlock) chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); - boolean[] nulls = outputVectorSlot.nulls(); + chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()) + .cast(DecimalBlock.class); + + long[] output = (outputVectorSlot.cast(LongBlock.class)).longArray(); - if (operand1IsNull || operand2IsNull) { + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); + if (leftInputVectorSlot.isDecimal64() && useOperand1WithScale && useOperand2WithScale) { + // do Decimal64 compare if (isSelectionInUse) { for (int 
i = 0; i < batchSize; i++) { int j = sel[i]; - nulls[j] = true; + long leftVal = leftInputVectorSlot.getLong(j); + output[j] = leftVal >= operand1WithScale && leftVal <= operand2WithScale ? LongBlock.TRUE_VALUE : + LongBlock.FALSE_VALUE; } } else { - Arrays.fill(nulls, true); + for (int i = 0; i < batchSize; i++) { + // fetch left decimal value + long leftVal = leftInputVectorSlot.getLong(i); + output[i] = leftVal >= operand1WithScale && leftVal <= operand2WithScale ? LongBlock.TRUE_VALUE : + LongBlock.FALSE_VALUE; + } } return; } - - VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex()); - - long[] output = ((LongBlock) outputVectorSlot).longArray(); - VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[1].getOutputIndex()); - - boolean enableFastVec = - ctx.getExecutionContext().getParamManager().getBoolean(ConnectionParams.ENABLE_DECIMAL_FAST_VEC); - - leftInputVectorSlot.collectDecimalInfo(); - boolean useFastMethod = (leftInputVectorSlot.isSimple() && leftInputVectorSlot.getInt2Pos() == -1); - - if (!useFastMethod || !enableFastVec) { - doNormalCompare(batchSize, isSelectionInUse, sel, leftInputVectorSlot, output); - return; - } - - // a2 <= (a1 + b1 * [-9]) <= a3 - // => - // (b1 == 0 && a2 <= a1 <= a3) - // || - // (b1 != 0 && a2 <= a1 < a3) - long a1, b1; - long a2 = lower; - long a3 = upper; - if(isSelectionInUse) { - for (int i = 0; i < batchSize; i++) { - int j = sel[i]; - a1 = leftInputVectorSlot.fastInt1(j); - b1 = leftInputVectorSlot.fastFrac(j); - - boolean equal = (a2 <= a1 && ((b1 == 0 && a1 <= a3) || (b1 != 0 && a1 < a3))); - output[j] = equal ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; - } - } else { - for (int i = 0; i < batchSize; i++) { - a1 = leftInputVectorSlot.fastInt1(i); - b1 = leftInputVectorSlot.fastFrac(i); - - boolean equal = (a2 <= a1 && ((b1 == 0 && a1 <= a3) || (b1 != 0 && a1 < a3))); - output[i] = equal ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; - } - } - } - - private void doNormalCompare(int batchSize, boolean isSelectionInUse, int[] sel, DecimalBlock leftInputVectorSlot, - long[] output) { + // do normal decimal compare DecimalStructure leftDec; DecimalStructure operand1Dec = operand1.getDecimalStructure(); DecimalStructure operand2Dec = operand2.getDecimalStructure(); - if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { int j = sel[i]; // fetch left decimal value - leftDec = new DecimalStructure(leftInputVectorSlot.getRegion(j)); + leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(j)); - boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) >= 0 && FastDecimalUtils.compare(leftDec, operand2Dec) <= 0; + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) >= 0; + boolean b2 = FastDecimalUtils.compare(leftDec, operand2Dec) <= 0; - output[j] = b1 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + output[j] = b1 && b2 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; } } else { for (int i = 0; i < batchSize; i++) { // fetch left decimal value - leftDec = new DecimalStructure(leftInputVectorSlot.getRegion(i)); + leftDec = new DecimalStructure((leftInputVectorSlot.cast(DecimalBlock.class)).getRegion(i)); - boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) >= 0 && FastDecimalUtils.compare(leftDec, operand2Dec) <= 0; + boolean b1 = FastDecimalUtils.compare(leftDec, operand1Dec) >= 0; + boolean b2 = FastDecimalUtils.compare(leftDec, operand2Dec) <= 0; - output[i] = b1 ? 
LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; + output[i] = b1 && b2 ? LongBlock.TRUE_VALUE : LongBlock.FALSE_VALUE; } } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastMultiplyDecimalColIntegerColVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastMultiplyDecimalColIntegerColVectorizedExpression.java new file mode 100644 index 000000000..3c3efe3b3 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastMultiplyDecimalColIntegerColVectorizedExpression.java @@ -0,0 +1,838 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.executor.vectorized.math; + +import com.alibaba.polardbx.common.datatype.DecimalConverter; +import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.FastDecimalUtils; +import com.alibaba.polardbx.common.utils.MathUtils; +import com.alibaba.polardbx.executor.chunk.DecimalBlock; +import com.alibaba.polardbx.executor.chunk.IntegerBlock; +import com.alibaba.polardbx.executor.chunk.MutableChunk; +import com.alibaba.polardbx.executor.vectorized.AbstractVectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.EvaluationContext; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; +import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; +import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import io.airlift.slice.Slice; + +import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DECIMAL_MEMORY_SIZE; +import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; +import static com.alibaba.polardbx.executor.vectorized.metadata.ExpressionPriority.SPECIAL; + +@ExpressionSignatures(names = {"*", "multiply"}, argumentTypes = {"Decimal", "Integer"}, + argumentKinds = {Variable, Variable}, priority = SPECIAL) +public class FastMultiplyDecimalColIntegerColVectorizedExpression extends AbstractVectorizedExpression { + + private final boolean isRightUnsigned; + + public FastMultiplyDecimalColIntegerColVectorizedExpression(int outputIndex, VectorizedExpression[] children) { + super(DataTypes.DecimalType, outputIndex, children); + isRightUnsigned = children[1].getOutputDataType().isUnsigned(); + } + + @Override + public void eval(EvaluationContext ctx) { + super.evalChildren(ctx); + MutableChunk chunk = ctx.getPreAllocatedChunk(); + int batchSize = chunk.batchSize(); + boolean isSelectionInUse = chunk.isSelectionInUse(); + int[] sel = chunk.selection(); + + DecimalBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType).cast(DecimalBlock.class); + DecimalBlock leftInputVectorSlot = + chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()).cast(DecimalBlock.class); + IntegerBlock 
rightInputVectorSlot = + chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType()).cast(IntegerBlock.class); + + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex(), + children[1].getOutputIndex()); + boolean sameResultScale = checkResultScale(leftInputVectorSlot.getScale(), + outputVectorSlot.getScale()); + if (leftInputVectorSlot.isDecimal64() && sameResultScale) { + boolean success; + if (!isRightUnsigned) { + success = doDecimal64Mul(batchSize, isSelectionInUse, sel, leftInputVectorSlot, rightInputVectorSlot, + outputVectorSlot); + } else { + success = + doDecimal64MulUnsigned(batchSize, isSelectionInUse, sel, leftInputVectorSlot, rightInputVectorSlot, + outputVectorSlot); + } + if (success) { + // expect never overflow for decimal64 + return; + } + } + + if (leftInputVectorSlot.isDecimal128() && sameResultScale) { + boolean success; + if (!isRightUnsigned) { + success = doDecimal128Mul(batchSize, isSelectionInUse, sel, leftInputVectorSlot, rightInputVectorSlot, + outputVectorSlot); + } else { + success = + doDecimal128MulUnsigned(batchSize, isSelectionInUse, sel, leftInputVectorSlot, rightInputVectorSlot, + outputVectorSlot); + } + if (success) { + return; + } + } + + doNormalMul(batchSize, isSelectionInUse, sel, leftInputVectorSlot, rightInputVectorSlot, outputVectorSlot); + } + + private boolean checkResultScale(int scale, int outputVectorSlotScale) { + return scale == outputVectorSlotScale; + } + + private boolean doDecimal64Mul(int batchSize, boolean isSelectionInUse, int[] sel, DecimalBlock leftInputVectorSlot, + IntegerBlock rightInputVectorSlot, DecimalBlock outputVectorSlot) { + long[] decimal64Output = outputVectorSlot.allocateDecimal64(); + long[] leftArray = leftInputVectorSlot.decimal64Values(); + int[] rightArray = rightInputVectorSlot.intArray(); + + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + long x = leftArray[j]; + int y = rightArray[j]; + long result = leftArray[j] * rightArray[j]; + if (MathUtils.longMultiplyOverflow(x, y, result)) { + return doDecimal64MulTo128(batchSize, isSelectionInUse, sel, leftInputVectorSlot, + rightInputVectorSlot, + outputVectorSlot); + } + + decimal64Output[j] = result; + } + } else { + for (int i = 0; i < batchSize; i++) { + long x = leftArray[i]; + int y = rightArray[i]; + long result = x * y; + if (MathUtils.longMultiplyOverflow(x, y, result)) { + return doDecimal64MulTo128(batchSize, isSelectionInUse, sel, leftInputVectorSlot, + rightInputVectorSlot, + outputVectorSlot); + } + + decimal64Output[i] = result; + } + } + return true; + } + + private boolean doDecimal64MulUnsigned(int batchSize, boolean isSelectionInUse, int[] sel, + DecimalBlock leftInputVectorSlot, IntegerBlock rightInputVectorSlot, + DecimalBlock outputVectorSlot) { + long[] decimal64Output = outputVectorSlot.allocateDecimal64(); + long[] leftArray = leftInputVectorSlot.decimal64Values(); + int[] rightArray = rightInputVectorSlot.intArray(); + + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + long x = leftArray[j]; + long y = rightArray[j] & 0xFFFFFFFFL; + long result = x * y; + if (MathUtils.longMultiplyOverflow(x, y, result)) { + return doDecimal64MulUnsignedTo128(batchSize, isSelectionInUse, sel, leftInputVectorSlot, + rightInputVectorSlot, + outputVectorSlot); + } + + decimal64Output[j] = result; + } + } else { + for (int i = 0; i < batchSize; i++) { + long x = leftArray[i]; + long y = rightArray[i] & 0xFFFFFFFFL; + 
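// The mask widens the int as an unsigned 32-bit value (e.g. an int holding -1 stands for 4294967295),
+ // so the 64 x 32 bit product can still exceed a signed long; longMultiplyOverflow catches that below.
+ 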
long result = x * y; + if (MathUtils.longMultiplyOverflow(x, y, result)) { + return doDecimal64MulUnsignedTo128(batchSize, isSelectionInUse, sel, leftInputVectorSlot, + rightInputVectorSlot, + outputVectorSlot); + } + + decimal64Output[i] = result; + } + } + return true; + } + + /** + * decimal64 * int will not overflow decimal128 (a 64-bit by 32-bit product needs at most 96 bits) + */ + private boolean doDecimal64MulTo128(int batchSize, boolean isSelectionInUse, int[] sel, + DecimalBlock leftInputVectorSlot, + IntegerBlock rightInputVectorSlot, DecimalBlock outputVectorSlot) { + outputVectorSlot.allocateDecimal128(); + long[] outputDecimal128Low = outputVectorSlot.getDecimal128LowValues(); + long[] outputDecimal128High = outputVectorSlot.getDecimal128HighValues(); + long[] leftArray = leftInputVectorSlot.decimal64Values(); + int[] rightArray = rightInputVectorSlot.intArray(); + if (isSelectionInUse) { + mul64To128(batchSize, sel, leftArray, rightArray, outputDecimal128Low, outputDecimal128High); + } else { + mul64To128(batchSize, leftArray, rightArray, outputDecimal128Low, outputDecimal128High); + } + return true; + } + + /** + * decimal64 * unsigned int will not overflow decimal128 (a 64-bit by 32-bit product needs at most 96 bits) + */ + private boolean doDecimal64MulUnsignedTo128(int batchSize, boolean isSelectionInUse, int[] sel, + DecimalBlock leftInputVectorSlot, + IntegerBlock rightInputVectorSlot, DecimalBlock outputVectorSlot) { + outputVectorSlot.allocateDecimal128(); + long[] outputDecimal128Low = outputVectorSlot.getDecimal128LowValues(); + long[] outputDecimal128High = outputVectorSlot.getDecimal128HighValues(); + long[] leftArray = leftInputVectorSlot.decimal64Values(); + int[] rightArray = rightInputVectorSlot.intArray(); + if (isSelectionInUse) { + mul64UnsignedTo128(batchSize, sel, leftArray, rightArray, outputDecimal128Low, outputDecimal128High); + } else { + mul64UnsignedTo128(batchSize, leftArray, rightArray, outputDecimal128Low, outputDecimal128High); + } + return true; + } + 
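+ // Sketch of the limb arithmetic used below: the 64-bit magnitude is split into 32-bit limbs (x1, x2),
+ // each limb is multiplied by the 32-bit operand, and the high word of each partial product is carried
+ // into the next limb. For example, decimal64 = 2^33 with multiplier = 3 yields limbs (0, 2), hence
+ // low = 6L << 32 and high = 0; negative products are two's-complement negated afterwards.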
+    /**
+     * no overflow
+     */
+    private void mul64To128(int batchSize, int[] sel, long[] leftArray, int[] rightArray,
+                            long[] outputDecimal128Low, long[] outputDecimal128High) {
+        for (int i = 0; i < batchSize; i++) {
+            int j = sel[i];
+
+            long decimal64 = leftArray[j];
+            int multiplier = rightArray[j];
+
+            if (decimal64 == 0 || multiplier == 0) {
+                outputDecimal128Low[j] = 0;
+                outputDecimal128High[j] = 0;
+                continue;
+            }
+            if (decimal64 == 1) {
+                // sign-extend the multiplier into the high limb
+                outputDecimal128Low[j] = multiplier;
+                outputDecimal128High[j] = multiplier >= 0 ? 0 : -1;
+                continue;
+            }
+            if (decimal64 == -1) {
+                long negMultiplier = -((long) multiplier);
+                outputDecimal128Low[j] = negMultiplier;
+                outputDecimal128High[j] = negMultiplier >= 0 ? 0 : -1;
+                continue;
+            }
+            if (multiplier == 1) {
+                outputDecimal128Low[j] = decimal64;
+                outputDecimal128High[j] = decimal64 >= 0 ? 0 : -1;
+                continue;
+            }
+            if (multiplier == -1 && decimal64 != 0x8000000000000000L) {
+                outputDecimal128Low[j] = -decimal64;
+                outputDecimal128High[j] = -decimal64 >= 0 ? 0 : -1;
+                continue;
+            }
+            boolean positive;
+            long multiplierAbs = multiplier;
+            long decimal64Abs = Math.abs(decimal64);
+            if (multiplier < 0) {
+                multiplierAbs = -multiplierAbs;
+                positive = decimal64 < 0;
+            } else {
+                positive = decimal64 >= 0;
+            }
+            long sum;
+
+            int x1 = (int) decimal64Abs;
+            int x2 = (int) (decimal64Abs >>> 32);
+            int x3 = 0;
+            sum = (x1 & 0xFFFFFFFFL) * multiplierAbs;
+            x1 = (int) sum;
+            sum = (x2 & 0xFFFFFFFFL) * multiplierAbs + (sum >>> 32);
+            x2 = (int) sum;
+            sum = (sum >>> 32);
+            x3 = (int) sum;
+            if (positive) {
+                outputDecimal128Low[j] = (x1 & 0xFFFFFFFFL) | (((long) x2) << 32);
+                outputDecimal128High[j] = (x3 & 0xFFFFFFFFL);
+            } else {
+                outputDecimal128Low[j] = ~((x1 & 0xFFFFFFFFL) | (((long) x2) << 32)) + 1;
+                outputDecimal128High[j] = ~((x3 & 0xFFFFFFFFL));
+                if (outputDecimal128Low[j] == 0) {
+                    outputDecimal128High[j] += 1;
+                }
+            }
+        }
+    }
+
+    /**
+     * no overflow
+     */
+    private void mul64UnsignedTo128(int batchSize, long[] leftArray, int[] rightArray, long[] outputDecimal128Low,
+                                    long[] outputDecimal128High) {
+        for (int i = 0; i < batchSize; i++) {
+            long decimal64 = leftArray[i];
+            int multiplier = rightArray[i];
+
+            if (decimal64 == 0 || multiplier == 0) {
+                outputDecimal128Low[i] = 0;
+                outputDecimal128High[i] = 0;
+                continue;
+            }
+            if (decimal64 == 1) {
+                // zero-extend the unsigned multiplier; the high limb is always 0
+                outputDecimal128Low[i] = multiplier & 0xFFFFFFFFL;
+                outputDecimal128High[i] = 0;
+                continue;
+            }
+            if (decimal64 == -1) {
+                long negMultiplier = -(multiplier & 0xFFFFFFFFL);
+                outputDecimal128Low[i] = negMultiplier;
+                outputDecimal128High[i] = negMultiplier >= 0 ? 0 : -1;
+                continue;
+            }
+            if (multiplier == 1) {
+                outputDecimal128Low[i] = decimal64;
+                outputDecimal128High[i] = decimal64 >= 0 ? 0 : -1;
+                continue;
+            }
+            boolean positive = decimal64 >= 0;
+            long multiplierAbs = multiplier & 0xFFFFFFFFL;
+            long decimal64Abs = Math.abs(decimal64);
+            long sum;
+
+            int x1 = (int) decimal64Abs;
+            int x2 = (int) (decimal64Abs >>> 32);
+            int x3 = 0;
+            sum = (x1 & 0xFFFFFFFFL) * multiplierAbs;
+            x1 = (int) sum;
+            sum = (x2 & 0xFFFFFFFFL) * multiplierAbs + (sum >>> 32);
+            x2 = (int) sum;
+            sum = (sum >>> 32);
+            x3 = (int) sum;
+            if (positive) {
+                outputDecimal128Low[i] = (x1 & 0xFFFFFFFFL) | (((long) x2) << 32);
+                outputDecimal128High[i] = (x3 & 0xFFFFFFFFL);
+            } else {
+                outputDecimal128Low[i] = ~((x1 & 0xFFFFFFFFL) | (((long) x2) << 32)) + 1;
+                outputDecimal128High[i] = ~((x3 & 0xFFFFFFFFL));
+                if (outputDecimal128Low[i] == 0) {
+                    outputDecimal128High[i] += 1;
+                }
+            }
+        }
+    }
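+    /*
+     * Unsigned variants: the int multiplier is a 32-bit unsigned bit pattern, so every
+     * use of it is zero-extended with `& 0xFFFFFFFFL` (a bare int-to-long cast would
+     * sign-extend), and the sign of the 128-bit result depends on the decimal64 operand
+     * alone. The sel/no-sel twins are kept as separate loops so the common no-selection
+     * path avoids the sel[i] indirection.
+     */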
+    /**
+     * no overflow
+     */
+    private void mul64UnsignedTo128(int batchSize, int[] sel, long[] leftArray, int[] rightArray,
+                                    long[] outputDecimal128Low, long[] outputDecimal128High) {
+        for (int i = 0; i < batchSize; i++) {
+            int j = sel[i];
+
+            long decimal64 = leftArray[j];
+            int multiplier = rightArray[j];
+
+            if (decimal64 == 0 || multiplier == 0) {
+                outputDecimal128Low[j] = 0;
+                outputDecimal128High[j] = 0;
+                continue;
+            }
+            if (decimal64 == 1) {
+                // zero-extend the unsigned multiplier; the high limb is always 0
+                outputDecimal128Low[j] = multiplier & 0xFFFFFFFFL;
+                outputDecimal128High[j] = 0;
+                continue;
+            }
+            if (decimal64 == -1) {
+                long negMultiplier = -(multiplier & 0xFFFFFFFFL);
+                outputDecimal128Low[j] = negMultiplier;
+                outputDecimal128High[j] = negMultiplier >= 0 ? 0 : -1;
+                continue;
+            }
+            if (multiplier == 1) {
+                outputDecimal128Low[j] = decimal64;
+                outputDecimal128High[j] = decimal64 >= 0 ? 
0 : -1; + continue; + } + boolean positive = decimal64 >= 0; + long multiplierAbs = multiplier & 0xFFFFFFFFL; + long decimal64Abs = Math.abs(decimal64); + long sum; + + int x1 = (int) decimal64Abs; + int x2 = (int) (decimal64Abs >>> 32); + int x3 = 0; + sum = (x1 & 0xFFFFFFFFL) * multiplierAbs; + x1 = (int) sum; + sum = (x2 & 0xFFFFFFFFL) * multiplierAbs + (sum >>> 32); + x2 = (int) sum; + sum = (sum >>> 32); + x3 = (int) sum; + if (positive) { + outputDecimal128Low[j] = (x1 & 0xFFFFFFFFL) | (((long) x2) << 32); + outputDecimal128High[j] = (x3 & 0xFFFFFFFFL); + } else { + outputDecimal128Low[j] = ~((x1 & 0xFFFFFFFFL) | (((long) x2) << 32)) + 1; + outputDecimal128High[j] = ~((x3 & 0xFFFFFFFFL)); + if (outputDecimal128Low[j] == 0) { + outputDecimal128High[j] += 1; + } + } + } + } + + private boolean doDecimal128Mul(int batchSize, boolean isSelectionInUse, int[] sel, + DecimalBlock leftInputVectorSlot, + IntegerBlock rightInputVectorSlot, DecimalBlock outputVectorSlot) { + outputVectorSlot.allocateDecimal128(); + int[] rightArray = rightInputVectorSlot.intArray(); + long[] outputDecimal128Low = outputVectorSlot.getDecimal128LowValues(); + long[] outputDecimal128High = outputVectorSlot.getDecimal128HighValues(); + long[] leftDecimal128Low = leftInputVectorSlot.getDecimal128LowValues(); + long[] leftDecimal128High = leftInputVectorSlot.getDecimal128HighValues(); + + if (isSelectionInUse) { + if (!mul128To128(batchSize, sel, leftDecimal128Low, leftDecimal128High, + rightArray, outputDecimal128Low, outputDecimal128High)) { + outputVectorSlot.deallocateDecimal128(); + return false; + } + return true; + } else { + if (!mul128To128(batchSize, leftDecimal128Low, leftDecimal128High, + rightArray, outputDecimal128Low, outputDecimal128High)) { + outputVectorSlot.deallocateDecimal128(); + return false; + } + return true; + } + } + + private boolean doDecimal128MulUnsigned(int batchSize, boolean isSelectionInUse, int[] sel, + DecimalBlock leftInputVectorSlot, + IntegerBlock rightInputVectorSlot, DecimalBlock outputVectorSlot) { + outputVectorSlot.allocateDecimal128(); + int[] rightArray = rightInputVectorSlot.intArray(); + long[] outputDecimal128Low = outputVectorSlot.getDecimal128LowValues(); + long[] outputDecimal128High = outputVectorSlot.getDecimal128HighValues(); + long[] leftDecimal128Low = leftInputVectorSlot.getDecimal128LowValues(); + long[] leftDecimal128High = leftInputVectorSlot.getDecimal128HighValues(); + + if (isSelectionInUse) { + if (!mul128UnsignedTo128(batchSize, sel, leftDecimal128Low, leftDecimal128High, + rightArray, outputDecimal128Low, outputDecimal128High)) { + outputVectorSlot.deallocateDecimal128(); + return false; + } + return true; + } else { + if (!mul128UnsignedTo128(batchSize, leftDecimal128Low, leftDecimal128High, + rightArray, outputDecimal128Low, outputDecimal128High)) { + outputVectorSlot.deallocateDecimal128(); + return false; + } + return true; + } + } + + private boolean mul128To128(int batchSize, long[] leftDecimal128Low, long[] leftDecimal128High, + int[] rightArray, long[] outputDecimal128Low, long[] outputDecimal128High) { + for (int i = 0; i < batchSize; i++) { + long decimal128Low = leftDecimal128Low[i]; + long decimal128High = leftDecimal128High[i]; + int multiplier = rightArray[i]; + + if (multiplier == 0 || (decimal128Low == 0 && decimal128High == 0)) { + outputDecimal128Low[i] = 0; + outputDecimal128High[i] = 0; + continue; + } + + if (multiplier == 1) { + outputDecimal128Low[i] = decimal128Low; + outputDecimal128High[i] = decimal128High; + continue; + } + 
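+            // Two's-complement negation across 128 bits: invert both limbs, add 1 to the
+            // low limb, and propagate the carry into the high limb only when the low limb
+            // wraps around to zero. E.g. negating 1 yields low = -1, high = -1, i.e. -1.
+            // The multiplier == -1 shortcut below applies exactly this transformation.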
+            if (multiplier == -1) {
+                outputDecimal128Low[i] = ~decimal128Low + 1;
+                outputDecimal128High[i] = ~decimal128High;
+                if (outputDecimal128Low[i] == 0) {
+                    outputDecimal128High[i] += 1;
+                }
+                continue;
+            }
+
+            boolean positive;
+            long multiplierAbs = multiplier;
+            if (multiplier < 0) {
+                multiplierAbs = -multiplierAbs;
+                positive = decimal128High < 0;
+            } else {
+                positive = decimal128High >= 0;
+            }
+            if (decimal128High < 0) {
+                decimal128Low = ~decimal128Low + 1;
+                decimal128High = ~decimal128High;
+                decimal128High += (decimal128Low == 0) ? 1 : 0;
+            }
+            long sum;
+            int x1 = (int) decimal128Low;
+            int x2 = (int) (decimal128Low >>> 32);
+            int x3 = (int) decimal128High;
+            int x4 = (int) (decimal128High >>> 32);
+            sum = (x1 & 0xFFFFFFFFL) * multiplierAbs;
+            x1 = (int) sum;
+            sum = (x2 & 0xFFFFFFFFL) * multiplierAbs + (sum >>> 32);
+            x2 = (int) sum;
+            sum = (x3 & 0xFFFFFFFFL) * multiplierAbs + (sum >>> 32);
+            x3 = (int) sum;
+            sum = (x4 & 0xFFFFFFFFL) * multiplierAbs + (sum >>> 32);
+            x4 = (int) sum;
+
+            if ((sum >> 32) != 0) {
+                // the product no longer fits in 128 bits; report failure so the caller
+                // can fall back to the generic decimal path
+                return false;
+            }
+            if (positive) {
+                outputDecimal128Low[i] = (x1 & 0xFFFFFFFFL) | (((long) x2) << 32);
+                outputDecimal128High[i] = (x3 & 0xFFFFFFFFL) | (((long) x4) << 32);
+            } else {
+                outputDecimal128Low[i] = ~((x1 & 0xFFFFFFFFL) | (((long) x2) << 32)) + 1;
+                outputDecimal128High[i] = ~((x3 & 0xFFFFFFFFL) | (((long) x4) << 32));
+                if (outputDecimal128Low[i] == 0) {
+                    outputDecimal128High[i] += 1;
+                }
+            }
+        }
+        return true;
+    }
+
+    private boolean mul128To128(int batchSize, int[] sel, long[] leftDecimal128Low, long[] leftDecimal128High,
+                                int[] rightArray, long[] outputDecimal128Low, long[] outputDecimal128High) {
+        for (int i = 0; i < batchSize; i++) {
+            int j = sel[i];
+
+            long decimal128Low = leftDecimal128Low[j];
+            long decimal128High = leftDecimal128High[j];
+            int multiplier = rightArray[j];
+
+            if (multiplier == 0 || (decimal128Low == 0 && decimal128High == 0)) {
+                outputDecimal128Low[j] = 0;
+                outputDecimal128High[j] = 0;
+                continue;
+            }
+
+            if (multiplier == 1) {
+                outputDecimal128Low[j] = decimal128Low;
+                outputDecimal128High[j] = decimal128High;
+                // continue with the next selected row instead of returning early
+                continue;
+            }
+            if (multiplier == -1) {
+                outputDecimal128Low[j] = ~decimal128Low + 1;
+                outputDecimal128High[j] = ~decimal128High;
+                if (outputDecimal128Low[j] == 0) {
+                    outputDecimal128High[j] += 1;
+                }
+                continue;
+            }
+
+            boolean positive;
+            long multiplierAbs = multiplier;
+            if (multiplier < 0) {
+                multiplierAbs = -multiplierAbs;
+                positive = decimal128High < 0;
+            } else {
+                positive = decimal128High >= 0;
+            }
+            if (decimal128High < 0) {
+                decimal128Low = ~decimal128Low + 1;
+                decimal128High = ~decimal128High;
+                decimal128High += (decimal128Low == 0) ? 
1 : 0; + } + long sum; + int x1 = (int) decimal128Low; + int x2 = (int) (decimal128Low >>> 32); + int x3 = (int) decimal128High; + int x4 = (int) (decimal128High >>> 32); + sum = (x1 & 0xFFFFFFFFL) * multiplierAbs; + x1 = (int) sum; + sum = (x2 & 0xFFFFFFFFL) * multiplierAbs + (sum >>> 32); + x2 = (int) sum; + sum = (x3 & 0xFFFFFFFFL) * multiplierAbs + (sum >>> 32); + x3 = (int) sum; + sum = (x4 & 0xFFFFFFFFL) * multiplierAbs + (sum >>> 32); + x4 = (int) sum; + + if ((sum >> 32) != 0) { + return false; + } + if (positive) { + outputDecimal128Low[j] = (x1 & 0xFFFFFFFFL) | (((long) x2) << 32); + outputDecimal128High[j] = (x3 & 0xFFFFFFFFL) | (((long) x4) << 32); + } else { + outputDecimal128Low[j] = ~((x1 & 0xFFFFFFFFL) | (((long) x2) << 32)) + 1; + outputDecimal128High[j] = ~((x3 & 0xFFFFFFFFL) | (((long) x4) << 32)); + if (outputDecimal128Low[j] == 0) { + outputDecimal128High[j] += 1; + } + } + } + return true; + } + + private boolean mul128UnsignedTo128(int batchSize, long[] leftDecimal128Low, long[] leftDecimal128High, + int[] rightArray, long[] outputDecimal128Low, long[] outputDecimal128High) { + for (int i = 0; i < batchSize; i++) { + long decimal128Low = leftDecimal128Low[i]; + long decimal128High = leftDecimal128High[i]; + int multiplier = rightArray[i]; + + if (multiplier == 0 || (decimal128Low == 0 && decimal128High == 0)) { + outputDecimal128Low[i] = 0; + outputDecimal128High[i] = 0; + continue; + } + + if (multiplier == 1) { + outputDecimal128Low[i] = decimal128Low; + outputDecimal128High[i] = decimal128High; + continue; + } + + boolean positive = decimal128High >= 0; + long multiplierAbs = multiplier & 0xFFFFFFFFL; + if (decimal128High < 0) { + decimal128Low = ~decimal128Low + 1; + decimal128High = ~decimal128High; + decimal128High += (decimal128Low == 0) ? 
1 : 0; + } + long sum; + int x1 = (int) decimal128Low; + int x2 = (int) (decimal128Low >>> 32); + int x3 = (int) decimal128High; + int x4 = (int) (decimal128High >>> 32); + sum = (x1 & 0xFFFFFFFFL) * multiplierAbs; + x1 = (int) sum; + sum = (x2 & 0xFFFFFFFFL) * multiplierAbs + (sum >>> 32); + x2 = (int) sum; + sum = (x3 & 0xFFFFFFFFL) * multiplierAbs + (sum >>> 32); + x3 = (int) sum; + sum = (x4 & 0xFFFFFFFFL) * multiplierAbs + (sum >>> 32); + x4 = (int) sum; + + if ((sum >> 32) != 0) { + return false; + } + if (positive) { + outputDecimal128Low[i] = (x1 & 0xFFFFFFFFL) | (((long) x2) << 32); + outputDecimal128High[i] = (x3 & 0xFFFFFFFFL) | (((long) x4) << 32); + } else { + outputDecimal128Low[i] = ~((x1 & 0xFFFFFFFFL) | (((long) x2) << 32)) + 1; + outputDecimal128High[i] = ~((x3 & 0xFFFFFFFFL) | (((long) x4) << 32)); + if (outputDecimal128Low[i] == 0) { + outputDecimal128High[i] += 1; + } + } + } + return true; + } + + private boolean mul128UnsignedTo128(int batchSize, int[] sel, long[] leftDecimal128Low, long[] leftDecimal128High, + int[] rightArray, long[] outputDecimal128Low, long[] outputDecimal128High) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + long decimal128Low = leftDecimal128Low[j]; + long decimal128High = leftDecimal128High[j]; + int multiplier = rightArray[j]; + + if (multiplier == 0 || (decimal128Low == 0 && decimal128High == 0)) { + outputDecimal128Low[j] = 0; + outputDecimal128High[j] = 0; + continue; + } + + if (multiplier == 1) { + outputDecimal128Low[j] = decimal128Low; + outputDecimal128High[j] = decimal128High; + continue; + } + + boolean positive = decimal128High >= 0; + long multiplierAbs = multiplier & 0xFFFFFFFFL; + if (decimal128High < 0) { + decimal128Low = ~decimal128Low + 1; + decimal128High = ~decimal128High; + decimal128High += (decimal128Low == 0) ? 
1 : 0; + } + long sum; + int x1 = (int) decimal128Low; + int x2 = (int) (decimal128Low >>> 32); + int x3 = (int) decimal128High; + int x4 = (int) (decimal128High >>> 32); + sum = (x1 & 0xFFFFFFFFL) * multiplierAbs; + x1 = (int) sum; + sum = (x2 & 0xFFFFFFFFL) * multiplierAbs + (sum >>> 32); + x2 = (int) sum; + sum = (x3 & 0xFFFFFFFFL) * multiplierAbs + (sum >>> 32); + x3 = (int) sum; + sum = (x4 & 0xFFFFFFFFL) * multiplierAbs + (sum >>> 32); + x4 = (int) sum; + + if ((sum >> 32) != 0) { + return false; + } + if (positive) { + outputDecimal128Low[j] = (x1 & 0xFFFFFFFFL) | (((long) x2) << 32); + outputDecimal128High[j] = (x3 & 0xFFFFFFFFL) | (((long) x4) << 32); + } else { + outputDecimal128Low[j] = ~((x1 & 0xFFFFFFFFL) | (((long) x2) << 32)) + 1; + outputDecimal128High[j] = ~((x3 & 0xFFFFFFFFL) | (((long) x4) << 32)); + if (outputDecimal128Low[j] == 0) { + outputDecimal128High[j] += 1; + } + } + } + return true; + } + + private void doNormalMul(int batchSize, boolean isSelectionInUse, int[] sel, DecimalBlock leftInputVectorSlot, + IntegerBlock rightInputVectorSlot, DecimalBlock outputVectorSlot) { + DecimalStructure leftDec; + DecimalStructure rightDec = new DecimalStructure(); + // use cached slice + Slice cachedSlice = leftInputVectorSlot.allocCachedSlice(); + + int[] array2 = rightInputVectorSlot.intArray(); + Slice output = outputVectorSlot.getMemorySegments(); + + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + int fromIndex = j * DECIMAL_MEMORY_SIZE; + + // wrap memory in specified position + Slice decimalMemorySegment = output.slice(fromIndex, DECIMAL_MEMORY_SIZE); + DecimalStructure toValue = new DecimalStructure(decimalMemorySegment); + + // do reset + rightDec.reset(); + + // fetch left decimal value + leftDec = new DecimalStructure((leftInputVectorSlot).getRegion(j, cachedSlice)); + + // fetch right decimal value + DecimalConverter.longToDecimal(array2[j], rightDec, isRightUnsigned); + + // do operator + FastDecimalUtils.mul(leftDec, rightDec, toValue); + } + } else { + for (int i = 0; i < batchSize; i++) { + int fromIndex = i * DECIMAL_MEMORY_SIZE; + + // wrap memory in specified position + Slice decimalMemorySegment = output.slice(fromIndex, DECIMAL_MEMORY_SIZE); + DecimalStructure toValue = new DecimalStructure(decimalMemorySegment); + + // do reset + rightDec.reset(); + + // fetch left decimal value + leftDec = new DecimalStructure((leftInputVectorSlot).getRegion(i, cachedSlice)); + + // fetch right decimal value + DecimalConverter.longToDecimal(array2[i], rightDec, isRightUnsigned); + + // do operator + FastDecimalUtils.mul(leftDec, rightDec, toValue); + } + } + outputVectorSlot.setFullState(); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastMultiplyDecimalColVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastMultiplyDecimalColVectorizedExpression.java index c5a4867a9..b93e7d0a3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastMultiplyDecimalColVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastMultiplyDecimalColVectorizedExpression.java @@ -16,12 +16,13 @@ package com.alibaba.polardbx.executor.vectorized.math; +import com.alibaba.polardbx.common.datatype.DecimalConverter; import com.alibaba.polardbx.common.datatype.DecimalStructure; import com.alibaba.polardbx.common.datatype.FastDecimalUtils; import 
com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.MathUtils; import com.alibaba.polardbx.executor.chunk.DecimalBlock; import com.alibaba.polardbx.executor.chunk.MutableChunk; -import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; import com.alibaba.polardbx.executor.vectorized.AbstractVectorizedExpression; import com.alibaba.polardbx.executor.vectorized.EvaluationContext; import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; @@ -36,16 +37,13 @@ import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable; import static com.alibaba.polardbx.executor.vectorized.metadata.ExpressionPriority.SPECIAL; -/* - * This class is generated using freemarker and the DecimalAddSubMulOperatorColumnColumn.ftl template. - */ -@SuppressWarnings("unused") @ExpressionSignatures( names = {"*", "multiply"}, argumentTypes = {"Decimal", "Decimal"}, argumentKinds = {Variable, Variable}, priority = SPECIAL) public class FastMultiplyDecimalColVectorizedExpression extends AbstractVectorizedExpression { + // for fast decimal multiply long[] sum0s; long[] sum9s; @@ -67,16 +65,25 @@ public void eval(EvaluationContext ctx) { boolean isSelectionInUse = chunk.isSelectionInUse(); int[] sel = chunk.selection(); - DecimalBlock outputVectorSlot = (DecimalBlock) chunk.slotIn(outputIndex, outputDataType); + DecimalBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType).cast(DecimalBlock.class); DecimalBlock leftInputVectorSlot = - (DecimalBlock) chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()); + chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()).cast(DecimalBlock.class); DecimalBlock rightInputVectorSlot = - (DecimalBlock) chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType()); + chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType()).cast(DecimalBlock.class); + VectorizedExpressionUtils + .mergeNulls(chunk, outputIndex, children[0].getOutputIndex(), children[1].getOutputIndex()); + if (leftInputVectorSlot.isDecimal64() && rightInputVectorSlot.isDecimal64() + && checkResultScaleDecimal64(leftInputVectorSlot.getScale(), rightInputVectorSlot.getScale(), + outputVectorSlot.getScale())) { + boolean success = doDecimal64Multiply(batchSize, isSelectionInUse, sel, + leftInputVectorSlot, rightInputVectorSlot, outputVectorSlot); + if (success) { + return; + } + } Slice output = outputVectorSlot.getMemorySegments(); - VectorizedExpressionUtils - .mergeNulls(chunk, outputIndex, children[0].getOutputIndex(), children[1].getOutputIndex()); boolean[] isNulls = outputVectorSlot.nulls(); // prepare for fast method @@ -92,7 +99,6 @@ public void eval(EvaluationContext ctx) { if (!useFastMethod) { normalMul(chunk, batchSize, isSelectionInUse, sel, outputVectorSlot, leftInputVectorSlot, rightInputVectorSlot, output); - return; } else if (enableFastVec) { // fast multiply 1 fastMul1(batchSize, outputVectorSlot, leftInputVectorSlot, rightInputVectorSlot, isNulls); @@ -102,6 +108,223 @@ public void eval(EvaluationContext ctx) { } } + private boolean checkResultScaleDecimal64(int leftScale, int rightScale, int actualResultScale) { + int resultScale = leftScale + rightScale; + if (resultScale != actualResultScale) { + return false; + } + if (resultScale == 0) { + return true; + } + return DecimalConverter.isDecimal64(resultScale) || DecimalConverter.isDecimal128(resultScale); + } + + private boolean doDecimal64Multiply(int batchSize, boolean 
isSelectionInUse, int[] sel, + DecimalBlock leftInputVectorSlot, DecimalBlock rightInputVectorSlot, + DecimalBlock outputVectorSlot) { + long[] decimal64Output = outputVectorSlot.allocateDecimal64(); + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + long x = leftInputVectorSlot.getLong(j); + long y = rightInputVectorSlot.getLong(j); + long result = x * y; + if (MathUtils.longMultiplyOverflow(x, y, result)) { + return doDecimal64MulTo128(batchSize, isSelectionInUse, sel, + leftInputVectorSlot, rightInputVectorSlot, outputVectorSlot); + } + + decimal64Output[j] = result; + } + } else { + for (int i = 0; i < batchSize; i++) { + long x = leftInputVectorSlot.getLong(i); + long y = rightInputVectorSlot.getLong(i); + long result = x * y; + if (MathUtils.longMultiplyOverflow(x, y, result)) { + return doDecimal64MulTo128(batchSize, isSelectionInUse, sel, + leftInputVectorSlot, rightInputVectorSlot, outputVectorSlot); + } + + decimal64Output[i] = result; + } + } + return true; + } + + /** + * decimal64 * decimal64 will not overflow decimal128 + */ + private boolean doDecimal64MulTo128(int batchSize, boolean isSelectionInUse, int[] sel, + DecimalBlock leftInputVectorSlot, DecimalBlock rightInputVectorSlot, + DecimalBlock outputVectorSlot) { + outputVectorSlot.allocateDecimal128(); + long[] outputDecimal128Low = outputVectorSlot.getDecimal128LowValues(); + long[] outputDecimal128High = outputVectorSlot.getDecimal128HighValues(); + + if (isSelectionInUse) { + mul64To128(batchSize, sel, + leftInputVectorSlot, rightInputVectorSlot, outputDecimal128Low, outputDecimal128High); + } else { + mul64To128(batchSize, leftInputVectorSlot, rightInputVectorSlot, outputDecimal128Low, outputDecimal128High); + } + return true; + } + + /** + * no overflow + */ + private void mul64To128(int batchSize, int[] sel, DecimalBlock leftInputVectorSlot, + DecimalBlock rightInputVectorSlot, + long[] outputDecimal128Low, long[] outputDecimal128High) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + long decimal64 = leftInputVectorSlot.getLong(j); + long multiplier = rightInputVectorSlot.getLong(j); + + if (decimal64 == 0 || multiplier == 0) { + outputDecimal128Low[j] = 0; + outputDecimal128High[j] = 0; + continue; + } + if (decimal64 == 1) { + outputDecimal128Low[j] = multiplier; + outputDecimal128High[j] = multiplier >= 0 ? 0 : -1; + continue; + } + if (decimal64 == -1 && multiplier != 0x8000000000000000L) { + long negMultiplier = -multiplier; + outputDecimal128Low[j] = negMultiplier; + outputDecimal128High[j] = negMultiplier >= 0 ? 0 : -1; + continue; + } + if (multiplier == 1) { + outputDecimal128Low[j] = decimal64; + outputDecimal128High[j] = decimal64 >= 0 ? 0 : -1; + continue; + } + if (multiplier == -1 && decimal64 != 0x8000000000000000L) { + outputDecimal128Low[j] = -decimal64; + outputDecimal128High[j] = -decimal64 >= 0 ? 
0 : -1; + continue; + } + boolean positive; + long multiplierAbs = multiplier; + long decimal64Abs = Math.abs(decimal64); + if (multiplier < 0) { + multiplierAbs = -multiplierAbs; + positive = decimal64 < 0; + } else { + positive = decimal64 >= 0; + } + long res; + int x1 = (int) decimal64Abs; + int x2 = (int) (decimal64Abs >>> 32); + int y1 = (int) multiplierAbs; + int y2 = (int) (multiplierAbs >>> 32); + + res = (y1 & 0xFFFFFFFFL) * (x1 & 0xFFFFFFFFL); + int z1 = (int) res; + + res = (y1 & 0xFFFFFFFFL) * (x2 & 0xFFFFFFFFL) + + (y2 & 0xFFFFFFFFL) * (x1 & 0xFFFFFFFFL) + (res >>> 32); + int z2 = (int) res; + + res = (y2 & 0xFFFFFFFFL) * (x2 & 0xFFFFFFFFL) + (res >>> 32); + int z3 = (int) res; + + res = (res >>> 32); + int z4 = (int) res; + if (positive) { + outputDecimal128Low[j] = (z1 & 0xFFFFFFFFL) | (((long) z2) << 32); + outputDecimal128High[j] = (z3 & 0xFFFFFFFFL) | (((long) z4) << 32); + } else { + outputDecimal128Low[j] = ~((z1 & 0xFFFFFFFFL) | (((long) z2) << 32)) + 1; + outputDecimal128High[j] = ~((z3 & 0xFFFFFFFFL) | (((long) z4) << 32)); + if (outputDecimal128Low[j] == 0) { + outputDecimal128High[j] += 1; + } + } + } + } + + /** + * no overflow + */ + private void mul64To128(int batchSize, DecimalBlock leftInputVectorSlot, DecimalBlock rightInputVectorSlot, + long[] outputDecimal128Low, long[] outputDecimal128High) { + for (int i = 0; i < batchSize; i++) { + long decimal64 = leftInputVectorSlot.getLong(i); + long multiplier = rightInputVectorSlot.getLong(i); + + if (decimal64 == 0 || multiplier == 0) { + outputDecimal128Low[i] = 0; + outputDecimal128High[i] = 0; + continue; + } + if (decimal64 == 1) { + outputDecimal128Low[i] = multiplier; + outputDecimal128High[i] = multiplier >= 0 ? 0 : -1; + continue; + } + if (decimal64 == -1 && multiplier != 0x8000000000000000L) { + long negMultiplier = -multiplier; + outputDecimal128Low[i] = negMultiplier; + outputDecimal128High[i] = negMultiplier >= 0 ? 0 : -1; + continue; + } + if (multiplier == 1) { + outputDecimal128Low[i] = decimal64; + outputDecimal128High[i] = decimal64 >= 0 ? 0 : -1; + continue; + } + if (multiplier == -1 && decimal64 != 0x8000000000000000L) { + outputDecimal128Low[i] = -decimal64; + outputDecimal128High[i] = -decimal64 >= 0 ? 
0 : -1; + continue; + } + boolean positive; + long multiplierAbs = multiplier; + long decimal64Abs = Math.abs(decimal64); + if (multiplier < 0) { + multiplierAbs = -multiplierAbs; + positive = decimal64 < 0; + } else { + positive = decimal64 >= 0; + } + long res; + int x1 = (int) decimal64Abs; + int x2 = (int) (decimal64Abs >>> 32); + int y1 = (int) multiplierAbs; + int y2 = (int) (multiplierAbs >>> 32); + + res = (y1 & 0xFFFFFFFFL) * (x1 & 0xFFFFFFFFL); + int z1 = (int) res; + + res = (y1 & 0xFFFFFFFFL) * (x2 & 0xFFFFFFFFL) + + (y2 & 0xFFFFFFFFL) * (x1 & 0xFFFFFFFFL) + (res >>> 32); + int z2 = (int) res; + + res = (y2 & 0xFFFFFFFFL) * (x2 & 0xFFFFFFFFL) + (res >>> 32); + int z3 = (int) res; + + res = (res >>> 32); + int z4 = (int) res; + if (positive) { + outputDecimal128Low[i] = (z1 & 0xFFFFFFFFL) | (((long) z2) << 32); + outputDecimal128High[i] = (z3 & 0xFFFFFFFFL) | (((long) z4) << 32); + } else { + outputDecimal128Low[i] = ~((z1 & 0xFFFFFFFFL) | (((long) z2) << 32)) + 1; + outputDecimal128High[i] = ~((z3 & 0xFFFFFFFFL) | (((long) z4) << 32)); + if (outputDecimal128Low[i] == 0) { + outputDecimal128High[i] += 1; + } + } + } + } + private void fastMul1(int batchSize, DecimalBlock outputVectorSlot, DecimalBlock leftInputVectorSlot, DecimalBlock rightInputVectorSlot, boolean[] isNulls) { long a1, b1; @@ -161,7 +384,7 @@ private void fastMul1(int batchSize, DecimalBlock outputVectorSlot, DecimalBlock private void fastMul2(int batchSize, DecimalBlock outputVectorSlot, DecimalBlock leftInputVectorSlot, DecimalBlock rightInputVectorSlot, boolean[] isNulls) { - initForFastMethod(); + initForFastMethod(batchSize); long a1, b1; long a2, b2; @@ -240,16 +463,17 @@ private void fastMul2(int batchSize, DecimalBlock outputVectorSlot, DecimalBlock } private void normalMul(MutableChunk chunk, int batchSize, boolean isSelectionInUse, int[] sel, - RandomAccessBlock outputVectorSlot, DecimalBlock leftInputVectorSlot, + DecimalBlock outputVectorSlot, DecimalBlock leftInputVectorSlot, DecimalBlock rightInputVectorSlot, Slice output) { DecimalStructure leftDec; DecimalStructure rightDec; - DecimalStructure tmpDec = new DecimalStructure(); - boolean isNull[] = outputVectorSlot.nulls(); boolean isLeftUnsigned = children[0].getOutputDataType().isUnsigned(); boolean isRightUnsigned = children[1].getOutputDataType().isUnsigned(); + Slice leftOutput = leftInputVectorSlot.allocCachedSlice(); + Slice rightOutput = rightInputVectorSlot.allocCachedSlice(); + if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { int j = sel[i]; @@ -262,10 +486,10 @@ private void normalMul(MutableChunk chunk, int batchSize, boolean isSelectionInU // do reset // fetch left decimal value - leftDec = new DecimalStructure(leftInputVectorSlot.getRegion(j)); + leftDec = new DecimalStructure(leftInputVectorSlot.getRegion(j, leftOutput)); // fetch right decimal value - rightDec = new DecimalStructure(rightInputVectorSlot.getRegion(j)); + rightDec = new DecimalStructure(rightInputVectorSlot.getRegion(j, rightOutput)); // do operator FastDecimalUtils.mul(leftDec, rightDec, toValue); @@ -281,27 +505,25 @@ private void normalMul(MutableChunk chunk, int batchSize, boolean isSelectionInU // do reset // fetch left decimal value - leftDec = new DecimalStructure(leftInputVectorSlot.getRegion(i)); + leftDec = new DecimalStructure(leftInputVectorSlot.getRegion(i, leftOutput)); // fetch right decimal value - rightDec = new DecimalStructure(rightInputVectorSlot.getRegion(i)); + rightDec = new DecimalStructure(rightInputVectorSlot.getRegion(i, 
rightOutput)); // do operator FastDecimalUtils.mul(leftDec, rightDec, toValue); } } + outputVectorSlot.setFullState(); } - private void initForFastMethod() { - if (sum0s != null) { - return; - } - sum0s = new long[1000]; - sum9s = new long[1000]; - sum18s = new long[1000]; - carry0s = new long[1000]; - carry9s = new long[1000]; - carry18s = new long[1000]; - nonNullSelection = new int[1000]; + private void initForFastMethod(int batchSize) { + sum0s = new long[batchSize]; + sum9s = new long[batchSize]; + sum18s = new long[batchSize]; + carry0s = new long[batchSize]; + carry9s = new long[batchSize]; + carry18s = new long[batchSize]; + nonNullSelection = new int[batchSize]; } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastSubDecimalColDecimalColVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastSubDecimalColDecimalColVectorizedExpression.java new file mode 100644 index 000000000..0c26bd0f1 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastSubDecimalColDecimalColVectorizedExpression.java @@ -0,0 +1,314 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.alibaba.polardbx.executor.vectorized.math;
+
+import com.alibaba.polardbx.common.datatype.DecimalStructure;
+import com.alibaba.polardbx.common.datatype.DecimalTypeBase;
+import com.alibaba.polardbx.common.datatype.FastDecimalUtils;
+import com.alibaba.polardbx.common.utils.MathUtils;
+import com.alibaba.polardbx.executor.chunk.DecimalBlock;
+import com.alibaba.polardbx.executor.chunk.MutableChunk;
+import com.alibaba.polardbx.executor.vectorized.AbstractVectorizedExpression;
+import com.alibaba.polardbx.executor.vectorized.EvaluationContext;
+import com.alibaba.polardbx.executor.vectorized.VectorizedExpression;
+import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils;
+import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures;
+import com.alibaba.polardbx.optimizer.core.datatype.DataTypes;
+import io.airlift.slice.Slice;
+
+import static com.alibaba.polardbx.common.datatype.DecimalTypeBase.DECIMAL_MEMORY_SIZE;
+import static com.alibaba.polardbx.executor.vectorized.metadata.ArgumentKind.Variable;
+import static com.alibaba.polardbx.executor.vectorized.metadata.ExpressionPriority.SPECIAL;
+
+@ExpressionSignatures(
+    names = {"-", "subtract"},
+    argumentTypes = {"Decimal", "Decimal"},
+    argumentKinds = {Variable, Variable},
+    priority = SPECIAL)
+public class FastSubDecimalColDecimalColVectorizedExpression extends AbstractVectorizedExpression {
+
+    static final int MAX_SCALE_DIFF = 8;
+
+    public FastSubDecimalColDecimalColVectorizedExpression(int outputIndex, VectorizedExpression[] children) {
+        super(DataTypes.DecimalType, outputIndex, children);
+    }
+
+    @Override
+    public void eval(EvaluationContext ctx) {
+        super.evalChildren(ctx);
+        MutableChunk chunk = ctx.getPreAllocatedChunk();
+        int batchSize = chunk.batchSize();
+        boolean isSelectionInUse = chunk.isSelectionInUse();
+        int[] sel = chunk.selection();
+
+        DecimalBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType).cast(DecimalBlock.class);
+        DecimalBlock leftInputVectorSlot =
+            chunk.slotIn(children[0].getOutputIndex(), children[0].getOutputDataType()).cast(DecimalBlock.class);
+        DecimalBlock rightInputVectorSlot =
+            chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType()).cast(DecimalBlock.class);
+
+        // merge input nulls into the output before the fast path can return early,
+        // matching the multiply and const-subtract expressions
+        VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[0].getOutputIndex(),
+            children[1].getOutputIndex());
+
+        if (leftInputVectorSlot.isDecimal64() && rightInputVectorSlot.isDecimal64()) {
+            boolean success =
+                doDecimal64Sub(batchSize, isSelectionInUse, sel, leftInputVectorSlot, rightInputVectorSlot,
+                    outputVectorSlot);
+            if (success) {
+                return;
+            }
+        }
+
+        // do normal sub
+        Slice output = outputVectorSlot.getMemorySegments();
+
+        DecimalStructure leftDec;
+        DecimalStructure rightDec;
+
+        Slice leftOutput = leftInputVectorSlot.allocCachedSlice();
+        Slice rightOutput = rightInputVectorSlot.allocCachedSlice();
+
+        if (isSelectionInUse) {
+            for (int i = 0; i < batchSize; i++) {
+                int j = sel[i];
+                int fromIndex = j * DECIMAL_MEMORY_SIZE;
+
+                // wrap memory in specified position
+                Slice decimalMemorySegment = output.slice(fromIndex, DECIMAL_MEMORY_SIZE);
+                DecimalStructure toValue = new DecimalStructure(decimalMemorySegment);
+
+                // fetch left decimal value
+                leftDec = new DecimalStructure(leftInputVectorSlot.getRegion(j, leftOutput));
+
+                // fetch right decimal value
+                rightDec = new DecimalStructure(rightInputVectorSlot.getRegion(j, rightOutput));
+
+                // do operator
+                FastDecimalUtils.sub(leftDec, rightDec, 
toValue); + } + } else { + for (int i = 0; i < batchSize; i++) { + int fromIndex = i * DECIMAL_MEMORY_SIZE; + + // wrap memory in specified position + Slice decimalMemorySegment = output.slice(fromIndex, DECIMAL_MEMORY_SIZE); + DecimalStructure toValue = new DecimalStructure(decimalMemorySegment); + + // do reset + + // fetch left decimal value + leftDec = new DecimalStructure((leftInputVectorSlot).getRegion(i, leftOutput)); + + // fetch right decimal value + rightDec = new DecimalStructure((rightInputVectorSlot).getRegion(i, rightOutput)); + + // do operator + FastDecimalUtils.sub(leftDec, rightDec, toValue); + } + } + outputVectorSlot.setFullState(); + } + + private boolean doDecimal64Sub(int batchSize, boolean isSelectionInUse, int[] sel, + DecimalBlock leftInputVectorSlot, DecimalBlock rightInputVectorSlot, + DecimalBlock outputVectorSlot) { + if (leftInputVectorSlot.getScale() == rightInputVectorSlot.getScale()) { + if (leftInputVectorSlot.getScale() != outputVectorSlot.getScale()) { + // derived type does not match, which is a rare case + return false; + } + return doDecimal64SubSameScale(batchSize, isSelectionInUse, sel, leftInputVectorSlot, rightInputVectorSlot, + outputVectorSlot); + } + return doDecimal64SubDiffScale(batchSize, isSelectionInUse, sel, leftInputVectorSlot, rightInputVectorSlot, + outputVectorSlot); + } + + private boolean doDecimal64SubSameScale(int batchSize, boolean isSelectionInUse, int[] sel, + DecimalBlock leftInputVectorSlot, DecimalBlock rightInputVectorSlot, + DecimalBlock outputVectorSlot) { + long[] decimal64Output = outputVectorSlot.allocateDecimal64(); + long[] leftArray = leftInputVectorSlot.decimal64Values(); + long[] rightArray = rightInputVectorSlot.decimal64Values(); + + boolean overflow = false; + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + long leftDec64 = leftArray[j]; + long rightDec64 = rightArray[j]; + long result = leftDec64 - rightDec64; + decimal64Output[j] = result; + + overflow |= ((leftDec64 ^ rightDec64) & (leftDec64 ^ result)) < 0; + } + } else { + for (int i = 0; i < batchSize; i++) { + long leftDec64 = leftArray[i]; + long rightDec64 = rightArray[i]; + long result = leftDec64 - rightDec64; + decimal64Output[i] = result; + + overflow |= ((leftDec64 ^ rightDec64) & (leftDec64 ^ result)) < 0; + } + } + if (overflow) { + return doDecimal64SameScaleSubTo128(batchSize, isSelectionInUse, sel, leftInputVectorSlot, + rightInputVectorSlot, + outputVectorSlot); + } + return true; + } + + /** + * decimal64 - decimal64 will not overflow + */ + private boolean doDecimal64SameScaleSubTo128(int batchSize, boolean isSelectionInUse, int[] sel, + DecimalBlock leftInputVectorSlot, DecimalBlock rightInputVectorSlot, + DecimalBlock outputVectorSlot) { + outputVectorSlot.allocateDecimal128(); + long[] output128Low = outputVectorSlot.getDecimal128LowValues(); + long[] output128High = outputVectorSlot.getDecimal128HighValues(); + long[] leftArray = leftInputVectorSlot.decimal64Values(); + long[] rightArray = rightInputVectorSlot.decimal64Values(); + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + long leftLow = leftArray[j]; + long rightLow = rightArray[j]; + + long leftHigh = leftLow >= 0 ? 0 : -1; + long rightHigh = rightLow >= 0 ? 
0 : -1; + + long newDec128High = leftHigh - rightHigh; + long newDec128Low = leftLow - rightLow; + long borrow = ((~leftLow & rightLow) + | (~(leftLow ^ rightLow) & newDec128Low)) >>> 63; + newDec128High = newDec128High - borrow; + + output128Low[j] = newDec128Low; + output128High[j] = newDec128High; + } + } else { + for (int i = 0; i < batchSize; i++) { + long leftLow = leftArray[i]; + long rightLow = rightArray[i]; + + long leftHigh = leftLow >= 0 ? 0 : -1; + long rightHigh = rightLow >= 0 ? 0 : -1; + + long newDec128High = leftHigh - rightHigh; + long newDec128Low = leftLow - rightLow; + long borrow = ((~leftLow & rightLow) + | (~(leftLow ^ rightLow) & newDec128Low)) >>> 63; + newDec128High = newDec128High - borrow; + + output128Low[i] = newDec128Low; + output128High[i] = newDec128High; + } + } + return true; + } + + private boolean doDecimal64SubDiffScale(int batchSize, boolean isSelectionInUse, int[] sel, + DecimalBlock leftInputVectorSlot, DecimalBlock rightInputVectorSlot, + DecimalBlock outputVectorSlot) { + int leftScale = leftInputVectorSlot.getScale(); + int rightScale = rightInputVectorSlot.getScale(); + int maxScale = Math.max(leftScale, rightScale); + if (outputVectorSlot.getScale() != maxScale) { + // derived type does not match, which is a rare case + return false; + } + int scaleDiff = leftScale - rightScale; + int scaleDiffAbs = Math.abs(scaleDiff); + if (scaleDiffAbs > MAX_SCALE_DIFF) { + return false; + } + long[] decimal64Output = outputVectorSlot.allocateDecimal64(); + long[] leftArray = leftInputVectorSlot.decimal64Values(); + long[] rightArray = rightInputVectorSlot.decimal64Values(); + + boolean overflow = false; + final long pow = DecimalTypeBase.POW_10[scaleDiffAbs]; + if (scaleDiff > 0) { + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + long leftDec64 = leftArray[j]; + long rightDec64 = rightArray[j]; + long scaledRightDec64 = rightDec64 * pow; + long result = leftDec64 - scaledRightDec64; + decimal64Output[j] = result; + + overflow |= MathUtils.longMultiplyOverflow(rightDec64, pow, scaledRightDec64); + overflow |= ((leftDec64 ^ scaledRightDec64) & (leftDec64 ^ result)) < 0; + } + } else { + for (int i = 0; i < batchSize; i++) { + long leftDec64 = leftArray[i]; + long rightDec64 = rightArray[i]; + long scaledRightDec64 = rightDec64 * pow; + long result = leftDec64 - scaledRightDec64; + decimal64Output[i] = result; + + overflow |= MathUtils.longMultiplyOverflow(rightDec64, pow, scaledRightDec64); + overflow |= ((leftDec64 ^ scaledRightDec64) & (leftDec64 ^ result)) < 0; + } + } + } else if (scaleDiff < 0) { + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + long leftDec64 = leftArray[j]; + long rightDec64 = rightArray[j]; + long scaledLeftDec64 = leftDec64 * pow; + long result = scaledLeftDec64 - rightDec64; + decimal64Output[j] = result; + + overflow |= MathUtils.longMultiplyOverflow(leftDec64, pow, scaledLeftDec64); + overflow |= ((scaledLeftDec64 ^ rightDec64) & (scaledLeftDec64 ^ result)) < 0; + } + } else { + for (int i = 0; i < batchSize; i++) { + long leftDec64 = leftArray[i]; + long rightDec64 = rightArray[i]; + long scaledLeftDec64 = leftDec64 * pow; + long result = scaledLeftDec64 - rightDec64; + decimal64Output[i] = result; + + overflow |= MathUtils.longMultiplyOverflow(leftDec64, pow, scaledLeftDec64); + overflow |= ((scaledLeftDec64 ^ rightDec64) & (scaledLeftDec64 ^ result)) < 0; + } + } + + } else { + throw new IllegalStateException("Should not reach here"); + } + + if 
(overflow) { + outputVectorSlot.deallocateDecimal64(); + return false; + } + return true; + } +} \ No newline at end of file diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastSubLongConstDecimalColVectorizedExpression.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastSubLongConstDecimalColVectorizedExpression.java index 3f1c5d3d6..6653d6203 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastSubLongConstDecimalColVectorizedExpression.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/math/FastSubLongConstDecimalColVectorizedExpression.java @@ -18,17 +18,17 @@ import com.alibaba.polardbx.common.datatype.DecimalConverter; import com.alibaba.polardbx.common.datatype.DecimalStructure; +import com.alibaba.polardbx.common.datatype.DecimalTypeBase; import com.alibaba.polardbx.common.datatype.FastDecimalUtils; import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.MathUtils; import com.alibaba.polardbx.executor.chunk.DecimalBlock; import com.alibaba.polardbx.executor.chunk.MutableChunk; -import com.alibaba.polardbx.executor.chunk.RandomAccessBlock; import com.alibaba.polardbx.executor.vectorized.AbstractVectorizedExpression; import com.alibaba.polardbx.executor.vectorized.EvaluationContext; import com.alibaba.polardbx.executor.vectorized.LiteralVectorizedExpression; import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; import com.alibaba.polardbx.executor.vectorized.VectorizedExpressionUtils; -import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionPriority; import com.alibaba.polardbx.executor.vectorized.metadata.ExpressionSignatures; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import io.airlift.slice.Slice; @@ -45,18 +45,39 @@ priority = SPECIAL ) public class FastSubLongConstDecimalColVectorizedExpression extends AbstractVectorizedExpression { + // avoid overflow of (left - a2) + private static final long MAX_LEFT_FOR_SIMPLE = 1_999_999_999L; + private static final long MIN_LEFT_FOR_SIMPLE = 999_999_999L; private final boolean leftIsNull; private final long left; + private final boolean useLeftWithScale; + private final long leftWithScale; public FastSubLongConstDecimalColVectorizedExpression(int outputIndex, VectorizedExpression[] children) { super(DataTypes.DecimalType, outputIndex, children); Object leftValue = ((LiteralVectorizedExpression) children[0]).getConvertedValue(); if (leftValue == null) { leftIsNull = true; - left = (long) 0; + left = 0; + leftWithScale = 0; + useLeftWithScale = true; } else { leftIsNull = false; left = (long) leftValue; + if (left == 0) { + leftWithScale = 0; + useLeftWithScale = true; + return; + } + int scale = children[1].getOutputDataType().getScale(); + if (scale < 0 || scale >= DecimalTypeBase.POW_10.length) { + leftWithScale = 0; + useLeftWithScale = false; + } else { + long power = DecimalTypeBase.POW_10[scale]; + leftWithScale = left * power; + useLeftWithScale = !MathUtils.longMultiplyOverflow(left, power, leftWithScale); + } } } @@ -73,11 +94,19 @@ public void eval(EvaluationContext ctx) { return; } - DecimalBlock outputVectorSlot = (DecimalBlock) chunk.slotIn(outputIndex, outputDataType); + DecimalBlock outputVectorSlot = chunk.slotIn(outputIndex, outputDataType).cast(DecimalBlock.class); DecimalBlock rightInputVectorSlot = - (DecimalBlock) chunk.slotIn(children[1].getOutputIndex(), 
children[1].getOutputDataType()); + chunk.slotIn(children[1].getOutputIndex(), children[1].getOutputDataType()).cast(DecimalBlock.class); - Slice output = outputVectorSlot.getMemorySegments(); + VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[1].getOutputIndex()); + + if (rightInputVectorSlot.isDecimal64() && useLeftWithScale && + checkResultScale(rightInputVectorSlot.getScale(), outputVectorSlot.getScale())) { + boolean success = doDecimal64Sub(batchSize, isSelectionInUse, sel, rightInputVectorSlot, outputVectorSlot); + if (success) { + return; + } + } DecimalStructure leftDec = new DecimalStructure(); @@ -88,13 +117,13 @@ public void eval(EvaluationContext ctx) { rightInputVectorSlot.collectDecimalInfo(); boolean useFastMethod = !isSelectionInUse - && (rightInputVectorSlot.isSimple() && rightInputVectorSlot.getInt2Pos() == -1); + && (rightInputVectorSlot.isSimple() && (rightInputVectorSlot.getInt2Pos() == -1)) + && isLeftInSimpleRange(); - VectorizedExpressionUtils.mergeNulls(chunk, outputIndex, children[1].getOutputIndex()); boolean[] isNulls = outputVectorSlot.nulls(); if (!useFastMethod || !enableFastVec) { - normalSub(batchSize, isSelectionInUse, sel, rightInputVectorSlot, output, leftDec); + normalSub(batchSize, isSelectionInUse, sel, rightInputVectorSlot, outputVectorSlot, leftDec); } else { // a1 - (a2 + b2 * [-9]) // = (a1 - a2) + (0 - b2) * [-9] @@ -119,16 +148,96 @@ public void eval(EvaluationContext ctx) { sub0 = !isNeg ? sub0 : -sub0; if (sub0 < 1000_000_000) { - outputVectorSlot.setSubResult1(i, (int)sub0, (int)sub9, isNeg); + outputVectorSlot.setSubResult1(i, (int) sub0, (int) sub9, isNeg); } else { - outputVectorSlot.setSubResult2(i, 1, (int)(sub0 - 1000_000_000), (int)sub9, isNeg); + outputVectorSlot.setSubResult2(i, 1, (int) (sub0 - 1000_000_000), (int) sub9, isNeg); } } } } - private void normalSub(int batchSize, boolean isSelectionInUse, int[] sel, DecimalBlock rightInputVectorSlot, - Slice output, DecimalStructure leftDec) { + private boolean checkResultScale(int scale, int resultScale) { + return scale == resultScale; + } + + private boolean isLeftInSimpleRange() { + return left <= MAX_LEFT_FOR_SIMPLE && left >= MIN_LEFT_FOR_SIMPLE; + } + + /** + * @return success: subtraction done without overflow + */ + private boolean doDecimal64Sub(int batchSize, boolean isSelectionInUse, int[] sel, + DecimalBlock rightInputVectorSlot, DecimalBlock outputVectorSlot) { + long[] decimal64Output = outputVectorSlot.allocateDecimal64(); + boolean isOverflowDec64 = false; + + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + long rightDec64 = rightInputVectorSlot.getLong(j); + long result = leftWithScale - rightDec64; + decimal64Output[j] = result; + isOverflowDec64 |= ((left ^ rightDec64) & (left ^ result)) < 0; + } + } else { + for (int i = 0; i < batchSize; i++) { + long rightDec64 = rightInputVectorSlot.getLong(i); + long result = leftWithScale - rightDec64; + decimal64Output[i] = result; + isOverflowDec64 |= ((left ^ rightDec64) & (left ^ result)) < 0; + } + } + if (!isOverflowDec64) { + return true; + } + + outputVectorSlot.allocateDecimal128(); + + long[] outputDec128Lows = outputVectorSlot.getDecimal128LowValues(); + long[] outputDec128Highs = outputVectorSlot.getDecimal128HighValues(); + + long leftHigh = leftWithScale >= 0 ? 0 : -1; + if (isSelectionInUse) { + for (int i = 0; i < batchSize; i++) { + int j = sel[i]; + + long rightDec128Low = rightInputVectorSlot.getLong(j); + long rightDec128High = rightDec128Low >= 0 ? 
0 : -1; + + long newDec128High = leftHigh - rightDec128High; + long newDec128Low = leftWithScale - rightDec128Low; + long borrow = ((~leftWithScale & rightDec128Low) + | (~(leftWithScale ^ rightDec128Low) & newDec128Low)) >>> 63; + long resultHigh = newDec128High - borrow; + + outputDec128Lows[j] = newDec128Low; + outputDec128Highs[j] = resultHigh; + } + } else { + for (int i = 0; i < batchSize; i++) { + long rightDec128Low = rightInputVectorSlot.getLong(i); + long rightDec128High = rightDec128Low >= 0 ? 0 : -1; + + long newDec128High = leftHigh - rightDec128High; + long newDec128Low = leftWithScale - rightDec128Low; + long borrow = ((~leftWithScale & rightDec128Low) + | (~(leftWithScale ^ rightDec128Low) & newDec128Low)) >>> 63; + long resultHigh = newDec128High - borrow; + + outputDec128Lows[i] = newDec128Low; + outputDec128Highs[i] = resultHigh; + } + } + return true; + } + + private void normalSub(int batchSize, boolean isSelectionInUse, int[] sel, + DecimalBlock rightInputVectorSlot, + DecimalBlock outputVectorSlot, DecimalStructure leftDec) { + Slice output = outputVectorSlot.getMemorySegments(); + DecimalStructure rightDec; if (isSelectionInUse) { for (int i = 0; i < batchSize; i++) { @@ -165,5 +274,6 @@ private void normalSub(int batchSize, boolean isSelectionInUse, int[] sel, Decim FastDecimalUtils.sub(leftDec, rightDec, toValue); } } + outputVectorSlot.setFullState(); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/metadata/ExpressionConstructor.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/metadata/ExpressionConstructor.java index 5c9c653f4..b5a3529b9 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/metadata/ExpressionConstructor.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/vectorized/metadata/ExpressionConstructor.java @@ -18,7 +18,6 @@ import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.executor.vectorized.VectorizedExpression; -import com.alibaba.polardbx.executor.vectorized.build.Rex2VectorizedExpressionVisitor; import com.alibaba.polardbx.optimizer.core.datatype.DataType; import java.lang.reflect.Constructor; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/whatIf/ShardingAdvisorWhatIfSchemaManager.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/whatIf/ShardingAdvisorWhatIfSchemaManager.java index f76d184d5..3a1062a3e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/whatIf/ShardingAdvisorWhatIfSchemaManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/whatIf/ShardingAdvisorWhatIfSchemaManager.java @@ -78,6 +78,19 @@ public ShardingAdvisorWhatIfSchemaManager(SchemaManager actualSchemaManager, this.executionContext = executionContext; } + private static String getIndexName(String tableName, List columns, WhatIfIndexType type) { + if (type == WhatIfIndexType.local) { + return tableName + CandidateIndex.WHAT_IF_INDEX_INFIX + "_" + String.join("_", columns); + } + if (type == WhatIfIndexType.gsi) { + return tableName + CandidateIndex.WHAT_IF_GSI_INFIX + "_" + String.join("_", columns); + } + if (type == WhatIfIndexType.auto) { + return tableName + CandidateIndex.WHAT_IF_AUTO_INDEX_INFIX + "_" + String.join("_", columns); + } + return null; + } + @Override protected void doInit() { for (Map.Entry entry : actualSchemaManager.getCache().entrySet()) { @@ -121,7 +134,7 @@ private TableMeta 
buildNewWhatIfTableMeta(TableMeta tableMeta, String alterTable // partition table SqlAlterTableRepartition sqlAlterTableRepartition = (SqlAlterTableRepartition) sqlNode; partitionInfo = PartitionInfoBuilder - .buildPartitionInfoByPartDefAst(schemaName, tableName, null, null, + .buildPartitionInfoByPartDefAst(schemaName, tableName, null, false, null, (SqlPartitionBy) sqlAlterTableRepartition.getSqlPartition(), null, new ArrayList<>(tableMeta.getPrimaryKey()), @@ -144,7 +157,7 @@ private TableMeta buildNewWhatIfTableMeta(TableMeta tableMeta, String alterTable } else { // broadcast table partitionInfo = PartitionInfoBuilder - .buildPartitionInfoByPartDefAst(schemaName, tableName, null, null, + .buildPartitionInfoByPartDefAst(schemaName, tableName, null, false, null, null, null, new ArrayList<>(tableMeta.getPrimaryKey()), tableMeta.getAllColumns(), @@ -370,20 +383,7 @@ private IndexMeta generateWhatIfIndexMeta( type)); } - private static String getIndexName(String tableName, List columns, WhatIfIndexType type) { - if (type == WhatIfIndexType.local) { - return tableName + CandidateIndex.WHAT_IF_INDEX_INFIX + "_" + String.join("_", columns); - } - if (type == WhatIfIndexType.gsi) { - return tableName + CandidateIndex.WHAT_IF_GSI_INFIX + "_" + String.join("_", columns); - } - if (type == WhatIfIndexType.auto) { - return tableName + CandidateIndex.WHAT_IF_AUTO_INDEX_INFIX + "_" + String.join("_", columns); - } - return null; - } - private enum WhatIfIndexType { - local, gsi, auto; + local, gsi, auto } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/whatIf/ShardingWhatIf.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/whatIf/ShardingWhatIf.java index a9babf483..d5c2b83d4 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/whatIf/ShardingWhatIf.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/executor/whatIf/ShardingWhatIf.java @@ -32,6 +32,7 @@ import com.alibaba.polardbx.optimizer.parse.bean.SqlParameterized; import com.alibaba.polardbx.optimizer.sharding.advisor.ShardResultForOutput; import com.alibaba.polardbx.optimizer.utils.OptimizerUtils; +import com.alibaba.polardbx.optimizer.utils.PlannerUtils; import org.apache.calcite.plan.RelOptCost; import org.apache.calcite.rel.externalize.RelDrdsWriter; import org.apache.calcite.rel.metadata.RelMetadataQuery; @@ -131,7 +132,7 @@ private void debugUse(ExecutionContext ec, List> ec.setSchemaManagers(oldSchemaManagers); ExecutionPlan planOld = Planner.getInstance().doBuildPlan(sql, ec); - RelOptCost costOld = RelMetadataQuery.instance().getCumulativeCost(planOld.getPlan()); + RelOptCost costOld = PlannerUtils.newMetadataQuery().getCumulativeCost(planOld.getPlan()); RelDrdsWriter relWriter = new RelDrdsWriter(null, SqlExplainLevel.ALL_ATTRIBUTES, false, para.getCurrentParameter(), null, null); relWriter.setExecutionContext(ec); @@ -145,7 +146,7 @@ private void debugUse(ExecutionContext ec, List> ec.setSchemaManagers(whatIfSchemaManagers); ExecutionPlan planNew = Planner.getInstance().doBuildPlan(sql, ec); - RelOptCost costNew = RelMetadataQuery.instance().getCumulativeCost(planNew.getPlan()); + RelOptCost costNew = PlannerUtils.newMetadataQuery().getCumulativeCost(planNew.getPlan()); relWriter = new RelDrdsWriter(null, SqlExplainLevel.ALL_ATTRIBUTES, false, para.getCurrentParameter(), null, null); relWriter.setExecutionContext(ec); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/group/config/OptimizedGroupConfigManager.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/group/config/OptimizedGroupConfigManager.java index e20ca0b9a..ee30cb948 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/group/config/OptimizedGroupConfigManager.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/group/config/OptimizedGroupConfigManager.java @@ -32,6 +32,7 @@ import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.config.ConfigDataMode; +import com.alibaba.polardbx.executor.physicalbackfill.PhysicalBackfillUtils; import com.alibaba.polardbx.gms.config.impl.ConnPoolConfig; import com.alibaba.polardbx.gms.ha.HaSwitchParams; import com.alibaba.polardbx.gms.ha.HaSwitcher; @@ -112,17 +113,27 @@ public void doInit() { } protected void initGroupDataSourceByMetaDb() { - Set instIds = new HashSet<>(); - instIds.add(InstIdUtil.getInstId()); - if (ServerInstIdManager.getInstance().isMasterInst()) { + + if (ConfigDataMode.isMasterMode()) { + instIds.add(InstIdUtil.getInstId()); //ignore the buildInDB which needn't the separation of reading and writing! if (!SystemDbHelper.isDBBuildIn(groupDataSource.getSchemaName())) { instIds.addAll(ServerInstIdManager.getInstance().getAllHTAPReadOnlyInstIdSet()); } - } else { + } else if (ConfigDataMode.isRowSlaveMode()) { + instIds.add(InstIdUtil.getInstId()); instIds.add(InstIdUtil.getMasterInstId()); + } else if (ConfigDataMode.isColumnarMode() && SystemDbHelper.isDBBuildInExceptCdc( + groupDataSource.getSchemaName())) { + //here still need create information_schema group datasource for columnar mode. + instIds.add(InstIdUtil.getInstId()); } + + if (instIds.isEmpty()) { + return; + } + this.listenerInstIds.clear(); this.listenerInstIds.addAll(instIds); @@ -255,7 +266,7 @@ protected synchronized void updateListenerStorageInstId(String dbName, String gr Set careInstIds = new HashSet<>(); Set careStorageInstIds = new HashSet<>(); - if (ServerInstIdManager.getInstance().isMasterInst()) { + if (ConfigDataMode.isMasterMode()) { Set htapIds = ServerInstIdManager.getInstance().getAllHTAPReadOnlyInstIdSet(); ServerInstIdManager.getInstance().getInstId2StorageIds().entrySet().stream().forEach(t -> { String inst = t.getKey(); @@ -265,7 +276,7 @@ protected synchronized void updateListenerStorageInstId(String dbName, String gr } } ); - } else { + } else if (ConfigDataMode.isRowSlaveMode()) { careInstIds.add(ServerInstIdManager.getInstance().getMasterInstId()); careInstIds.add(ServerInstIdManager.getInstance().getInstId()); ServerInstIdManager.getInstance().getInstId2StorageIds().entrySet().stream().forEach(t -> { @@ -275,7 +286,7 @@ protected synchronized void updateListenerStorageInstId(String dbName, String gr }); } - if (ServerInstIdManager.getInstance().isMasterInst()) { + if (ConfigDataMode.isMasterMode()) { //ignore the buildInDB which needn't the separation of reading and writing! if (!SystemDbHelper.isDBBuildIn(groupDataSource.getSchemaName())) { Set newInstIdSet = Sets.difference( @@ -371,20 +382,25 @@ public GroupDetailInfoListener(OptimizedGroupConfigManager groupConfigManager, S @Override public void onHandleConfig(String dataId, long newOpVersion) { HashSet careInstIds = Sets.newHashSet(); - careInstIds.add(instId); - if (ServerInstIdManager.getInstance().isMasterInst()) { + if (ConfigDataMode.isMasterMode()) { + careInstIds.add(instId); //ignore the buildInDB which needn't the separation of reading and writing! 
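// (that is, built-in system databases are always served from the master instance, so no read-only instance ids are collected for them)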
if (!SystemDbHelper.isDBBuildIn(dbName)) { ServerInstIdManager.getInstance().getAllHTAPReadOnlyInstIdSet().stream().forEach(t -> { careInstIds.add(t); }); } - } else { + this.groupConfigManager.loadGroupDataSourceByMetaDb(careInstIds); + } else if (ConfigDataMode.isRowSlaveMode()) { + careInstIds.add(instId); careInstIds.add(ServerInstIdManager.getInstance().getMasterInstId()); careInstIds.add(ServerInstIdManager.getInstance().getInstId()); + this.groupConfigManager.loadGroupDataSourceByMetaDb(careInstIds); + } else if (ConfigDataMode.isColumnarMode() && SystemDbHelper.isDBBuildInExceptCdc(dbName)) { + //here still need create information_schema group datasource for columnar mode. + careInstIds.add(instId); + this.groupConfigManager.loadGroupDataSourceByMetaDb(careInstIds); } - - this.groupConfigManager.loadGroupDataSourceByMetaDb(careInstIds); } } @@ -615,7 +631,7 @@ protected List>> buildDataSourceWra // build DatasourceWrapper for the instIds dataSourceWrapperLists = buildDataSource(appName, unitName, instIds, dbName, groupName, outputHaSwitchParamsWithReadLock, - ServerInstIdManager.getInstance().isMasterInst()); + ConfigDataMode.isMasterMode()); if (dataSourceWrapperLists.size() == 0) { throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, String.format("instId[%s] is NOT available", instIds)); @@ -826,6 +842,8 @@ protected synchronized void doDestroy() { try { unregisterHaSwitcher(); unbindGroupConfigListener(); + //clean the cache datasource for physical backfill + PhysicalBackfillUtils.destroyDataSources(); } catch (Exception e) { logger.error("we got exception when close datasource .", e); } @@ -843,13 +861,13 @@ public void destroyDataSource() { } public TAtomDataSource getDataSource(MasterSlave masterSlave) { + if (groupDataSourceHolder == null && ConfigDataMode.isColumnarMode()) { + throw new TddlRuntimeException(ErrorCode.ERR_COLUMNAR_SCHEMA, + "don't support query the table without columnar index!", new NullPointerException()); + } return this.groupDataSourceHolder.getDataSource(masterSlave); } - public GroupDataSourceHolder getGroupDataSourceHolder() { - return groupDataSourceHolder; - } - protected String getServerInstIdForGroupDataSource() { return null; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/group/jdbc/TGroupDirectConnection.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/group/jdbc/TGroupDirectConnection.java index 159cf40b1..22174a5a8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/group/jdbc/TGroupDirectConnection.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/group/jdbc/TGroupDirectConnection.java @@ -94,6 +94,14 @@ public TGroupDirectConnection(TGroupDataSource groupDataSource, MasterSlave mast this(groupDataSource, master, null, null); } + public TGroupDirectConnection(TGroupDataSource groupDataSource, Connection connection) + throws SQLException { + this.groupDataSource = groupDataSource; + this.userName = null; + this.password = null; + setConn(connection); + } + public TGroupDirectConnection(TGroupDataSource groupDataSource, MasterSlave master, String userName, String password) throws SQLException { @@ -610,7 +618,7 @@ public void discard(Throwable error) { } try { if (conn.isWrapperFor(XConnection.class)) { - conn.unwrap(XConnection.class).setLastException(new Exception("discard")); + conn.unwrap(XConnection.class).setLastException(new Exception("discard"), true); } else { // Discard pooled connection. 
DruidPooledConnection druidConn = conn.unwrap(DruidPooledConnection.class); @@ -626,4 +634,15 @@ public void discard(Throwable error) { log.error("Failed to discard connection on group " + groupDataSource.getDbGroupKey(), ex); } } + + @Override + public void forceRollback() throws SQLException { + try (Statement stmt = conn.createStatement()) { + stmt.execute("ROLLBACK"); + } catch (Throwable e) { + log.error("Cleanup readonly transaction branch failed on " + groupDataSource.getDbGroupKey(), e); + discard(e); + throw e; + } + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/group/utils/VariableProxy.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/group/utils/VariableProxy.java index fc48518c3..dd2e676fb 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/group/utils/VariableProxy.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/group/utils/VariableProxy.java @@ -20,6 +20,7 @@ import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.config.ConfigDataMode; import com.alibaba.polardbx.group.jdbc.TGroupDataSource; import com.alibaba.polardbx.group.jdbc.TGroupDirectConnection; import com.alibaba.polardbx.optimizer.variable.IVariableProxy; @@ -49,7 +50,11 @@ public void resetDataSource(TGroupDataSource dataSource) { this.tGroupDataSource = dataSource; } + @Override public ImmutableMap getSessionVariables() { + if (!ConfigDataMode.needDNResource()) { + return ImmutableMap.builder().build(); + } TGroupDirectConnection tGroupDirectConnection = null; try { tGroupDirectConnection = tGroupDataSource.getConnection(); @@ -72,6 +77,9 @@ public ImmutableMap getSessionVariables() { @Override public ImmutableMap getGlobalVariables() { + if (!ConfigDataMode.needDNResource()) { + return ImmutableMap.builder().build(); + } TGroupDirectConnection tGroupDirectConnection = null; try { tGroupDirectConnection = tGroupDataSource.getConnection(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/InspectIndex/TableIndexInspector.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/InspectIndex/TableIndexInspector.java index d4115afb3..8bac1b37b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/InspectIndex/TableIndexInspector.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/InspectIndex/TableIndexInspector.java @@ -20,11 +20,14 @@ import com.alibaba.polardbx.common.utils.Pair; import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlCreateTableStatement; import com.alibaba.polardbx.druid.util.StringUtils; +import com.alibaba.polardbx.executor.common.GsiStatisticsManager; import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlHelper; import com.alibaba.polardbx.gms.topology.DbInfoManager; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticManager; +import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticResult; import com.alibaba.polardbx.optimizer.core.datatype.DateTimeType; import com.alibaba.polardbx.optimizer.core.datatype.TimeType; import com.alibaba.polardbx.optimizer.core.datatype.TimestampType; @@ -156,6 +159,14 @@ public static Map 
createTableInspectorsInSchema(Str gsiInfoByTableNameGsiName.put(tableName, new TreeMap<>(String::compareToIgnoreCase)); } gsiInfoByTableNameGsiName.get(tableName).put(gsiName, info); + // rectify GSI cardinality from column statistics + if ("key".equalsIgnoreCase(info.tbPartitionPolicy)) { + String cardinalityColumn = info.tbPartitionColumns.get(0); + StatisticResult cardinalityResult = + StatisticManager.getInstance() + .getCardinality(schema, tableName, cardinalityColumn, false, false); + info.rowCardinality = cardinalityResult.getLongValue(); + } } Map> lsiInfoByTableNameGsiName = @@ -172,6 +183,11 @@ public static Map createTableInspectorsInSchema(Str } public TableIndexInspector inspectUseFrequency() { + boolean enableGsiStatisticCollection = GsiStatisticsManager.enableGsiStatisticsCollection(); + if (!enableGsiStatisticCollection) { + return this; + } + final int REPORT_USE_FREQUENCY = 100; for (InspectIndexInfo info : globalIndexInspectInfo.values()) { @@ -184,6 +200,11 @@ public TableIndexInspector inspectUseFrequency() { } public TableIndexInspector inspectAccessTime() { + boolean enableGsiStatisticCollection = GsiStatisticsManager.enableGsiStatisticsCollection(); + if (!enableGsiStatisticCollection) { + return this; + } + final int REPORT_ACCESS_EXCEED_DAYS = 30; Date reportDate = Timestamp.valueOf(ZonedDateTime.now().minusDays(REPORT_ACCESS_EXCEED_DAYS).toLocalDateTime()); @@ -202,8 +223,12 @@ public TableIndexInspector inspectAccessTime() { return this; } + /** + * Definition: discrimination = distinct(gsiRow) / number of GSI partitions. + * Rule of thumb: a discrimination above 3 is considered healthy (e.g. 48 distinct values over 16 partitions gives exactly 3.0). + */ public TableIndexInspector inspectDiscrimination() { - final Double minDiscrimination = 0.4; + final Double minDiscrimination = 3.0; final Long minRowCountToReport = 10000L; for (InspectIndexInfo info : globalIndexInspectInfo.values()) { @@ -618,7 +643,8 @@ protected void inspectDuplicateLsi() { for (int i = 0; i < indexes.size(); i++) { if (!isAutoGeneratedLsi(indexes.get(i))) { reserveIdx = i; - } else if (!isAutoGeneratedLsi(indexes.get(i)) && localIndexInspectInfo.get(indexes.get(i)).unique) { + } + if (!isAutoGeneratedLsi(indexes.get(i)) && localIndexInspectInfo.get(indexes.get(i)).unique) { reserveIdx = i; break; } @@ -664,6 +690,25 @@ protected void inspectPrefixWithPrimaryKeyLsi() { } } + protected void inspectSubFragmentInPrimaryKeyLsi() { + for (Map.Entry entry : localIndexInspectInfo.entrySet()) { + String lsiName = entry.getKey(); + if (isAutoPartitionLsi(lsiName)) { + continue; + } + InspectIndexInfo info = entry.getValue(); + if (containInPrefix(info.indexColumns, primaryColumns)) { + info.problem.put( + InspectIndexInfo.BadIndexKind.DUPLICATE_LSI, + String.format( + "index %s has the same effect as primary key", + info.indexName + ) + ); + } + } + } + protected void inspectSharedIndexColumnLsi() { Map> hashTableOnSharedIndexColumns = new TreeMap<>(String::compareToIgnoreCase); for (Map.Entry entry : localIndexInspectInfo.entrySet()) { @@ -765,6 +810,7 @@ public TableIndexInspector inspectGsi() { public TableIndexInspector inspectLsi() { inspectPrefixWithPrimaryKeyLsi(); + inspectSubFragmentInPrimaryKeyLsi(); removePrimaryKeySuffixInLsi(); inspectDuplicateLsi(); inspectSharedIndexColumnLsi(); @@ -806,13 +852,17 @@ private void checkNeedAddLsi() { } public static Set queryTableNamesFromSchema(String schema) { - final String showTablesFromSql = "show tables from `" + schema + "`"; + final String showTablesFromSql = + "select table_name from information_schema.tables where table_schema='" + schema + + "' and table_type='BASE TABLE'"; List> result
= DdlHelper.getServerConfigManager().executeQuerySql(showTablesFromSql, schema, null); Set tables = new TreeSet<>(String::compareToIgnoreCase); for (Map row : result) { - for (String key : row.keySet()) { - tables.add((String) row.get(key)); + Object val = row.get("table_name"); + if (val instanceof Slice) { + String tbName = ((Slice) val).toStringUtf8(); + tables.add(tbName); } } return tables; @@ -986,8 +1036,11 @@ private static List queryGsiInspectInfoRecordFromSchema(String info.partitionSql = queryPartitionByOnGsi(schema, indexName); - if (info.rowCardinality != 0 && info.rowCount != 0) { - info.rowDiscrimination = info.rowCardinality * 1.0 / info.rowCount; + //gsi row discrimination = cardinality / partitionNum + int partitionNum = queryPartitonNumOnGsi(schema, indexName); + + if (info.rowCardinality > 0 && partitionNum != 0) { + info.rowDiscrimination = info.rowCardinality * 1.0 / partitionNum; } else { info.rowDiscrimination = 0.0; } @@ -1063,6 +1116,12 @@ private static String queryPartitionByOnGsi(String schema, String gsiName) { } } + private static int queryPartitonNumOnGsi(String schema, String gsiName) { + final String querySql = "show topology from " + quoteName(gsiName); + List> result = DdlHelper.getServerConfigManager().executeQuerySql(querySql, schema, null); + return result.size(); + } + private static String quoteName(String name) { if (!StringUtil.isNullOrEmpty(name) && !name.contains("`")) { return "`" + name + "`"; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/checktable/CheckTableUtil.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/checktable/CheckTableUtil.java index a806ad353..c37ca2bda 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/checktable/CheckTableUtil.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/checktable/CheckTableUtil.java @@ -28,6 +28,8 @@ import com.alibaba.polardbx.group.config.Weight; import com.alibaba.polardbx.group.jdbc.TGroupDataSource; import com.alibaba.polardbx.repo.mysql.spi.MyRepository; +import org.apache.calcite.sql.SqlIdentifier; +import org.apache.commons.lang.StringEscapeUtils; import org.apache.commons.lang.StringUtils; import java.sql.Connection; @@ -52,28 +54,8 @@ public class CheckTableUtil { private static final Logger logger = LoggerFactory.getLogger(CheckTableUtil.class); - public static TAtomDataSource findMasterAtomForGroup(TGroupDataSource groupDs) { - TAtomDataSource targetAtom = null; - Weight targetAtomWeight = null; - boolean isFindMaster = false; - List atomList = groupDs.getAtomDataSources(); - Map atomDsWeightMaps = groupDs.getAtomDataSourceWeights(); - for (Map.Entry atomWeightItem : atomDsWeightMaps.entrySet()) { - targetAtom = atomWeightItem.getKey(); - targetAtomWeight = atomWeightItem.getValue(); - if (targetAtomWeight.w > 0) { - isFindMaster = true; - break; - } - } - - if (isFindMaster) { - return targetAtom; - } else { - targetAtom = atomList.get(0); - } - - return targetAtom; + private static String esapceStringInQuota(String input) { + return input.replace("'", "\\'"); } public static Map> getTableIndexColumns(String schemaName, String groupName, @@ -84,14 +66,14 @@ public static Map> getTableIndexColumns(String schemaName, .getGroupExecutor(groupName).getDataSource(); List> tableColumns = new ArrayList<>(); List tableNameStrs = - tableNames.stream().map(o -> String.format("'%s'", o)).collect(Collectors.toList()); + tableNames.stream().map(o -> String.format("'%s'", 
esapceStringInQuota(o))).collect(Collectors.toList()); String tableNameStr = StringUtils.join(tableNameStrs, ","); ResultSet rs = null; Throwable ex = null; String sql = String.format( "select table_name, index_name, column_name from information_schema.statistics where table_name in (%s) and table_schema = '%s' and index_name = '%s'", - tableNameStr, phyDbName, indexName); + tableNameStr, esapceStringInQuota(phyDbName), esapceStringInQuota(indexName)); try (Connection conn = tGroupDataSource.getConnection()) { rs = conn.createStatement().executeQuery(sql); while (rs.next()) { @@ -182,7 +164,7 @@ public static TableDescription getTableDescription(MyRepository myRepository, St Throwable ex = null; StringBuilder targetSql = new StringBuilder("describe "); - targetSql.append("`" + tableName + "`"); + targetSql.append(SqlIdentifier.surroundWithBacktick(tableName)); String sql = targetSql.toString(); if (isShadow) { sql = "select * from information_schema.columns where table_name='" + tableName + "' and TABLE_SCHEMA='" diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/checktable/ColumnDiffResult.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/checktable/ColumnDiffResult.java new file mode 100644 index 000000000..bcf45958b --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/checktable/ColumnDiffResult.java @@ -0,0 +1,88 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.repo.mysql.checktable; + +import com.alibaba.polardbx.gms.metadb.table.ColumnsRecord; +import com.github.difflib.text.DiffRow; +import com.github.difflib.text.DiffRowGenerator; +import org.apache.commons.lang3.StringUtils; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.stream.Collectors; + +public class ColumnDiffResult { + Boolean isDiff = true; + + List diffInfos = new ArrayList<>(); + + public ColumnDiffResult() { + } + + private static String convertColumnRecordToString(ColumnsRecord columnsRecord) { + return String.format("`%s` `%s`", columnsRecord.columnName, columnsRecord.columnType); + } + + public static ColumnDiffResult diffPhysicalColumnAndLogicalColumnOrder(List physicalColumns, + List logicalColumns) { + ColumnDiffResult columnDiffResult = new ColumnDiffResult(); + physicalColumns.sort(Comparator.comparing(o -> o.ordinalPosition)); + logicalColumns.sort(Comparator.comparing(o -> o.ordinalPosition)); + + List columnRows1 = physicalColumns.stream().map(ColumnDiffResult::convertColumnRecordToString).collect( + Collectors.toList()); + List columnRows2 = logicalColumns.stream().map(ColumnDiffResult::convertColumnRecordToString).collect( + Collectors.toList()); + + DiffRowGenerator generator = DiffRowGenerator.create() + .showInlineDiffs(true) + .inlineDiffByWord(true) + .oldTag(f -> "~~") + .newTag(f -> "**") + .build(); + List rows = generator.generateDiffRows( + columnRows1, columnRows2 + ); + columnDiffResult.isDiff = false; + StringBuilder sb = new StringBuilder(); + sb.append("| physical column | logical column |\n"); + sb.append("|--------|---|\n"); + for (DiffRow diffRow : rows) { + if (!diffRow.getTag().equals(DiffRow.Tag.EQUAL)) { + columnDiffResult.isDiff = true; + } + sb.append("|").append(diffRow.getOldLine()).append("|").append(diffRow.getNewLine()).append("|\n"); + } + columnDiffResult.diffInfos.add(sb.toString()); + return columnDiffResult; + } + + public Boolean diff() { + return isDiff; + } + + public List convertToRows(String tableText, String opText, String status) { + List results = new ArrayList<>(); + if (isDiff) { + for (String diffInfo : diffInfos) { + results.add(new Object[] {tableText, opText, status, diffInfo}); + } + } + return results; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/common/ResultSetHelper.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/common/ResultSetHelper.java index a62e49d1c..d5e811153 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/common/ResultSetHelper.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/common/ResultSetHelper.java @@ -22,6 +22,7 @@ import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.gms.metadb.table.ColumnsRecord; import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; +import com.alibaba.polardbx.gms.metadb.table.TablesRecord; import com.alibaba.polardbx.gms.util.MetaDbUtil; import com.alibaba.polardbx.optimizer.config.table.TableColumnUtils; import com.alibaba.polardbx.optimizer.config.table.TableMeta; @@ -87,4 +88,22 @@ public static List fetchLogicalColumnsInOrder(String schemaName, } } + public static TablesRecord fetchLogicalTableRecord(String schemaName, String tableName) { + TableInfoManager tableInfoManager = new TableInfoManager(); + + try (Connection metaDbConn = MetaDbUtil.getConnection()) { + tableInfoManager.setConnection(metaDbConn); + 
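+                // Note: TableInfoManager only borrows this connection; the finally block below detaches it via setConnection(null) so the try-with-resources can close it.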
TablesRecord tableRecord = tableInfoManager.queryTable(schemaName, tableName, false); + if (tableRecord == null) { + // Check if there is an ongoing RENAME TABLE operation, so search with new table name. + tableRecord = tableInfoManager.queryTable(schemaName, tableName, true); + } + return tableRecord; + } catch (SQLException e) { + throw new TddlRuntimeException(ErrorCode.ERR_GMS_GET_CONNECTION, e, e.getMessage()); + } finally { + tableInfoManager.setConnection(null); + } + } + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/common/ResultSetWrapper.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/common/ResultSetWrapper.java index 4eb8ee1aa..064ce9b41 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/common/ResultSetWrapper.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/common/ResultSetWrapper.java @@ -16,11 +16,136 @@ package com.alibaba.polardbx.repo.mysql.common; +import com.alibaba.polardbx.common.jdbc.InvalidDate; +import com.alibaba.polardbx.common.jdbc.ZeroDate; +import com.alibaba.polardbx.common.jdbc.ZeroTimestamp; +import com.alibaba.polardbx.common.utils.TStringUtil; +import com.alibaba.polardbx.common.utils.time.core.MysqlDateTime; +import com.alibaba.polardbx.common.utils.time.core.OriginalDate; +import com.alibaba.polardbx.common.utils.time.core.OriginalTime; +import com.alibaba.polardbx.common.utils.time.core.OriginalTimestamp; +import com.alibaba.polardbx.common.utils.time.parser.StringTimeParser; +import com.alibaba.polardbx.repo.mysql.spi.MyJdbcHandler; import com.mysql.jdbc.ResultSetImpl; import org.openjdk.jol.info.ClassLayout; +import java.sql.Date; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Time; +import java.sql.Timestamp; +import java.sql.Types; + // TODO(moyi) remove it, which is not used in x-protocol public class ResultSetWrapper { public static long INSTANCE_MEM_SIZE = ClassLayout.parseClass(ResultSetWrapper.class).instanceSize() + ClassLayout.parseClass(ResultSetImpl.class).instanceSize(); + + private ResultSet rs; + private boolean isClosed = false; + private MyJdbcHandler jdbcHandler; + + public ResultSetWrapper(ResultSet rs, MyJdbcHandler jdbcHandler) { + this.rs = rs; + this.jdbcHandler = jdbcHandler; + } + + public Object getObject(int columnIndex) throws SQLException { + return getObject(rs, columnIndex); + } + + public static Object getObject(ResultSet rs, int columnIndex) throws SQLException { + try { + Object obj = rs.getObject(columnIndex); + try { + if (obj instanceof Timestamp || obj instanceof Date || obj instanceof Time) { + // Handle zero month or zero day, which is allowed in DATE/DATETIME: + // https://dev.mysql.com/doc/refman/5.7/en/date-and-time-types.html + // However if it's returned by JDBC as Timestamp or Date, it will be converted to another date. + // Also java.util.Time does not keep microsecond part. + byte[] rawBytes = new byte[rs.getAsciiStream(columnIndex).available()]; + rs.getAsciiStream(columnIndex).read(rawBytes); + MysqlDateTime mysqlDateTime = StringTimeParser.parseString(rawBytes, + obj instanceof Timestamp ? Types.TIMESTAMP : obj instanceof Date ?
Types.DATE : Types.TIME); + if (obj instanceof Timestamp) { + OriginalTimestamp t = new OriginalTimestamp(mysqlDateTime); + return t; + } else if (obj instanceof Date) { + OriginalDate t = new OriginalDate(mysqlDateTime); + return t; + } else { + OriginalTime t = new OriginalTime(mysqlDateTime); + return t; + } + } + } catch (Throwable e) { + // Do nothing, just return the object before conversion. + } + return obj; + } catch (SQLException ex) { + if (TStringUtil.containsIgnoreCase(ex.getMessage(), "0000-00-00") + && TStringUtil.containsIgnoreCase(ex.getMessage(), "can not be represented as java.sql.Timestamp")) { + Timestamp ts = ZeroTimestamp.instance; + return ts; + } else if (TStringUtil.containsIgnoreCase(ex.getMessage(), "0000-00-00") + && TStringUtil.containsIgnoreCase(ex.getMessage(), "can not be represented as java.sql.Date")) { + ZeroDate ts = ZeroDate.instance; + return ts; + } else if (TStringUtil.containsIgnoreCase(ex.getMessage(), "can not be represented as java.sql.Date")) { + return InvalidDate.instance; // Mainly for year type + } else if (rs.getMetaData().getColumnType(columnIndex) == 92 && TStringUtil + .containsIgnoreCase(ex.getMessage(), "Bad format for Time")) { + try { + byte[] rawBytes = new byte[rs.getAsciiStream(columnIndex).available()]; + rs.getAsciiStream(columnIndex).read(rawBytes); + MysqlDateTime mysqlDateTime = StringTimeParser.parseString(rawBytes, Types.TIME); + OriginalTime t = new OriginalTime(mysqlDateTime); + return t; + } catch (Exception e) { + throw ex; + } + } else { + throw ex; + } + } + } + + + public byte[] getBytes(int columnIndex) throws SQLException { + return getBytes(rs, columnIndex); + } + + public static byte[] getBytes(ResultSet rs, int columnIndex) throws SQLException { + try { + return rs.getBytes(columnIndex); + } catch (SQLException ex) { + if (TStringUtil.containsIgnoreCase(ex.getMessage(), "0000-00-00") + && TStringUtil.containsIgnoreCase(ex.getMessage(), "can not be represented as java.sql.Timestamp")) { + return String.valueOf(getObject(rs, columnIndex)).getBytes(); + } else if (TStringUtil.containsIgnoreCase(ex.getMessage(), "0000-00-00") + && TStringUtil.containsIgnoreCase(ex.getMessage(), "can not be represented as java.sql.Date")) { + return String.valueOf(getObject(rs, columnIndex)).getBytes(); + } else if (TStringUtil.containsIgnoreCase(ex.getMessage(), "can not be represented as java.sql.Date")) { + return String.valueOf(getObject(rs, columnIndex)).getBytes(); // Reading + // Friendly + } else if (rs.getMetaData().getColumnType(columnIndex) == 92 && TStringUtil + .containsIgnoreCase(ex.getMessage(), "Bad format for Time")) { + try { + byte[] rawBytes = new byte[rs.getAsciiStream(columnIndex).available()]; + rs.getAsciiStream(columnIndex).read(rawBytes); + String str = new String(rawBytes); + Time t = Time.valueOf(str); + return String.valueOf(t).getBytes(); + } catch (Exception e) { + throw ex; + } + } else { + throw ex; + } + } + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/AlterTableGroupBackfillHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/AlterTableGroupBackfillHandler.java index c52e682bf..6f3360c80 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/AlterTableGroupBackfillHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/AlterTableGroupBackfillHandler.java @@ -23,6 +23,7 @@ import com.alibaba.polardbx.common.exception.code.ErrorCode; import 
com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.executor.backfill.Loader; import com.alibaba.polardbx.executor.corrector.Checker; import com.alibaba.polardbx.executor.corrector.Reporter; import com.alibaba.polardbx.executor.cursor.Cursor; @@ -35,7 +36,6 @@ import com.alibaba.polardbx.executor.partitionmanagement.corrector.AlterTableGroupReporter; import com.alibaba.polardbx.executor.partitionmanagement.fastchecker.AlterTableGroupFastChecker; import com.alibaba.polardbx.executor.spi.IRepository; -import com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.AlterTableGroupBackfill; import com.alibaba.polardbx.optimizer.utils.PhyTableOperationUtil; @@ -75,6 +75,9 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { BackfillExecutor backfillExecutor = new BackfillExecutor((List inputs, ExecutionContext executionContext1) -> { QueryConcurrencyPolicy queryConcurrencyPolicy = getQueryConcurrencyPolicy(executionContext1); + if (Loader.canUseBackfillReturning(executionContext1, schemaName)) { + queryConcurrencyPolicy = QueryConcurrencyPolicy.GROUP_CONCURRENT_BLOCK; + } List inputCursors = new ArrayList<>(inputs.size()); executeWithConcurrentPolicy(executionContext1, inputs, queryConcurrencyPolicy, inputCursors, schemaName); return inputCursors; @@ -82,7 +85,9 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { executionContext = clearSqlMode(executionContext.copy()); - upgradeEncoding(executionContext, schemaName, logicalTable); + if (!executionContext.getParamManager().getBoolean(ConnectionParams.BACKFILL_USING_BINARY)) { + upgradeEncoding(executionContext, schemaName, logicalTable); + } PhyTableOperationUtil.disableIntraGroupParallelism(schemaName, executionContext); @@ -157,53 +162,32 @@ boolean fastCheck(ExecutionContext executionContext, Map> dstPhyDbAndTables) { long startTime = System.currentTimeMillis(); - SQLRecorderLogger.ddlLogger.warn(MessageFormat.format( + SQLRecorderLogger.ddlLogger.info(MessageFormat.format( "FastChecker for alter tablegroup, schema [{0}] logical table [{1}] start", schemaName, logicalTable)); - final int fastCheckerParallelism = - executionContext.getParamManager().getInt(ConnectionParams.TABLEGROUP_REORG_FASTCHECKER_PARALLELISM); FastChecker fastChecker = AlterTableGroupFastChecker .create(schemaName, logicalTable, srcPhyDbAndTables, - dstPhyDbAndTables, fastCheckerParallelism, executionContext); + dstPhyDbAndTables, executionContext); boolean fastCheckResult = false; - final int maxRetryTimes = - executionContext.getParamManager().getInt(ConnectionParams.FASTCHECKER_RETRY_TIMES); - int tryTimes = 0; - while (tryTimes < maxRetryTimes && fastCheckResult == false) { - try { - fastCheckResult = fastChecker.check(executionContext); - } catch (TddlNestableRuntimeException e) { - if (StringUtils.containsIgnoreCase(e.getMessage(), "acquire lock timeout")) { - //if acquire lock timeout, we will retry - if (tryTimes < maxRetryTimes - 1) { - try { - TimeUnit.MILLISECONDS.sleep(2000L * (1 + tryTimes)); - } catch (InterruptedException ex) { - throw new TddlNestableRuntimeException(ex); - } - continue; - } else { - throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, - "alter tablegroup fastchecker retry exceed max times", e); - } - } else { - //other exception, we simply throw out - throw new 
TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, e, - "alter tablegroup fastchecker failed to check"); - } - } finally { - tryTimes += 1; - SQLRecorderLogger.ddlLogger.warn(MessageFormat.format( - "FastChecker for alter tablegroup, schema [{0}] logical src table [{1}] finish, time use [{2}], check result [{3}]", - schemaName, logicalTable, - (System.currentTimeMillis() - startTime) / 1000.0, - fastCheckResult ? "pass" : "not pass") - ); - if (!fastCheckResult) { - EventLogger.log(EventType.DDL_WARN, "FastChecker failed"); - } + try { + fastCheckResult = fastChecker.check(executionContext); + } catch (TddlNestableRuntimeException e) { + //other exception, we simply throw out + throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, e, + "alter tablegroup fastchecker failed to check"); + } finally { + SQLRecorderLogger.ddlLogger.info(MessageFormat.format( + "FastChecker for alter tablegroup, schema [{0}] logical src table [{1}] finish, time use [{2}], check result [{3}]", + schemaName, logicalTable, + (System.currentTimeMillis() - startTime) / 1000.0, + fastCheckResult ? "pass" : "not pass") + ); + if (!fastCheckResult) { + EventLogger.log(EventType.DDL_WARN, "FastChecker failed"); + } else { + EventLogger.log(EventType.DDL_INFO, "FastChecker succeed"); } } return fastCheckResult; @@ -220,6 +204,7 @@ private void checkInCN(AlterTableGroupBackfill backfill, ExecutionContext execut executionContext.getParamManager().getLong(ConnectionParams.TABLEGROUP_REORG_CHECK_PARALLELISM); final long earlyFailNumber = executionContext.getParamManager().getLong(ConnectionParams.TABLEGROUP_REORG_EARLY_FAIL_NUMBER); + final boolean useBinary = executionContext.getParamManager().getBoolean(ConnectionParams.BACKFILL_USING_BINARY); String schemaName = backfill.getSchemaName(); String logicalTable = backfill.getLogicalTableName(); @@ -233,6 +218,7 @@ private void checkInCN(AlterTableGroupBackfill backfill, ExecutionContext execut speedMin, speedLimit, parallelism, + useBinary, SqlSelect.LockMode.UNDEF, SqlSelect.LockMode.UNDEF, executionContext, @@ -262,6 +248,9 @@ private void checkInCN(AlterTableGroupBackfill backfill, ExecutionContext execut final List checkerReports = reporter.getCheckerReports(); if (!checkerReports.isEmpty()) { + for (CheckerManager.CheckerReport report : checkerReports) { + SQLRecorderLogger.ddlLogger.error("report detail: " + report); + } // Some error found. throw GeneralUtil.nestedException( "alter tableGroup checker found error after backfill. 
Please try to rollback/recover this job"); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/CommonDDLHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ChangeRuleVersionHandler.java similarity index 100% rename from polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/CommonDDLHandler.java rename to polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ChangeRuleVersionHandler.java diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ClearSeqCacheHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ClearSeqCacheHandler.java index d43771fc2..17c758a79 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ClearSeqCacheHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ClearSeqCacheHandler.java @@ -21,6 +21,7 @@ import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.executor.sync.ClearSeqCacheSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; import org.apache.calcite.sql.SqlClearSeqCache; @@ -46,7 +47,7 @@ Cursor doHandle(LogicalDal logicalPlan, ExecutionContext context) { String seqName = names.get(names.size() - 1); boolean isAll = "ALL".equalsIgnoreCase(seqName); - SyncManagerHelper.sync(new ClearSeqCacheSyncAction(schemaName, seqName, isAll, true)); + SyncManagerHelper.sync(new ClearSeqCacheSyncAction(schemaName, seqName, isAll, true), SyncScope.ALL); return new AffectRowCursor(isAll ? 
0 : 1); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/CommandHandlerFactoryMyImp.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/CommandHandlerFactoryMyImp.java index 0fa6d39ed..a6308e5a0 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/CommandHandlerFactoryMyImp.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/CommandHandlerFactoryMyImp.java @@ -16,24 +16,58 @@ package com.alibaba.polardbx.repo.mysql.handler; +import com.alibaba.polardbx.druid.support.logging.Log; +import com.alibaba.polardbx.executor.handler.HandlerCommon; import com.alibaba.polardbx.executor.handler.LogicalAlterDatabaseHandler; +import com.alibaba.polardbx.executor.handler.LogicalAlterInstanceHandler; +import com.alibaba.polardbx.executor.handler.LogicalCancelReplicaCheckTableHandler; import com.alibaba.polardbx.executor.handler.LogicalChangeMasterHandler; import com.alibaba.polardbx.executor.handler.LogicalChangeReplicationFilterHandler; import com.alibaba.polardbx.executor.handler.LogicalClearCclRulesHandler; import com.alibaba.polardbx.executor.handler.LogicalClearCclTriggersHandler; +import com.alibaba.polardbx.executor.handler.LogicalContinueReplicaCheckTableHandler; import com.alibaba.polardbx.executor.handler.LogicalContinueScheduleHandler; import com.alibaba.polardbx.executor.handler.LogicalCreateCclRuleHandler; import com.alibaba.polardbx.executor.handler.LogicalCreateCclTriggerHandler; +import com.alibaba.polardbx.executor.handler.LogicalStartReplicaCheckTableHandler; +import com.alibaba.polardbx.executor.handler.LogicalPauseReplicaCheckTableHandler; +import com.alibaba.polardbx.executor.handler.LogicalReplicaHashcheckHandler; +import com.alibaba.polardbx.executor.handler.LogicalCreateSecurityEntityHandler; +import com.alibaba.polardbx.executor.handler.LogicalDropSecurityEntityHandler; +import com.alibaba.polardbx.executor.handler.LogicalImportSequenceHandler; +import com.alibaba.polardbx.executor.handler.LogicalResetReplicaCheckTableHandler; +import com.alibaba.polardbx.executor.handler.LogicalShowReplicaCheckDiffHandler; +import com.alibaba.polardbx.executor.handler.LogicalShowReplicaCheckProgressHandler; +import com.alibaba.polardbx.executor.handler.ddl.LogicalAlterStoragePoolHandler; +import com.alibaba.polardbx.executor.handler.ddl.LogicalClearFileStorageHandler; +import com.alibaba.polardbx.executor.handler.ddl.LogicalCreateJavaFunctionHandler; +import com.alibaba.polardbx.executor.handler.LogicalFlushLogsHandler; +import com.alibaba.polardbx.executor.handler.LogicalSetCdcGlobalHandler; +import com.alibaba.polardbx.executor.handler.ddl.LogicalCreateJavaFunctionHandler; +import com.alibaba.polardbx.executor.handler.LogicalCreateSecurityLabelComponentHandler; +import com.alibaba.polardbx.executor.handler.LogicalCreateSecurityLabelHandler; +import com.alibaba.polardbx.executor.handler.LogicalCreateSecurityPolicyHandler; +import com.alibaba.polardbx.executor.handler.LogicalDropSecurityLabelComponentHandler; +import com.alibaba.polardbx.executor.handler.LogicalDropSecurityLabelHandler; +import com.alibaba.polardbx.executor.handler.LogicalDropSecurityPolicyHandler; +import com.alibaba.polardbx.executor.handler.LogicalGrantSecurityLabelHandler; +import com.alibaba.polardbx.executor.handler.LogicalRevokeSecurityLabelHandler; +import com.alibaba.polardbx.executor.handler.LogicalSetCdcGlobalHandler; import com.alibaba.polardbx.executor.handler.LogicalCreateScheduleHandler; 
import com.alibaba.polardbx.executor.handler.LogicalDropCclRuleHandler; import com.alibaba.polardbx.executor.handler.LogicalDropCclTriggerHandler; import com.alibaba.polardbx.executor.handler.LogicalDropScheduleHandler; import com.alibaba.polardbx.executor.handler.LogicalFireScheduleHandler; import com.alibaba.polardbx.executor.handler.LogicalFlushLogsHandler; +import com.alibaba.polardbx.executor.handler.LogicalFlushLogsHandler; +import com.alibaba.polardbx.executor.handler.LogicalImportSequenceHandler; +import com.alibaba.polardbx.executor.handler.LogicalPauseReplicaCheckTableHandler; import com.alibaba.polardbx.executor.handler.LogicalPauseScheduleHandler; import com.alibaba.polardbx.executor.handler.LogicalRebalanceHandler; import com.alibaba.polardbx.executor.handler.LogicalRebalanceMasterHandler; +import com.alibaba.polardbx.executor.handler.LogicalReplicaHashcheckHandler; import com.alibaba.polardbx.executor.handler.LogicalResetMasterHandler; +import com.alibaba.polardbx.executor.handler.LogicalResetReplicaCheckTableHandler; import com.alibaba.polardbx.executor.handler.LogicalResetSlaveHandler; import com.alibaba.polardbx.executor.handler.LogicalRestartMasterHandler; import com.alibaba.polardbx.executor.handler.LogicalSetCdcGlobalHandler; @@ -58,6 +92,9 @@ import com.alibaba.polardbx.executor.handler.LogicalShowPartitionsHandler; import com.alibaba.polardbx.executor.handler.LogicalShowPartitionsHeatmapHandler; import com.alibaba.polardbx.executor.handler.LogicalShowProfileHandler; +import com.alibaba.polardbx.executor.handler.LogicalShowPruneTraceHandler; +import com.alibaba.polardbx.executor.handler.LogicalShowReplicaCheckDiffHandler; +import com.alibaba.polardbx.executor.handler.LogicalShowReplicaCheckProgressHandler; import com.alibaba.polardbx.executor.handler.LogicalShowRuleHandler; import com.alibaba.polardbx.executor.handler.LogicalShowRuleStatusHandler; import com.alibaba.polardbx.executor.handler.LogicalShowSequencesHandler; @@ -71,6 +108,7 @@ import com.alibaba.polardbx.executor.handler.LogicalShowTraceHandler; import com.alibaba.polardbx.executor.handler.LogicalSlowSqlCclHandler; import com.alibaba.polardbx.executor.handler.LogicalStartMasterHandler; +import com.alibaba.polardbx.executor.handler.LogicalStartReplicaCheckTableHandler; import com.alibaba.polardbx.executor.handler.LogicalStartSlaveHandler; import com.alibaba.polardbx.executor.handler.LogicalStopMasterHandler; import com.alibaba.polardbx.executor.handler.LogicalStopSlaveHandler; @@ -116,6 +154,7 @@ import com.alibaba.polardbx.executor.handler.ddl.LogicalAlterTableSplitPartitionByHotValueHandler; import com.alibaba.polardbx.executor.handler.ddl.LogicalAlterTableSplitPartitionHandler; import com.alibaba.polardbx.executor.handler.ddl.LogicalAlterTableTruncatePartitionHandler; +import com.alibaba.polardbx.executor.handler.ddl.LogicalCheckCciHandler; import com.alibaba.polardbx.executor.handler.ddl.LogicalCheckGsiHandler; import com.alibaba.polardbx.executor.handler.ddl.LogicalCommonDdlHandler; import com.alibaba.polardbx.executor.handler.ddl.LogicalCreateDatabaseHandler; @@ -143,6 +182,7 @@ import com.alibaba.polardbx.executor.handler.ddl.LogicalDropTableHandler; import com.alibaba.polardbx.executor.handler.ddl.LogicalDropViewHandler; import com.alibaba.polardbx.executor.handler.ddl.LogicalGenericDdlHandler; +import com.alibaba.polardbx.executor.handler.ddl.LogicalImportDatabaseHandler; import com.alibaba.polardbx.executor.handler.ddl.LogicalInsertOverwriteHandler; import 
com.alibaba.polardbx.executor.handler.ddl.LogicalMergeTableGroupHandler; import com.alibaba.polardbx.executor.handler.ddl.LogicalMoveDatabaseHandler; @@ -179,6 +219,7 @@ import com.alibaba.polardbx.optimizer.core.rel.MoveTableBackfill; import com.alibaba.polardbx.optimizer.core.rel.PhyQueryOperation; import com.alibaba.polardbx.optimizer.core.rel.PhyViewUnion; +import com.alibaba.polardbx.optimizer.core.rel.PhysicalBackfill; import com.alibaba.polardbx.optimizer.core.rel.dal.BaseDalOperation; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalAlterSystemLeader; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalAlterSystemRefreshStorage; @@ -188,6 +229,7 @@ import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterDatabase; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterFileStorage; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterFunction; +import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterInstance; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterJoinGroup; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterProcedure; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterRule; @@ -225,7 +267,10 @@ import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAlterTableTruncatePartition; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalAnalyzeTable; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalChangeConsensusLeader; +import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalCheckCci; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalCheckGsi; +import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalConvertAllSequences; +import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalClearFileStorage; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalCreateDatabase; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalCreateFileStorage; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalCreateFunction; @@ -251,6 +296,8 @@ import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalDropTableGroup; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalDropView; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalGenericDdl; +import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalImportDatabase; +import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalImportSequence; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalInsertOverwrite; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalInspectIndex; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalMergeTableGroup; @@ -315,10 +362,12 @@ public CommandHandlerFactoryMyImp(IRepository repo) { LOGICAL_SEQUENCE_DDL_HANDLER = new LogicalSequenceDdlHandler(repo); LOGICAL_CREATE_TABLE_HANDLER = new LogicalCreateTableHandler(repo); + LOGICAL_CONVERT_TABLE_MODE_HANDLER = new LogicalConvertTableModeHandler(repo); LOGICAL_ALTER_TABLE_HANDLER = new LogicalAlterTableHandler(repo); LOGICAL_ALTER_FILESTORAGE_HANDLER = new LogicalAlterFileStoragHandler(repo); LOGICAL_DROP_FILESTORAGE_HANDLER = new LogicalDropFileStorageHandler(repo); LOGICAL_CREATE_FILESTORAGE_HANDLER = new LogicalCreateFileStorageHandler(repo); + LOGICAL_CLEAR_FILESTORAGE_HANDLER = new LogicalClearFileStorageHandler(repo); LOGICAL_RENAME_TABLE_HANDLER = new LogicalRenameTableHandler(repo); LOGICAL_RENAME_TABLES_HANDLER = new LogicalRenameTablesHandler(repo); LOGICAL_INSERT_OVERWRITE_HANDLER = new LogicalInsertOverwriteHandler(repo); @@ -353,6 +402,7 @@ public CommandHandlerFactoryMyImp(IRepository repo) { 
LOGICAL_SHOW_DS_HANDLER = new LogicalShowDsHandler(repo); LOGICAL_SHOW_DB_STATUS_HANDLER = new LogicalShowDbStatusHandler(repo); LOGICAL_SHOW_TRACE_HANDLER = new LogicalShowTraceHandler(repo); + LOGICAL_SHOW_PRUNE_TRACE_HANDLER = new LogicalShowPruneTraceHandler(repo); LOGICAL_SHOW_SEQUENCES_HANDLER = new LogicalShowSequencesHandler(repo); LOGICAL_SHOW_RULE_HANDLER = new LogicalShowRuleHandler(repo); LOGICAL_SHOW_GRANTS_HANDLER = new PolarShowGrantsHandler(repo); @@ -365,12 +415,16 @@ public CommandHandlerFactoryMyImp(IRepository repo) { LOGICAL_SHOW_PROFILE_HANDLER = new LogicalShowProfileHandler(repo); LOGICAL_SHOW_TABLE_INFO_HANDLER = new LogicalShowTableInfoHandler(repo); LOGICAL_SHOW_LOCALITY_INFO_HANDLER = new LogicalShowLocalityInfoHandler(repo); + LOGICAL_SHOW_PHYSICAL_DDL_HANDLER = new LogicalShowPhysicalDdlHandler(repo); LOGICAL_SHOW_HOTKEY_HANDLER = new LogicalShowHotkeyHandler(repo); LOGICAL_DESC_HANDLER = new LogicalDescHandler(repo); LOGICAL_EXPLAIN_HANDLER = new LogicalExplainHandler(repo); LOGICAL_BASELINE_HANDLER = new LogicalBaselineHandler(repo); LOGICAL_CHECK_TABLE_HANDLER = new LogicalCheckTableHandler(repo); + + LOGICAL_CHECK_COLUMNAR_PARTITION_HANDLER = new LogicalCheckColumnarPartitionHandler(repo); + LOGICAL_KILL_HANDLER = new LogicalKillHandler(repo); LOGICAL_ANALYZE_TABLE_HANDLER = new LogicalAnalyzeTableDdlHandler(repo); LOGICAL_SHOW_RECYCLEBIN_HANDLER = new LogicalShowRecyclebinHandler(repo); @@ -378,18 +432,24 @@ public CommandHandlerFactoryMyImp(IRepository repo) { LOGICAL_CREATE_DATABASE_HANDLER = new LogicalCreateDatabaseHandler(repo); LOGICAL_CREATE_DATABASE_LIKE_AS_HANDLER = new LogicalCreateDatabaseLikeAsHandler(repo); LOGICAL_ALTER_DATABASE_HANDLER = new LogicalAlterDatabaseHandler(repo); + LOGICAL_ALTER_INSTANCE_HANDLER = new LogicalAlterInstanceHandler(repo); LOGICAL_SHOW_CONVERT_TABLE_HANDLER = new LogicalShowConvertTableHandler(repo); LOGICAL_DROP_DATABASE_HANDLER = new LogicalDropDatabaseHandler(repo); LOGICAL_CREATE_JAVA_FUNCTION_HANDLER = new LogicalCreateJavaFunctionHandler(repo); LOGICAL_DROP_JAVA_FUNCTION_HANDLER = new LogicalDropJavaFunctionHandler(repo); + LOGICAL_IMPORT_DATABASE = new LogicalImportDatabaseHandler(repo); + + LOGICAL_IMPORT_SEQUENCE = new LogicalImportSequenceHandler(repo); + SHOW_DDL_JOBS_HANDLER = new DdlEngineShowJobsHandler(repo); RECOVER_DDL_JOBS_HANDLER = new DdlEngineRecoverJobsHandler(repo); CANCEL_DDL_JOBS_HANDLER = new DdlEngineCancelJobsHandler(repo); ROLLBACK_DDL_JOBS_HANDLER = new DdlEngineRollbackJobsHandler(repo); INSPECT_DDL_JOBS_CACHE_HANDLER = new DdlEngineInspectCacheHandler(repo); LOGICAL_CHECK_GSI_HANDLER = new LogicalCheckGsiHandler(repo); + LOGICAL_CHECK_CCI_HANDLER = new LogicalCheckCciHandler(repo); PAUSE_DDL_JOBS_HANDLER = new DdlEnginePauseJobsHandler(repo); PAUSE_REBALANCE_JOBS_HANDLER = new DdlEnginePauseRebalanceHandler(repo); @@ -420,6 +480,7 @@ public CommandHandlerFactoryMyImp(IRepository repo) { LOGICAL_MOVE_DATABASES_HANDLER = new LogicalMoveDatabaseHandler(repo); COLUMN_BACKFILL_HANDLER = new ColumnBackfillHandler(repo); SHOW_GLOBAL_INDEX_HANDLER = new ShowGlobalIndexHandler(repo); + SHOW_COLUMNAR_INDEX_HANDLER = new ShowColumnarIndexHandler(repo); SHOW_METADATA_LOCK_HANDLER = new ShowMetadataLockHandler(repo); SHOW_TRANS_HANDLER = new ShowTransHandler(repo); @@ -449,6 +510,14 @@ public CommandHandlerFactoryMyImp(IRepository repo) { LOGICAL_RESTART_MASTER_HANDLER = new LogicalRestartMasterHandler(repo); LOGICAL_REBALANCE_MASTER_HANDLER = new LogicalRebalanceMasterHandler(repo); 
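+        // Replica check support: hash check plus start/pause/continue/cancel/reset and progress/diff display handlers are registered just below.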
LOGICAL_RESET_MASTER_HANDLER = new LogicalResetMasterHandler(repo); + LOGICAL_REPLICA_HASHCHECK_HANDLER = new LogicalReplicaHashcheckHandler(repo); + LOGICAL_START_REPLICA_CHECK_HANDLER = new LogicalStartReplicaCheckTableHandler(repo); + LOGICAL_PAUSE_REPLICA_CHECK_HANDLER = new LogicalPauseReplicaCheckTableHandler(repo); + LOGICAL_CANCEL_REPLICA_CHECK_HANDLER = new LogicalCancelReplicaCheckTableHandler(repo); + LOGICAL_RESET_REPLICA_CHECK_HANDLER = new LogicalResetReplicaCheckTableHandler(repo); + LOGICAL_CONTINUE_REPLICA_CHECK_HANDLER = new LogicalContinueReplicaCheckTableHandler(repo); + LOGICAL_SHOW_REPLICA_CHECK_PROGRESS_HANDLER = new LogicalShowReplicaCheckProgressHandler(repo); + LOGICAL_SHOW_REPLICA_CHECK_DIFF_HANDLER = new LogicalShowReplicaCheckDiffHandler(repo); CREATE_CCL_RULE_HANDLER = new LogicalCreateCclRuleHandler(repo); DROP_CCL_RULE_HANDLER = new LogicalDropCclRuleHandler(repo); @@ -468,10 +537,22 @@ public CommandHandlerFactoryMyImp(IRepository repo) { CONTINUE_SCHEDULE_HANDLER = new LogicalContinueScheduleHandler(repo); FIRE_SCHEDULE_HANDLER = new LogicalFireScheduleHandler(repo); + CREATE_SECURITY_LABEL_COMPONENT_HANDLER = new LogicalCreateSecurityLabelComponentHandler(repo); + DROP_SECURITY_LABEL_COMPONENT_HANDLER = new LogicalDropSecurityLabelComponentHandler(repo); + CREATE_SECURITY_LABEL_HANDLER = new LogicalCreateSecurityLabelHandler(repo); + DROP_SECURITY_LABEL_HANDLER = new LogicalDropSecurityLabelHandler(repo); + CREATE_SECURITY_POLICY_HANDLER = new LogicalCreateSecurityPolicyHandler(repo); + DROP_SECURITY_POLICY_HANDLER = new LogicalDropSecurityPolicyHandler(repo); + CREATE_SECURITY_ENTITY_HANDLER = new LogicalCreateSecurityEntityHandler(repo); + DROP_SECURITY_ENTITY_HANDLER = new LogicalDropSecurityEntityHandler(repo); + GRANT_SECURITY_LABEL_HANDLER = new LogicalGrantSecurityLabelHandler(repo); + REVOKE_SECURITY_LABEL_HANDLER = new LogicalRevokeSecurityLabelHandler(repo); + LOGICAL_SET_DEFAULT_ROLE_HANDLER = new LogicalSetDefaultRoleHandler(repo); ALTER_TABLEGROUP_BACKFILL_HANDLER = new AlterTableGroupBackfillHandler(repo); CREATE_TABLEGROUP_HANDLER = new LogicalCreateTableGroupHandler(repo); DROP_TABLEGROUP_HANDLER = new LogicalDropTableGroupHandler(repo); + PHYSICAL_BACKFILL_HANDLER = new PhysicalBackfillHandler(repo); LOGICAL_OUT_FILE_HANDLER = new LogicalOutFileHandler(repo); LOGICAL_ALTER_TABLEGROUP_SPLIT_PARTITION_HANDLER = new LogicalAlterTableGroupSplitPartitionHandler(repo); LOGICAL_ALTER_TABLEGROUP_MERGE_PARTITION_HANDLER = new LogicalAlterTableGroupMergePartitionHandler(repo); @@ -547,6 +628,7 @@ public CommandHandlerFactoryMyImp(IRepository repo) { private final LogicalCommonDdlHandler LOGICAL_ALTER_TABLE_HANDLER; private final LogicalCommonDdlHandler LOGICAL_ALTER_FILESTORAGE_HANDLER; private final LogicalCommonDdlHandler LOGICAL_DROP_FILESTORAGE_HANDLER; + private final LogicalCommonDdlHandler LOGICAL_CLEAR_FILESTORAGE_HANDLER; private final LogicalCommonDdlHandler LOGICAL_CREATE_FILESTORAGE_HANDLER; private final LogicalCommonDdlHandler LOGICAL_ALTER_TABLE_REPARTITION_HANDLER; private final LogicalCommonDdlHandler LOGICAL_ALTER_TABLE_PARTITION_COUNT_HANDLER; @@ -615,6 +697,7 @@ public CommandHandlerFactoryMyImp(IRepository repo) { private final PlanHandler LOGICAL_SHOW_DS_HANDLER; private final PlanHandler LOGICAL_SHOW_DB_STATUS_HANDLER; private final PlanHandler LOGICAL_SHOW_TRACE_HANDLER; + private final PlanHandler LOGICAL_SHOW_PRUNE_TRACE_HANDLER; private final PlanHandler LOGICAL_SHOW_SEQUENCES_HANDLER; private final PlanHandler 
LOGICAL_SHOW_RULE_HANDLER; private final PlanHandler LOGICAL_SHOW_GRANTS_HANDLER; @@ -628,6 +711,7 @@ public CommandHandlerFactoryMyImp(IRepository repo) { private final PlanHandler LOGICAL_SHOW_TABLE_INFO_HANDLER; private final PlanHandler LOGICAL_SHOW_LOCALITY_INFO_HANDLER; + private final PlanHandler LOGICAL_SHOW_PHYSICAL_DDL_HANDLER; private final PlanHandler LOGICAL_SHOW_HOTKEY_HANDLER; private final PlanHandler LOGICAL_DESC_HANDLER; @@ -637,8 +721,15 @@ public CommandHandlerFactoryMyImp(IRepository repo) { private final PlanHandler PHY_QUERY_HANDLER; private final PlanHandler LOGICAL_CHECK_TABLE_HANDLER; + + private final PlanHandler LOGICAL_CHECK_COLUMNAR_PARTITION_HANDLER; + private final PlanHandler LOGICAL_KILL_HANDLER; private final PlanHandler LOGICAL_ANALYZE_TABLE_HANDLER; + + private final PlanHandler LOGICAL_IMPORT_DATABASE; + + private final PlanHandler LOGICAL_IMPORT_SEQUENCE; private final PlanHandler LOGICAL_SHOW_RECYCLEBIN_HANDLER; private final PlanHandler LOGICAL_EXPLAIN_HANDLER; @@ -647,8 +738,11 @@ public CommandHandlerFactoryMyImp(IRepository repo) { private final PlanHandler LOGICAL_CREATE_DATABASE_LIKE_AS_HANDLER; private final PlanHandler LOGICAL_ALTER_DATABASE_HANDLER; + private final PlanHandler LOGICAL_ALTER_INSTANCE_HANDLER; private final PlanHandler LOGICAL_SHOW_CONVERT_TABLE_HANDLER; + + private final PlanHandler LOGICAL_CONVERT_TABLE_MODE_HANDLER; private final PlanHandler LOGICAL_DROP_DATABASE_HANDLER; private final PlanHandler LOGICAL_CREATE_JAVA_FUNCTION_HANDLER; @@ -686,8 +780,10 @@ public CommandHandlerFactoryMyImp(IRepository repo) { private final PlanHandler GSI_BACKFILL_HANDLER; private final PlanHandler LOGICAL_CHECK_GSI_HANDLER; + private final PlanHandler LOGICAL_CHECK_CCI_HANDLER; private final PlanHandler COLUMN_BACKFILL_HANDLER; private final PlanHandler SHOW_GLOBAL_INDEX_HANDLER; + private final PlanHandler SHOW_COLUMNAR_INDEX_HANDLER; private final PlanHandler SHOW_METADATA_LOCK_HANDLER; private final PlanHandler SHOW_TRANS_HANDLER; @@ -717,12 +813,21 @@ public CommandHandlerFactoryMyImp(IRepository repo) { private final PlanHandler LOGICAL_RESTART_MASTER_HANDLER; private final PlanHandler LOGICAL_REBALANCE_MASTER_HANDLER; private final PlanHandler LOGICAL_RESET_MASTER_HANDLER; + private final PlanHandler LOGICAL_REPLICA_HASHCHECK_HANDLER; + private final PlanHandler LOGICAL_START_REPLICA_CHECK_HANDLER; + private final PlanHandler LOGICAL_PAUSE_REPLICA_CHECK_HANDLER; + private final PlanHandler LOGICAL_CONTINUE_REPLICA_CHECK_HANDLER; + private final PlanHandler LOGICAL_CANCEL_REPLICA_CHECK_HANDLER; + private final PlanHandler LOGICAL_RESET_REPLICA_CHECK_HANDLER; + private final PlanHandler LOGICAL_SHOW_REPLICA_CHECK_PROGRESS_HANDLER; + private final PlanHandler LOGICAL_SHOW_REPLICA_CHECK_DIFF_HANDLER; private final PlanHandler CREATE_CCL_RULE_HANDLER; private final PlanHandler DROP_CCL_RULE_HANDLER; private final PlanHandler SHOW_CCL_RULE_HANDLER; private final PlanHandler CLEAR_CCL_RULES_HANDLER; private final PlanHandler ALTER_TABLEGROUP_BACKFILL_HANDLER; + private final PlanHandler PHYSICAL_BACKFILL_HANDLER; private final PlanHandler CREATE_TABLEGROUP_HANDLER; private final PlanHandler DROP_TABLEGROUP_HANDLER; @@ -739,6 +844,16 @@ public CommandHandlerFactoryMyImp(IRepository repo) { private final PlanHandler PAUSE_SCHEDULE_HANDLER; private final PlanHandler CONTINUE_SCHEDULE_HANDLER; private final PlanHandler FIRE_SCHEDULE_HANDLER; + private final PlanHandler CREATE_SECURITY_LABEL_COMPONENT_HANDLER; + private final PlanHandler 
DROP_SECURITY_LABEL_COMPONENT_HANDLER; + private final PlanHandler CREATE_SECURITY_LABEL_HANDLER; + private final PlanHandler DROP_SECURITY_LABEL_HANDLER; + private final PlanHandler CREATE_SECURITY_POLICY_HANDLER; + private final PlanHandler DROP_SECURITY_POLICY_HANDLER; + private final PlanHandler CREATE_SECURITY_ENTITY_HANDLER; + private final PlanHandler DROP_SECURITY_ENTITY_HANDLER; + private final PlanHandler GRANT_SECURITY_LABEL_HANDLER; + private final PlanHandler REVOKE_SECURITY_LABEL_HANDLER; private final PlanHandler LOGICAL_SET_DEFAULT_ROLE_HANDLER; @@ -908,7 +1023,14 @@ public PlanHandler getCommandHandler(RelNode logicalPlan, ExecutionContext execu } else if (logicalPlan instanceof LogicalDropView) { return LOGICAL_DROP_VIEW_HANDLER; } else if (logicalPlan instanceof LogicalCreateTable) { - return LOGICAL_CREATE_TABLE_HANDLER; + LogicalCreateTable logicalCreateTable = (LogicalCreateTable) logicalPlan; + if (logicalCreateTable.getSqlCreateTable() != null && logicalCreateTable.getSqlCreateTable() + .isOnlyConvertTableMode()) { + return LOGICAL_CONVERT_TABLE_MODE_HANDLER; + + } else { + return LOGICAL_CREATE_TABLE_HANDLER; + } } else if (logicalPlan instanceof LogicalAlterTable) { return LOGICAL_ALTER_TABLE_HANDLER; } else if (logicalPlan instanceof LogicalAlterTablePartitionCount) { @@ -939,18 +1061,18 @@ public PlanHandler getCommandHandler(RelNode logicalPlan, ExecutionContext execu return LOGICAL_GENERIC_DDL_HANDLER; } else if (logicalPlan instanceof LogicalCheckGsi) { return LOGICAL_CHECK_GSI_HANDLER; + } else if (logicalPlan instanceof LogicalCheckCci) { + return LOGICAL_CHECK_CCI_HANDLER; } else if (logicalPlan instanceof AlterTableGroupBackfill) { return ALTER_TABLEGROUP_BACKFILL_HANDLER; + } else if (logicalPlan instanceof PhysicalBackfill) { + return PHYSICAL_BACKFILL_HANDLER; } else if (logicalPlan instanceof LogicalAlterFileStorage) { return LOGICAL_ALTER_FILESTORAGE_HANDLER; - } else if (logicalPlan instanceof LogicalAlterFileStorage) { - return LOGICAL_ALTER_FILESTORAGE_HANDLER; - } else if (logicalPlan instanceof LogicalDropFileStorage) { - return LOGICAL_DROP_FILESTORAGE_HANDLER; - } else if (logicalPlan instanceof LogicalCreateFileStorage) { - return LOGICAL_CREATE_FILESTORAGE_HANDLER; } else if (logicalPlan instanceof LogicalDropFileStorage) { return LOGICAL_DROP_FILESTORAGE_HANDLER; + } else if (logicalPlan instanceof LogicalClearFileStorage) { + return LOGICAL_CLEAR_FILESTORAGE_HANDLER; } else if (logicalPlan instanceof LogicalCreateFileStorage) { return LOGICAL_CREATE_FILESTORAGE_HANDLER; } else if (logicalPlan instanceof PhyQueryOperation) { @@ -1040,6 +1162,8 @@ public PlanHandler getCommandHandler(RelNode logicalPlan, ExecutionContext execu return LOGICAL_ALTER_PROCEDURE_HANDLER; } else if (logicalPlan instanceof LogicalAlterFunction) { return LOGICAL_ALTER_FUNCTION_HANDLER; + } else if (logicalPlan instanceof LogicalConvertAllSequences) { + return CONVERT_ALL_SEQUENCES_HANDLER; } else if (logicalPlan instanceof BaseDalOperation) { if (logicalPlan instanceof PhyShow) { @@ -1055,6 +1179,8 @@ public PlanHandler getCommandHandler(RelNode logicalPlan, ExecutionContext execu return LOGICAL_SHOW_TABLE_INFO_HANDLER; case SHOW_LOCALITY_INFO: return LOGICAL_SHOW_LOCALITY_INFO_HANDLER; + case SHOW_PHYSICAL_DDL: + return LOGICAL_SHOW_PHYSICAL_DDL_HANDLER; case SHOW_HOTKEY: return LOGICAL_SHOW_HOTKEY_HANDLER; case SHOW_CREATE_DATABASE: @@ -1105,6 +1231,8 @@ public PlanHandler getCommandHandler(RelNode logicalPlan, ExecutionContext execu return 
LOGICAL_SHOW_TABLE_ACCESS_HANDLER; case SHOW_TRACE: return LOGICAL_SHOW_TRACE_HANDLER; + case SHOW_PRUNE_TRACE: + return LOGICAL_SHOW_PRUNE_TRACE_HANDLER; case SHOW_SEQUENCES: return LOGICAL_SHOW_SEQUENCES_HANDLER; case SHOW_RULE: @@ -1120,6 +1248,8 @@ public PlanHandler getCommandHandler(RelNode logicalPlan, ExecutionContext execu return LOGICAL_DESC_HANDLER; case CHECK_TABLE: return LOGICAL_CHECK_TABLE_HANDLER; + case CHECK_COLUMNAR_PARTITION: + return LOGICAL_CHECK_COLUMNAR_PARTITION_HANDLER; case KILL: return LOGICAL_KILL_HANDLER; case ANALYZE_TABLE: @@ -1162,12 +1292,12 @@ public PlanHandler getCommandHandler(RelNode logicalPlan, ExecutionContext execu return CLEAR_SEQ_CACHE_HANDLER; case INSPECT_SEQ_RANGE: return INSPECT_GROUP_SEQ_RANGE_HANDLER; - case CONVERT_ALL_SEQUENCES: - return CONVERT_ALL_SEQUENCES_HANDLER; case BASELINE: return LOGICAL_BASELINE_HANDLER; case SHOW_GLOBAL_INDEX: return SHOW_GLOBAL_INDEX_HANDLER; + case SHOW_COLUMNAR_INDEX: + return SHOW_COLUMNAR_INDEX_HANDLER; case SHOW_METADATA_LOCK: return SHOW_METADATA_LOCK_HANDLER; case SHOW_TRANS: @@ -1210,10 +1340,26 @@ public PlanHandler getCommandHandler(RelNode logicalPlan, ExecutionContext execu return LOGICAL_REBALANCE_MASTER_HANDLER; case RESET_MASTER: return LOGICAL_RESET_MASTER_HANDLER; + case REPLICA_HASH_CHECK: + return LOGICAL_REPLICA_HASHCHECK_HANDLER; case SET_CDC_GLOBAL: return LOGICAL_SET_CDC_GLOBAL_HANDLER; case FLUSH_LOGS: return LOGICAL_FLUSH_LOGS_HANDLER; + case START_REPLICA_CHECK: + return LOGICAL_START_REPLICA_CHECK_HANDLER; + case PAUSE_REPLICA_CHECK: + return LOGICAL_PAUSE_REPLICA_CHECK_HANDLER; + case CONTINUE_REPLICA_CHECK: + return LOGICAL_CONTINUE_REPLICA_CHECK_HANDLER; + case CANCEL_REPLICA_CHECK: + return LOGICAL_CANCEL_REPLICA_CHECK_HANDLER; + case RESET_REPLICA_CHECK: + return LOGICAL_RESET_REPLICA_CHECK_HANDLER; + case SHOW_REPLICA_CHECK_PROGRESS: + return LOGICAL_SHOW_REPLICA_CHECK_PROGRESS_HANDLER; + case SHOW_REPLICA_CHECK_DIFF: + return LOGICAL_SHOW_REPLICA_CHECK_DIFF_HANDLER; case SQL_SET_DEFAULT_ROLE: return LOGICAL_SET_DEFAULT_ROLE_HANDLER; @@ -1253,6 +1399,26 @@ public PlanHandler getCommandHandler(RelNode logicalPlan, ExecutionContext execu return CONTINUE_SCHEDULE_HANDLER; case FIRE_SCHEDULE: return FIRE_SCHEDULE_HANDLER; + case CREATE_SECURITY_LABEL_COMPONENT: + return CREATE_SECURITY_LABEL_COMPONENT_HANDLER; + case DROP_SECURITY_LABEL_COMPONENT: + return DROP_SECURITY_LABEL_COMPONENT_HANDLER; + case CREATE_SECURITY_LABEL: + return CREATE_SECURITY_LABEL_HANDLER; + case DROP_SECURITY_LABEL: + return DROP_SECURITY_LABEL_HANDLER; + case CREATE_SECURITY_POLICY: + return CREATE_SECURITY_POLICY_HANDLER; + case DROP_SECURITY_POLICY: + return DROP_SECURITY_POLICY_HANDLER; + case CREATE_SECURITY_ENTITY: + return CREATE_SECURITY_ENTITY_HANDLER; + case DROP_SECURITY_ENTITY: + return DROP_SECURITY_ENTITY_HANDLER; + case GRANT_SECURITY_LABEL: + return GRANT_SECURITY_LABEL_HANDLER; + case REVOKE_SECURITY_LABEL: + return REVOKE_SECURITY_LABEL_HANDLER; case SHOW_CREATE_TABLEGROUP: return LOGICAL_SHOW_CREATE_TABLEGROUP_HANDLER; default: @@ -1288,6 +1454,12 @@ public PlanHandler getCommandHandler(RelNode logicalPlan, ExecutionContext execu return LOGICAL_INSPECT_INDEX_HANDLER; } else if (logicalPlan instanceof LogicalAnalyzeTable) { return LOGICAL_ANALYZE_TABLE_HANDLER; + } else if (logicalPlan instanceof LogicalImportDatabase) { + return LOGICAL_IMPORT_DATABASE; + } else if (logicalPlan instanceof LogicalImportSequence) { + return LOGICAL_IMPORT_SEQUENCE; + } else if (logicalPlan instanceof 
LogicalAlterInstance) { + return LOGICAL_ALTER_INSTANCE_HANDLER; } throw new AssertionError("Unsupported RelNode: " + logicalPlan.getClass().getSimpleName()); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ConvertAllSequencesHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ConvertAllSequencesHandler.java index 62d31617f..9501bba91 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ConvertAllSequencesHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ConvertAllSequencesHandler.java @@ -17,62 +17,54 @@ package com.alibaba.polardbx.repo.mysql.handler; import com.alibaba.polardbx.common.utils.TStringUtil; -import com.alibaba.polardbx.executor.cursor.Cursor; -import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.ddl.job.factory.ConvertAllSequencesJobFactory; +import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; +import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory; +import com.alibaba.polardbx.executor.handler.ddl.LogicalCommonDdlHandler; import com.alibaba.polardbx.executor.spi.IRepository; -import com.alibaba.polardbx.executor.sync.ClearSeqCacheSyncAction; -import com.alibaba.polardbx.executor.sync.SyncManagerHelper; -import com.alibaba.polardbx.gms.metadb.seq.SequencesAccessor; import com.alibaba.polardbx.gms.metadb.table.SchemataAccessor; import com.alibaba.polardbx.gms.metadb.table.SchemataRecord; import com.alibaba.polardbx.gms.topology.SystemDbHelper; -import com.alibaba.polardbx.gms.util.SeqTypeUtil; import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; +import com.alibaba.polardbx.optimizer.core.rel.ddl.BaseDdlOperation; import com.alibaba.polardbx.sequence.exception.SequenceException; import org.apache.calcite.sql.SqlConvertAllSequences; +import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.TreeSet; import static com.alibaba.polardbx.common.constants.SequenceAttribute.Type; -public class ConvertAllSequencesHandler extends AbstractDalHandler { +public class ConvertAllSequencesHandler extends LogicalCommonDdlHandler { public ConvertAllSequencesHandler(IRepository repo) { super(repo); } @Override - Cursor doHandle(LogicalDal logicalPlan, ExecutionContext executionContext) { - SqlConvertAllSequences stmt = (SqlConvertAllSequences) logicalPlan.getNativeSqlNode(); + protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext executionContext) { + SqlConvertAllSequences stmt = (SqlConvertAllSequences) logicalDdlPlan.getNativeSqlNode(); Type fromType = stmt.getFromType(); Type toType = stmt.getToType(); String schemaName = stmt.getSchemaName(); boolean allSchemata = stmt.isAllSchemata(); - ArrayResultCursor resultCursor = new ArrayResultCursor("CONVERT_ALL_SEQUENCES"); - resultCursor.addColumn("SCHEMA_NAME", DataTypes.StringType); - resultCursor.addColumn("NUM_OF_CONVERTED_SEQUENCES", DataTypes.StringType); - resultCursor.addColumn("REMARK", DataTypes.StringType); - resultCursor.initMeta(); - final Set userSchemata = new HashSet<>(); List schemataRecords = SchemataAccessor.getAllSchemata(); schemataRecords.stream() .filter(s -> !SystemDbHelper.isDBBuildIn(s.schemaName)) .forEach(s -> userSchemata.add(s.schemaName.toLowerCase())); + final Set 
schemaToBeConvert = new TreeSet<>(String::compareToIgnoreCase); if (allSchemata) { - for (String schema : userSchemata) { - convert(schema, fromType, toType, resultCursor); - } + schemaToBeConvert.addAll(userSchemata); } else if (TStringUtil.isNotBlank(schemaName)) { if (userSchemata.contains(schemaName)) { - convert(schemaName, fromType, toType, resultCursor); + schemaToBeConvert.add(schemaName); } else { throw new SequenceException("Invalid schema name '" + schemaName + "'"); } @@ -80,24 +72,15 @@ Cursor doHandle(LogicalDal logicalPlan, ExecutionContext executionContext) { throw new SequenceException("Schema name should not be empty"); } - return resultCursor; - } - - private void convert(String schemaName, Type fromType, Type toType, ArrayResultCursor resultCursor) { - int numSequencesConverted = 0; - String remark = ""; - - boolean newSeqNotInvolved = fromType != Type.NEW && toType != Type.NEW; - if (SeqTypeUtil.isNewSeqSupported(schemaName) || newSeqNotInvolved) { - try { - numSequencesConverted = SequencesAccessor.change(schemaName, fromType, toType); - SyncManagerHelper.sync(new ClearSeqCacheSyncAction(schemaName, null, true, false)); - } catch (Exception e) { - remark = "Failed due to " + e.getMessage(); - } - } + DdlJobFactory convertAllSequencesJobFactory = new ConvertAllSequencesJobFactory( + new ArrayList<>(schemaToBeConvert), + fromType, + toType, + !allSchemata, + executionContext + ); - resultCursor.addRow(new Object[] {schemaName, numSequencesConverted, remark}); + return convertAllSequencesJobFactory.create(); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/DropTableGroupHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/DropTableGroupHandler.java deleted file mode 100644 index a31f11867..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/DropTableGroupHandler.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.repo.mysql.handler; - -import com.alibaba.polardbx.common.exception.TddlRuntimeException; -import com.alibaba.polardbx.common.exception.code.ErrorCode; -import com.alibaba.polardbx.common.utils.GeneralUtil; -import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.common.utils.logger.LoggerFactory; -import com.alibaba.polardbx.executor.cursor.Cursor; -import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; -import com.alibaba.polardbx.executor.ddl.job.task.tablegroup.TableGroupSyncTask; -import com.alibaba.polardbx.executor.handler.HandlerCommon; -import com.alibaba.polardbx.executor.spi.IRepository; -import com.alibaba.polardbx.executor.sync.SyncManagerHelper; -import com.alibaba.polardbx.executor.sync.TableGroupSyncAction; -import com.alibaba.polardbx.gms.partition.TablePartitionAccessor; -import com.alibaba.polardbx.gms.partition.TablePartitionRecord; -import com.alibaba.polardbx.gms.tablegroup.PartitionGroupAccessor; -import com.alibaba.polardbx.gms.tablegroup.TableGroupAccessor; -import com.alibaba.polardbx.gms.tablegroup.TableGroupConfig; -import com.alibaba.polardbx.gms.tablegroup.TableGroupRecord; -import com.alibaba.polardbx.gms.topology.DbInfoManager; -import com.alibaba.polardbx.gms.util.MetaDbLogUtil; -import com.alibaba.polardbx.gms.util.MetaDbUtil; -import com.alibaba.polardbx.optimizer.OptimizerContext; -import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalDropTableGroup; -import com.alibaba.polardbx.optimizer.tablegroup.TableGroupInfoManager; -import org.apache.calcite.rel.RelNode; - -import java.sql.Connection; -import java.util.List; - -/** - * Created by luoyanxin. - * - * @author luoyanxin - */ -public class DropTableGroupHandler extends HandlerCommon { - - public static Logger logger = LoggerFactory.getLogger(DropTableGroupHandler.class); - - public DropTableGroupHandler(IRepository repo) { - super(repo); - } - - @Override - public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { - LogicalDropTableGroup logicalDropTableGroup = (LogicalDropTableGroup) logicalPlan; - String schemaName = logicalDropTableGroup.getSchemaName(); - if (schemaName == null) { - schemaName = executionContext.getSchemaName(); - } - boolean isNewPart = DbInfoManager.getInstance().isNewPartitionDb(schemaName); - if (!isNewPart) { - throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, - "can't execute the drop tablegroup command in non-partitioning database"); - } - String tableGroupName = logicalDropTableGroup.getTableGroupName(); - boolean isIfExists = logicalDropTableGroup.isIfExists(); - TableGroupInfoManager tableGroupInfoManager = - OptimizerContext.getContext(schemaName).getTableGroupInfoManager(); - TableGroupConfig tableGroupConfig = tableGroupInfoManager.getTableGroupConfigByName(tableGroupName); - if (tableGroupConfig != null && !GeneralUtil.isEmpty(tableGroupConfig.getAllTables())) { - throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT, - String.format("The tablegroup[%s] is not empty, can't drop it", tableGroupName)); - } else { - TableGroupAccessor tableGroupAccessor = new TableGroupAccessor(); - PartitionGroupAccessor partitionGroupAccessor = new PartitionGroupAccessor(); - TablePartitionAccessor tablePartitionAccessor = new TablePartitionAccessor(); - try (Connection connection = MetaDbUtil.getConnection()) { - connection.setAutoCommit(false); - tableGroupAccessor.setConnection(connection); - 
partitionGroupAccessor.setConnection(connection); - tablePartitionAccessor.setConnection(connection); - List - tableGroupRecordList = - tableGroupAccessor.getTableGroupsBySchemaAndName(schemaName, tableGroupName, false); - if (GeneralUtil.isEmpty(tableGroupRecordList)) { - if (!isIfExists) { - throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT, - String.format("drop tablegroup error, tablegroup[%s] is not exist", tableGroupName)); - } else { - SyncManagerHelper - .sync(new TableGroupSyncAction(schemaName, tableGroupName)); - return new AffectRowCursor(new int[] {1}); - } - } - assert tableGroupRecordList.size() == 1; - Long tableGroupId = tableGroupRecordList.get(0).id; - List tablePartitionRecords = - tablePartitionAccessor.getTablePartitionsByDbNameGroupId(schemaName, tableGroupId); - if (!GeneralUtil.isEmpty(tablePartitionRecords)) { - throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT, - String.format("The tablegroup[%s] is not empty, can't drop it", tableGroupName)); - } - tableGroupAccessor.deleteTableGroupsById(schemaName, tableGroupId); - partitionGroupAccessor.deletePartitionGroupsByTableGroupId(tableGroupId, false); - connection.commit(); - } catch (Throwable ex) { - MetaDbLogUtil.META_DB_LOG.error(ex); - throw GeneralUtil.nestedException(ex); - } - SyncManagerHelper.sync(new TableGroupSyncAction(schemaName, tableGroupName), schemaName); - } - - return new AffectRowCursor(new int[] {1}); - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/GatherHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/GatherHandler.java index 65ffdc6ee..c16919dde 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/GatherHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/GatherHandler.java @@ -20,12 +20,10 @@ import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; import com.alibaba.polardbx.executor.cursor.impl.GatherCursor; -import com.alibaba.polardbx.executor.cursor.impl.MultiCursorAdapter; import com.alibaba.polardbx.executor.handler.HandlerCommon; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.PhyQueryOperation; import com.alibaba.polardbx.optimizer.core.row.Row; -import org.apache.calcite.plan.RelOptUtil; import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlKind; @@ -52,12 +50,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { kind.belongsTo(SqlKind.DML) || kind.belongsTo(SqlKind.DDL) || kind.belongsTo(SqlKind.SQL_SET_QUERY); } Cursor cursor = ExecutorHelper.executeByCursor(relNode, executionContext, false); - if (cursor instanceof MultiCursorAdapter) { - // handle logicalView - inputCursors.addAll(((MultiCursorAdapter) cursor).getSubCursors()); - } else { - inputCursors.add(cursor); - } + inputCursors.add(cursor); } if (useUpdate) { int affectRows = 0; @@ -69,7 +62,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { } return new AffectRowCursor(affectRows); } else { - return new GatherCursor(inputCursors, executionContext); + return inputCursors.size() == 1 ? 
inputCursors.get(0) : new GatherCursor(inputCursors, executionContext); } } }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/GsiBackfillHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/GsiBackfillHandler.java index 6d1245022..f9c6abe9c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/GsiBackfillHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/GsiBackfillHandler.java
@@ -18,8 +18,10 @@ import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.properties.ConnectionProperties; +import com.alibaba.polardbx.common.properties.MetricLevel; import com.alibaba.polardbx.common.properties.ParamManager; import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.executor.backfill.Loader; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; import com.alibaba.polardbx.executor.ddl.job.task.gsi.CheckGsiTask;
@@ -28,6 +30,7 @@ import com.alibaba.polardbx.executor.handler.HandlerCommon; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.GsiBackfill; import com.alibaba.polardbx.optimizer.utils.PhyTableOperationUtil;
@@ -35,7 +38,6 @@ import com.alibaba.polardbx.statistics.SQLRecorderLogger; import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlSelect; -import org.apache.commons.collections.MapUtils; import java.util.ArrayList; import java.util.List;
@@ -64,23 +66,43 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { List indexNames = backfill.getIndexNames(); List columnsName = backfill.getColumns(); Map virtualColumnMap = backfill.getVirtualColumnMap(); + Map backfillColumnMap = backfill.getBackfillColumnMap(); + List modifyStringColumns = backfill.getModifyStringColumns(); + boolean useChangeSet = backfill.isUseChangeSet(); + boolean modifyColumn = backfill.isModifyColumn();
BackfillExecutor backfillExecutor = new BackfillExecutor((List inputs, ExecutionContext executionContext1) -> { QueryConcurrencyPolicy queryConcurrencyPolicy = getQueryConcurrencyPolicy(executionContext1); + if (Loader.canUseBackfillReturning(executionContext1, schemaName)) { + queryConcurrencyPolicy = QueryConcurrencyPolicy.GROUP_CONCURRENT_BLOCK; + } List inputCursors = new ArrayList<>(inputs.size()); executeWithConcurrentPolicy(executionContext1, inputs, queryConcurrencyPolicy, inputCursors, schemaName); return inputCursors; });
- // modify partition key column type, do not clear sql_mode - if (MapUtils.isEmpty(virtualColumnMap)) { + boolean useBinary = executionContext.getParamManager().getBoolean(ConnectionParams.BACKFILL_USING_BINARY); + boolean omcForce = executionContext.getParamManager().getBoolean(ConnectionParams.OMC_FORCE_TYPE_CONVERSION); + boolean canUseReturning = Loader.canUseBackfillReturning(executionContext, schemaName); + + // online modify column, does not clear sql_mode + if (modifyColumn) { + executionContext = setChangeSetApplySqlMode(executionContext.copy()); + if (!useBinary && !omcForce) { + // select + insert, needs encoding upgrade + upgradeEncoding(executionContext, schemaName, baseTableName); + } + // Do not use the backfill insert-ignore-returning optimization for now, because it cannot handle sql_mode strict-mode behavior + canUseReturning = false; + } else { executionContext = clearSqlMode(executionContext.copy()); + if (!useBinary) { + upgradeEncoding(executionContext, schemaName, baseTableName); + } }
- upgradeEncoding(executionContext, schemaName, baseTableName); - - executionContext.getExtraCmds().put(ConnectionProperties.MPP_METRIC_LEVEL, 1); + executionContext.getExtraCmds().put(ConnectionProperties.MPP_METRIC_LEVEL, MetricLevel.SQL.metricLevel); PhyTableOperationUtil.disableIntraGroupParallelism(schemaName, executionContext);
@@ -92,15 +114,23 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { assert indexNames.size() > 0; affectRows = backfillExecutor .addColumnsBackfill(schemaName, baseTableName, indexNames, columnsName, executionContext); + } else if (backfill.isMirrorCopy()) { + // Creating GSI by mirror copy. + assert 1 == indexNames.size(); + affectRows = + backfillExecutor.mirrorCopyGsiBackfill(schemaName, baseTableName, indexNames.get(0), useChangeSet, + useBinary, executionContext); } else { // Normal creating GSI. assert 1 == indexNames.size(); - affectRows = backfillExecutor.backfill(schemaName, baseTableName, indexNames.get(0), executionContext); + affectRows = + backfillExecutor.backfill(schemaName, baseTableName, indexNames.get(0), useBinary, useChangeSet, + canUseReturning, modifyStringColumns, executionContext); }
// Check GSI immediately after creation by default. final ParamManager pm = executionContext.getParamManager(); - boolean check = pm.getBoolean(ConnectionParams.GSI_CHECK_AFTER_CREATION); + boolean check = pm.getBoolean(ConnectionParams.GSI_CHECK_AFTER_CREATION) && !useChangeSet; if (!check) { return new AffectRowCursor(affectRows); }
@@ -110,13 +140,14 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { // TODO(moyi) separate check to another task for (String indexName : indexNames) { + baseTableName = getPrimaryTableName(schemaName, baseTableName, backfill.isMirrorCopy(), executionContext); boolean isPrimaryBroadCast = OptimizerContext.getContext(schemaName).getRuleManager().isBroadCast(baseTableName); boolean isGsiBroadCast = OptimizerContext.getContext(schemaName).getRuleManager().isBroadCast(indexName); CheckGsiTask checkTask = new CheckGsiTask(schemaName, baseTableName, indexName, lockMode, lockMode, params, false, "", - isPrimaryBroadCast, isGsiBroadCast, virtualColumnMap); + isPrimaryBroadCast, isGsiBroadCast, virtualColumnMap, backfillColumnMap); checkTask.checkInBackfill(executionContext); }
@@ -124,4 +155,13 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { return new AffectRowCursor(affectRows); } + public static String getPrimaryTableName(String schemaName, String baseTableName, boolean mirrorCopy, + ExecutionContext executionContext) { + String primaryTableName = baseTableName; + TableMeta sourceTableMeta = executionContext.getSchemaManager(schemaName).getTable(baseTableName); + if (mirrorCopy && sourceTableMeta.isGsi()) { + primaryTableName = sourceTableMeta.getGsiTableMetaBean().gsiMetaBean.tableName; + } + return primaryTableName; + } }
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/InspectRuleVersionHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/InspectRuleVersionHandler.java index 85c36c942..19ef9d261 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/InspectRuleVersionHandler.java +++
b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/InspectRuleVersionHandler.java @@ -27,6 +27,7 @@ import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; import com.alibaba.polardbx.gms.metadb.table.TableStatus; import com.alibaba.polardbx.gms.metadb.table.TablesExtRecord; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.util.MetaDbUtil; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; @@ -72,7 +73,8 @@ private void handleGMS(ArrayResultCursor resultCursor, ExecutionContext executio Pair tableCount = fetchTableCount(executionContext.getSchemaName()); resultCursor.addRow(new Object[] {"META_DB", tableCount.getKey() + "/" + tableCount.getValue(), "", "", ""}); - List>> resultSets = SyncManagerHelper.sync(new InspectRuleVersionSyncAction()); + List>> resultSets = SyncManagerHelper.sync(new InspectRuleVersionSyncAction(), + SyncScope.CURRENT_ONLY); for (List> resultSet : resultSets) { if (resultSet != null && resultSet.size() > 0) { for (Map row : resultSet) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/InspectSeqRangeHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/InspectSeqRangeHandler.java index 3ddd96e04..d8eba9f33 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/InspectSeqRangeHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/InspectSeqRangeHandler.java @@ -21,6 +21,7 @@ import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.executor.sync.InspectSeqRangeSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; @@ -67,7 +68,8 @@ Cursor doHandle(LogicalDal logicalPlan, ExecutionContext executionContext) { // Get current ranges from all servers. 
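// The explicit SyncScope argument below is new in this patch; judging from its use elsewhere in this change, SyncScope.ALL presumably broadcasts the sync action to every node (hence "from all servers"), while MASTER_ONLY and CURRENT_ONLY restrict it to the leader node or the local node.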
List>> resultSets = - SyncManagerHelper.sync(new InspectSeqRangeSyncAction(schemaName, seqName), schemaName); + SyncManagerHelper.sync(new InspectSeqRangeSyncAction(schemaName, seqName), schemaName, + SyncScope.ALL); for (List> resultSet : resultSets) { if (resultSet != null) { for (Map row : resultSet) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalAlterSystemLeaderHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalAlterSystemLeaderHandler.java index ef927ae3e..80bc7d3bd 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalAlterSystemLeaderHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalAlterSystemLeaderHandler.java @@ -26,6 +26,7 @@ import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.executor.sync.AlterSystemReloadLeaderSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalAlterSystemLeader; import org.apache.calcite.rel.RelNode; @@ -57,7 +58,7 @@ private int syncForceToBeLeader(String nodeId) { List>> results; try { results = SyncManagerHelper.sync( - new AlterSystemReloadLeaderSyncAction(nodeId)); + new AlterSystemReloadLeaderSyncAction(nodeId), SyncScope.MASTER_ONLY); } catch (Throwable e) { logger.error(e); throw new TddlNestableRuntimeException(e); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalAlterSystemRefreshStorageHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalAlterSystemRefreshStorageHandler.java index 56cbfe862..6c4acaf9a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalAlterSystemRefreshStorageHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalAlterSystemRefreshStorageHandler.java @@ -26,6 +26,7 @@ import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.executor.sync.AlterSystemRefreshStorageSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalAlterSystemRefreshStorage; import org.apache.calcite.rel.RelNode; @@ -56,7 +57,8 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { private void syncRefreshStorage(String dnId, String vipAddr, String user, String encPasswd) { try { - SyncManagerHelper.sync(new AlterSystemRefreshStorageSyncAction(dnId, vipAddr, user, encPasswd)); + SyncManagerHelper.sync(new AlterSystemRefreshStorageSyncAction(dnId, vipAddr, user, encPasswd), + SyncScope.ALL); } catch (Throwable e) { logger.error(e); throw new TddlNestableRuntimeException(e); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalAlterSystemReloadStorageHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalAlterSystemReloadStorageHandler.java index 75b84932e..f11105c06 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalAlterSystemReloadStorageHandler.java +++ 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalAlterSystemReloadStorageHandler.java @@ -27,9 +27,8 @@ import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.executor.sync.AlterSystemReloadStorageSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; -import com.alibaba.polardbx.gms.ha.HaSwitchParams; import com.alibaba.polardbx.gms.ha.impl.StorageInstHaContext; -import com.alibaba.polardbx.gms.metadb.ccl.CclTriggerRecord; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.gms.topology.StorageInfoRecord; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; @@ -37,7 +36,6 @@ import org.apache.calcite.rel.RelNode; import org.apache.commons.lang.StringUtils; -import java.util.Date; import java.util.List; /** @@ -63,7 +61,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { private void syncReloadStorage(List dnList) { try { - SyncManagerHelper.sync(new AlterSystemReloadStorageSyncAction(dnList)); + SyncManagerHelper.sync(new AlterSystemReloadStorageSyncAction(dnList), SyncScope.ALL); } catch (Throwable e) { logger.error(e); throw new TddlNestableRuntimeException(e); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalAnalyzeTableDdlHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalAnalyzeTableDdlHandler.java index 49c8fd4ad..eaf5bf16c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalAnalyzeTableDdlHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalAnalyzeTableDdlHandler.java @@ -16,12 +16,15 @@ package com.alibaba.polardbx.repo.mysql.handler; +import com.alibaba.polardbx.common.utils.LoggerUtil; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionProperties; import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.ddl.job.task.basic.AnalyzeTablePhyDdlTask; +import com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcAnalyzeTableMarkTask; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJobFactory; import com.alibaba.polardbx.executor.ddl.newengine.job.ExecutableDdlJob; @@ -30,6 +33,7 @@ import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; import com.alibaba.polardbx.gms.metadb.misc.DdlEngineTaskAccessor; import com.alibaba.polardbx.gms.metadb.misc.DdlEngineTaskRecord; +import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.GlobalIndexMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; @@ -40,6 +44,7 @@ import org.apache.calcite.sql.SqlIdentifier; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.util.Pair; +import org.apache.calcite.util.Util; import org.apache.commons.collections.CollectionUtils; import java.sql.Connection; @@ -53,9 +58,7 @@ * @author wumu */ public class LogicalAnalyzeTableDdlHandler extends LogicalCommonDdlHandler { - private static 
final Logger logger = LoggerFactory.getLogger("STATISTICS"); - - private final int ANALYZE_TABLE_DEFAULT_SPEED = 100000000; + private static final Logger logger = LoggerUtil.statisticsLogger; public LogicalAnalyzeTableDdlHandler(IRepository repo) { super(repo); @@ -63,15 +66,28 @@ public LogicalAnalyzeTableDdlHandler(IRepository repo) { @Override protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext executionContext) { - if (executionContext.getExtraCmds().get(ConnectionProperties.ANALYZE_TABLE_SPEED_LIMITATION) == null) { - executionContext.getExtraCmds().put(ConnectionProperties.ANALYZE_TABLE_SPEED_LIMITATION, - ANALYZE_TABLE_DEFAULT_SPEED); - } LogicalAnalyzeTable logicalAnalyzeTable = (LogicalAnalyzeTable) logicalDdlPlan; final SqlAnalyzeTableDdl analyzeTable = (SqlAnalyzeTableDdl) logicalAnalyzeTable.getNativeSqlNode(); - List> tableNameList = - extractTableList(analyzeTable.getTableNames(), executionContext.getSchemaName(), executionContext); + + String defaultSchemaName = executionContext.getSchemaName(); + + List> tableNameList = new ArrayList<>(); + for (SqlNode tableSqlNode : analyzeTable.getTableNames()) { + String schemaName = defaultSchemaName; + String tableName; + if (tableSqlNode instanceof SqlIdentifier) { + if (((SqlIdentifier) tableSqlNode).names.size() == 2) { + schemaName = ((SqlIdentifier) tableSqlNode).names.get(0); + } + } + tableName = Util.last(((SqlIdentifier) tableSqlNode).names); + OptimizerContext optimizerContext = OptimizerContext.getContext(schemaName); + if (optimizerContext == null) { + throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_DATABASE, schemaName); + } + tableNameList.add(Pair.of(schemaName, tableName)); + } ExecutableDdlJob result = new ExecutableDdlJob(); if (!tableNameList.isEmpty()) { @@ -90,6 +106,10 @@ protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext e } } + CdcAnalyzeTableMarkTask cdcAnalyzeTableMarkTask = new CdcAnalyzeTableMarkTask(executionContext.getSchemaName(), + buildCdcMarkTableName(analyzeTable.getTableNames())); + result.addTask(cdcAnalyzeTableMarkTask); + return result; } @@ -116,21 +136,18 @@ record = accessor.archiveQuery(jobId, taskId); AnalyzeTablePhyDdlTask task = (AnalyzeTablePhyDdlTask) deSerializeTask(record.name, record.value); List schemaNames = task.getSchemaNames(); List tableNames = task.getTableNames(); - List useHill = task.getUseHll(); - List success = task.getSuccess(); + List useHlls = task.getUseHll(); + List msg = task.getMsg(); for (int i = 0; i < tableNames.size(); ++i) { String schemaName = schemaNames.get(i); String table = tableNames.get(i); - if (!useHill.get(i)) { + if (!useHlls.get(i)) { result.addRow(new Object[] {schemaName + "." + table, "analyze", "use hll", "false"}); } - if (success.get(i)) { - result.addRow(new Object[] {schemaName + "." + table, "analyze", "status", "OK"}); - } else { - result.addRow(new Object[] {schemaName + "." + table, "analyze", "status", "FAIL"}); - } + result.addRow(new Object[] {schemaName + "." 
+ table, "analyze", "status", msg.get(i)}); +// } } } catch (Throwable ex) { @@ -141,6 +158,18 @@ record = accessor.archiveQuery(jobId, taskId); return result; } + private String buildCdcMarkTableName(List tables) { + if (tables == null || tables.isEmpty()) { + return "*"; + } else { + if (tables.size() > 1) { + return "*"; + } else { + return ((SqlIdentifier) tables.get(0)).getLastName(); + } + } + } + private List> extractTableList(List tableNameSqlNodeList, String currentSchemaName, ExecutionContext ec) { if (CollectionUtils.isEmpty(tableNameSqlNodeList)) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalAnalyzeTableHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalAnalyzeTableHandler.java deleted file mode 100644 index ad6c4dab9..000000000 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalAnalyzeTableHandler.java +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.alibaba.polardbx.repo.mysql.handler; - -import com.alibaba.druid.util.JdbcUtils; -import com.alibaba.polardbx.common.exception.TddlRuntimeException; -import com.alibaba.polardbx.common.exception.code.ErrorCode; -import com.alibaba.polardbx.common.properties.ConnectionProperties; -import com.alibaba.polardbx.common.utils.Pair; -import com.alibaba.polardbx.common.utils.logger.Logger; -import com.alibaba.polardbx.common.utils.logger.LoggerFactory; -import com.alibaba.polardbx.config.ConfigDataMode; -import com.alibaba.polardbx.executor.cursor.Cursor; -import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; -import com.alibaba.polardbx.executor.handler.HandlerCommon; -import com.alibaba.polardbx.executor.spi.IDataSourceGetter; -import com.alibaba.polardbx.executor.spi.IRepository; -import com.alibaba.polardbx.executor.utils.SchemaMetaUtil; -import com.alibaba.polardbx.gms.module.LogLevel; -import com.alibaba.polardbx.gms.module.LogPattern; -import com.alibaba.polardbx.gms.module.Module; -import com.alibaba.polardbx.gms.module.ModuleLogInfo; -import com.alibaba.polardbx.optimizer.OptimizerContext; -import com.alibaba.polardbx.optimizer.config.table.TableMeta; -import com.alibaba.polardbx.optimizer.config.table.statistic.StatisticManager; -import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; -import com.alibaba.polardbx.optimizer.core.planner.PlanCache; -import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; -import com.alibaba.polardbx.optimizer.exception.TableNotFoundException; -import com.alibaba.polardbx.repo.mysql.spi.MyDataSourceGetter; -import org.apache.calcite.rel.RelNode; -import org.apache.calcite.sql.SqlAnalyzeTable; -import org.apache.calcite.sql.SqlIdentifier; -import org.apache.calcite.sql.SqlNode; -import org.apache.calcite.util.Util; -import org.apache.commons.lang.StringUtils; - 
-import javax.sql.DataSource; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.util.ArrayList; -import java.util.List; -import java.util.stream.Collectors; - -import static com.alibaba.polardbx.common.properties.ConnectionParams.ENABLE_HLL; -import static com.alibaba.polardbx.executor.gms.util.StatisticUtils.forceAnalyzeColumns; - -/** - * @author chenmo.cm - */ -public class LogicalAnalyzeTableHandler extends HandlerCommon { - private static final Logger logger = LoggerFactory.getLogger("STATISTICS"); - - public LogicalAnalyzeTableHandler(IRepository repo) { - super(repo); - } - - public final String ANALYZE_TABLE_SQL = "ANALYZE TABLE "; - private final int ANALYZE_TABLE_DEFAULT_SPEED = 100000000; - - @Override - public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { - final LogicalDal dal = (LogicalDal) logicalPlan; - final SqlAnalyzeTable analyzeTable = (SqlAnalyzeTable) dal.getNativeSqlNode(); - - if (executionContext.getExtraCmds().get(ConnectionProperties.ANALYZE_TABLE_SPEED_LIMITATION) == null) { - executionContext.getExtraCmds().put(ConnectionProperties.ANALYZE_TABLE_SPEED_LIMITATION, - ANALYZE_TABLE_DEFAULT_SPEED); - } - - String defaultSchemaName = executionContext.getSchemaName(); - - List> schemaTables = new ArrayList<>(); - for (SqlNode tableSqlNode : analyzeTable.getTableNames()) { - String schemaName = defaultSchemaName; - String tableName; - if (tableSqlNode instanceof SqlIdentifier) { - if (((SqlIdentifier) tableSqlNode).names.size() == 2) { - schemaName = ((SqlIdentifier) tableSqlNode).names.get(0); - } - } - tableName = Util.last(((SqlIdentifier) tableSqlNode).names); - OptimizerContext optimizerContext = OptimizerContext.getContext(schemaName); - if (optimizerContext == null) { - throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_DATABASE, schemaName); - } - schemaTables.add(Pair.of(schemaName, tableName)); - } - - ArrayResultCursor result = new ArrayResultCursor("analyzeTable"); - result.addColumn("Table", DataTypes.StringType); - result.addColumn("Op", DataTypes.StringType); - result.addColumn("Msg_type", DataTypes.StringType); - result.addColumn("Msg_text", DataTypes.StringType); - result.initMeta(); - - long start = System.currentTimeMillis(); - for (Pair pair : schemaTables) { - String schemaName = pair.getKey(); - String table = pair.getValue(); - - IDataSourceGetter mysqlDsGetter = new MyDataSourceGetter(schemaName); - - TableMeta tableMeta; - try { - if (ConfigDataMode.isMasterMode()) { - doAnalyzeOneLogicalTable(schemaName, table, mysqlDsGetter, executionContext); - } - tableMeta = - OptimizerContext.getContext(schemaName).getLatestSchemaManager().getTable(table); - } catch (TableNotFoundException e) { - result.addRow(new Object[] {schemaName + "." + table, "analyze", "status", "NO TABLE"}); - continue; - } - - if (tableMeta == null) { - result.addRow(new Object[] {schemaName + "." + table, "analyze", "status", "NO TABLE"}); - continue; - } else { - if (!executionContext.getParamManager().getBoolean(ENABLE_HLL) || !SchemaMetaUtil - .checkSupportHll(schemaName)) { - result.addRow(new Object[] {schemaName + "." + table, "analyze", "use hll", "false"}); - } - } - if (OptimizerContext.getContext(schemaName).getRuleManager().getTableRule(table) == null) { - logger.warn( - "no table rule for logicalTableName = " + table + ", analyze this table as the single table!"); - } - - boolean success = forceAnalyzeColumns(schemaName, table); - if (success) { - result.addRow(new Object[] {schemaName + "." 
+ table, "analyze", "status", "OK"}); - } else { - result.addRow(new Object[] {schemaName + "." + table, "analyze", "status", "FAIL"}); - } - - // refresh plancache - PlanCache.getInstance().invalidate(table); - } - long end = System.currentTimeMillis(); - ModuleLogInfo.getInstance() - .logRecord(Module.STATISTICS, - LogPattern.PROCESS_END, - new String[] { - "analyze table " + schemaTables.stream().map(pair -> { - String schemaName = pair.getKey() == null ? defaultSchemaName : pair.getKey(); - String tableName = pair.getValue(); - return schemaName + "." + tableName; - }).collect(Collectors.joining(",")), - "consuming " + (end - start) / 1000.0 + " seconds " + executionContext.getTraceId() - }, - LogLevel.NORMAL); - return result; - } - - protected void doAnalyzeOneLogicalTable(String schemaName, String logicalTableName, - IDataSourceGetter mysqlDsGetter, ExecutionContext executionContext) { - List> keys = - StatisticManager.getInstance().buildStatisticKey(schemaName, logicalTableName, executionContext); - for (Pair key : keys) { - String group = key.getKey(); - String physicalTableName = key.getValue(); - doAnalyzeOnePhysicalTable(group, physicalTableName, mysqlDsGetter); - } - } - - protected void doAnalyzeOnePhysicalTable(String group, String physicalTableName, IDataSourceGetter mysqlDsGetter) { - if (StringUtils.isEmpty(physicalTableName)) { - return; - } - physicalTableName = physicalTableName.toLowerCase(); - DataSource ds = mysqlDsGetter.getDataSource(group); - if (ds == null) { - logger.error("Analyze physical table " + physicalTableName - + " cannot be fetched, datasource is null, group name is " + group); - return; - } - Connection conn = null; - PreparedStatement stmt = null; - try { - conn = ds.getConnection(); - stmt = conn.prepareStatement(ANALYZE_TABLE_SQL + physicalTableName); - stmt.execute(); - } catch (Exception e) { - logger.error("Analyze physical table " + physicalTableName + " ERROR"); - return; - } finally { - JdbcUtils.close(stmt); - JdbcUtils.close(conn); - } - } -} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalBaselineHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalBaselineHandler.java index c1937c44f..7a3ac9b3f 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalBaselineHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalBaselineHandler.java @@ -26,12 +26,14 @@ import com.alibaba.polardbx.executor.sync.BaselineLoadSyncAction; import com.alibaba.polardbx.executor.sync.BaselinePersistSyncAction; import com.alibaba.polardbx.executor.sync.SyncManagerHelper; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.PlannerContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.planner.ExecutionPlan; import com.alibaba.polardbx.optimizer.core.planner.Planner; import com.alibaba.polardbx.optimizer.core.planner.SqlConverter; +import com.alibaba.polardbx.optimizer.core.rel.DirectTableOperation; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalBaseline; import com.alibaba.polardbx.optimizer.hint.util.HintConverter; import com.alibaba.polardbx.optimizer.planmanager.BaselineInfo; @@ -236,6 +238,10 @@ private Cursor baselineAdd(String hint, String parameterizedSql, ExecutionContex return result; } String planJsonString = 
PlanManagerUtil.relNodeToJson(plan); + if (!PlanManagerUtil.baselineSupported(plan)) { + throw new TddlRuntimeException(ERR_BASELINE, "not support baseline add"); + } + PlanInfo planInfo = PlanManager.getInstance() .createPlanInfo(schemaName, planJsonString, plan, baselineInfo.getId(), executionContext.getTraceId(), @@ -280,10 +286,10 @@ private Cursor baselineLPCVD(List idList, ExecutionContext executionContex for (Long id : idList) { switch (operation.toUpperCase()) { case "LOAD": - SyncManagerHelper.syncWithDefaultDB(new BaselineLoadSyncAction()); + SyncManagerHelper.syncWithDefaultDB(new BaselineLoadSyncAction(), SyncScope.CURRENT_ONLY); break; case "PERSIST": - SyncManagerHelper.syncWithDefaultDB(new BaselinePersistSyncAction()); + SyncManagerHelper.syncWithDefaultDB(new BaselinePersistSyncAction(), SyncScope.CURRENT_ONLY); break; case "DELETE": { BaselineSyncController baselineSyncController = new BaselineSyncController(); @@ -299,7 +305,12 @@ private Cursor baselineLPCVD(List idList, ExecutionContext executionContex for (BaselineInfo baselineInfo : PlanManager.getInstance().getBaselineMap(schemaName).values()) { for (PlanInfo planInfo : baselineInfo.getAcceptedPlans().values()) { if (planInfo.getId() == id) { - baselineSyncController.deletePlan(schemaName, baselineInfo, planInfo); + if (baselineInfo.getAcceptedPlans().size() == 1) { + baselineSyncController.deleteBaseline(schemaName, baselineInfo); + break; + } else { + baselineSyncController.deletePlan(schemaName, baselineInfo, planInfo); + } } } for (PlanInfo planInfo : baselineInfo.getUnacceptedPlans().values()) { @@ -315,10 +326,10 @@ private Cursor baselineLPCVD(List idList, ExecutionContext executionContex } else { switch (operation.toUpperCase()) { case "LOAD": - SyncManagerHelper.syncWithDefaultDB(new BaselineLoadSyncAction()); + SyncManagerHelper.syncWithDefaultDB(new BaselineLoadSyncAction(), SyncScope.CURRENT_ONLY); break; case "PERSIST": - SyncManagerHelper.syncWithDefaultDB(new BaselinePersistSyncAction()); + SyncManagerHelper.syncWithDefaultDB(new BaselinePersistSyncAction(), SyncScope.CURRENT_ONLY); break; case "DELETE_ALL": { BaselineSyncController baselineSyncController = new BaselineSyncController(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalCheckColumnarPartitionHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalCheckColumnarPartitionHandler.java new file mode 100644 index 000000000..c807a2506 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalCheckColumnarPartitionHandler.java @@ -0,0 +1,171 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.repo.mysql.handler; + +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.handler.HandlerCommon; +import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.gms.privilege.PolarAccountInfo; +import com.alibaba.polardbx.gms.util.MetaDbUtil; +import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.PlannerContext; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.optimizer.core.function.calc.scalar.CanAccessTable; +import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.sql.SqlCheckColumnarPartition; +import org.apache.calcite.sql.SqlIdentifier; +import org.apache.calcite.sql.SqlNode; +import org.apache.calcite.util.Pair; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.List; +import java.util.Set; + +public class LogicalCheckColumnarPartitionHandler extends HandlerCommon { + + static final String GET_PARTITION_INFO = + "select a.table_name as table_name, a.index_name as index_name, partition_name, " + + "SUM(CASE WHEN b.file_name LIKE '%.orc' THEN 1 ELSE 0 END) as orc_files, " + + "SUM(CASE WHEN b.file_name LIKE '%.orc' THEN table_rows ELSE 0 END) as orc_rows, " + + "SUM(CASE WHEN b.file_name LIKE '%.csv' THEN 1 ELSE 0 END) as csv_files, " + + "SUM(CASE WHEN b.file_name LIKE '%.csv' THEN table_rows ELSE 0 END) as csv_rows " + + "from files b join columnar_table_mapping a " + + "on b.logical_table_name = a.table_id " + + "where a.table_schema = ? " + + "and a.table_name = ? " + + "and a.index_name = ? " + + "and b.logical_schema_name = ? 
" + + "and (b.file_name like '%.orc' or b.file_name like '%.csv') " + + "and b.remove_ts is null " + + "group by b.partition_name;\n"; + + public LogicalCheckColumnarPartitionHandler(IRepository repo) { + super(repo); + } + + @Override + public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { + final LogicalDal dal = (LogicalDal) logicalPlan; + final SqlCheckColumnarPartition checkColumnar = (SqlCheckColumnarPartition) dal.getNativeSqlNode(); + + String appName = PlannerContext.getPlannerContext(logicalPlan).getSchemaName(); + SqlNode tableName = checkColumnar.getTableName(); + Pair schemaAndTable = null; + if (tableName instanceof SqlIdentifier && ((SqlIdentifier) tableName).names.size() == 2) { + List names = ((SqlIdentifier) tableName).names; + schemaAndTable = Pair.of(names.get(0), names.get(1)); + } else { + schemaAndTable = Pair.of(appName, tableName.toString()); + } + + // if not have privilege, throw error + if (!CanAccessTable.verifyPrivileges(schemaAndTable.getKey(), schemaAndTable.getValue(), executionContext)) { + PolarAccountInfo user = executionContext.getPrivilegeContext().getPolarUserInfo(); + throw new TddlRuntimeException(ErrorCode.ERR_CHECK_PRIVILEGE_FAILED, "check columnar partition", + user.getUsername(), user.getHost()); + } + + ArrayResultCursor result = new ArrayResultCursor("checkColumnarPartition"); + result.addColumn("Logical_Table", DataTypes.StringType); + result.addColumn("Columnar_Index", DataTypes.StringType); + result.addColumn("Partition", DataTypes.StringType); + result.addColumn("Orc_Files", DataTypes.UIntegerType); + result.addColumn("Orc_Rows", DataTypes.ULongType); + result.addColumn("Csv_Files", DataTypes.UIntegerType); + result.addColumn("Csv_Rows", DataTypes.ULongType); + result.addColumn("Extra", DataTypes.StringType); + + TableMeta tableMeta = OptimizerContext.getContext(schemaAndTable.getKey()).getLatestSchemaManager() + .getTable(schemaAndTable.getValue()); + + // not have columnar index, just return + if (tableMeta.getColumnarIndexPublished() == null) { + return result; + } + + Set columnarNames = tableMeta.getColumnarIndexPublished().keySet(); + + try (Connection metaDbConn = MetaDbUtil.getConnection(); + PreparedStatement statement = metaDbConn.prepareStatement(GET_PARTITION_INFO)) { + for (String columnarName : columnarNames) { + statement.setString(1, schemaAndTable.getKey()); + statement.setString(2, schemaAndTable.getValue()); + statement.setString(3, columnarName); + statement.setString(4, schemaAndTable.getKey()); + + long minOrcFiles = Long.MAX_VALUE, maxOrcFiles = Long.MIN_VALUE; + long minOrcRows = Long.MAX_VALUE, maxOrcRows = Long.MIN_VALUE; + long minCsvFiles = Long.MAX_VALUE, maxCsvFiles = Long.MIN_VALUE; + long minCsvRows = Long.MAX_VALUE, maxCsvRows = Long.MIN_VALUE; + + long totalOrcFiles = 0, totalOrcRows = 0, totalCsvFiles = 0, totalCsvRows = 0; + try (ResultSet rs = statement.executeQuery()) { + while (rs.next()) { + String logicalName = rs.getString("table_name"); + String indexName = rs.getString("index_name"); + String partitionName = rs.getString("partition_name"); + long orcFiles = rs.getLong("orc_files"); + long orcRows = rs.getLong("orc_rows"); + long csvFiles = rs.getLong("csv_files"); + long csvRows = rs.getLong("csv_rows"); + + // write partition info + result.addRow(new Object[] { + logicalName, indexName, partitionName, orcFiles, orcRows, csvFiles, csvRows, ""}); + + // update min/max info + minOrcFiles = Math.min(orcFiles, minOrcFiles); + maxOrcFiles = Math.max(orcFiles, maxOrcFiles); + 
+ statement.setString(1, schemaAndTable.getKey()); + statement.setString(2, schemaAndTable.getValue()); + statement.setString(3, columnarName); + statement.setString(4, schemaAndTable.getKey()); + + long minOrcFiles = Long.MAX_VALUE, maxOrcFiles = Long.MIN_VALUE; + long minOrcRows = Long.MAX_VALUE, maxOrcRows = Long.MIN_VALUE; + long minCsvFiles = Long.MAX_VALUE, maxCsvFiles = Long.MIN_VALUE; + long minCsvRows = Long.MAX_VALUE, maxCsvRows = Long.MIN_VALUE; + + long totalOrcFiles = 0, totalOrcRows = 0, totalCsvFiles = 0, totalCsvRows = 0; + try (ResultSet rs = statement.executeQuery()) { + while (rs.next()) { + String logicalName = rs.getString("table_name"); + String indexName = rs.getString("index_name"); + String partitionName = rs.getString("partition_name"); + long orcFiles = rs.getLong("orc_files"); + long orcRows = rs.getLong("orc_rows"); + long csvFiles = rs.getLong("csv_files"); + long csvRows = rs.getLong("csv_rows"); + + // write partition info + result.addRow(new Object[] { + logicalName, indexName, partitionName, orcFiles, orcRows, csvFiles, csvRows, ""}); + + // update min/max info + minOrcFiles = Math.min(orcFiles, minOrcFiles); + maxOrcFiles = Math.max(orcFiles, maxOrcFiles); + minOrcRows = Math.min(orcRows, minOrcRows); + maxOrcRows = Math.max(orcRows, maxOrcRows); + minCsvFiles = Math.min(csvFiles, minCsvFiles); + maxCsvFiles = Math.max(csvFiles, maxCsvFiles); + minCsvRows = Math.min(csvRows, minCsvRows); + maxCsvRows = Math.max(csvRows, maxCsvRows); + + totalOrcFiles += orcFiles; + totalOrcRows += orcRows; + totalCsvFiles += csvFiles; + totalCsvRows += csvRows; + } + + // add statistic info + result.addRow(new Object[] { + schemaAndTable.getValue(), columnarName, "all", totalOrcFiles, totalOrcRows, totalCsvFiles, + totalCsvRows, + String.format("orc files: %s - %s, orc rows: %s - %s, csv files: %s - %s, csv rows: %s - %s", + minOrcFiles, maxOrcFiles, minOrcRows, maxOrcRows, minCsvFiles, maxCsvFiles, minCsvRows, + maxCsvRows)}); + } catch (SQLException ex) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, ex, "failed to get partition information"); + } + } + } catch (SQLException ex) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, ex, "failed to get partition information"); + } + return result; + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalCheckTableHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalCheckTableHandler.java index 336dc515b..58e9e2e35 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalCheckTableHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalCheckTableHandler.java @@ -21,6 +21,8 @@ import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.model.Group; import com.alibaba.polardbx.common.model.Group.GroupType; +import com.alibaba.polardbx.common.properties.BooleanConfigParam; +import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; @@ -32,6 +34,8 @@ import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.gms.engine.FileSystemGroup; import com.alibaba.polardbx.gms.engine.FileSystemManager; +import com.alibaba.polardbx.gms.metadb.record.RecordConverter; +import com.alibaba.polardbx.gms.metadb.table.ColumnsInfoSchemaRecord; import com.alibaba.polardbx.gms.metadb.table.ColumnsRecord; import com.alibaba.polardbx.gms.metadb.table.IndexStatus; import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; @@ -67,6 +71,7 @@ import com.alibaba.polardbx.optimizer.partition.common.PartitionTableType; import com.alibaba.polardbx.optimizer.rule.TddlRuleManager; import com.alibaba.polardbx.repo.mysql.checktable.CheckTableUtil; +import com.alibaba.polardbx.repo.mysql.checktable.ColumnDiffResult; import com.alibaba.polardbx.repo.mysql.checktable.FieldDescription; import com.alibaba.polardbx.repo.mysql.checktable.TableCheckResult; import com.alibaba.polardbx.repo.mysql.checktable.TableDescription; @@ -202,6 +207,8 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { protected void doCheckTableColumn(String schemaName, String logicalTableName, ExecutionContext executionContext, ArrayResultCursor result) {
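+ // CHECK_LOGICAL_COLUMN_ORDER additionally compares the logical column order recorded in metadb with the physical order on the first shard (see the sketch after this hunk)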
+ boolean checkLogicalColumnOrder = + executionContext.getParamManager().getBoolean(ConnectionParams.CHECK_LOGICAL_COLUMN_ORDER); String tableText = String.format("%s.%s:Columns", schemaName, logicalTableName); String opText = "check"; String statusText = "status"; @@ -221,22 +228,59 @@ protected void doCheckTableColumn(String schemaName, String logicalTableName, Ex if (logicalTablePartInfo.getPartitionBy().getSubPartitionBy() != null) { firstPartSpec = firstPartSpec.getSubPartitions().get(0); } - String firstPhysicalGroupName = firstPartSpec.getLocation().getGroupKey(); - String firstPhysicalTableName = firstPartSpec.getLocation().getPhyTableName(); + String physicalGroupName = firstPartSpec.getLocation().getGroupKey(); + String physicalTableName = firstPartSpec.getLocation().getPhyTableName(); TableDescription firstPhysicalTableDesc = CheckTableUtil.getTableDescription((MyRepository) this.repo, - firstPhysicalGroupName, - firstPhysicalTableName, + physicalGroupName, + physicalTableName, false, schemaName); try (Connection connection = MetaDbUtil.getConnection()) { + boolean columnOrderSame = true; TableInfoManager tableInfoManager = new TableInfoManager(); tableInfoManager.setConnection(connection); - List<ColumnsRecord> columnsRecordList = tableInfoManager.queryColumns(schemaName, logicalTableName); + List<ColumnsRecord> logicalColumnsRecord = tableInfoManager.queryColumns(schemaName, logicalTableName); + if (checkLogicalColumnOrder) { + List<String> columnNames = + logicalColumnsRecord.stream().map(o -> o.columnName).collect(Collectors.toList()); + TGroupDataSource dataSource = + (TGroupDataSource) ExecutorContext.getContext(schemaName).getTopologyExecutor() + .getGroupExecutor(physicalGroupName).getDataSource(); + List<ColumnsInfoSchemaRecord> physicalColumnsInfo; + Map<String, Map<String, Object>> columnsJdbcExtInfo; + try (Connection phyDbConn = dataSource.getConnection()) { + String physicalDbName = buildPhysicalDbNameFromGroupName(dataSource.getDbGroupKey()); + physicalColumnsInfo = + tableInfoManager.fetchColumnInfoSchema(physicalDbName, physicalTableName, columnNames, + phyDbConn); + columnsJdbcExtInfo = tableInfoManager.fetchColumnJdbcExtInfo( + physicalDbName, physicalTableName, dataSource); + } catch (SQLException e) { + logger.error(String.format( + "error occurred while checking table columns, schemaName: %s, tableName: %s", schemaName, + logicalTableName), e); + throw GeneralUtil.nestedException(e); + } + List<ColumnsRecord> physicalColumnsRecord = + RecordConverter.convertColumn(physicalColumnsInfo, columnsJdbcExtInfo, schemaName, + logicalTableName); + ColumnDiffResult columnDiffResult = + ColumnDiffResult.diffPhysicalColumnAndLogicalColumnOrder(physicalColumnsRecord, + logicalColumnsRecord); + + if (columnDiffResult.diff()) { + statusText = "Error"; + columnOrderSame = false; + List<Object[]> columnDiffRows = columnDiffResult.convertToRows(tableText, opText, statusText); + for (Object[] columnDiffRow : columnDiffRows) { + result.addRow(columnDiffRow); + } + } + } -// Map logicalTableMetaDbDesc = new LinkedHashMap<>(); List<FieldDescription> logicalTableDesc = new ArrayList<>(); - for (ColumnsRecord columnsRecord : columnsRecordList) { + for (ColumnsRecord columnsRecord : logicalColumnsRecord) { FieldDescription fieldDescription = new FieldDescription(); fieldDescription.setFieldDefault(columnsRecord.columnDefault); fieldDescription.setFieldKey(columnsRecord.columnKey); @@ -255,9 +299,11 @@ protected void doCheckTableColumn(String schemaName, String logicalTableName, Ex } TableCheckResult checkResult = CheckTableUtil.verifyLogicalAndPhysicalMeta(firstPhysicalTableDesc, logicalTableDesc); - if (!isCheckResultNormal(checkResult)) { + if (!isCheckResultNormal(checkResult) || !columnOrderSame) { statusText = "Error"; - outputFieldCheckResults(result, tableText, opText, statusText, checkResult, isBroadCast); + if (!isCheckResultNormal(checkResult)) { + outputFieldCheckResults(result, tableText, opText, statusText, checkResult, isBroadCast); + } + } else { String msgContent = statusOK; result.addRow(new Object[] {tableText, opText, statusText, msgContent});
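ColumnDiffResult's implementation is not included in this diff; as a rough, self-contained sketch of the order comparison it performs (assumed behavior, with invented names and report format), in plain JDK Java:

    import java.util.Arrays;
    import java.util.List;

    public class ColumnOrderDiffSketch {
        // Report positions where the logical (metadb) and physical (shard) column orders disagree.
        static boolean diffOrder(List<String> logical, List<String> physical) {
            boolean differs = false;
            for (int i = 0; i < Math.max(logical.size(), physical.size()); i++) {
                String l = i < logical.size() ? logical.get(i) : "<missing>";
                String p = i < physical.size() ? physical.get(i) : "<missing>";
                if (!l.equalsIgnoreCase(p)) {
                    System.out.printf("position %d: logical=%s, physical=%s%n", i, l, p);
                    differs = true;
                }
            }
            return differs;
        }

        public static void main(String[] args) {
            // Prints the two mismatched positions (1 and 2) and returns true.
            diffOrder(Arrays.asList("id", "name", "age"), Arrays.asList("id", "age", "name"));
        }
    }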
@@ -291,7 +337,7 @@ protected void doCheckFileStorageTable(String schemaName, String logicalTableNam int stripeNum = 0; try { // check the existence of file record in oss - if (!fileSystemGroup.exists(fileMeta.getFileName())) { + if (!fileSystemGroup.exists(fileMeta.getFileName(), false)) { result.addRow(new Object[] { tableText, opText, MsgType.error.name(), "File " + fileMeta.getFileName() + " doesn't exist"}); @@ -325,7 +371,7 @@ protected void doCheckFileStorageTable(String schemaName, String logicalTableNam } // check the existence of bloom filter in oss if (!StringUtil.isEmpty(path)) { - if (!fileSystemGroup.exists(path)) { + if (!fileSystemGroup.exists(path, false)) { result.addRow(new Object[] { tableText, opText, MsgType.error.name(), "Bloom filter " + path + " doesn't exist"}); @@ -425,11 +471,11 @@ protected void doCheckForOnePartTableGsi(String schemaName, String logicalTableN } for (GsiMetaManager.GsiTableMetaBean bean : meta.getTableMeta().values()) { - if (bean.gsiMetaBean != null) { + if (bean.gsiMetaBean != null && !bean.gsiMetaBean.columnarIndex) { GsiMetaManager.GsiIndexMetaBean gsiMetaBean = bean.gsiMetaBean; - doCheckForOnePartTableTopology(schemaName, gsiMetaBean.indexTableName, executionContext, result, + doCheckForOnePartTableTopology(schemaName, gsiMetaBean.indexName, executionContext, result, logicalTableName); - doCheckTableGsiCoveringColumns(schemaName, gsiMetaBean.indexTableName, logicalTableName, + doCheckTableGsiCoveringColumns(schemaName, gsiMetaBean.indexName, logicalTableName, executionContext, result); } } @@ -574,7 +620,7 @@ protected void doCheckForOneTable(String schemaName, String appName, String logi // marks whether this table is a broadcast table isBroadcastTable = tableRule.isBroadcast(); - // We should check each group for broadcast table. + // We should check each group for broadcast table if (isBroadcastTable) { referenceGroupName = defaultDbIndex; referenceTableName = physicalTableName; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalConvertTableModeHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalConvertTableModeHandler.java new file mode 100644 index 000000000..11b387f80 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalConvertTableModeHandler.java @@ -0,0 +1,107 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.repo.mysql.handler; + +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.properties.ConnectionParams; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlCreateTableStatement; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.handler.HandlerCommon; +import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.executor.utils.DrdsToAutoTableCreationSqlUtil; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalCreateTable; +import com.alibaba.polardbx.optimizer.parse.FastsqlUtils; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.sql.SqlCreateTable; + +import static java.lang.Math.min; + +/** + * Created by zhuqiwei. + * + * @author zhuqiwei + */ +public class LogicalConvertTableModeHandler extends HandlerCommon { + private static final Logger logger = LoggerFactory.getLogger(LogicalConvertTableModeHandler.class); + + public LogicalConvertTableModeHandler(IRepository repo) { + super(repo); + } + + @Override + public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { + final LogicalCreateTable logicalCreateTable = (LogicalCreateTable) logicalPlan; + final SqlCreateTable sqlCreateTable = (SqlCreateTable) logicalCreateTable.relDdl.sqlNode; + + validate(sqlCreateTable, executionContext); + + String sourceSql = sqlCreateTable.getSourceSql(); + String autoModeSql = null; + String errorMsg = null; + boolean errorHappened = false; + try { + final int maxPhyPartitionNum = + min(executionContext.getParamManager().getInt(ConnectionParams.MAX_PHYSICAL_PARTITION_COUNT), + executionContext.getParamManager() + .getInt(ConnectionParams.CREATE_DATABASE_MAX_PARTITION_FOR_DEBUG)); + final int maxPartitionColumnNum = + executionContext.getParamManager().getInt(ConnectionParams.MAX_PARTITION_COLUMN_COUNT); + autoModeSql = DrdsToAutoTableCreationSqlUtil.convertDrdsModeCreateTableSqlToAutoModeSql(sourceSql, false, + maxPhyPartitionNum, maxPartitionColumnNum); + } catch (Exception e) { + errorHappened = true; + errorMsg = e.getMessage(); + } + + ArrayResultCursor cursor = getShowConvertTableModeResultCursor(); + cursor.addRow(new Object[] { + !errorHappened ? 
"True" : "False", + autoModeSql, + errorMsg + }); + return cursor; + } + + private ArrayResultCursor getShowConvertTableModeResultCursor() { + ArrayResultCursor cursor = new ArrayResultCursor("TABLE_CONVERSION_RESULT"); + cursor.addColumn("SUCCESS", DataTypes.StringType); + cursor.addColumn("RESULT", DataTypes.StringType); + cursor.addColumn("ERROR_MSG", DataTypes.StringType); + cursor.initMeta(); + return cursor; + } + + protected void validate(SqlCreateTable sqlCreateTable, ExecutionContext executionContext) { + String originSql = sqlCreateTable.getSourceSql(); + final MySqlCreateTableStatement createTableStatement = + (MySqlCreateTableStatement) FastsqlUtils.parseSql(originSql).get(0); + + if (createTableStatement.getPartitioning() != null) { + throw new TddlRuntimeException(ErrorCode.ERR_PARTITION_MANAGEMENT, "create table sql must be drds mode"); + } + + if (sqlCreateTable.getSourceSql() == null) { + throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, "source sql is null"); + } + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalCreateJoinGroupHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalCreateJoinGroupHandler.java index a07d28b70..ed9919ebd 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalCreateJoinGroupHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalCreateJoinGroupHandler.java @@ -22,10 +22,10 @@ import com.alibaba.polardbx.common.utils.TStringUtil; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; -import com.alibaba.polardbx.executor.cursor.Cursor; -import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; +import com.alibaba.polardbx.executor.ddl.job.factory.CreateJoinGroupJobFactory; import com.alibaba.polardbx.executor.ddl.job.validator.JoinGroupValidator; -import com.alibaba.polardbx.executor.handler.HandlerCommon; +import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; +import com.alibaba.polardbx.executor.handler.ddl.LogicalCommonDdlHandler; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.gms.locality.LocalityDesc; import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; @@ -38,10 +38,9 @@ import com.alibaba.polardbx.gms.util.MetaDbLogUtil; import com.alibaba.polardbx.gms.util.MetaDbUtil; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.rel.ddl.BaseDdlOperation; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalCreateJoinGroup; -import com.alibaba.polardbx.optimizer.locality.LocalityInfo; import com.alibaba.polardbx.optimizer.locality.LocalityInfoUtils; -import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlCreateJoinGroup; import org.apache.commons.collections.CollectionUtils; @@ -55,7 +54,7 @@ * * @author luoyanxin */ -public class LogicalCreateJoinGroupHandler extends HandlerCommon { +public class LogicalCreateJoinGroupHandler extends LogicalCommonDdlHandler { public static Logger logger = LoggerFactory.getLogger(LogicalCreateJoinGroupHandler.class); @@ -64,25 +63,41 @@ public LogicalCreateJoinGroupHandler(IRepository repo) { } @Override - public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { - LogicalCreateJoinGroup logicalCreateJoinGroup = (LogicalCreateJoinGroup) logicalPlan; + protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext 
diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalCreateJoinGroupHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalCreateJoinGroupHandler.java index a07d28b70..ed9919ebd 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalCreateJoinGroupHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalCreateJoinGroupHandler.java @@ -22,10 +22,10 @@ import com.alibaba.polardbx.common.utils.TStringUtil; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; -import com.alibaba.polardbx.executor.cursor.Cursor; -import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; +import com.alibaba.polardbx.executor.ddl.job.factory.CreateJoinGroupJobFactory; import com.alibaba.polardbx.executor.ddl.job.validator.JoinGroupValidator; -import com.alibaba.polardbx.executor.handler.HandlerCommon; +import com.alibaba.polardbx.executor.ddl.newengine.job.DdlJob; +import com.alibaba.polardbx.executor.handler.ddl.LogicalCommonDdlHandler; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.gms.locality.LocalityDesc; import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; @@ -38,10 +38,9 @@ import com.alibaba.polardbx.gms.util.MetaDbLogUtil; import com.alibaba.polardbx.gms.util.MetaDbUtil; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.rel.ddl.BaseDdlOperation; import com.alibaba.polardbx.optimizer.core.rel.ddl.LogicalCreateJoinGroup; -import com.alibaba.polardbx.optimizer.locality.LocalityInfo; import com.alibaba.polardbx.optimizer.locality.LocalityInfoUtils; -import org.apache.calcite.rel.RelNode; import org.apache.calcite.sql.SqlCreateJoinGroup; import org.apache.commons.collections.CollectionUtils; @@ -55,7 +54,7 @@ * * @author luoyanxin */ -public class LogicalCreateJoinGroupHandler extends HandlerCommon { +public class LogicalCreateJoinGroupHandler extends LogicalCommonDdlHandler { public static Logger logger = LoggerFactory.getLogger(LogicalCreateJoinGroupHandler.class); @@ -64,25 +63,41 @@ public LogicalCreateJoinGroupHandler(IRepository repo) { } @Override - public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { - LogicalCreateJoinGroup logicalCreateJoinGroup = (LogicalCreateJoinGroup) logicalPlan; + protected DdlJob buildDdlJob(BaseDdlOperation logicalDdlPlan, ExecutionContext executionContext) { + LogicalCreateJoinGroup logicalCreateJoinGroup = (LogicalCreateJoinGroup) logicalDdlPlan; + SqlCreateJoinGroup sqlNode = (SqlCreateJoinGroup) logicalCreateJoinGroup.getNativeSqlNode(); + String schemaName = logicalCreateJoinGroup.getSchemaName(); + if (schemaName == null) { + schemaName = executionContext.getSchemaName(); + } + String joinGroupName = logicalCreateJoinGroup.getTableJoinName(); + + return CreateJoinGroupJobFactory.create(schemaName, joinGroupName, sqlNode.getLocality(), + logicalCreateJoinGroup.isIfNotExists(), executionContext); + } + + @Override + protected boolean validatePlan(BaseDdlOperation logicalDdlPlan, ExecutionContext executionContext) { + LogicalCreateJoinGroup logicalCreateJoinGroup = (LogicalCreateJoinGroup) logicalDdlPlan; + SqlCreateJoinGroup sqlNode = (SqlCreateJoinGroup) logicalCreateJoinGroup.getNativeSqlNode(); + String schemaName = logicalCreateJoinGroup.getSchemaName(); + if (schemaName == null) { + schemaName = executionContext.getSchemaName(); + } + + // validate that the schema is in auto-partition mode boolean isNewPart = DbInfoManager.getInstance().isNewPartitionDb(schemaName); if (!isNewPart) { throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, "can't execute the create joingroup command in non-partitioning database"); } + + // validate the join group name String joinGroupName = logicalCreateJoinGroup.getTableJoinName(); JoinGroupValidator.validateJoinGroupName(joinGroupName); - boolean isIfNotExists = logicalCreateJoinGroup.isIfNotExists(); - String locality = sqlNode.getLocality(); - // validate the locality + String locality = sqlNode.getLocality(); if (TStringUtil.isNotBlank(locality)) { LocalityDesc desc = LocalityInfoUtils.parse(locality); @@ -106,6 +121,8 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { } } + + // validate existence + boolean isIfNotExists = logicalCreateJoinGroup.isIfNotExists(); JoinGroupInfoAccessor joinGroupInfoAccessor = new JoinGroupInfoAccessor(); try (Connection connection = MetaDbUtil.getConnection()) { joinGroupInfoAccessor.setConnection(connection); @@ -113,24 +130,17 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { joinGroupInfoAccessor.getJoinGroupInfoByName(schemaName, joinGroupName, true); if (joinGroupInfoRecord != null) { if (isIfNotExists) { - return new AffectRowCursor(new int[] {0}); + return true; } else { throw new TddlRuntimeException(ErrorCode.ERR_JOIN_GROUP_ALREADY_EXISTS, String.format("Create joingroup error, joingroup[%s] already exists", joinGroupName)); } - } else { - JoinGroupInfoRecord record = new JoinGroupInfoRecord(); - record.tableSchema = schemaName; - record.joinGroupName = joinGroupName; - record.locality = locality; - joinGroupInfoAccessor.addJoinGroup(record, isIfNotExists); } - } catch (Throwable ex) { MetaDbLogUtil.META_DB_LOG.error(ex); throw GeneralUtil.nestedException(ex); } - return new AffectRowCursor(new int[] {1}); + return false; } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalExplainHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalExplainHandler.java index a4bbfc5f6..6d493d7bf 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalExplainHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalExplainHandler.java @@ -16,22 +16,22 @@ package com.alibaba.polardbx.repo.mysql.handler; -import 
com.alibaba.polardbx.optimizer.core.rel.DirectMultiDBTableOperation; -import com.google.common.collect.Lists; import com.alibaba.polardbx.common.exception.NotSupportException; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.executor.cursor.Cursor; -import com.alibaba.polardbx.executor.cursor.impl.MultiCursorAdapter; +import com.alibaba.polardbx.executor.cursor.impl.GatherCursor; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.executor.utils.ExecUtils; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.rel.DirectMultiDBTableOperation; import com.alibaba.polardbx.optimizer.core.rel.DirectTableOperation; import com.alibaba.polardbx.optimizer.core.rel.LogicalModifyView; import com.alibaba.polardbx.optimizer.core.rel.LogicalView; import com.alibaba.polardbx.optimizer.core.rel.PhyTableOperation; import com.alibaba.polardbx.optimizer.core.rel.SingleTableOperation; import com.alibaba.polardbx.optimizer.utils.QueryConcurrencyPolicy; +import com.google.common.collect.Lists; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rex.RexDynamicParam; import org.apache.calcite.sql.SqlKind; @@ -106,6 +106,10 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { inputCursors, schema); - return MultiCursorAdapter.wrap(inputCursors); + if (inputCursors.size() == 1) { + return inputCursors.get(0); + } else { + return new GatherCursor(inputCursors, executionContext); + } } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalInfoSchemaQueryHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalInfoSchemaQueryHandler.java index 6ec5467b4..48d3473b2 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalInfoSchemaQueryHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalInfoSchemaQueryHandler.java @@ -380,11 +380,13 @@ protected List showFullTables(LogicalShow showNode, LogicalInfoSchemaC infoSchemaContext.getExecutionContext()); boolean needRemoveGsi = false; + boolean needRemoveColumnar = false; boolean needRemoveNonPublic = false; try { TableMeta tableMeta = schemaManager.getTable(tableName); needRemoveGsi = tableMeta.isGsi(); + needRemoveColumnar = tableMeta.isColumnar(); needRemoveNonPublic = tableMeta.getStatus() != TableStatus.PUBLIC; tablesAutoPartInfo.put(tableName, tableMeta.isAutoPartition()); } catch (Throwable t) { @@ -393,7 +395,7 @@ protected List showFullTables(LogicalShow showNode, LogicalInfoSchemaC } if (isRecycleBinTable || isTableWithoutPrivileges || needRemoveGsi || needRemoveNonPublic - || isTruncateTmpTable) { + || isTruncateTmpTable || needRemoveColumnar) { iter.remove(); } } @@ -1055,8 +1057,6 @@ private BigDecimal getDecimalValue(ResultSet rs, String columnName) throws SQLEx } private TGroupDataSource getGroupDataSource(String groupName, LogicalInfoSchemaContext infoSchemaContext) { - TGroupDataSource defaultGroupDataSource = null; - OptimizerContext optimizerContext = infoSchemaContext.getOptimizerContext(); if (TStringUtil.isEmpty(groupName)) { @@ -1064,9 +1064,7 @@ private TGroupDataSource getGroupDataSource(String groupName, LogicalInfoSchemaC } DataSource dataSource = infoSchemaContext.getRealRepo().getDataSource(groupName); - - defaultGroupDataSource = (TGroupDataSource) dataSource; - return 
defaultGroupDataSource; + return (TGroupDataSource) dataSource; } private TAtomDsConfDO getAtomRuntimeConfig(TGroupDataSource groupDataSource) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalInsertHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalInsertHandler.java index b18c375a6..c931e4df8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalInsertHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalInsertHandler.java @@ -28,6 +28,7 @@ import com.alibaba.polardbx.common.jdbc.Parameters; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.properties.ConnectionProperties; +import com.alibaba.polardbx.common.properties.MetricLevel; import com.alibaba.polardbx.common.utils.CaseInsensitive; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.TStringUtil; @@ -228,8 +229,9 @@ protected int executeInsert(LogicalInsert logicalInsert, ExecutionContext execut final boolean checkPrimaryKey = logicalInsert.isSimpleInsert() && executionContext.getParamManager().getBoolean(ConnectionParams.PRIMARY_KEY_CHECK) && !logicalInsert.isPushablePrimaryKeyCheck(); + TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(tableName); final boolean checkForeignKey = logicalInsert.isSimpleInsert() && - executionContext.foreignKeyChecks(); + executionContext.foreignKeyChecks() && tableMeta.hasForeignKey(); if (null != logicalInsert.getPrimaryInsertWriter() && !logicalInsert.hasHint() && executionContext .getParamManager().getBoolean(ConnectionParams.GSI_CONCURRENT_WRITE_OPTIMIZE)) { @@ -324,7 +326,7 @@ protected int executeInsert(LogicalInsert logicalInsert, ExecutionContext execut assert shardResults.size() == inputs.size(); if (!logicalInsert.hasHint() && executionContext.getParams() != null && GlobalIndexMeta.hasIndex(tableName, schemaName, executionContext)) { - executionContext.getExtraCmds().put(ConnectionProperties.MPP_METRIC_LEVEL, 1); + executionContext.getExtraCmds().put(ConnectionProperties.MPP_METRIC_LEVEL, MetricLevel.SQL.metricLevel); return executeIndex(tableName, insertSharder.getSqlTemplate(), logicalInsert, @@ -1064,7 +1066,7 @@ protected SqlNode buildSth(SqlNode sqlNode) { List> values = null; boolean firstBatch = true; - ExecutionContext insertEc = executionContext.copy(); + ExecutionContext insertEc = executionContext.copy(new Parameters(executionContext.getParamMap())); // Update duplicate key update list if necessary final Map duplicateKeyParamMapping = new HashMap<>(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalInsertIgnoreHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalInsertIgnoreHandler.java index 269f934a5..1708c07a3 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalInsertIgnoreHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalInsertIgnoreHandler.java @@ -66,12 +66,14 @@ import com.alibaba.polardbx.repo.mysql.spi.MyPhyTableModifyCursor; import com.google.common.collect.ImmutableList; import org.apache.calcite.linq4j.Ord; +import org.apache.calcite.plan.RelOptTable; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rel.type.RelDataTypeField; import 
org.apache.calcite.rex.RexNode; import org.apache.calcite.sql.SqlSelect.LockMode; import org.apache.calcite.util.Pair; +import org.apache.commons.collections.MapUtils; import java.util.ArrayList; import java.util.Arrays; @@ -149,23 +151,21 @@ protected int doExecute(LogicalInsert insert, ExecutionContext executionContext, final ExecutorContext executorContext = ExecutorContext.getContext(schemaName); final TopologyHandler topologyHandler = executorContext.getTopologyHandler(); final boolean allDnUseXDataSource = isAllDnUseXDataSource(topologyHandler); - final boolean gsiCanUseReturning = GlobalIndexMeta - .isAllGsi(insertIgnore.getTargetTables().get(0), executionContext, GlobalIndexMeta::canWrite); + final boolean gsiCanUseReturning = + isGsiCanUseReturning(insertIgnore.getTargetTables().get(0), executionContext); final boolean isColumnMultiWriting = TableColumnUtils.isModifying(schemaName, tableName, executionContext); final boolean checkPrimaryKey = executionContext.getParamManager().getBoolean(ConnectionParams.PRIMARY_KEY_CHECK); final boolean checkForeignKey = executionContext.foreignKeyChecks() && tableMeta.hasForeignKey(); - final boolean isModifyPartitionKey = - TableColumnUtils.isModifyPrimaryKey(schemaName, tableName, executionContext); // Disable returning when doing column multi-writing since we have not tested it yet boolean canUseReturning = executorContext.getStorageInfoManager().supportsReturning() && executionContext.getParamManager() .getBoolean(ConnectionParams.DML_USE_RETURNING) && allDnUseXDataSource && gsiCanUseReturning && !isBroadcast && !ComplexTaskPlanUtils.canWrite(tableMeta) && !isColumnMultiWriting - && !checkPrimaryKey && !checkForeignKey && !isModifyPartitionKey; + && !checkPrimaryKey && !checkForeignKey; if (canUseReturning) { canUseReturning = noDuplicateOrNullValues(insertIgnore, insertEc); @@ -769,6 +769,17 @@ protected List buildSelects(LogicalInsertIgnore insertIgnore, LockMode assert oc != null; final TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(tableName); + Map<String, String> columnMapping = + TableColumnUtils.getColumnMultiWriteMapping(tableMeta.getTableColumnMeta()); + if (MapUtils.isNotEmpty(columnMapping)) { + insertColumns = insertColumns.stream().map(e -> columnMapping.getOrDefault(e.toLowerCase(), e)) + .collect(Collectors.toList()); + } + + // The large block of code below is complex and hard to read. + // Roughly: it builds a mapping of value positions, a mapping of the uk positions used by the caller, and the pk values. + // Note that ukIndexOffset is the position within the uk array of the calling function. + + final Map<String, Integer> columnIndexMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); Ord.zip(insertColumns).forEach(o -> columnIndexMap.put(o.getValue(), o.getKey())); @@ -959,7 +970,15 @@ protected List> getDuplicatedValues(LogicalInsertIgnore insert, Loc List<Set<String>> currentUkSets = new ArrayList<>(); for (List<String> uk : currentUkColumnList) { final Set<String> ukSet = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); - ukSet.addAll(uk);
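+ // Map each uk column to its column-multi-write mapping name when present, so the uk set matches the mapped column names used elsewhere.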
+ for (String columnName : uk) { + ColumnMeta columnMeta = currentTableMeta.getColumnIgnoreCase(columnName); + if (columnMeta != null && columnMeta.getMappingName() != null && !columnMeta.getMappingName() + .isEmpty()) { + ukSet.add(columnMeta.getMappingName()); + } else { + ukSet.add(columnName); + } + } currentUkSets.add(ukSet); } ukSets.addAll(currentUkSets); @@ -1273,10 +1292,18 @@ private static List> getDeduplicatedParamsWithNew return results; } - protected static boolean identicalRow(List<Object> before, List<Object> after, List<ColumnMeta> rowColumnMetas) { + protected static boolean identicalRow(List<Object> before, List<Object> after, List<ColumnMeta> rowColumnMetas, + boolean checkJsonByStringCompare) { + final GroupKey beforeKey = new GroupKey(before.toArray(), rowColumnMetas); + final GroupKey afterKey = new GroupKey(after.toArray(), rowColumnMetas); + // use equalsForUpdate here; a plain equals may give wrong results when value types differ - return beforeKey.equalsForUpdate(afterKey); + return beforeKey.equalsForUpdate(afterKey, checkJsonByStringCompare); + } + + private boolean isGsiCanUseReturning(RelOptTable primary, ExecutionContext ec) { + return GlobalIndexMeta.isAllGsi(primary, ec, GlobalIndexMeta::canWrite) + && !ComplexTaskPlanUtils.isAnyUGsi(primary, ec, ComplexTaskPlanUtils::isBackfillInProgress); + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalLoadDataHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalLoadDataHandler.java index 55886225f..477767b4e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalLoadDataHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalLoadDataHandler.java @@ -16,71 +16,33 @@ package com.alibaba.polardbx.repo.mysql.handler; -import com.alibaba.polardbx.common.CrcAccumulator; -import com.alibaba.polardbx.common.Engine; -import com.alibaba.polardbx.common.OrderInvariantHasher; import com.alibaba.polardbx.common.constants.SequenceAttribute; import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.jdbc.ParameterContext; import com.alibaba.polardbx.common.jdbc.Parameters; -import com.alibaba.polardbx.common.orc.OrcBloomFilter; -import com.alibaba.polardbx.common.oss.OSSMetaLifeCycle; -import com.alibaba.polardbx.common.oss.access.OSSKey; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.properties.ConnectionProperties; +import com.alibaba.polardbx.common.properties.MetricLevel; import com.alibaba.polardbx.common.properties.PropUtil; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.Pair; -import com.alibaba.polardbx.common.utils.TStringUtil; -import com.alibaba.polardbx.common.utils.TreeMaps; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.common.utils.logger.MDC; import com.alibaba.polardbx.common.utils.memory.SizeOf; -import com.alibaba.polardbx.executor.archive.columns.ColumnProvider; -import com.alibaba.polardbx.executor.archive.columns.ColumnProviders; -import com.alibaba.polardbx.executor.archive.writer.OSSBackFillTimer; -import com.alibaba.polardbx.executor.archive.writer.OSSBackFillWriterTask; import com.alibaba.polardbx.executor.common.ExecutorContext; import com.alibaba.polardbx.executor.cursor.AbstractCursor; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; -import com.alibaba.polardbx.executor.ddl.job.meta.FileStorageBackFillAccessor; -import com.alibaba.polardbx.executor.ddl.job.meta.TableMetaChanger; -import com.alibaba.polardbx.executor.ddl.newengine.meta.SchemaEvolutionAccessorDelegate; -import com.alibaba.polardbx.executor.gsi.GsiBackfillManager; import com.alibaba.polardbx.executor.gsi.utils.Transformer; import com.alibaba.polardbx.executor.spi.IRepository; -import com.alibaba.polardbx.executor.sync.SyncManagerHelper; -import 
com.alibaba.polardbx.executor.sync.TableMetaChangeSyncAction; -import com.alibaba.polardbx.executor.sync.TablesMetaChangeForOssSyncAction; -import com.alibaba.polardbx.executor.sync.TablesMetaChangeSyncAction; import com.alibaba.polardbx.executor.utils.ExecUtils; -import com.alibaba.polardbx.executor.utils.failpoint.FailPoint; -import com.alibaba.polardbx.gms.engine.FileSystemUtils; -import com.alibaba.polardbx.gms.metadb.evolution.ColumnMappingRecord; -import com.alibaba.polardbx.gms.metadb.seq.SequenceBaseRecord; -import com.alibaba.polardbx.gms.metadb.table.ColumnMetaAccessor; -import com.alibaba.polardbx.gms.metadb.table.ColumnMetasRecord; -import com.alibaba.polardbx.gms.metadb.table.FilesAccessor; -import com.alibaba.polardbx.gms.metadb.table.FilesRecord; -import com.alibaba.polardbx.gms.metadb.table.TableInfoManager; -import com.alibaba.polardbx.gms.metadb.table.TablesAccessor; -import com.alibaba.polardbx.gms.util.MetaDbUtil; import com.alibaba.polardbx.optimizer.OptimizerContext; -import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; import com.alibaba.polardbx.optimizer.config.table.GlobalIndexMeta; -import com.alibaba.polardbx.optimizer.config.table.OrcMetaUtils; -import com.alibaba.polardbx.optimizer.config.table.PolarDBXOrcSchema; -import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.context.LoadDataContext; import com.alibaba.polardbx.optimizer.core.CursorMeta; -import com.alibaba.polardbx.optimizer.core.datatype.DataType; -import com.alibaba.polardbx.optimizer.core.field.SessionProperties; -import com.alibaba.polardbx.optimizer.core.function.calc.scalar.filter.In; -import com.alibaba.polardbx.optimizer.core.function.calc.scalar.string.Ord; import com.alibaba.polardbx.optimizer.core.rel.LogicalInsert; import com.alibaba.polardbx.optimizer.core.rel.LogicalInsert.HandlerParams; import com.alibaba.polardbx.optimizer.core.rel.PhyTableInsertSharder; @@ -89,76 +51,31 @@ import com.alibaba.polardbx.optimizer.core.rel.SimpleShardProcessor; import com.alibaba.polardbx.optimizer.core.rel.dml.writer.InsertWriter; import com.alibaba.polardbx.optimizer.core.row.ArrayRow; -import com.alibaba.polardbx.optimizer.core.row.OssLoadDataRow; import com.alibaba.polardbx.optimizer.core.row.Row; -import com.alibaba.polardbx.optimizer.parse.FastSqlParserException; import com.alibaba.polardbx.optimizer.partition.PartitionInfo; import com.alibaba.polardbx.optimizer.partition.PartitionInfoManager; -import com.alibaba.polardbx.optimizer.partition.pruning.PhysicalPartitionInfo; import com.alibaba.polardbx.optimizer.rule.TddlRuleManager; import com.alibaba.polardbx.optimizer.sequence.ISequenceManager; import com.alibaba.polardbx.optimizer.sequence.SequenceManagerProxy; import com.alibaba.polardbx.optimizer.utils.CalciteUtils; import com.alibaba.polardbx.optimizer.utils.IDistributedTransaction; -import com.alibaba.polardbx.optimizer.utils.ITimestampOracle; import com.alibaba.polardbx.optimizer.utils.PhyTableOperationUtil; -import com.aliyun.oss.OSS; -import com.google.common.base.Preconditions; import com.google.common.base.Splitter; import com.google.common.collect.Lists; import com.google.common.util.concurrent.ListenableFuture; -import lombok.Data; -import org.apache.calcite.avatica.Meta; import org.apache.calcite.rel.RelNode; -import org.apache.commons.collections.ArrayStack; import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.conf.Configuration; -import 
org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.ql.exec.vector.ColumnVector; -import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector; -import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; -import org.apache.orc.OrcConf; -import org.apache.orc.OrcFile; -import org.apache.orc.Reader; -import org.apache.orc.StripeInformation; -import org.apache.orc.TypeDescription; -import org.apache.orc.Writer; -import org.apache.orc.impl.RecordReaderImpl; -import org.apache.orc.impl.WriterImpl; -import org.jetbrains.annotations.Blocking; -import javax.validation.constraints.Null; -import java.io.File; -import java.io.FileOutputStream; -import java.io.FileReader; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.sql.Connection; -import java.text.SimpleDateFormat; import java.util.ArrayList; -import java.util.Calendar; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.Set; -import java.util.UUID; -import java.util.Vector; -import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.concurrent.FutureTask; import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReferenceArray; -import java.util.stream.Collectors; -import static com.alibaba.polardbx.common.constants.SequenceAttribute.AUTO_SEQ_PREFIX; import static com.alibaba.polardbx.optimizer.context.LoadDataContext.END; -import static java.lang.Math.min; public class LogicalLoadDataHandler extends LogicalInsertHandler { @@ -166,9 +83,6 @@ public class LogicalLoadDataHandler extends LogicalInsertHandler { public PropUtil.LOAD_NULL_MODE null_mode; - private final Object UPLOAD_END = new Object(); - private final Object FLUSH_END = new Object(); - public LogicalLoadDataHandler(IRepository repo) { super(repo); } @@ -195,14 +109,6 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { PhyTableOperationUtil.enableIntraGroupParallelism(schemaName, executionContext); int affectRows; - // check if load data to OSS engine - String logicalTableName = logicalInsert.getLogicalTableName(); - TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(logicalTableName); - if (tableMeta.getEngine() == Engine.OSS) { - affectRows = executeOssLoadData(logicalInsert, executionContext, tableMeta); - return new AffectRowCursor(affectRows); - } - long oldLastInsertId = executionContext.getConnection().getLastInsertId(); try { affectRows = executeInsert(logicalInsert, executionContext, handlerParams); @@ -240,146 +146,6 @@ private void buildSimpleShard(LogicalInsert logicalInsert, LoadDataContext loadD } } - protected int executeOssLoadData(LogicalInsert logicalInsert, ExecutionContext executionContext, - TableMeta tableMeta) { - int affectRows = 0; - LoadDataContext loadDataContext = executionContext.getLoadDataContext(); - - // set use batch - String schemaName = executionContext.getSchemaName(); - final TddlRuleManager or = OptimizerContext.getContext(schemaName).getRuleManager(); - final boolean inSingleDb = or.isTableInSingleDb(logicalInsert.getLogicalTableName()); - boolean useBatchMode = - executionContext.getParamManager().getBoolean(ConnectionParams.LOAD_DATA_USE_BATCH_MODE); - if (inSingleDb) { - useBatchMode = false; 
- } - loadDataContext.setUseBatch(useBatchMode); - - int producerNum = executionContext.getParamManager().getInt(ConnectionParams.OSS_LOAD_DATA_PRODUCERS); - int maxConsumerNum = executionContext.getParamManager().getInt(ConnectionParams.OSS_LOAD_DATA_MAX_CONSUMERS); - int flushNum = executionContext.getParamManager().getInt(ConnectionParams.OSS_LOAD_DATA_FLUSHERS); - int uploadNum = executionContext.getParamManager().getInt(ConnectionParams.OSS_LOAD_DATA_UPLOADERS); - - int physicalTableNum = tableMeta.getPartitionInfo().getAllPhysicalPartitionCount(); - Map> physicalPartitionInfos = - tableMeta.getPartitionInfo().getPhysicalPartitionTopology(new ArrayList<>()); - - int consumerNum = Math.min(maxConsumerNum, physicalTableNum); - flushNum = Math.max(consumerNum, flushNum); - - List producers = new ArrayList<>(); - Map consumers = new HashMap<>(); - List flushers = new ArrayList<>(); - List uploaders = new ArrayList<>(); - - Map consumerMap = new HashMap<>(); - Map> consumeQueues = new HashMap<>(); - Map> flushQueues = new HashMap<>(); - BlockingQueue uploadBlockingQueue = new LinkedBlockingQueue<>(); - - // build consumers - for (List infos : physicalPartitionInfos.values()) { - for (PhysicalPartitionInfo info : infos) { - int consumeId = info.getPartBitSetIdx() % consumerNum; - consumerMap.put(info.getPhyTable(), consumeId); - if (!consumeQueues.containsKey(consumeId)) { - BlockingQueue consumeBlockingQueue = new LinkedBlockingQueue<>(); - consumeQueues.put(consumeId, consumeBlockingQueue); - int flushId = consumeId % flushNum; - if (!flushQueues.containsKey(flushId)) { - BlockingQueue flushBlockingQueue = new LinkedBlockingQueue<>(); - flushQueues.put(flushId, flushBlockingQueue); - } - OssLoadDataConsumer ossLoadDataConsumer = - new OssLoadDataConsumer(loadDataContext, executionContext, consumeBlockingQueue, tableMeta, - uploadBlockingQueue, flushQueues.get(flushId), info); - consumers.put(consumeId, ossLoadDataConsumer); - } else { - consumers.get(consumeId).addPhyTableContext(info); - } - } - } - - // build producers and run - for (int i = 0; i < producerNum; i++) { - OssLoadDataProducer ossLoadDataProducer = - new OssLoadDataProducer(loadDataContext, executionContext, logicalInsert, - consumeQueues, consumerMap); - ossLoadDataProducer.doProduce(); - producers.add(ossLoadDataProducer); - } - - // run consumers - for (OssLoadDataConsumer consumer : consumers.values()) { - consumer.doConsume(); - } - - // run flushers - for (int i = 0; i < flushNum && flushQueues.containsKey(i); i++) { - OssLoadDataFlusher ossLoadDataFlusher = new OssLoadDataFlusher(executionContext, flushQueues.get(i)); - ossLoadDataFlusher.doFlush(); - flushers.add(ossLoadDataFlusher); - } - - // build uploaders and run - for (int i = 0; i < uploadNum; i++) { - OssLoadDataUploader ossLoadDataUploader = - new OssLoadDataUploader(loadDataContext, executionContext, uploadBlockingQueue, tableMeta); - ossLoadDataUploader.doUpload(); - uploaders.add(ossLoadDataUploader); - } - - try { - // wait produce finish - for (OssLoadDataProducer producer : producers) { - producer.produceDoneFuture.get(); - } - - for (OssLoadDataConsumer consumer : consumers.values()) { - consumer.ossLoadDataQueueAddEND(); - } - - // wait consume finish - for (OssLoadDataConsumer consumer : consumers.values()) { - affectRows += consumer.consumeDoneFuture.get(); - } - - for (BlockingQueue flushBlockingQueue : flushQueues.values()) { - flushBlockingQueue.add(FLUSH_END); - } - - // wait flush finish - for (OssLoadDataFlusher flusher : flushers) { - 
flusher.flushDoneFuture.get(); - } - uploadBlockingQueue.add(UPLOAD_END); - - // wait flush finish - for (OssLoadDataUploader uploader : uploaders) { - uploader.uploadDoneFuture.get(); - } - - OssLoadDataPersistAndSync ossLoadDataPersistAndSync = - new OssLoadDataPersistAndSync(executionContext, uploaders, tableMeta); - // write to metaDb - ossLoadDataPersistAndSync.writeMetaDbTransaction(); - - // sync metadata - ossLoadDataPersistAndSync.tableSync(); - } catch (Throwable t) { - loadDataContext.finish(t); - for (OssLoadDataConsumer consumer : consumers.values()) { - consumer.ossLoadDataQueueClear(); - consumer.ossLoadDataQueueAddEND(); - } - uploadBlockingQueue.add(UPLOAD_END); - throw new TddlNestableRuntimeException(t); - } - - return affectRows; - } - @Override protected int executeInsert(LogicalInsert logicalInsert, ExecutionContext executionContext, HandlerParams handlerParams) { @@ -496,7 +262,7 @@ protected int executeInsert(LogicalInsert logicalInsert, ExecutionContext execut public Pair, List> concurrentCursors( ExecutionContext executionContext, LogicalInsert logicalInsert) { - executionContext.getExtraCmds().put(ConnectionProperties.MPP_METRIC_LEVEL, 0); + executionContext.getExtraCmds().put(ConnectionProperties.MPP_METRIC_LEVEL, MetricLevel.DEFAULT.metricLevel); String schemaName = logicalInsert.getSchemaName(); if (StringUtils.isEmpty(schemaName)) { schemaName = executionContext.getSchemaName(); @@ -533,7 +299,7 @@ public Pair, List> concurrentCursors } } else { if (useTrans) { - realParallism = min(groupConnIdSet.size(), realParallism); + realParallism = Math.min(groupConnIdSet.size(), realParallism); } } @@ -644,6 +410,7 @@ private void takePhyOpAndThenExec(Map mdcContext) { currentCusor = ExecutorContext.getContext( executionContext.getSchemaName()).getTopologyExecutor().execByExecPlanNode( (PhyTableOperation) object, executionContext); + int num = ExecUtils.getAffectRowsByCursor(currentCusor); affectNum += num; @@ -851,943 +618,4 @@ private List getAllRelNode( } - public class OssLoadDataProducer { - private final Logger - logger = LoggerFactory.getLogger(OssLoadDataProducer.class); - - private LoadDataContext loadDataContext; - private ExecutionContext executionContext; - private LogicalInsert logicalInsert; - private Map> consumeQueues; - - private Map consumerMap; - public ListenableFuture produceDoneFuture; - - public OssLoadDataProducer(LoadDataContext loadDataContext, - ExecutionContext executionContext, LogicalInsert logicalInsert, - Map> consumeQueues, - Map consumerMap) { - this.loadDataContext = loadDataContext; - this.executionContext = executionContext.copy(); - this.logicalInsert = logicalInsert; - this.consumeQueues = consumeQueues; - this.consumerMap = consumerMap; - } - - public void doProduce() { - - final Map mdcContext = MDC.getCopyOfContextMap(); - this.produceDoneFuture = executionContext.getExecutorService().submitListenableFuture( - executionContext.getSchemaName(), executionContext.getTraceId(), -1, - () -> { - MDC.setContextMap(mdcContext); - return produce(); - }, executionContext.getRuntimeStatistics()); - } - - public Object produce() { - try { - while (true) { - - if (loadDataContext.getThrowable() != null) { - throw loadDataContext.getThrowable(); - } - - List lines = loadDataContext.getParameters().take(); - if (lines == END) { - loadDataContext.getParameters().add(END); - break; - } - List> batchParams = new ArrayList<>(); - long totalMemory = 0L; - for (int i = 0; i < lines.size(); i++) { - String line = lines.get(i); - totalMemory += 
SizeOf.sizeOfCharArray(line.length()); - List fields = - Lists.newArrayList(Splitter.on(loadDataContext.getFieldTerminatedBy()).split(line)); - if (loadDataContext.getAutoFillColumnIndex() != -1) { - fields.add(loadDataContext.getAutoFillColumnIndex(), - loadDataContext.isInSingleDb() ? "NULL" : - SequenceManagerProxy.getInstance().nextValue(executionContext.getSchemaName(), - ISequenceManager.AUTO_SEQ_PREFIX + loadDataContext.getTableName()).toString()); - } - Map parameterContexts = Transformer.buildColumnParam( - loadDataContext.getMetaList(), fields, loadDataContext.getCharset(), null_mode); - batchParams.add(parameterContexts); - } - if (batchParams.size() > 0) { - Parameters parameters = new Parameters(); - parameters.setBatchParams(batchParams); - executionContext.setParams(parameters); - List allPhyPlan = getAllRelNode( - logicalInsert, executionContext); - - for (RelNode relNode : allPhyPlan) { - PhyTableOperation phyTableOperation = (PhyTableOperation) relNode; - int hashId = consumerMap.get(phyTableOperation.getTableNames().get(0).get(0)); - if (phyTableOperation.getBatchParameters() != null) { - consumeQueues.get(hashId).add(relNode); - } - } - consumeQueues.get(0).add(totalMemory); - } - } - } catch (Throwable t) { - logger.error("OssLoadDataProducer failed", t); - - try { - loadDataContext.finish(t); - } catch (Throwable ignore) { - //ignore - } - if (consumeQueues != null) { - consumeQueues.values().stream().forEach(q -> { - q.clear(); - q.add(END); - }); - } - } - return null; - } - - private List getAllRelNode( - LogicalInsert logicalInsert, - ExecutionContext executionContext) { - List allPhyPlan = new ArrayList<>(); - - if (null != logicalInsert.getPrimaryInsertWriter()) { - // Get plan for primary - final InsertWriter primaryWriter = logicalInsert.getPrimaryInsertWriter(); - List inputs = primaryWriter.getInput(executionContext); - - allPhyPlan.addAll(inputs); - } else { - - List shardResults = new ArrayList<>(); - PhyTableInsertSharder insertSharder = new PhyTableInsertSharder(logicalInsert, - executionContext.getParams(), - SequenceAttribute.getAutoValueOnZero(executionContext.getSqlMode())); - allPhyPlan.addAll(logicalInsert.getInput(insertSharder, shardResults, executionContext)); - } - return allPhyPlan; - } - - } - - public class OssLoadDataConsumer { - @Data - public class UploadContext { - private OSSKey ossKey; - private String localFilePath; - private long fileSize; - private final String physicalSchema; - private final String physicalTable; - private final String physicalPartitionName; - public FilesRecord filesRecord; - - public UploadContext(PhyTableContext phyTableContext, int fileIndex) { - this.localFilePath = phyTableContext.localFilePaths.get(fileIndex); - this.ossKey = phyTableContext.ossKeys.get(fileIndex); - - this.filesRecord = phyTableContext.filesRecords.get(fileIndex); - this.physicalSchema = phyTableContext.physicalSchema; - this.physicalTable = phyTableContext.physicalTable; - this.physicalPartitionName = phyTableContext.physicalPartitionName; - } - - public void setFileSize(Long fileSize) { - this.fileSize = fileSize; - } - } - - public class FlushContext { - private boolean ifUpload; - private UploadContext uploadContext; - private VectorizedRowBatch batch; - private Writer writer; - - public FlushContext(VectorizedRowBatch batch, Writer writer, boolean ifUpload) { - this.batch = batch; - this.writer = writer; - this.ifUpload = ifUpload; - } - - public void setUploadContext( - UploadContext uploadContext) { - this.uploadContext = 
uploadContext; - } - - public ByteBuffer getSerializedTail(String localFilePath) { - try { - Configuration conf = new Configuration(); - Reader reader = OrcFile.createReader(new Path(localFilePath), - OrcFile.readerOptions(conf)); - return reader.getSerializedFileFooter(); - } catch (IOException e) { - e.printStackTrace(); - } - return null; - } - - public void flush() { - try { - writer.addRowBatch(batch); - if (ifUpload) { - writer.close(); - - // update fileRecord after writer close - File localFile = new File(uploadContext.getLocalFilePath()); - long fileSize = localFile.length(); - - ByteBuffer tailBuffer = this.getSerializedTail(uploadContext.getLocalFilePath()); - byte[] fileMeta = new byte[tailBuffer.remaining()]; - tailBuffer.get(fileMeta); - - uploadContext.getFilesRecord().setExtentSize(fileSize); - uploadContext.getFilesRecord().setFileMeta(fileMeta); - try { - ossUploadQueue.put(uploadContext); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - } - } catch (IOException e) { - throw GeneralUtil.nestedException(e); - } - } - } - - public class PhyTableContext { - private final String physicalSchema; - private final String physicalTable; - private String physicalPartitionName; - public List filesRecords; - private OrderInvariantHasher orderInvariantHasher; - private volatile int currentFileIndex; - private List ossKeys; - private List localFilePaths; - private List tableRows; - private List fileChecksum; - private Writer writer; - private VectorizedRowBatch batch; - private List lowerRows; - private List upperRows; - private long totalRows; - - public PhyTableContext(PhysicalPartitionInfo info) { - this.lowerRows = new ArrayList<>(); - this.upperRows = new ArrayList<>(); - - this.currentFileIndex = 0; - this.localFilePaths = new ArrayList<>(); - this.ossKeys = new ArrayList<>(); - this.tableRows = new ArrayList<>(); - this.fileChecksum = new ArrayList<>(); - this.filesRecords = new ArrayList<>(); - - this.batch = schema.createRowBatch(indexStride); - - this.physicalSchema = info.getGroupKey(); - this.physicalTable = info.getPhyTable(); - initNextFile(); - } - - public void initNextFile() { - if (this.currentFileIndex == this.localFilePaths.size()) { - final String uniqueId = UUID.randomUUID().toString(); - String currentLocalFilePath = - OSSKey.localFilePath(physicalSchema, physicalTable, uniqueId); - - this.localFilePaths.add(currentLocalFilePath); - - // todo : get physicalPartitionName, temporally empty - OSSKey currentOssKey = - TStringUtil.isEmpty(physicalPartitionName) - ? 
OSSKey.createTableFileOSSKey(physicalSchema, physicalTable, uniqueId) - : OSSKey.createTableFileOSSKey(physicalSchema, physicalTable, physicalPartitionName, - uniqueId); - - this.ossKeys.add(currentOssKey); - File tmpFile = new File(currentLocalFilePath); - if (tmpFile.exists()) { - tmpFile.delete(); - } - - // create File Record for metaDb - createFileMeta(currentFileIndex); - - if (this.localFilePaths.size() == 1) { - lowerRows.add(null); - } else { - lowerRows.add(upperRows.get(upperRows.size() - 1)); - } - upperRows.add(null); - - this.orderInvariantHasher = new OrderInvariantHasher(); - Path path = new Path(currentLocalFilePath); - try { - path.getFileSystem(conf).setWriteChecksum(false); - path.getFileSystem(conf).setVerifyChecksum(false); - OrcFile.WriterOptions opts = OrcFile.writerOptions(conf).setSchema(schema); - writer = new WriterImpl(path.getFileSystem(opts.getConfiguration()), path, opts); - } catch (IOException e) { - throw GeneralUtil.nestedException(e); - } - - this.totalRows = 0L; - } - } - - private void createFileMeta(int fileIndex) { - // construct files record - FilesRecord filesRecord = new FilesRecord(); - filesRecord.fileName = this.getOssKey(fileIndex).toString(); - filesRecord.fileType = this.getOssKey(fileIndex).getFileType().toString(); - filesRecord.fileMeta = new byte[] {}; - filesRecord.tableCatalog = ""; - filesRecord.tableSchema = physicalSchema; - filesRecord.tableName = physicalTable; - filesRecord.engine = engine.name(); - filesRecord.status = ""; - filesRecord.lifeCycle = OSSMetaLifeCycle.CREATING.ordinal(); - filesRecord.localPath = this.localFilePaths.get(fileIndex); - filesRecord.logicalSchemaName = logicalSchema; - filesRecord.logicalTableName = logicalTable; - filesRecord.localPartitionName = physicalPartitionName; - - // write to db later with all other info - filesRecords.add(filesRecord); - } - - public OSSKey getOssKey(int fileIndex) { - return this.ossKeys.get(fileIndex); - } - - public void prepareForFlush(int fileIndex) { - this.currentFileIndex++; - - this.fileChecksum.add(orderInvariantHasher.getResult()); - - this.tableRows.add(this.totalRows); - totalRows = 0; - - FilesRecord filesRecord = filesRecords.get(fileIndex); - - // update filesRecord - filesRecord.lifeCycle = OSSMetaLifeCycle.READY.ordinal(); - filesRecord.setTableRows(this.tableRows.get(fileIndex)); - filesRecord.setFileHash(this.fileChecksum.get(fileIndex)); - } - - public void consume(PhyTableOperation phyOp) { - for (Map batchParam : phyOp.getBatchParameters()) { - this.totalRows++; - - // build row from phyOp - OssLoadDataRow row = new OssLoadDataRow(batchParam); - - // fill row data to batch - int rowNumber = batch.size++; - CrcAccumulator accumulator = new CrcAccumulator(); - for (int columnId = 1; columnId < redundantId; columnId++) { - ColumnProvider columnProvider = columnProviders.get(columnId - 1); - ColumnVector columnVector = batch.cols[columnId - 1]; - DataType dataType = dataTypes.get(columnId - 1); - - int redundantColumnId = redundantMap[columnId - 1]; - if (redundantColumnId == -1) { - // data convert - columnProvider - .putRow(columnVector, rowNumber, row, columnId, dataType, - sessionProperties.getTimezone(), Optional.ofNullable(accumulator)); - } else { - // data convert with redundant sort key - ColumnVector redundantColumnVector = batch.cols[redundantColumnId - 1]; - columnProvider.putRow(columnVector, redundantColumnVector, rowNumber, row, columnId, - dataType, - sessionProperties.getTimezone(), Optional.ofNullable(accumulator)); - } - } - - // Merge 
the crc result of the last row. - long crcResult = accumulator.getResult(); - orderInvariantHasher.add(crcResult); - accumulator.reset(); - - // flush the batch to disk - if (batch.size == batch.getMaxSize()) { - boolean ifUpload = this.totalRows >= maxRowsPerFile; - - FlushContext flushContext = - new FlushContext(batch, writer, ifUpload); - if (ifUpload) { - int fileIndex = this.currentFileIndex; - totalEffectRows += this.totalRows; - prepareForFlush(fileIndex); - UploadContext uploadContext = new UploadContext(this, fileIndex); - flushContext.setUploadContext(uploadContext); - - initNextFile(); - } - ossFlushQueue.add(flushContext); - - this.batch = schema.createRowBatch(indexStride); - } - } - } - - public void consumeFinish() throws Exception { - try { - // flush the tail batch and count all effected rows (loaded rows) - if (this.totalRows != 0) { - FlushContext flushContext = - new FlushContext(batch, writer, true); - int fileIndex = this.currentFileIndex; - totalEffectRows += this.totalRows; - prepareForFlush(fileIndex); - UploadContext uploadContext = new UploadContext(this, fileIndex); - flushContext.setUploadContext(uploadContext); - ossFlushQueue.add(flushContext); - } - } catch (Throwable t) { - throw new Exception(t); - } - } - } - - private final Logger - logger = LoggerFactory.getLogger(OssLoadDataConsumer.class); - private final TableMeta tableMeta; - private final Engine engine; - private final Configuration conf; - private PolarDBXOrcSchema polarDBXOrcSchema; - private final TypeDescription schema; - private Map phyTableContextMap; - private List columnMetas; - private List dataTypes; - private final List exceptionsWhenClose = new ArrayList<>(); - private LoadDataContext loadDataContext; - private ExecutionContext executionContext; - private BlockingQueue ossLoadDataQueue; - private BlockingQueue ossUploadQueue; - private BlockingQueue ossFlushQueue; - public ListenableFuture consumeDoneFuture; - private List columnProviders; - private long maxRowsPerFile; - String logicalSchema; - String logicalTable; - int indexStride; - String versionName; - private int redundantId; - private int[] redundantMap; - SessionProperties sessionProperties; - int totalEffectRows; - - public OssLoadDataConsumer(LoadDataContext loadDataContext, - ExecutionContext executionContext, BlockingQueue queue, - TableMeta tableMeta, - BlockingQueue uploadBlockingQueue, - BlockingQueue flushBlockingQueue, - PhysicalPartitionInfo info) { - this.loadDataContext = loadDataContext; - this.executionContext = executionContext.copy(); - this.ossLoadDataQueue = queue; - this.tableMeta = tableMeta; - this.maxRowsPerFile = executionContext.getParamManager().getLong(ConnectionParams.OSS_MAX_ROWS_PER_FILE); - this.ossUploadQueue = uploadBlockingQueue; - this.ossFlushQueue = flushBlockingQueue; - - this.engine = this.tableMeta.getEngine(); - this.logicalSchema = this.tableMeta.getSchemaName(); - this.logicalTable = this.tableMeta.getTableName(); - - sessionProperties = SessionProperties.fromExecutionContext(executionContext); - - totalEffectRows = 0; - - // read field_id - Map columnToFieldIdMap = new SchemaEvolutionAccessorDelegate>() { - @Override - protected Map invoke() { - Map map = TreeMaps.caseInsensitiveMap(); - for (ColumnMappingRecord record : - columnMappingAccessor.querySchemaTable(logicalSchema, logicalTable)) { - map.put(record.getColumnName(), record.getFieldIdString()); - } - return map; - } - }.execute(); - - // build orc schema - this.polarDBXOrcSchema = - OrcMetaUtils.buildPolarDBXOrcSchema(tableMeta,
Optional.of(columnToFieldIdMap), false); - - this.schema = polarDBXOrcSchema.getSchema(); - // data config - this.conf = OrcMetaUtils.getConfiguration(executionContext, polarDBXOrcSchema); - - this.indexStride = (int) conf.getLong("orc.row.index.stride", 1000); - this.versionName = OrcConf.WRITE_FORMAT.getString(conf); - - this.redundantId = polarDBXOrcSchema.getRedundantId(); - this.redundantMap = polarDBXOrcSchema.getRedundantMap(); - - this.columnProviders = ColumnProviders.getColumnProviders(this.polarDBXOrcSchema); - - this.columnMetas = polarDBXOrcSchema.getColumnMetas(); - - this.dataTypes = columnMetas.stream().map(ColumnMeta::getDataType).collect(Collectors.toList()); - - phyTableContextMap = new HashMap<>(); - - PhyTableContext phyTableContext = new PhyTableContext(info); - - phyTableContextMap.put(info.getPhyTable(), phyTableContext); - } - - public void addPhyTableContext(PhysicalPartitionInfo info) { - PhyTableContext phyTableContext = new PhyTableContext(info); - phyTableContextMap.put(info.getPhyTable(), phyTableContext); - } - - public void doConsume() { - this.consumeDoneFuture = executionContext.getExecutorService().submitListenableFuture( - executionContext.getSchemaName(), executionContext.getTraceId(), -1, - this::consume, executionContext.getRuntimeStatistics()); - } - - private int consume() { - try { - while (true) { - if (loadDataContext.getThrowable() != null) { - throw loadDataContext.getThrowable(); - } - - Object object = ossLoadDataQueue.take(); - if (object == END) { - ossLoadDataQueue.add(END); - break; - } - if (object instanceof Long) { - loadDataContext.getDataCacheManager().releaseMemory((Long) object); - continue; - } - - PhyTableOperation phyOp = (PhyTableOperation) object; - String phyTableName = phyOp.getTableNames().get(0).get(0); - PhyTableContext phyTableContext = phyTableContextMap.get(phyTableName); - - phyTableContext.consume(phyOp); - } - for (PhyTableContext phyTableContext : phyTableContextMap.values()) { - phyTableContext.consumeFinish(); - } - return totalEffectRows; - } catch (Throwable t) { - exceptionsWhenClose.add(t); - logger.error("OssLoadDataConsumer consume failed", t); - try { - loadDataContext.finish(t); - } catch (Throwable ignore) { - // ignore - } - if (ossLoadDataQueue != null) { - ossLoadDataQueue.clear(); - ossLoadDataQueue.add(END); - } - return -1; - } - } - - public void ossLoadDataQueueAddEND() { - ossLoadDataQueue.add(END); - } - - public void ossLoadDataQueueClear() { - ossLoadDataQueue.clear(); - } - } - - public class OssLoadDataFlusher { - private final Logger - logger = LoggerFactory.getLogger(OssLoadDataFlusher.class); - private ExecutionContext executionContext; - private BlockingQueue ossFlushQueue; - public ListenableFuture flushDoneFuture; - - public OssLoadDataFlusher(ExecutionContext executionContext, BlockingQueue ossFlushQueue) { - this.executionContext = executionContext; - this.ossFlushQueue = ossFlushQueue; - } - - public void doFlush() { - this.flushDoneFuture = executionContext.getExecutorService().submitListenableFuture( - executionContext.getSchemaName(), executionContext.getTraceId(), -1, - this::flush, executionContext.getRuntimeStatistics()); - } - - private boolean flush() { - try { - while (true) { - Object object = ossFlushQueue.take(); - - if (object == FLUSH_END) { - ossFlushQueue.add(FLUSH_END); - break; - } - - OssLoadDataConsumer.FlushContext flushContext = (OssLoadDataConsumer.FlushContext) object; - - flushContext.flush(); - } - return true; - } catch (Throwable t) { - return false; - }
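- // Note: a failed flush is not rethrown here; it surfaces only as the Boolean false returned through flushDoneFuture, which the caller must check.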
- } - - } - - public class OssLoadDataUploader { - private final Logger - logger = LoggerFactory.getLogger(OssLoadDataUploader.class); - private final TableMeta tableMeta; - private final Engine engine; - private final Configuration conf; - private List columnMetasRecords; - public List filesRecords; - private TypeDescription bfSchema; - private List bfColumnProviders; - private ExecutionContext executionContext; - private String logicalSchema; - private String logicalTable; - private BlockingQueue ossUploadQueue; - final private boolean removeTmpFiles; - public ListenableFuture uploadDoneFuture; - private double fpp; - - public OssLoadDataUploader(LoadDataContext loadDataContext, - ExecutionContext executionContext, - BlockingQueue uploadQueue, - TableMeta tableMeta) { - this.executionContext = executionContext.copy(); - this.ossUploadQueue = uploadQueue; - this.tableMeta = tableMeta; - - this.engine = this.tableMeta.getEngine(); - this.logicalSchema = this.tableMeta.getSchemaName(); - this.logicalTable = this.tableMeta.getTableName(); - - // read field_id - Map columnToFieldIdMap = new SchemaEvolutionAccessorDelegate>() { - @Override - protected Map invoke() { - Map map = TreeMaps.caseInsensitiveMap(); - for (ColumnMappingRecord record : - columnMappingAccessor.querySchemaTable(logicalSchema, logicalTable)) { - map.put(record.getColumnName(), record.getFieldIdString()); - } - return map; - } - }.execute(); - - // build orc schema - PolarDBXOrcSchema polarDBXOrcSchema = - OrcMetaUtils.buildPolarDBXOrcSchema(tableMeta, Optional.of(columnToFieldIdMap), false); - - this.bfColumnProviders = ColumnProviders.getBfColumnProviders(polarDBXOrcSchema); - - // data config - this.conf = OrcMetaUtils.getConfiguration(executionContext, polarDBXOrcSchema); - - this.bfSchema = polarDBXOrcSchema.getBfSchema(); - - this.filesRecords = new ArrayList<>(); - this.columnMetasRecords = new ArrayList<>(); - - this.removeTmpFiles = true; - - this.fpp = conf.getDouble("orc.bloom.filter.fpp", 0.01D); - } - - public void doUpload() { - this.uploadDoneFuture = executionContext.getExecutorService().submitListenableFuture( - executionContext.getSchemaName(), executionContext.getTraceId(), -1, - this::upload, executionContext.getRuntimeStatistics()); - } - - private boolean upload() { - try { - while (true) { - Object object = ossUploadQueue.take(); - - if (object == UPLOAD_END) { - ossUploadQueue.add(UPLOAD_END); - break; - } - - OssLoadDataConsumer.UploadContext uploadContext = (OssLoadDataConsumer.UploadContext) object; - - filesRecords.add(uploadContext.filesRecord); - - uploadToOss(uploadContext); - - if (shouldPutBloomFilter()) { - putBloomFilter(uploadContext); - } - - // delete file from local. 
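- // The ORC file and its bloom filter have already been uploaded by uploadToOss/putBloomFilter above, so deleting the local temporary copy only reclaims disk space.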
- if (removeTmpFiles) { - File tmpFile = new File(uploadContext.getLocalFilePath()); - if (tmpFile.exists()) { - tmpFile.delete(); - } - } - } - return true; - } catch (Throwable t) { - return false; - } - } - - public void uploadToOss(OssLoadDataConsumer.UploadContext writerContext) { - try { - String localFilePath = writerContext.getLocalFilePath(); - OSSKey ossKey = writerContext.getOssKey(); - - File localFile = new File(localFilePath); - writerContext.setFileSize(localFile.length()); - logger.info("orc generation done: " + localFilePath); - logger.info("file size(in bytes): " + writerContext.getFileSize()); - - FileSystemUtils.writeFile(localFile, ossKey.toString(), this.engine); - logger.info("file upload done: " + localFilePath); - } catch (Exception e) { - throw GeneralUtil.nestedException(e); - } - } - - private boolean shouldPutBloomFilter() { - return this.bfSchema != null && !this.bfSchema.getChildren().isEmpty(); - } - - public void putBloomFilter(OssLoadDataConsumer.UploadContext uploadContext) { - try { - // prepare for index file - final String uniqueId = UUID.randomUUID().toString(); - OSSKey metaKey = OSSKey.createBloomFilterFileOSSKey( - uploadContext.getPhysicalSchema(), - uploadContext.getPhysicalTable(), - uniqueId, - "", - 0 - ); - String localIndexFilePath = metaKey.localPath(); - File localIndexFile = new File(localIndexFilePath); - - int lastOffset, currentOffset = 0; - - // construct for all index key. - // for each orc file - String localFilePath = uploadContext.getLocalFilePath(); - - try (FileOutputStream outputStream = new FileOutputStream(localIndexFile); - Reader reader = OrcFile.createReader(new Path(localFilePath), OrcFile.readerOptions(conf))) { - - List stripes = reader.getStripes(); - for (int stripeIndex = 0; stripeIndex < stripes.size(); stripeIndex++) { - // for each stripe - StripeInformation stripe = stripes.get(stripeIndex); - Reader.Options readerOptions = new Reader.Options(conf) - .schema(bfSchema) - .range(stripe.getOffset(), stripe.getLength()); - - long stripeRows = stripe.getNumberOfRows(); - final List children = bfSchema.getChildren(); - - // - bloom filter - Map bloomFilterMap = new HashMap<>(); - for (TypeDescription child : children) { - OrcBloomFilter bloomFilter = new OrcBloomFilter(stripeRows, fpp); - bloomFilterMap.put(child.getId(), bloomFilter); - } - - try (RecordReaderImpl rows = (RecordReaderImpl) reader.rows(readerOptions)) { - VectorizedRowBatch batch = bfSchema.createRowBatch(); - while (rows.nextBatch(batch)) { - int batchSize = batch.size; - for (int col = 0; col < children.size(); col++) { - // for each column, put vector to bloom filter. - TypeDescription child = children.get(col); - ColumnVector vector = batch.cols[col]; - OrcBloomFilter bf = bloomFilterMap.get(child.getId()); - bfColumnProviders.get(col).putBloomFilter(vector, bf, 0, batchSize); - } - } - } - - for (int col = 0; col < children.size(); col++) { - // for each column in this stripe, - // upload meta file, and record meta info. 
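- // lastOffset/currentOffset track the byte range of each serialized bloom filter inside the shared index file, so a reader can seek directly to a single column's filter.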
int colId = children.get(col).getId(); - String colName = bfSchema.getFieldNames().get(col); - - // serialize the bloom-filter data to local file - // update files table - OrcBloomFilter bf = bloomFilterMap.get(colId); - int writtenBytes = OrcBloomFilter.serialize(outputStream, bf); - lastOffset = currentOffset; - currentOffset += writtenBytes; - - storeColumnMeta(uploadContext, uploadContext.getOssKey(), metaKey, - lastOffset, stripeIndex, stripe, colId, colName, writtenBytes); - } - } - - storeIndexFileMeta(uploadContext, metaKey, localIndexFilePath, localIndexFile); - } finally { - if (removeTmpFiles) { - if (localIndexFile.exists()) { - localIndexFile.delete(); - } - } - } - } catch (IOException e) { - throw GeneralUtil.nestedException(e); - } - } - - private void storeColumnMeta(OssLoadDataConsumer.UploadContext uploadContext, OSSKey fileKey, OSSKey metaKey, - int lastOffset, - int stripeIndex, - StripeInformation stripe, int colId, String colName, int writtenBytes) { - // store the column meta record for this column's bloom filter in this stripe - ColumnMetasRecord columnMetasRecord = new ColumnMetasRecord(); - columnMetasRecord.tableFileName = fileKey.toString(); - columnMetasRecord.tableName = uploadContext.getPhysicalTable(); - columnMetasRecord.tableSchema = uploadContext.getPhysicalSchema(); - columnMetasRecord.stripeIndex = stripeIndex; - columnMetasRecord.stripeOffset = stripe.getOffset(); - columnMetasRecord.stripeLength = stripe.getLength(); - columnMetasRecord.columnName = colName; - columnMetasRecord.columnIndex = colId; - columnMetasRecord.bloomFilterPath = metaKey.toString(); - columnMetasRecord.bloomFilterOffset = lastOffset; - columnMetasRecord.bloomFilterLength = writtenBytes; - columnMetasRecord.isMerged = 1; - columnMetasRecord.lifeCycle = OSSMetaLifeCycle.READY.ordinal(); - columnMetasRecord.engine = this.engine.name(); - columnMetasRecord.logicalSchemaName = logicalSchema; - columnMetasRecord.logicalTableName = logicalTable; - - columnMetasRecords.add(columnMetasRecord); - } - - private void storeIndexFileMeta(OssLoadDataConsumer.UploadContext uploadContext, OSSKey metaKey, - String localIndexFilePath, - File localIndexFile) throws IOException { - // handle index file - // write to metaDB files table.
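- // The index file gets its own FilesRecord so that it follows the same lifecycle and TSO-based visibility as the data files it describes.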
- FilesRecord filesRecord = new FilesRecord(); - filesRecord.fileName = metaKey.toString(); - filesRecord.fileType = metaKey.getFileType().toString(); - filesRecord.tableSchema = uploadContext.getPhysicalSchema(); - filesRecord.tableName = uploadContext.getPhysicalTable(); - filesRecord.tableCatalog = ""; - filesRecord.engine = this.engine.name(); - filesRecord.lifeCycle = OSSMetaLifeCycle.READY.ordinal(); - filesRecord.localPath = localIndexFilePath; - filesRecord.status = ""; - filesRecord.logicalSchemaName = logicalSchema; - filesRecord.logicalTableName = logicalTable; - filesRecord.localPartitionName = uploadContext.physicalPartitionName; - - filesRecords.add(filesRecord); - - // upload to oss - FileSystemUtils.writeFile(localIndexFile, metaKey.toString(), this.engine); - } - } - - public class OssLoadDataPersistAndSync { - private final Logger - logger = LoggerFactory.getLogger(OssLoadDataPersistAndSync.class); - - private final List uploaders; - private final ExecutionContext executionContext; - private final String logicalSchema; - private final String logicalTable; - - public OssLoadDataPersistAndSync(ExecutionContext executionContext, List uploaders, - TableMeta tableMeta) { - this.executionContext = executionContext; - this.uploaders = uploaders; - - this.logicalSchema = tableMeta.getSchemaName(); - this.logicalTable = tableMeta.getTableName(); - } - - public void writeMetaDbTransaction() { - // get tso to fill commitTs and taskId - final ITimestampOracle timestampOracle = - executionContext.getTransaction().getTransactionManagerUtil().getTimestampOracle(); - if (null == timestampOracle) { - throw new UnsupportedOperationException("Do not support timestamp oracle"); - } - long ts = timestampOracle.nextTimestamp(); - - for (OssLoadDataUploader uploader : uploaders) { - for (FilesRecord filesRecord : uploader.filesRecords) { - filesRecord.commitTs = ts; - } - } - - try (Connection metaDbConn = MetaDbUtil.getConnection()) { - try { - // use a transaction to keep the metadata changes atomic - MetaDbUtil.beginTransaction(metaDbConn); - - // fill in with same tso - for (OssLoadDataUploader uploader : uploaders) { - for (ColumnMetasRecord columnMetasRecord : uploader.columnMetasRecords) { - columnMetasRecord.taskId = ts; - TableMetaChanger.addOssColumnMeta(metaDbConn, logicalSchema, logicalTable, - columnMetasRecord); - } - - for (FilesRecord filesRecord : uploader.filesRecords) { - filesRecord.commitTs = ts; - // no task id, fill with ts - filesRecord.taskId = ts; - TableMetaChanger.addOssFileWithTso(metaDbConn, logicalSchema, logicalTable, - filesRecord); - } - } - - // update table version - TablesAccessor tableAccessor = new TablesAccessor(); - tableAccessor.setConnection(metaDbConn); - long tableMetaVersion = tableAccessor.getTableMetaVersionForUpdate(logicalSchema, logicalTable); - long newVersion = tableMetaVersion + 1; - tableAccessor.updateVersion(logicalSchema, logicalTable, newVersion); - - MetaDbUtil.commit(metaDbConn); - } catch (Exception e) { - MetaDbUtil.rollback(metaDbConn, e, null, null); - throw GeneralUtil.nestedException(e); - } finally { - MetaDbUtil.endTransaction(metaDbConn, logger); - } - } catch (Exception e) { - throw GeneralUtil.nestedException(e); - } - } - - public void tableSync() { - long trxId = executionContext.getTransaction().getId(); - SyncManagerHelper.sync(new TablesMetaChangeForOssSyncAction(logicalSchema, - Collections.singletonList(logicalTable), - executionContext.getConnId(), trxId)); - } - } } diff --git
a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalModifyHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalModifyHandler.java index 52f3ab7e3..a74896bfe 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalModifyHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalModifyHandler.java @@ -110,8 +110,10 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { final RelNode input = modify.getInput(); checkModifyLimitation(modify, executionContext); + TableMeta tableMeta = + executionContext.getSchemaManager(modify.getSchemaName()).getTable(modify.getLogicalTableName()); final boolean checkForeignKey = - executionContext.foreignKeyChecks(); + executionContext.foreignKeyChecks() && (tableMeta.hasForeignKey() || tableMeta.hasReferencedForeignKey()); final boolean foreignKeyChecksForUpdateDelete = executionContext.getParamManager().getBoolean(ConnectionParams.FOREIGN_KEY_CHECKS_FOR_UPDATE_DELETE); @@ -154,6 +156,8 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { final boolean skipUnchangedRow = executionContext.getParamManager().getBoolean(ConnectionParams.DML_RELOCATE_SKIP_UNCHANGED_ROW); + final boolean checkJsonByStringCompare = + executionContext.getParamManager().getBoolean(ConnectionParams.DML_CHECK_JSON_BY_STRING_COMPARE); int affectRows = 0; Cursor selectCursor = null; @@ -211,7 +215,7 @@ public List> apply(DistinctWriter distinctWriter) { return rowSet.distinctRowSetWithoutNullThenRemoveSameRow(distinctWriter, modify.getSetColumnTargetMappings().get(tableIndex), modify.getSetColumnSourceMappings().get(tableIndex), - modify.getSetColumnMetas().get(tableIndex)); + modify.getSetColumnMetas().get(tableIndex), checkJsonByStringCompare); } }; @@ -525,16 +529,17 @@ protected void beforeModifyCheck(LogicalModify logicalModify, ExecutionContext e final RelOptTable table = logicalModify.getTableInfo().getSrcInfos().get(tableIndex).getRefTable(); final Pair qn = RelUtils.getQualifiedTableName(table); final TableMeta tableMeta = executionContext.getSchemaManager(qn.left).getTable(qn.right); + int columnCnt = tableMeta.getAllColumns().size(); List> rows = new ArrayList<>(); for (List value : values) { List row = new ArrayList<>(); - for (int i = 0; i < tableMeta.getAllColumns().size(); i++) { + for (int i = 0; i < columnCnt; i++) { row.add(value.get(i + index)); } rows.add(row); } - index += tableMeta.getAllColumns().size(); + index += columnCnt; if (logicalModify.getOperation() == TableModify.Operation.DELETE) { LogicalModify modify = null; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalModifyViewHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalModifyViewHandler.java index c2f78724c..8f696740b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalModifyViewHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalModifyViewHandler.java @@ -19,6 +19,7 @@ import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.jdbc.ParameterContext; import com.alibaba.polardbx.common.properties.ConnectionProperties; +import com.alibaba.polardbx.common.properties.MetricLevel; import com.alibaba.polardbx.common.utils.CaseInsensitive; import com.alibaba.polardbx.common.utils.GeneralUtil; import 
com.alibaba.polardbx.executor.cursor.Cursor; @@ -136,7 +137,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { if (!logicalModifyView.hasHint() && executionContext.getParams() != null && GlobalIndexMeta.hasIndex(logicalModifyView.getLogicalTableName(), schemaName, executionContext)) { // TODO add this back - executionContext.getExtraCmds().put(ConnectionProperties.MPP_METRIC_LEVEL, 1); + executionContext.getExtraCmds().put(ConnectionProperties.MPP_METRIC_LEVEL, MetricLevel.SQL.metricLevel); // If target column does not occur in any GSI index columns, // the index updating is not needed. @@ -170,7 +171,7 @@ private boolean needUpdateGSI(LogicalModifyView logicalModifyView, SqlUpdate sql tableNameAndIndexMetas.put(tableName, indexMetas); } for (TableMeta indexMeta : indexMetas) { - if (indexMeta.getColumnIgnoreCase(columName) != null) { + if (indexMeta.containsColumn(columName)) { return true; } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalRelocateHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalRelocateHandler.java index 697c5e1dc..cca022fba 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalRelocateHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalRelocateHandler.java @@ -98,9 +98,10 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { checkUpdateDeleteLimitLimitation(relocate.getOriginalSqlNode(), executionContext); RelNode input = relocate.getInput(); - + TableMeta tableMeta = + executionContext.getSchemaManager(relocate.getSchemaName()).getTable(relocate.getLogicalTableName()); final boolean checkForeignKey = - executionContext.foreignKeyChecks(); + executionContext.foreignKeyChecks() && (tableMeta.hasForeignKey() || tableMeta.hasReferencedForeignKey()); final boolean foreignKeyChecksForUpdateDelete = executionContext.getParamManager().getBoolean(ConnectionParams.FOREIGN_KEY_CHECKS_FOR_UPDATE_DELETE); @@ -153,6 +154,8 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { final boolean skipUnchangedRow = executionContext.getParamManager().getBoolean(ConnectionParams.DML_RELOCATE_SKIP_UNCHANGED_ROW); + final boolean checkJsonByStringCompare = + executionContext.getParamManager().getBoolean(ConnectionParams.DML_CHECK_JSON_BY_STRING_COMPARE); try { // Do select @@ -221,7 +224,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { rowSet = buildChangedRowSet(distinctValues, returnColumns, relocate.getSetColumnTargetMappings().get(tableIndex), relocate.getSetColumnSourceMappings().get(tableIndex), - relocate.getSetColumnMetas().get(tableIndex)); + relocate.getSetColumnMetas().get(tableIndex), checkJsonByStringCompare); if (rowSet == null) { continue; } @@ -237,7 +240,8 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { final Mapping sourceMap = relocate.getSetColumnSourceMappings().get(tableIndex); final List metas = relocate.getSetColumnMetas().get(tableIndex); for (List row : (useRowSet ? rowSet.getRows() : distinctValues)) { - affectRows += identicalRow(row, targetMap, sourceMap, metas) ? 0 : 1; + affectRows += + identicalRow(row, targetMap, sourceMap, metas, checkJsonByStringCompare) ? 
0 : 1; } } @@ -275,24 +279,25 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { } protected static boolean identicalRow(List row, Mapping setColumnTargetMapping, - Mapping setColumnSourceMapping, List setColumnMetas) { + Mapping setColumnSourceMapping, List setColumnMetas, + boolean checkJsonByStringCompare) { final List targets = Mappings.permute(row, setColumnTargetMapping); final List sources = Mappings.permute(row, setColumnSourceMapping); final GroupKey targetKey = new GroupKey(targets.toArray(), setColumnMetas); final GroupKey sourceKey = new GroupKey(sources.toArray(), setColumnMetas); - return sourceKey.equalsForUpdate(targetKey); + return sourceKey.equalsForUpdate(targetKey, checkJsonByStringCompare); } public static RowSet buildChangedRowSet(List> values, List returnColumns, Mapping setColumnTargetMapping, Mapping setColumnSourceMapping, - List setColumnMetas) { + List setColumnMetas, boolean checkJsonByStringCompare) { final List> changedValues = new ArrayList<>(); for (List row : values) { final List targets = Mappings.permute(row, setColumnTargetMapping); final List sources = Mappings.permute(row, setColumnSourceMapping); final GroupKey targetKey = new GroupKey(targets.toArray(), setColumnMetas); final GroupKey sourceKey = new GroupKey(sources.toArray(), setColumnMetas); - if (!targetKey.equalsForUpdate(sourceKey)) { + if (!targetKey.equalsForUpdate(sourceKey, checkJsonByStringCompare)) { changedValues.add(row); } } @@ -461,16 +466,17 @@ protected void beforeModifyCheck(LogicalRelocate logicalRelocate, String schemaN DistinctWriter writer = primaryRelocateWriter.containsKey(tableIndex) ? primaryRelocateWriter.get(tableIndex).getModifyWriter() : primaryDistinctWriter.get(tableIndex); + int columnCnt = tableMeta.getAllColumns().size(); List> rows = new ArrayList<>(); for (List value : values) { List row = new ArrayList<>(); - for (int i = 0; i < tableMeta.getAllColumns().size(); i++) { + for (int i = 0; i < columnCnt; i++) { row.add(value.get(i + index)); } rows.add(row); } - index += tableMeta.getAllColumns().size(); + index += columnCnt; if (logicalRelocate.getOperation() == TableModify.Operation.DELETE) { LogicalModify modify = null; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalReplaceHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalReplaceHandler.java index 55a668647..4735a7f3c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalReplaceHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalReplaceHandler.java @@ -31,6 +31,7 @@ import com.alibaba.polardbx.executor.utils.NewGroupKey; import com.alibaba.polardbx.optimizer.OptimizerContext; import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; import com.alibaba.polardbx.optimizer.core.rel.LogicalDynamicValues; @@ -38,6 +39,7 @@ import com.alibaba.polardbx.optimizer.core.rel.LogicalReplace; import com.alibaba.polardbx.optimizer.core.rel.dml.DistinctWriter; import com.alibaba.polardbx.optimizer.core.rel.dml.Writer; +import com.alibaba.polardbx.optimizer.core.rel.dml.util.ClassifyResult; import com.alibaba.polardbx.optimizer.core.rel.dml.util.DuplicateCheckResult; import 
com.alibaba.polardbx.optimizer.core.rel.dml.util.RowClassifier; import com.alibaba.polardbx.optimizer.core.rel.dml.util.SourceRows; @@ -103,7 +105,9 @@ protected int doExecute(LogicalInsert insert, ExecutionContext executionContext, "REPLACE on table having VIRTUAL/STORED generated column in unique key"); } - final boolean checkForeignKey = executionContext.foreignKeyChecks(); + TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(tableName); + final boolean checkForeignKey = + executionContext.foreignKeyChecks() && (tableMeta.hasForeignKey() || tableMeta.hasReferencedForeignKey()); int affectRows = 0; if (input instanceof LogicalDynamicValues) { @@ -160,8 +164,7 @@ protected int doExecute(LogicalInsert insert, ExecutionContext executionContext, usePartFieldChecker)); if (checkForeignKey) { - // Do need to check Pk for replace - beforeInsertCheck(replace, classifiedRows, false, true, executionContext); + fkConstraintAndCascade(replaceEc, replace, schemaName, tableName, input, classifiedRows); } try { @@ -454,6 +457,8 @@ protected List bindInsertRows(LogicalReplace replace, List executionContext.getParamManager().getBoolean(ConnectionParams.DML_SKIP_IDENTICAL_ROW_CHECK) || ( replace.isHasJsonColumn() && executionContext.getParamManager() .getBoolean(ConnectionParams.DML_SKIP_IDENTICAL_JSON_ROW_CHECK)); + final boolean checkJsonByStringCompare = + executionContext.getParamManager().getBoolean(ConnectionParams.DML_CHECK_JSON_BY_STRING_COMPARE); // 2. Check each insert row Ord.zip(currentBatchParameters).forEach(o -> { @@ -522,7 +527,7 @@ protected List bindInsertRows(LogicalReplace replace, List // Compare entire row if (skipIdenticalRowCheck || multiUk || !identicalRow(duplicatedRow.after, newCheckRow.after, - rowColumnMetas)) { + rowColumnMetas, checkJsonByStringCompare)) { duplicatedRow.affectedRows++; } @@ -718,12 +723,10 @@ private static List>> buildDuplicateChecke return result; } - protected void beforeInsertCheck(LogicalInsert logicalInsert, List classifiedRows, - boolean checkPk, boolean checkFk, ExecutionContext executionContext) { + List> getInsertValues(LogicalInsert logicalInsert, List classifiedRows, + ExecutionContext executionContext) { LogicalDynamicValues input = RelUtils.getRelInput(logicalInsert); final ImmutableList rexRow = input.getTuples().get(0); - List insertColumns = input.getRowType().getFieldNames().stream().map(String::toUpperCase).collect( - Collectors.toList()); List> values = new ArrayList<>(); for (DuplicateCheckResult classifiedRow : classifiedRows) { @@ -732,8 +735,34 @@ protected void beforeInsertCheck(LogicalInsert logicalInsert, List classifiedRows) { + // Do need to check Fk for replace + Map>>> fkPlans = replace.getFkPlans(); + List insertColumns = input.getRowType().getFieldNames().stream().map(String::toUpperCase).collect( + Collectors.toList()); + List> values = getInsertValues(replace, classifiedRows, executionContext); + + // Constraint for insert + beforeInsertCheck(replace, values, insertColumns, false, true, executionContext); + + // Cascade for delete + values.clear(); + final ReplaceRelocateWriter primaryRelocateWriter = replace.getPrimaryRelocateWriter(); + final Function rowBuilder = (wr) -> SourceRows.createFromValues(classifiedRows); + final RowClassifier rowClassifier = buildRowClassifier(replace, executionContext, schemaName); + + if (primaryRelocateWriter != null) { + final SourceRows duplicatedRows = rowBuilder.apply(primaryRelocateWriter.getDeleteWriter()); + final ClassifyResult classified = + 
rowClassifier.apply(primaryRelocateWriter, duplicatedRows, new ClassifyResult()); + values = classified.deleteRows; + } - beforeInsertCheck(logicalInsert, values, insertColumns, checkPk, checkFk, executionContext); + beforeDeleteFkCascade(replace, schemaName, tableName, executionContext, values, fkPlans, 1); } private static class DuplicateCheckRow { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalSetDefaultRoleHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalSetDefaultRoleHandler.java index 4614512f3..3877e1987 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalSetDefaultRoleHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalSetDefaultRoleHandler.java @@ -16,11 +16,16 @@ package com.alibaba.polardbx.repo.mysql.handler; -import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlSetDefaultRoleStatement; +import com.alibaba.polardbx.common.cdc.CdcDdlMarkVisibility; +import com.alibaba.polardbx.common.cdc.CdcManagerHelper; +import com.alibaba.polardbx.common.cdc.DdlScope; +import com.alibaba.polardbx.common.cdc.ICdcManager; +import com.alibaba.polardbx.common.ddl.newengine.DdlType; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.druid.sql.dialect.mysql.ast.statement.MySqlSetDefaultRoleStatement; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; import com.alibaba.polardbx.executor.handler.HandlerCommon; @@ -31,16 +36,20 @@ import com.alibaba.polardbx.gms.privilege.PolarPrivManager; import com.alibaba.polardbx.gms.privilege.PolarRolePrivilege; import com.alibaba.polardbx.gms.privilege.PrivilegeKind; +import com.alibaba.polardbx.gms.topology.SystemDbHelper; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; import org.apache.calcite.rel.RelNode; +import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlSetDefaultRole; import org.apache.calcite.sql.SqlUserName; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; import static com.alibaba.polardbx.common.exception.code.ErrorCode.ERR_SERVER; +import static com.alibaba.polardbx.executor.ddl.job.task.cdc.CdcMarkUtil.buildExtendParameter; /** * Handle set default role statements. 
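* The statement is additionally marked to CDC (see markDdlForCdc below) so that binlog consumers can observe the role change.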
@@ -98,9 +107,8 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { .map(manager::getAndCheckExactUser) .collect(Collectors.toList()); - toUsers - .forEach( - user -> user.getRolePrivileges().checkRolesGranted(roles.stream().map(PolarAccountInfo::getAccount))); + toUsers.forEach( + user -> user.getRolePrivileges().checkRolesGranted(roles.stream().map(PolarAccountInfo::getAccount))); PolarRolePrivilege.DefaultRoleState defaultRoleState = PolarRolePrivilege.DefaultRoleState.from(sqlNode.getDefaultRoleSpec()); @@ -120,8 +128,26 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { ); manager.triggerReload(); + markDdlForCdc(executionContext); return new AffectRowCursor(toUsers.size() * roles.size()); } + //TODO cdc@jinwu + private void markDdlForCdc(ExecutionContext executionContext) { + Map param = buildExtendParameter(executionContext); + param.put(ICdcManager.CDC_DDL_SCOPE, DdlScope.Instance); + + CdcManagerHelper.getInstance().notifyDdlNew( + SystemDbHelper.DEFAULT_DB_NAME, + "*", + SqlKind.SQL_SET_DEFAULT_ROLE.name(), + executionContext.getOriginSql(), + DdlType.UNSUPPORTED, + null, + null, + CdcDdlMarkVisibility.Protected, + param); + } + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalShowLocalityInfoHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalShowLocalityInfoHandler.java index 3eab389d4..b1bd1e5c0 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalShowLocalityInfoHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalShowLocalityInfoHandler.java @@ -155,8 +155,7 @@ private Cursor handleNewPartitionTable(ExecutionContext executionContext, String partitionGroupRecordList = tableGroupConfig.getPartitionGroupRecords(); objectName = tableGroupConfig.getTableGroupRecord().getTg_name(); locality = tableGroupConfig.getLocalityDesc().toString(); - List tableList = - tableGroupConfig.getAllTables().stream().map(o -> o.getTableName()).collect(Collectors.toList()); + List tableList = tableGroupConfig.getAllTables(); String tableListString = String.join(",", tableList); result.addRow(new Object[] {objectId, objectName, "tablegroup", locality, tableListString}); for (PartitionGroupRecord partitionGroupRecord : partitionGroupRecordList) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalShowPhysicalDdlHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalShowPhysicalDdlHandler.java new file mode 100644 index 000000000..6d1d0cd27 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalShowPhysicalDdlHandler.java @@ -0,0 +1,520 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.repo.mysql.handler; + +import com.alibaba.druid.pool.DruidDataSource; +import com.alibaba.polardbx.atom.TAtomDataSource; +import com.alibaba.polardbx.common.model.Group; +import com.alibaba.polardbx.common.utils.GeneralUtil; +import com.alibaba.polardbx.common.utils.Pair; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.ddl.twophase.TwoPhaseDdlUtils; +import com.alibaba.polardbx.executor.handler.HandlerCommon; +import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.gms.topology.DbInfoManager; +import com.alibaba.polardbx.gms.topology.DbTopologyManager; +import com.alibaba.polardbx.gms.util.GroupInfoUtil; +import com.alibaba.polardbx.group.jdbc.TGroupDataSource; +import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalShow; +import com.alibaba.polardbx.repo.mysql.spi.MyRepository; +import com.alibaba.polardbx.rpc.compatible.XDataSource; +import io.grpc.netty.shaded.io.netty.util.internal.StringUtil; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.sql.SqlShowPhysicalDdl; + +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * @author jinkun.taojinkun + */ +public class LogicalShowPhysicalDdlHandler extends HandlerCommon { + + private static final Logger logger = LoggerFactory.getLogger(LogicalShowPhysicalDdlHandler.class); + + public LogicalShowPhysicalDdlHandler(IRepository repo) { + super(repo); + } + + @Override + public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { + final LogicalShow show = (LogicalShow) logicalPlan; + final SqlShowPhysicalDdl showPhysicalDdl = (SqlShowPhysicalDdl) show.getNativeSqlNode(); + + String schemaName = showPhysicalDdl.getSchema(); + if (DbInfoManager.getInstance().isNewPartitionDb(schemaName)) { + return handle(executionContext, schemaName, showPhysicalDdl.isStatus(), true); + } else { + return handle(executionContext, schemaName, showPhysicalDdl.isStatus(), false); + } + } + + public class PhysicalDdlResult { + public String getPhysicalDbName() { + return physicalDbName; + } + + public String getPhysicalTableName() { + return physicalTableName; + } + + public Long getProcessId() { + return processId; + } + + public String getPhase() { + return phase; + } + + public String getState() { + return state; + } + + String key; + String physicalDbName; + + String physicalTableName; + + Long processId; + + String phase; + + String state; + + String processInfo = ""; + + String processState = ""; + + Long time = -1L; + + String reachedPreparedMoment = ""; + + String reachedCommitMoment = ""; + + String commitMoment = ""; + + String prepareMoment = ""; + + Long preparedRunningConsumeBlocks = -1L; + + Long preparedRunningConsumeTime = -1L; + + Long commitConsumeBlocks = -1L; + + Long commitConsumeTime = -1L; + + Long lockTableTime 
= -1L; + + public void setProcessInfo(String processInfo) { + this.processInfo = processInfo; + } + + public void setProcessState(String processState) { + this.processState = processState; + } + + public void setTime(Long time) { + this.time = time; + } + + public PhysicalDdlResult(String key, String physicalDbName, String physicalTableName, Long processId, + String phase, + String state + ) { + this.key = key; + this.physicalDbName = physicalDbName; + this.physicalTableName = physicalTableName; + this.processId = processId; + this.phase = phase; + this.state = state; + } + + public PhysicalDdlResult(String physicalTableName, String reachedPreparedMoment, String reachedCommitMoment, + String commitMoment, String prepareMoment, + Long preparedRunningConsumeBlocks, Long preparedRunningConsumeTime, + Long commitConsumeBlocks, + Long commitConsumeTime, Long lockTableTime) { + this.physicalTableName = physicalTableName; + this.reachedPreparedMoment = reachedPreparedMoment; + this.reachedCommitMoment = reachedCommitMoment; + this.commitMoment = commitMoment; + this.prepareMoment = prepareMoment; + this.preparedRunningConsumeBlocks = preparedRunningConsumeBlocks; + this.preparedRunningConsumeTime = preparedRunningConsumeTime; + this.commitConsumeBlocks = commitConsumeBlocks; + this.commitConsumeTime = commitConsumeTime; + this.lockTableTime = lockTableTime; + } + + public void assignProfInfo(PhysicalDdlResult physicalDdlResult) { + this.reachedPreparedMoment = physicalDdlResult.reachedPreparedMoment; + this.reachedCommitMoment = physicalDdlResult.reachedCommitMoment; + this.commitMoment = physicalDdlResult.commitMoment; + this.prepareMoment = physicalDdlResult.prepareMoment; + this.preparedRunningConsumeBlocks = physicalDdlResult.preparedRunningConsumeBlocks; + this.preparedRunningConsumeTime = physicalDdlResult.preparedRunningConsumeTime; + this.commitConsumeBlocks = physicalDdlResult.commitConsumeBlocks; + this.commitConsumeTime = physicalDdlResult.commitConsumeTime; + this.lockTableTime = physicalDdlResult.lockTableTime; + } + + } + + private Cursor handle(ExecutionContext executionContext, String schemaName, Boolean status, Boolean newPartition) { + ArrayResultCursor result = getShowPhysicalDdlResultCursor(); + Throwable ex = null; + MyRepository repo = (MyRepository) this.repo; + List allGroups = OptimizerContext.getActiveGroups(); + Map physicalDdlResultMap = new HashMap<>(); + List physicalDdlResults = new ArrayList<>(); + String showFullPhysicalDdlStatsSql = TwoPhaseDdlUtils.SQL_STATS_FULL_TWO_PHASE_DDL; + String showFullPhysicalDdlProfSql = TwoPhaseDdlUtils.SQL_PROF_FULL_TWO_PHASE_DDL; + String showPhysicalProcessSql = TwoPhaseDdlUtils.SQL_SHOW_PROCESS_LIST; + Set> visitedDnSet = new HashSet<>(); + for (Group group : allGroups) { + if (!group.getType().equals(Group.GroupType.MYSQL_JDBC)) { + continue; + } + + TGroupDataSource groupDataSource = (TGroupDataSource) repo.getDataSource(group.getName()); + if (groupDataSource == null) { + continue; + } + TAtomDataSource atom = TwoPhaseDdlUtils.findMasterAtomForGroup(groupDataSource); + if (!visitedDnSet.add(new Pair<>(atom.getHost(), atom.getPort()))) { + // skip duplicates caused by different databases residing on the same DN instance + continue; + } + final DataSource dataSource = atom.getDataSource(); + Connection conn = null; + + try { + if (dataSource instanceof DruidDataSource) { + DruidDataSource druid = (DruidDataSource) dataSource; + conn = druid.createPhysicalConnection().getPhysicalConnection(); + } else if (dataSource instanceof XDataSource) { + conn = dataSource.getConnection(); + } else {
+ throw GeneralUtil.nestedException("Unknown datasource. " + dataSource.getClass()); + } + Map physicalDdlProfInfoMap = new HashMap<>(); + ResultSet rsPhysicalDdlProf = conn.createStatement().executeQuery(showFullPhysicalDdlProfSql); + while (rsPhysicalDdlProf.next()) { + String physicalTableName = rsPhysicalDdlProf.getString("PHYSICAL_TABLE"); + String reachedPreparedMoment = rsPhysicalDdlProf.getString("REACHED_PREPARED_MOMENT"); + String reachedCommitMoment = rsPhysicalDdlProf.getString("REACHED_COMMIT_MOMENT"); + String commitMoment = rsPhysicalDdlProf.getString("COMMIT_MOMENT"); + String prepareMoment = rsPhysicalDdlProf.getString("PREPARE_MOMENT"); + Long preparedRunningConsumeBlocks = rsPhysicalDdlProf.getLong("PREPARED_RUNNING_CONSUME_BLOCKS"); + Long preparedRunningConsumeTime = rsPhysicalDdlProf.getLong("PREPARED_RUNNING_CONSUME_TIME"); + Long commitConsumeBlocks = rsPhysicalDdlProf.getLong("COMMIT_CONSUME_BLOCKS"); + Long commitConsumeTime = rsPhysicalDdlProf.getLong("COMMIT_CONSUME_TIME"); + Long lockTableTime = rsPhysicalDdlProf.getLong("LOCK_TABLE_TIME"); + PhysicalDdlResult physicalDdlResult = new PhysicalDdlResult( + physicalTableName, + reachedPreparedMoment, + reachedCommitMoment, + commitMoment, + prepareMoment, + preparedRunningConsumeBlocks, + preparedRunningConsumeTime, + commitConsumeBlocks, + commitConsumeTime, + lockTableTime + ); + physicalDdlProfInfoMap.put(physicalTableName, physicalDdlResult); + } + ResultSet rsPhysicalDdlStats = conn.createStatement().executeQuery(showFullPhysicalDdlStatsSql); + while (rsPhysicalDdlStats.next()) { + String key = rsPhysicalDdlStats.getString("KEY"); + String phyDbName = rsPhysicalDdlStats.getString("PHYSICAL_DB"); + String physicalTableName = rsPhysicalDdlStats.getString("PHYSICAL_TABLE"); + String phase = rsPhysicalDdlStats.getString("PHASE"); + String state = rsPhysicalDdlStats.getString("STATE"); + Long processId = rsPhysicalDdlStats.getLong("PROCESS_ID"); + PhysicalDdlResult physicalDdlResult = new PhysicalDdlResult( + key, + phyDbName, + physicalTableName, + processId, + phase, + state + ); + if (physicalDdlProfInfoMap.containsKey(physicalTableName)) { + physicalDdlResult.assignProfInfo(physicalDdlProfInfoMap.get(physicalTableName)); + } + if (processId == -1L) { + physicalDdlResults.add(physicalDdlResult); + } else { + physicalDdlResultMap.put(buildDdlResultKey(phyDbName, processId), physicalDdlResult); + } + } + + ResultSet rsPhysicalProcess = conn.createStatement().executeQuery(showPhysicalProcessSql); + while (rsPhysicalProcess.next()) { + Long processId = rsPhysicalProcess.getLong("Id"); + String phyDbName = rsPhysicalProcess.getString("db"); + String ddlResultKey = buildDdlResultKey(phyDbName, processId); + if (physicalDdlResultMap.containsKey(ddlResultKey)) { + String processState = rsPhysicalProcess.getString("State"); + String processInfo = rsPhysicalProcess.getString("Info"); + Long time = rsPhysicalProcess.getLong("Time"); + PhysicalDdlResult physicalDdlResult = physicalDdlResultMap.get(ddlResultKey); + physicalDdlResult.setProcessInfo(processInfo); + physicalDdlResult.setProcessState(processState); + physicalDdlResult.setTime(time); + + } + } + conn.close(); + conn = null; + } catch (SQLException e) { + logger.error("error when show physical ddl", e); + } finally { + if (conn != null) { + try { + conn.close(); + } catch (SQLException e) { + logger.error(e); + } + } + } + } + physicalDdlResults.addAll(physicalDdlResultMap.values()); + Set groupNames = 
DbTopologyManager.getGroupNameToStorageInstIdMap(schemaName).keySet(); + Set physicalDbNames = + groupNames.stream().map(GroupInfoUtil::buildPhysicalDbNameFromGroupName).collect(Collectors.toSet()); + physicalDdlResults = + physicalDdlResults.stream().filter(o -> physicalDbNames.contains(o.physicalDbName)).collect( + Collectors.toList()); + + if (!status) { + outputPhysicalDdlResult(result, physicalDdlResults); + return result; + } else { + return handleShowPhysicalDdlStatus(physicalDdlResults); + } + } + + class ClusterPhyDbStatus { + String phase; + String phyDbName; + int totalCount; + Map stateCount; + + String logicalTableName; + + ClusterPhyDbStatus(String phyDbName, String logicalTableName, String phase) { + this.phase = phase; + this.phyDbName = phyDbName; + this.stateCount = new HashMap<>(); + this.logicalTableName = logicalTableName; + this.totalCount = 0; + } + + void appendResult(PhysicalDdlResult physicalDdlResult) { + this.totalCount += 1; + if (!this.stateCount.containsKey(physicalDdlResult.state)) { + this.stateCount.put(physicalDdlResult.state, 0); + } + this.stateCount.put(physicalDdlResult.state, this.stateCount.get(physicalDdlResult.state) + 1); + } + + @Override + public String toString() { + String totalCountString = String.format("total Physical Table Count: %d, ", totalCount); + List stateCountStrings = new ArrayList<>(); + for (String state : stateCount.keySet()) { + int count = stateCount.get(state); + stateCountStrings.add(String.format("%s Count:%d", state, count)); + } + return totalCountString + StringUtil.join(", ", stateCountStrings); + } + } + + public Cursor handleShowPhysicalDdlStatus(List physicalDdlResults) { + ArrayResultCursor result = getShowPhysicalDdlStatusResultCursor(); + + Map phyDbStatus = new HashMap<>(); + for (PhysicalDdlResult physicalDdlResult : physicalDdlResults) { + if (!phyDbStatus.containsKey(physicalDdlResult.key)) { + String logicalTableName = + TwoPhaseDdlUtils.buildLogicalTableNameFromTwoPhaseKeyAndPhyDbName(physicalDdlResult.key, + physicalDdlResult.physicalDbName); + phyDbStatus.put(physicalDdlResult.key, + new ClusterPhyDbStatus(physicalDdlResult.physicalDbName, logicalTableName, + physicalDdlResult.phase)); + } + phyDbStatus.get(physicalDdlResult.key).appendResult(physicalDdlResult); + } + for (String key : phyDbStatus.keySet()) { + ClusterPhyDbStatus clusterPhyDbStatus = phyDbStatus.get(key); + result.addRow(new Object[] { + clusterPhyDbStatus.logicalTableName, + clusterPhyDbStatus.phyDbName, + clusterPhyDbStatus.phase, + clusterPhyDbStatus.toString() + }); + } + return result; + + } + +// String targetSql1 = TwoPhaseDdlUtils.SQL_STATS_FULL_TWO_PHASE_DDL; +// String targetSql2 = "show processlist"; +// Map physicalDdlResults = new HashMap<>(); +// for(String phyDbName:phyDbNames){ +// String groupName = buildGroupNameFromPhysicalDb(phyDbName); +// TGroupDataSource tGroupDataSource = (TGroupDataSource) ExecutorContext.getContext(schemaName).getTopologyExecutor() +// .getGroupExecutor(groupName).getDataSource(); +// try(Connection conn = (Connection)tGroupDataSource.getConnection()){ +// ResultSet rs1 = conn.createStatement().executeQuery(targetSql1); +// ResultSet rs2 = conn.createStatement().executeQuery(targetSql2); +// while(rs1.next()) { +// String physicalTableName = rs1.getString("PHYSICAL_TABLE"); +// String phase = rs2.getString("PHASE"); +// String state = rs2.getString("WAIT_RUNNING"); +// Long processId = rs2.getLong("PROCESS_ID"); +// PhysicalDdlResult physicalDdlResult = new PhysicalDdlResult( +// phyDbName, +// 
physicalTableName, +// processId, +// phase, +// state +// ); +// physicalDdlResults.put(getDdlResultKey(phyDbName, processId), physicalDdlResult); +// } +// while(rs2.next()) { +// Long processId = rs2.getLong("ID"); +// String ddlResultKey = getDdlResultKey(phyDbName, processId); +// if(physicalDdlResults.containsKey(ddlResultKey)) { +// String processState = rs2.getString("State"); +// String processInfo = rs2.getString("Info"); +// Long time = rs2.getLong("Time"); +// PhysicalDdlResult physicalDdlResult = physicalDdlResults.get(ddlResultKey); +// physicalDdlResult.setProcessInfo(processInfo); +// physicalDdlResult.setProcessState(processState); +// physicalDdlResult.setTime(time); +// } +// } +// }catch(Throwable e){ +// logger.error(e); +// ex = e; +// } finally { +// if (ex != null) { +// GeneralUtil.nestedException(ex); +// } +// } +// } + + public void outputPhysicalDdlResult(ArrayResultCursor result, Collection physicalDdlResults) { + List sortedPhysicalDdlResults = + physicalDdlResults.stream().sorted(Comparator.comparing(PhysicalDdlResult::getPhysicalDbName) + .thenComparing(PhysicalDdlResult::getPhase) + .thenComparing(PhysicalDdlResult::getState) + .thenComparing(PhysicalDdlResult::getPhysicalTableName) + ).collect(Collectors.toList()); + for (PhysicalDdlResult physicalDdlResult : sortedPhysicalDdlResults) { + result.addRow(new Object[] { + physicalDdlResult.physicalDbName, + physicalDdlResult.physicalTableName, + physicalDdlResult.phase, + physicalDdlResult.state, + physicalDdlResult.processId, + physicalDdlResult.processState, + physicalDdlResult.time, + + physicalDdlResult.reachedPreparedMoment, + physicalDdlResult.reachedCommitMoment, + physicalDdlResult.commitMoment, + physicalDdlResult.prepareMoment, + physicalDdlResult.preparedRunningConsumeBlocks, + physicalDdlResult.preparedRunningConsumeTime, + physicalDdlResult.commitConsumeBlocks, + physicalDdlResult.commitConsumeTime, + physicalDdlResult.lockTableTime + }); + } + } + + private ArrayResultCursor getShowPhysicalDdlStatusResultCursor() { + ArrayResultCursor result = new ArrayResultCursor("PHYSICAL_DDL_STATUS"); + result.addColumn("LOGICAL_TABLE_NAME", DataTypes.StringType); + result.addColumn("PHYSICAL_DB_NAME", DataTypes.StringType); + result.addColumn("PHASE", DataTypes.StringType); + result.addColumn("CONTENT", DataTypes.StringType); + return result; + } + + private ArrayResultCursor getShowPhysicalDdlResultCursor() { + ArrayResultCursor result = new ArrayResultCursor("PHYSICAL_DDL_INFO"); + result.addColumn("PHYSICAL_DB_NAME", DataTypes.StringType); + result.addColumn("PHYSICAL_TABLE_NAME", DataTypes.StringType); + result.addColumn("PHASE", DataTypes.StringType); + result.addColumn("STATE", DataTypes.StringType); + result.addColumn("PROCESS_ID", DataTypes.LongType); + result.addColumn("PROCESS_STATE", DataTypes.StringType); + result.addColumn("TIME", DataTypes.LongType); + + result.addColumn("REACHED_PREPARED_MOMENT", DataTypes.StringType); + result.addColumn("REACHED_COMMIT_MOMENT", DataTypes.StringType); + result.addColumn("COMMIT_MOMENT", DataTypes.StringType); + result.addColumn("PREPARE_MOMENT", DataTypes.StringType); + result.addColumn("PREPARED_RUNNING_CONSUME_BLOCKS", DataTypes.LongType); + result.addColumn("PREPARED_RUNNING_CONSUME_TIME", DataTypes.LongType); + result.addColumn("COMMIT_CONSUME_BLOCKS", DataTypes.LongType); + result.addColumn("COMMIT_CONSUME_TIME", DataTypes.LongType); + result.addColumn("LOCK_TABLE_TIME", DataTypes.LongType); +// result.addColumn("RUN_TIME_SECOND", DataTypes.LongType); 
+// result.addColumn("WAIT_ON_BARRIER", DataTypes.StringType); +// result.addColumn("WAIT_TIME", DataTypes.LongType); +// result.addColumn("PHYSICAL_TABLE_SIZE", DataTypes.LongType); +// result.addColumn("PHYSICAL_TABLE_ROWS", DataTypes.LongType); +// result.addColumn("PHYSICAL_TABLE_LINE", DataTypes.LongType); +// result.addColumn("PHYSICAL_TABLE_ROW_LOG_SIZE", DataTypes.LongType); +// result.addColumn("PHYSICAL_TABLE_ROW_LOG_REPLAY_TIME", DataTypes.LongType); +// result.addColumn("COPY_TMP_TABLE_TIME", DataTypes.LongType); +// result.addColumn("REPLAY_ROW_LOG_TIME", DataTypes.LongType); +// result.addColumn("COMMIT_TIME", DataTypes.LongType); +// result.addColumn("PREPARE_TIME", DataTypes.LongType); +// result.addColumn("WAIT_TIME", DataTypes.LongType); +// result.addColumn("DDL_STMT", DataTypes.StringType); + result.initMeta(); + return result; + } + + private String buildDdlResultKey(String physicalDbName, Long processId) { + return String.format("%s_%d", physicalDbName, processId); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalShowVariablesMyHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalShowVariablesMyHandler.java index 9c2e808a8..3beebca5e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalShowVariablesMyHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalShowVariablesMyHandler.java @@ -22,13 +22,13 @@ import com.alibaba.polardbx.common.properties.ConfigParam; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.properties.ConnectionProperties; +import com.alibaba.polardbx.common.properties.DynamicConfig; import com.alibaba.polardbx.common.properties.LongConfigParam; import com.alibaba.polardbx.common.properties.ParamManager; import com.alibaba.polardbx.common.properties.SystemPropertiesHelper; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.config.ConfigDataMode; -import com.alibaba.polardbx.config.InstanceRoleManager; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.ExecutorCursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; @@ -36,6 +36,8 @@ import com.alibaba.polardbx.executor.operator.FilterExec; import com.alibaba.polardbx.executor.operator.ResultSetCursorExec; import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.gms.config.impl.InstConfUtil; +import com.alibaba.polardbx.gms.config.impl.MetaDbInstConfigManager; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.TddlRelDataTypeSystemImpl; import com.alibaba.polardbx.optimizer.core.TddlTypeFactoryImpl; @@ -86,6 +88,10 @@ public void collectDnVariables( } } + if (!ConfigDataMode.needDNResource()) { + return; + } + //extract dn variables from mysql Cursor cursor = null; @@ -138,15 +144,62 @@ public void collectCnVariables(TreeMap variables, ExecutionConte BatchInsertPolicy.getVariableName().toLowerCase(Locale.ROOT), executionContext.getConnection().getBatchInsertPolicy(executionContext.getExtraCmds()).getName()); - // DRDS_INSTANCE_ROLE + // POLARDBX_INSTANCE_ROLE variables.put( - InstanceRoleManager.INSTANCE_ROLE_VARIABLE.toLowerCase(Locale.ROOT), - InstanceRoleManager.INSTANCE.getInstanceRole()); + 
ConfigDataMode.INSTANCE_ROLE_VARIABLE.toLowerCase(Locale.ROOT), + ConfigDataMode.getInstanceRole()); // SHARE_READ_VIEW variables.put( TransactionAttribute.SHARE_READ_VIEW.toLowerCase(Locale.ROOT), executionContext.isShareReadView()); + + // XA_TSO + variables.put( + ConnectionProperties.ENABLE_XA_TSO.toLowerCase(Locale.ROOT), + InstConfUtil.getBool(ConnectionParams.ENABLE_XA_TSO)); + + // AUTO_COMMIT_TSO + variables.put( + ConnectionProperties.ENABLE_AUTO_COMMIT_TSO.toLowerCase(Locale.ROOT), + InstConfUtil.getBool(ConnectionParams.ENABLE_AUTO_COMMIT_TSO)); + + // AUTO_SP + variables.put( + ConnectionProperties.ENABLE_AUTO_SAVEPOINT.toLowerCase(Locale.ROOT), + executionContext.isSupportAutoSavepoint()); + + // ENABLE_X_PROTO_OPT_FOR_AUTO_SP + variables.put( + ConnectionProperties.ENABLE_X_PROTO_OPT_FOR_AUTO_SP.toLowerCase(Locale.ROOT), + DynamicConfig.getInstance().enableXProtoOptForAutoSp()); + + if (null != executionContext.getTransaction()) { + // TRX_CLASS + variables.put( + "TRX_CLASS", + executionContext.getTransaction().getClass().getSimpleName() + ); + } + + // TRX_RECOVER + variables.put( + ConnectionProperties.ENABLE_TRANSACTION_RECOVER_TASK.toLowerCase(Locale.ROOT), + InstConfUtil.getBool(ConnectionParams.ENABLE_TRANSACTION_RECOVER_TASK) + ); + + // TRX_LOG_METHOD + variables.put( + ConnectionProperties.TRX_LOG_METHOD.toLowerCase(Locale.ROOT), + DynamicConfig.getInstance().getTrxLogMethod() + ); + + // INSTANCE_READ_ONLY + variables.put( + ConnectionProperties.INSTANCE_READ_ONLY.toLowerCase(Locale.ROOT), + MetaDbInstConfigManager.getInstance().getCnVariableConfigMap() + .getProperty(ConnectionProperties.INSTANCE_READ_ONLY, "false")); } public void updateReturnVariables(TreeMap variables, ExecutionContext executionContext) { @@ -154,22 +207,17 @@ public void updateReturnVariables(TreeMap variables, ExecutionCo SSLVariables.fill(variables); if (variables.containsKey("max_allowed_packet")) { - String maxAllowedPacket = System.getProperty("maxAllowedPacket", String.valueOf(1024 * 1024)); String maxAllowedPacketCustom = String.valueOf(executionContext.getParamManager().getLong(ConnectionParams.MAX_ALLOWED_PACKET)); - if (StringUtils.isNotEmpty(maxAllowedPacketCustom)) { - maxAllowedPacket = maxAllowedPacketCustom; - } - - variables.put("max_allowed_packet", maxAllowedPacket); + variables.put("max_allowed_packet", maxAllowedPacketCustom); } if (variables.containsKey("max_user_connections")) { - variables.put("max_user_connections", System.getProperty("maxConnection", "20000")); + variables.put("max_user_connections", DynamicConfig.getInstance().getMaxConnections()); } if (variables.containsKey("max_connections")) { - variables.put("max_connections", System.getProperty("maxConnection", "20000")); + variables.put("max_connections", DynamicConfig.getInstance().getMaxConnections()); } if (variables.containsKey("autocommit")) { @@ -183,9 +231,11 @@ public void updateReturnVariables(TreeMap variables, ExecutionCo if (variables.containsKey("read_only")) { if (ConfigDataMode.isMasterMode()) { variables.put("read_only", "OFF"); - } else if (ConfigDataMode.isSlaveMode()) { + } else if (ConfigDataMode.isReadOnlyMode()) { variables.put("read_only", "ON"); } + } else if (ConfigDataMode.isReadOnlyMode()) { + variables.put("read_only", "ON"); } boolean allSequencesGroupOrTime = SequenceManagerProxy.getInstance() diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalTableDataMigrationBackfillHandler.java
b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalTableDataMigrationBackfillHandler.java index df3daa586..bdcaa9a86 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalTableDataMigrationBackfillHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalTableDataMigrationBackfillHandler.java @@ -21,43 +21,32 @@ import com.alibaba.polardbx.common.exception.TddlNestableRuntimeException; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; -import com.alibaba.polardbx.common.model.Group; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.properties.ConnectionProperties; +import com.alibaba.polardbx.executor.backfill.Loader; import com.alibaba.polardbx.executor.common.ExecutorContext; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; -import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlHelper; import com.alibaba.polardbx.executor.fastchecker.FastChecker; import com.alibaba.polardbx.executor.gsi.BackfillExecutor; import com.alibaba.polardbx.executor.handler.HandlerCommon; -import com.alibaba.polardbx.executor.handler.LogicalShowTopologyHandler; import com.alibaba.polardbx.executor.partitionmanagement.fastchecker.LogicalTableDataMigrationFastChecker; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.gms.metadb.table.IndexStatus; import com.alibaba.polardbx.optimizer.config.table.GsiMetaManager; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.LogicalTableDataMigrationBackfill; -import com.alibaba.polardbx.optimizer.core.rel.MoveTableBackfill; import com.alibaba.polardbx.optimizer.utils.PhyTableOperationUtil; import com.alibaba.polardbx.optimizer.utils.QueryConcurrencyPolicy; import com.alibaba.polardbx.statistics.SQLRecorderLogger; import org.apache.calcite.rel.RelNode; -import org.apache.calcite.sql.SqlShowTopology; -import org.apache.commons.lang3.StringUtils; import java.text.MessageFormat; import java.util.ArrayList; import java.util.EnumSet; import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; -import java.util.TreeSet; -import java.util.concurrent.TimeUnit; import static com.alibaba.polardbx.executor.utils.ExecUtils.getQueryConcurrencyPolicy; -import static com.alibaba.polardbx.gms.topology.SystemDbHelper.DEFAULT_DB_NAME; /** * Created by zhuqiwei. 
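A note on the hunk that follows: when the backfill loader can use the INSERT ... RETURNING optimization, the handler pins the query concurrency policy to GROUP_CONCURRENT_BLOCK instead of the context-derived policy. A minimal sketch of that selection, reusing names visible in this patch (Loader.canUseBackfillReturning, QueryConcurrencyPolicy, the statically imported getQueryConcurrencyPolicy); the helper method itself is hypothetical:

    // Sketch (assumption): choose the backfill concurrency policy.
    // resolveBackfillPolicy is a hypothetical name; the calls mirror the hunk below.
    QueryConcurrencyPolicy resolveBackfillPolicy(ExecutionContext ec, String dstSchemaName) {
        QueryConcurrencyPolicy policy = getQueryConcurrencyPolicy(ec);
        if (Loader.canUseBackfillReturning(ec, dstSchemaName)) {
            // RETURNING-based writes are batched per physical group, so fall back
            // to one concurrent block per group to keep each group's writes ordered.
            policy = QueryConcurrencyPolicy.GROUP_CONCURRENT_BLOCK;
        }
        return policy;
    }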
@@ -79,6 +68,9 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext ec) { BackfillExecutor backfillExecutor = new BackfillExecutor((List inputs, ExecutionContext executionContext) -> { QueryConcurrencyPolicy queryConcurrencyPolicy = getQueryConcurrencyPolicy(executionContext); + if (Loader.canUseBackfillReturning(executionContext, dstSchemaName)) { + queryConcurrencyPolicy = QueryConcurrencyPolicy.GROUP_CONCURRENT_BLOCK; + } List inputCursors = new ArrayList<>(inputs.size()); executeWithConcurrentPolicy(executionContext, inputs, queryConcurrencyPolicy, inputCursors, dstSchemaName); @@ -86,7 +78,10 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext ec) { }); ec = clearSqlMode(ec.copy()); - upgradeEncoding(ec, srcSchemaName, srcLogicalTable); + + if (!ec.getParamManager().getBoolean(ConnectionParams.BACKFILL_USING_BINARY)) { + upgradeEncoding(ec, srcSchemaName, srcLogicalTable); + } PhyTableOperationUtil.disableIntraGroupParallelism(dstSchemaName, ec); @@ -120,66 +115,41 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext ec) { protected boolean fastcheck(String schemaSrc, String schemaDst, String logicalTableSrc, String logicalTableDst, ExecutionContext ec) { long startTime = System.currentTimeMillis(); - SQLRecorderLogger.ddlLogger.warn(MessageFormat.format( + SQLRecorderLogger.ddlLogger.info(MessageFormat.format( "FastChecker for table data migration, srcSchema [{0}] dstSchema [{1}] logical table src [{2}] logical table dst [{3}]start", schemaSrc, schemaDst, logicalTableSrc, logicalTableDst)); - final int fastCheckerParallelism = - ec.getParamManager().getInt(ConnectionParams.CREATE_DATABASE_AS_FASTCHECKER_PARALLELISM); List fastCheckers = - LogicalTableDataMigrationFastChecker.create(schemaSrc, schemaDst, logicalTableSrc, logicalTableDst, - fastCheckerParallelism, ec); + LogicalTableDataMigrationFastChecker.create(schemaSrc, schemaDst, logicalTableSrc, logicalTableDst, ec); if (fastCheckers == null || fastCheckers.isEmpty()) { throw new TddlRuntimeException(ErrorCode.ERR_EXECUTOR, "failed to create table data migration fastchecker"); } boolean checkResult = false; - final int maxRetryTimes = - ec.getParamManager().getInt(ConnectionParams.CREATE_DATABASE_AS_FASTCHECKER_RETRY_TIMES); - - int tryTimes = 0; - while (tryTimes < maxRetryTimes && checkResult == false) { - try { - boolean singleResult = true; - for (FastChecker fastChecker : fastCheckers) { - singleResult = fastChecker.check(ec); - if (singleResult == false) { - break; - } - } - checkResult = singleResult; - } catch (TddlNestableRuntimeException e) { - if (StringUtils.containsIgnoreCase(e.getMessage(), "acquire lock timeout")) { - //if acquire lock timeout, we will retry - if (tryTimes < maxRetryTimes - 1) { - try { - TimeUnit.MILLISECONDS.sleep(2000L * (1 + tryTimes)); - } catch (InterruptedException ex) { - throw new TddlNestableRuntimeException(ex); - } - continue; - } else { - throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, - "table data migration fastchecker retry exceed max times", e); - } - } else if (StringUtils.containsIgnoreCase(e.getMessage(), "fetch phy table digest timeout")) { - throw e; - } else { - //other exception, we simply throw out - throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, e, - "table data migration fastchecker failed to check"); - } - } finally { - tryTimes += 1; - SQLRecorderLogger.ddlLogger.warn(MessageFormat.format( - "FastChecker for table data migration, srcSchema [{0}] dstSchema [{1}] logical tableSrc [{2}] logical tableDst [{3}] 
finish, time use [{4}], check result [{5}]", - schemaSrc, schemaDst, logicalTableSrc, logicalTableDst, - (System.currentTimeMillis() - startTime) / 1000.0, checkResult ? "pass" : "not pass")); - if (!checkResult) { - EventLogger.log(EventType.DDL_WARN, "FastChecker failed"); + try { + boolean singleResult = true; + for (FastChecker fastChecker : fastCheckers) { + singleResult = fastChecker.check(ec); + if (singleResult == false) { + break; } } + checkResult = singleResult; + } catch (TddlNestableRuntimeException e) { + throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, e, + "table data migration fastchecker failed to check"); + } finally { + SQLRecorderLogger.ddlLogger.info(MessageFormat.format( + "FastChecker for table data migration, srcSchema [{0}] dstSchema [{1}] logical tableSrc [{2}] logical tableDst [{3}] finish, time use [{4}], check result [{5}]", + schemaSrc, schemaDst, logicalTableSrc, logicalTableDst, + (System.currentTimeMillis() - startTime) / 1000.0, checkResult ? "pass" : "not pass")); + if (!checkResult) { + EventLogger.log(EventType.DDL_WARN, "FastChecker failed"); + } else { + EventLogger.log(EventType.DDL_INFO, "FastChecker succeed"); + } } + return checkResult; } @@ -194,7 +164,7 @@ protected List getTableGsiNames(String schemaName, String primaryTableNa metaManager.getTableAndIndexMeta(primaryTableName, EnumSet.of(IndexStatus.PUBLIC)); for (GsiMetaManager.GsiTableMetaBean bean : meta.getTableMeta().values()) { - if (bean.gsiMetaBean != null) { + if (bean.gsiMetaBean != null && !bean.gsiMetaBean.columnarIndex) { GsiMetaManager.GsiIndexMetaBean bean1 = bean.gsiMetaBean; allGsiNames.add(bean1.indexName); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalUpsertHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalUpsertHandler.java index e51190b82..286fb2c05 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalUpsertHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalUpsertHandler.java @@ -44,6 +44,7 @@ import com.alibaba.polardbx.optimizer.core.rel.LogicalDynamicValues; import com.alibaba.polardbx.optimizer.core.rel.LogicalInsert; import com.alibaba.polardbx.optimizer.core.rel.LogicalUpsert; +import com.alibaba.polardbx.optimizer.core.rel.dml.DistinctWriter; import com.alibaba.polardbx.optimizer.core.rel.dml.Writer; import com.alibaba.polardbx.optimizer.core.rel.dml.util.DuplicateCheckResult; import com.alibaba.polardbx.optimizer.core.rel.dml.util.RowClassifier; @@ -84,6 +85,7 @@ import java.util.TreeSet; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiPredicate; +import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -118,8 +120,9 @@ protected int doExecute(LogicalInsert insert, ExecutionContext executionContext, final boolean checkPrimaryKey = executionContext.getParamManager().getBoolean(ConnectionParams.PRIMARY_KEY_CHECK) && !upsert.isPushablePrimaryKeyCheck(); + TableMeta tableMeta = executionContext.getSchemaManager(schemaName).getTable(tableName); final boolean checkForeignKey = - executionContext.foreignKeyChecks(); + executionContext.foreignKeyChecks() && (tableMeta.hasForeignKey() || tableMeta.hasReferencedForeignKey()); // For batch upsert, change params index. 
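The doExecute change just above gates all foreign key work on table metadata, so tables that neither declare nor are referenced by a foreign key skip the check entirely. A small sketch of that gate, under the assumption that the TableMeta accessors behave as the hunk suggests (the standalone method is hypothetical); the batch-size branch that follows then re-indexes the parameters, as the preceding comment notes:

    // Sketch (assumption): FK checks run only when the session enables them
    // and the table actually participates in a foreign key relationship.
    boolean shouldCheckForeignKey(ExecutionContext ec, TableMeta tableMeta) {
        return ec.foreignKeyChecks()                     // session/global FK switch
            && (tableMeta.hasForeignKey()                // table references a parent
                || tableMeta.hasReferencedForeignKey()); // table is referenced as a parent
    }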
if (upsert.getBatchSize() > 0) { @@ -166,8 +169,9 @@ protected int doExecute(LogicalInsert insert, ExecutionContext executionContext, final List classifiedRows = new ArrayList<>(bindInsertRows(upsert, selectedRows, convertedValues, upsertEc, usePartFieldChecker)); - if (checkPrimaryKey || checkForeignKey) { - // TODO(qianjing): support upsert Pk Fk check, now we just throw exception in OptimizeLogicalInsertRule + if (checkForeignKey) { + fkConstraintAndCascade(upsertEc, executionContext, upsert, schemaName, tableName, upsert.getInput(), + classifiedRows); } upsertEc.setPhySqlId(upsertEc.getPhySqlId() + 1); @@ -419,6 +423,8 @@ protected List bindInsertRows(LogicalUpsert upsert, upsert.isHasJsonColumn() && upsertEc.getParamManager() .getBoolean(ConnectionParams.DML_SKIP_IDENTICAL_JSON_ROW_CHECK))) && upsertEc.getParamManager() .getBoolean(ConnectionParams.DML_SKIP_TRIVIAL_UPDATE); + final boolean checkJsonByStringCompare = + upsertEc.getParamManager().getBoolean(ConnectionParams.DML_CHECK_JSON_BY_STRING_COMPARE); final Map columnValueMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); Ord.zip(insertColumns).forEach(o -> columnValueMap.put(o.getValue(), rexRow.get(o.getKey()))); @@ -570,7 +576,8 @@ protected List bindInsertRows(LogicalUpsert upsert, } if (skipTrivialUpdate && !rowResult.doInsert) { - rowResult.trivial = identicalRow(row.before, row.after, rowColumnMeta); + rowResult.trivial = + identicalRow(row.before, row.after, rowColumnMeta, checkJsonByStringCompare); } // should deal affected rows inside update check @@ -790,6 +797,37 @@ private static List>> buildD return ukCheckerMapList; } + void fkConstraintAndCascade(ExecutionContext fkEc, ExecutionContext ec, LogicalUpsert upsert, String schemaName, + String tableName, RelNode input, List classifiedRows) { + // Enforce FK constraints for the inserted rows, then apply FK cascades for the updated rows of the upsert + Map>>> fkPlans = upsert.getFkPlans(); + List insertColumns = input.getRowType().getFieldNames().stream().map(String::toUpperCase).collect( + Collectors.toList()); + List> values = + getInputValues(RelUtils.getRelInput(upsert), ec.getParams().getBatchParameters(), ec); + + // Constraint for insert + beforeInsertCheck(upsert, values, insertColumns, false, true, ec); + + // Cascade for update + values.clear(); + DistinctWriter writer; + if (!upsert.isModifyPartitionKey()) { + writer = (upsert).getPrimaryUpsertWriter().getUpdaterWriter(); + } else { + writer = (upsert).getPrimaryRelocateWriter().getModifyWriter(); + } + final Function rowBuilder = (wr) -> SourceRows.createFromValues(classifiedRows); + if (writer != null) { + final SourceRows duplicatedRows = rowBuilder.apply(writer); + for (DuplicateCheckResult duplicateCheckResult : duplicatedRows.valueRows) { + values.add(duplicateCheckResult.updateSource); + } + } + + beforeUpdateFkCascade(upsert, schemaName, tableName, fkEc, values, null, null, fkPlans, 1); + } + private static class DuplicateCheckRow implements Comparable { /** * Origin row value from table, null if to-be-inserted row not duplicate with existing row in table */ @@ -893,9 +931,12 @@ public void doUpdate(LogicalUpsert upsert, Map newRow upsertEc.getParamManager().getBoolean(ConnectionParams.DML_SKIP_IDENTICAL_ROW_CHECK) || ( upsert.isHasJsonColumn() && upsertEc.getParamManager() .getBoolean(ConnectionParams.DML_SKIP_IDENTICAL_JSON_ROW_CHECK)); + final boolean checkJsonByStringCompare = + upsertEc.getParamManager().getBoolean(ConnectionParams.DML_CHECK_JSON_BY_STRING_COMPARE); // Check identical row final boolean isIdenticalRow = - !skipIdenticalRowCheck && 
identicalRow(withoutAppended, this.after, rowColumnMeta); + !skipIdenticalRowCheck && identicalRow(withoutAppended, this.after, rowColumnMeta, + checkJsonByStringCompare); final List updated = isIdenticalRow ? withoutAppended : withAppended; this.affectedRows += isIdenticalRow ? (upsertEc.isClientFoundRows() ? 1 : 0) : 2; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalViewHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalViewHandler.java index b12829529..b30a75c36 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalViewHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/LogicalViewHandler.java @@ -19,18 +19,18 @@ import com.alibaba.polardbx.common.jdbc.ParameterContext; import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.druid.sql.ast.SqlType; import com.alibaba.polardbx.executor.common.ExecutorContext; import com.alibaba.polardbx.executor.cursor.AbstractCursor; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.cursor.impl.GatherCursor; import com.alibaba.polardbx.executor.cursor.impl.LogicalViewResultCursor; -import com.alibaba.polardbx.executor.cursor.impl.MultiCursorAdapter; import com.alibaba.polardbx.executor.handler.HandlerCommon; import com.alibaba.polardbx.executor.utils.ExecUtils; import com.alibaba.polardbx.executor.utils.SubqueryUtils; import com.alibaba.polardbx.optimizer.config.table.ColumnMeta; import com.alibaba.polardbx.optimizer.context.ExecutionContext; -import com.alibaba.polardbx.optimizer.context.ScalarSubQueryExecContext; import com.alibaba.polardbx.optimizer.core.rel.LogicalView; import com.alibaba.polardbx.optimizer.core.rel.ReplaceCallWithLiteralVisitor; import com.alibaba.polardbx.optimizer.utils.CalciteUtils; @@ -66,7 +66,7 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { throw new RuntimeException("multi-get is not supported in Cursor executor"); } - PhyTableOperationUtil.enableIntraGroupParallelism(logicalView.getSchemaName(),executionContext); + PhyTableOperationUtil.enableIntraGroupParallelism(logicalView.getSchemaName(), executionContext); QueryConcurrencyPolicy queryConcurrencyPolicy = ExecUtils.getQueryConcurrencyPolicy(executionContext, logicalView); @@ -81,8 +81,13 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { if (executionContext.getParams() != null) { params = executionContext.getParams().getCurrentParameter(); } - ReplaceCallWithLiteralVisitor visitor = new ReplaceCallWithLiteralVisitor(Lists.newArrayList(), - params, RexUtils.getEvalFunc(executionContext), true); + ReplaceCallWithLiteralVisitor visitor = null; + // If the statement is not a SELECT, or it queries with flashback, non-deterministic function calls must be replaced with literals + if (executionContext.getSqlType() != SqlType.SELECT || logicalView.getFlashback() != null) { + visitor = new ReplaceCallWithLiteralVisitor(Lists.newArrayList(), + params, RexUtils.getEvalFunc(executionContext), true); + } + // Dynamic functions will be calculated in buildSqlTemplate() final SqlSelect sqlTemplate = (SqlSelect) logicalView.getSqlTemplate(visitor, executionContext); if (executionContext.isModifyCrossDb()) { @@ -129,7 +134,11 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { LogicalViewResultCursor lvRs = new 
LogicalViewResultCursor(resultCursor, executionContext); newInputCursors.add(lvRs); } - return MultiCursorAdapter.wrap(newInputCursors); + if (newInputCursors.size() == 1) { + return newInputCursors.get(0); + } else { + return new GatherCursor(newInputCursors, executionContext); + } } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/MergeSortHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/MergeSortHandler.java index 90abfaa44..72e34b3a8 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/MergeSortHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/MergeSortHandler.java @@ -19,8 +19,8 @@ import com.alibaba.polardbx.common.jdbc.ParameterContext; import com.alibaba.polardbx.executor.ExecutorHelper; import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.GatherCursor; import com.alibaba.polardbx.executor.cursor.impl.MergeSortCursor; -import com.alibaba.polardbx.executor.cursor.impl.MultiCursorAdapter; import com.alibaba.polardbx.executor.handler.HandlerCommon; import com.alibaba.polardbx.executor.utils.ExecUtils; import com.alibaba.polardbx.executor.utils.OrderByOption; @@ -31,8 +31,8 @@ import org.apache.calcite.rel.RelNode; import org.apache.calcite.rex.RexDynamicParam; import org.apache.calcite.rex.RexLiteral; +import com.google.common.collect.Lists; -import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -71,16 +71,14 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { } } - List inputCursors = new ArrayList<>(); - Cursor inputCursor = ExecutorHelper.executeByCursor(mergeSort.getInput(), executionContext, false); - if (inputCursor instanceof MultiCursorAdapter) { - inputCursors.addAll(((MultiCursorAdapter) inputCursor).getSubCursors()); + if (inputCursor instanceof GatherCursor) { + return new MergeSortCursor( + executionContext, ((GatherCursor) inputCursor).getCursors(), orderBys, skip, fetch); } else { - inputCursors.add(inputCursor); + return new MergeSortCursor( + executionContext, Lists.newArrayList(inputCursor), orderBys, skip, fetch); } - - return new MergeSortCursor(executionContext, inputCursors, orderBys, skip, fetch); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/MoveTableBackfillHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/MoveTableBackfillHandler.java index 61f19ac6e..fbcc4c08b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/MoveTableBackfillHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/MoveTableBackfillHandler.java @@ -35,7 +35,6 @@ import com.alibaba.polardbx.executor.scaleout.corrector.MoveTableReporter; import com.alibaba.polardbx.executor.scaleout.fastchecker.MoveTableFastChecker; import com.alibaba.polardbx.executor.spi.IRepository; -import com.alibaba.polardbx.executor.ddl.util.ChangeSetUtils; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.MoveTableBackfill; import com.alibaba.polardbx.optimizer.utils.PhyTableOperationUtil; @@ -85,7 +84,9 @@ public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { PhyTableOperationUtil.disableIntraGroupParallelism(schemaName, executionContext); - upgradeEncoding(executionContext, schemaName, logicalTable); + if 
(!executionContext.getParamManager().getBoolean(ConnectionParams.BACKFILL_USING_BINARY)) { + upgradeEncoding(executionContext, schemaName, logicalTable); + } Map> sourcePhyTables = backfill.getSourcePhyTables(); Map> targetPhyTables = backfill.getTargetPhyTables(); @@ -138,54 +139,31 @@ boolean fastCheck(MoveTableBackfill backfill, String schemaName = backfill.getSchemaName(); String logicalTable = backfill.getLogicalTableName(); - SQLRecorderLogger.ddlLogger.warn(MessageFormat.format( + SQLRecorderLogger.ddlLogger.info(MessageFormat.format( "FastChecker for move table, schema [{0}] logical src table [{1}] logic dst table [{2}] start", schemaName, logicalTable, logicalTable)); - final int fastCheckerParallelism = - executionContext.getParamManager().getInt(ConnectionParams.SCALEOUT_FASTCHECKER_PARALLELISM); - FastChecker fastChecker = MoveTableFastChecker - .create(schemaName, backfill.getLogicalTableName(), backfill.getSourceTargetGroup(), + .create(schemaName, backfill.getLogicalTableName(), backfill.getSourcePhyTables(), - backfill.getTargetPhyTables(), fastCheckerParallelism, executionContext); + backfill.getTargetPhyTables(), executionContext); boolean fastCheckResult = false; - final int maxRetryTimes = - executionContext.getParamManager().getInt(ConnectionParams.FASTCHECKER_RETRY_TIMES); - - int tryTimes = 0; - while (tryTimes < maxRetryTimes && fastCheckResult == false) { - try { - fastCheckResult = fastChecker.check(executionContext); - } catch (TddlNestableRuntimeException e) { - if (StringUtils.containsIgnoreCase(e.getMessage(), "acquire lock timeout")) { - //if acquire lock timeout, we will retry - if (tryTimes < maxRetryTimes - 1) { - try { - TimeUnit.MILLISECONDS.sleep(2000L * (1 + tryTimes)); - } catch (InterruptedException ex) { - throw new TddlNestableRuntimeException(ex); - } - continue; - } else { - throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, - "move table fastchecker retry exceed max times", e); - } - } else { - //other exception, we simply throw out - throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, e, - "move table fastchecker failed to check"); - } - } finally { - tryTimes += 1; - SQLRecorderLogger.ddlLogger.warn(MessageFormat.format( - "FastChecker for move table, schema [{0}] logical src table [{1}] logic dst table [{2}] finish, time use [{3}], check result [{4}]", - schemaName, logicalTable, logicalTable, - (System.currentTimeMillis() - startTime) / 1000.0, - fastCheckResult ? "pass" : "not pass") - ); - if (!fastCheckResult) { - EventLogger.log(EventType.DDL_WARN, "FastChecker failed"); - } + + try { + fastCheckResult = fastChecker.check(executionContext); + } catch (TddlNestableRuntimeException e) { + throw new TddlRuntimeException(ErrorCode.ERR_SCALEOUT_EXECUTE, e, + "move table fastchecker failed to check"); + } finally { + SQLRecorderLogger.ddlLogger.info(MessageFormat.format( + "FastChecker for move table, schema [{0}] logical src table [{1}] logic dst table [{2}] finish, time use [{3}], check result [{4}]", + schemaName, logicalTable, logicalTable, + (System.currentTimeMillis() - startTime) / 1000.0, + fastCheckResult ? 
"pass" : "not pass") + ); + if (!fastCheckResult) { + EventLogger.log(EventType.DDL_WARN, "FastChecker failed"); + } else { + EventLogger.log(EventType.DDL_INFO, "FastChecker succeed"); } } return fastCheckResult; @@ -202,6 +180,7 @@ void checkInCN(MoveTableBackfill backfill, ExecutionContext executionContext) { executionContext.getParamManager().getLong(ConnectionParams.SCALEOUT_CHECK_PARALLELISM); final long earlyFailNumber = executionContext.getParamManager().getLong(ConnectionParams.SCALEOUT_EARLY_FAIL_NUMBER); + final boolean useBinary = executionContext.getParamManager().getBoolean(ConnectionParams.BACKFILL_USING_BINARY); String schemaName = backfill.getSchemaName(); String logicalTable = backfill.getLogicalTableName(); @@ -216,6 +195,7 @@ void checkInCN(MoveTableBackfill backfill, ExecutionContext executionContext) { speedMin, speedLimit, parallelism, + useBinary, SqlSelect.LockMode.UNDEF, SqlSelect.LockMode.UNDEF, executionContext, diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/PhysicalBackfillHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/PhysicalBackfillHandler.java new file mode 100644 index 000000000..952fd58bf --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/PhysicalBackfillHandler.java @@ -0,0 +1,67 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.alibaba.polardbx.repo.mysql.handler; + +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; +import com.alibaba.polardbx.executor.handler.HandlerCommon; +import com.alibaba.polardbx.executor.physicalbackfill.PhysicalBackfillExecutor; +import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.rel.PhysicalBackfill; +import com.alibaba.polardbx.optimizer.utils.PhyTableOperationUtil; +import org.apache.calcite.rel.RelNode; + +import java.util.Map; +import java.util.Set; + +import static com.alibaba.polardbx.executor.utils.ExecUtils.getQueryConcurrencyPolicy; + +/** + * Created by luoyanxin. 
+ * + * @author luoyanxin + */ +public class PhysicalBackfillHandler extends HandlerCommon { + + public PhysicalBackfillHandler(IRepository repo) { + super(repo); + } + + @Override + public Cursor handle(RelNode logicalPlan, ExecutionContext executionContext) { + PhysicalBackfill backfill = (PhysicalBackfill) logicalPlan; + String schemaName = backfill.getSchemaName(); + String logicalTable = backfill.getLogicalTableName(); + + PhysicalBackfillExecutor backfillExecutor = new PhysicalBackfillExecutor(); + + executionContext = clearSqlMode(executionContext.copy()); + + upgradeEncoding(executionContext, schemaName, logicalTable); + + PhyTableOperationUtil.disableIntraGroupParallelism(schemaName, executionContext); + + Map> sourcePhyTables = backfill.getSourcePhyTables(); + Map> targetPhyTables = backfill.getTargetPhyTables(); + Map sourceTargetGroup = backfill.getSourceTargetGroup(); + boolean isBroadcast = backfill.getBroadcast(); + backfillExecutor.backfill(schemaName, logicalTable, sourcePhyTables, targetPhyTables, sourceTargetGroup, + isBroadcast, executionContext); + return new AffectRowCursor(0); + } +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/DDLNewHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/RefreshLocalRulesHandler.java similarity index 100% rename from polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/DDLNewHandler.java rename to polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/RefreshLocalRulesHandler.java diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ShowColumnarIndexHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ShowColumnarIndexHandler.java new file mode 100644 index 000000000..b4d623671 --- /dev/null +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ShowColumnarIndexHandler.java @@ -0,0 +1,179 @@ +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.repo.mysql.handler; + +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.utils.logger.Logger; +import com.alibaba.polardbx.common.utils.logger.LoggerFactory; +import com.alibaba.polardbx.druid.util.JdbcUtils; +import com.alibaba.polardbx.executor.common.ExecutorContext; +import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; +import com.alibaba.polardbx.executor.handler.HandlerCommon; +import com.alibaba.polardbx.executor.spi.IRepository; +import com.alibaba.polardbx.gms.metadb.MetaDbDataSource; +import com.alibaba.polardbx.gms.metadb.table.ColumnarTableEvolutionAccessor; +import com.alibaba.polardbx.gms.metadb.table.ColumnarTableEvolutionRecord; +import com.alibaba.polardbx.gms.metadb.table.IndexStatus; +import com.alibaba.polardbx.optimizer.OptimizerContext; +import com.alibaba.polardbx.optimizer.config.table.GsiMetaManager; +import com.alibaba.polardbx.optimizer.config.table.SchemaManager; +import com.alibaba.polardbx.optimizer.config.table.TableMeta; +import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; +import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; +import com.alibaba.polardbx.optimizer.exception.TableNotFoundException; +import com.alibaba.polardbx.optimizer.partition.PartitionByDefinition; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.sql.SqlIdentifier; +import org.apache.calcite.sql.SqlShowColumnarIndex; +import org.apache.commons.collections.CollectionUtils; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * @version 1.0 + */ +public class ShowColumnarIndexHandler extends HandlerCommon { + private static final Logger logger = LoggerFactory.getLogger(ShowColumnarIndexHandler.class); + + public ShowColumnarIndexHandler(IRepository repo) { + super(repo); + } + + @Override + public Cursor handle(final RelNode logicalPlan, ExecutionContext executionContext) { + ArrayResultCursor resultCursor = buildResultCursor(); + + SqlShowColumnarIndex showGlobalIndex = (SqlShowColumnarIndex) ((LogicalDal) logicalPlan).getNativeSqlNode(); + SqlIdentifier tableNameNode = (SqlIdentifier) showGlobalIndex.getTable(); + String schemaName = (tableNameNode != null && 2 == tableNameNode.names.size()) ? 
tableNameNode.names.get(0) : + executionContext.getSchemaName(); + ExecutorContext executorContext = ExecutorContext.getContext(schemaName); + if (null == executorContext) { + throw new TddlRuntimeException(ErrorCode.ERR_UNKNOWN_DATABASE, schemaName); + } + + GsiMetaManager metaManager = executorContext.getGsiManager().getGsiMetaManager(); + + GsiMetaManager.GsiMetaBean meta; + if (null == tableNameNode) { + meta = metaManager.getAllGsiMetaBean(schemaName); + } else { + String tableName = tableNameNode.getLastName(); + SchemaManager sm = OptimizerContext.getContext(schemaName).getLatestSchemaManager(); + TableMeta tableMeta = sm.getTableWithNull(tableName); + if (tableMeta == null) { + throw new TableNotFoundException(ErrorCode.ERR_TABLE_NOT_EXIST, tableName); + } + meta = metaManager.getTableAndIndexMeta(tableName, IndexStatus.ALL); + } + + SchemaManager schemaManager = executionContext.getSchemaManager(); + Connection metaDbConn = null; + try { + try { + metaDbConn = MetaDbDataSource.getInstance().getConnection(); + } catch (Exception e) { + // ignore + logger.warn("failed to get metadb connection: ", e); + } + for (GsiMetaManager.GsiTableMetaBean bean : meta.getTableMeta().values()) { + if (bean.gsiMetaBean != null && bean.gsiMetaBean.columnarIndex) { + GsiMetaManager.GsiIndexMetaBean bean1 = bean.gsiMetaBean; + String pkNames = + bean1.indexColumns.stream().map(col -> col.columnName).collect(Collectors.joining(", ")); + String partitionKeyNames = pkNames; + String coveringNames = + bean1.coveringColumns.stream().map(col -> col.columnName).collect(Collectors.joining(", ")); + String partitionStrategy = ""; + Integer partitionCount = null; + TableMeta tableMeta = schemaManager.getTable(bean1.indexName); + if (tableMeta != null && tableMeta.getPartitionInfo() != null) { + PartitionByDefinition partitionBy = tableMeta.getPartitionInfo().getPartitionBy(); + partitionStrategy = partitionBy.getStrategy().name(); + partitionCount = partitionBy.getPartitions().size(); + partitionKeyNames = String.join(", ", partitionBy.getPartitionColumnNameList()); + } + + String options = null; + if (metaDbConn != null) { + try { + options = getColumnarIndexOptions(metaDbConn, bean1.tableSchema, bean1.indexName); + } catch (Exception e) { + // ignore + logger.warn("failed to query columnar option info", e); + } + } + + Object[] row = new Object[] { + bean1.tableSchema, bean1.tableName, bean1.indexName, Boolean.toString(bean1.clusteredIndex), + pkNames, coveringNames, partitionKeyNames, partitionStrategy, partitionCount, + pkNames, options, bean1.indexStatus}; + resultCursor.addRow(row); + } + } + } finally { + JdbcUtils.close(metaDbConn); + } + + return resultCursor; + } + + private String getColumnarIndexOptions(Connection metaDbConn, + String schemaName, + String indexName) { + ColumnarTableEvolutionAccessor accessor = new ColumnarTableEvolutionAccessor(); + accessor.setConnection(metaDbConn); + List records = + accessor.querySchemaIndexLatest(schemaName, indexName); + if (CollectionUtils.isEmpty(records)) { + logger.warn("empty columnar_table_evolution record: " + indexName); + return null; + } + Map options = records.get(0).options; + return ColumnarTableEvolutionRecord.serializeToJson(options); + } + + private ArrayResultCursor buildResultCursor() { + + ArrayResultCursor resultCursor = new ArrayResultCursor("COLUMNAR_INDEXES"); + + resultCursor.addColumn("SCHEMA", DataTypes.StringType); + resultCursor.addColumn("TABLE", DataTypes.StringType); + resultCursor.addColumn("INDEX_NAME", DataTypes.StringType); 
+ resultCursor.addColumn("CLUSTERED", DataTypes.StringType); + resultCursor.addColumn("PK_NAMES", DataTypes.StringType); + resultCursor.addColumn("COVERING_NAMES", DataTypes.StringType); + resultCursor.addColumn("PARTITION_KEY", DataTypes.StringType); + resultCursor.addColumn("PARTITION_STRATEGY", DataTypes.StringType); + resultCursor.addColumn("PARTITION_COUNT", DataTypes.IntegerType); + resultCursor.addColumn("SORT_KEY", DataTypes.StringType); + resultCursor.addColumn("OPTIONS", DataTypes.StringType); + resultCursor.addColumn("STATUS", DataTypes.StringType); + + resultCursor.initMeta(); + + return resultCursor; + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ShowGlobalIndexHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ShowGlobalIndexHandler.java index cccf277a3..e4a8a9995 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ShowGlobalIndexHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ShowGlobalIndexHandler.java @@ -70,7 +70,7 @@ public Cursor handle(final RelNode logicalPlan, ExecutionContext executionContex } for (GsiMetaManager.GsiTableMetaBean bean : meta.getTableMeta().values()) { - if (bean.gsiMetaBean != null) { + if (bean.gsiMetaBean != null && !bean.gsiMetaBean.columnarIndex) { GsiMetaManager.GsiIndexMetaBean bean1 = bean.gsiMetaBean; Object[] row = new Object[] { bean1.tableSchema, bean1.tableName, bean1.nonUnique ? 1 : 0, bean1.indexName, diff --git a/polardbx-server/src/main/java/com/alibaba/polardbx/config/loader/ManagerAppLoader.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/AsyncDDLInspectJobCacheHandler.java similarity index 100% rename from polardbx-server/src/main/java/com/alibaba/polardbx/config/loader/ManagerAppLoader.java rename to polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/AsyncDDLInspectJobCacheHandler.java diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineCancelJobsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineCancelJobsHandler.java index d38f05b0b..847fad5ce 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineCancelJobsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineCancelJobsHandler.java @@ -22,6 +22,7 @@ import com.alibaba.polardbx.common.ddl.newengine.DdlTaskState; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; +import com.alibaba.polardbx.common.properties.ConfigParam; import com.alibaba.polardbx.common.properties.ConnectionParams; import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.common.utils.TStringUtil; @@ -44,10 +45,12 @@ import org.apache.commons.lang3.StringUtils; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Optional; import java.util.stream.Collectors; +import static com.alibaba.polardbx.common.ddl.newengine.DdlPlanState.PAUSE_ON_NON_MAINTENANCE_WINDOW; import static com.alibaba.polardbx.common.ddl.newengine.DdlPlanState.SUCCESS; import static com.alibaba.polardbx.common.ddl.newengine.DdlPlanState.TERMINATED; import static com.alibaba.polardbx.common.ddl.newengine.DdlType.ALTER_TABLEGROUP; @@ -63,20 +66,40 @@ public 
DdlEngineCancelJobsHandler(IRepository repo) { @Override public Cursor doHandle(final LogicalDal logicalPlan, ExecutionContext executionContext) { SqlCancelDdlJob command = (SqlCancelDdlJob) logicalPlan.getNativeSqlNode(); - return doCancel(command.isAll(), command.getJobIds(), executionContext); + + if (command.isAll()) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Operation on multi ddl jobs is not allowed"); + } + + if (command.getJobIds() == null || command.getJobIds().isEmpty()) { + return new AffectRowCursor(0); + } + + if (command.getJobIds().size() > 1) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Operation on multi ddl jobs is not allowed"); + } + + return doCancel(command.getJobIds().get(0), executionContext); } - public Cursor doCancel(boolean isAll, List jobIds, ExecutionContext executionContext) { + public Cursor doCancel(Long jobId, ExecutionContext executionContext) { boolean enableOperateSubJob = executionContext.getParamManager().getBoolean(ConnectionParams.ENABLE_OPERATE_SUBJOB); boolean cancelSubJob = executionContext.getParamManager().getBoolean(ConnectionParams.CANCEL_SUBJOB); - List records = - fetchRecords(executionContext.getSchemaName(), isAll, jobIds); - for (DdlEngineRecord record : records) { - if (REBALANCE.name().equalsIgnoreCase(record.ddlType)) { - // update ddl plan state - DdlPlanState afterState; + DdlEngineRecord record = schedulerManager.fetchRecordByJobId(jobId); + if (record == null) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "The ddl job does not exist"); + } + + if (REBALANCE.name().equalsIgnoreCase(record.ddlType)) { + // update ddl plan state + DdlPlanState afterState; + boolean cancelDueToOutOfMaintenanceWindows = + executionContext.getParamManager().getBoolean(ConnectionParams.CANCEL_REBALANCE_JOB_DUE_MAINTENANCE); + if (cancelDueToOutOfMaintenanceWindows) { + afterState = PAUSE_ON_NON_MAINTENANCE_WINDOW; + } else { if (record.ddlStmt.toLowerCase().contains("drain_node")) { // fail afterState = TERMINATED; @@ -84,92 +107,58 @@ public Cursor doCancel(boolean isAll, List jobIds, ExecutionContext execut // success afterState = SUCCESS; } - String message = String.format("update state:[%s] by rollback the rebalance ddl", afterState.name()); - RebalanceDdlPlanManager rebalanceDdlPlanManager = new RebalanceDdlPlanManager(); - rebalanceDdlPlanManager.updateRebalanceScheduleState(record.jobId, afterState, message); } + String message = String.format("update state:[%s] by rollback the rebalance ddl", afterState.name()); + RebalanceDdlPlanManager rebalanceDdlPlanManager = new RebalanceDdlPlanManager(); + rebalanceDdlPlanManager.updateRebalanceScheduleState(record.jobId, afterState, message); } - records.stream().forEach(record -> { - DdlState state = DdlState.valueOf(record.state); + DdlState state = DdlState.valueOf(record.state); - if (!(state == DdlState.RUNNING || state == DdlState.PAUSED)) { - String errMsg = String.format("Only RUNNING/PAUSED jobs can be cancelled, but job %s is in %s state. ", - record.jobId, record.state); - if (StringUtils.equalsIgnoreCase(record.state, DdlState.ROLLBACK_PAUSED.name())) { - errMsg += String.format("You may want to try command: continue ddl %s", record.jobId); - } - throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, errMsg); + if (!(state == DdlState.RUNNING || state == DdlState.PAUSED)) { + String errMsg = String.format("Only RUNNING/PAUSED jobs can be cancelled, but job %s is in %s state. 
", + record.jobId, record.state); + if (StringUtils.equalsIgnoreCase(record.state, DdlState.ROLLBACK_PAUSED.name())) { + errMsg += String.format("You may want to try command: continue ddl %s", record.jobId); } + throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, errMsg); + } - if (!record.isSupportCancel() || !AlterTableRollbacker.checkIfRollbackable(record.ddlStmt)) { - String detail = (AlterTableRollbacker.checkIfRollbackable(record.ddlStmt) ? "original DDL itself" : - "the DDL operations") + " cannot be rolled back"; - String errMsg = "Cancel/rollback is not supported for job %s because %s%s. Please try: continue ddl %s"; + if (!isSupportCancel(record)) { + String detail = (AlterTableRollbacker.checkIfRollbackable(record.ddlStmt) ? "original DDL itself" : + "the DDL operations") + " cannot be rolled back"; + String errMsg = "Cancel/rollback is not supported for job %s because %s%s. Please try: continue ddl %s"; - if (state == DdlState.RUNNING) { - // Pause the job first. - DdlEnginePauseJobsHandler pauseJobsHandler = new DdlEnginePauseJobsHandler(repo); - pauseJobsHandler.doPause(isAll, jobIds, executionContext); - record.state = DdlState.PAUSED.name(); - } - - Optional phyDdlTaskRecord = checkIfAllShardsNotDone(record); - - if (phyDdlTaskRecord != null && record.isSupportCancel()) { - // There is no shard done, so we can roll the job back right now. - if (phyDdlTaskRecord.isPresent()) { - // The physical DDL task is still DIRTY, so we should set the task to READY for rollback. - new DdlEngineAccessorDelegate() { - @Override - protected Integer invoke() { - phyDdlTaskRecord.get().state = DdlTaskState.READY.name(); - phyDdlTaskRecord.get().exceptionAction = DdlExceptionAction.ROLLBACK.name(); - return engineTaskAccessor.updateTask(phyDdlTaskRecord.get()); - } - }.execute(); - } else { - // The physical ddl task has been set already, so nothing to do. - } - } else { - // Otherwise, there is at least one shard done, so raise an exception to explain it. - if (state == DdlState.RUNNING) { - throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, - String.format(errMsg, record.jobId, detail, ", so the DDL job has been paused instead", - record.jobId)); - } - if (state == DdlState.PAUSED) { - throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, - String.format(errMsg, record.jobId, detail, "", record.jobId)); - } - } + if (state == DdlState.RUNNING) { + // Pause the job first. 
+ DdlEnginePauseJobsHandler pauseJobsHandler = new DdlEnginePauseJobsHandler(repo); + pauseJobsHandler.doPause(jobId, executionContext); + record.state = DdlState.PAUSED.name(); } - Optional phyDdlTaskRecord = checkIfAllShardsNotDone(record); - - if (phyDdlTaskRecord != null && record.isSupportCancel()) { - // There is no shard done, so we can roll the job back right now. - if (phyDdlTaskRecord.isPresent()) { - // The physical DDL task is still DIRTY, so we should set the task to READY for rollback. - new DdlEngineAccessorDelegate() { - @Override - protected Integer invoke() { - phyDdlTaskRecord.get().state = DdlTaskState.READY.name(); - phyDdlTaskRecord.get().exceptionAction = DdlExceptionAction.ROLLBACK.name(); - return engineTaskAccessor.updateTask(phyDdlTaskRecord.get()); - } - }.execute(); - } else { - // The physical ddl task has been set already, so nothing to do. - } - } else { - // Otherwise, there is at least one shard done, so raise an exception to explain it. - if (state == DdlState.RUNNING) { - throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, - String.format(errMsg, record.jobId, detail, ", so the DDL job has been paused instead", - record.jobId)); - } - if (state == DdlState.PAUSED) { - throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, - String.format(errMsg, record.jobId, detail, "", record.jobId)); - } - } + throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, + String.format(errMsg, record.jobId, detail, "", record.jobId)); + } - if (record.isSubJob() && !enableOperateSubJob) { - throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Operation on subjob is not allowed"); - } + if (record.isSubJob() && !enableOperateSubJob) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Operation on subjob is not allowed"); + } - List rollbackJobs = new ArrayList<>(); - List traceIds = new ArrayList<>(); + List rollbackJobs = new ArrayList<>(); + List traceIds = new ArrayList<>(); - cancelJob(record, cancelSubJob, rollbackJobs, traceIds); + cancelJob(record, cancelSubJob, rollbackJobs, traceIds); - countDone += rollbackJobs.size(); - } DdlHelper.waitToContinue(DdlConstants.MEDIAN_WAITING_TIME); - DdlEngineRequester.notifyLeader(executionContext.getSchemaName(), jobIds); + Collections.reverse(rollbackJobs); + DdlEngineRequester.notifyLeader(record.schemaName, rollbackJobs); boolean asyncMode = executionContext.getParamManager().getBoolean(ConnectionParams.PURE_ASYNC_DDL_MODE); - if (!asyncMode && CollectionUtils.isNotEmpty(records) && CollectionUtils.size(records) == 1) { - DdlEngineRecord record = records.get(0); + if (!asyncMode) { respond(record.schemaName, record.jobId, executionContext, false, true); } - return new AffectRowCursor(new int[] {countDone}); + return new AffectRowCursor(rollbackJobs.size()); } private void cancelJob(DdlEngineRecord record, boolean subJob, List rollbackJobs, List traceIds) { @@ -189,12 +178,13 @@ private void cancelJob(DdlEngineRecord record, boolean subJob, List rollba rollbackJobs.add(record.jobId); traceIds.add(record.traceId); + // Interrupt the parent job first + DdlHelper.interruptJobs(record.schemaName, Collections.singletonList(record.jobId)); + DdlHelper.killActivePhyDDLs(record.schemaName, record.traceId); + if (subJob) { cancelSubJobs(record.jobId, rollbackJobs, traceIds); } - - DdlHelper.interruptJobs(record.schemaName, rollbackJobs); - DdlHelper.killActivePhyDDLs(record.schemaName, traceIds); } } else if (DdlState.PAUSED == DdlState.valueOf(record.state)) { if (schedulerManager.tryUpdateDdlState( @@ -205,7 +195,7 @@ private void cancelJob(DdlEngineRecord record, boolean subJob, List rollba rollbackJobs.add(record.jobId); - DdlHelper.interruptJobs(record.schemaName, rollbackJobs); + DdlHelper.interruptJobs(record.schemaName, Collections.singletonList(record.jobId)); } } } @@ -227,32 +217,32 @@ private void cancelSubJobs(long jobId, List rollbackJobs, List tra } } - private Optional checkIfAllShardsNotDone(DdlEngineRecord record) { - boolean allShardsNotDone = true; + private boolean isSupportCancel(DdlEngineRecord record) { + boolean supportCancel = record.isSupportCancel(); - List taskRecords = fetchTasks(record.jobId); + List subJobs = schedulerManager.fetchSubJobsRecursive(record.jobId, false); - Optional phyDdlTaskRecord = taskRecords.stream().filter( - tr -> TStringUtil.containsIgnoreCase(tr.name, "PhyDdlTask") && DdlTaskState.DIRTY.name() - .equalsIgnoreCase(tr.state)).findFirst(); + List subJobIds = GeneralUtil.emptyIfNull(subJobs) + .stream().flatMap(x -> x.fetchAllSubJobs().stream()).collect(Collectors.toList()); - if (phyDdlTaskRecord.isPresent() && TStringUtil.isNotEmpty(phyDdlTaskRecord.get().extra)) { 
- String[] shards = phyDdlTaskRecord.get().extra.split(DdlConstants.SEMICOLON); - for (String shard : shards) { - String[] flags = shard.split(DdlConstants.COLON); - if (flags.length > 3) { - if (Boolean.valueOf(flags[2])) { - allShardsNotDone = false; - break; - } - } else { - allShardsNotDone = false; - break; + if (CollectionUtils.isEmpty(subJobIds)) { + return supportCancel; + } + + List records = schedulerManager.fetchRecords(subJobIds); + for (DdlEngineRecord subJobRecord : GeneralUtil.emptyIfNull(records)) { + if (MOVE_DATABASE.name().equalsIgnoreCase(record.ddlType) + || ALTER_TABLEGROUP.name().equalsIgnoreCase(record.ddlType)) { + if (!record.isSupportCancel()) { + continue; } } + if (DdlState.FINISHED.contains(DdlState.valueOf(subJobRecord.state))) { + continue; + } + supportCancel = (supportCancel && subJobRecord.isSupportCancel()); } - return allShardsNotDone ? phyDdlTaskRecord : null; + return supportCancel; } - } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineContinueJobsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineContinueJobsHandler.java index c278b65c1..59ec0f101 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineContinueJobsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineContinueJobsHandler.java @@ -42,50 +42,63 @@ public DdlEngineContinueJobsHandler(IRepository repo) { @Override public Cursor doHandle(final LogicalDal logicalPlan, ExecutionContext executionContext) { SqlContinueDdlJob command = (SqlContinueDdlJob) logicalPlan.getNativeSqlNode(); - return doContinue(command.isAll(), command.getJobIds(), executionContext); + + if (command.isAll()) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Operation on multi ddl jobs is not allowed"); + } + + if (command.getJobIds() == null || command.getJobIds().isEmpty()) { + return new AffectRowCursor(0); + } + + if (command.getJobIds().size() > 1) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Operation on multi ddl jobs is not allowed"); + } + + return doContinue(command.getJobIds().get(0), executionContext); } - public Cursor doContinue(boolean isAll, List jobIds, ExecutionContext executionContext) { + public Cursor doContinue(Long jobId, ExecutionContext executionContext) { boolean enableOperateSubJob = executionContext.getParamManager().getBoolean(ConnectionParams.ENABLE_OPERATE_SUBJOB); - List records = - fetchRecords(executionContext.getSchemaName(), isAll, jobIds); - records.stream().forEach(e -> { - DdlState state = DdlState.valueOf(e.state); - if (!(state == DdlState.PAUSED || state == DdlState.ROLLBACK_PAUSED)) { - throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, String.format( - "Only PAUSED/ROLLBACK_PAUSED jobs can be continued, but job %s is in %s state", e.jobId, e.state)); - } - if (!e.isSupportContinue()) { - throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, String.format( - "Continue/recover is not supported for job %s. 
Please try: cancel ddl %s", e.jobId, e.jobId)); - } - if (e.isSubJob() && !enableOperateSubJob) { - throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Operation on subjob is not allowed"); - } - }); + DdlEngineRecord record = schedulerManager.fetchRecordByJobId(jobId); + if (record == null) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "The ddl job does not exist"); + } + + DdlState state = DdlState.valueOf(record.state); + if (!(state == DdlState.PAUSED || state == DdlState.ROLLBACK_PAUSED)) { + throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, String.format( + "Only PAUSED/ROLLBACK_PAUSED jobs can be continued, but job %s is in %s state", record.jobId, + record.state)); + } + if (!record.isSupportContinue()) { + throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, String.format( + "Continue/recover is not supported for job %s. Please try: cancel ddl %s", record.jobId, record.jobId)); + } + if (record.isSubJob() && !enableOperateSubJob) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Operation on subjob is not allowed"); + } int countDone = 0; List jobIdList = new ArrayList<>(); - for (DdlEngineRecord record : records) { - if (DdlState.PAUSED == DdlState.valueOf(record.state)) { - if (schedulerManager.tryUpdateDdlState( - record.schemaName, - record.jobId, - DdlState.PAUSED, - DdlState.RUNNING)) { - jobIdList.add(record.jobId); - countDone++; - } - } else if (DdlState.ROLLBACK_PAUSED == DdlState.valueOf(record.state)) { - if (schedulerManager.tryUpdateDdlState( - record.schemaName, - record.jobId, - DdlState.ROLLBACK_PAUSED, - DdlState.ROLLBACK_RUNNING)) { - jobIdList.add(record.jobId); - countDone++; - } + if (DdlState.PAUSED == DdlState.valueOf(record.state)) { + if (schedulerManager.tryUpdateDdlState( + record.schemaName, + record.jobId, + DdlState.PAUSED, + DdlState.RUNNING)) { + jobIdList.add(record.jobId); + countDone++; + } + } else if (DdlState.ROLLBACK_PAUSED == DdlState.valueOf(record.state)) { + if (schedulerManager.tryUpdateDdlState( + record.schemaName, + record.jobId, + DdlState.ROLLBACK_PAUSED, + DdlState.ROLLBACK_RUNNING)) { + jobIdList.add(record.jobId); + countDone++; } } @@ -95,12 +108,11 @@ public Cursor doContinue(boolean isAll, List jobIds, ExecutionContext exec boolean asyncMode = executionContext.getParamManager().getBoolean(ConnectionParams.PURE_ASYNC_DDL_MODE); boolean checkResponseInMemory = executionContext.getParamManager().getBoolean(ConnectionParams.CHECK_RESPONSE_IN_MEM); - if (!asyncMode && CollectionUtils.isNotEmpty(records) && CollectionUtils.size(records) == 1) { - DdlEngineRecord record = records.get(0); + if (!asyncMode) { respond(record.schemaName, record.jobId, executionContext, checkResponseInMemory, true); } - return new AffectRowCursor(new int[] {countDone}); + return new AffectRowCursor(countDone); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineInspectCacheHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineInspectCacheHandler.java index f55901f97..09b2db50c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineInspectCacheHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineInspectCacheHandler.java @@ -29,6 +29,7 @@ import com.alibaba.polardbx.gms.node.GmsNodeManager.GmsNode; import com.alibaba.polardbx.gms.sync.GmsSyncManagerHelper; import 
com.alibaba.polardbx.gms.sync.IGmsSyncAction; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; import org.apache.calcite.sql.SqlInspectDdlJobCache; @@ -48,34 +49,36 @@ public DdlEngineInspectCacheHandler(IRepository repo) { @Override public Cursor doHandle(final LogicalDal logicalPlan, ExecutionContext executionContext) { - SqlNode sqlNode = logicalPlan.getNativeSqlNode(); - - if (!(sqlNode instanceof SqlInspectDdlJobCache)) { - throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_UNEXPECTED, "Unknown SQL Node: " - + sqlNode.getKind().name()); - } - - String schemaName = executionContext.getSchemaName(); - ArrayResultCursor resultCursor = DdlCacheCollectionSyncAction.buildResultCursor(); - - IGmsSyncAction syncAction = new DdlCacheCollectionSyncAction(schemaName); - GmsSyncManagerHelper.sync(syncAction, schemaName, results -> { - if (results != null) { - for (Pair>> result : results) { - if (result != null && result.getValue() != null) { - for (Map row : result.getValue()) { - if (row != null) { - resultCursor.addRow(buildRow(row)); - } - } - } - } - } - }); - - //legacy engine + throw new UnsupportedOperationException(); +// SqlNode sqlNode = logicalPlan.getNativeSqlNode(); +// +// if (!(sqlNode instanceof SqlInspectDdlJobCache)) { +// throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_UNEXPECTED, "Unknown SQL Node: " +// + sqlNode.getKind().name()); +// } +// +// String schemaName = executionContext.getSchemaName(); +// ArrayResultCursor resultCursor = DdlCacheCollectionSyncAction.buildResultCursor(); +// +// IGmsSyncAction syncAction = new DdlCacheCollectionSyncAction(schemaName); +// GmsSyncManagerHelper.sync(syncAction, schemaName, SyncScope.ALL, results -> { +// if (results != null) { +// for (Pair>> result : results) { +// if (result != null && result.getValue() != null) { +// for (Map row : result.getValue()) { +// if (row != null) { +// resultCursor.addRow(buildRow(row)); +// } +// } +// } +// } +// } +// }); +// +// //legacy engine // List>> results = -// SyncManagerHelper.sync(new CacheCollectionSyncAction(executionContext.getSchemaName()), schemaName); +// SyncManagerHelper.sync(new CacheCollectionSyncAction(executionContext.getSchemaName()), schemaName, +// SyncScope.MASTER_ONLY); // if (results != null) { // for (List> result : results) { // if (result != null) { @@ -87,19 +90,19 @@ public Cursor doHandle(final LogicalDal logicalPlan, ExecutionContext executionC // } // } // } - - resultCursor.addRow(new Object[] { - ENGINE_TYPE_DAG, - LeaseRecord.getLeaseHolder(), - "", - "", - "leader:" + DdlHelper.hasDdlLeadership(), - "", - "", - "" - }); - - return resultCursor; +// +// resultCursor.addRow(new Object[] { +// ENGINE_TYPE_DAG, +// LeaseRecord.getLeaseHolder(), +// "", +// "", +// "leader:" + DdlHelper.hasDdlLeadership(), +// "", +// "", +// "" +// }); +// +// return resultCursor; } private Object[] buildRow(Map row) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineJobsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineJobsHandler.java index 64044c0ae..c8018d4fb 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineJobsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineJobsHandler.java @@ -80,10 +80,6 @@ protected 
List fetchTasks(long jobId) { return schedulerManager.fetchTaskRecord(jobId); } - protected void interruptJob(String schemaName, List jobIds) { - DdlRequest ddlRequest = new DdlRequest(schemaName, jobIds); - GmsSyncManagerHelper.sync(new DdlInterruptSyncAction(ddlRequest), schemaName); - } /** * ported from com.taobao.tddl.executor.ddl.engine.AsyncDDLManager#concatJobIds(java.util.List) @@ -103,6 +99,6 @@ protected void respond(String schemaName, boolean rollbackOpt) { DdlRequest ddlRequest = new DdlRequest(schemaName.toLowerCase(), Lists.newArrayList(jobId)); DdlEngineRequester.respond(ddlRequest, new DdlJobManager(), executionContext, checkResponseInMemory, - rollbackOpt); + rollbackOpt, false); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEnginePauseJobsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEnginePauseJobsHandler.java index 01f9a1513..8544b3c9c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEnginePauseJobsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEnginePauseJobsHandler.java @@ -16,8 +16,9 @@ package com.alibaba.polardbx.repo.mysql.handler.ddl.newengine; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.common.properties.ConnectionParams; -import com.alibaba.polardbx.common.utils.logger.Logger; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; import com.alibaba.polardbx.executor.ddl.newengine.DdlEngineRequester; @@ -25,16 +26,11 @@ import com.alibaba.polardbx.gms.metadb.misc.DdlEngineRecord; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; -import com.alibaba.polardbx.statistics.SQLRecorderLogger; import org.apache.calcite.sql.SqlPauseDdlJob; import org.apache.commons.collections.CollectionUtils; -import java.util.List; - public class DdlEnginePauseJobsHandler extends DdlEngineJobsHandler { - private final static Logger LOG = SQLRecorderLogger.ddlEngineLogger; - public DdlEnginePauseJobsHandler(IRepository repo) { super(repo); } @@ -42,25 +38,37 @@ public DdlEnginePauseJobsHandler(IRepository repo) { @Override public Cursor doHandle(final LogicalDal logicalPlan, ExecutionContext executionContext) { SqlPauseDdlJob command = (SqlPauseDdlJob) logicalPlan.getNativeSqlNode(); - return doPause(command.isAll(), command.getJobIds(), executionContext); + if (command.isAll()) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Operation on multi ddl jobs is not allowed"); + } + + if (command.getJobIds() == null || command.getJobIds().isEmpty()) { + return new AffectRowCursor(0); + } + + if (command.getJobIds().size() > 1) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Operation on multi ddl jobs is not allowed"); + } + + return doPause(command.getJobIds().get(0), executionContext); } - public Cursor doPause(boolean isAll, List jobIds, ExecutionContext executionContext) { + public Cursor doPause(Long jobId, ExecutionContext executionContext) { boolean enableOperateSubJob = executionContext.getParamManager().getBoolean(ConnectionParams.ENABLE_OPERATE_SUBJOB); boolean enableContinueRunningSubJob = 
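+ /* Note: the two ConnectionParams flags read here gate subjob handling for PAUSE DDL:
+    ENABLE_OPERATE_SUBJOB (read above) controls whether a rebalance subjob may be paused
+    directly, while ENABLE_CONTINUE_RUNNING_SUBJOB (assigned below) is handed through to
+    DdlEngineRequester.pauseJob, presumably to let an already-running subjob keep
+    executing while its parent job is paused. */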
executionContext.getParamManager().getBoolean(ConnectionParams.ENABLE_CONTINUE_RUNNING_SUBJOB); - List records = fetchRecords(executionContext.getSchemaName(), isAll, jobIds); + DdlEngineRecord record = schedulerManager.fetchRecordByJobId(jobId); int countDone = - DdlEngineRequester.pauseJobs(records, enableOperateSubJob, enableContinueRunningSubJob, executionContext); + DdlEngineRequester.pauseJob(record, enableOperateSubJob, enableContinueRunningSubJob, executionContext); - boolean asyncPause = executionContext.getParamManager().getBoolean(ConnectionParams.ASYNC_PAUSE); - if (!asyncPause && CollectionUtils.isNotEmpty(records) && CollectionUtils.size(records) == 1) { - DdlEngineRecord record = records.get(0); + boolean asyncPause = executionContext.getParamManager().getBoolean(ConnectionParams.PURE_ASYNC_DDL_MODE) + || executionContext.getParamManager().getBoolean(ConnectionParams.ASYNC_PAUSE); + if (!asyncPause) { try { respond(record.schemaName, record.jobId, executionContext, false, true); } catch (Exception e) { @@ -68,7 +76,7 @@ public Cursor doPause(boolean isAll, List jobIds, ExecutionContext executi } } - return new AffectRowCursor(new int[] {countDone}); + return new AffectRowCursor(countDone); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEnginePauseRebalanceHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEnginePauseRebalanceHandler.java index 1698943a3..b7b0c5234 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEnginePauseRebalanceHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEnginePauseRebalanceHandler.java @@ -51,35 +51,47 @@ public DdlEnginePauseRebalanceHandler(IRepository repo) { @Override public Cursor doHandle(final LogicalDal logicalPlan, ExecutionContext executionContext) { SqlPauseRebalanceJob command = (SqlPauseRebalanceJob) logicalPlan.getNativeSqlNode(); + if (command.isAll()) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Operation on multi ddl jobs is not allowed"); + } + + if (command.getJobIds() == null || command.getJobIds().isEmpty()) { + return new AffectRowCursor(0); + } + + if (command.getJobIds().size() > 1) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Operation on multi ddl jobs is not allowed"); + } - return doPause(command.isAll(), command.getJobIds(), executionContext); + return doPause(command.getJobIds().get(0), executionContext); } - public Cursor doPause(boolean isAll, List jobIds, ExecutionContext executionContext) { + public Cursor doPause(Long jobId, ExecutionContext executionContext) { boolean enableOperateSubJob = executionContext.getParamManager().getBoolean(ConnectionParams.ENABLE_OPERATE_SUBJOB); boolean enableContinueRunningSubJob = executionContext.getParamManager().getBoolean(ConnectionParams.ENABLE_CONTINUE_RUNNING_SUBJOB); - List records = fetchRecords(executionContext.getSchemaName(), isAll, jobIds); + DdlEngineRecord record = schedulerManager.fetchRecordByJobId(jobId); - for (DdlEngineRecord record : records) { - if (!REBALANCE.name().equalsIgnoreCase(record.ddlType) - && !ALTER_TABLEGROUP.name().equalsIgnoreCase(record.ddlType) - && !MOVE_DATABASE.name().equalsIgnoreCase(record.ddlType)) { - throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, - "Operation on non-rebalance job is not allowed"); - } + if (record == null) { + throw new 
TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "The ddl job does not exist"); } - int countDone = - DdlEngineRequester.pauseJobs(records, enableOperateSubJob, enableContinueRunningSubJob, executionContext); + if (!REBALANCE.name().equalsIgnoreCase(record.ddlType) + && !ALTER_TABLEGROUP.name().equalsIgnoreCase(record.ddlType) + && !MOVE_DATABASE.name().equalsIgnoreCase(record.ddlType)) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, + "Operation on non-rebalance job is not allowed"); + } - boolean asyncPause = executionContext.getParamManager().getBoolean(ConnectionParams.ASYNC_PAUSE); - if (!asyncPause && CollectionUtils.isNotEmpty(records) && CollectionUtils.size(records) == 1) { - DdlEngineRecord record = records.get(0); + int countDone = + DdlEngineRequester.pauseJob(record, enableOperateSubJob, enableContinueRunningSubJob, executionContext); + boolean asyncPause = executionContext.getParamManager().getBoolean(ConnectionParams.PURE_ASYNC_DDL_MODE) + || executionContext.getParamManager().getBoolean(ConnectionParams.ASYNC_PAUSE); + if (!asyncPause) { try { respond(record.schemaName, record.jobId, executionContext, false, true); } catch (RuntimeException e) { @@ -87,6 +99,6 @@ public Cursor doPause(boolean isAll, List jobIds, ExecutionContext executi } } - return new AffectRowCursor(new int[] {countDone}); + return new AffectRowCursor(countDone); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineRecoverJobsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineRecoverJobsHandler.java index e552a1b54..47deb848b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineRecoverJobsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineRecoverJobsHandler.java @@ -16,7 +16,10 @@ package com.alibaba.polardbx.repo.mysql.handler.ddl.newengine; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; @@ -31,7 +34,20 @@ public DdlEngineRecoverJobsHandler(IRepository repo) { @Override public Cursor doHandle(final LogicalDal logicalPlan, ExecutionContext executionContext) { SqlRecoverDdlJob command = (SqlRecoverDdlJob) logicalPlan.getNativeSqlNode(); - return doContinue(command.isAll(), command.getJobIds(), executionContext); + + if (command.isAll()) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Operation on multi ddl jobs is not allowed"); + } + + if (command.getJobIds() == null || command.getJobIds().isEmpty()) { + return new AffectRowCursor(0); + } + + if (command.getJobIds().size() > 1) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Operation on multi ddl jobs is not allowed"); + } + + return doContinue(command.getJobIds().get(0), executionContext); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineRollbackJobsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineRollbackJobsHandler.java index 8ebbcd6d7..142228c8c 100644 --- 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineRollbackJobsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineRollbackJobsHandler.java @@ -16,13 +16,11 @@ package com.alibaba.polardbx.repo.mysql.handler.ddl.newengine; -import com.alibaba.polardbx.common.ddl.newengine.DdlPlanState; import com.alibaba.polardbx.common.exception.TddlRuntimeException; import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.executor.cursor.Cursor; -import com.alibaba.polardbx.executor.partitionmanagement.rebalance.RebalanceDdlPlanManager; +import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; import com.alibaba.polardbx.executor.spi.IRepository; -import com.alibaba.polardbx.gms.metadb.misc.DdlEngineRecord; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; import org.apache.calcite.sql.SqlRollbackDdlJob; @@ -42,7 +40,20 @@ public DdlEngineRollbackJobsHandler(IRepository repo) { @Override public Cursor doHandle(final LogicalDal logicalPlan, ExecutionContext executionContext) { SqlRollbackDdlJob command = (SqlRollbackDdlJob) logicalPlan.getNativeSqlNode(); - return doCancel(command.isAll(), command.getJobIds(), executionContext); + + if (command.isAll()) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Operation on multi ddl jobs is not allowed"); + } + + if (command.getJobIds() == null || command.getJobIds().isEmpty()) { + return new AffectRowCursor(0); + } + + if (command.getJobIds().size() > 1) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Operation on multi ddl jobs is not allowed"); + } + + return doCancel(command.getJobIds().get(0), executionContext); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineShowDdlStatsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineShowDdlStatsHandler.java index fc9d4f45c..146213750 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineShowDdlStatsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineShowDdlStatsHandler.java @@ -21,11 +21,14 @@ import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.ddl.newengine.DdlEngineStats; import com.alibaba.polardbx.executor.ddl.workqueue.ChangeSetThreadPool; +import com.alibaba.polardbx.executor.ddl.workqueue.FastCheckerThreadPool; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.executor.ddl.workqueue.BackFillThreadPool; +import com.alibaba.polardbx.executor.utils.ExecUtils; import com.alibaba.polardbx.gms.node.GmsNodeManager.GmsNode; import com.alibaba.polardbx.gms.sync.GmsSyncManagerHelper; import com.alibaba.polardbx.gms.sync.IGmsSyncAction; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; import org.apache.commons.collections.CollectionUtils; @@ -58,7 +61,7 @@ protected Cursor doHandle(LogicalDal logicalPlan, ExecutionContext executionCont ChangeSetThreadPool.updateStats(); // Merge stats from all nodes - GmsSyncManagerHelper.sync(sync, executionContext.getSchemaName(), results -> { + GmsSyncManagerHelper.sync(sync, 
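+ /* Fan-out point: the sync action executes on every node covered by the scope argument
+    (now restricted to SyncScope.MASTER_ONLY instead of all nodes); each node returns its
+    local DDL engine metrics and the callback below merges the returned rows on the
+    requesting node. */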
executionContext.getSchemaName(), SyncScope.MASTER_ONLY, results -> { if (results == null) { return; } @@ -91,6 +94,14 @@ public Object sync() { // backfill parallelism BackFillThreadPool.updateStats(); ChangeSetThreadPool.updateStats(); + + //only leader update the fastchecker stats + if (ExecUtils.hasLeadership(null)) { + FastCheckerThreadPool.getInstance().updateStats(); + } else { + FastCheckerThreadPool.getInstance().clearStats(); + } + for (DdlEngineStats.Metric metric : DdlEngineStats.getAllMetrics().values()) { result.addRow(metric.toRow()); } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineShowJobsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineShowJobsHandler.java index 2ff5a4607..c1886b61a 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineShowJobsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineShowJobsHandler.java @@ -29,10 +29,16 @@ import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlEngineSchedulerManager; import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlJobManager; import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlHelper; +import com.alibaba.polardbx.executor.ddl.workqueue.FastCheckerThreadPool; import com.alibaba.polardbx.executor.gsi.GsiBackfillManager; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.gms.metadb.misc.DdlEngineRecord; import com.alibaba.polardbx.gms.metadb.misc.DdlEngineTaskRecord; +import com.alibaba.polardbx.gms.node.GmsNodeManager; +import com.alibaba.polardbx.gms.sync.GmsSyncManagerHelper; +import com.alibaba.polardbx.gms.sync.IGmsSyncAction; +import com.alibaba.polardbx.gms.sync.SyncScope; +import com.alibaba.polardbx.optimizer.context.AsyncDDLContext; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; @@ -40,6 +46,7 @@ import com.alibaba.polardbx.repo.mysql.handler.LogicalShowProcesslistHandler; import com.google.common.collect.ImmutableMap; import io.airlift.slice.Slice; +import lombok.Data; import org.apache.calcite.sql.SqlShowDdlJobs; import org.apache.calcite.sql.SqlShowProcesslist; import org.apache.calcite.sql.parser.SqlParserPos; @@ -50,6 +57,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.TreeMap; import java.util.stream.Collectors; import static com.alibaba.polardbx.common.ddl.newengine.DdlConstants.ENGINE_TYPE_DAG; @@ -105,18 +113,51 @@ public Cursor doHandle(final LogicalDal logicalPlan, ExecutionContext executionC ArrayResultCursor resultCursor = buildResultCursor(isFull); if (CollectionUtils.isNotEmpty(records)) { gsiBackfillManager = new GsiBackfillManager(executionContext.getSchemaName()); + + List ddlJobIds = records.stream().map(DdlEngineRecord::getJobId).collect(Collectors.toList()); + + //handle fastchecker info + Map mergedResult = new TreeMap<>(); + FastCheckerInfoSyncAction syncAction = new FastCheckerInfoSyncAction(ddlJobIds); + GmsSyncManagerHelper.sync(syncAction, executionContext.getSchemaName(), SyncScope.MASTER_ONLY, results -> { + if (results == null) { + return; + } + + for (Pair>> result : results) { + if (CollectionUtils.isEmpty(result.getValue())) { + continue; + } + for (Map row : result.getValue()) { + long jobId = 
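+ /* Each row returned by FastCheckerInfoSyncAction carries DDL_JOB_ID, TASK_SUM and
+    TASK_FINISHED; counts for the same job id coming back from different nodes are
+    accumulated into a single FastCheckerInfo via putIfAbsent + addAndGet below. */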
DataTypes.LongType.convertFrom(row.get("DDL_JOB_ID")); + long taskSum = DataTypes.LongType.convertFrom(row.get("TASK_SUM")); + long taskFinished = DataTypes.LongType.convertFrom(row.get("TASK_FINISHED")); + + mergedResult.putIfAbsent(jobId, new FastCheckerThreadPool.FastCheckerInfo()); + mergedResult.get(jobId).getPhyTaskSum().addAndGet((int) taskSum); + mergedResult.get(jobId).getPhyTaskFinished().addAndGet((int) taskFinished); + } + } + }); + // If the jobs on new DDL engine, then show them. for (DdlEngineRecord record : records) { if (!isFull && record.isSubJob()) { continue; } + + FastCheckerThreadPool.FastCheckerInfo fcInfo = Optional.ofNullable(mergedResult.get(record.jobId)) + .orElse(new FastCheckerThreadPool.FastCheckerInfo()); if (record.ddlType.equalsIgnoreCase("CREATE_DATABASE_LIKE_AS")) { - List createDatabaseRows = processCreateDatabaseLikeAsJob(record, isFull); + List createDatabaseRows = processCreateDatabaseLikeAsJob(record, isFull, fcInfo); createDatabaseRows.forEach( row -> resultCursor.addRow(row) ); } else { - resultCursor.addRow(buildRow(record, isFull)); + resultCursor.addRow(buildRow(record, + isFull, + fcInfo + )); } } } @@ -136,6 +177,8 @@ private ArrayResultCursor buildResultCursor(boolean isFull) { resultCursor.addColumn("TOTAL_BACKFILL_PROGRESS", DataTypes.StringType); resultCursor.addColumn("CURRENT_PHY_DDL_PROGRESS", DataTypes.StringType); resultCursor.addColumn("PROGRESS", DataTypes.StringType); + resultCursor.addColumn("FASTCHECKER_TASK_NUM", DataTypes.StringType); + resultCursor.addColumn("FASTCHECKER_TASK_FINISHED", DataTypes.StringType); resultCursor.addColumn("START_TIME", DataTypes.StringType); resultCursor.addColumn("END_TIME", DataTypes.StringType); resultCursor.addColumn("ELAPSED_TIME(MS)", DataTypes.StringType); @@ -158,7 +201,8 @@ private ArrayResultCursor buildResultCursor(boolean isFull) { private static int MAX_SHOW_LEN = 5000; - private Object[] buildRow(DdlEngineRecord record, boolean isFull) { + private Object[] buildRow(DdlEngineRecord record, boolean isFull, + FastCheckerThreadPool.FastCheckerInfo fastCheckerInfo) { String phyProcess = checkPhyProcess(record); if (phyProcess != null && phyProcess != StringUtils.EMPTY) { phyProcess = phyProcess.substring(0, Math.min(phyProcess.length(), MAX_SHOW_LEN)); @@ -189,6 +233,8 @@ private Object[] buildRow(DdlEngineRecord record, boolean isFull) { backfillProgress, record.progress + PERCENTAGE, totalProgress, + fastCheckerInfo.getPhyTaskSum().get(), + fastCheckerInfo.getPhyTaskFinished().get(), gmtCreated, gmtModified, elapsedTime, phyProcess, cancelable, NONE, record.responseNode, record.executionNode, record.traceId, record.ddlStmt, ddlResult, NONE @@ -199,12 +245,15 @@ private Object[] buildRow(DdlEngineRecord record, boolean isFull) { backfillProgress, record.progress + PERCENTAGE, totalProgress, + fastCheckerInfo.getPhyTaskSum().get(), + fastCheckerInfo.getPhyTaskFinished().get(), gmtCreated, gmtModified, elapsedTime, phyProcess, cancelable }; } } - private List processCreateDatabaseLikeAsJob(DdlEngineRecord record, boolean isFull) { + private List processCreateDatabaseLikeAsJob(DdlEngineRecord record, boolean isFull, + FastCheckerThreadPool.FastCheckerInfo fcInfo) { int i = 0; final Map columnIndexForShowFull = ImmutableMap.builder() @@ -233,7 +282,7 @@ private List processCreateDatabaseLikeAsJob(DdlEngineRecord record, bo List result = new ArrayList<>(); if (!isFull) { - Object[] baseRow = buildRow(record, isFull); + Object[] baseRow = buildRow(record, isFull, fcInfo); List> 
createDatabaseResult = queryCreateDatabaseTaskResultFromViewByJobId(record.jobId); String schemaSrc = null, schemaDst = null; for (Map createDatabaseResultItem : createDatabaseResult) { @@ -258,7 +307,7 @@ private List processCreateDatabaseLikeAsJob(DdlEngineRecord record, bo } List> createDatabaseResult = queryCreateDatabaseTaskResultFromViewByJobId(record.jobId); - Object[] baseRow = buildRow(record, isFull); + Object[] baseRow = buildRow(record, isFull, fcInfo); String schemaSrc = null, schemaDst = null; for (Map createDatabaseResultItem : createDatabaseResult) { Object[] subRow = baseRow.clone(); @@ -467,4 +516,32 @@ private long safeParseLong(String str) { } } + @Data + public static class FastCheckerInfoSyncAction implements IGmsSyncAction { + List ddlJobsId; + + public FastCheckerInfoSyncAction(List ddlJobsId) { + this.ddlJobsId = ddlJobsId; + } + + @Override + public Object sync() { + ArrayResultCursor resultCursor = new ArrayResultCursor("FASTCHECKER"); + resultCursor.addColumn("DDL_JOB_ID", DataTypes.LongType); + resultCursor.addColumn("TASK_SUM", DataTypes.LongType); + resultCursor.addColumn("TASK_FINISHED", DataTypes.LongType); + + for (Long jobId : ddlJobsId) { + FastCheckerThreadPool.FastCheckerInfo checkerInfo = FastCheckerThreadPool.getInstance() + .queryCheckTaskInfo(jobId); + if (checkerInfo != null) { + resultCursor.addRow( + new Object[] {jobId, checkerInfo.getPhyTaskSum().get(), checkerInfo.getPhyTaskFinished().get()} + ); + } + } + return resultCursor; + } + } + } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineShowRebalanceBackFillHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineShowRebalanceBackFillHandler.java index 0d6f474a9..2707852f0 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineShowRebalanceBackFillHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineShowRebalanceBackFillHandler.java @@ -16,18 +16,23 @@ package com.alibaba.polardbx.repo.mysql.handler.ddl.newengine; +import com.alibaba.polardbx.common.utils.GeneralUtil; import com.alibaba.polardbx.executor.cursor.Cursor; import com.alibaba.polardbx.executor.cursor.impl.ArrayResultCursor; import com.alibaba.polardbx.executor.ddl.job.task.CostEstimableDdlTask; import com.alibaba.polardbx.executor.ddl.newengine.meta.DdlPlanManager; import com.alibaba.polardbx.executor.ddl.newengine.utils.DdlHelper; import com.alibaba.polardbx.executor.ddl.newengine.utils.TaskHelper; +import com.alibaba.polardbx.executor.handler.subhandler.InformationSchemaRebalanceProgressHandler; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.gms.scheduler.DdlPlanRecord; import com.alibaba.polardbx.optimizer.context.ExecutionContext; +import com.alibaba.polardbx.optimizer.core.datatype.DataType; import com.alibaba.polardbx.optimizer.core.datatype.DataTypeUtil; import com.alibaba.polardbx.optimizer.core.datatype.DataTypes; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; +import org.apache.calcite.rel.type.RelDataTypeFieldImpl; +import org.apache.calcite.sql.type.SqlTypeName; import org.apache.commons.collections.CollectionUtils; import javax.validation.constraints.NotNull; @@ -48,7 +53,7 @@ public DdlEngineShowRebalanceBackFillHandler(IRepository repo) { @Override protected Cursor doHandle(LogicalDal logicalPlan, ExecutionContext executionContext) { - List> 
result = DdlHelper.getServerConfigManager().executeQuerySql( + List> logicBackfillResult = DdlHelper.getServerConfigManager().executeQuerySql( "SELECT " + "T1.DDL_JOB_ID, " + "SUM(T1.`CURRENT_SPEED(ROWS/SEC)`) AS `CURRENT_SPEED(ROWS/SEC)`," + @@ -62,39 +67,47 @@ protected Cursor doHandle(LogicalDal logicalPlan, ExecutionContext executionCont DEFAULT_DB_NAME, null ); - ArrayResultCursor cursor = buildResultCursor(); - if (CollectionUtils.isEmpty(result)) { - return cursor; - } - for (Map map : result) { - if (map.get(DDL_JOB_ID) == null) { - continue; - } - final long jobId = parseLong(map.get(DDL_JOB_ID)); - final Optional ddlPlanRecordOptional = planManager.getDdlPlanByJobId(jobId); - long previousFinishedRows = 0L; - if (ddlPlanRecordOptional.isPresent()) { - CostEstimableDdlTask.CostInfo costInfo = - TaskHelper.decodeCostInfo(ddlPlanRecordOptional.get().getExtras()); - if (costInfo != null) { - previousFinishedRows += costInfo.rows; + ArrayResultCursor logicalBackfillResultCursor = buildLogicalBackfillResultCursor(); + boolean logicalBackfillProgress = false; + if (CollectionUtils.isNotEmpty(logicBackfillResult)) { + for (Map map : logicBackfillResult) { + if (map.get(DDL_JOB_ID) == null) { + continue; + } + logicalBackfillProgress = true; + final long jobId = parseLong(map.get(DDL_JOB_ID)); + final Optional ddlPlanRecordOptional = planManager.getDdlPlanByJobId(jobId); + long previousFinishedRows = 0L; + if (ddlPlanRecordOptional.isPresent()) { + CostEstimableDdlTask.CostInfo costInfo = + TaskHelper.decodeCostInfo(ddlPlanRecordOptional.get().getExtras()); + if (costInfo != null) { + previousFinishedRows += costInfo.rows; + } } - } - long finishedRows = parseLong(map.get(FINISHED_ROWS)); - long approximateTotalRows = parseLong(map.get(APPROXIMATE_TOTAL_ROWS)); + long finishedRows = parseLong(map.get(FINISHED_ROWS)); + long approximateTotalRows = parseLong(map.get(APPROXIMATE_TOTAL_ROWS)); - cursor.addRow( - new Object[] { - map.get(DDL_JOB_ID), - map.get(CURRENT_SPEED), - map.get(AVERAGE_SPEED), - finishedRows + previousFinishedRows, - approximateTotalRows + previousFinishedRows - } - ); + logicalBackfillResultCursor.addRow( + new Object[] { + map.get(DDL_JOB_ID), + map.get(CURRENT_SPEED), + map.get(AVERAGE_SPEED), + finishedRows + previousFinishedRows, + approximateTotalRows + previousFinishedRows + } + ); + } } - return cursor; + if (!logicalBackfillProgress) { + ArrayResultCursor physicalBackfillResultCursor = buildPhysicalBackfillResultCursor(); + InformationSchemaRebalanceProgressHandler.buildRebalanceBackFillView(physicalBackfillResultCursor); + if (GeneralUtil.isNotEmpty(physicalBackfillResultCursor.getRows())) { + return physicalBackfillResultCursor; + } + } + return logicalBackfillResultCursor; } private long parseLong(@NotNull Object val) { @@ -105,7 +118,7 @@ private String parseString(@NotNull Object val) { return String.valueOf(DataTypeUtil.toJavaObject(null, val)); } - private ArrayResultCursor buildResultCursor() { + private ArrayResultCursor buildLogicalBackfillResultCursor() { ArrayResultCursor resultCursor = new ArrayResultCursor("REBALANCE_BACKFILL"); resultCursor.addColumn(DDL_JOB_ID, DataTypes.StringType); resultCursor.addColumn(CURRENT_SPEED, DataTypes.StringType); @@ -116,4 +129,25 @@ private ArrayResultCursor buildResultCursor() { return resultCursor; } + private ArrayResultCursor buildPhysicalBackfillResultCursor() { + ArrayResultCursor resultCursor = new ArrayResultCursor("REBALANCE_PROOGRESS"); + int i = 0; + resultCursor.addColumn("JOB_ID", 
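+ /* Assumption: this column layout mirrors the rebalance-progress view filled by
+    InformationSchemaRebalanceProgressHandler.buildRebalanceBackFillView(...), which is
+    invoked on this cursor when no logical backfill rows were found above. */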
DataTypes.StringType); + resultCursor.addColumn("TABLE_SCHEMA", DataTypes.StringType); + resultCursor.addColumn("STAGE", DataTypes.StringType); + resultCursor.addColumn("STATE", DataTypes.StringType); + resultCursor.addColumn("PROGRESS", DataTypes.DoubleType); + resultCursor.addColumn("TOTAL_TASK", DataTypes.IntegerType); + resultCursor.addColumn("FINISHED_TASK", DataTypes.IntegerType); + resultCursor.addColumn("RUNNING_TASK", DataTypes.IntegerType); + resultCursor.addColumn("NOTSTARTED_TASK", DataTypes.IntegerType); + resultCursor.addColumn("FAILED_TASK", DataTypes.IntegerType); + resultCursor.addColumn("INFO", DataTypes.StringType); + resultCursor.addColumn("START_TIME", DataTypes.TimestampType); + resultCursor.addColumn("LAST_UPDATE_TIME", DataTypes.TimestampType); + resultCursor.addColumn("DDL_STMT", DataTypes.StringType); + + resultCursor.initMeta(); + return resultCursor; + } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineShowResultsHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineShowResultsHandler.java index 265bad111..d304f8c86 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineShowResultsHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineShowResultsHandler.java @@ -30,6 +30,7 @@ import com.alibaba.polardbx.gms.node.GmsNodeManager.GmsNode; import com.alibaba.polardbx.gms.sync.GmsSyncManagerHelper; import com.alibaba.polardbx.gms.sync.IGmsSyncAction; +import com.alibaba.polardbx.gms.sync.SyncScope; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; import com.google.common.collect.ImmutableMap; @@ -74,7 +75,7 @@ public Cursor doHandle(final LogicalDal logicalPlan, ExecutionContext executionC //new engine IGmsSyncAction syncAction = new DdlResponseCollectSyncAction(schemaName, jobIds); - GmsSyncManagerHelper.sync(syncAction, schemaName, results -> { + GmsSyncManagerHelper.sync(syncAction, schemaName, SyncScope.MASTER_ONLY, results -> { if (results == null) { return; } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineSkipRebalanceSubjobHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineSkipRebalanceSubjobHandler.java index 5a1ac2540..508358e98 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineSkipRebalanceSubjobHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineSkipRebalanceSubjobHandler.java @@ -33,6 +33,7 @@ import org.apache.commons.collections.CollectionUtils; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import static com.alibaba.polardbx.common.ddl.newengine.DdlType.ALTER_TABLEGROUP; @@ -57,84 +58,87 @@ public Cursor doHandle(final LogicalDal logicalPlan, ExecutionContext executionC "Operation with skip all rebalance subjob is not allowed"); } - List allJobIds = command.getJobIds(); + if (command.getJobIds() == null || command.getJobIds().isEmpty()) { + return new AffectRowCursor(0); + } + + if (command.getJobIds().size() > 1) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Operation on multi ddl jobs is not allowed"); + } - return doSkipSubjob(allJobIds, executionContext); + 
return doSkipSubjob(command.getJobIds().get(0), executionContext); } - public Cursor doSkipSubjob(List jobIds, ExecutionContext executionContext) { + public Cursor doSkipSubjob(Long jobId, ExecutionContext executionContext) { int countDone = 0; - List records = - fetchRecords(executionContext.getSchemaName(), false, jobIds); - List skipSubjobs = new ArrayList<>(); + DdlEngineRecord record = schedulerManager.fetchRecordByJobId(jobId); + if (record == null) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "The ddl job does not exist"); + } - for (DdlEngineRecord record : records) { - DdlState state = DdlState.valueOf(record.state); + DdlState state = DdlState.valueOf(record.state); - if (!record.isSubJob()) { - throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, - "Operation on non-subjob is not allowed"); - } + if (!record.isSubJob()) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, + "Operation on non-subjob is not allowed"); + } - if (!ALTER_TABLEGROUP.name().equalsIgnoreCase(record.ddlType) - && !MOVE_DATABASE.name().equalsIgnoreCase(record.ddlType)) { - throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, - "Operation on non-rebalance subjob is not allowed"); - } + if (!ALTER_TABLEGROUP.name().equalsIgnoreCase(record.ddlType) + && !MOVE_DATABASE.name().equalsIgnoreCase(record.ddlType)) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, + "Operation on non-rebalance subjob is not allowed"); + } - if (!(state == DdlState.RUNNING || state == DdlState.PAUSED || state == DdlState.ROLLBACK_PAUSED)) { - String errMsg = String.format( - "Only RUNNING/PAUSED/ROLLBACK_PAUSE jobs can be cancelled, but job %s is in %s state. ", - record.jobId, record.state); - throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, errMsg); - } + if (!(state == DdlState.RUNNING || state == DdlState.PAUSED || state == DdlState.ROLLBACK_PAUSED)) { + String errMsg = String.format( + "Only RUNNING/PAUSED/ROLLBACK_PAUSE jobs can be cancelled, but job %s is in %s state. 
", + record.jobId, record.state); + throw new TddlRuntimeException(ErrorCode.ERR_GMS_GENERIC, errMsg); + } - if (!record.isSupportCancel()) { - continue; - } + if (!record.isSupportCancel()) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, + String.format("The subjob %s can not be cancelled", jobId)); + } - // change state - if (DdlState.RUNNING == DdlState.valueOf(record.state)) { - if (schedulerManager.tryUpdateDdlState( - record.schemaName, - record.jobId, - DdlState.RUNNING, - DdlState.ROLLBACK_TO_READY)) { - - updateSupportedCommands(record.jobId, record.isSupportContinue(), record.isSupportCancel(), true); - skipSubjobs.add(record.jobId); - } - } else if (DdlState.PAUSED == DdlState.valueOf(record.state)) { - if (schedulerManager.tryUpdateDdlState( - record.schemaName, - record.jobId, - DdlState.PAUSED, - DdlState.ROLLBACK_TO_READY)) { - - updateSupportedCommands(record.jobId, record.isSupportContinue(), record.isSupportCancel(), true); - skipSubjobs.add(record.jobId); - } - } else if (DdlState.ROLLBACK_PAUSED == DdlState.valueOf(record.state)) { - if (schedulerManager.tryUpdateDdlState( - record.schemaName, - record.jobId, - DdlState.ROLLBACK_PAUSED, - DdlState.ROLLBACK_TO_READY)) { - - updateSupportedCommands(record.jobId, record.isSupportContinue(), record.isSupportCancel(), true); - skipSubjobs.add(record.jobId); - } + // change state + if (DdlState.RUNNING == DdlState.valueOf(record.state)) { + if (schedulerManager.tryUpdateDdlState( + record.schemaName, + record.jobId, + DdlState.RUNNING, + DdlState.ROLLBACK_TO_READY)) { + + updateSupportedCommands(record.jobId, record.isSupportContinue(), record.isSupportCancel(), true); + } + } else if (DdlState.PAUSED == DdlState.valueOf(record.state)) { + if (schedulerManager.tryUpdateDdlState( + record.schemaName, + record.jobId, + DdlState.PAUSED, + DdlState.ROLLBACK_TO_READY)) { + + updateSupportedCommands(record.jobId, record.isSupportContinue(), record.isSupportCancel(), true); + } + } else if (DdlState.ROLLBACK_PAUSED == DdlState.valueOf(record.state)) { + if (schedulerManager.tryUpdateDdlState( + record.schemaName, + record.jobId, + DdlState.ROLLBACK_PAUSED, + DdlState.ROLLBACK_TO_READY)) { + + updateSupportedCommands(record.jobId, record.isSupportContinue(), record.isSupportCancel(), true); } } DdlHelper.waitToContinue(DdlConstants.MEDIAN_WAITING_TIME); - DdlHelper.interruptJobs(executionContext.getSchemaName(), skipSubjobs); - DdlEngineRequester.notifyLeader(executionContext.getSchemaName(), skipSubjobs); - - boolean asyncPause = executionContext.getParamManager().getBoolean(ConnectionParams.ASYNC_PAUSE); - if (!asyncPause && CollectionUtils.isNotEmpty(records) && CollectionUtils.size(records) == 1) { - DdlEngineRecord record = records.get(0); + DdlHelper.interruptJobs(record.schemaName, Collections.singletonList(jobId)); + DdlHelper.killActivePhyDDLs(record.schemaName, record.traceId); + DdlEngineRequester.notifyLeader(record.schemaName, Collections.singletonList(jobId)); + boolean asyncPause = executionContext.getParamManager().getBoolean(ConnectionParams.PURE_ASYNC_DDL_MODE) + || executionContext.getParamManager().getBoolean(ConnectionParams.ASYNC_PAUSE); + if (!asyncPause) { try { respond(record.schemaName, record.jobId, executionContext, false, true); } catch (Exception e) { @@ -142,6 +146,6 @@ public Cursor doSkipSubjob(List jobIds, ExecutionContext executionContext) } } - return new AffectRowCursor(new int[] {countDone}); + return new AffectRowCursor(countDone); } } diff --git 
a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineTerminateRebalanceHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineTerminateRebalanceHandler.java index f481fae8f..a1153c299 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineTerminateRebalanceHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/ddl/newengine/DdlEngineTerminateRebalanceHandler.java @@ -16,7 +16,10 @@ package com.alibaba.polardbx.repo.mysql.handler.ddl.newengine; +import com.alibaba.polardbx.common.exception.TddlRuntimeException; +import com.alibaba.polardbx.common.exception.code.ErrorCode; import com.alibaba.polardbx.executor.cursor.Cursor; +import com.alibaba.polardbx.executor.cursor.impl.AffectRowCursor; import com.alibaba.polardbx.executor.spi.IRepository; import com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.dal.LogicalDal; @@ -37,9 +40,19 @@ public DdlEngineTerminateRebalanceHandler(IRepository repo) { public Cursor doHandle(final LogicalDal logicalPlan, ExecutionContext executionContext) { SqlTerminateRebalanceJob command = (SqlTerminateRebalanceJob) logicalPlan.getNativeSqlNode(); - List allJobIds = command.getJobIds(); + if (command.isAll()) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Operation on multi ddl jobs is not allowed"); + } - return doCancel(false, allJobIds, executionContext); + if (command.getJobIds() == null || command.getJobIds().isEmpty()) { + return new AffectRowCursor(0); + } + + if (command.getJobIds().size() > 1) { + throw new TddlRuntimeException(ErrorCode.ERR_DDL_JOB_ERROR, "Operation on multi ddl jobs is not allowed"); + } + + return doCancel(command.getJobIds().get(0), executionContext); } } diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/execute/LogicalRelocateExecuteJob.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/execute/LogicalRelocateExecuteJob.java index b46abc808..140cfb55e 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/execute/LogicalRelocateExecuteJob.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/handler/execute/LogicalRelocateExecuteJob.java @@ -76,6 +76,8 @@ public void execute(List> values, long memorySize) throws Exception final boolean skipUnchangedRow = executionContext.getParamManager().getBoolean(ConnectionParams.DML_RELOCATE_SKIP_UNCHANGED_ROW); + final boolean checkJsonByStringCompare = + executionContext.getParamManager().getBoolean(ConnectionParams.DML_CHECK_JSON_BY_STRING_COMPARE); for (Integer tableIndex : relocate.getSetColumnMetas().keySet()) { RowSet rowSet = new RowSet(values, returnColumns); @@ -93,7 +95,7 @@ public void execute(List> values, long memorySize) throws Exception rowSet = LogicalRelocateHandler.buildChangedRowSet(distinctValues, returnColumns, relocate.getSetColumnTargetMappings().get(tableIndex), relocate.getSetColumnSourceMappings().get(tableIndex), - relocate.getSetColumnMetas().get(tableIndex)); + relocate.getSetColumnMetas().get(tableIndex), checkJsonByStringCompare); if (rowSet == null) { continue; } @@ -109,7 +111,7 @@ public void execute(List> values, long memorySize) throws Exception final Mapping sourceMap = relocate.getSetColumnSourceMappings().get(tableIndex); final List metas = relocate.getSetColumnMetas().get(tableIndex); for 
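+ /* identicalRow(...) now also receives DML_CHECK_JSON_BY_STRING_COMPARE so JSON columns
+    can be compared by their raw string form; rows judged identical contribute 0 to
+    affectRows, in line with MySQL's default affected-rows semantics of not counting
+    unchanged rows. */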
(List row : (useRowSet ? rowSet.getRows() : distinctValues)) { - affectRows += identicalRow(row, targetMap, sourceMap, metas) ? 0 : 1; + affectRows += identicalRow(row, targetMap, sourceMap, metas, checkJsonByStringCompare) ? 0 : 1; } } @@ -134,12 +136,13 @@ public void execute(List> values, long memorySize) throws Exception } protected static boolean identicalRow(List row, Mapping setColumnTargetMapping, - Mapping setColumnSourceMapping, List setColumnMetas) { + Mapping setColumnSourceMapping, List setColumnMetas, + boolean checkJsonByStringCompare) { final List targets = Mappings.permute(row, setColumnTargetMapping); final List sources = Mappings.permute(row, setColumnSourceMapping); final GroupKey targetKey = new GroupKey(targets.toArray(), setColumnMetas); final GroupKey sourceKey = new GroupKey(sources.toArray(), setColumnMetas); - return sourceKey.equalsForUpdate(targetKey); + return sourceKey.equalsForUpdate(targetKey, checkJsonByStringCompare); } private int executeRelocateWriter(RelocateWriter relocateWriter, RowSet rowSet) throws Exception { @@ -152,6 +155,8 @@ private int executeRelocateWriter(RelocateWriter relocateWriter, RowSet rowSet) final RelocateWriter rw = w.unwrap(RelocateWriter.class); final boolean usePartFieldChecker = rw.isUsePartFieldChecker() && executionContext.getParamManager().getBoolean(ConnectionParams.DML_USE_NEW_SK_CHECKER); + final boolean checkJsonByStringCompare = + executionContext.getParamManager().getBoolean(ConnectionParams.DML_CHECK_JSON_BY_STRING_COMPARE); final List skSources = Mappings.permute(row, rw.getIdentifierKeySourceMapping()); final List skTargets = Mappings.permute(row, rw.getIdentifierKeyTargetMapping()); @@ -186,7 +191,7 @@ private int executeRelocateWriter(RelocateWriter relocateWriter, RowSet rowSet) final GroupKey skTargetKey = new GroupKey(skTargets.toArray(), rw.getIdentifierKeyMetas()); final GroupKey skSourceKey = new GroupKey(skSources.toArray(), rw.getIdentifierKeyMetas()); - return skTargetKey.equalsForUpdate(skSourceKey); + return skTargetKey.equalsForUpdate(skSourceKey, checkJsonByStringCompare); } }; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/spi/MyJdbcHandler.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/spi/MyJdbcHandler.java index f023b6aa2..91a773b7c 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/spi/MyJdbcHandler.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/repo/mysql/spi/MyJdbcHandler.java @@ -42,6 +42,7 @@ import com.alibaba.polardbx.common.utils.logger.LoggerFactory; import com.alibaba.polardbx.common.utils.logger.support.LogFormat; import com.alibaba.polardbx.common.utils.thread.ThreadCpuStatUtil; +import com.alibaba.polardbx.config.ConfigDataMode; import com.alibaba.polardbx.executor.Xprotocol.XRowSet; import com.alibaba.polardbx.executor.common.ExecutorContext; import com.alibaba.polardbx.executor.cursor.Cursor; @@ -71,10 +72,10 @@ import com.alibaba.polardbx.optimizer.core.rel.SingleTableOperation; import com.alibaba.polardbx.optimizer.core.row.ResultSetRow; import com.alibaba.polardbx.optimizer.core.row.Row; -import com.alibaba.polardbx.optimizer.optimizeralert.OptimizerAlertUtil; import com.alibaba.polardbx.optimizer.statis.OperatorStatistics; import com.alibaba.polardbx.optimizer.statis.OperatorStatisticsExt; import com.alibaba.polardbx.optimizer.statis.SQLRecord; +import com.alibaba.polardbx.optimizer.statis.XplanStat; import com.alibaba.polardbx.optimizer.utils.ExplainResult; import 
com.alibaba.polardbx.optimizer.utils.ITransaction; import com.alibaba.polardbx.optimizer.utils.PhyTableOperationUtil; @@ -121,6 +122,7 @@ import java.util.concurrent.Executor; import static com.alibaba.polardbx.common.TddlConstants.ANONAMOUS_DBKEY; +import static com.alibaba.polardbx.common.TddlConstants.LONG_ENOUGH_TIMEOUT_FOR_DDL_ON_XPROTO_CONN; import static com.alibaba.polardbx.common.utils.ExceptionUtils.isMySQLIntegrityConstraintViolationException; import static com.alibaba.polardbx.common.utils.GeneralUtil.listToMap; import static com.alibaba.polardbx.common.utils.GeneralUtil.mapToList; @@ -141,8 +143,6 @@ public class MyJdbcHandler implements GeneralQueryHandler { private static final Logger logger = LoggerFactory.getLogger(MyJdbcHandler.class); private static final int MAX_LOG_PARAM_COUNT = 500; - private static final int LONG_ENOUGH_TIMEOUT_FOR_DDL_ON_XPROTO_CONN = 7 * 24 * 60 * 60 * 1000; - private IConnection connection = null; private ResultSet resultSet = null; private java.sql.Statement ps = null; @@ -258,12 +258,7 @@ void recordSqlLog(BytesSql bytesSql, long startTime, long nanoStartTime, long sq long time = System.currentTimeMillis() - startTime; if (SQLRecorderLogger.physicalSlowLogger.isInfoEnabled()) { try { - long thresold = this.executionContext.getPhysicalRecorder().getSlowSqlTime(); - - // Use slow sql time of db level first - if (executionContext.getExtraCmds().containsKey(ConnectionProperties.SLOW_SQL_TIME)) { - thresold = executionContext.getParamManager().getLong(ConnectionParams.SLOW_SQL_TIME); - } + long thresold = executionContext.getParamManager().getLong(ConnectionParams.SLOW_SQL_TIME); if (thresold > 0L && time > thresold) { boolean isBackfillTask = (executionContext.getDdlJobId() != null @@ -432,7 +427,9 @@ public void close() throws SQLException { ; } } - OptimizerAlertUtil.xplanAlert(executionContext, xResult); + if (XResult.RequestType.PLAN_QUERY.equals(xResult.getRequestType())) { + XplanStat.addExaminedRowCount(executionContext.getXplanStat(), xResult.getExaminedRowCount()); + } xResult.close(); xResult = null; } @@ -791,7 +788,7 @@ protected void resetPhyConnSocketTimeout() { } private boolean executeQueryX(XPlanTemplate XTemplate, - List tableNames, + List phyTableNames, List> allPhyTableNames, Map params, BaseQueryOperation phyTblOp, @@ -810,18 +807,28 @@ private boolean executeQueryX(XPlanTemplate XTemplate, if (executionContext.getGroupHint() != null && !executionContext.getGroupHint().isEmpty()) { return false; // Not support. } + if (XplanStat.isForbidXplan(executionContext.getXplanStat(), executionContext)) { + return false; + } if (executionContext.getExplain() != null && executionContext.getExplain().explainMode == ExplainResult.ExplainMode.EXECUTE) { + // Generate final plan. + final PolarxExecPlan.ExecPlan.Builder execPlan = + XTemplate.getXPlan(dbName, phyTableNames, params, executionContext); + if (null != execPlan) { + executionContext.setXplanIndex(XTemplate.getIndexName()); + } return false; // Not support. } // Generate final plan. boolean isPhyOp = phyTblOp instanceof PhyTableOperation; final PolarxExecPlan.ExecPlan.Builder execPlan = - XTemplate.getXPlan(dbName, tableNames, params, executionContext); + XTemplate.getXPlan(dbName, phyTableNames, params, executionContext); if (null == execPlan) { return false; // Forbidden by some reason. 
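+ /* A null plan means XTemplate.getXPlan declined to build an X-protocol plan for these
+    parameters; returning false here presumably makes the caller fall back to ordinary
+    SQL execution over JDBC. */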
} + executionContext.setXplanIndex(XTemplate.getIndexName()); final JsonFormat format = new JsonFormat(); if (logger.isDebugEnabled()) { @@ -1678,7 +1685,11 @@ public int[] executeUpdate(BaseQueryOperation phyTableModify) throws SQLExceptio xConnection.setTraceId(executionContext.getTraceId()); connection.flushUnsent(); // Caution: This is important when use deferred sql. xConnection.getSession().setChunkResult(false); - xResult = xPreparedStatement.executeUpdateReturningX(executionContext.getReturning()); + boolean isBackfill = executionContext.getBackfillReturning() != null; + String returning = + isBackfill ? executionContext.getBackfillReturning() : executionContext.getReturning(); + xResult = + xPreparedStatement.executeUpdateReturningX(returning, isBackfill); xResult.getMetaData(); // Compatible with original time record. affectRow = -2; } else { @@ -2118,7 +2129,18 @@ private void handleException(BaseQueryOperation queryOperation, SqlAndParam sqlA sb.append(", SQL: ").append(sqlAndParam.sql); if (!GeneralUtil.isEmpty(sqlAndParam.param.values())) { - sb.append(", PARAM: ").append(String.valueOf(sqlAndParam.param.values())); + boolean isBackfillTask = (executionContext.getDdlJobId() != null + && executionContext.getDdlJobId() > 0); + + sb.append(", PARAM: "); + if (isBackfillTask) { + //ignore the backfill physical slow sql's parameters + sb.append("{1="); + sb.append(sqlAndParam.param.get(1)); + sb.append(",...,}"); + } else { + sb.append(String.valueOf(sqlAndParam.param.values())); + } } sb.append(", ERROR: ").append(e.getMessage()); @@ -2216,7 +2238,10 @@ public boolean isClosed() { } protected String getCurrentDbkey(ITransaction.RW rw) { - TGroupDataSource dataSource = repo.getDataSource(groupName); + if (ConfigDataMode.isColumnarMode()) { + return ANONAMOUS_DBKEY; + } + DataSource dataSource = repo.getDataSource(groupName); String currentDbKey = ANONAMOUS_DBKEY; if (dataSource != null) { if (connection != null) { @@ -2229,7 +2254,7 @@ protected String getCurrentDbkey(ITransaction.RW rw) { MasterSlave masterSlave = ExecUtils.getMasterSlave(inTrans, rw.equals(ITransaction.RW.WRITE), executionContext); currentDbKey = - dataSource.getConfigManager().getDataSource( + ((TGroupDataSource) dataSource).getConfigManager().getDataSource( masterSlave).getDsConfHandle().getDbKey(); } if (StringUtils.isEmpty(currentDbKey)) { diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/sequence/SequenceRangePlus.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/sequence/SequenceRangePlus.java index 69c3df5dc..d194fca00 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/sequence/SequenceRangePlus.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/sequence/SequenceRangePlus.java @@ -1,117 +1,117 @@ -/* - * Copyright [2013-2021], Alibaba Group Holding Limited - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.alibaba.polardbx.sequence; - -import java.util.concurrent.atomic.AtomicLong; - -public class SequenceRangePlus { - - private final long min; - private final long max; - - private final int increment; - - private final AtomicLong value; - - private volatile boolean over = false; - - public SequenceRangePlus(long min, long max, int increment) { - this.min = min; - this.max = max; - this.increment = increment; - this.value = new AtomicLong(min); - } - - public long getBatch(int size) { - if (over) { - return -1; - } - - long currentValue = value.getAndAdd(size * increment) + size * increment - increment; - if (currentValue > max) { - over = true; - return -1; - } - - return currentValue; - } - - public long getAndIncrement() { - if (over) { - return -1; - } - - long currentValue = value.getAndAdd(increment); - if (currentValue > max) { - over = true; - return -1; - } - - return currentValue; - } - - public long[] getCurrentAndMax() { - if (over) { - return null; - } - - long[] currentAndMax = new long[2]; - - long currentValue = value.get(); - if (currentValue > max) { - over = true; - return null; - } - - currentAndMax[0] = currentValue; - currentAndMax[1] = max; - - return currentAndMax; - } - - public boolean updateValue(long expect, long update) { - if (over) { - return true; - } - return value.compareAndSet(expect, update); - } - - public long getMin() { - return min; - } - - public long getMax() { - return max; - } - - public boolean isOver() { - return over; - } - - public void setOver(boolean over) { - this.over = over; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("max: ").append(max).append(", min: ").append(min).append(", increment: ").append(increment) - .append(", value: ").append(value); - return sb.toString(); - } - -} +/* + * Copyright [2013-2021], Alibaba Group Holding Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.alibaba.polardbx.sequence; + +import java.util.concurrent.atomic.AtomicLong; + +public class SequenceRangePlus { + + private final long min; + private final long max; + + private final int increment; + + private final AtomicLong value; + + private volatile boolean over = false; + + public SequenceRangePlus(long min, long max, int increment) { + this.min = min; + this.max = max; + this.increment = increment; + this.value = new AtomicLong(min); + } + + public long getBatch(int size) { + if (over) { + return -1; + } + + long currentValue = value.getAndAdd(size * increment) + size * increment - increment; + if (currentValue > max) { + over = true; + return -1; + } + + return currentValue; + } + + public long getAndIncrement() { + if (over) { + return -1; + } + + long currentValue = value.getAndAdd(increment); + if (currentValue > max) { + over = true; + return -1; + } + + return currentValue; + } + + public long[] getCurrentAndMax() { + if (over) { + return null; + } + + long[] currentAndMax = new long[2]; + + long currentValue = value.get(); + if (currentValue > max) { + over = true; + return null; + } + + currentAndMax[0] = currentValue; + currentAndMax[1] = max; + + return currentAndMax; + } + + public boolean updateValue(long expect, long update) { + if (over) { + return true; + } + return value.compareAndSet(expect, update); + } + + public long getMin() { + return min; + } + + public long getMax() { + return max; + } + + public boolean isOver() { + return over; + } + + public void setOver(boolean over) { + this.over = over; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("max: ").append(max).append(", min: ").append(min).append(", increment: ").append(increment) + .append(", value: ").append(value); + return sb.toString(); + } + +} diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/statistics/ExecuteSQLOperation.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/statistics/ExecuteSQLOperation.java index 70daa77f3..336fd5280 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/statistics/ExecuteSQLOperation.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/statistics/ExecuteSQLOperation.java @@ -16,6 +16,7 @@ package com.alibaba.polardbx.statistics; +import com.alibaba.fastjson.annotation.JSONCreator; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import com.alibaba.polardbx.common.jdbc.Parameters; @@ -27,6 +28,7 @@ public class ExecuteSQLOperation extends AbstractSQLOperation { private String sql; public final static MessageFormat message = new MessageFormat("Execute sql on {0}, sql is: {1}, params is: {2}"); + @JSONCreator public ExecuteSQLOperation(String groupName, String dbKeyName, String sql, Long timestamp) { super(groupName, dbKeyName, timestamp); this.sql = sql; @@ -61,6 +63,10 @@ public void setSql(String sql) { this.sql = sql; } + public String getSql() { + return sql; + } + @Override public String getOperationType() { return "Query"; diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/statistics/RuntimeStatHelper.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/statistics/RuntimeStatHelper.java index 1819f3c4f..8a3ec8c26 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/statistics/RuntimeStatHelper.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/statistics/RuntimeStatHelper.java @@ -28,6 +28,7 @@ import 
com.alibaba.polardbx.optimizer.context.ExecutionContext; import com.alibaba.polardbx.optimizer.core.rel.LogicalView; import com.alibaba.polardbx.optimizer.statis.OperatorStatistics; +import com.alibaba.polardbx.optimizer.utils.ExplainResult; import org.apache.calcite.rel.RelNode; /** @@ -48,7 +49,9 @@ public static void registerCursorStatForPlan(RelNode logicalPlan, ExecutionConte try { boolean isApplySubQuery = executionContext.isApplyingSubquery(); boolean isInExplain = executionContext.getExplain() != null; - + if (isInExplain) { + isInExplain = (!executionContext.getExplain().explainMode.isAnalyze()); + } if (ExecUtils.isOperatorMetricEnabled(executionContext) && !isApplySubQuery && !isInExplain) { // register the run time stat of cursor into logicalPlan RuntimeStatistics runtimeStatistics = (RuntimeStatistics) executionContext.getRuntimeStatistics(); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/statistics/RuntimeStatistics.java b/polardbx-executor/src/main/java/com/alibaba/polardbx/statistics/RuntimeStatistics.java index c15b1bd7c..2a13d824b 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/statistics/RuntimeStatistics.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/statistics/RuntimeStatistics.java @@ -25,7 +25,6 @@ import com.alibaba.polardbx.executor.cursor.impl.FirstThenOtherCursor; import com.alibaba.polardbx.executor.cursor.impl.GroupConcurrentUnionCursor; import com.alibaba.polardbx.executor.cursor.impl.LogicalViewResultCursor; -import com.alibaba.polardbx.executor.cursor.impl.MultiCursorAdapter; import com.alibaba.polardbx.executor.cursor.impl.MyPhysicalCursor; import com.alibaba.polardbx.executor.mpp.execution.TaskId; import com.alibaba.polardbx.executor.mpp.execution.TaskStatus; @@ -87,10 +86,12 @@ public class RuntimeStatistics extends RuntimeStat implements CpuCollector { private RelNode planTree; private final String traceId; - private final Map relationToStatistics = new ConcurrentHashMap<>(); + private final Map relationToStatistics = + new ConcurrentHashMap<>(); private final Map memoryToStatistics = new ConcurrentHashMap<>(); private final Map relationIdToNode = new HashMap<>(); - private final WeakHashMap mppOperatorStats = new WeakHashMap<>(); + private final WeakHashMap mppOperatorStats = + new WeakHashMap<>(); private QuerySpillSpaceMonitor querySpillSpaceMonitor; private MemoryPool holdMemoryPool; private CpuStat sqlWholeStageCpuStat; @@ -106,6 +107,7 @@ public class RuntimeStatistics extends RuntimeStat implements CpuCollector { private AtomicLong totalPhyFetchRows = new AtomicLong(0L); private AtomicLong totalPhySqlTimecost = new AtomicLong(0L); private AtomicLong totalPhyConnTimecost = new AtomicLong(0L); + private AtomicLong columnarSnapshotTimecost = new AtomicLong(0L); private AtomicLong spillCnt = new AtomicLong(0L); /** @@ -151,7 +153,9 @@ public void register(RelNode relation, Executor executor) { relationIdToNode.put(relation.getRelatedId(), relation); } if (logger.isDebugEnabled()) { - logger.debug("register:" + relation.getRelatedId() + ":" + relation.getClass().getSimpleName()); + logger.debug( + "register:" + relation.getRelatedId() + ":" + relation.getClass() + .getSimpleName()); } relationToStatistics.put(relation.getRelatedId(), sg); } @@ -163,7 +167,8 @@ public void register(RelNode relation, Executor executor) { sg.totalCount.addAndGet(1); } } catch (Throwable e) { - logger.warn("register cpu stat of executor failed for " + relation.getRelTypeName(), e); + logger.warn( + "register cpu stat of 
executor failed for " + relation.getRelTypeName(), e); } } @@ -191,7 +196,8 @@ public void register(RelNode relation, Cursor cursor) { //FIXME mpp也会走到这个流程里,BKA的gather if (logger.isDebugEnabled()) { logger.debug( - this + " register3:" + relation.getRelatedId() + ":" + relation.getClass().getSimpleName() + this + " register3:" + relation.getRelatedId() + ":" + + relation.getClass().getSimpleName() + "," + cursor.getClass().getSimpleName()); } if (!relationIdToNode.containsKey(relation.getRelatedId())) { @@ -202,7 +208,8 @@ public void register(RelNode relation, Cursor cursor) { registerCursorToGroup(cursor, sg); } } catch (Throwable e) { - logger.warn("register cpu stat of cursor failed for " + relation.getRelTypeName(), e); + logger.warn( + "register cpu stat of cursor failed for " + relation.getRelTypeName(), e); } } @@ -233,7 +240,8 @@ public static void registerAsyncTaskTimeCost(Cursor targetCursor, long asyncTask * the async task cpu time cost of Union is C1 + C2, * because C1 & C2 are also contributed to Union; * If the async task cpu time cost of LV3 is 0, - * then the async task cpu time cost of Join is C1 + C2, because C1 & C2 are also contributed to Join; + * then the async task cpu time cost of Join is C1 + C2, + * because C1 & C2 are also contributed to Join; * * */ @@ -284,26 +292,20 @@ public static void addSelfAsyncTaskCpuTimeToParent(OperatorStatisticsGroup sg) { } } - public static void registerWaitLockCpuTime(OperatorStatisticsGroup targetPlanStatGroup, long waitLockDuration) { + public static void registerWaitLockCpuTime(OperatorStatisticsGroup targetPlanStatGroup, + long waitLockDuration) { if (targetPlanStatGroup != null) { targetPlanStatGroup.waitLockDuration.addAndGet(waitLockDuration); } } protected void registerCursorToGroup(Cursor cursor, OperatorStatisticsGroup sg) { - if (cursor instanceof MultiCursorAdapter) { - for (Cursor c : ((MultiCursorAdapter) cursor).getSubCursors()) { - initTargetPlanStatGroup(sg, c); - } - } else { - /** - * when the cursor is AdaptiveParallelCursor, it is no need to - * register its runtime stats to its relation, because its input - * cursors will be registered during their init process - */ - initTargetPlanStatGroup(sg, cursor); - - } + /** + * when the cursor is AdaptiveParallelCursor, it is no need to + * register its runtime stats to its relation, because its input + * cursors will be registered during their init process + */ + initTargetPlanStatGroup(sg, cursor); } private void initTargetPlanStatGroup(OperatorStatisticsGroup sg, Cursor c) { @@ -338,8 +340,10 @@ public void setPlanTree(RelNode planTree) { protected void initOperatorStats() { SimplePlanVisitor simplePlanVisitor = new SimplePlanVisitor(); simplePlanVisitor.visit(this.planTree); - this.isSimpleLvPlan = simplePlanVisitor.isSamplePlan() && !(this.planTree instanceof Gather); - if (this.planTree instanceof PhyTableOperation || this.planTree instanceof SingleTableOperation) { + this.isSimpleLvPlan = + simplePlanVisitor.isSamplePlan() && !(this.planTree instanceof Gather); + if (this.planTree instanceof PhyTableOperation + || this.planTree instanceof SingleTableOperation) { isFromAllAtOnePhyTable = true; this.isSimpleLvPlan = true; } @@ -374,7 +378,8 @@ protected void buildStatisticsGroup(RelNode parent, RelNode targetRel) { } relationToStatistics.put(targetRel.getRelatedId(), osg); if (logger.isDebugEnabled()) { - logger.debug("register2:" + targetRel.getRelatedId() + ":" + targetRel.getClass().getSimpleName()); + logger.debug("register2:" + targetRel.getRelatedId() + ":" 
+ targetRel.getClass() + .getSimpleName()); } if (!isLvOrLm && !isFromAllAtOnePhyTable) { List inputRelList = targetRel.getInputs(); @@ -394,6 +399,7 @@ public void clear() { totalPhySqlTimecost.set(0); sqlLogCpuTime.set(0); totalPhyConnTimecost.set(0); + columnarSnapshotTimecost.set(0); } public Map getRelationToStatistics() { @@ -416,9 +422,11 @@ public Map toSketch() { public Map toMppSketch() { final Map results = new IdentityHashMap<>(); if (mppOperatorStats != null && mppOperatorStats.size() > 0) { - for (Map.Entry operatorStats : mppOperatorStats.entrySet()) { + for (Map.Entry operatorStats : mppOperatorStats + .entrySet()) { if (relationIdToNode.containsKey(operatorStats.getKey())) { - results.put(relationIdToNode.get(operatorStats.getKey()), operatorStats.getValue()); + results.put(relationIdToNode.get(operatorStats.getKey()), + operatorStats.getValue()); } } } @@ -431,8 +439,8 @@ public synchronized void collectMppStatistics(TaskStatus taskStatus, ExecutionCo Map taskRuntimeStats = taskStatus.getRuntimeStatistics(); if (taskRuntimeStats != null) { - for (Map.Entry entry : taskRuntimeStats - .entrySet()) { + for (Map.Entry + entry : taskRuntimeStats.entrySet()) { RuntimeStatistics.OperatorStatisticsGroup serverPointStatisticsGroup = getRelationToStatistics().get(entry.getKey()); if (serverPointStatisticsGroup == null) { @@ -446,21 +454,24 @@ public synchronized void collectMppStatistics(TaskStatus taskStatus, ExecutionCo serverPointStatisticsGroup.hasInputOperator = taskOperatorStat.hasInputOperator; serverPointStatisticsGroup.statistics.addAll(taskOperatorStat.statistics); - serverPointStatisticsGroup.createAndInitJdbcStmtDuration - .addAndGet(taskOperatorStat.createAndInitJdbcStmtDuration.get()); - serverPointStatisticsGroup.prepareStmtEnvDuration - .addAndGet(taskOperatorStat.prepareStmtEnvDuration.get()); - serverPointStatisticsGroup.createConnDuration.addAndGet(taskOperatorStat.createConnDuration.get()); - serverPointStatisticsGroup.initConnDuration.addAndGet(taskOperatorStat.initConnDuration.get()); - serverPointStatisticsGroup.waitConnDuration.addAndGet(taskOperatorStat.waitConnDuration.get()); - serverPointStatisticsGroup.createAndInitJdbcStmtDuration - .addAndGet(taskOperatorStat.createAndInitJdbcStmtDuration.get()); - serverPointStatisticsGroup.execJdbcStmtDuration - .addAndGet(taskOperatorStat.execJdbcStmtDuration.get()); - serverPointStatisticsGroup.fetchJdbcResultSetDuration - .addAndGet(taskOperatorStat.fetchJdbcResultSetDuration.get()); - serverPointStatisticsGroup.closeAndClearJdbcEnv - .addAndGet(taskOperatorStat.closeAndClearJdbcEnv.get()); + serverPointStatisticsGroup.createAndInitJdbcStmtDuration.addAndGet( + taskOperatorStat.createAndInitJdbcStmtDuration.get()); + serverPointStatisticsGroup.prepareStmtEnvDuration.addAndGet( + taskOperatorStat.prepareStmtEnvDuration.get()); + serverPointStatisticsGroup.createConnDuration.addAndGet( + taskOperatorStat.createConnDuration.get()); + serverPointStatisticsGroup.initConnDuration.addAndGet( + taskOperatorStat.initConnDuration.get()); + serverPointStatisticsGroup.waitConnDuration.addAndGet( + taskOperatorStat.waitConnDuration.get()); + serverPointStatisticsGroup.createAndInitJdbcStmtDuration.addAndGet( + taskOperatorStat.createAndInitJdbcStmtDuration.get()); + serverPointStatisticsGroup.execJdbcStmtDuration.addAndGet( + taskOperatorStat.execJdbcStmtDuration.get()); + serverPointStatisticsGroup.fetchJdbcResultSetDuration.addAndGet( + taskOperatorStat.fetchJdbcResultSetDuration.get()); + 
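
// [Editor's note] This reformatted block folds each finished MPP task's JDBC timings into
// the server-side AtomicLong accumulators of the matching OperatorStatisticsGroup. One
// detail the pure reformat carries over from the old code: createAndInitJdbcStmtDuration
// is accumulated twice, once at the top of the block and once again after
// waitConnDuration, so statement-creation time is double-counted unless that is deliberate.
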
serverPointStatisticsGroup.closeAndClearJdbcEnv.addAndGet( + taskOperatorStat.closeAndClearJdbcEnv.get()); //FIXME 需要确认下 addPhySqlTimecost(taskOperatorStat.execJdbcStmtDuration.get()); } @@ -470,15 +481,17 @@ public synchronized void collectMppStatistics(TaskStatus taskStatus, ExecutionCo MemoryStatisticsGroup nodeStatisticsGroup = getMemoryToStatistics().get(taskStatus.getSelf().getNodeServer().toString()); if (nodeStatisticsGroup == null) { - nodeStatisticsGroup = new MemoryStatisticsGroup(memoryStatistics.getQueryMemoryUsage(), - memoryStatistics.getQueryMaxMemoryUsage(), 0); + nodeStatisticsGroup = + new MemoryStatisticsGroup(memoryStatistics.getQueryMemoryUsage(), + memoryStatistics.getQueryMaxMemoryUsage(), 0); nodeStatisticsGroup.setMemoryStatistics(new TreeMap<>()); - getMemoryToStatistics() - .put(taskStatus.getSelf().getNodeServer().toString(), nodeStatisticsGroup); + getMemoryToStatistics().put(taskStatus.getSelf().getNodeServer().toString(), + nodeStatisticsGroup); } - if (memoryStatistics.getQueryMaxMemoryUsage() > nodeStatisticsGroup - .getMaxMemoryUsage()) { - nodeStatisticsGroup.setMaxMemoryUsage(memoryStatistics.getQueryMaxMemoryUsage()); + if (memoryStatistics.getQueryMaxMemoryUsage() + > nodeStatisticsGroup.getMaxMemoryUsage()) { + nodeStatisticsGroup.setMaxMemoryUsage( + memoryStatistics.getQueryMaxMemoryUsage()); } nodeStatisticsGroup.getMemoryStatistics() .put("Task@" + taskStatus.getTaskId().toString(), memoryStatistics); @@ -565,6 +578,9 @@ public Metrics toMetrics() { // plan long phyConnTc = 0; + // the time cost of generate columnar snapshot in SplitManager + long columnarSnapshotTc = 0; + long affectedPhyRows = 0; boolean isQuery = SqlTypeUtils.isSelectSqlType(sqlType); @@ -585,8 +601,10 @@ public Metrics toMetrics() { fetchRsTc = NOT_SUPPORT_VALUE; if (planTree != null && isQuery) { fetchRsTc = 0; - for (Map.Entry entry : relationToStatistics.entrySet()) { - final AbstractRelNode relation = (AbstractRelNode) relationIdToNode.get(entry.getKey()); + for (Map.Entry + entry : relationToStatistics.entrySet()) { + final AbstractRelNode relation = + (AbstractRelNode) relationIdToNode.get(entry.getKey()); OperatorStatisticsGroup operatorStatisticsGroup = entry.getValue(); if (relation instanceof LogicalView || relation instanceof LogicalInsert) { @@ -606,6 +624,7 @@ public Metrics toMetrics() { // ====== Cpu ======== logCpuTc = this.sqlLogCpuTime.get(); execSqlTc = this.totalPhySqlTimecost.get(); + columnarSnapshotTc = this.columnarSnapshotTimecost.get(); execPlanTc = logCpuTc - sqlToPlanTc; if (fetchRsTc > NOT_SUPPORT_VALUE) { phyCpuTc = execSqlTc + fetchRsTc; @@ -624,7 +643,8 @@ public Metrics toMetrics() { queryMem = queryPool.getMaxMemoryUsage(); long globalMemLimit = MemoryManager.getInstance().getGlobalMemoryPool().getMaxLimit(); BigDecimal queryMemPctVal = new BigDecimal(queryMem * 100 / globalMemLimit); - queryMemPct = queryMemPctVal.setScale(4, BigDecimal.ROUND_HALF_UP).doubleValue(); + queryMemPct = queryMemPctVal.setScale( + 4, BigDecimal.ROUND_HALF_UP).doubleValue(); MemoryPool planMemoryPool = ((QueryMemoryPool) queryPool).getPlanMemPool(); if (planMemoryPool != null) { @@ -660,6 +680,7 @@ public Metrics toMetrics() { metrics.execSqlTc = execSqlTc; metrics.fetchRsTc = fetchRsTc; metrics.phyConnTc = phyConnTc; + metrics.columnarSnapshotTc = columnarSnapshotTc; metrics.queryMemPct = queryMemPct; metrics.queryMem = queryMem; @@ -715,7 +736,8 @@ public double getSqlMemoryMaxUsagePct() { long maxMemUsage = memoryPool.getMaxMemoryUsage(); long globalMemLimit = 
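
// [Editor's note] Here, as in toMetrics() above, the percentage is computed with long
// integer division (maxMemUsage * 100 / globalMemLimit) before the BigDecimal is built,
// so the setScale(4, ROUND_HALF_UP) that this hunk merely rewraps can never produce
// fractional digits. If sub-percent precision is actually wanted, something like
//     BigDecimal.valueOf(maxMemUsage).multiply(BigDecimal.valueOf(100))
//         .divide(BigDecimal.valueOf(globalMemLimit), 4, RoundingMode.HALF_UP)
// would preserve it.
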
MemoryManager.getInstance().getGlobalMemoryPool().getMaxLimit(); BigDecimal queryMemPctVal = new BigDecimal(maxMemUsage * 100 / globalMemLimit); - double queryMemPct = queryMemPctVal.setScale(4, BigDecimal.ROUND_HALF_UP).doubleValue(); + double queryMemPct = queryMemPctVal.setScale( + 4, BigDecimal.ROUND_HALF_UP).doubleValue(); return queryMemPct; } @@ -869,27 +891,28 @@ public OperatorStatisticsGroup(RuntimeStatistics runtimeStat) { } @JsonCreator - public OperatorStatisticsGroup(@JsonProperty("statistics") Set statistics, - @JsonProperty("hasInputOperator") boolean hasInputOperator, - @JsonProperty("finishCount") long finishCount, - @JsonProperty("totalCount") long totalCount, - @JsonProperty("childrenAsyncTaskCpuTime") long childrenAsyncTaskCpuTime, - @JsonProperty("selfAsyncTaskCpuTime") long selfAsyncTaskCpuTime, - @JsonProperty("selfAsyncTaskTimeCost") long selfAsyncTaskTimeCost, - @JsonProperty("processLvTimeCost") long processLvTimeCost, - @JsonProperty("waitLockDuration") long waitLockDuration, - @JsonProperty("createConnDuration") long createConnDuration, - @JsonProperty("waitConnDuration") long waitConnDuration, - @JsonProperty("initConnDuration") long initConnDuration, - @JsonProperty("prepareStmtEnvDuration") long prepareStmtEnvDuration, - @JsonProperty("createAndInitJdbcStmtDuration") - long createAndInitJdbcStmtDuration, - @JsonProperty("execJdbcStmtDuration") long execJdbcStmtDuration, - @JsonProperty("fetchJdbcResultSetDuration") long fetchJdbcResultSetDuration, - @JsonProperty("closeAndClearJdbcEnv") long closeAndClearJdbcEnv, - @JsonProperty("phyResultSetRowCount") long phyResultSetRowCount, - @JsonProperty("fetchJdbcResultSetParallelism") - int fetchJdbcResultSetParallelism) { + public OperatorStatisticsGroup( + @JsonProperty("statistics") Set statistics, + @JsonProperty("hasInputOperator") boolean hasInputOperator, + @JsonProperty("finishCount") long finishCount, + @JsonProperty("totalCount") long totalCount, + @JsonProperty("childrenAsyncTaskCpuTime") long childrenAsyncTaskCpuTime, + @JsonProperty("selfAsyncTaskCpuTime") long selfAsyncTaskCpuTime, + @JsonProperty("selfAsyncTaskTimeCost") long selfAsyncTaskTimeCost, + @JsonProperty("processLvTimeCost") long processLvTimeCost, + @JsonProperty("waitLockDuration") long waitLockDuration, + @JsonProperty("createConnDuration") long createConnDuration, + @JsonProperty("waitConnDuration") long waitConnDuration, + @JsonProperty("initConnDuration") long initConnDuration, + @JsonProperty("prepareStmtEnvDuration") long prepareStmtEnvDuration, + @JsonProperty("createAndInitJdbcStmtDuration") + long createAndInitJdbcStmtDuration, + @JsonProperty("execJdbcStmtDuration") long execJdbcStmtDuration, + @JsonProperty("fetchJdbcResultSetDuration") long fetchJdbcResultSetDuration, + @JsonProperty("closeAndClearJdbcEnv") long closeAndClearJdbcEnv, + @JsonProperty("phyResultSetRowCount") long phyResultSetRowCount, + @JsonProperty("fetchJdbcResultSetParallelism") + int fetchJdbcResultSetParallelism) { this.statistics = statistics; this.hasInputOperator = hasInputOperator; this.finishCount.set(finishCount); @@ -921,6 +944,7 @@ RuntimeStatisticsSketch toSketch() { long startupDuration = 0; long duration = 0; long rowCount = 0; + long runtimeFilteredCount = 0; long memory = 0; int spillCnt = 0; long workerDuration = 0; @@ -929,6 +953,7 @@ RuntimeStatisticsSketch toSketch() { startupDuration += statistic.getStartupDuration(); duration += statistic.getProcessDuration(); rowCount += statistic.getRowCount(); + runtimeFilteredCount += 
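
// [Editor's note] toSketch() and toSketchExt() now also sum a runtimeFilteredCount
// (presumably rows pruned by runtime filters) next to rowCount, and both pass it
// through the correspondingly widened RuntimeStatisticsSketch and
// RuntimeStatisticsSketchExt constructors, so the two sketch variants stay in step.
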
statistic.getRuntimeFilteredCount(); memory += statistic.getMemory(); workerDuration += statistic.getWorkerDuration(); spillCnt += statistic.getSpillCnt(); @@ -942,14 +967,9 @@ RuntimeStatisticsSketch toSketch() { n = this.fetchJdbcResultSetParallelism; } - return new RuntimeStatisticsSketch(durationSeconds, - startupDurationSeconds, - workerDurationSeconds, - rowCount, - outputBytes, - memory, - n, - spillCnt); + return new RuntimeStatisticsSketch(durationSeconds, startupDurationSeconds, + workerDurationSeconds, rowCount, runtimeFilteredCount, + outputBytes, memory, n, spillCnt); } RuntimeStatisticsSketch toSketchExt() { @@ -959,6 +979,7 @@ RuntimeStatisticsSketch toSketchExt() { long duration = 0; long closeDuration = 0; long rowCount = 0; + long runtimeFilteredCount = 0; long memory = 0; int spillCnt = 0; long outputBytes = 0; @@ -981,6 +1002,7 @@ RuntimeStatisticsSketch toSketchExt() { duration += os.getProcessDuration(); closeDuration += os.getCloseDuration(); rowCount += os.getRowCount(); + runtimeFilteredCount += os.getRuntimeFilteredCount(); memory += os.getMemory(); spillCnt += os.getSpillCnt(); } @@ -988,7 +1010,8 @@ RuntimeStatisticsSketch toSketchExt() { createConnDurationSum = this.createConnDuration.get(); waitConnDurationSum = this.waitConnDuration.get(); initConnDurationSum = this.initConnDuration.get(); - totalGetConnDurationSum = initConnDurationSum + waitConnDurationSum + createConnDurationSum; + totalGetConnDurationSum = + initConnDurationSum + waitConnDurationSum + createConnDurationSum; createAndInitJdbcStmtDurationSum = this.createAndInitJdbcStmtDuration.get(); execJdbcStmtDurationSum = this.execJdbcStmtDuration.get(); fetchJdbcResultSetDurationSum = this.fetchJdbcResultSetDuration.get(); @@ -1007,7 +1030,9 @@ RuntimeStatisticsSketch toSketchExt() { duration = this.runtimeStat.sqlLogCpuTime.get() - sqlToPlanTc; rowCount = this.phyResultSetRowCount.get(); } - rsse = new RuntimeStatisticsSketchExt(startupDuration, duration, closeDuration, 0, rowCount, outputBytes, + rsse = new RuntimeStatisticsSketchExt( + startupDuration, duration, closeDuration, 0, + rowCount, runtimeFilteredCount, outputBytes, memory, n, hasInputOperator, spillCnt); rsse.setCreateConnDurationNanoSum(createConnDurationSum); @@ -1019,13 +1044,10 @@ RuntimeStatisticsSketch toSketchExt() { rsse.setExecJdbcStmtDurationNanoSum(execJdbcStmtDurationSum); rsse.setFetchJdbcResultSetDurationNanoSum(fetchJdbcResultSetDurationSum); rsse.setCloseJdbcResultSetDurationNanoSum(closeJdbcResultSetDurationSum); - rsse.setSubOperatorStatCount(statisticCount); rsse.setChildrenAsyncTaskDuration(childrenAsyncCpuTimeSum); rsse.setSelfAsyncTaskDuration(selfAsyncCpuTimeSum); - return rsse; - } } @@ -1084,6 +1106,9 @@ public static class Metrics { // plan public long phyConnTc; + // the time cost of generate columnar snapshot in SplitManager + public long columnarSnapshotTc; + // the sql template id of planCache key public String sqlTemplateId = "-"; @@ -1126,6 +1151,11 @@ public void addPhyConnTimecost(long totalPhyConnTimecost) { this.totalPhyConnTimecost.addAndGet(totalPhyConnTimecost); } + @Override + public void addColumnarSnapshotTimecost(long columnarSnapshotTimecost) { + this.columnarSnapshotTimecost.addAndGet(columnarSnapshotTimecost); + } + @Override public void addPhySqlCount(long shardCount) { totalPhySqlCount.addAndGet(shardCount); diff --git a/polardbx-executor/src/main/java/com/alibaba/polardbx/statistics/SQLRecorderLogger.java 
b/polardbx-executor/src/main/java/com/alibaba/polardbx/statistics/SQLRecorderLogger.java index 92c6444ca..ff3cbf654 100644 --- a/polardbx-executor/src/main/java/com/alibaba/polardbx/statistics/SQLRecorderLogger.java +++ b/polardbx-executor/src/main/java/com/alibaba/polardbx/statistics/SQLRecorderLogger.java @@ -71,11 +71,22 @@ public class SQLRecorderLogger { // Slow trans log public final static Logger slowTransLogger = LoggerFactory.getLogger("slow_trans"); + // Inner sql executed by InnerConnection + public final static Logger innerSqlLogger = LoggerFactory.getLogger("inner_sql", true); + /** + * 0:sql#
+ * 1:success(0 for success, 1 for fail)#
+ * 2:time_cost + */ + public final static MessageFormat innerSqlFormat = new MessageFormat("{0}#{1}#{2}"); + + public final static Logger cdcLogger = LoggerFactory.getLogger("cdc_log"); + // format_version#trace_id#trx_type#start_time#finish_time#duration_time#status#active_time#idle_time // #write_time#read_time#write_affect_rows#read_return_rows#mdl_wait_time#get_tso_time#prepare_time - // #trx_log_time#commit_time#rollback_time#sql_count#rw_sql_count#trx_template_id# + // #trx_log_time#commit_time#rollback_time#sql_count#rw_sql_count#trx_template_id#trx_log_type# public final static MessageFormat slowTransFormat = new MessageFormat( - "{0}#{1}#{2}#{3}#{4}#{5}#{6}#{7}#{8}#{9}#{10}#{11}#{12}#{13}#{14}#{15}#{16}#{17}#{18}#{19}#{20}#{21}#"); + "{0}#{1}#{2}#{3}#{4}#{5}#{6}#{7}#{8}#{9}#{10}#{11}#{12}#{13}#{14}#{15}#{16}#{17}#{18}#{19}#{20}#{21}#{22}#"); } diff --git a/polardbx-executor/src/main/resources/webapp/assets/drds.css b/polardbx-executor/src/main/resources/webapp/assets/drds.css index f9562d8d5..f5c49e4c8 100644 --- a/polardbx-executor/src/main/resources/webapp/assets/drds.css +++ b/polardbx-executor/src/main/resources/webapp/assets/drds.css @@ -16,37 +16,41 @@ /** ======================== **/ body { - padding-top: 20px; + /*padding-top: 20px;*/ padding-bottom: 20px; + background-color: #e3e5e8; } h2 { - color: #fff; + color: #000000; } h3 { - color: #dadada; + color: #000000; } h1 small, h2 small, h3 small, h4 small, h5 small, h6 small { - color: #888; + color: #000000; } pre { margin: 0; - padding: 5px; - color: #999; - background-color: #2C2F38; + padding: 5px 10px 5px 10px; + color: #000000; border: none; } +a { + color: #1890ff; +} + /** Boostrap overrides **/ /** ================== **/ .container { min-width: 960px; width: auto !important; - max-width: 1200px; + background-color: #e8eaee; } .modal-lg { @@ -79,9 +83,18 @@ pre { margin: 0; } +.row { + margin-left: 0; + margin-right: 0; +} + /* To make the table striping less stark */ +.table .table { + background-color: #ffffff; +} + .table-striped > tbody > tr:nth-of-type(odd) { - background-color: #353944; + background-color: #f8f8f8; } .table > tbody > tr { @@ -130,6 +143,10 @@ pre { color: #fff; } +.font-black { + color: #000; +} + .font-light { color: #d6f3fb; } @@ -165,12 +182,11 @@ pre { /** ====== **/ .navbar { - padding: 0 5px 0 5px; - border-bottom: 1px solid #1EDCFF; + background-color: #ffffff; } .navbar-brand { - color: #fff; + color: #000000; text-transform: uppercase; font-size: 24px; padding-left: 30px; @@ -193,8 +209,8 @@ pre { } .navbar-cluster-info .text { - color: #fff; - font-size: 16px; + color: #000000; + font-size: 18px; } #page-subtitle { @@ -270,6 +286,10 @@ pre { margin-top: 20px; } +.card-container { + border: 1px solid rgba(0, 0, 0, 0.06); +} + /** ===================== **/ /** Cluster Overview Page **/ /** ===================== **/ @@ -277,23 +297,26 @@ pre { /** HUD **/ /** === **/ +.content-container { + margin: 24px 24px 0; + padding: inherit; +} + .hud-container { background-size: 5px 5px; - background-image: linear-gradient(to right, #2d2d2d 1px, transparent 1px), linear-gradient(to bottom, #2d2d2d 1px, transparent 1px); - margin: -15px 0 0 0; - padding: 15px 0 0 0; - height: 375px; + padding: 20px; + background-color: #ffffff; } + .stat-line-end { padding-top: 3px; - margin-bottom: 20px; + margin-bottom: 40px; } .stat { - color: #fff; + color: #111111; border-bottom: 1px #424242 solid; - border-left: 1px #424242 solid; margin: 0; font-size: 72px; height: 81px; @@ -318,8 +341,7 @@ pre { 
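
[Editor's note on the SQLRecorderLogger hunk above] The new inner_sql log line and the extended slow_trans line are both plain '#'-separated java.text.MessageFormat patterns; slowTransFormat grows from 22 fields to 23 ({22}) to carry the trx_log_type added to the header comment. A small sketch of how the inner-SQL pattern renders, with invented field values:

import java.text.MessageFormat;

public class InnerSqlLogLineDemo {
    // Mirrors innerSqlFormat: sql # success(0 ok / 1 fail) # time_cost
    private static final MessageFormat INNER_SQL_FORMAT = new MessageFormat("{0}#{1}#{2}");

    public static void main(String[] args) {
        // MessageFormat instances are not thread-safe; a shared static format
        // needs external synchronization or a clone per logging call.
        synchronized (INNER_SQL_FORMAT) {
            String line = INNER_SQL_FORMAT.format(new Object[] {"SELECT 1", 0, 12});
            System.out.println(line); // SELECT 1#0#12
        }
    }
}
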
.stat-title > .text { padding: 5px; - background-color: #3e3e3e; - color: #ccc; + color: #000000; } .sparkline { @@ -347,21 +369,23 @@ pre { /** Query list **/ /** ========== **/ +.query-list-container { + padding: 20px; + background-color: #ffffff; + margin-top: 40px; +} -#query-list-title { +.container-title { font-size: 22px; - background-color: #696E77; - padding: 3px 3px 3px 7px; - text-transform: uppercase; - color: #fff; - border-top: 1px solid #9DA4AF; + font-weight: bold; + background-color: #ffffff; + padding: 3px 0 20px 0; + color: #000000; + margin-bottom: 10px; } #query-list { - padding: 5px 0 0 15px; - border-bottom: 1px #424242 solid; - border-left: 1px #424242 solid; - border-right: 1px #424242 solid; + /*padding: 5px 15px 0 15px;*/ } .error-message { @@ -370,10 +394,8 @@ pre { } .query { - border-bottom: 1px #424242 solid; - margin-bottom: 0px; - margin-left: -15px; - padding-left: 15px; + margin: 20px 15px 30px 0; + border: 1px solid rgba(5, 5, 5, 0.06); } .query-snippet { @@ -387,8 +409,9 @@ pre { .progress { margin: 0; - background-color: #808286; height: 28px; + padding-left: 10px; + padding-right: 10px; } .progress-bar { @@ -398,6 +421,7 @@ pre { .tinystat-row { padding-right: 0; + padding-top: 10px } .tinystat { @@ -406,39 +430,46 @@ pre { } .stat-row { - padding: 2px 0 1px 0; + padding: 5px 0 0px 10px; +} + +.query-header-container { + background-size: 5px 5px; + padding: 20px; + background-color: #ffffff; } .query-header { padding: 4px 0 4px 0; - background-color: #1b1b1b; + background-color: #f5f5f5; margin: 0; } .query-header-queryid { - margin-left: -15px; - margin-right: -35px; - padding: 8px 0 8px 0; + /*margin-left: -15px;*/ + /*margin-right: -35px;*/ + /*margin-bottom: 10px;*/ + padding: 8px 0 8px 10px; } .query-header-timestamp { text-align: right; color: #b1b1b1; + padding-right: 10px } .query-progress-container { - padding: 0; } /** Query List Toolbar **/ /** ================== **/ .toolbar-row { + /*padding-left: 15px;*/ padding-right: 15px; } .toolbar-col { - background-color: #454a58; margin-top: -5px; padding: 5px; } @@ -449,18 +480,18 @@ pre { .toolbar-row .input-group-btn .btn:focus { outline: none; - background-color: #5bc0de; + background-color: #2db7f5; border: 1px #529eb5 solid; } .toolbar-row .input-group-btn .active:focus { outline: none; - background-color: #277F98; + background-color: #108ee9; border: 1px #529eb5 solid; } .toolbar-row .input-group-btn .active { - background-color: #277F98; + background-color: #108ee9; border: 1px #529eb5 solid; } @@ -475,8 +506,8 @@ pre { color: #000; background-color: #fff; border: none; - border-top: 2px solid #454A58; - border-bottom: 3px solid #454A58; + border-top: 2px solid #f5f5f5; + border-bottom: 3px solid #f5f5f5; } .toolbar-row .input-group .form-control { @@ -528,7 +559,7 @@ pre { /** ================== **/ #query-detail { - color: #cecece; + color: #000000; font-size: 16px; } @@ -544,6 +575,19 @@ pre { /** Summary Section **/ /** =============== **/ +.info-container-top { + background-size: 5px 5px; + padding: 20px; + background-color: #ffffff; +} + +.info-container-next { + background-size: 5px 5px; + padding: 20px; + background-color: #ffffff; + margin-top: 40px; +} + .info-title { width: 170px; font-weight: 500; @@ -551,7 +595,7 @@ pre { } .info-text { - color: #fff; + color: #000; } .table > tbody > tr > .info-sparkline-text { @@ -661,7 +705,7 @@ pre { .expand-charts-button { padding: 45px 3px 45px 4px; - background-color: #3A3F4C; + background-color: #eeeeee; } .expanded-chart-title { @@ 
-679,7 +723,7 @@ pre { .graph-container { padding: 15px; - background-color: #1b1b1b; + background-color: #ffffff; height: 1000px; overflow: scroll; width: 100%; @@ -752,6 +796,23 @@ text { stroke-width: 0; } +.v-pipeline-edge { + stroke: transparent; + fill: transparent; + stroke-width: 0; +} + +.pipeline-edge { + stroke: #dadada; + fill: transparent; + stroke-width: 3; +} + +.pipeline-arrowhead { + fill: #dadada; + stroke-width: 0; +} + g .cluster:hover { cursor: pointer; } diff --git a/polardbx-executor/src/main/resources/webapp/assets/favicon.ico b/polardbx-executor/src/main/resources/webapp/assets/favicon.ico deleted file mode 100644 index 70ce7dab0..000000000 Binary files a/polardbx-executor/src/main/resources/webapp/assets/favicon.ico and /dev/null differ diff --git a/polardbx-executor/src/main/resources/webapp/assets/favicon.png b/polardbx-executor/src/main/resources/webapp/assets/favicon.png new file mode 100644 index 000000000..59e35c299 Binary files /dev/null and b/polardbx-executor/src/main/resources/webapp/assets/favicon.png differ diff --git a/polardbx-executor/src/main/resources/webapp/dist/embedded_plan.js b/polardbx-executor/src/main/resources/webapp/dist/embedded_plan.js index 9c78ed93a..65adf2b54 100644 --- a/polardbx-executor/src/main/resources/webapp/dist/embedded_plan.js +++ b/polardbx-executor/src/main/resources/webapp/dist/embedded_plan.js @@ -94,7 +94,7 @@ /***/ (function(module, exports, __webpack_require__) { "use strict"; -eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.LivePlan = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nvar _server = __webpack_require__(/*! react-dom/server */ \"./node_modules/react-dom/server.browser.js\");\n\nvar _server2 = _interopRequireDefault(_server);\n\nvar _dagreD = __webpack_require__(/*! dagre-d3 */ \"./node_modules/dagre-d3/index.js\");\n\nvar dagreD3 = _interopRequireWildcard(_dagreD);\n\nvar _d = __webpack_require__(/*! d3 */ \"./node_modules/d3/index.js\");\n\nvar d3 = _interopRequireWildcard(_d);\n\nvar _utils = __webpack_require__(/*! ../utils */ \"./utils.js\");\n\nvar _QueryHeader = __webpack_require__(/*! ./QueryHeader */ \"./components/QueryHeader.jsx\");\n\nfunction _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } }\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\nvar StageStatistics = function (_React$Component) {\n _inherits(StageStatistics, _React$Component);\n\n function StageStatistics() {\n _classCallCheck(this, StageStatistics);\n\n return _possibleConstructorReturn(this, (StageStatistics.__proto__ || Object.getPrototypeOf(StageStatistics)).apply(this, arguments));\n }\n\n _createClass(StageStatistics, [{\n key: \"render\",\n value: function render() {\n var stage = this.props.stage;\n var stats = this.props.stage.stageStats;\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"h3\",\n { className: \"margin-top: 0\" },\n \"Stage \",\n stage.id\n ),\n stage.state,\n _react2.default.createElement(\"hr\", null),\n \"CPU: \",\n stats.totalCpuTime,\n _react2.default.createElement(\"br\", null),\n stats.fullyBlocked ? 
_react2.default.createElement(\n \"div\",\n { style: { color: '#ff0000' } },\n \"Blocked: \",\n stats.totalBlockedTime,\n \" \"\n ) : _react2.default.createElement(\n \"div\",\n null,\n \"Blocked: \",\n stats.totalBlockedTime,\n \" \"\n ),\n \"Memory: \",\n stats.totalMemoryReservation,\n _react2.default.createElement(\"br\", null),\n \"PipelineExecs: \",\n \"Q:\" + stats.queuedPipelineExecs + \", R:\" + stats.runningPipelineExecs + \", F:\" + stats.completedPipelineExecs,\n _react2.default.createElement(\"hr\", null),\n \"Input: \",\n stats.processedInputDataSize + \" / \" + (0, _utils.formatRows)(stats.processedInputPositions)\n )\n );\n }\n }], [{\n key: \"getStages\",\n value: function getStages(queryInfo) {\n var stages = new Map();\n StageStatistics.flattenStage(queryInfo.outputStage, stages);\n return stages;\n }\n }, {\n key: \"flattenStage\",\n value: function flattenStage(stageInfo, result) {\n stageInfo.subStages.forEach(function (stage) {\n StageStatistics.flattenStage(stage, result);\n });\n\n var nodes = new Map();\n StageStatistics.flattenNode(result, JSON.parse(stageInfo.plan.relNodeJson).rels, nodes);\n\n result.set(stageInfo.plan.id, {\n stageId: stageInfo.stageId,\n id: stageInfo.plan.id,\n root: stageInfo.plan.rootId,\n stageStats: stageInfo.stageStats,\n state: stageInfo.state,\n nodes: nodes\n });\n }\n }, {\n key: \"flattenNode\",\n value: function flattenNode(stages, node, result) {\n\n node.forEach(function (element) {\n var loadingMessage = element.relOp + element.id;\n if (element.tableNames != undefined) {\n loadingMessage = loadingMessage + \":\" + element.tableNames;\n }\n result.set(element.relatedId, {\n id: element.relatedId,\n name: element.relOp,\n identifier: loadingMessage,\n sources: element.sources,\n remoteSources: element.fragmentIds\n });\n });\n node.last;\n // result.set(node.id, {\n // id: node.id,\n // name: node['name'],\n // identifier: node['identifier'],\n // details: node['details'],\n // sources: node.children.map(node => node.id),\n // remoteSources: node.remoteSources,\n // });\n //\n // node.children.forEach(function (child) {\n // StageStatistics.flattenNode(stages, child, result);\n // });\n }\n }]);\n\n return StageStatistics;\n}(_react2.default.Component);\n\nvar PlanNode = function (_React$Component2) {\n _inherits(PlanNode, _React$Component2);\n\n function PlanNode(props) {\n _classCallCheck(this, PlanNode);\n\n return _possibleConstructorReturn(this, (PlanNode.__proto__ || Object.getPrototypeOf(PlanNode)).call(this, props));\n }\n\n _createClass(PlanNode, [{\n key: \"render\",\n value: function render() {\n return _react2.default.createElement(\n \"div\",\n { style: { color: \"#000\" }, \"data-toggle\": \"tooltip\", \"data-placement\": \"bottom\", \"data-container\": \"body\",\n \"data-html\": \"true\",\n title: \"

\" + this.props.name + \"

\" + this.props.identifier },\n _react2.default.createElement(\n \"strong\",\n null,\n this.props.name\n ),\n _react2.default.createElement(\n \"div\",\n null,\n (0, _utils.truncateString)(this.props.identifier, 35)\n )\n );\n }\n }]);\n\n return PlanNode;\n}(_react2.default.Component);\n\nvar LivePlan = exports.LivePlan = function (_React$Component3) {\n _inherits(LivePlan, _React$Component3);\n\n function LivePlan(props) {\n _classCallCheck(this, LivePlan);\n\n var _this3 = _possibleConstructorReturn(this, (LivePlan.__proto__ || Object.getPrototypeOf(LivePlan)).call(this, props));\n\n _this3.state = {\n initialized: false,\n ended: false,\n\n query: null,\n\n graph: (0, _utils.initializeGraph)(),\n svg: null,\n render: new dagreD3.render()\n };\n return _this3;\n }\n\n _createClass(LivePlan, [{\n key: \"resetTimer\",\n value: function resetTimer() {\n clearTimeout(this.timeoutId);\n // stop refreshing when query finishes or fails\n if (this.state.query === null || !this.state.ended) {\n this.timeoutId = setTimeout(this.refreshLoop.bind(this), 5000);\n }\n }\n }, {\n key: \"refreshLoop\",\n value: function refreshLoop() {\n var _this4 = this;\n\n clearTimeout(this.timeoutId); // to stop multiple series of refreshLoop from going on simultaneously\n fetch('/v1/query/' + this.props.queryId).then(function (response) {\n return response.json();\n }).then(function (query) {\n _this4.setState({\n query: query,\n\n initialized: true,\n ended: query.finalQueryInfo\n });\n _this4.resetTimer();\n }).catch(function () {\n _this4.setState({\n initialized: true\n });\n _this4.resetTimer();\n });\n }\n }, {\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.refreshLoop.bind(this)();\n }\n }, {\n key: \"updateD3Stage\",\n value: function updateD3Stage(stage, graph, allStages) {\n var clusterId = stage.stageId;\n var stageRootNodeId = \"stage-\" + stage.id + \"-root\";\n var color = (0, _utils.getStageStateColor)(stage);\n\n graph.setNode(clusterId, { style: 'fill: ' + color, labelStyle: 'fill: #fff' });\n\n // this is a non-standard use of ReactDOMServer, but it's the cleanest way to unify DagreD3 with React\n var html = _server2.default.renderToString(_react2.default.createElement(StageStatistics, { key: stage.id, stage: stage }));\n\n graph.setNode(stageRootNodeId, { class: \"stage-stats\", label: html, labelType: \"html\" });\n graph.setParent(stageRootNodeId, clusterId);\n graph.setEdge(\"node-\" + stage.root, stageRootNodeId, { style: \"visibility: hidden\" });\n\n stage.nodes.forEach(function (node) {\n var nodeId = \"node-\" + node.id;\n var nodeHtml = _server2.default.renderToString(_react2.default.createElement(PlanNode, node));\n\n graph.setNode(nodeId, { label: nodeHtml, style: 'fill: #fff', labelType: \"html\" });\n graph.setParent(nodeId, clusterId);\n\n node.sources.forEach(function (source) {\n graph.setEdge(\"node-\" + source, nodeId, { class: \"plan-edge\", arrowheadClass: \"plan-arrowhead\" });\n });\n\n if (node.remoteSources != undefined && node.remoteSources.length > 0) {\n graph.setNode(nodeId, { label: '', shape: \"circle\" });\n\n node.remoteSources.forEach(function (sourceId) {\n var source = allStages.get(sourceId);\n if (source) {\n var sourceStats = source.stageStats;\n graph.setEdge(\"stage-\" + sourceId + \"-root\", nodeId, {\n class: \"plan-edge\",\n style: \"stroke-width: 4px\",\n arrowheadClass: \"plan-arrowhead\",\n label: sourceStats.outputDataSize + \" / \" + (0, _utils.formatRows)(sourceStats.outputPositions),\n labelStyle: \"color: #fff; 
font-weight: bold; font-size: 24px;\",\n labelType: \"html\"\n });\n }\n });\n }\n });\n }\n }, {\n key: \"updateD3Graph\",\n value: function updateD3Graph() {\n var _this5 = this;\n\n if (!this.state.svg) {\n this.setState({\n svg: (0, _utils.initializeSvg)(\"#plan-canvas\")\n });\n return;\n }\n\n if (!this.state.query) {\n return;\n }\n\n var graph = this.state.graph;\n var stages = StageStatistics.getStages(this.state.query);\n stages.forEach(function (stage) {\n _this5.updateD3Stage(stage, graph, stages);\n });\n\n var inner = d3.select(\"#plan-canvas g\");\n this.state.render(inner, graph);\n\n var svg = this.state.svg;\n svg.selectAll(\"g.cluster\").on(\"click\", LivePlan.handleStageClick);\n\n var width = parseInt(window.getComputedStyle(document.getElementById(\"live-plan\"), null).getPropertyValue(\"width\").replace(/px/, \"\")) - 50;\n var height = parseInt(window.getComputedStyle(document.getElementById(\"live-plan\"), null).getPropertyValue(\"height\").replace(/px/, \"\")) - 50;\n\n var graphHeight = graph.graph().height + 100;\n var graphWidth = graph.graph().width + 100;\n if (this.state.ended) {\n // Zoom doesn't deal well with DOM changes\n var initialScale = Math.min(width / graphWidth, height / graphHeight);\n var zoom = d3.zoom().scaleExtent([initialScale, 1]).on(\"zoom\", function () {\n inner.attr(\"transform\", d3.event.transform);\n });\n\n svg.call(zoom);\n svg.call(zoom.transform, d3.zoomIdentity.translate((width - graph.graph().width * initialScale) / 2, 20).scale(initialScale));\n svg.attr('height', height);\n svg.attr('width', width);\n } else {\n svg.attr('height', graphHeight);\n svg.attr('width', graphWidth);\n }\n }\n }, {\n key: \"componentDidUpdate\",\n value: function componentDidUpdate() {\n this.updateD3Graph();\n //$FlowFixMe\n $('[data-toggle=\"tooltip\"]').tooltip();\n }\n }, {\n key: \"render\",\n value: function render() {\n var query = this.state.query;\n\n if (query === null || this.state.initialized === false) {\n var label = _react2.default.createElement(\n \"div\",\n { className: \"loader\" },\n \"Loading...\"\n );\n if (this.state.initialized) {\n label = \"Query not found\";\n }\n return _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n label\n )\n )\n );\n }\n\n var loadingMessage = null;\n if (query && !query.outputStage) {\n loadingMessage = _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"Live plan graph will appear automatically when query starts running.\"\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"loader\" },\n \"Loading...\"\n )\n )\n );\n }\n\n // TODO: Refactor components to move refreshLoop to parent rather than using this property\n var queryHeader = this.props.isEmbedded ? null : _react2.default.createElement(_QueryHeader.QueryHeader, { query: query });\n return _react2.default.createElement(\n \"div\",\n null,\n queryHeader,\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n loadingMessage,\n _react2.default.createElement(\n \"div\",\n { id: \"live-plan\", className: \"graph-container\" },\n _react2.default.createElement(\n \"div\",\n { className: \"pull-right\" },\n this.state.ended ? 
\"Scroll to zoom.\" : \"Zoom disabled while query is running.\",\n \" Click stage to view additional statistics\"\n ),\n _react2.default.createElement(\"svg\", { id: \"plan-canvas\" })\n )\n )\n )\n );\n }\n }], [{\n key: \"handleStageClick\",\n value: function handleStageClick(stageCssId) {\n window.open(\"stage.html?\" + stageCssId, '_blank');\n }\n }]);\n\n return LivePlan;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/LivePlan.jsx?"); +eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.LivePlan = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nvar _server = __webpack_require__(/*! react-dom/server */ \"./node_modules/react-dom/server.browser.js\");\n\nvar _server2 = _interopRequireDefault(_server);\n\nvar _dagreD = __webpack_require__(/*! dagre-d3 */ \"./node_modules/dagre-d3/index.js\");\n\nvar dagreD3 = _interopRequireWildcard(_dagreD);\n\nvar _d = __webpack_require__(/*! d3 */ \"./node_modules/d3/index.js\");\n\nvar d3 = _interopRequireWildcard(_d);\n\nvar _utils = __webpack_require__(/*! ../utils */ \"./utils.js\");\n\nvar _QueryHeader = __webpack_require__(/*! ./QueryHeader */ \"./components/QueryHeader.jsx\");\n\nfunction _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } }\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\nvar StageStatistics = function (_React$Component) {\n _inherits(StageStatistics, _React$Component);\n\n function StageStatistics() {\n _classCallCheck(this, StageStatistics);\n\n return _possibleConstructorReturn(this, (StageStatistics.__proto__ || Object.getPrototypeOf(StageStatistics)).apply(this, arguments));\n }\n\n _createClass(StageStatistics, [{\n key: \"render\",\n value: function render() {\n var stage = this.props.stage;\n var stats = this.props.stage.stageStats;\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"h3\",\n { className: \"margin-top: 0\" },\n \"Stage \",\n stage.id\n ),\n stage.state,\n _react2.default.createElement(\"hr\", null),\n \"CpuTime: \",\n stats.totalCpuTimeNanos,\n \" ms\",\n _react2.default.createElement(\"br\", null),\n stats.fullyBlocked ? _react2.default.createElement(\n \"div\",\n { style: { color: '#ff0000' } },\n \"BlockedTime: \",\n (0, _utils.formatDurationNs)(stats.totalBlockedTimeNanos),\n \" \"\n ) : _react2.default.createElement(\n \"div\",\n null,\n \"BlockedTime: \",\n (0, _utils.formatDurationNs)(stats.totalBlockedTimeNanos),\n \" \"\n ),\n _react2.default.createElement(\"br\", null),\n \"PipelineExecs: \",\n \"Q:\" + stats.queuedPipelineExecs + \", R:\" + stats.runningPipelineExecs + \", F:\" + stats.completedPipelineExecs,\n _react2.default.createElement(\"hr\", null),\n \"Input: \",\n (0, _utils.formatRows)(stats.processedInputPositions)\n )\n );\n }\n }], [{\n key: \"getStages\",\n value: function getStages(queryInfo) {\n var stages = new Map();\n StageStatistics.flattenStage(queryInfo.outputStage, stages);\n return stages;\n }\n }, {\n key: \"flattenStage\",\n value: function flattenStage(stageInfo, result) {\n stageInfo.subStages.forEach(function (stage) {\n StageStatistics.flattenStage(stage, result);\n });\n\n var nodes = new Map();\n StageStatistics.flattenNode(result, JSON.parse(stageInfo.plan.relNodeJson).rels, nodes);\n\n result.set(stageInfo.plan.id, {\n stageId: stageInfo.stageId,\n id: stageInfo.plan.id,\n root: stageInfo.plan.rootId,\n stageStats: stageInfo.stageStats,\n state: stageInfo.state,\n nodes: nodes\n });\n }\n }, {\n key: \"flattenNode\",\n value: function flattenNode(stages, node, result) {\n\n node.forEach(function (element) {\n var loadingMessage = element.relOp + element.id;\n if (element.tableNames != undefined) {\n loadingMessage = loadingMessage + \":\" + element.tableNames;\n }\n result.set(element.relatedId, {\n id: element.relatedId,\n name: element.relOp,\n identifier: loadingMessage,\n sources: element.sources,\n remoteSources: element.fragmentIds\n });\n });\n // node.last\n // result.set(node.id, {\n // id: node.id,\n // name: node['name'],\n // identifier: node['identifier'],\n // details: node['details'],\n // sources: node.children.map(node => 
node.id),\n // remoteSources: node.remoteSources,\n // });\n //\n // node.children.forEach(function (child) {\n // StageStatistics.flattenNode(stages, child, result);\n // });\n }\n }]);\n\n return StageStatistics;\n}(_react2.default.Component);\n\nvar PlanNode = function (_React$Component2) {\n _inherits(PlanNode, _React$Component2);\n\n function PlanNode(props) {\n _classCallCheck(this, PlanNode);\n\n return _possibleConstructorReturn(this, (PlanNode.__proto__ || Object.getPrototypeOf(PlanNode)).call(this, props));\n }\n\n _createClass(PlanNode, [{\n key: \"render\",\n value: function render() {\n return _react2.default.createElement(\n \"div\",\n { style: { color: \"#000\" }, \"data-toggle\": \"tooltip\", \"data-placement\": \"bottom\", \"data-container\": \"body\",\n \"data-html\": \"true\",\n title: \"

\" + this.props.name + \"

\" + this.props.identifier },\n _react2.default.createElement(\n \"strong\",\n null,\n this.props.name\n ),\n _react2.default.createElement(\n \"div\",\n null,\n (0, _utils.truncateString)(this.props.identifier, 35)\n )\n );\n }\n }]);\n\n return PlanNode;\n}(_react2.default.Component);\n\nvar LivePlan = exports.LivePlan = function (_React$Component3) {\n _inherits(LivePlan, _React$Component3);\n\n function LivePlan(props) {\n _classCallCheck(this, LivePlan);\n\n var _this3 = _possibleConstructorReturn(this, (LivePlan.__proto__ || Object.getPrototypeOf(LivePlan)).call(this, props));\n\n _this3.state = {\n initialized: false,\n ended: false,\n\n query: null,\n\n graph: (0, _utils.initializeGraph)(),\n svg: null,\n render: new dagreD3.render()\n };\n return _this3;\n }\n\n _createClass(LivePlan, [{\n key: \"resetTimer\",\n value: function resetTimer() {\n clearTimeout(this.timeoutId);\n // stop refreshing when query finishes or fails\n if (this.state.query === null || !this.state.ended) {\n this.timeoutId = setTimeout(this.refreshLoop.bind(this), 5000);\n }\n }\n }, {\n key: \"refreshLoop\",\n value: function refreshLoop() {\n var _this4 = this;\n\n clearTimeout(this.timeoutId); // to stop multiple series of refreshLoop from going on simultaneously\n fetch('/v1/query/stats/' + this.props.queryId).then(function (response) {\n return response.json();\n }).then(function (query) {\n _this4.setState({\n query: query,\n\n initialized: true,\n ended: query.finalQueryInfo\n });\n _this4.resetTimer();\n }).catch(function () {\n _this4.setState({\n initialized: true\n });\n _this4.resetTimer();\n });\n }\n }, {\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.refreshLoop.bind(this)();\n }\n }, {\n key: \"updateD3Stage\",\n value: function updateD3Stage(stage, graph, allStages) {\n var clusterId = stage.stageId;\n var stageRootNodeId = \"stage-\" + stage.id + \"-root\";\n var color = (0, _utils.getStageStateColor)(stage);\n\n graph.setNode(clusterId, { style: 'fill: ' + color, labelStyle: 'fill: #fff' });\n\n // this is a non-standard use of ReactDOMServer, but it's the cleanest way to unify DagreD3 with React\n var html = _server2.default.renderToString(_react2.default.createElement(StageStatistics, { key: stage.id, stage: stage }));\n\n graph.setNode(stageRootNodeId, { class: \"stage-stats\", label: html, labelType: \"html\" });\n graph.setParent(stageRootNodeId, clusterId);\n graph.setEdge(\"node-\" + stage.root, stageRootNodeId, { style: \"visibility: hidden\" });\n\n var stageOperatorsMap = new Map();\n if (stage.stageStats.operatorSummaries) {\n stage.stageStats.operatorSummaries.forEach(function (opSummary) {\n stageOperatorsMap.set(opSummary.operatorId, opSummary);\n });\n }\n\n stage.nodes.forEach(function (node) {\n var nodeId = \"node-\" + node.id;\n var nodeHtml = _server2.default.renderToString(_react2.default.createElement(PlanNode, node));\n\n graph.setNode(nodeId, { label: nodeHtml, style: 'fill: #fff', labelType: \"html\" });\n graph.setParent(nodeId, clusterId);\n\n node.sources.forEach(function (source) {\n if (stageOperatorsMap.has(source)) {\n graph.setEdge(\"node-\" + source, nodeId, {\n class: \"plan-edge\",\n arrowheadClass: \"plan-arrowhead\",\n label: (0, _utils.formatRows)(stageOperatorsMap.get(source).outputRowCount),\n labelStyle: \"color: #fff; font-weight: bold; font-size: 16px;\",\n labelType: \"html\"\n });\n } else {\n graph.setEdge(\"node-\" + source, nodeId, { class: \"plan-edge\", arrowheadClass: \"plan-arrowhead\" });\n }\n });\n\n if 
(node.remoteSources !== undefined && node.remoteSources.length > 0) {\n graph.setNode(nodeId, { label: '', shape: \"circle\" });\n\n node.remoteSources.forEach(function (sourceId) {\n var source = allStages.get(sourceId);\n if (source) {\n var sourceStats = source.stageStats;\n graph.setEdge(\"stage-\" + sourceId + \"-root\", nodeId, {\n class: \"plan-edge\",\n style: \"stroke-width: 4px\",\n arrowheadClass: \"plan-arrowhead\",\n label: (0, _utils.formatRows)(sourceStats.outputPositions),\n labelStyle: \"color: #fff; font-weight: bold; font-size: 24px;\",\n labelType: \"html\"\n });\n }\n });\n }\n });\n }\n }, {\n key: \"updateD3Graph\",\n value: function updateD3Graph() {\n var _this5 = this;\n\n if (!this.state.svg) {\n this.setState({\n svg: (0, _utils.initializeSvg)(\"#plan-canvas\")\n });\n return;\n }\n\n if (!this.state.query) {\n return;\n }\n\n var graph = this.state.graph;\n var stages = StageStatistics.getStages(this.state.query);\n stages.forEach(function (stage) {\n _this5.updateD3Stage(stage, graph, stages);\n });\n\n var inner = d3.select(\"#plan-canvas g\");\n this.state.render(inner, graph);\n\n var svg = this.state.svg;\n svg.selectAll(\"g.cluster\").on(\"click\", LivePlan.handleStageClick);\n\n var width = parseInt(window.getComputedStyle(document.getElementById(\"live-plan\"), null).getPropertyValue(\"width\").replace(/px/, \"\")) - 50;\n var height = parseInt(window.getComputedStyle(document.getElementById(\"live-plan\"), null).getPropertyValue(\"height\").replace(/px/, \"\")) - 50;\n\n var graphHeight = graph.graph().height + 100;\n var graphWidth = graph.graph().width + 100;\n if (this.state.ended) {\n // Zoom doesn't deal well with DOM changes\n var initialScale = Math.min(width / graphWidth, height / graphHeight);\n var zoom = d3.zoom().scaleExtent([initialScale, 1]).on(\"zoom\", function () {\n inner.attr(\"transform\", d3.event.transform);\n });\n\n svg.call(zoom);\n svg.call(zoom.transform, d3.zoomIdentity.translate((width - graph.graph().width * initialScale) / 2, 20).scale(initialScale));\n svg.attr('height', height);\n svg.attr('width', width);\n } else {\n svg.attr('height', graphHeight);\n svg.attr('width', graphWidth);\n }\n }\n }, {\n key: \"componentDidUpdate\",\n value: function componentDidUpdate() {\n this.updateD3Graph();\n //$FlowFixMe\n $('[data-toggle=\"tooltip\"]').tooltip();\n }\n }, {\n key: \"render\",\n value: function render() {\n var query = this.state.query;\n\n if (query === null || this.state.initialized === false) {\n var label = _react2.default.createElement(\n \"div\",\n { className: \"loader\" },\n \"Loading...\"\n );\n if (this.state.initialized) {\n label = \"Query not found\";\n }\n return _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n label\n )\n )\n );\n }\n\n var loadingMessage = null;\n if (query && !query.outputStage) {\n loadingMessage = _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"Live plan graph will appear automatically when query starts running.\"\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"loader\" },\n \"Loading...\"\n )\n )\n );\n }\n\n // TODO: Refactor components to move refreshLoop to parent rather than using this property\n var queryHeader = this.props.isEmbedded ? 
null : _react2.default.createElement(_QueryHeader.QueryHeader, { query: query });\n return _react2.default.createElement(\n \"div\",\n null,\n queryHeader,\n _react2.default.createElement(\n \"div\",\n { className: \"info-container-next\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row \" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n loadingMessage,\n _react2.default.createElement(\n \"div\",\n { id: \"live-plan\", className: \"graph-container\" },\n _react2.default.createElement(\n \"div\",\n { className: \"pull-right\" },\n this.state.ended ? \"Scroll to zoom.\" : \"Zoom disabled while query is running.\",\n \" Click stage to view additional statistics\"\n ),\n _react2.default.createElement(\"svg\", { id: \"plan-canvas\" })\n )\n )\n )\n )\n );\n }\n }], [{\n key: \"handleStageClick\",\n value: function handleStageClick(stageCssId) {\n window.open(\"stage.html?\" + stageCssId, '_blank');\n }\n }]);\n\n return LivePlan;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/LivePlan.jsx?"); /***/ }), @@ -106,7 +106,7 @@ eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n}); /***/ (function(module, exports, __webpack_require__) { "use strict"; -eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.QueryHeader = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nvar _utils = __webpack_require__(/*! ../utils */ \"./utils.js\");\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nvar QueryHeader = exports.QueryHeader = function (_React$Component) {\n _inherits(QueryHeader, _React$Component);\n\n function QueryHeader(props) {\n _classCallCheck(this, QueryHeader);\n\n return _possibleConstructorReturn(this, (QueryHeader.__proto__ || Object.getPrototypeOf(QueryHeader)).call(this, props));\n }\n\n _createClass(QueryHeader, [{\n key: \"renderProgressBar\",\n value: function renderProgressBar() {\n var query = this.props.query;\n var progressBarStyle = {\n width: (0, _utils.getProgressBarPercentage)(query) + \"%\",\n backgroundColor: (0, _utils.getQueryStateColor)(query)\n };\n\n if ((0, _utils.isQueryEnded)(query)) {\n return _react2.default.createElement(\n \"div\",\n { className: \"progress-large\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-bar progress-bar-info\", role: \"progressbar\",\n \"aria-valuenow\": (0, _utils.getProgressBarPercentage)(query), \"aria-valuemin\": \"0\", \"aria-valuemax\": \"100\",\n style: progressBarStyle },\n (0, _utils.getProgressBarTitle)(query)\n )\n );\n }\n\n return _react2.default.createElement(\n \"table\",\n null,\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { width: \"100%\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-large\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-bar progress-bar-info\", role: \"progressbar\",\n \"aria-valuenow\": (0, _utils.getProgressBarPercentage)(query), \"aria-valuemin\": \"0\", \"aria-valuemax\": \"100\",\n style: progressBarStyle },\n (0, _utils.getProgressBarTitle)(query)\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"a\",\n { onClick: function onClick() {\n return $.ajax({\n url: '/v1/query/' + query.queryId + '/killed',\n type: 'PUT',\n data: \"Killed via web UI\"\n });\n }, className: \"btn btn-warning\",\n target: \"_blank\" },\n \"Kill\"\n )\n )\n )\n )\n );\n }\n }, {\n key: \"renderTab\",\n value: function renderTab(path, name) {\n var queryId = this.props.query.queryId;\n if (window.location.pathname.includes(path)) {\n return _react2.default.createElement(\n \"a\",\n { href: path + '?' + queryId, className: \"btn btn-info navbar-btn nav-disabled\" },\n name\n );\n }\n\n return _react2.default.createElement(\n \"a\",\n { href: path + '?' 
+ queryId, className: \"btn btn-info navbar-btn\" },\n name\n );\n }\n }, {\n key: \"render\",\n value: function render() {\n var query = this.props.query;\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"h3\",\n { className: \"query-id\" },\n _react2.default.createElement(\n \"span\",\n { id: \"query-id\" },\n query.queryId\n ),\n _react2.default.createElement(\n \"a\",\n { className: \"btn copy-button\", \"data-clipboard-target\": \"#query-id\", \"data-toggle\": \"tooltip\",\n \"data-placement\": \"right\", title: \"Copy to clipboard\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-copy\", \"aria-hidden\": \"true\", alt: \"Copy to clipboard\" })\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"table\",\n { className: \"header-inline-links\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n this.renderTab(\"query.html\", \"Overview\"),\n \"\\xA0\",\n this.renderTab(\"plan.html\", \"Live Plan\"),\n \"\\xA0\",\n this.renderTab(\"stage.html\", \"Stage Performance\"),\n \"\\xA0\",\n this.renderTab(\"timeline.html\", \"Splits\"),\n \"\\xA0\",\n _react2.default.createElement(\n \"a\",\n { href: \"/v1/query/\" + query.queryId + \"?pretty\",\n className: \"btn btn-info navbar-btn\", target: \"_blank\" },\n \"JSON\"\n )\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\"hr\", { className: \"h2-hr\" }),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n this.renderProgressBar()\n )\n )\n );\n }\n }]);\n\n return QueryHeader;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/QueryHeader.jsx?"); +eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.QueryHeader = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nvar _utils = __webpack_require__(/*! ../utils */ \"./utils.js\");\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? 
call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nvar QueryHeader = exports.QueryHeader = function (_React$Component) {\n _inherits(QueryHeader, _React$Component);\n\n function QueryHeader(props) {\n _classCallCheck(this, QueryHeader);\n\n return _possibleConstructorReturn(this, (QueryHeader.__proto__ || Object.getPrototypeOf(QueryHeader)).call(this, props));\n }\n\n _createClass(QueryHeader, [{\n key: \"renderProgressBar\",\n value: function renderProgressBar() {\n var query = this.props.query;\n var progressBarStyle = {\n width: (0, _utils.getProgressBarPercentage)(query) + \"%\",\n backgroundColor: (0, _utils.getQueryStateColor)(query)\n };\n\n if ((0, _utils.isQueryEnded)(query)) {\n return _react2.default.createElement(\n \"div\",\n { className: \"progress-large\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-bar progress-bar-info\", role: \"progressbar\",\n \"aria-valuenow\": (0, _utils.getProgressBarPercentage)(query), \"aria-valuemin\": \"0\", \"aria-valuemax\": \"100\",\n style: progressBarStyle },\n (0, _utils.getProgressBarTitle)(query)\n )\n );\n }\n\n return _react2.default.createElement(\n \"table\",\n null,\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { width: \"100%\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-large\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-bar progress-bar-info\", role: \"progressbar\",\n \"aria-valuenow\": (0, _utils.getProgressBarPercentage)(query), \"aria-valuemin\": \"0\", \"aria-valuemax\": \"100\",\n style: progressBarStyle },\n (0, _utils.getProgressBarTitle)(query)\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"a\",\n { onClick: function onClick() {\n return $.ajax({\n url: '/v1/query/' + query.queryId + '/killed',\n type: 'PUT',\n data: \"Killed via web UI\"\n });\n }, className: \"btn btn-warning\",\n target: \"_blank\" },\n \"Kill\"\n )\n )\n )\n )\n );\n }\n }, {\n key: \"renderTab\",\n value: function renderTab(path, name) {\n var queryId = this.props.query.queryId;\n if (window.location.pathname.includes(path)) {\n return _react2.default.createElement(\n \"a\",\n { href: path + '?' + queryId, className: \"btn btn-info navbar-btn nav-disabled\" },\n name\n );\n }\n\n return _react2.default.createElement(\n \"a\",\n { href: path + '?' 
+ queryId, className: \"btn btn-info navbar-btn\" },\n name\n );\n }\n }, {\n key: \"render\",\n value: function render() {\n var query = this.props.query;\n return _react2.default.createElement(\n \"div\",\n { className: \"\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"h3\",\n { className: \"query-id\" },\n _react2.default.createElement(\n \"span\",\n { id: \"query-id\" },\n query.queryId\n ),\n _react2.default.createElement(\n \"a\",\n { className: \"btn copy-button\", \"data-clipboard-target\": \"#query-id\", \"data-toggle\": \"tooltip\",\n \"data-placement\": \"right\", title: \"Copy to clipboard\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-copy\", \"aria-hidden\": \"true\", alt: \"Copy to clipboard\" })\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"table\",\n { className: \"header-inline-links\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n this.renderTab(\"query.html\", \"Overview\"),\n \"\\xA0\",\n this.renderTab(\"plan.html\", \"Live Plan\"),\n \"\\xA0\",\n this.renderTab(\"stage.html\", \"Stage Performance\"),\n \"\\xA0\",\n this.renderTab(\"timeline.html\", \"Splits\"),\n \"\\xA0\",\n _react2.default.createElement(\n \"a\",\n { href: \"/v1/query/\" + query.queryId + \"?pretty\",\n className: \"btn btn-info navbar-btn\", target: \"_blank\" },\n \"JSON\"\n )\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\"hr\", { className: \"h2-hr\" }),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n this.renderProgressBar()\n )\n )\n );\n }\n }]);\n\n return QueryHeader;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/QueryHeader.jsx?"); /***/ }), @@ -20663,7 +20663,7 @@ eval("module.exports = function(module) {\n\tif (!module.webpackPolyfill) {\n\t\ /***/ (function(module, exports, __webpack_require__) { "use strict"; -eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.GLYPHICON_HIGHLIGHT = exports.GLYPHICON_DEFAULT = undefined;\nexports.getQueryStateColor = getQueryStateColor;\nexports.getStageStateColor = getStageStateColor;\nexports.getHumanReadableState = getHumanReadableState;\nexports.getProgressBarPercentage = getProgressBarPercentage;\nexports.getProgressBarTitle = getProgressBarTitle;\nexports.isQueryEnded = isQueryEnded;\nexports.addToHistory = addToHistory;\nexports.addExponentiallyWeightedToHistory = addExponentiallyWeightedToHistory;\nexports.initializeGraph = initializeGraph;\nexports.initializeSvg = initializeSvg;\nexports.truncateString = truncateString;\nexports.getStageNumber = getStageNumber;\nexports.getTaskIdSuffix = getTaskIdSuffix;\nexports.getTaskNumber = getTaskNumber;\nexports.getFirstParameter = getFirstParameter;\nexports.getHostname = getHostname;\nexports.getPort = getPort;\nexports.getHostAndPort = getHostAndPort;\nexports.computeRate = computeRate;\nexports.precisionRound = precisionRound;\nexports.formatDuration = formatDuration;\nexports.formatRows = formatRows;\nexports.formatCount = formatCount;\nexports.formatDataSizeBytes = formatDataSizeBytes;\nexports.formatDataSize = formatDataSize;\nexports.parseDataSize = 
parseDataSize;\nexports.parseDuration = parseDuration;\nexports.formatShortTime = formatShortTime;\nexports.formatShortDateTime = formatShortDateTime;\n\nvar _dagreD = __webpack_require__(/*! dagre-d3 */ \"./node_modules/dagre-d3/index.js\");\n\nvar dagreD3 = _interopRequireWildcard(_dagreD);\n\nvar _d = __webpack_require__(/*! d3 */ \"./node_modules/d3/index.js\");\n\nvar d3 = _interopRequireWildcard(_d);\n\nfunction _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } }\n\n// Query display\n// =============\n\n/*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\nvar GLYPHICON_DEFAULT = exports.GLYPHICON_DEFAULT = { color: '#1edcff' };\nvar GLYPHICON_HIGHLIGHT = exports.GLYPHICON_HIGHLIGHT = { color: '#999999' };\n\nvar STATE_COLOR_MAP = {\n QUEUED: '#1b8f72',\n RUNNING: '#19874e',\n PLANNING: '#674f98',\n FINISHED: '#1a4629',\n BLOCKED: '#61003b',\n USER_ERROR: '#9a7d66',\n CANCELED: '#858959',\n INSUFFICIENT_RESOURCES: '#7f5b72',\n EXTERNAL_ERROR: '#ca7640',\n UNKNOWN_ERROR: '#943524'\n};\n\nfunction getQueryStateColor(query) {\n switch (query.state) {\n case \"QUEUED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"PLANNING\":\n return STATE_COLOR_MAP.PLANNING;\n case \"STARTING\":\n case \"FINISHING\":\n case \"RUNNING\":\n if (query.queryStats && query.queryStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FAILED\":\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === 'USER_CANCELED') {\n return STATE_COLOR_MAP.CANCELED;\n }\n return STATE_COLOR_MAP.USER_ERROR;\n case \"EXTERNAL\":\n return STATE_COLOR_MAP.EXTERNAL_ERROR;\n case \"INSUFFICIENT_RESOURCES\":\n return STATE_COLOR_MAP.INSUFFICIENT_RESOURCES;\n default:\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n }\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n default:\n return STATE_COLOR_MAP.QUEUED;\n }\n}\n\nfunction getStageStateColor(stage) {\n switch (stage.state) {\n case \"PLANNED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"SCHEDULING\":\n case \"SCHEDULING_SPLITS\":\n case \"SCHEDULED\":\n return STATE_COLOR_MAP.PLANNING;\n case \"RUNNING\":\n if (stage.stageStats && stage.stageStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n case \"CANCELED\":\n case \"ABORTED\":\n return STATE_COLOR_MAP.CANCELED;\n case \"FAILED\":\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n default:\n return \"#b5b5b5\";\n }\n}\n\n// This relies on the fact that BasicQueryInfo and QueryInfo have all the fields\n// necessary to compute this string, and that these fields are consistently named.\nfunction getHumanReadableState(query) {\n if (query.state === \"RUNNING\") {\n var title = \"RUNNING\";\n\n if (query.scheduled && query.queryStats.totalDrivers > 0 && 
query.queryStats.runningDrivers >= 0) {\n if (query.queryStats.fullyBlocked) {\n title = \"BLOCKED\";\n\n if (query.queryStats.blockedReasons && query.queryStats.blockedReasons.length > 0) {\n title += \" (\" + query.queryStats.blockedReasons.join(\", \") + \")\";\n }\n }\n\n if (query.memoryPool === \"reserved\") {\n title += \" (RESERVED)\";\n }\n\n return title;\n }\n }\n\n if (query.state === \"FAILED\") {\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === \"USER_CANCELED\") {\n return \"USER CANCELED\";\n }\n return \"USER ERROR\";\n case \"INTERNAL_ERROR\":\n return \"INTERNAL ERROR\";\n case \"INSUFFICIENT_RESOURCES\":\n return \"INSUFFICIENT RESOURCES\";\n case \"EXTERNAL\":\n return \"EXTERNAL ERROR\";\n }\n }\n\n return query.state;\n}\n\nfunction getProgressBarPercentage(query) {\n var progress = query.queryStats.progressPercentage;\n\n // progress bars should appear 'full' when query progress is not meaningful\n if (!progress || query.state !== \"RUNNING\") {\n return 100;\n }\n\n return Math.round(progress);\n}\n\nfunction getProgressBarTitle(query) {\n if (query.queryStats.progressPercentage && query.state === \"RUNNING\") {\n return getHumanReadableState(query) + \" (\" + getProgressBarPercentage(query) + \"%)\";\n }\n\n return getHumanReadableState(query);\n}\n\nfunction isQueryEnded(query) {\n return [\"FINISHED\", \"FAILED\", \"CANCELED\"].indexOf(query.state) > -1;\n}\n\n// Sparkline-related functions\n// ===========================\n\n// display at most 5 minutes worth of data on the sparklines\nvar MAX_HISTORY = 60 * 5;\n// alpha param of exponentially weighted moving average. picked arbitrarily - lower values means more smoothness\nvar MOVING_AVERAGE_ALPHA = 0.2;\n\nfunction addToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n return valuesArray.concat([value]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\nfunction addExponentiallyWeightedToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n\n var movingAverage = value * MOVING_AVERAGE_ALPHA + valuesArray[valuesArray.length - 1] * (1 - MOVING_AVERAGE_ALPHA);\n if (value < 1) {\n movingAverage = 0;\n }\n\n return valuesArray.concat([movingAverage]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\n// DagreD3 Graph-related functions\n// ===============================\n\nfunction initializeGraph() {\n return new dagreD3.graphlib.Graph({ compound: true }).setGraph({ rankdir: 'BT' }).setDefaultEdgeLabel(function () {\n return {};\n });\n}\n\nfunction initializeSvg(selector) {\n var svg = d3.select(selector);\n svg.append(\"g\");\n\n return svg;\n}\n\n// Utility functions\n// =================\n\nfunction truncateString(inputString, length) {\n if (inputString && inputString.length > length) {\n return inputString.substring(0, length) + \"...\";\n }\n\n return inputString;\n}\n\nfunction getStageNumber(stageId) {\n return Number.parseInt(stageId.slice(stageId.indexOf('.') + 1, stageId.length));\n}\n\nfunction getTaskIdSuffix(taskId) {\n return taskId.slice(taskId.indexOf('.') + 1, taskId.length);\n}\n\nfunction getTaskNumber(taskId) {\n return Number.parseInt(getTaskIdSuffix(getTaskIdSuffix(taskId)));\n}\n\nfunction getFirstParameter(searchString) {\n var searchText = searchString.substring(1);\n\n if (searchText.indexOf('&') !== -1) {\n return searchText.substring(0, searchText.indexOf('&'));\n }\n\n return searchText;\n}\n\nfunction 
getHostname(taskLocation) {\n var hostname = taskLocation.nodeServer.host;\n //\n // var hostname = new URL(url).hostname;\n if (hostname.charAt(0) == '[' && hostname.charAt(hostname.length - 1) == ']') {\n hostname = hostname.substr(1, hostname.length - 2);\n }\n return hostname;\n}\n\nfunction getPort(taskLocation) {\n return taskLocation.nodeServer.httpPort;\n // return new URL(url).port;\n}\n\nfunction getHostAndPort(taskLocation) {\n // var url = new URL(taskLocation.nodeServer.uri);\n return taskLocation.nodeServer.host + \":\" + taskLocation.nodeServer.httpPort;\n}\n\nfunction computeRate(count, ms) {\n if (ms === 0) {\n return 0;\n }\n return count / ms * 1000.0;\n}\n\nfunction precisionRound(n) {\n if (n < 10) {\n return n.toFixed(2);\n }\n if (n < 100) {\n return n.toFixed(1);\n }\n return Math.round(n).toString();\n}\n\nfunction formatDuration(duration) {\n var unit = \"ms\";\n if (duration > 1000) {\n duration /= 1000;\n unit = \"s\";\n }\n if (unit === \"s\" && duration > 60) {\n duration /= 60;\n unit = \"m\";\n }\n if (unit === \"m\" && duration > 60) {\n duration /= 60;\n unit = \"h\";\n }\n if (unit === \"h\" && duration > 24) {\n duration /= 24;\n unit = \"d\";\n }\n if (unit === \"d\" && duration > 7) {\n duration /= 7;\n unit = \"w\";\n }\n return precisionRound(duration) + unit;\n}\n\nfunction formatRows(count) {\n if (count === 1) {\n return \"1 row\";\n }\n\n return formatCount(count) + \" rows\";\n}\n\nfunction formatCount(count) {\n var unit = \"\";\n if (count > 1000) {\n count /= 1000;\n unit = \"K\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"M\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"B\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"T\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"Q\";\n }\n return precisionRound(count) + unit;\n}\n\nfunction formatDataSizeBytes(size) {\n return formatDataSizeMinUnit(size, \"\");\n}\n\nfunction formatDataSize(size) {\n return formatDataSizeMinUnit(size, \"B\");\n}\n\nfunction formatDataSizeMinUnit(size, minUnit) {\n var unit = minUnit;\n if (size === 0) {\n return \"0\" + unit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"K\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"M\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"G\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"T\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"P\" + minUnit;\n }\n return precisionRound(size) + unit;\n}\n\nfunction parseDataSize(value) {\n var DATA_SIZE_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n var match = DATA_SIZE_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"B\":\n return number;\n case \"kB\":\n return number * Math.pow(2, 10);\n case \"MB\":\n return number * Math.pow(2, 20);\n case \"GB\":\n return number * Math.pow(2, 30);\n case \"TB\":\n return number * Math.pow(2, 40);\n case \"PB\":\n return number * Math.pow(2, 50);\n default:\n return null;\n }\n}\n\nfunction parseDuration(value) {\n var DURATION_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n\n var match = DURATION_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"ns\":\n return number / 1000000.0;\n case \"us\":\n return number / 1000.0;\n case \"ms\":\n return number;\n case \"s\":\n return number * 1000;\n case \"m\":\n return number * 1000 * 60;\n case \"h\":\n return 
number * 1000 * 60 * 60;\n case \"d\":\n return number * 1000 * 60 * 60 * 24;\n default:\n return null;\n }\n}\n\nfunction formatShortTime(date) {\n var hours = date.getHours() % 12 || 12;\n var minutes = (date.getMinutes() < 10 ? \"0\" : \"\") + date.getMinutes();\n return hours + \":\" + minutes + (date.getHours() >= 12 ? \"pm\" : \"am\");\n}\n\nfunction formatShortDateTime(date) {\n var year = date.getFullYear();\n var month = \"\" + (date.getMonth() + 1);\n var dayOfMonth = \"\" + date.getDate();\n return year + \"-\" + (month[1] ? month : \"0\" + month[0]) + \"-\" + (dayOfMonth[1] ? dayOfMonth : \"0\" + dayOfMonth[0]) + \" \" + formatShortTime(date);\n}\n\n//# sourceURL=webpack:///./utils.js?"); +eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.GLYPHICON_HIGHLIGHT = exports.GLYPHICON_DEFAULT = undefined;\nexports.getQueryStateColor = getQueryStateColor;\nexports.getStageStateColor = getStageStateColor;\nexports.getHumanReadableState = getHumanReadableState;\nexports.getProgressBarPercentage = getProgressBarPercentage;\nexports.getProgressBarTitle = getProgressBarTitle;\nexports.isQueryEnded = isQueryEnded;\nexports.addToHistory = addToHistory;\nexports.addExponentiallyWeightedToHistory = addExponentiallyWeightedToHistory;\nexports.initializeGraph = initializeGraph;\nexports.initializeSvg = initializeSvg;\nexports.truncateString = truncateString;\nexports.getStageNumber = getStageNumber;\nexports.getTaskIdSuffix = getTaskIdSuffix;\nexports.getFullSplitIdSuffix = getFullSplitIdSuffix;\nexports.getTaskNumber = getTaskNumber;\nexports.getFirstParameter = getFirstParameter;\nexports.getHostname = getHostname;\nexports.getPort = getPort;\nexports.getHostAndPort = getHostAndPort;\nexports.computeRate = computeRate;\nexports.precisionRound = precisionRound;\nexports.formatDurationMs = formatDurationMs;\nexports.formatDurationNs = formatDurationNs;\nexports.formatNumber = formatNumber;\nexports.formatRows = formatRows;\nexports.formatCount = formatCount;\nexports.formatDataSizeBytes = formatDataSizeBytes;\nexports.formatDataSize = formatDataSize;\nexports.parseDataSize = parseDataSize;\nexports.parseDuration = parseDuration;\nexports.formatShortTime = formatShortTime;\nexports.formatShortDateTime = formatShortDateTime;\n\nvar _dagreD = __webpack_require__(/*! dagre-d3 */ \"./node_modules/dagre-d3/index.js\");\n\nvar dagreD3 = _interopRequireWildcard(_dagreD);\n\nvar _d = __webpack_require__(/*! 
d3 */ \"./node_modules/d3/index.js\");\n\nvar d3 = _interopRequireWildcard(_d);\n\nfunction _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } }\n\n// Query display\n// =============\n\n/*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\nvar GLYPHICON_DEFAULT = exports.GLYPHICON_DEFAULT = { color: '#1edcff' };\nvar GLYPHICON_HIGHLIGHT = exports.GLYPHICON_HIGHLIGHT = { color: '#999999' };\n\nvar STATE_COLOR_MAP = {\n QUEUED: '#7bb3fb',\n RUNNING: '#265cdf',\n PLANNING: '#674f98',\n FINISHED: '#22b647',\n BLOCKED: '#61003b',\n USER_ERROR: '#9a7d66',\n CANCELED: '#858959',\n INSUFFICIENT_RESOURCES: '#7f5b72',\n EXTERNAL_ERROR: '#ca7640',\n UNKNOWN_ERROR: '#943524'\n};\n\nfunction getQueryStateColor(query) {\n switch (query.state) {\n case \"QUEUED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"PLANNING\":\n return STATE_COLOR_MAP.PLANNING;\n case \"STARTING\":\n case \"FINISHING\":\n case \"RUNNING\":\n if (query.queryStats && query.queryStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FAILED\":\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === 'USER_CANCELED') {\n return STATE_COLOR_MAP.CANCELED;\n }\n return STATE_COLOR_MAP.USER_ERROR;\n case \"EXTERNAL\":\n return STATE_COLOR_MAP.EXTERNAL_ERROR;\n case \"INSUFFICIENT_RESOURCES\":\n return STATE_COLOR_MAP.INSUFFICIENT_RESOURCES;\n default:\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n }\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n default:\n return STATE_COLOR_MAP.QUEUED;\n }\n}\n\nfunction getStageStateColor(stage) {\n switch (stage.state) {\n case \"PLANNED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"SCHEDULING\":\n case \"SCHEDULING_SPLITS\":\n case \"SCHEDULED\":\n return STATE_COLOR_MAP.PLANNING;\n case \"RUNNING\":\n if (stage.stageStats && stage.stageStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n case \"CANCELED\":\n case \"ABORTED\":\n return STATE_COLOR_MAP.CANCELED;\n case \"FAILED\":\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n default:\n return \"#b5b5b5\";\n }\n}\n\n// This relies on the fact that BasicQueryInfo and QueryInfo have all the fields\n// necessary to compute this string, and that these fields are consistently named.\nfunction getHumanReadableState(query) {\n if (query.state === \"RUNNING\") {\n var title = \"RUNNING\";\n\n if (query.scheduled && query.queryStats.totalDrivers > 0 && query.queryStats.runningDrivers >= 0) {\n if (query.queryStats.fullyBlocked) {\n title = \"BLOCKED\";\n\n if (query.queryStats.blockedReasons && query.queryStats.blockedReasons.length > 0) {\n title += \" (\" + query.queryStats.blockedReasons.join(\", \") + \")\";\n }\n }\n\n if (query.memoryPool === \"reserved\") {\n title += 
\" (RESERVED)\";\n }\n\n return title;\n }\n }\n\n if (query.state === \"FAILED\") {\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === \"USER_CANCELED\") {\n return \"USER CANCELED\";\n }\n return \"USER ERROR\";\n case \"INTERNAL_ERROR\":\n return \"INTERNAL ERROR\";\n case \"INSUFFICIENT_RESOURCES\":\n return \"INSUFFICIENT RESOURCES\";\n case \"EXTERNAL\":\n return \"EXTERNAL ERROR\";\n }\n }\n\n return query.state;\n}\n\nfunction getProgressBarPercentage(query) {\n var progress = query.queryStats.progressPercentage;\n\n // progress bars should appear 'full' when query progress is not meaningful\n if (!progress || query.state !== \"RUNNING\") {\n return 100;\n }\n\n return Math.round(progress);\n}\n\nfunction getProgressBarTitle(query) {\n if (query.queryStats.progressPercentage && query.state === \"RUNNING\") {\n return getHumanReadableState(query) + \" (\" + getProgressBarPercentage(query) + \"%)\";\n }\n\n return getHumanReadableState(query);\n}\n\nfunction isQueryEnded(query) {\n return [\"FINISHED\", \"FAILED\", \"CANCELED\"].indexOf(query.state) > -1;\n}\n\n// Sparkline-related functions\n// ===========================\n\n// display at most 5 minutes worth of data on the sparklines\nvar MAX_HISTORY = 60 * 5;\n// alpha param of exponentially weighted moving average. picked arbitrarily - lower values means more smoothness\nvar MOVING_AVERAGE_ALPHA = 0.2;\n\nfunction addToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n return valuesArray.concat([value]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\nfunction addExponentiallyWeightedToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n\n var movingAverage = value * MOVING_AVERAGE_ALPHA + valuesArray[valuesArray.length - 1] * (1 - MOVING_AVERAGE_ALPHA);\n if (value < 1) {\n movingAverage = 0;\n }\n\n return valuesArray.concat([movingAverage]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\n// DagreD3 Graph-related functions\n// ===============================\n\nfunction initializeGraph() {\n return new dagreD3.graphlib.Graph({ compound: true }).setGraph({ rankdir: 'BT' }).setDefaultEdgeLabel(function () {\n return {};\n });\n}\n\nfunction initializeSvg(selector) {\n var svg = d3.select(selector);\n svg.append(\"g\");\n\n return svg;\n}\n\n// Utility functions\n// =================\n\nfunction truncateString(inputString, length) {\n if (inputString && inputString.length > length) {\n return inputString.substring(0, length) + \"...\";\n }\n\n return inputString;\n}\n\nfunction getStageNumber(stageId) {\n return Number.parseInt(stageId.slice(stageId.indexOf('.') + 1, stageId.length));\n}\n\nfunction getTaskIdSuffix(taskId) {\n return taskId.slice(taskId.indexOf('.') + 1, taskId.length);\n}\n\nfunction getFullSplitIdSuffix(driverId) {\n return driverId.substring(driverId.indexOf('.') + 1);\n}\n\nfunction getTaskNumber(taskId) {\n return Number.parseInt(getTaskIdSuffix(getTaskIdSuffix(taskId)));\n}\n\nfunction getFirstParameter(searchString) {\n var searchText = searchString.substring(1);\n\n if (searchText.indexOf('&') !== -1) {\n return searchText.substring(0, searchText.indexOf('&'));\n }\n\n return searchText;\n}\n\nfunction getHostname(taskLocation) {\n var hostname = taskLocation.nodeServer.host;\n //\n // var hostname = new URL(url).hostname;\n if (hostname.charAt(0) == '[' && hostname.charAt(hostname.length - 1) == ']') {\n hostname = hostname.substr(1, 
hostname.length - 2);\n }\n return hostname;\n}\n\nfunction getPort(taskLocation) {\n return taskLocation.nodeServer.httpPort;\n // return new URL(url).port;\n}\n\nfunction getHostAndPort(taskLocation) {\n // var url = new URL(taskLocation.nodeServer.uri);\n return taskLocation.nodeServer.host + \":\" + taskLocation.nodeServer.httpPort;\n}\n\nfunction computeRate(count, ms) {\n if (ms === 0) {\n return 0;\n }\n return count / ms * 1000.0;\n}\n\nfunction precisionRound(n) {\n if (n < 10) {\n return n.toFixed(2);\n }\n if (n < 100) {\n return n.toFixed(1);\n }\n return Math.round(n).toString();\n}\n\nfunction formatDurationMs(duration) {\n var unit = \"ms\";\n if (duration > 1000) {\n duration /= 1000;\n unit = \"s\";\n }\n if (unit === \"s\" && duration > 60) {\n duration /= 60;\n unit = \"m\";\n }\n if (unit === \"m\" && duration > 60) {\n duration /= 60;\n unit = \"h\";\n }\n if (unit === \"h\" && duration > 24) {\n duration /= 24;\n unit = \"d\";\n }\n if (unit === \"d\" && duration > 7) {\n duration /= 7;\n unit = \"w\";\n }\n return precisionRound(duration) + unit;\n}\n\nfunction formatDurationNs(duration) {\n var unit = \"ns\";\n if (duration > 1000) {\n duration /= 1000;\n unit = \"us\";\n }\n if (duration > 1000) {\n duration /= 1000;\n unit = \"ms\";\n }\n if (duration > 1000) {\n duration /= 1000;\n unit = \"s\";\n }\n if (unit === \"s\" && duration > 60) {\n duration /= 60;\n unit = \"m\";\n }\n if (unit === \"m\" && duration > 60) {\n duration /= 60;\n unit = \"h\";\n }\n if (unit === \"h\" && duration > 24) {\n duration /= 24;\n unit = \"d\";\n }\n if (unit === \"d\" && duration > 7) {\n duration /= 7;\n unit = \"w\";\n }\n return precisionRound(duration) + unit;\n}\n\nfunction formatNumber(num) {\n return num.toLocaleString();\n}\n\nfunction formatRows(count) {\n if (count === 1) {\n return \"1 row\";\n }\n\n return formatCount(count) + \" rows\";\n}\n\nfunction formatCount(count) {\n var unit = \"\";\n if (count > 1000) {\n count /= 1000;\n unit = \"K\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"M\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"B\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"T\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"Q\";\n }\n return precisionRound(count) + unit;\n}\n\nfunction formatDataSizeBytes(size) {\n return formatDataSizeMinUnit(size, \"\");\n}\n\nfunction formatDataSize(size) {\n return formatDataSizeMinUnit(size, \"B\");\n}\n\nfunction formatDataSizeMinUnit(size, minUnit) {\n var unit = minUnit;\n if (size === 0) {\n return \"0\" + unit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"K\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"M\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"G\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"T\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"P\" + minUnit;\n }\n return precisionRound(size) + unit;\n}\n\nfunction parseDataSize(value) {\n var DATA_SIZE_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n var match = DATA_SIZE_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"B\":\n return number;\n case \"kB\":\n return number * Math.pow(2, 10);\n case \"MB\":\n return number * Math.pow(2, 20);\n case \"GB\":\n return number * Math.pow(2, 30);\n case \"TB\":\n return number * Math.pow(2, 40);\n case \"PB\":\n return number * Math.pow(2, 50);\n default:\n return null;\n }\n}\n\nfunction 
parseDuration(value) {\n var DURATION_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n\n var match = DURATION_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"ns\":\n return number / 1000000.0;\n case \"us\":\n return number / 1000.0;\n case \"ms\":\n return number;\n case \"s\":\n return number * 1000;\n case \"m\":\n return number * 1000 * 60;\n case \"h\":\n return number * 1000 * 60 * 60;\n case \"d\":\n return number * 1000 * 60 * 60 * 24;\n default:\n return null;\n }\n}\n\nfunction formatShortTime(date) {\n var hours = date.getHours() % 12 || 12;\n var minutes = (date.getMinutes() < 10 ? \"0\" : \"\") + date.getMinutes();\n return hours + \":\" + minutes + (date.getHours() >= 12 ? \"pm\" : \"am\");\n}\n\nfunction formatShortDateTime(date) {\n var year = date.getFullYear();\n var month = \"\" + (date.getMonth() + 1);\n var dayOfMonth = \"\" + date.getDate();\n return year + \"-\" + (month[1] ? month : \"0\" + month[0]) + \"-\" + (dayOfMonth[1] ? dayOfMonth : \"0\" + dayOfMonth[0]) + \" \" + formatShortTime(date);\n}\n\n//# sourceURL=webpack:///./utils.js?"); /***/ }) diff --git a/polardbx-executor/src/main/resources/webapp/dist/index.js b/polardbx-executor/src/main/resources/webapp/dist/index.js index 0f2ada5ad..33405c276 100644 --- a/polardbx-executor/src/main/resources/webapp/dist/index.js +++ b/polardbx-executor/src/main/resources/webapp/dist/index.js @@ -106,7 +106,7 @@ eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n}); /***/ (function(module, exports, __webpack_require__) { "use strict"; -eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.PageTitle = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\nvar PageTitle = exports.PageTitle = function (_React$Component) {\n _inherits(PageTitle, _React$Component);\n\n function PageTitle(props) {\n _classCallCheck(this, PageTitle);\n\n var _this = _possibleConstructorReturn(this, (PageTitle.__proto__ || Object.getPrototypeOf(PageTitle)).call(this, props));\n\n _this.state = {\n noConnection: false,\n lightShown: false,\n info: null,\n lastSuccess: Date.now(),\n modalShown: false,\n errorText: null\n };\n return _this;\n }\n\n _createClass(PageTitle, [{\n key: \"refreshLoop\",\n value: function refreshLoop() {\n var _this2 = this;\n\n clearTimeout(this.timeoutId);\n fetch(\"/v1/info\").then(function (response) {\n return response.json();\n }).then(function (info) {\n _this2.setState({\n info: info,\n noConnection: false,\n lastSuccess: Date.now(),\n modalShown: false\n });\n //$FlowFixMe$ Bootstrap 3 plugin\n $('#no-connection-modal').modal('hide');\n _this2.resetTimer();\n }).catch(function (error) {\n _this2.setState({\n noConnection: true,\n lightShown: !_this2.state.lightShown,\n errorText: error\n });\n _this2.resetTimer();\n\n if (!_this2.state.modalShown && (error || Date.now() - _this2.state.lastSuccess > 30 * 1000)) {\n //$FlowFixMe$ Bootstrap 3 plugin\n $('#no-connection-modal').modal();\n _this2.setState({ modalShown: true });\n }\n });\n }\n }, {\n key: \"resetTimer\",\n value: function resetTimer() {\n clearTimeout(this.timeoutId);\n this.timeoutId = setTimeout(this.refreshLoop.bind(this), 5000);\n }\n }, {\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.refreshLoop.bind(this)();\n }\n }, {\n key: \"renderStatusLight\",\n value: function renderStatusLight() {\n if (this.state.noConnection) {\n if (this.state.lightShown) {\n return _react2.default.createElement(\"span\", { className: \"status-light status-light-red\", id: \"status-indicator\" });\n } else {\n return _react2.default.createElement(\"span\", { className: \"status-light\", id: \"status-indicator\" });\n }\n }\n return _react2.default.createElement(\"span\", { className: \"status-light status-light-green\", id: \"status-indicator\" });\n }\n }, {\n key: \"render\",\n value: function render() {\n var info = this.state.info;\n if (!info) {\n return null;\n }\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"nav\",\n { className: \"navbar\" },\n _react2.default.createElement(\n \"div\",\n { className: \"container-fluid\" },\n _react2.default.createElement(\n \"div\",\n { className: \"navbar-header\" },\n _react2.default.createElement(\n \"table\",\n null,\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"/ui/\" },\n _react2.default.createElement(\"img\", { src: \"assets/logo.png\" })\n )\n ),\n 
_react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-brand\" },\n this.props.title\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { id: \"navbar\", className: \"navbar-collapse collapse\" },\n _react2.default.createElement(\n \"ul\",\n { className: \"nav navbar-nav navbar-right\" },\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Version\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\",\n id: \"version-number\" },\n info.nodeVersion.version\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Environment\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\", id: \"environment\" },\n info.environment\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"CoordinatorId\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\", id: \"workerId\" },\n info.workerId\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Uptime\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { \"data-toggle\": \"tooltip\", \"data-placement\": \"bottom\", title: \"Connection status\" },\n this.renderStatusLight()\n ),\n \"\\xA0\",\n _react2.default.createElement(\n \"span\",\n { className: \"text\", id: \"uptime\" },\n info.uptime\n )\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { id: \"no-connection-modal\", className: \"modal\", tabIndex: \"-1\", role: \"dialog\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-dialog modal-sm\", role: \"document\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-content\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"h4\",\n null,\n \"Unable to connect to server\"\n ),\n _react2.default.createElement(\n \"p\",\n null,\n this.state.errorText ? 
\"Error: \" + this.state.errorText : null\n )\n )\n )\n )\n )\n )\n );\n }\n }]);\n\n return PageTitle;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/PageTitle.jsx?"); +eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.PageTitle = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\nvar PageTitle = exports.PageTitle = function (_React$Component) {\n _inherits(PageTitle, _React$Component);\n\n function PageTitle(props) {\n _classCallCheck(this, PageTitle);\n\n var _this = _possibleConstructorReturn(this, (PageTitle.__proto__ || Object.getPrototypeOf(PageTitle)).call(this, props));\n\n _this.state = {\n noConnection: false,\n lightShown: false,\n info: null,\n lastSuccess: Date.now(),\n modalShown: false,\n errorText: null\n };\n return _this;\n }\n\n _createClass(PageTitle, [{\n key: \"refreshLoop\",\n value: function refreshLoop() {\n var _this2 = this;\n\n clearTimeout(this.timeoutId);\n fetch(\"/v1/info\").then(function (response) {\n return response.json();\n }).then(function (info) {\n _this2.setState({\n info: info,\n noConnection: false,\n lastSuccess: Date.now(),\n modalShown: false\n });\n //$FlowFixMe$ Bootstrap 3 plugin\n $('#no-connection-modal').modal('hide');\n _this2.resetTimer();\n }).catch(function (error) {\n _this2.setState({\n noConnection: true,\n lightShown: !_this2.state.lightShown,\n errorText: error\n });\n _this2.resetTimer();\n\n if (!_this2.state.modalShown && (error || Date.now() - _this2.state.lastSuccess > 30 * 1000)) {\n //$FlowFixMe$ Bootstrap 3 plugin\n $('#no-connection-modal').modal();\n _this2.setState({ modalShown: true });\n }\n });\n }\n }, {\n key: \"resetTimer\",\n value: function resetTimer() {\n clearTimeout(this.timeoutId);\n this.timeoutId = setTimeout(this.refreshLoop.bind(this), 5000);\n }\n }, {\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.refreshLoop.bind(this)();\n }\n }, {\n key: \"renderStatusLight\",\n value: function renderStatusLight() {\n if (this.state.noConnection) {\n if (this.state.lightShown) {\n return _react2.default.createElement(\"span\", { className: \"status-light status-light-red\", id: \"status-indicator\" });\n } else {\n return _react2.default.createElement(\"span\", { className: \"status-light\", id: \"status-indicator\" });\n }\n }\n return _react2.default.createElement(\"span\", { className: \"status-light status-light-green\", id: \"status-indicator\" });\n }\n }, {\n key: \"render\",\n value: function render() {\n var info = this.state.info;\n if (!info) {\n return null;\n }\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"nav\",\n { className: \"navbar\" },\n _react2.default.createElement(\n \"div\",\n { className: \"container-fluid\" },\n _react2.default.createElement(\n \"div\",\n { className: \"navbar-header\" },\n _react2.default.createElement(\n \"table\",\n null,\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"/ui/\" },\n _react2.default.createElement(\"img\", { src: \"assets/favicon.png\" })\n )\n 
),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-brand\" },\n this.props.title\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { id: \"navbar\", className: \"navbar-collapse collapse\" },\n _react2.default.createElement(\n \"ul\",\n { className: \"nav navbar-nav navbar-right\" },\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Version\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\",\n id: \"version-number\" },\n info.nodeVersion.version\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Environment\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\", id: \"environment\" },\n \"PolarDB-X\"\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Node\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\", id: \"workerId\" },\n info.workerId\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Uptime\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { \"data-toggle\": \"tooltip\", \"data-placement\": \"bottom\", title: \"Connection status\" },\n this.renderStatusLight()\n ),\n \"\\xA0\",\n _react2.default.createElement(\n \"span\",\n { className: \"text\", id: \"uptime\" },\n info.uptime\n )\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { id: \"no-connection-modal\", className: \"modal\", tabIndex: \"-1\", role: \"dialog\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-dialog modal-sm\", role: \"document\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-content\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"h4\",\n null,\n \"Unable to connect to server\"\n ),\n _react2.default.createElement(\n \"p\",\n null,\n this.state.errorText ? 
\"Error: \" + this.state.errorText : null\n )\n )\n )\n )\n )\n )\n );\n }\n }]);\n\n return PageTitle;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/PageTitle.jsx?"); /***/ }), @@ -118,7 +118,7 @@ eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n}); /***/ (function(module, exports, __webpack_require__) { "use strict"; -eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.QueryList = exports.QueryListItem = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nvar _utils = __webpack_require__(/*! ../utils */ \"./utils.js\");\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nvar QueryListItem = exports.QueryListItem = function (_React$Component) {\n _inherits(QueryListItem, _React$Component);\n\n function QueryListItem() {\n _classCallCheck(this, QueryListItem);\n\n return _possibleConstructorReturn(this, (QueryListItem.__proto__ || Object.getPrototypeOf(QueryListItem)).apply(this, arguments));\n }\n\n _createClass(QueryListItem, [{\n key: \"renderWarning\",\n value: function renderWarning() {\n var query = this.props.query;\n if (query.warnings && query.warnings.length) {\n var warningCodes = [];\n query.warnings.forEach(function (warning) {\n warningCodes.push(warning.warningCode.name);\n });\n\n return _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-warning-sign query-warning\", \"data-toggle\": \"tooltip\",\n title: warningCodes.join(', ') });\n }\n }\n }, {\n key: \"render\",\n value: function render() {\n var query = this.props.query;\n var progressBarStyle = {\n width: (0, _utils.getProgressBarPercentage)(query) + \"%\",\n backgroundColor: (0, _utils.getQueryStateColor)(query)\n };\n\n var splitDetails = _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12 tinystat-row\" },\n _react2.default.createElement(\n \"span\",\n { className: \"tinystat\", \"data-toggle\": \"tooltip\", \"data-placement\": \"top\", title: \"Completed splits\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-ok\", style: _utils.GLYPHICON_HIGHLIGHT }),\n \"\\xA0\\xA0\",\n query.queryStats.completedPipelineExecs\n ),\n _react2.default.createElement(\n \"span\",\n { className: \"tinystat\", \"data-toggle\": \"tooltip\", \"data-placement\": \"top\", title: \"Running splits\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-play\", style: _utils.GLYPHICON_HIGHLIGHT }),\n \"\\xA0\\xA0\",\n query.state === \"FINISHED\" || query.state === \"FAILED\" ? 0 : query.queryStats.runningPipelineExecs\n ),\n _react2.default.createElement(\n \"span\",\n { className: \"tinystat\", \"data-toggle\": \"tooltip\", \"data-placement\": \"top\", title: \"Queued splits\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-pause\", style: _utils.GLYPHICON_HIGHLIGHT }),\n \"\\xA0\\xA0\",\n query.state === \"FINISHED\" || query.state === \"FAILED\" ? 
0 : query.queryStats.queuedPipelineExecs\n )\n );\n\n var timingDetails = _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12 tinystat-row\" },\n _react2.default.createElement(\n \"span\",\n { className: \"tinystat\", \"data-toggle\": \"tooltip\", \"data-placement\": \"top\",\n title: \"Wall time spent executing the query (not including queued time)\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-hourglass\", style: _utils.GLYPHICON_HIGHLIGHT }),\n \"\\xA0\\xA0\",\n query.queryStats.executionTime\n ),\n _react2.default.createElement(\n \"span\",\n { className: \"tinystat\", \"data-toggle\": \"tooltip\", \"data-placement\": \"top\", title: \"Total query wall time\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-time\", style: _utils.GLYPHICON_HIGHLIGHT }),\n \"\\xA0\\xA0\",\n query.queryStats.elapsedTime\n ),\n _react2.default.createElement(\n \"span\",\n { className: \"tinystat\", \"data-toggle\": \"tooltip\", \"data-placement\": \"top\", title: \"Plan Time by this query\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-dashboard\", style: _utils.GLYPHICON_HIGHLIGHT }),\n \"\\xA0\\xA0\",\n query.queryStats.totalPlanningTime\n )\n );\n\n var memoryDetails = _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12 tinystat-row\" },\n _react2.default.createElement(\n \"span\",\n { className: \"tinystat\", \"data-toggle\": \"tooltip\", \"data-placement\": \"top\", title: \"Current reserved memory\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-scale\", style: _utils.GLYPHICON_HIGHLIGHT }),\n \"\\xA0\\xA0\",\n query.queryStats.totalMemoryReservation\n ),\n _react2.default.createElement(\n \"span\",\n { className: \"tinystat\", \"data-toggle\": \"tooltip\", \"data-placement\": \"top\", title: \"Peak memory\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-fire\", style: _utils.GLYPHICON_HIGHLIGHT }),\n \"\\xA0\\xA0\",\n query.queryStats.peakMemoryReservation\n ),\n _react2.default.createElement(\n \"span\",\n { className: \"tinystat\", \"data-toggle\": \"tooltip\", \"data-placement\": \"top\", title: \"Cumulative user memory\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-equalizer\", style: _utils.GLYPHICON_HIGHLIGHT }),\n \"\\xA0\\xA0\",\n (0, _utils.formatDataSizeBytes)(query.queryStats.cumulativeMemory)\n )\n );\n\n return _react2.default.createElement(\n \"div\",\n { className: \"query\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-4\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row stat-row query-header query-header-queryid\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-9\", \"data-placement\": \"bottom\" },\n _react2.default.createElement(\n \"a\",\n { href: \"query.html?\" + query.queryId, target: \"_blank\", \"data-toggle\": \"tooltip\",\n title: \"Query ID\" },\n query.queryId\n ),\n this.renderWarning()\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-3 query-header-timestamp\", \"data-toggle\": \"tooltip\",\n \"data-placement\": \"bottom\", title: \"Submit time\" },\n _react2.default.createElement(\n \"span\",\n null,\n (0, _utils.formatShortTime)(new Date(Date.parse(query.queryStats.createTime)))\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"row stat-row\" },\n 
splitDetails\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"row stat-row\" },\n timingDetails\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"row stat-row\" },\n memoryDetails\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-8\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row query-header\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12 query-progress-container\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-bar progress-bar-info\", role: \"progressbar\",\n \"aria-valuenow\": (0, _utils.getProgressBarPercentage)(query), \"aria-valuemin\": \"0\",\n \"aria-valuemax\": \"100\", style: progressBarStyle },\n (0, _utils.getProgressBarTitle)(query)\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"row query-row-bottom\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"pre\",\n { className: \"query-snippet\" },\n _react2.default.createElement(\n \"code\",\n {\n className: \"sql\" },\n QueryListItem.stripQueryTextWhitespace(query.query)\n )\n )\n )\n )\n )\n )\n );\n }\n }], [{\n key: \"stripQueryTextWhitespace\",\n value: function stripQueryTextWhitespace(queryText) {\n var lines = queryText.split(\"\\n\");\n var minLeadingWhitespace = -1;\n for (var i = 0; i < lines.length; i++) {\n if (minLeadingWhitespace === 0) {\n break;\n }\n\n if (lines[i].trim().length === 0) {\n continue;\n }\n\n var leadingWhitespace = lines[i].search(/\\S/);\n\n if (leadingWhitespace > -1 && (leadingWhitespace < minLeadingWhitespace || minLeadingWhitespace === -1)) {\n minLeadingWhitespace = leadingWhitespace;\n }\n }\n\n var formattedQueryText = \"\";\n\n for (var _i = 0; _i < lines.length; _i++) {\n var trimmedLine = lines[_i].substring(minLeadingWhitespace).replace(/\\s+$/g, '');\n\n if (trimmedLine.length > 0) {\n formattedQueryText += trimmedLine;\n\n if (_i < lines.length - 1) {\n formattedQueryText += \"\\n\";\n }\n }\n }\n\n return (0, _utils.truncateString)(formattedQueryText, 300);\n }\n }]);\n\n return QueryListItem;\n}(_react2.default.Component);\n\nvar DisplayedQueriesList = function (_React$Component2) {\n _inherits(DisplayedQueriesList, _React$Component2);\n\n function DisplayedQueriesList() {\n _classCallCheck(this, DisplayedQueriesList);\n\n return _possibleConstructorReturn(this, (DisplayedQueriesList.__proto__ || Object.getPrototypeOf(DisplayedQueriesList)).apply(this, arguments));\n }\n\n _createClass(DisplayedQueriesList, [{\n key: \"render\",\n value: function render() {\n var queryNodes = this.props.queries.map(function (query) {\n return _react2.default.createElement(QueryListItem, { key: query.queryId, query: query });\n }.bind(this));\n return _react2.default.createElement(\n \"div\",\n null,\n queryNodes\n );\n }\n }]);\n\n return DisplayedQueriesList;\n}(_react2.default.Component);\n\nvar FILTER_TYPE = {\n RUNNING: function RUNNING(query) {\n return query.state == \"PLANNING\" || query.state == \"STARTING\" || query.state == \"RUNNING\" || query.state == \"FINISHING\";\n },\n QUEUED: function QUEUED(query) {\n return query.state === \"QUEUED\";\n },\n FINISHED: function FINISHED(query) {\n return query.state === \"FINISHED\";\n }\n};\n\nvar SORT_TYPE = {\n CREATED: function CREATED(query) {\n return Date.parse(query.queryStats.createTime);\n },\n ELAPSED: function ELAPSED(query) {\n return 
(0, _utils.parseDuration)(query.queryStats.elapsedTime);\n },\n EXECUTION: function EXECUTION(query) {\n return (0, _utils.parseDuration)(query.queryStats.executionTime);\n },\n CPU: function CPU(query) {\n return (0, _utils.parseDuration)(query.queryStats.totalCpuTime);\n },\n CUMULATIVE_MEMORY: function CUMULATIVE_MEMORY(query) {\n return query.queryStats.cumulativeUserMemory;\n },\n CURRENT_MEMORY: function CURRENT_MEMORY(query) {\n return (0, _utils.parseDataSize)(query.queryStats.userMemoryReservation);\n }\n};\n\nvar ERROR_TYPE = {\n FAILED: function FAILED(query) {\n return query.state === \"FAILED\";\n }\n};\n\nvar SORT_ORDER = {\n ASCENDING: function ASCENDING(value) {\n return value;\n },\n DESCENDING: function DESCENDING(value) {\n return -value;\n }\n};\n\nvar QueryList = exports.QueryList = function (_React$Component3) {\n _inherits(QueryList, _React$Component3);\n\n function QueryList(props) {\n _classCallCheck(this, QueryList);\n\n var _this3 = _possibleConstructorReturn(this, (QueryList.__proto__ || Object.getPrototypeOf(QueryList)).call(this, props));\n\n _this3.state = {\n allQueries: [],\n displayedQueries: [],\n reorderInterval: 5000,\n currentSortType: SORT_TYPE.CREATED,\n currentSortOrder: SORT_ORDER.DESCENDING,\n stateFilters: [FILTER_TYPE.RUNNING, FILTER_TYPE.QUEUED],\n errorTypeFilters: [ERROR_TYPE.FAILED],\n searchString: '',\n maxQueries: 100,\n lastRefresh: Date.now(),\n lastReorder: Date.now(),\n initialized: false\n };\n\n _this3.refreshLoop = _this3.refreshLoop.bind(_this3);\n _this3.handleSearchStringChange = _this3.handleSearchStringChange.bind(_this3);\n _this3.executeSearch = _this3.executeSearch.bind(_this3);\n _this3.handleSortClick = _this3.handleSortClick.bind(_this3);\n return _this3;\n }\n\n _createClass(QueryList, [{\n key: \"sortAndLimitQueries\",\n value: function sortAndLimitQueries(queries, sortType, sortOrder, maxQueries) {\n queries.sort(function (queryA, queryB) {\n return sortOrder(sortType(queryA) - sortType(queryB));\n }, this);\n\n if (maxQueries !== 0 && queries.length > maxQueries) {\n queries.splice(maxQueries, queries.length - maxQueries);\n }\n }\n }, {\n key: \"filterQueries\",\n value: function filterQueries(queries, stateFilters, errorTypeFilters, searchString) {\n var stateFilteredQueries = queries.filter(function (query) {\n for (var i = 0; i < stateFilters.length; i++) {\n if (stateFilters[i](query)) {\n return true;\n }\n }\n for (var _i2 = 0; _i2 < errorTypeFilters.length; _i2++) {\n if (errorTypeFilters[_i2](query)) {\n return true;\n }\n }\n return false;\n });\n\n if (searchString === '') {\n return stateFilteredQueries;\n } else {\n return stateFilteredQueries.filter(function (query) {\n var term = searchString.toLowerCase();\n if (query.queryId.toLowerCase().indexOf(term) !== -1 || (0, _utils.getHumanReadableState)(query).toLowerCase().indexOf(term) !== -1 || query.query.toLowerCase().indexOf(term) !== -1) {\n return true;\n }\n }, this);\n }\n }\n }, {\n key: \"resetTimer\",\n value: function resetTimer() {\n clearTimeout(this.timeoutId);\n // stop refreshing when query finishes or fails\n if (this.state.query === null || !this.state.ended) {\n this.timeoutId = setTimeout(this.refreshLoop, 5000);\n }\n }\n }, {\n key: \"refreshLoop\",\n value: function refreshLoop() {\n clearTimeout(this.timeoutId); // to stop multiple series of refreshLoop from going on simultaneously\n clearTimeout(this.searchTimeoutId);\n\n $.get('/v1/query', function (queryList) {\n var queryMap = queryList.reduce(function (map, query) {\n 
map[query.queryId] = query;\n return map;\n }, {});\n\n var updatedQueries = [];\n this.state.displayedQueries.forEach(function (oldQuery) {\n if (oldQuery.queryId in queryMap) {\n updatedQueries.push(queryMap[oldQuery.queryId]);\n queryMap[oldQuery.queryId] = false;\n }\n });\n\n var newQueries = [];\n for (var queryId in queryMap) {\n if (queryMap[queryId]) {\n newQueries.push(queryMap[queryId]);\n }\n }\n newQueries = this.filterQueries(newQueries, this.state.stateFilters, this.state.errorTypeFilters, this.state.searchString);\n\n var lastRefresh = Date.now();\n var lastReorder = this.state.lastReorder;\n\n if (this.state.reorderInterval !== 0 && lastRefresh - lastReorder >= this.state.reorderInterval) {\n updatedQueries = this.filterQueries(updatedQueries, this.state.stateFilters, this.state.errorTypeFilters, this.state.searchString);\n updatedQueries = updatedQueries.concat(newQueries);\n this.sortAndLimitQueries(updatedQueries, this.state.currentSortType, this.state.currentSortOrder, 0);\n lastReorder = Date.now();\n } else {\n this.sortAndLimitQueries(newQueries, this.state.currentSortType, this.state.currentSortOrder, 0);\n updatedQueries = updatedQueries.concat(newQueries);\n }\n\n if (this.state.maxQueries !== 0 && updatedQueries.length > this.state.maxQueries) {\n updatedQueries.splice(this.state.maxQueries, updatedQueries.length - this.state.maxQueries);\n }\n\n this.setState({\n allQueries: queryList,\n displayedQueries: updatedQueries,\n lastRefresh: lastRefresh,\n lastReorder: lastReorder,\n initialized: true\n });\n this.resetTimer();\n }.bind(this)).error(function () {\n this.setState({\n initialized: true\n });\n this.resetTimer();\n }.bind(this));\n }\n }, {\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.refreshLoop();\n }\n }, {\n key: \"handleSearchStringChange\",\n value: function handleSearchStringChange(event) {\n var newSearchString = event.target.value;\n clearTimeout(this.searchTimeoutId);\n\n this.setState({\n searchString: newSearchString\n });\n\n this.searchTimeoutId = setTimeout(this.executeSearch, 5000);\n }\n }, {\n key: \"executeSearch\",\n value: function executeSearch() {\n clearTimeout(this.searchTimeoutId);\n\n var newDisplayedQueries = this.filterQueries(this.state.allQueries, this.state.stateFilters, this.state.errorTypeFilters, this.state.searchString);\n this.sortAndLimitQueries(newDisplayedQueries, this.state.currentSortType, this.state.currentSortOrder, this.state.maxQueries);\n\n this.setState({\n displayedQueries: newDisplayedQueries\n });\n }\n }, {\n key: \"renderMaxQueriesListItem\",\n value: function renderMaxQueriesListItem(maxQueries, maxQueriesText) {\n return _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"#\", className: this.state.maxQueries === maxQueries ? 
\"selected\" : \"\",\n onClick: this.handleMaxQueriesClick.bind(this, maxQueries) },\n maxQueriesText\n )\n );\n }\n }, {\n key: \"handleMaxQueriesClick\",\n value: function handleMaxQueriesClick(newMaxQueries) {\n var filteredQueries = this.filterQueries(this.state.allQueries, this.state.stateFilters, this.state.errorTypeFilters, this.state.searchString);\n this.sortAndLimitQueries(filteredQueries, this.state.currentSortType, this.state.currentSortOrder, newMaxQueries);\n\n this.setState({\n maxQueries: newMaxQueries,\n displayedQueries: filteredQueries\n });\n }\n }, {\n key: \"renderReorderListItem\",\n value: function renderReorderListItem(interval, intervalText) {\n return _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"#\", className: this.state.reorderInterval === interval ? \"selected\" : \"\",\n onClick: this.handleReorderClick.bind(this, interval) },\n intervalText\n )\n );\n }\n }, {\n key: \"handleReorderClick\",\n value: function handleReorderClick(interval) {\n if (this.state.reorderInterval !== interval) {\n this.setState({\n reorderInterval: interval\n });\n }\n }\n }, {\n key: \"renderSortListItem\",\n value: function renderSortListItem(sortType, sortText) {\n if (this.state.currentSortType === sortType) {\n var directionArrow = this.state.currentSortOrder === SORT_ORDER.ASCENDING ? _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-triangle-top\" }) : _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-triangle-bottom\" });\n return _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"#\", className: \"selected\", onClick: this.handleSortClick.bind(this, sortType) },\n sortText,\n \" \",\n directionArrow\n )\n );\n } else {\n return _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"#\", onClick: this.handleSortClick.bind(this, sortType) },\n sortText\n )\n );\n }\n }\n }, {\n key: \"handleSortClick\",\n value: function handleSortClick(sortType) {\n var newSortType = sortType;\n var newSortOrder = SORT_ORDER.DESCENDING;\n\n if (this.state.currentSortType === sortType && this.state.currentSortOrder === SORT_ORDER.DESCENDING) {\n newSortOrder = SORT_ORDER.ASCENDING;\n }\n\n var newDisplayedQueries = this.filterQueries(this.state.allQueries, this.state.stateFilters, this.state.errorTypeFilters, this.state.searchString);\n this.sortAndLimitQueries(newDisplayedQueries, newSortType, newSortOrder, this.state.maxQueries);\n\n this.setState({\n displayedQueries: newDisplayedQueries,\n currentSortType: newSortType,\n currentSortOrder: newSortOrder\n });\n }\n }, {\n key: \"renderFilterButton\",\n value: function renderFilterButton(filterType, filterText) {\n var checkmarkStyle = { color: '#57aac7' };\n var classNames = \"btn btn-sm btn-info style-check\";\n if (this.state.stateFilters.indexOf(filterType) > -1) {\n classNames += \" active\";\n checkmarkStyle = { color: '#ffffff' };\n }\n\n return _react2.default.createElement(\n \"button\",\n { type: \"button\", className: classNames, onClick: this.handleStateFilterClick.bind(this, filterType) },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-ok\", style: checkmarkStyle }),\n \"\\xA0\",\n filterText\n );\n }\n }, {\n key: \"handleStateFilterClick\",\n value: function handleStateFilterClick(filter) {\n var newFilters = this.state.stateFilters.slice();\n if (this.state.stateFilters.indexOf(filter) > -1) 
{\n newFilters.splice(newFilters.indexOf(filter), 1);\n } else {\n newFilters.push(filter);\n }\n\n var filteredQueries = this.filterQueries(this.state.allQueries, newFilters, this.state.errorTypeFilters, this.state.searchString);\n this.sortAndLimitQueries(filteredQueries, this.state.currentSortType, this.state.currentSortOrder);\n\n this.setState({\n stateFilters: newFilters,\n displayedQueries: filteredQueries\n });\n }\n }, {\n key: \"renderErrorTypeListItem\",\n value: function renderErrorTypeListItem(errorType, errorTypeText) {\n var checkmarkStyle = { color: '#ffffff' };\n if (this.state.errorTypeFilters.indexOf(errorType) > -1) {\n checkmarkStyle = _utils.GLYPHICON_HIGHLIGHT;\n }\n return _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"#\", onClick: this.handleErrorTypeFilterClick.bind(this, errorType) },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-ok\", style: checkmarkStyle }),\n \"\\xA0\",\n errorTypeText\n )\n );\n }\n }, {\n key: \"handleErrorTypeFilterClick\",\n value: function handleErrorTypeFilterClick(errorType) {\n var newFilters = this.state.errorTypeFilters.slice();\n if (this.state.errorTypeFilters.indexOf(errorType) > -1) {\n newFilters.splice(newFilters.indexOf(errorType), 1);\n } else {\n newFilters.push(errorType);\n }\n\n var filteredQueries = this.filterQueries(this.state.allQueries, this.state.stateFilters, newFilters, this.state.searchString);\n this.sortAndLimitQueries(filteredQueries, this.state.currentSortType, this.state.currentSortOrder);\n\n this.setState({\n errorTypeFilters: newFilters,\n displayedQueries: filteredQueries\n });\n }\n }, {\n key: \"render\",\n value: function render() {\n var queryList = _react2.default.createElement(DisplayedQueriesList, { queries: this.state.displayedQueries });\n if (this.state.displayedQueries === null || this.state.displayedQueries.length === 0) {\n var label = _react2.default.createElement(\n \"div\",\n { className: \"loader\" },\n \"Loading...\"\n );\n if (this.state.initialized) {\n if (this.state.allQueries === null || this.state.allQueries.length === 0) {\n label = \"No queries\";\n } else {\n label = \"No queries matched filters\";\n }\n }\n queryList = _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n label\n )\n )\n );\n }\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"div\",\n { className: \"row toolbar-row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12 toolbar-col\" },\n _react2.default.createElement(\n \"div\",\n { className: \"input-group input-group-sm\" },\n _react2.default.createElement(\"input\", { type: \"text\", className: \"form-control form-control-small search-bar\",\n placeholder: \"User, source, query ID, resource group, or query text\",\n onChange: this.handleSearchStringChange, value: this.state.searchString }),\n _react2.default.createElement(\n \"span\",\n { className: \"input-group-addon filter-addon\" },\n \"State:\"\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"input-group-btn\" },\n this.renderFilterButton(FILTER_TYPE.RUNNING, \"Running\"),\n this.renderFilterButton(FILTER_TYPE.QUEUED, \"Queued\"),\n this.renderFilterButton(FILTER_TYPE.FINISHED, \"Finished\"),\n _react2.default.createElement(\n \"button\",\n { type: \"button\", id: 
\"error-type-dropdown\",\n className: \"btn btn-default dropdown-toggle\", \"data-toggle\": \"dropdown\",\n \"aria-haspopup\": \"true\", \"aria-expanded\": \"false\" },\n \"Failed \",\n _react2.default.createElement(\"span\", { className: \"caret\" })\n ),\n _react2.default.createElement(\n \"ul\",\n { className: \"dropdown-menu error-type-dropdown-menu\" },\n this.renderErrorTypeListItem(ERROR_TYPE.FAILED, \"Query Error\")\n )\n ),\n \"\\xA0\",\n _react2.default.createElement(\n \"div\",\n { className: \"input-group-btn\" },\n _react2.default.createElement(\n \"button\",\n { type: \"button\", className: \"btn btn-default dropdown-toggle\", \"data-toggle\": \"dropdown\",\n \"aria-haspopup\": \"true\", \"aria-expanded\": \"false\" },\n \"Sort \",\n _react2.default.createElement(\"span\", { className: \"caret\" })\n ),\n _react2.default.createElement(\n \"ul\",\n { className: \"dropdown-menu\" },\n this.renderSortListItem(SORT_TYPE.CREATED, \"Creation Time\"),\n this.renderSortListItem(SORT_TYPE.ELAPSED, \"Elapsed Time\"),\n this.renderSortListItem(SORT_TYPE.CPU, \"CPU Time\"),\n this.renderSortListItem(SORT_TYPE.EXECUTION, \"Execution Time\"),\n this.renderSortListItem(SORT_TYPE.CURRENT_MEMORY, \"Current Memory\"),\n this.renderSortListItem(SORT_TYPE.CUMULATIVE_MEMORY, \"Cumulative User Memory\")\n )\n ),\n \"\\xA0\",\n _react2.default.createElement(\n \"div\",\n { className: \"input-group-btn\" },\n _react2.default.createElement(\n \"button\",\n { type: \"button\", className: \"btn btn-default dropdown-toggle\", \"data-toggle\": \"dropdown\",\n \"aria-haspopup\": \"true\", \"aria-expanded\": \"false\" },\n \"Reorder Interval \",\n _react2.default.createElement(\"span\", { className: \"caret\" })\n ),\n _react2.default.createElement(\n \"ul\",\n { className: \"dropdown-menu\" },\n this.renderReorderListItem(1000, \"1s\"),\n this.renderReorderListItem(5000, \"5s\"),\n this.renderReorderListItem(10000, \"10s\"),\n this.renderReorderListItem(30000, \"30s\"),\n _react2.default.createElement(\"li\", { role: \"separator\", className: \"divider\" }),\n this.renderReorderListItem(0, \"Off\")\n )\n ),\n \"\\xA0\",\n _react2.default.createElement(\n \"div\",\n { className: \"input-group-btn\" },\n _react2.default.createElement(\n \"button\",\n { type: \"button\", className: \"btn btn-default dropdown-toggle\", \"data-toggle\": \"dropdown\",\n \"aria-haspopup\": \"true\", \"aria-expanded\": \"false\" },\n \"Show \",\n _react2.default.createElement(\"span\", { className: \"caret\" })\n ),\n _react2.default.createElement(\n \"ul\",\n { className: \"dropdown-menu\" },\n this.renderMaxQueriesListItem(20, \"20 queries\"),\n this.renderMaxQueriesListItem(50, \"50 queries\"),\n this.renderMaxQueriesListItem(100, \"100 queries\"),\n _react2.default.createElement(\"li\", { role: \"separator\", className: \"divider\" }),\n this.renderMaxQueriesListItem(0, \"All queries\")\n )\n )\n )\n )\n ),\n queryList\n );\n }\n }]);\n\n return QueryList;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/QueryList.jsx?"); +eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.QueryList = exports.QueryListItem = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } 
return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nvar _utils = __webpack_require__(/*! ../utils */ \"./utils.js\");\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nvar QueryListItem = exports.QueryListItem = function (_React$Component) {\n _inherits(QueryListItem, _React$Component);\n\n function QueryListItem() {\n _classCallCheck(this, QueryListItem);\n\n return _possibleConstructorReturn(this, (QueryListItem.__proto__ || Object.getPrototypeOf(QueryListItem)).apply(this, arguments));\n }\n\n _createClass(QueryListItem, [{\n key: \"renderWarning\",\n value: function renderWarning() {\n var query = this.props.query;\n if (query.warnings && query.warnings.length) {\n var warningCodes = [];\n query.warnings.forEach(function (warning) {\n warningCodes.push(warning.warningCode.name);\n });\n\n return _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-warning-sign query-warning\", \"data-toggle\": \"tooltip\",\n title: warningCodes.join(', ') });\n }\n }\n }, {\n key: \"render\",\n value: function render() {\n var query = this.props.query;\n var progressBarStyle = {\n width: (0, _utils.getProgressBarPercentage)(query) + \"%\",\n backgroundColor: (0, _utils.getQueryStateColor)(query)\n };\n\n var splitDetails = _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12 tinystat-row\" },\n _react2.default.createElement(\n \"span\",\n { className: \"tinystat\", \"data-toggle\": \"tooltip\", \"data-placement\": \"top\", title: \"Completed splits\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-ok\", style: _utils.GLYPHICON_HIGHLIGHT }),\n \"\\xA0\\xA0\",\n query.queryStats.completedPipelineExecs\n ),\n 
_react2.default.createElement(\n \"span\",\n { className: \"tinystat\", \"data-toggle\": \"tooltip\", \"data-placement\": \"top\", title: \"Running splits\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-play\", style: _utils.GLYPHICON_HIGHLIGHT }),\n \"\\xA0\\xA0\",\n query.state === \"FINISHED\" || query.state === \"FAILED\" ? 0 : query.queryStats.runningPipelineExecs\n ),\n _react2.default.createElement(\n \"span\",\n { className: \"tinystat\", \"data-toggle\": \"tooltip\", \"data-placement\": \"top\", title: \"Queued splits\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-pause\", style: _utils.GLYPHICON_HIGHLIGHT }),\n \"\\xA0\\xA0\",\n query.state === \"FINISHED\" || query.state === \"FAILED\" ? 0 : query.queryStats.queuedPipelineExecs\n )\n );\n\n var timingDetails = _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12 tinystat-row\" },\n _react2.default.createElement(\n \"span\",\n { className: \"tinystat\", \"data-toggle\": \"tooltip\", \"data-placement\": \"top\",\n title: \"Wall time spent executing the query (not including queued time)\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-hourglass\", style: _utils.GLYPHICON_HIGHLIGHT }),\n \"\\xA0\\xA0\",\n query.queryStats.executionTime\n ),\n _react2.default.createElement(\n \"span\",\n { className: \"tinystat\", \"data-toggle\": \"tooltip\", \"data-placement\": \"top\", title: \"Total query wall time\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-time\", style: _utils.GLYPHICON_HIGHLIGHT }),\n \"\\xA0\\xA0\",\n query.queryStats.elapsedTime\n ),\n _react2.default.createElement(\n \"span\",\n { className: \"tinystat\", \"data-toggle\": \"tooltip\", \"data-placement\": \"top\", title: \"Plan Time by this query\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-dashboard\", style: _utils.GLYPHICON_HIGHLIGHT }),\n \"\\xA0\\xA0\",\n query.queryStats.totalPlanningTime\n )\n );\n\n var memoryDetails = _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12 tinystat-row\" },\n _react2.default.createElement(\n \"span\",\n { className: \"tinystat\", \"data-toggle\": \"tooltip\", \"data-placement\": \"top\", title: \"Current reserved memory\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-scale\", style: _utils.GLYPHICON_HIGHLIGHT }),\n \"\\xA0\\xA0\",\n query.queryStats.totalMemoryReservation\n ),\n _react2.default.createElement(\n \"span\",\n { className: \"tinystat\", \"data-toggle\": \"tooltip\", \"data-placement\": \"top\", title: \"Peak memory\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-fire\", style: _utils.GLYPHICON_HIGHLIGHT }),\n \"\\xA0\\xA0\",\n query.queryStats.peakMemoryReservation\n ),\n _react2.default.createElement(\n \"span\",\n { className: \"tinystat\", \"data-toggle\": \"tooltip\", \"data-placement\": \"top\", title: \"Cumulative user memory\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-equalizer\", style: _utils.GLYPHICON_HIGHLIGHT }),\n \"\\xA0\\xA0\",\n (0, _utils.formatDataSizeBytes)(query.queryStats.cumulativeMemory)\n )\n );\n\n return _react2.default.createElement(\n \"div\",\n { className: \"query\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-4\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row stat-row 
query-header query-header-queryid\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-9\", \"data-placement\": \"bottom\" },\n _react2.default.createElement(\n \"a\",\n { href: \"query.html?\" + query.queryId, target: \"_blank\", \"data-toggle\": \"tooltip\",\n title: \"Query ID\" },\n query.queryId\n ),\n this.renderWarning()\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-3 query-header-timestamp\", \"data-toggle\": \"tooltip\",\n \"data-placement\": \"bottom\", title: \"Submit time\" },\n _react2.default.createElement(\n \"span\",\n null,\n (0, _utils.formatShortTime)(new Date(Date.parse(query.queryStats.createTime)))\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"row stat-row\" },\n splitDetails\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"row stat-row\" },\n timingDetails\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"row stat-row\" },\n memoryDetails\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-8\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row query-header\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12 query-progress-container\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-bar progress-bar-info\", role: \"progressbar\",\n \"aria-valuenow\": (0, _utils.getProgressBarPercentage)(query), \"aria-valuemin\": \"0\",\n \"aria-valuemax\": \"100\", style: progressBarStyle },\n (0, _utils.getProgressBarTitle)(query)\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"row query-row-bottom\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"pre\",\n { className: \"query-snippet\" },\n _react2.default.createElement(\n \"code\",\n {\n className: \"sql\" },\n QueryListItem.stripQueryTextWhitespace(query.query)\n )\n )\n )\n )\n )\n )\n );\n }\n }], [{\n key: \"stripQueryTextWhitespace\",\n value: function stripQueryTextWhitespace(queryText) {\n var lines = queryText.split(\"\\n\");\n var minLeadingWhitespace = -1;\n for (var i = 0; i < lines.length; i++) {\n if (minLeadingWhitespace === 0) {\n break;\n }\n\n if (lines[i].trim().length === 0) {\n continue;\n }\n\n var leadingWhitespace = lines[i].search(/\\S/);\n\n if (leadingWhitespace > -1 && (leadingWhitespace < minLeadingWhitespace || minLeadingWhitespace === -1)) {\n minLeadingWhitespace = leadingWhitespace;\n }\n }\n\n var formattedQueryText = \"\";\n\n for (var _i = 0; _i < lines.length; _i++) {\n var trimmedLine = lines[_i].substring(minLeadingWhitespace).replace(/\\s+$/g, '');\n\n if (trimmedLine.length > 0) {\n formattedQueryText += trimmedLine;\n\n if (_i < lines.length - 1) {\n formattedQueryText += \"\\n\";\n }\n }\n }\n\n return (0, _utils.truncateString)(formattedQueryText, 300);\n }\n }]);\n\n return QueryListItem;\n}(_react2.default.Component);\n\nvar DisplayedQueriesList = function (_React$Component2) {\n _inherits(DisplayedQueriesList, _React$Component2);\n\n function DisplayedQueriesList() {\n _classCallCheck(this, DisplayedQueriesList);\n\n return _possibleConstructorReturn(this, (DisplayedQueriesList.__proto__ || Object.getPrototypeOf(DisplayedQueriesList)).apply(this, arguments));\n }\n\n _createClass(DisplayedQueriesList, [{\n key: \"render\",\n value: function render() {\n var queryNodes = this.props.queries.map(function (query) {\n return 
_react2.default.createElement(QueryListItem, { key: query.queryId, query: query });\n }.bind(this));\n return _react2.default.createElement(\n \"div\",\n null,\n queryNodes\n );\n }\n }]);\n\n return DisplayedQueriesList;\n}(_react2.default.Component);\n\nvar FILTER_TYPE = {\n RUNNING: function RUNNING(query) {\n return query.state == \"PLANNING\" || query.state == \"STARTING\" || query.state == \"RUNNING\" || query.state == \"FINISHING\";\n },\n QUEUED: function QUEUED(query) {\n return query.state === \"QUEUED\";\n },\n FINISHED: function FINISHED(query) {\n return query.state === \"FINISHED\";\n }\n};\n\nvar SORT_TYPE = {\n CREATED: function CREATED(query) {\n return Date.parse(query.queryStats.createTime);\n },\n ELAPSED: function ELAPSED(query) {\n return (0, _utils.parseDuration)(query.queryStats.elapsedTime);\n },\n EXECUTION: function EXECUTION(query) {\n return (0, _utils.parseDuration)(query.queryStats.executionTime);\n },\n CPU: function CPU(query) {\n return (0, _utils.parseDuration)(query.queryStats.totalCpuTime);\n },\n CUMULATIVE_MEMORY: function CUMULATIVE_MEMORY(query) {\n return query.queryStats.cumulativeUserMemory;\n },\n CURRENT_MEMORY: function CURRENT_MEMORY(query) {\n return (0, _utils.parseDataSize)(query.queryStats.userMemoryReservation);\n }\n};\n\nvar ERROR_TYPE = {\n FAILED: function FAILED(query) {\n return query.state === \"FAILED\";\n }\n};\n\nvar SORT_ORDER = {\n ASCENDING: function ASCENDING(value) {\n return value;\n },\n DESCENDING: function DESCENDING(value) {\n return -value;\n }\n};\n\nvar QueryList = exports.QueryList = function (_React$Component3) {\n _inherits(QueryList, _React$Component3);\n\n function QueryList(props) {\n _classCallCheck(this, QueryList);\n\n var _this3 = _possibleConstructorReturn(this, (QueryList.__proto__ || Object.getPrototypeOf(QueryList)).call(this, props));\n\n _this3.state = {\n allQueries: [],\n displayedQueries: [],\n reorderInterval: 5000,\n currentSortType: SORT_TYPE.CREATED,\n currentSortOrder: SORT_ORDER.DESCENDING,\n stateFilters: [FILTER_TYPE.RUNNING, FILTER_TYPE.QUEUED],\n errorTypeFilters: [ERROR_TYPE.FAILED],\n searchString: '',\n maxQueries: 100,\n lastRefresh: Date.now(),\n lastReorder: Date.now(),\n initialized: false\n };\n\n _this3.refreshLoop = _this3.refreshLoop.bind(_this3);\n _this3.handleSearchStringChange = _this3.handleSearchStringChange.bind(_this3);\n _this3.executeSearch = _this3.executeSearch.bind(_this3);\n _this3.handleSortClick = _this3.handleSortClick.bind(_this3);\n return _this3;\n }\n\n _createClass(QueryList, [{\n key: \"sortAndLimitQueries\",\n value: function sortAndLimitQueries(queries, sortType, sortOrder, maxQueries) {\n queries.sort(function (queryA, queryB) {\n return sortOrder(sortType(queryA) - sortType(queryB));\n }, this);\n\n if (maxQueries !== 0 && queries.length > maxQueries) {\n queries.splice(maxQueries, queries.length - maxQueries);\n }\n }\n }, {\n key: \"filterQueries\",\n value: function filterQueries(queries, stateFilters, errorTypeFilters, searchString) {\n var stateFilteredQueries = queries.filter(function (query) {\n for (var i = 0; i < stateFilters.length; i++) {\n if (stateFilters[i](query)) {\n return true;\n }\n }\n for (var _i2 = 0; _i2 < errorTypeFilters.length; _i2++) {\n if (errorTypeFilters[_i2](query)) {\n return true;\n }\n }\n return false;\n });\n\n if (searchString === '') {\n return stateFilteredQueries;\n } else {\n return stateFilteredQueries.filter(function (query) {\n var term = searchString.toLowerCase();\n if 
(query.queryId.toLowerCase().indexOf(term) !== -1 || (0, _utils.getHumanReadableState)(query).toLowerCase().indexOf(term) !== -1 || query.query.toLowerCase().indexOf(term) !== -1) {\n return true;\n }\n }, this);\n }\n }\n }, {\n key: \"resetTimer\",\n value: function resetTimer() {\n clearTimeout(this.timeoutId);\n // stop refreshing when query finishes or fails\n if (this.state.query === null || !this.state.ended) {\n this.timeoutId = setTimeout(this.refreshLoop, 5000);\n }\n }\n }, {\n key: \"refreshLoop\",\n value: function refreshLoop() {\n clearTimeout(this.timeoutId); // to stop multiple series of refreshLoop from going on simultaneously\n clearTimeout(this.searchTimeoutId);\n\n $.get('/v1/query', function (queryList) {\n var queryMap = queryList.reduce(function (map, query) {\n map[query.queryId] = query;\n return map;\n }, {});\n\n var updatedQueries = [];\n this.state.displayedQueries.forEach(function (oldQuery) {\n if (oldQuery.queryId in queryMap) {\n updatedQueries.push(queryMap[oldQuery.queryId]);\n queryMap[oldQuery.queryId] = false;\n }\n });\n\n var newQueries = [];\n for (var queryId in queryMap) {\n if (queryMap[queryId]) {\n newQueries.push(queryMap[queryId]);\n }\n }\n newQueries = this.filterQueries(newQueries, this.state.stateFilters, this.state.errorTypeFilters, this.state.searchString);\n\n var lastRefresh = Date.now();\n var lastReorder = this.state.lastReorder;\n\n if (this.state.reorderInterval !== 0 && lastRefresh - lastReorder >= this.state.reorderInterval) {\n updatedQueries = this.filterQueries(updatedQueries, this.state.stateFilters, this.state.errorTypeFilters, this.state.searchString);\n updatedQueries = updatedQueries.concat(newQueries);\n this.sortAndLimitQueries(updatedQueries, this.state.currentSortType, this.state.currentSortOrder, 0);\n lastReorder = Date.now();\n } else {\n this.sortAndLimitQueries(newQueries, this.state.currentSortType, this.state.currentSortOrder, 0);\n updatedQueries = updatedQueries.concat(newQueries);\n }\n\n if (this.state.maxQueries !== 0 && updatedQueries.length > this.state.maxQueries) {\n updatedQueries.splice(this.state.maxQueries, updatedQueries.length - this.state.maxQueries);\n }\n\n this.setState({\n allQueries: queryList,\n displayedQueries: updatedQueries,\n lastRefresh: lastRefresh,\n lastReorder: lastReorder,\n initialized: true\n });\n this.resetTimer();\n }.bind(this)).error(function () {\n this.setState({\n initialized: true\n });\n this.resetTimer();\n }.bind(this));\n }\n }, {\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.refreshLoop();\n }\n }, {\n key: \"handleSearchStringChange\",\n value: function handleSearchStringChange(event) {\n var newSearchString = event.target.value;\n clearTimeout(this.searchTimeoutId);\n\n this.setState({\n searchString: newSearchString\n });\n\n this.searchTimeoutId = setTimeout(this.executeSearch, 5000);\n }\n }, {\n key: \"executeSearch\",\n value: function executeSearch() {\n clearTimeout(this.searchTimeoutId);\n\n var newDisplayedQueries = this.filterQueries(this.state.allQueries, this.state.stateFilters, this.state.errorTypeFilters, this.state.searchString);\n this.sortAndLimitQueries(newDisplayedQueries, this.state.currentSortType, this.state.currentSortOrder, this.state.maxQueries);\n\n this.setState({\n displayedQueries: newDisplayedQueries\n });\n }\n }, {\n key: \"renderMaxQueriesListItem\",\n value: function renderMaxQueriesListItem(maxQueries, maxQueriesText) {\n return _react2.default.createElement(\n \"li\",\n null,\n 
_react2.default.createElement(\n \"a\",\n { href: \"#\", className: this.state.maxQueries === maxQueries ? \"selected\" : \"\",\n onClick: this.handleMaxQueriesClick.bind(this, maxQueries) },\n maxQueriesText\n )\n );\n }\n }, {\n key: \"handleMaxQueriesClick\",\n value: function handleMaxQueriesClick(newMaxQueries) {\n var filteredQueries = this.filterQueries(this.state.allQueries, this.state.stateFilters, this.state.errorTypeFilters, this.state.searchString);\n this.sortAndLimitQueries(filteredQueries, this.state.currentSortType, this.state.currentSortOrder, newMaxQueries);\n\n this.setState({\n maxQueries: newMaxQueries,\n displayedQueries: filteredQueries\n });\n }\n }, {\n key: \"renderReorderListItem\",\n value: function renderReorderListItem(interval, intervalText) {\n return _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"#\", className: this.state.reorderInterval === interval ? \"selected\" : \"\",\n onClick: this.handleReorderClick.bind(this, interval) },\n intervalText\n )\n );\n }\n }, {\n key: \"handleReorderClick\",\n value: function handleReorderClick(interval) {\n if (this.state.reorderInterval !== interval) {\n this.setState({\n reorderInterval: interval\n });\n }\n }\n }, {\n key: \"renderSortListItem\",\n value: function renderSortListItem(sortType, sortText) {\n if (this.state.currentSortType === sortType) {\n var directionArrow = this.state.currentSortOrder === SORT_ORDER.ASCENDING ? _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-triangle-top\" }) : _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-triangle-bottom\" });\n return _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"#\", className: \"selected\", onClick: this.handleSortClick.bind(this, sortType) },\n sortText,\n \" \",\n directionArrow\n )\n );\n } else {\n return _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"#\", onClick: this.handleSortClick.bind(this, sortType) },\n sortText\n )\n );\n }\n }\n }, {\n key: \"handleSortClick\",\n value: function handleSortClick(sortType) {\n var newSortType = sortType;\n var newSortOrder = SORT_ORDER.DESCENDING;\n\n if (this.state.currentSortType === sortType && this.state.currentSortOrder === SORT_ORDER.DESCENDING) {\n newSortOrder = SORT_ORDER.ASCENDING;\n }\n\n var newDisplayedQueries = this.filterQueries(this.state.allQueries, this.state.stateFilters, this.state.errorTypeFilters, this.state.searchString);\n this.sortAndLimitQueries(newDisplayedQueries, newSortType, newSortOrder, this.state.maxQueries);\n\n this.setState({\n displayedQueries: newDisplayedQueries,\n currentSortType: newSortType,\n currentSortOrder: newSortOrder\n });\n }\n }, {\n key: \"renderFilterButton\",\n value: function renderFilterButton(filterType, filterText) {\n var checkmarkStyle = { color: '#57aac7' };\n var classNames = \"btn btn-sm btn-info style-check\";\n if (this.state.stateFilters.indexOf(filterType) > -1) {\n classNames += \" active\";\n checkmarkStyle = { color: '#ffffff' };\n }\n\n return _react2.default.createElement(\n \"button\",\n { type: \"button\", className: classNames, onClick: this.handleStateFilterClick.bind(this, filterType) },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-ok\", style: checkmarkStyle }),\n \"\\xA0\",\n filterText\n );\n }\n }, {\n key: \"handleStateFilterClick\",\n value: function handleStateFilterClick(filter) 
{\n var newFilters = this.state.stateFilters.slice();\n if (this.state.stateFilters.indexOf(filter) > -1) {\n newFilters.splice(newFilters.indexOf(filter), 1);\n } else {\n newFilters.push(filter);\n }\n\n var filteredQueries = this.filterQueries(this.state.allQueries, newFilters, this.state.errorTypeFilters, this.state.searchString);\n this.sortAndLimitQueries(filteredQueries, this.state.currentSortType, this.state.currentSortOrder);\n\n this.setState({\n stateFilters: newFilters,\n displayedQueries: filteredQueries\n });\n }\n }, {\n key: \"renderErrorTypeListItem\",\n value: function renderErrorTypeListItem(errorType, errorTypeText) {\n var checkmarkStyle = { color: '#ffffff' };\n if (this.state.errorTypeFilters.indexOf(errorType) > -1) {\n checkmarkStyle = _utils.GLYPHICON_HIGHLIGHT;\n }\n return _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"#\", onClick: this.handleErrorTypeFilterClick.bind(this, errorType) },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-ok\", style: checkmarkStyle }),\n \"\\xA0\",\n errorTypeText\n )\n );\n }\n }, {\n key: \"handleErrorTypeFilterClick\",\n value: function handleErrorTypeFilterClick(errorType) {\n var newFilters = this.state.errorTypeFilters.slice();\n if (this.state.errorTypeFilters.indexOf(errorType) > -1) {\n newFilters.splice(newFilters.indexOf(errorType), 1);\n } else {\n newFilters.push(errorType);\n }\n\n var filteredQueries = this.filterQueries(this.state.allQueries, this.state.stateFilters, newFilters, this.state.searchString);\n this.sortAndLimitQueries(filteredQueries, this.state.currentSortType, this.state.currentSortOrder);\n\n this.setState({\n errorTypeFilters: newFilters,\n displayedQueries: filteredQueries\n });\n }\n }, {\n key: \"render\",\n value: function render() {\n var queryList = _react2.default.createElement(DisplayedQueriesList, { queries: this.state.displayedQueries });\n if (this.state.displayedQueries === null || this.state.displayedQueries.length === 0) {\n var label = _react2.default.createElement(\n \"div\",\n { className: \"loader\" },\n \"Loading...\"\n );\n if (this.state.initialized) {\n if (this.state.allQueries === null || this.state.allQueries.length === 0) {\n label = \"No queries\";\n } else {\n label = \"No queries matched filters\";\n }\n }\n queryList = _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n label\n )\n )\n );\n }\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"div\",\n { className: \"row toolbar-row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12 toolbar-col\" },\n _react2.default.createElement(\n \"div\",\n { className: \"input-group input-group-sm\" },\n _react2.default.createElement(\"input\", { type: \"text\", className: \"form-control form-control-small search-bar\",\n placeholder: \"TraceID or QueryText\",\n onChange: this.handleSearchStringChange, value: this.state.searchString }),\n _react2.default.createElement(\n \"span\",\n { className: \"input-group-addon filter-addon\" },\n \"State:\"\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"input-group-btn\" },\n this.renderFilterButton(FILTER_TYPE.RUNNING, \"Running\"),\n this.renderFilterButton(FILTER_TYPE.QUEUED, \"Queued\"),\n this.renderFilterButton(FILTER_TYPE.FINISHED, \"Finished\"),\n 
_react2.default.createElement(\n \"button\",\n { type: \"button\", id: \"error-type-dropdown\",\n className: \"btn btn-default dropdown-toggle\", \"data-toggle\": \"dropdown\",\n \"aria-haspopup\": \"true\", \"aria-expanded\": \"false\" },\n \"Failed \",\n _react2.default.createElement(\"span\", { className: \"caret\" })\n ),\n _react2.default.createElement(\n \"ul\",\n { className: \"dropdown-menu error-type-dropdown-menu\" },\n this.renderErrorTypeListItem(ERROR_TYPE.FAILED, \"Query Error\")\n )\n ),\n \"\\xA0\",\n _react2.default.createElement(\n \"div\",\n { className: \"input-group-btn\" },\n _react2.default.createElement(\n \"button\",\n { type: \"button\", className: \"btn btn-default dropdown-toggle\", \"data-toggle\": \"dropdown\",\n \"aria-haspopup\": \"true\", \"aria-expanded\": \"false\" },\n \"Sort \",\n _react2.default.createElement(\"span\", { className: \"caret\" })\n ),\n _react2.default.createElement(\n \"ul\",\n { className: \"dropdown-menu\" },\n this.renderSortListItem(SORT_TYPE.CREATED, \"Creation Time\"),\n this.renderSortListItem(SORT_TYPE.ELAPSED, \"Elapsed Time\"),\n this.renderSortListItem(SORT_TYPE.CPU, \"CPU Time\"),\n this.renderSortListItem(SORT_TYPE.EXECUTION, \"Execution Time\"),\n this.renderSortListItem(SORT_TYPE.CURRENT_MEMORY, \"Current Memory\"),\n this.renderSortListItem(SORT_TYPE.CUMULATIVE_MEMORY, \"Cumulative User Memory\")\n )\n ),\n \"\\xA0\",\n _react2.default.createElement(\n \"div\",\n { className: \"input-group-btn\" },\n _react2.default.createElement(\n \"button\",\n { type: \"button\", className: \"btn btn-default dropdown-toggle\", \"data-toggle\": \"dropdown\",\n \"aria-haspopup\": \"true\", \"aria-expanded\": \"false\" },\n \"Reorder Interval \",\n _react2.default.createElement(\"span\", { className: \"caret\" })\n ),\n _react2.default.createElement(\n \"ul\",\n { className: \"dropdown-menu\" },\n this.renderReorderListItem(1000, \"1s\"),\n this.renderReorderListItem(5000, \"5s\"),\n this.renderReorderListItem(10000, \"10s\"),\n this.renderReorderListItem(30000, \"30s\"),\n _react2.default.createElement(\"li\", { role: \"separator\", className: \"divider\" }),\n this.renderReorderListItem(0, \"Off\")\n )\n ),\n \"\\xA0\",\n _react2.default.createElement(\n \"div\",\n { className: \"input-group-btn\" },\n _react2.default.createElement(\n \"button\",\n { type: \"button\", className: \"btn btn-default dropdown-toggle\", \"data-toggle\": \"dropdown\",\n \"aria-haspopup\": \"true\", \"aria-expanded\": \"false\" },\n \"Show \",\n _react2.default.createElement(\"span\", { className: \"caret\" })\n ),\n _react2.default.createElement(\n \"ul\",\n { className: \"dropdown-menu\" },\n this.renderMaxQueriesListItem(20, \"20 queries\"),\n this.renderMaxQueriesListItem(50, \"50 queries\"),\n this.renderMaxQueriesListItem(100, \"100 queries\"),\n _react2.default.createElement(\"li\", { role: \"separator\", className: \"divider\" }),\n this.renderMaxQueriesListItem(0, \"All queries\")\n )\n )\n )\n )\n ),\n queryList\n );\n }\n }]);\n\n return QueryList;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/QueryList.jsx?"); /***/ }), @@ -20639,7 +20639,7 @@ eval("module.exports = function(module) {\n\tif (!module.webpackPolyfill) {\n\t\ /***/ (function(module, exports, __webpack_require__) { "use strict"; -eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.GLYPHICON_HIGHLIGHT = exports.GLYPHICON_DEFAULT = undefined;\nexports.getQueryStateColor = getQueryStateColor;\nexports.getStageStateColor = 
getStageStateColor;\nexports.getHumanReadableState = getHumanReadableState;\nexports.getProgressBarPercentage = getProgressBarPercentage;\nexports.getProgressBarTitle = getProgressBarTitle;\nexports.isQueryEnded = isQueryEnded;\nexports.addToHistory = addToHistory;\nexports.addExponentiallyWeightedToHistory = addExponentiallyWeightedToHistory;\nexports.initializeGraph = initializeGraph;\nexports.initializeSvg = initializeSvg;\nexports.truncateString = truncateString;\nexports.getStageNumber = getStageNumber;\nexports.getTaskIdSuffix = getTaskIdSuffix;\nexports.getTaskNumber = getTaskNumber;\nexports.getFirstParameter = getFirstParameter;\nexports.getHostname = getHostname;\nexports.getPort = getPort;\nexports.getHostAndPort = getHostAndPort;\nexports.computeRate = computeRate;\nexports.precisionRound = precisionRound;\nexports.formatDuration = formatDuration;\nexports.formatRows = formatRows;\nexports.formatCount = formatCount;\nexports.formatDataSizeBytes = formatDataSizeBytes;\nexports.formatDataSize = formatDataSize;\nexports.parseDataSize = parseDataSize;\nexports.parseDuration = parseDuration;\nexports.formatShortTime = formatShortTime;\nexports.formatShortDateTime = formatShortDateTime;\n\nvar _dagreD = __webpack_require__(/*! dagre-d3 */ \"./node_modules/dagre-d3/index.js\");\n\nvar dagreD3 = _interopRequireWildcard(_dagreD);\n\nvar _d = __webpack_require__(/*! d3 */ \"./node_modules/d3/index.js\");\n\nvar d3 = _interopRequireWildcard(_d);\n\nfunction _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } }\n\n// Query display\n// =============\n\n/*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\nvar GLYPHICON_DEFAULT = exports.GLYPHICON_DEFAULT = { color: '#1edcff' };\nvar GLYPHICON_HIGHLIGHT = exports.GLYPHICON_HIGHLIGHT = { color: '#999999' };\n\nvar STATE_COLOR_MAP = {\n QUEUED: '#1b8f72',\n RUNNING: '#19874e',\n PLANNING: '#674f98',\n FINISHED: '#1a4629',\n BLOCKED: '#61003b',\n USER_ERROR: '#9a7d66',\n CANCELED: '#858959',\n INSUFFICIENT_RESOURCES: '#7f5b72',\n EXTERNAL_ERROR: '#ca7640',\n UNKNOWN_ERROR: '#943524'\n};\n\nfunction getQueryStateColor(query) {\n switch (query.state) {\n case \"QUEUED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"PLANNING\":\n return STATE_COLOR_MAP.PLANNING;\n case \"STARTING\":\n case \"FINISHING\":\n case \"RUNNING\":\n if (query.queryStats && query.queryStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FAILED\":\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === 'USER_CANCELED') {\n return STATE_COLOR_MAP.CANCELED;\n }\n return STATE_COLOR_MAP.USER_ERROR;\n case \"EXTERNAL\":\n return STATE_COLOR_MAP.EXTERNAL_ERROR;\n case \"INSUFFICIENT_RESOURCES\":\n return STATE_COLOR_MAP.INSUFFICIENT_RESOURCES;\n default:\n return 
STATE_COLOR_MAP.UNKNOWN_ERROR;\n }\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n default:\n return STATE_COLOR_MAP.QUEUED;\n }\n}\n\nfunction getStageStateColor(stage) {\n switch (stage.state) {\n case \"PLANNED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"SCHEDULING\":\n case \"SCHEDULING_SPLITS\":\n case \"SCHEDULED\":\n return STATE_COLOR_MAP.PLANNING;\n case \"RUNNING\":\n if (stage.stageStats && stage.stageStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n case \"CANCELED\":\n case \"ABORTED\":\n return STATE_COLOR_MAP.CANCELED;\n case \"FAILED\":\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n default:\n return \"#b5b5b5\";\n }\n}\n\n// This relies on the fact that BasicQueryInfo and QueryInfo have all the fields\n// necessary to compute this string, and that these fields are consistently named.\nfunction getHumanReadableState(query) {\n if (query.state === \"RUNNING\") {\n var title = \"RUNNING\";\n\n if (query.scheduled && query.queryStats.totalDrivers > 0 && query.queryStats.runningDrivers >= 0) {\n if (query.queryStats.fullyBlocked) {\n title = \"BLOCKED\";\n\n if (query.queryStats.blockedReasons && query.queryStats.blockedReasons.length > 0) {\n title += \" (\" + query.queryStats.blockedReasons.join(\", \") + \")\";\n }\n }\n\n if (query.memoryPool === \"reserved\") {\n title += \" (RESERVED)\";\n }\n\n return title;\n }\n }\n\n if (query.state === \"FAILED\") {\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === \"USER_CANCELED\") {\n return \"USER CANCELED\";\n }\n return \"USER ERROR\";\n case \"INTERNAL_ERROR\":\n return \"INTERNAL ERROR\";\n case \"INSUFFICIENT_RESOURCES\":\n return \"INSUFFICIENT RESOURCES\";\n case \"EXTERNAL\":\n return \"EXTERNAL ERROR\";\n }\n }\n\n return query.state;\n}\n\nfunction getProgressBarPercentage(query) {\n var progress = query.queryStats.progressPercentage;\n\n // progress bars should appear 'full' when query progress is not meaningful\n if (!progress || query.state !== \"RUNNING\") {\n return 100;\n }\n\n return Math.round(progress);\n}\n\nfunction getProgressBarTitle(query) {\n if (query.queryStats.progressPercentage && query.state === \"RUNNING\") {\n return getHumanReadableState(query) + \" (\" + getProgressBarPercentage(query) + \"%)\";\n }\n\n return getHumanReadableState(query);\n}\n\nfunction isQueryEnded(query) {\n return [\"FINISHED\", \"FAILED\", \"CANCELED\"].indexOf(query.state) > -1;\n}\n\n// Sparkline-related functions\n// ===========================\n\n// display at most 5 minutes worth of data on the sparklines\nvar MAX_HISTORY = 60 * 5;\n// alpha param of exponentially weighted moving average. 
picked arbitrarily - lower values means more smoothness\nvar MOVING_AVERAGE_ALPHA = 0.2;\n\nfunction addToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n return valuesArray.concat([value]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\nfunction addExponentiallyWeightedToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n\n var movingAverage = value * MOVING_AVERAGE_ALPHA + valuesArray[valuesArray.length - 1] * (1 - MOVING_AVERAGE_ALPHA);\n if (value < 1) {\n movingAverage = 0;\n }\n\n return valuesArray.concat([movingAverage]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\n// DagreD3 Graph-related functions\n// ===============================\n\nfunction initializeGraph() {\n return new dagreD3.graphlib.Graph({ compound: true }).setGraph({ rankdir: 'BT' }).setDefaultEdgeLabel(function () {\n return {};\n });\n}\n\nfunction initializeSvg(selector) {\n var svg = d3.select(selector);\n svg.append(\"g\");\n\n return svg;\n}\n\n// Utility functions\n// =================\n\nfunction truncateString(inputString, length) {\n if (inputString && inputString.length > length) {\n return inputString.substring(0, length) + \"...\";\n }\n\n return inputString;\n}\n\nfunction getStageNumber(stageId) {\n return Number.parseInt(stageId.slice(stageId.indexOf('.') + 1, stageId.length));\n}\n\nfunction getTaskIdSuffix(taskId) {\n return taskId.slice(taskId.indexOf('.') + 1, taskId.length);\n}\n\nfunction getTaskNumber(taskId) {\n return Number.parseInt(getTaskIdSuffix(getTaskIdSuffix(taskId)));\n}\n\nfunction getFirstParameter(searchString) {\n var searchText = searchString.substring(1);\n\n if (searchText.indexOf('&') !== -1) {\n return searchText.substring(0, searchText.indexOf('&'));\n }\n\n return searchText;\n}\n\nfunction getHostname(taskLocation) {\n var hostname = taskLocation.nodeServer.host;\n //\n // var hostname = new URL(url).hostname;\n if (hostname.charAt(0) == '[' && hostname.charAt(hostname.length - 1) == ']') {\n hostname = hostname.substr(1, hostname.length - 2);\n }\n return hostname;\n}\n\nfunction getPort(taskLocation) {\n return taskLocation.nodeServer.httpPort;\n // return new URL(url).port;\n}\n\nfunction getHostAndPort(taskLocation) {\n // var url = new URL(taskLocation.nodeServer.uri);\n return taskLocation.nodeServer.host + \":\" + taskLocation.nodeServer.httpPort;\n}\n\nfunction computeRate(count, ms) {\n if (ms === 0) {\n return 0;\n }\n return count / ms * 1000.0;\n}\n\nfunction precisionRound(n) {\n if (n < 10) {\n return n.toFixed(2);\n }\n if (n < 100) {\n return n.toFixed(1);\n }\n return Math.round(n).toString();\n}\n\nfunction formatDuration(duration) {\n var unit = \"ms\";\n if (duration > 1000) {\n duration /= 1000;\n unit = \"s\";\n }\n if (unit === \"s\" && duration > 60) {\n duration /= 60;\n unit = \"m\";\n }\n if (unit === \"m\" && duration > 60) {\n duration /= 60;\n unit = \"h\";\n }\n if (unit === \"h\" && duration > 24) {\n duration /= 24;\n unit = \"d\";\n }\n if (unit === \"d\" && duration > 7) {\n duration /= 7;\n unit = \"w\";\n }\n return precisionRound(duration) + unit;\n}\n\nfunction formatRows(count) {\n if (count === 1) {\n return \"1 row\";\n }\n\n return formatCount(count) + \" rows\";\n}\n\nfunction formatCount(count) {\n var unit = \"\";\n if (count > 1000) {\n count /= 1000;\n unit = \"K\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"M\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"B\";\n 
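
The sparkline helpers in this hunk (identical on the removed and added sides) cap history at five minutes of samples and smooth instantaneous rates with an exponentially weighted moving average. De-minified from the bundle above, with behavior unchanged:

```javascript
// Bounded history plus EWMA smoothing, as in the bundled utils.js.
var MAX_HISTORY = 60 * 5;         // keep at most 5 minutes of samples
var MOVING_AVERAGE_ALPHA = 0.2;   // lower alpha => smoother sparkline

function addExponentiallyWeightedToHistory(value, valuesArray) {
  if (valuesArray.length === 0) {
    return valuesArray.concat([value]);
  }
  // Blend the new sample with the previous smoothed value.
  var movingAverage = value * MOVING_AVERAGE_ALPHA
      + valuesArray[valuesArray.length - 1] * (1 - MOVING_AVERAGE_ALPHA);
  if (value < 1) {
    movingAverage = 0; // snap near-zero rates flat so idle periods read as idle
  }
  return valuesArray.concat([movingAverage])
      .slice(Math.max(valuesArray.length - MAX_HISTORY, 0));
}
```
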
}\n if (count > 1000) {\n count /= 1000;\n unit = \"T\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"Q\";\n }\n return precisionRound(count) + unit;\n}\n\nfunction formatDataSizeBytes(size) {\n return formatDataSizeMinUnit(size, \"\");\n}\n\nfunction formatDataSize(size) {\n return formatDataSizeMinUnit(size, \"B\");\n}\n\nfunction formatDataSizeMinUnit(size, minUnit) {\n var unit = minUnit;\n if (size === 0) {\n return \"0\" + unit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"K\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"M\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"G\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"T\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"P\" + minUnit;\n }\n return precisionRound(size) + unit;\n}\n\nfunction parseDataSize(value) {\n var DATA_SIZE_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n var match = DATA_SIZE_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"B\":\n return number;\n case \"kB\":\n return number * Math.pow(2, 10);\n case \"MB\":\n return number * Math.pow(2, 20);\n case \"GB\":\n return number * Math.pow(2, 30);\n case \"TB\":\n return number * Math.pow(2, 40);\n case \"PB\":\n return number * Math.pow(2, 50);\n default:\n return null;\n }\n}\n\nfunction parseDuration(value) {\n var DURATION_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n\n var match = DURATION_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"ns\":\n return number / 1000000.0;\n case \"us\":\n return number / 1000.0;\n case \"ms\":\n return number;\n case \"s\":\n return number * 1000;\n case \"m\":\n return number * 1000 * 60;\n case \"h\":\n return number * 1000 * 60 * 60;\n case \"d\":\n return number * 1000 * 60 * 60 * 24;\n default:\n return null;\n }\n}\n\nfunction formatShortTime(date) {\n var hours = date.getHours() % 12 || 12;\n var minutes = (date.getMinutes() < 10 ? \"0\" : \"\") + date.getMinutes();\n return hours + \":\" + minutes + (date.getHours() >= 12 ? \"pm\" : \"am\");\n}\n\nfunction formatShortDateTime(date) {\n var year = date.getFullYear();\n var month = \"\" + (date.getMonth() + 1);\n var dayOfMonth = \"\" + date.getDate();\n return year + \"-\" + (month[1] ? month : \"0\" + month[0]) + \"-\" + (dayOfMonth[1] ? 
dayOfMonth : \"0\" + dayOfMonth[0]) + \" \" + formatShortTime(date);\n}\n\n//# sourceURL=webpack:///./utils.js?"); +eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.GLYPHICON_HIGHLIGHT = exports.GLYPHICON_DEFAULT = undefined;\nexports.getQueryStateColor = getQueryStateColor;\nexports.getStageStateColor = getStageStateColor;\nexports.getHumanReadableState = getHumanReadableState;\nexports.getProgressBarPercentage = getProgressBarPercentage;\nexports.getProgressBarTitle = getProgressBarTitle;\nexports.isQueryEnded = isQueryEnded;\nexports.addToHistory = addToHistory;\nexports.addExponentiallyWeightedToHistory = addExponentiallyWeightedToHistory;\nexports.initializeGraph = initializeGraph;\nexports.initializeSvg = initializeSvg;\nexports.truncateString = truncateString;\nexports.getStageNumber = getStageNumber;\nexports.getTaskIdSuffix = getTaskIdSuffix;\nexports.getFullSplitIdSuffix = getFullSplitIdSuffix;\nexports.getTaskNumber = getTaskNumber;\nexports.getFirstParameter = getFirstParameter;\nexports.getHostname = getHostname;\nexports.getPort = getPort;\nexports.getHostAndPort = getHostAndPort;\nexports.computeRate = computeRate;\nexports.precisionRound = precisionRound;\nexports.formatDurationMs = formatDurationMs;\nexports.formatDurationNs = formatDurationNs;\nexports.formatNumber = formatNumber;\nexports.formatRows = formatRows;\nexports.formatCount = formatCount;\nexports.formatDataSizeBytes = formatDataSizeBytes;\nexports.formatDataSize = formatDataSize;\nexports.parseDataSize = parseDataSize;\nexports.parseDuration = parseDuration;\nexports.formatShortTime = formatShortTime;\nexports.formatShortDateTime = formatShortDateTime;\n\nvar _dagreD = __webpack_require__(/*! dagre-d3 */ \"./node_modules/dagre-d3/index.js\");\n\nvar dagreD3 = _interopRequireWildcard(_dagreD);\n\nvar _d = __webpack_require__(/*! 
d3 */ \"./node_modules/d3/index.js\");\n\nvar d3 = _interopRequireWildcard(_d);\n\nfunction _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } }\n\n// Query display\n// =============\n\n/*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\nvar GLYPHICON_DEFAULT = exports.GLYPHICON_DEFAULT = { color: '#1edcff' };\nvar GLYPHICON_HIGHLIGHT = exports.GLYPHICON_HIGHLIGHT = { color: '#999999' };\n\nvar STATE_COLOR_MAP = {\n QUEUED: '#7bb3fb',\n RUNNING: '#265cdf',\n PLANNING: '#674f98',\n FINISHED: '#22b647',\n BLOCKED: '#61003b',\n USER_ERROR: '#9a7d66',\n CANCELED: '#858959',\n INSUFFICIENT_RESOURCES: '#7f5b72',\n EXTERNAL_ERROR: '#ca7640',\n UNKNOWN_ERROR: '#943524'\n};\n\nfunction getQueryStateColor(query) {\n switch (query.state) {\n case \"QUEUED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"PLANNING\":\n return STATE_COLOR_MAP.PLANNING;\n case \"STARTING\":\n case \"FINISHING\":\n case \"RUNNING\":\n if (query.queryStats && query.queryStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FAILED\":\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === 'USER_CANCELED') {\n return STATE_COLOR_MAP.CANCELED;\n }\n return STATE_COLOR_MAP.USER_ERROR;\n case \"EXTERNAL\":\n return STATE_COLOR_MAP.EXTERNAL_ERROR;\n case \"INSUFFICIENT_RESOURCES\":\n return STATE_COLOR_MAP.INSUFFICIENT_RESOURCES;\n default:\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n }\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n default:\n return STATE_COLOR_MAP.QUEUED;\n }\n}\n\nfunction getStageStateColor(stage) {\n switch (stage.state) {\n case \"PLANNED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"SCHEDULING\":\n case \"SCHEDULING_SPLITS\":\n case \"SCHEDULED\":\n return STATE_COLOR_MAP.PLANNING;\n case \"RUNNING\":\n if (stage.stageStats && stage.stageStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n case \"CANCELED\":\n case \"ABORTED\":\n return STATE_COLOR_MAP.CANCELED;\n case \"FAILED\":\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n default:\n return \"#b5b5b5\";\n }\n}\n\n// This relies on the fact that BasicQueryInfo and QueryInfo have all the fields\n// necessary to compute this string, and that these fields are consistently named.\nfunction getHumanReadableState(query) {\n if (query.state === \"RUNNING\") {\n var title = \"RUNNING\";\n\n if (query.scheduled && query.queryStats.totalDrivers > 0 && query.queryStats.runningDrivers >= 0) {\n if (query.queryStats.fullyBlocked) {\n title = \"BLOCKED\";\n\n if (query.queryStats.blockedReasons && query.queryStats.blockedReasons.length > 0) {\n title += \" (\" + query.queryStats.blockedReasons.join(\", \") + \")\";\n }\n }\n\n if (query.memoryPool === \"reserved\") {\n title += 
\" (RESERVED)\";\n }\n\n return title;\n }\n }\n\n if (query.state === \"FAILED\") {\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === \"USER_CANCELED\") {\n return \"USER CANCELED\";\n }\n return \"USER ERROR\";\n case \"INTERNAL_ERROR\":\n return \"INTERNAL ERROR\";\n case \"INSUFFICIENT_RESOURCES\":\n return \"INSUFFICIENT RESOURCES\";\n case \"EXTERNAL\":\n return \"EXTERNAL ERROR\";\n }\n }\n\n return query.state;\n}\n\nfunction getProgressBarPercentage(query) {\n var progress = query.queryStats.progressPercentage;\n\n // progress bars should appear 'full' when query progress is not meaningful\n if (!progress || query.state !== \"RUNNING\") {\n return 100;\n }\n\n return Math.round(progress);\n}\n\nfunction getProgressBarTitle(query) {\n if (query.queryStats.progressPercentage && query.state === \"RUNNING\") {\n return getHumanReadableState(query) + \" (\" + getProgressBarPercentage(query) + \"%)\";\n }\n\n return getHumanReadableState(query);\n}\n\nfunction isQueryEnded(query) {\n return [\"FINISHED\", \"FAILED\", \"CANCELED\"].indexOf(query.state) > -1;\n}\n\n// Sparkline-related functions\n// ===========================\n\n// display at most 5 minutes worth of data on the sparklines\nvar MAX_HISTORY = 60 * 5;\n// alpha param of exponentially weighted moving average. picked arbitrarily - lower values means more smoothness\nvar MOVING_AVERAGE_ALPHA = 0.2;\n\nfunction addToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n return valuesArray.concat([value]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\nfunction addExponentiallyWeightedToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n\n var movingAverage = value * MOVING_AVERAGE_ALPHA + valuesArray[valuesArray.length - 1] * (1 - MOVING_AVERAGE_ALPHA);\n if (value < 1) {\n movingAverage = 0;\n }\n\n return valuesArray.concat([movingAverage]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\n// DagreD3 Graph-related functions\n// ===============================\n\nfunction initializeGraph() {\n return new dagreD3.graphlib.Graph({ compound: true }).setGraph({ rankdir: 'BT' }).setDefaultEdgeLabel(function () {\n return {};\n });\n}\n\nfunction initializeSvg(selector) {\n var svg = d3.select(selector);\n svg.append(\"g\");\n\n return svg;\n}\n\n// Utility functions\n// =================\n\nfunction truncateString(inputString, length) {\n if (inputString && inputString.length > length) {\n return inputString.substring(0, length) + \"...\";\n }\n\n return inputString;\n}\n\nfunction getStageNumber(stageId) {\n return Number.parseInt(stageId.slice(stageId.indexOf('.') + 1, stageId.length));\n}\n\nfunction getTaskIdSuffix(taskId) {\n return taskId.slice(taskId.indexOf('.') + 1, taskId.length);\n}\n\nfunction getFullSplitIdSuffix(driverId) {\n return driverId.substring(driverId.indexOf('.') + 1);\n}\n\nfunction getTaskNumber(taskId) {\n return Number.parseInt(getTaskIdSuffix(getTaskIdSuffix(taskId)));\n}\n\nfunction getFirstParameter(searchString) {\n var searchText = searchString.substring(1);\n\n if (searchText.indexOf('&') !== -1) {\n return searchText.substring(0, searchText.indexOf('&'));\n }\n\n return searchText;\n}\n\nfunction getHostname(taskLocation) {\n var hostname = taskLocation.nodeServer.host;\n //\n // var hostname = new URL(url).hostname;\n if (hostname.charAt(0) == '[' && hostname.charAt(hostname.length - 1) == ']') {\n hostname = hostname.substr(1, 
hostname.length - 2);\n }\n return hostname;\n}\n\nfunction getPort(taskLocation) {\n return taskLocation.nodeServer.httpPort;\n // return new URL(url).port;\n}\n\nfunction getHostAndPort(taskLocation) {\n // var url = new URL(taskLocation.nodeServer.uri);\n return taskLocation.nodeServer.host + \":\" + taskLocation.nodeServer.httpPort;\n}\n\nfunction computeRate(count, ms) {\n if (ms === 0) {\n return 0;\n }\n return count / ms * 1000.0;\n}\n\nfunction precisionRound(n) {\n if (n < 10) {\n return n.toFixed(2);\n }\n if (n < 100) {\n return n.toFixed(1);\n }\n return Math.round(n).toString();\n}\n\nfunction formatDurationMs(duration) {\n var unit = \"ms\";\n if (duration > 1000) {\n duration /= 1000;\n unit = \"s\";\n }\n if (unit === \"s\" && duration > 60) {\n duration /= 60;\n unit = \"m\";\n }\n if (unit === \"m\" && duration > 60) {\n duration /= 60;\n unit = \"h\";\n }\n if (unit === \"h\" && duration > 24) {\n duration /= 24;\n unit = \"d\";\n }\n if (unit === \"d\" && duration > 7) {\n duration /= 7;\n unit = \"w\";\n }\n return precisionRound(duration) + unit;\n}\n\nfunction formatDurationNs(duration) {\n var unit = \"ns\";\n if (duration > 1000) {\n duration /= 1000;\n unit = \"us\";\n }\n if (duration > 1000) {\n duration /= 1000;\n unit = \"ms\";\n }\n if (duration > 1000) {\n duration /= 1000;\n unit = \"s\";\n }\n if (unit === \"s\" && duration > 60) {\n duration /= 60;\n unit = \"m\";\n }\n if (unit === \"m\" && duration > 60) {\n duration /= 60;\n unit = \"h\";\n }\n if (unit === \"h\" && duration > 24) {\n duration /= 24;\n unit = \"d\";\n }\n if (unit === \"d\" && duration > 7) {\n duration /= 7;\n unit = \"w\";\n }\n return precisionRound(duration) + unit;\n}\n\nfunction formatNumber(num) {\n return num.toLocaleString();\n}\n\nfunction formatRows(count) {\n if (count === 1) {\n return \"1 row\";\n }\n\n return formatCount(count) + \" rows\";\n}\n\nfunction formatCount(count) {\n var unit = \"\";\n if (count > 1000) {\n count /= 1000;\n unit = \"K\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"M\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"B\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"T\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"Q\";\n }\n return precisionRound(count) + unit;\n}\n\nfunction formatDataSizeBytes(size) {\n return formatDataSizeMinUnit(size, \"\");\n}\n\nfunction formatDataSize(size) {\n return formatDataSizeMinUnit(size, \"B\");\n}\n\nfunction formatDataSizeMinUnit(size, minUnit) {\n var unit = minUnit;\n if (size === 0) {\n return \"0\" + unit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"K\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"M\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"G\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"T\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"P\" + minUnit;\n }\n return precisionRound(size) + unit;\n}\n\nfunction parseDataSize(value) {\n var DATA_SIZE_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n var match = DATA_SIZE_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"B\":\n return number;\n case \"kB\":\n return number * Math.pow(2, 10);\n case \"MB\":\n return number * Math.pow(2, 20);\n case \"GB\":\n return number * Math.pow(2, 30);\n case \"TB\":\n return number * Math.pow(2, 40);\n case \"PB\":\n return number * Math.pow(2, 50);\n default:\n return null;\n }\n}\n\nfunction 
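
The visible additions on this side of the utils.js hunk are `formatDurationNs` (a nanosecond-based twin of the renamed `formatDurationMs`), `formatNumber`, and `getFullSplitIdSuffix`. The duration formatters walk a fixed ladder of units, dividing while each threshold is exceeded. An equivalent table-driven form of `formatDurationNs` (a rewrite for readability, not the bundled code itself):

```javascript
// precisionRound matches the bundled helper: 2 decimals under 10,
// 1 decimal under 100, whole numbers above.
function precisionRound(n) {
  if (n < 10) return n.toFixed(2);
  if (n < 100) return n.toFixed(1);
  return Math.round(n).toString();
}

function formatDurationNs(d) {
  // Climb ns -> us -> ms -> s -> m -> h -> d -> w while each threshold passes.
  var steps = [[1000, "us"], [1000, "ms"], [1000, "s"],
               [60, "m"], [60, "h"], [24, "d"], [7, "w"]];
  var unit = "ns";
  for (var i = 0; i < steps.length && d > steps[i][0]; i++) {
    d /= steps[i][0];
    unit = steps[i][1];
  }
  return precisionRound(d) + unit;
}

// e.g. formatDurationNs(90e9) === "1.50m"
```
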
parseDuration(value) {\n var DURATION_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n\n var match = DURATION_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"ns\":\n return number / 1000000.0;\n case \"us\":\n return number / 1000.0;\n case \"ms\":\n return number;\n case \"s\":\n return number * 1000;\n case \"m\":\n return number * 1000 * 60;\n case \"h\":\n return number * 1000 * 60 * 60;\n case \"d\":\n return number * 1000 * 60 * 60 * 24;\n default:\n return null;\n }\n}\n\nfunction formatShortTime(date) {\n var hours = date.getHours() % 12 || 12;\n var minutes = (date.getMinutes() < 10 ? \"0\" : \"\") + date.getMinutes();\n return hours + \":\" + minutes + (date.getHours() >= 12 ? \"pm\" : \"am\");\n}\n\nfunction formatShortDateTime(date) {\n var year = date.getFullYear();\n var month = \"\" + (date.getMonth() + 1);\n var dayOfMonth = \"\" + date.getDate();\n return year + \"-\" + (month[1] ? month : \"0\" + month[0]) + \"-\" + (dayOfMonth[1] ? dayOfMonth : \"0\" + dayOfMonth[0]) + \" \" + formatShortTime(date);\n}\n\n//# sourceURL=webpack:///./utils.js?"); /***/ }) diff --git a/polardbx-executor/src/main/resources/webapp/dist/plan.js b/polardbx-executor/src/main/resources/webapp/dist/plan.js index faf10d036..161649a65 100644 --- a/polardbx-executor/src/main/resources/webapp/dist/plan.js +++ b/polardbx-executor/src/main/resources/webapp/dist/plan.js @@ -94,7 +94,7 @@ /***/ (function(module, exports, __webpack_require__) { "use strict"; -eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.LivePlan = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nvar _server = __webpack_require__(/*! react-dom/server */ \"./node_modules/react-dom/server.browser.js\");\n\nvar _server2 = _interopRequireDefault(_server);\n\nvar _dagreD = __webpack_require__(/*! dagre-d3 */ \"./node_modules/dagre-d3/index.js\");\n\nvar dagreD3 = _interopRequireWildcard(_dagreD);\n\nvar _d = __webpack_require__(/*! d3 */ \"./node_modules/d3/index.js\");\n\nvar d3 = _interopRequireWildcard(_d);\n\nvar _utils = __webpack_require__(/*! ../utils */ \"./utils.js\");\n\nvar _QueryHeader = __webpack_require__(/*! ./QueryHeader */ \"./components/QueryHeader.jsx\");\n\nfunction _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } }\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\nvar StageStatistics = function (_React$Component) {\n _inherits(StageStatistics, _React$Component);\n\n function StageStatistics() {\n _classCallCheck(this, StageStatistics);\n\n return _possibleConstructorReturn(this, (StageStatistics.__proto__ || Object.getPrototypeOf(StageStatistics)).apply(this, arguments));\n }\n\n _createClass(StageStatistics, [{\n key: \"render\",\n value: function render() {\n var stage = this.props.stage;\n var stats = this.props.stage.stageStats;\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"h3\",\n { className: \"margin-top: 0\" },\n \"Stage \",\n stage.id\n ),\n stage.state,\n _react2.default.createElement(\"hr\", null),\n \"CPU: \",\n stats.totalCpuTime,\n _react2.default.createElement(\"br\", null),\n stats.fullyBlocked ? 
_react2.default.createElement(\n \"div\",\n { style: { color: '#ff0000' } },\n \"Blocked: \",\n stats.totalBlockedTime,\n \" \"\n ) : _react2.default.createElement(\n \"div\",\n null,\n \"Blocked: \",\n stats.totalBlockedTime,\n \" \"\n ),\n \"Memory: \",\n stats.totalMemoryReservation,\n _react2.default.createElement(\"br\", null),\n \"PipelineExecs: \",\n \"Q:\" + stats.queuedPipelineExecs + \", R:\" + stats.runningPipelineExecs + \", F:\" + stats.completedPipelineExecs,\n _react2.default.createElement(\"hr\", null),\n \"Input: \",\n stats.processedInputDataSize + \" / \" + (0, _utils.formatRows)(stats.processedInputPositions)\n )\n );\n }\n }], [{\n key: \"getStages\",\n value: function getStages(queryInfo) {\n var stages = new Map();\n StageStatistics.flattenStage(queryInfo.outputStage, stages);\n return stages;\n }\n }, {\n key: \"flattenStage\",\n value: function flattenStage(stageInfo, result) {\n stageInfo.subStages.forEach(function (stage) {\n StageStatistics.flattenStage(stage, result);\n });\n\n var nodes = new Map();\n StageStatistics.flattenNode(result, JSON.parse(stageInfo.plan.relNodeJson).rels, nodes);\n\n result.set(stageInfo.plan.id, {\n stageId: stageInfo.stageId,\n id: stageInfo.plan.id,\n root: stageInfo.plan.rootId,\n stageStats: stageInfo.stageStats,\n state: stageInfo.state,\n nodes: nodes\n });\n }\n }, {\n key: \"flattenNode\",\n value: function flattenNode(stages, node, result) {\n\n node.forEach(function (element) {\n var loadingMessage = element.relOp + element.id;\n if (element.tableNames != undefined) {\n loadingMessage = loadingMessage + \":\" + element.tableNames;\n }\n result.set(element.relatedId, {\n id: element.relatedId,\n name: element.relOp,\n identifier: loadingMessage,\n sources: element.sources,\n remoteSources: element.fragmentIds\n });\n });\n node.last;\n // result.set(node.id, {\n // id: node.id,\n // name: node['name'],\n // identifier: node['identifier'],\n // details: node['details'],\n // sources: node.children.map(node => node.id),\n // remoteSources: node.remoteSources,\n // });\n //\n // node.children.forEach(function (child) {\n // StageStatistics.flattenNode(stages, child, result);\n // });\n }\n }]);\n\n return StageStatistics;\n}(_react2.default.Component);\n\nvar PlanNode = function (_React$Component2) {\n _inherits(PlanNode, _React$Component2);\n\n function PlanNode(props) {\n _classCallCheck(this, PlanNode);\n\n return _possibleConstructorReturn(this, (PlanNode.__proto__ || Object.getPrototypeOf(PlanNode)).call(this, props));\n }\n\n _createClass(PlanNode, [{\n key: \"render\",\n value: function render() {\n return _react2.default.createElement(\n \"div\",\n { style: { color: \"#000\" }, \"data-toggle\": \"tooltip\", \"data-placement\": \"bottom\", \"data-container\": \"body\",\n \"data-html\": \"true\",\n title: \"

\" + this.props.name + \"

\" + this.props.identifier },\n _react2.default.createElement(\n \"strong\",\n null,\n this.props.name\n ),\n _react2.default.createElement(\n \"div\",\n null,\n (0, _utils.truncateString)(this.props.identifier, 35)\n )\n );\n }\n }]);\n\n return PlanNode;\n}(_react2.default.Component);\n\nvar LivePlan = exports.LivePlan = function (_React$Component3) {\n _inherits(LivePlan, _React$Component3);\n\n function LivePlan(props) {\n _classCallCheck(this, LivePlan);\n\n var _this3 = _possibleConstructorReturn(this, (LivePlan.__proto__ || Object.getPrototypeOf(LivePlan)).call(this, props));\n\n _this3.state = {\n initialized: false,\n ended: false,\n\n query: null,\n\n graph: (0, _utils.initializeGraph)(),\n svg: null,\n render: new dagreD3.render()\n };\n return _this3;\n }\n\n _createClass(LivePlan, [{\n key: \"resetTimer\",\n value: function resetTimer() {\n clearTimeout(this.timeoutId);\n // stop refreshing when query finishes or fails\n if (this.state.query === null || !this.state.ended) {\n this.timeoutId = setTimeout(this.refreshLoop.bind(this), 5000);\n }\n }\n }, {\n key: \"refreshLoop\",\n value: function refreshLoop() {\n var _this4 = this;\n\n clearTimeout(this.timeoutId); // to stop multiple series of refreshLoop from going on simultaneously\n fetch('/v1/query/' + this.props.queryId).then(function (response) {\n return response.json();\n }).then(function (query) {\n _this4.setState({\n query: query,\n\n initialized: true,\n ended: query.finalQueryInfo\n });\n _this4.resetTimer();\n }).catch(function () {\n _this4.setState({\n initialized: true\n });\n _this4.resetTimer();\n });\n }\n }, {\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.refreshLoop.bind(this)();\n }\n }, {\n key: \"updateD3Stage\",\n value: function updateD3Stage(stage, graph, allStages) {\n var clusterId = stage.stageId;\n var stageRootNodeId = \"stage-\" + stage.id + \"-root\";\n var color = (0, _utils.getStageStateColor)(stage);\n\n graph.setNode(clusterId, { style: 'fill: ' + color, labelStyle: 'fill: #fff' });\n\n // this is a non-standard use of ReactDOMServer, but it's the cleanest way to unify DagreD3 with React\n var html = _server2.default.renderToString(_react2.default.createElement(StageStatistics, { key: stage.id, stage: stage }));\n\n graph.setNode(stageRootNodeId, { class: \"stage-stats\", label: html, labelType: \"html\" });\n graph.setParent(stageRootNodeId, clusterId);\n graph.setEdge(\"node-\" + stage.root, stageRootNodeId, { style: \"visibility: hidden\" });\n\n stage.nodes.forEach(function (node) {\n var nodeId = \"node-\" + node.id;\n var nodeHtml = _server2.default.renderToString(_react2.default.createElement(PlanNode, node));\n\n graph.setNode(nodeId, { label: nodeHtml, style: 'fill: #fff', labelType: \"html\" });\n graph.setParent(nodeId, clusterId);\n\n node.sources.forEach(function (source) {\n graph.setEdge(\"node-\" + source, nodeId, { class: \"plan-edge\", arrowheadClass: \"plan-arrowhead\" });\n });\n\n if (node.remoteSources != undefined && node.remoteSources.length > 0) {\n graph.setNode(nodeId, { label: '', shape: \"circle\" });\n\n node.remoteSources.forEach(function (sourceId) {\n var source = allStages.get(sourceId);\n if (source) {\n var sourceStats = source.stageStats;\n graph.setEdge(\"stage-\" + sourceId + \"-root\", nodeId, {\n class: \"plan-edge\",\n style: \"stroke-width: 4px\",\n arrowheadClass: \"plan-arrowhead\",\n label: sourceStats.outputDataSize + \" / \" + (0, _utils.formatRows)(sourceStats.outputPositions),\n labelStyle: \"color: #fff; 
font-weight: bold; font-size: 24px;\",\n labelType: \"html\"\n });\n }\n });\n }\n });\n }\n }, {\n key: \"updateD3Graph\",\n value: function updateD3Graph() {\n var _this5 = this;\n\n if (!this.state.svg) {\n this.setState({\n svg: (0, _utils.initializeSvg)(\"#plan-canvas\")\n });\n return;\n }\n\n if (!this.state.query) {\n return;\n }\n\n var graph = this.state.graph;\n var stages = StageStatistics.getStages(this.state.query);\n stages.forEach(function (stage) {\n _this5.updateD3Stage(stage, graph, stages);\n });\n\n var inner = d3.select(\"#plan-canvas g\");\n this.state.render(inner, graph);\n\n var svg = this.state.svg;\n svg.selectAll(\"g.cluster\").on(\"click\", LivePlan.handleStageClick);\n\n var width = parseInt(window.getComputedStyle(document.getElementById(\"live-plan\"), null).getPropertyValue(\"width\").replace(/px/, \"\")) - 50;\n var height = parseInt(window.getComputedStyle(document.getElementById(\"live-plan\"), null).getPropertyValue(\"height\").replace(/px/, \"\")) - 50;\n\n var graphHeight = graph.graph().height + 100;\n var graphWidth = graph.graph().width + 100;\n if (this.state.ended) {\n // Zoom doesn't deal well with DOM changes\n var initialScale = Math.min(width / graphWidth, height / graphHeight);\n var zoom = d3.zoom().scaleExtent([initialScale, 1]).on(\"zoom\", function () {\n inner.attr(\"transform\", d3.event.transform);\n });\n\n svg.call(zoom);\n svg.call(zoom.transform, d3.zoomIdentity.translate((width - graph.graph().width * initialScale) / 2, 20).scale(initialScale));\n svg.attr('height', height);\n svg.attr('width', width);\n } else {\n svg.attr('height', graphHeight);\n svg.attr('width', graphWidth);\n }\n }\n }, {\n key: \"componentDidUpdate\",\n value: function componentDidUpdate() {\n this.updateD3Graph();\n //$FlowFixMe\n $('[data-toggle=\"tooltip\"]').tooltip();\n }\n }, {\n key: \"render\",\n value: function render() {\n var query = this.state.query;\n\n if (query === null || this.state.initialized === false) {\n var label = _react2.default.createElement(\n \"div\",\n { className: \"loader\" },\n \"Loading...\"\n );\n if (this.state.initialized) {\n label = \"Query not found\";\n }\n return _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n label\n )\n )\n );\n }\n\n var loadingMessage = null;\n if (query && !query.outputStage) {\n loadingMessage = _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"Live plan graph will appear automatically when query starts running.\"\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"loader\" },\n \"Loading...\"\n )\n )\n );\n }\n\n // TODO: Refactor components to move refreshLoop to parent rather than using this property\n var queryHeader = this.props.isEmbedded ? null : _react2.default.createElement(_QueryHeader.QueryHeader, { query: query });\n return _react2.default.createElement(\n \"div\",\n null,\n queryHeader,\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n loadingMessage,\n _react2.default.createElement(\n \"div\",\n { id: \"live-plan\", className: \"graph-container\" },\n _react2.default.createElement(\n \"div\",\n { className: \"pull-right\" },\n this.state.ended ? 
\"Scroll to zoom.\" : \"Zoom disabled while query is running.\",\n \" Click stage to view additional statistics\"\n ),\n _react2.default.createElement(\"svg\", { id: \"plan-canvas\" })\n )\n )\n )\n );\n }\n }], [{\n key: \"handleStageClick\",\n value: function handleStageClick(stageCssId) {\n window.open(\"stage.html?\" + stageCssId, '_blank');\n }\n }]);\n\n return LivePlan;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/LivePlan.jsx?"); +eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.LivePlan = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nvar _server = __webpack_require__(/*! react-dom/server */ \"./node_modules/react-dom/server.browser.js\");\n\nvar _server2 = _interopRequireDefault(_server);\n\nvar _dagreD = __webpack_require__(/*! dagre-d3 */ \"./node_modules/dagre-d3/index.js\");\n\nvar dagreD3 = _interopRequireWildcard(_dagreD);\n\nvar _d = __webpack_require__(/*! d3 */ \"./node_modules/d3/index.js\");\n\nvar d3 = _interopRequireWildcard(_d);\n\nvar _utils = __webpack_require__(/*! ../utils */ \"./utils.js\");\n\nvar _QueryHeader = __webpack_require__(/*! ./QueryHeader */ \"./components/QueryHeader.jsx\");\n\nfunction _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } }\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\nvar StageStatistics = function (_React$Component) {\n _inherits(StageStatistics, _React$Component);\n\n function StageStatistics() {\n _classCallCheck(this, StageStatistics);\n\n return _possibleConstructorReturn(this, (StageStatistics.__proto__ || Object.getPrototypeOf(StageStatistics)).apply(this, arguments));\n }\n\n _createClass(StageStatistics, [{\n key: \"render\",\n value: function render() {\n var stage = this.props.stage;\n var stats = this.props.stage.stageStats;\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"h3\",\n { className: \"margin-top: 0\" },\n \"Stage \",\n stage.id\n ),\n stage.state,\n _react2.default.createElement(\"hr\", null),\n \"CpuTime: \",\n stats.totalCpuTimeNanos,\n \" ms\",\n _react2.default.createElement(\"br\", null),\n stats.fullyBlocked ? _react2.default.createElement(\n \"div\",\n { style: { color: '#ff0000' } },\n \"BlockedTime: \",\n (0, _utils.formatDurationNs)(stats.totalBlockedTimeNanos),\n \" \"\n ) : _react2.default.createElement(\n \"div\",\n null,\n \"BlockedTime: \",\n (0, _utils.formatDurationNs)(stats.totalBlockedTimeNanos),\n \" \"\n ),\n _react2.default.createElement(\"br\", null),\n \"PipelineExecs: \",\n \"Q:\" + stats.queuedPipelineExecs + \", R:\" + stats.runningPipelineExecs + \", F:\" + stats.completedPipelineExecs,\n _react2.default.createElement(\"hr\", null),\n \"Input: \",\n (0, _utils.formatRows)(stats.processedInputPositions)\n )\n );\n }\n }], [{\n key: \"getStages\",\n value: function getStages(queryInfo) {\n var stages = new Map();\n StageStatistics.flattenStage(queryInfo.outputStage, stages);\n return stages;\n }\n }, {\n key: \"flattenStage\",\n value: function flattenStage(stageInfo, result) {\n stageInfo.subStages.forEach(function (stage) {\n StageStatistics.flattenStage(stage, result);\n });\n\n var nodes = new Map();\n StageStatistics.flattenNode(result, JSON.parse(stageInfo.plan.relNodeJson).rels, nodes);\n\n result.set(stageInfo.plan.id, {\n stageId: stageInfo.stageId,\n id: stageInfo.plan.id,\n root: stageInfo.plan.rootId,\n stageStats: stageInfo.stageStats,\n state: stageInfo.state,\n nodes: nodes\n });\n }\n }, {\n key: \"flattenNode\",\n value: function flattenNode(stages, node, result) {\n\n node.forEach(function (element) {\n var loadingMessage = element.relOp + element.id;\n if (element.tableNames != undefined) {\n loadingMessage = loadingMessage + \":\" + element.tableNames;\n }\n result.set(element.relatedId, {\n id: element.relatedId,\n name: element.relOp,\n identifier: loadingMessage,\n sources: element.sources,\n remoteSources: element.fragmentIds\n });\n });\n // node.last\n // result.set(node.id, {\n // id: node.id,\n // name: node['name'],\n // identifier: node['identifier'],\n // details: node['details'],\n // sources: node.children.map(node => 
node.id),\n // remoteSources: node.remoteSources,\n // });\n //\n // node.children.forEach(function (child) {\n // StageStatistics.flattenNode(stages, child, result);\n // });\n }\n }]);\n\n return StageStatistics;\n}(_react2.default.Component);\n\nvar PlanNode = function (_React$Component2) {\n _inherits(PlanNode, _React$Component2);\n\n function PlanNode(props) {\n _classCallCheck(this, PlanNode);\n\n return _possibleConstructorReturn(this, (PlanNode.__proto__ || Object.getPrototypeOf(PlanNode)).call(this, props));\n }\n\n _createClass(PlanNode, [{\n key: \"render\",\n value: function render() {\n return _react2.default.createElement(\n \"div\",\n { style: { color: \"#000\" }, \"data-toggle\": \"tooltip\", \"data-placement\": \"bottom\", \"data-container\": \"body\",\n \"data-html\": \"true\",\n title: \"

\" + this.props.name + \"

\" + this.props.identifier },\n _react2.default.createElement(\n \"strong\",\n null,\n this.props.name\n ),\n _react2.default.createElement(\n \"div\",\n null,\n (0, _utils.truncateString)(this.props.identifier, 35)\n )\n );\n }\n }]);\n\n return PlanNode;\n}(_react2.default.Component);\n\nvar LivePlan = exports.LivePlan = function (_React$Component3) {\n _inherits(LivePlan, _React$Component3);\n\n function LivePlan(props) {\n _classCallCheck(this, LivePlan);\n\n var _this3 = _possibleConstructorReturn(this, (LivePlan.__proto__ || Object.getPrototypeOf(LivePlan)).call(this, props));\n\n _this3.state = {\n initialized: false,\n ended: false,\n\n query: null,\n\n graph: (0, _utils.initializeGraph)(),\n svg: null,\n render: new dagreD3.render()\n };\n return _this3;\n }\n\n _createClass(LivePlan, [{\n key: \"resetTimer\",\n value: function resetTimer() {\n clearTimeout(this.timeoutId);\n // stop refreshing when query finishes or fails\n if (this.state.query === null || !this.state.ended) {\n this.timeoutId = setTimeout(this.refreshLoop.bind(this), 5000);\n }\n }\n }, {\n key: \"refreshLoop\",\n value: function refreshLoop() {\n var _this4 = this;\n\n clearTimeout(this.timeoutId); // to stop multiple series of refreshLoop from going on simultaneously\n fetch('/v1/query/stats/' + this.props.queryId).then(function (response) {\n return response.json();\n }).then(function (query) {\n _this4.setState({\n query: query,\n\n initialized: true,\n ended: query.finalQueryInfo\n });\n _this4.resetTimer();\n }).catch(function () {\n _this4.setState({\n initialized: true\n });\n _this4.resetTimer();\n });\n }\n }, {\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.refreshLoop.bind(this)();\n }\n }, {\n key: \"updateD3Stage\",\n value: function updateD3Stage(stage, graph, allStages) {\n var clusterId = stage.stageId;\n var stageRootNodeId = \"stage-\" + stage.id + \"-root\";\n var color = (0, _utils.getStageStateColor)(stage);\n\n graph.setNode(clusterId, { style: 'fill: ' + color, labelStyle: 'fill: #fff' });\n\n // this is a non-standard use of ReactDOMServer, but it's the cleanest way to unify DagreD3 with React\n var html = _server2.default.renderToString(_react2.default.createElement(StageStatistics, { key: stage.id, stage: stage }));\n\n graph.setNode(stageRootNodeId, { class: \"stage-stats\", label: html, labelType: \"html\" });\n graph.setParent(stageRootNodeId, clusterId);\n graph.setEdge(\"node-\" + stage.root, stageRootNodeId, { style: \"visibility: hidden\" });\n\n var stageOperatorsMap = new Map();\n if (stage.stageStats.operatorSummaries) {\n stage.stageStats.operatorSummaries.forEach(function (opSummary) {\n stageOperatorsMap.set(opSummary.operatorId, opSummary);\n });\n }\n\n stage.nodes.forEach(function (node) {\n var nodeId = \"node-\" + node.id;\n var nodeHtml = _server2.default.renderToString(_react2.default.createElement(PlanNode, node));\n\n graph.setNode(nodeId, { label: nodeHtml, style: 'fill: #fff', labelType: \"html\" });\n graph.setParent(nodeId, clusterId);\n\n node.sources.forEach(function (source) {\n if (stageOperatorsMap.has(source)) {\n graph.setEdge(\"node-\" + source, nodeId, {\n class: \"plan-edge\",\n arrowheadClass: \"plan-arrowhead\",\n label: (0, _utils.formatRows)(stageOperatorsMap.get(source).outputRowCount),\n labelStyle: \"color: #fff; font-weight: bold; font-size: 16px;\",\n labelType: \"html\"\n });\n } else {\n graph.setEdge(\"node-\" + source, nodeId, { class: \"plan-edge\", arrowheadClass: \"plan-arrowhead\" });\n }\n });\n\n if 
(node.remoteSources !== undefined && node.remoteSources.length > 0) {\n graph.setNode(nodeId, { label: '', shape: \"circle\" });\n\n node.remoteSources.forEach(function (sourceId) {\n var source = allStages.get(sourceId);\n if (source) {\n var sourceStats = source.stageStats;\n graph.setEdge(\"stage-\" + sourceId + \"-root\", nodeId, {\n class: \"plan-edge\",\n style: \"stroke-width: 4px\",\n arrowheadClass: \"plan-arrowhead\",\n label: (0, _utils.formatRows)(sourceStats.outputPositions),\n labelStyle: \"color: #fff; font-weight: bold; font-size: 24px;\",\n labelType: \"html\"\n });\n }\n });\n }\n });\n }\n }, {\n key: \"updateD3Graph\",\n value: function updateD3Graph() {\n var _this5 = this;\n\n if (!this.state.svg) {\n this.setState({\n svg: (0, _utils.initializeSvg)(\"#plan-canvas\")\n });\n return;\n }\n\n if (!this.state.query) {\n return;\n }\n\n var graph = this.state.graph;\n var stages = StageStatistics.getStages(this.state.query);\n stages.forEach(function (stage) {\n _this5.updateD3Stage(stage, graph, stages);\n });\n\n var inner = d3.select(\"#plan-canvas g\");\n this.state.render(inner, graph);\n\n var svg = this.state.svg;\n svg.selectAll(\"g.cluster\").on(\"click\", LivePlan.handleStageClick);\n\n var width = parseInt(window.getComputedStyle(document.getElementById(\"live-plan\"), null).getPropertyValue(\"width\").replace(/px/, \"\")) - 50;\n var height = parseInt(window.getComputedStyle(document.getElementById(\"live-plan\"), null).getPropertyValue(\"height\").replace(/px/, \"\")) - 50;\n\n var graphHeight = graph.graph().height + 100;\n var graphWidth = graph.graph().width + 100;\n if (this.state.ended) {\n // Zoom doesn't deal well with DOM changes\n var initialScale = Math.min(width / graphWidth, height / graphHeight);\n var zoom = d3.zoom().scaleExtent([initialScale, 1]).on(\"zoom\", function () {\n inner.attr(\"transform\", d3.event.transform);\n });\n\n svg.call(zoom);\n svg.call(zoom.transform, d3.zoomIdentity.translate((width - graph.graph().width * initialScale) / 2, 20).scale(initialScale));\n svg.attr('height', height);\n svg.attr('width', width);\n } else {\n svg.attr('height', graphHeight);\n svg.attr('width', graphWidth);\n }\n }\n }, {\n key: \"componentDidUpdate\",\n value: function componentDidUpdate() {\n this.updateD3Graph();\n //$FlowFixMe\n $('[data-toggle=\"tooltip\"]').tooltip();\n }\n }, {\n key: \"render\",\n value: function render() {\n var query = this.state.query;\n\n if (query === null || this.state.initialized === false) {\n var label = _react2.default.createElement(\n \"div\",\n { className: \"loader\" },\n \"Loading...\"\n );\n if (this.state.initialized) {\n label = \"Query not found\";\n }\n return _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n label\n )\n )\n );\n }\n\n var loadingMessage = null;\n if (query && !query.outputStage) {\n loadingMessage = _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"Live plan graph will appear automatically when query starts running.\"\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"loader\" },\n \"Loading...\"\n )\n )\n );\n }\n\n // TODO: Refactor components to move refreshLoop to parent rather than using this property\n var queryHeader = this.props.isEmbedded ? 
null : _react2.default.createElement(_QueryHeader.QueryHeader, { query: query });\n return _react2.default.createElement(\n \"div\",\n null,\n queryHeader,\n _react2.default.createElement(\n \"div\",\n { className: \"info-container-next\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row \" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n loadingMessage,\n _react2.default.createElement(\n \"div\",\n { id: \"live-plan\", className: \"graph-container\" },\n _react2.default.createElement(\n \"div\",\n { className: \"pull-right\" },\n this.state.ended ? \"Scroll to zoom.\" : \"Zoom disabled while query is running.\",\n \" Click stage to view additional statistics\"\n ),\n _react2.default.createElement(\"svg\", { id: \"plan-canvas\" })\n )\n )\n )\n )\n );\n }\n }], [{\n key: \"handleStageClick\",\n value: function handleStageClick(stageCssId) {\n window.open(\"stage.html?\" + stageCssId, '_blank');\n }\n }]);\n\n return LivePlan;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/LivePlan.jsx?"); /***/ }), @@ -106,7 +106,7 @@ eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n}); /***/ (function(module, exports, __webpack_require__) { "use strict"; -eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.PageTitle = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\nvar PageTitle = exports.PageTitle = function (_React$Component) {\n _inherits(PageTitle, _React$Component);\n\n function PageTitle(props) {\n _classCallCheck(this, PageTitle);\n\n var _this = _possibleConstructorReturn(this, (PageTitle.__proto__ || Object.getPrototypeOf(PageTitle)).call(this, props));\n\n _this.state = {\n noConnection: false,\n lightShown: false,\n info: null,\n lastSuccess: Date.now(),\n modalShown: false,\n errorText: null\n };\n return _this;\n }\n\n _createClass(PageTitle, [{\n key: \"refreshLoop\",\n value: function refreshLoop() {\n var _this2 = this;\n\n clearTimeout(this.timeoutId);\n fetch(\"/v1/info\").then(function (response) {\n return response.json();\n }).then(function (info) {\n _this2.setState({\n info: info,\n noConnection: false,\n lastSuccess: Date.now(),\n modalShown: false\n });\n //$FlowFixMe$ Bootstrap 3 plugin\n $('#no-connection-modal').modal('hide');\n _this2.resetTimer();\n }).catch(function (error) {\n _this2.setState({\n noConnection: true,\n lightShown: !_this2.state.lightShown,\n errorText: error\n });\n _this2.resetTimer();\n\n if (!_this2.state.modalShown && (error || Date.now() - _this2.state.lastSuccess > 30 * 1000)) {\n //$FlowFixMe$ Bootstrap 3 plugin\n $('#no-connection-modal').modal();\n _this2.setState({ modalShown: true });\n }\n });\n }\n }, {\n key: \"resetTimer\",\n value: function resetTimer() {\n clearTimeout(this.timeoutId);\n this.timeoutId = setTimeout(this.refreshLoop.bind(this), 5000);\n }\n }, {\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.refreshLoop.bind(this)();\n }\n }, {\n key: \"renderStatusLight\",\n value: function renderStatusLight() {\n if (this.state.noConnection) {\n if (this.state.lightShown) {\n return _react2.default.createElement(\"span\", { className: \"status-light status-light-red\", id: \"status-indicator\" });\n } else {\n return _react2.default.createElement(\"span\", { className: \"status-light\", id: \"status-indicator\" });\n }\n }\n return _react2.default.createElement(\"span\", { className: \"status-light status-light-green\", id: \"status-indicator\" });\n }\n }, {\n key: \"render\",\n value: function render() {\n var info = this.state.info;\n if (!info) {\n return null;\n }\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"nav\",\n { className: \"navbar\" },\n _react2.default.createElement(\n \"div\",\n { className: \"container-fluid\" },\n _react2.default.createElement(\n \"div\",\n { className: \"navbar-header\" },\n _react2.default.createElement(\n \"table\",\n null,\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"/ui/\" },\n _react2.default.createElement(\"img\", { src: \"assets/logo.png\" })\n )\n ),\n 
_react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-brand\" },\n this.props.title\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { id: \"navbar\", className: \"navbar-collapse collapse\" },\n _react2.default.createElement(\n \"ul\",\n { className: \"nav navbar-nav navbar-right\" },\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Version\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\",\n id: \"version-number\" },\n info.nodeVersion.version\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Environment\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\", id: \"environment\" },\n info.environment\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"CoordinatorId\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\", id: \"workerId\" },\n info.workerId\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Uptime\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { \"data-toggle\": \"tooltip\", \"data-placement\": \"bottom\", title: \"Connection status\" },\n this.renderStatusLight()\n ),\n \"\\xA0\",\n _react2.default.createElement(\n \"span\",\n { className: \"text\", id: \"uptime\" },\n info.uptime\n )\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { id: \"no-connection-modal\", className: \"modal\", tabIndex: \"-1\", role: \"dialog\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-dialog modal-sm\", role: \"document\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-content\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"h4\",\n null,\n \"Unable to connect to server\"\n ),\n _react2.default.createElement(\n \"p\",\n null,\n this.state.errorText ? 
\"Error: \" + this.state.errorText : null\n )\n )\n )\n )\n )\n )\n );\n }\n }]);\n\n return PageTitle;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/PageTitle.jsx?"); +eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.PageTitle = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\nvar PageTitle = exports.PageTitle = function (_React$Component) {\n _inherits(PageTitle, _React$Component);\n\n function PageTitle(props) {\n _classCallCheck(this, PageTitle);\n\n var _this = _possibleConstructorReturn(this, (PageTitle.__proto__ || Object.getPrototypeOf(PageTitle)).call(this, props));\n\n _this.state = {\n noConnection: false,\n lightShown: false,\n info: null,\n lastSuccess: Date.now(),\n modalShown: false,\n errorText: null\n };\n return _this;\n }\n\n _createClass(PageTitle, [{\n key: \"refreshLoop\",\n value: function refreshLoop() {\n var _this2 = this;\n\n clearTimeout(this.timeoutId);\n fetch(\"/v1/info\").then(function (response) {\n return response.json();\n }).then(function (info) {\n _this2.setState({\n info: info,\n noConnection: false,\n lastSuccess: Date.now(),\n modalShown: false\n });\n //$FlowFixMe$ Bootstrap 3 plugin\n $('#no-connection-modal').modal('hide');\n _this2.resetTimer();\n }).catch(function (error) {\n _this2.setState({\n noConnection: true,\n lightShown: !_this2.state.lightShown,\n errorText: error\n });\n _this2.resetTimer();\n\n if (!_this2.state.modalShown && (error || Date.now() - _this2.state.lastSuccess > 30 * 1000)) {\n //$FlowFixMe$ Bootstrap 3 plugin\n $('#no-connection-modal').modal();\n _this2.setState({ modalShown: true });\n }\n });\n }\n }, {\n key: \"resetTimer\",\n value: function resetTimer() {\n clearTimeout(this.timeoutId);\n this.timeoutId = setTimeout(this.refreshLoop.bind(this), 5000);\n }\n }, {\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.refreshLoop.bind(this)();\n }\n }, {\n key: \"renderStatusLight\",\n value: function renderStatusLight() {\n if (this.state.noConnection) {\n if (this.state.lightShown) {\n return _react2.default.createElement(\"span\", { className: \"status-light status-light-red\", id: \"status-indicator\" });\n } else {\n return _react2.default.createElement(\"span\", { className: \"status-light\", id: \"status-indicator\" });\n }\n }\n return _react2.default.createElement(\"span\", { className: \"status-light status-light-green\", id: \"status-indicator\" });\n }\n }, {\n key: \"render\",\n value: function render() {\n var info = this.state.info;\n if (!info) {\n return null;\n }\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"nav\",\n { className: \"navbar\" },\n _react2.default.createElement(\n \"div\",\n { className: \"container-fluid\" },\n _react2.default.createElement(\n \"div\",\n { className: \"navbar-header\" },\n _react2.default.createElement(\n \"table\",\n null,\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"/ui/\" },\n _react2.default.createElement(\"img\", { src: \"assets/favicon.png\" })\n )\n 
),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-brand\" },\n this.props.title\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { id: \"navbar\", className: \"navbar-collapse collapse\" },\n _react2.default.createElement(\n \"ul\",\n { className: \"nav navbar-nav navbar-right\" },\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Version\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\",\n id: \"version-number\" },\n info.nodeVersion.version\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Environment\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\", id: \"environment\" },\n \"PolarDB-X\"\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Node\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\", id: \"workerId\" },\n info.workerId\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Uptime\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { \"data-toggle\": \"tooltip\", \"data-placement\": \"bottom\", title: \"Connection status\" },\n this.renderStatusLight()\n ),\n \"\\xA0\",\n _react2.default.createElement(\n \"span\",\n { className: \"text\", id: \"uptime\" },\n info.uptime\n )\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { id: \"no-connection-modal\", className: \"modal\", tabIndex: \"-1\", role: \"dialog\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-dialog modal-sm\", role: \"document\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-content\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"h4\",\n null,\n \"Unable to connect to server\"\n ),\n _react2.default.createElement(\n \"p\",\n null,\n this.state.errorText ? 
\"Error: \" + this.state.errorText : null\n )\n )\n )\n )\n )\n )\n );\n }\n }]);\n\n return PageTitle;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/PageTitle.jsx?"); /***/ }), @@ -118,7 +118,7 @@ eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n}); /***/ (function(module, exports, __webpack_require__) { "use strict"; -eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.QueryHeader = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nvar _utils = __webpack_require__(/*! ../utils */ \"./utils.js\");\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nvar QueryHeader = exports.QueryHeader = function (_React$Component) {\n _inherits(QueryHeader, _React$Component);\n\n function QueryHeader(props) {\n _classCallCheck(this, QueryHeader);\n\n return _possibleConstructorReturn(this, (QueryHeader.__proto__ || Object.getPrototypeOf(QueryHeader)).call(this, props));\n }\n\n _createClass(QueryHeader, [{\n key: \"renderProgressBar\",\n value: function renderProgressBar() {\n var query = this.props.query;\n var progressBarStyle = {\n width: (0, _utils.getProgressBarPercentage)(query) + \"%\",\n backgroundColor: (0, _utils.getQueryStateColor)(query)\n };\n\n if ((0, _utils.isQueryEnded)(query)) {\n return _react2.default.createElement(\n \"div\",\n { className: \"progress-large\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-bar progress-bar-info\", role: \"progressbar\",\n \"aria-valuenow\": (0, _utils.getProgressBarPercentage)(query), \"aria-valuemin\": \"0\", \"aria-valuemax\": \"100\",\n style: progressBarStyle },\n (0, _utils.getProgressBarTitle)(query)\n )\n );\n }\n\n return _react2.default.createElement(\n \"table\",\n null,\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { width: \"100%\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-large\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-bar progress-bar-info\", role: \"progressbar\",\n \"aria-valuenow\": (0, _utils.getProgressBarPercentage)(query), \"aria-valuemin\": \"0\", \"aria-valuemax\": \"100\",\n style: progressBarStyle },\n (0, _utils.getProgressBarTitle)(query)\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"a\",\n { onClick: function onClick() {\n return $.ajax({\n url: '/v1/query/' + query.queryId + '/killed',\n type: 'PUT',\n data: \"Killed via web UI\"\n });\n }, className: \"btn btn-warning\",\n target: \"_blank\" },\n \"Kill\"\n )\n )\n )\n )\n );\n }\n }, {\n key: \"renderTab\",\n value: function renderTab(path, name) {\n var queryId = this.props.query.queryId;\n if (window.location.pathname.includes(path)) {\n return _react2.default.createElement(\n \"a\",\n { href: path + '?' + queryId, className: \"btn btn-info navbar-btn nav-disabled\" },\n name\n );\n }\n\n return _react2.default.createElement(\n \"a\",\n { href: path + '?' 
+ queryId, className: \"btn btn-info navbar-btn\" },\n name\n );\n }\n }, {\n key: \"render\",\n value: function render() {\n var query = this.props.query;\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"h3\",\n { className: \"query-id\" },\n _react2.default.createElement(\n \"span\",\n { id: \"query-id\" },\n query.queryId\n ),\n _react2.default.createElement(\n \"a\",\n { className: \"btn copy-button\", \"data-clipboard-target\": \"#query-id\", \"data-toggle\": \"tooltip\",\n \"data-placement\": \"right\", title: \"Copy to clipboard\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-copy\", \"aria-hidden\": \"true\", alt: \"Copy to clipboard\" })\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"table\",\n { className: \"header-inline-links\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n this.renderTab(\"query.html\", \"Overview\"),\n \"\\xA0\",\n this.renderTab(\"plan.html\", \"Live Plan\"),\n \"\\xA0\",\n this.renderTab(\"stage.html\", \"Stage Performance\"),\n \"\\xA0\",\n this.renderTab(\"timeline.html\", \"Splits\"),\n \"\\xA0\",\n _react2.default.createElement(\n \"a\",\n { href: \"/v1/query/\" + query.queryId + \"?pretty\",\n className: \"btn btn-info navbar-btn\", target: \"_blank\" },\n \"JSON\"\n )\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\"hr\", { className: \"h2-hr\" }),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n this.renderProgressBar()\n )\n )\n );\n }\n }]);\n\n return QueryHeader;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/QueryHeader.jsx?"); +eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.QueryHeader = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nvar _utils = __webpack_require__(/*! ../utils */ \"./utils.js\");\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? 
call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nvar QueryHeader = exports.QueryHeader = function (_React$Component) {\n _inherits(QueryHeader, _React$Component);\n\n function QueryHeader(props) {\n _classCallCheck(this, QueryHeader);\n\n return _possibleConstructorReturn(this, (QueryHeader.__proto__ || Object.getPrototypeOf(QueryHeader)).call(this, props));\n }\n\n _createClass(QueryHeader, [{\n key: \"renderProgressBar\",\n value: function renderProgressBar() {\n var query = this.props.query;\n var progressBarStyle = {\n width: (0, _utils.getProgressBarPercentage)(query) + \"%\",\n backgroundColor: (0, _utils.getQueryStateColor)(query)\n };\n\n if ((0, _utils.isQueryEnded)(query)) {\n return _react2.default.createElement(\n \"div\",\n { className: \"progress-large\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-bar progress-bar-info\", role: \"progressbar\",\n \"aria-valuenow\": (0, _utils.getProgressBarPercentage)(query), \"aria-valuemin\": \"0\", \"aria-valuemax\": \"100\",\n style: progressBarStyle },\n (0, _utils.getProgressBarTitle)(query)\n )\n );\n }\n\n return _react2.default.createElement(\n \"table\",\n null,\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { width: \"100%\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-large\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-bar progress-bar-info\", role: \"progressbar\",\n \"aria-valuenow\": (0, _utils.getProgressBarPercentage)(query), \"aria-valuemin\": \"0\", \"aria-valuemax\": \"100\",\n style: progressBarStyle },\n (0, _utils.getProgressBarTitle)(query)\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"a\",\n { onClick: function onClick() {\n return $.ajax({\n url: '/v1/query/' + query.queryId + '/killed',\n type: 'PUT',\n data: \"Killed via web UI\"\n });\n }, className: \"btn btn-warning\",\n target: \"_blank\" },\n \"Kill\"\n )\n )\n )\n )\n );\n }\n }, {\n key: \"renderTab\",\n value: function renderTab(path, name) {\n var queryId = this.props.query.queryId;\n if (window.location.pathname.includes(path)) {\n return _react2.default.createElement(\n \"a\",\n { href: path + '?' + queryId, className: \"btn btn-info navbar-btn nav-disabled\" },\n name\n );\n }\n\n return _react2.default.createElement(\n \"a\",\n { href: path + '?' 
+ queryId, className: \"btn btn-info navbar-btn\" },\n name\n );\n }\n }, {\n key: \"render\",\n value: function render() {\n var query = this.props.query;\n return _react2.default.createElement(\n \"div\",\n { className: \"\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"h3\",\n { className: \"query-id\" },\n _react2.default.createElement(\n \"span\",\n { id: \"query-id\" },\n query.queryId\n ),\n _react2.default.createElement(\n \"a\",\n { className: \"btn copy-button\", \"data-clipboard-target\": \"#query-id\", \"data-toggle\": \"tooltip\",\n \"data-placement\": \"right\", title: \"Copy to clipboard\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-copy\", \"aria-hidden\": \"true\", alt: \"Copy to clipboard\" })\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"table\",\n { className: \"header-inline-links\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n this.renderTab(\"query.html\", \"Overview\"),\n \"\\xA0\",\n this.renderTab(\"plan.html\", \"Live Plan\"),\n \"\\xA0\",\n this.renderTab(\"stage.html\", \"Stage Performance\"),\n \"\\xA0\",\n this.renderTab(\"timeline.html\", \"Splits\"),\n \"\\xA0\",\n _react2.default.createElement(\n \"a\",\n { href: \"/v1/query/\" + query.queryId + \"?pretty\",\n className: \"btn btn-info navbar-btn\", target: \"_blank\" },\n \"JSON\"\n )\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\"hr\", { className: \"h2-hr\" }),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n this.renderProgressBar()\n )\n )\n );\n }\n }]);\n\n return QueryHeader;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/QueryHeader.jsx?"); /***/ }), @@ -20675,7 +20675,7 @@ eval("\n\nvar _react = __webpack_require__(/*! 
react */ \"./node_modules/react/i /***/ (function(module, exports, __webpack_require__) { "use strict"; -eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.GLYPHICON_HIGHLIGHT = exports.GLYPHICON_DEFAULT = undefined;\nexports.getQueryStateColor = getQueryStateColor;\nexports.getStageStateColor = getStageStateColor;\nexports.getHumanReadableState = getHumanReadableState;\nexports.getProgressBarPercentage = getProgressBarPercentage;\nexports.getProgressBarTitle = getProgressBarTitle;\nexports.isQueryEnded = isQueryEnded;\nexports.addToHistory = addToHistory;\nexports.addExponentiallyWeightedToHistory = addExponentiallyWeightedToHistory;\nexports.initializeGraph = initializeGraph;\nexports.initializeSvg = initializeSvg;\nexports.truncateString = truncateString;\nexports.getStageNumber = getStageNumber;\nexports.getTaskIdSuffix = getTaskIdSuffix;\nexports.getTaskNumber = getTaskNumber;\nexports.getFirstParameter = getFirstParameter;\nexports.getHostname = getHostname;\nexports.getPort = getPort;\nexports.getHostAndPort = getHostAndPort;\nexports.computeRate = computeRate;\nexports.precisionRound = precisionRound;\nexports.formatDuration = formatDuration;\nexports.formatRows = formatRows;\nexports.formatCount = formatCount;\nexports.formatDataSizeBytes = formatDataSizeBytes;\nexports.formatDataSize = formatDataSize;\nexports.parseDataSize = parseDataSize;\nexports.parseDuration = parseDuration;\nexports.formatShortTime = formatShortTime;\nexports.formatShortDateTime = formatShortDateTime;\n\nvar _dagreD = __webpack_require__(/*! dagre-d3 */ \"./node_modules/dagre-d3/index.js\");\n\nvar dagreD3 = _interopRequireWildcard(_dagreD);\n\nvar _d = __webpack_require__(/*! d3 */ \"./node_modules/d3/index.js\");\n\nvar d3 = _interopRequireWildcard(_d);\n\nfunction _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } }\n\n// Query display\n// =============\n\n/*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\nvar GLYPHICON_DEFAULT = exports.GLYPHICON_DEFAULT = { color: '#1edcff' };\nvar GLYPHICON_HIGHLIGHT = exports.GLYPHICON_HIGHLIGHT = { color: '#999999' };\n\nvar STATE_COLOR_MAP = {\n QUEUED: '#1b8f72',\n RUNNING: '#19874e',\n PLANNING: '#674f98',\n FINISHED: '#1a4629',\n BLOCKED: '#61003b',\n USER_ERROR: '#9a7d66',\n CANCELED: '#858959',\n INSUFFICIENT_RESOURCES: '#7f5b72',\n EXTERNAL_ERROR: '#ca7640',\n UNKNOWN_ERROR: '#943524'\n};\n\nfunction getQueryStateColor(query) {\n switch (query.state) {\n case \"QUEUED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"PLANNING\":\n return STATE_COLOR_MAP.PLANNING;\n case \"STARTING\":\n case \"FINISHING\":\n case \"RUNNING\":\n if (query.queryStats && query.queryStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FAILED\":\n switch (query.errorType) {\n case 
\"USER_ERROR\":\n if (query.errorCode.name === 'USER_CANCELED') {\n return STATE_COLOR_MAP.CANCELED;\n }\n return STATE_COLOR_MAP.USER_ERROR;\n case \"EXTERNAL\":\n return STATE_COLOR_MAP.EXTERNAL_ERROR;\n case \"INSUFFICIENT_RESOURCES\":\n return STATE_COLOR_MAP.INSUFFICIENT_RESOURCES;\n default:\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n }\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n default:\n return STATE_COLOR_MAP.QUEUED;\n }\n}\n\nfunction getStageStateColor(stage) {\n switch (stage.state) {\n case \"PLANNED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"SCHEDULING\":\n case \"SCHEDULING_SPLITS\":\n case \"SCHEDULED\":\n return STATE_COLOR_MAP.PLANNING;\n case \"RUNNING\":\n if (stage.stageStats && stage.stageStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n case \"CANCELED\":\n case \"ABORTED\":\n return STATE_COLOR_MAP.CANCELED;\n case \"FAILED\":\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n default:\n return \"#b5b5b5\";\n }\n}\n\n// This relies on the fact that BasicQueryInfo and QueryInfo have all the fields\n// necessary to compute this string, and that these fields are consistently named.\nfunction getHumanReadableState(query) {\n if (query.state === \"RUNNING\") {\n var title = \"RUNNING\";\n\n if (query.scheduled && query.queryStats.totalDrivers > 0 && query.queryStats.runningDrivers >= 0) {\n if (query.queryStats.fullyBlocked) {\n title = \"BLOCKED\";\n\n if (query.queryStats.blockedReasons && query.queryStats.blockedReasons.length > 0) {\n title += \" (\" + query.queryStats.blockedReasons.join(\", \") + \")\";\n }\n }\n\n if (query.memoryPool === \"reserved\") {\n title += \" (RESERVED)\";\n }\n\n return title;\n }\n }\n\n if (query.state === \"FAILED\") {\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === \"USER_CANCELED\") {\n return \"USER CANCELED\";\n }\n return \"USER ERROR\";\n case \"INTERNAL_ERROR\":\n return \"INTERNAL ERROR\";\n case \"INSUFFICIENT_RESOURCES\":\n return \"INSUFFICIENT RESOURCES\";\n case \"EXTERNAL\":\n return \"EXTERNAL ERROR\";\n }\n }\n\n return query.state;\n}\n\nfunction getProgressBarPercentage(query) {\n var progress = query.queryStats.progressPercentage;\n\n // progress bars should appear 'full' when query progress is not meaningful\n if (!progress || query.state !== \"RUNNING\") {\n return 100;\n }\n\n return Math.round(progress);\n}\n\nfunction getProgressBarTitle(query) {\n if (query.queryStats.progressPercentage && query.state === \"RUNNING\") {\n return getHumanReadableState(query) + \" (\" + getProgressBarPercentage(query) + \"%)\";\n }\n\n return getHumanReadableState(query);\n}\n\nfunction isQueryEnded(query) {\n return [\"FINISHED\", \"FAILED\", \"CANCELED\"].indexOf(query.state) > -1;\n}\n\n// Sparkline-related functions\n// ===========================\n\n// display at most 5 minutes worth of data on the sparklines\nvar MAX_HISTORY = 60 * 5;\n// alpha param of exponentially weighted moving average. 
picked arbitrarily - lower values means more smoothness\nvar MOVING_AVERAGE_ALPHA = 0.2;\n\nfunction addToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n return valuesArray.concat([value]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\nfunction addExponentiallyWeightedToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n\n var movingAverage = value * MOVING_AVERAGE_ALPHA + valuesArray[valuesArray.length - 1] * (1 - MOVING_AVERAGE_ALPHA);\n if (value < 1) {\n movingAverage = 0;\n }\n\n return valuesArray.concat([movingAverage]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\n// DagreD3 Graph-related functions\n// ===============================\n\nfunction initializeGraph() {\n return new dagreD3.graphlib.Graph({ compound: true }).setGraph({ rankdir: 'BT' }).setDefaultEdgeLabel(function () {\n return {};\n });\n}\n\nfunction initializeSvg(selector) {\n var svg = d3.select(selector);\n svg.append(\"g\");\n\n return svg;\n}\n\n// Utility functions\n// =================\n\nfunction truncateString(inputString, length) {\n if (inputString && inputString.length > length) {\n return inputString.substring(0, length) + \"...\";\n }\n\n return inputString;\n}\n\nfunction getStageNumber(stageId) {\n return Number.parseInt(stageId.slice(stageId.indexOf('.') + 1, stageId.length));\n}\n\nfunction getTaskIdSuffix(taskId) {\n return taskId.slice(taskId.indexOf('.') + 1, taskId.length);\n}\n\nfunction getTaskNumber(taskId) {\n return Number.parseInt(getTaskIdSuffix(getTaskIdSuffix(taskId)));\n}\n\nfunction getFirstParameter(searchString) {\n var searchText = searchString.substring(1);\n\n if (searchText.indexOf('&') !== -1) {\n return searchText.substring(0, searchText.indexOf('&'));\n }\n\n return searchText;\n}\n\nfunction getHostname(taskLocation) {\n var hostname = taskLocation.nodeServer.host;\n //\n // var hostname = new URL(url).hostname;\n if (hostname.charAt(0) == '[' && hostname.charAt(hostname.length - 1) == ']') {\n hostname = hostname.substr(1, hostname.length - 2);\n }\n return hostname;\n}\n\nfunction getPort(taskLocation) {\n return taskLocation.nodeServer.httpPort;\n // return new URL(url).port;\n}\n\nfunction getHostAndPort(taskLocation) {\n // var url = new URL(taskLocation.nodeServer.uri);\n return taskLocation.nodeServer.host + \":\" + taskLocation.nodeServer.httpPort;\n}\n\nfunction computeRate(count, ms) {\n if (ms === 0) {\n return 0;\n }\n return count / ms * 1000.0;\n}\n\nfunction precisionRound(n) {\n if (n < 10) {\n return n.toFixed(2);\n }\n if (n < 100) {\n return n.toFixed(1);\n }\n return Math.round(n).toString();\n}\n\nfunction formatDuration(duration) {\n var unit = \"ms\";\n if (duration > 1000) {\n duration /= 1000;\n unit = \"s\";\n }\n if (unit === \"s\" && duration > 60) {\n duration /= 60;\n unit = \"m\";\n }\n if (unit === \"m\" && duration > 60) {\n duration /= 60;\n unit = \"h\";\n }\n if (unit === \"h\" && duration > 24) {\n duration /= 24;\n unit = \"d\";\n }\n if (unit === \"d\" && duration > 7) {\n duration /= 7;\n unit = \"w\";\n }\n return precisionRound(duration) + unit;\n}\n\nfunction formatRows(count) {\n if (count === 1) {\n return \"1 row\";\n }\n\n return formatCount(count) + \" rows\";\n}\n\nfunction formatCount(count) {\n var unit = \"\";\n if (count > 1000) {\n count /= 1000;\n unit = \"K\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"M\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"B\";\n 
}\n if (count > 1000) {\n count /= 1000;\n unit = \"T\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"Q\";\n }\n return precisionRound(count) + unit;\n}\n\nfunction formatDataSizeBytes(size) {\n return formatDataSizeMinUnit(size, \"\");\n}\n\nfunction formatDataSize(size) {\n return formatDataSizeMinUnit(size, \"B\");\n}\n\nfunction formatDataSizeMinUnit(size, minUnit) {\n var unit = minUnit;\n if (size === 0) {\n return \"0\" + unit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"K\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"M\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"G\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"T\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"P\" + minUnit;\n }\n return precisionRound(size) + unit;\n}\n\nfunction parseDataSize(value) {\n var DATA_SIZE_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n var match = DATA_SIZE_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"B\":\n return number;\n case \"kB\":\n return number * Math.pow(2, 10);\n case \"MB\":\n return number * Math.pow(2, 20);\n case \"GB\":\n return number * Math.pow(2, 30);\n case \"TB\":\n return number * Math.pow(2, 40);\n case \"PB\":\n return number * Math.pow(2, 50);\n default:\n return null;\n }\n}\n\nfunction parseDuration(value) {\n var DURATION_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n\n var match = DURATION_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"ns\":\n return number / 1000000.0;\n case \"us\":\n return number / 1000.0;\n case \"ms\":\n return number;\n case \"s\":\n return number * 1000;\n case \"m\":\n return number * 1000 * 60;\n case \"h\":\n return number * 1000 * 60 * 60;\n case \"d\":\n return number * 1000 * 60 * 60 * 24;\n default:\n return null;\n }\n}\n\nfunction formatShortTime(date) {\n var hours = date.getHours() % 12 || 12;\n var minutes = (date.getMinutes() < 10 ? \"0\" : \"\") + date.getMinutes();\n return hours + \":\" + minutes + (date.getHours() >= 12 ? \"pm\" : \"am\");\n}\n\nfunction formatShortDateTime(date) {\n var year = date.getFullYear();\n var month = \"\" + (date.getMonth() + 1);\n var dayOfMonth = \"\" + date.getDate();\n return year + \"-\" + (month[1] ? month : \"0\" + month[0]) + \"-\" + (dayOfMonth[1] ? 
dayOfMonth : \"0\" + dayOfMonth[0]) + \" \" + formatShortTime(date);\n}\n\n//# sourceURL=webpack:///./utils.js?"); +eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.GLYPHICON_HIGHLIGHT = exports.GLYPHICON_DEFAULT = undefined;\nexports.getQueryStateColor = getQueryStateColor;\nexports.getStageStateColor = getStageStateColor;\nexports.getHumanReadableState = getHumanReadableState;\nexports.getProgressBarPercentage = getProgressBarPercentage;\nexports.getProgressBarTitle = getProgressBarTitle;\nexports.isQueryEnded = isQueryEnded;\nexports.addToHistory = addToHistory;\nexports.addExponentiallyWeightedToHistory = addExponentiallyWeightedToHistory;\nexports.initializeGraph = initializeGraph;\nexports.initializeSvg = initializeSvg;\nexports.truncateString = truncateString;\nexports.getStageNumber = getStageNumber;\nexports.getTaskIdSuffix = getTaskIdSuffix;\nexports.getFullSplitIdSuffix = getFullSplitIdSuffix;\nexports.getTaskNumber = getTaskNumber;\nexports.getFirstParameter = getFirstParameter;\nexports.getHostname = getHostname;\nexports.getPort = getPort;\nexports.getHostAndPort = getHostAndPort;\nexports.computeRate = computeRate;\nexports.precisionRound = precisionRound;\nexports.formatDurationMs = formatDurationMs;\nexports.formatDurationNs = formatDurationNs;\nexports.formatNumber = formatNumber;\nexports.formatRows = formatRows;\nexports.formatCount = formatCount;\nexports.formatDataSizeBytes = formatDataSizeBytes;\nexports.formatDataSize = formatDataSize;\nexports.parseDataSize = parseDataSize;\nexports.parseDuration = parseDuration;\nexports.formatShortTime = formatShortTime;\nexports.formatShortDateTime = formatShortDateTime;\n\nvar _dagreD = __webpack_require__(/*! dagre-d3 */ \"./node_modules/dagre-d3/index.js\");\n\nvar dagreD3 = _interopRequireWildcard(_dagreD);\n\nvar _d = __webpack_require__(/*! 
d3 */ \"./node_modules/d3/index.js\");\n\nvar d3 = _interopRequireWildcard(_d);\n\nfunction _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } }\n\n// Query display\n// =============\n\n/*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\nvar GLYPHICON_DEFAULT = exports.GLYPHICON_DEFAULT = { color: '#1edcff' };\nvar GLYPHICON_HIGHLIGHT = exports.GLYPHICON_HIGHLIGHT = { color: '#999999' };\n\nvar STATE_COLOR_MAP = {\n QUEUED: '#7bb3fb',\n RUNNING: '#265cdf',\n PLANNING: '#674f98',\n FINISHED: '#22b647',\n BLOCKED: '#61003b',\n USER_ERROR: '#9a7d66',\n CANCELED: '#858959',\n INSUFFICIENT_RESOURCES: '#7f5b72',\n EXTERNAL_ERROR: '#ca7640',\n UNKNOWN_ERROR: '#943524'\n};\n\nfunction getQueryStateColor(query) {\n switch (query.state) {\n case \"QUEUED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"PLANNING\":\n return STATE_COLOR_MAP.PLANNING;\n case \"STARTING\":\n case \"FINISHING\":\n case \"RUNNING\":\n if (query.queryStats && query.queryStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FAILED\":\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === 'USER_CANCELED') {\n return STATE_COLOR_MAP.CANCELED;\n }\n return STATE_COLOR_MAP.USER_ERROR;\n case \"EXTERNAL\":\n return STATE_COLOR_MAP.EXTERNAL_ERROR;\n case \"INSUFFICIENT_RESOURCES\":\n return STATE_COLOR_MAP.INSUFFICIENT_RESOURCES;\n default:\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n }\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n default:\n return STATE_COLOR_MAP.QUEUED;\n }\n}\n\nfunction getStageStateColor(stage) {\n switch (stage.state) {\n case \"PLANNED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"SCHEDULING\":\n case \"SCHEDULING_SPLITS\":\n case \"SCHEDULED\":\n return STATE_COLOR_MAP.PLANNING;\n case \"RUNNING\":\n if (stage.stageStats && stage.stageStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n case \"CANCELED\":\n case \"ABORTED\":\n return STATE_COLOR_MAP.CANCELED;\n case \"FAILED\":\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n default:\n return \"#b5b5b5\";\n }\n}\n\n// This relies on the fact that BasicQueryInfo and QueryInfo have all the fields\n// necessary to compute this string, and that these fields are consistently named.\nfunction getHumanReadableState(query) {\n if (query.state === \"RUNNING\") {\n var title = \"RUNNING\";\n\n if (query.scheduled && query.queryStats.totalDrivers > 0 && query.queryStats.runningDrivers >= 0) {\n if (query.queryStats.fullyBlocked) {\n title = \"BLOCKED\";\n\n if (query.queryStats.blockedReasons && query.queryStats.blockedReasons.length > 0) {\n title += \" (\" + query.queryStats.blockedReasons.join(\", \") + \")\";\n }\n }\n\n if (query.memoryPool === \"reserved\") {\n title += 
\" (RESERVED)\";\n }\n\n return title;\n }\n }\n\n if (query.state === \"FAILED\") {\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === \"USER_CANCELED\") {\n return \"USER CANCELED\";\n }\n return \"USER ERROR\";\n case \"INTERNAL_ERROR\":\n return \"INTERNAL ERROR\";\n case \"INSUFFICIENT_RESOURCES\":\n return \"INSUFFICIENT RESOURCES\";\n case \"EXTERNAL\":\n return \"EXTERNAL ERROR\";\n }\n }\n\n return query.state;\n}\n\nfunction getProgressBarPercentage(query) {\n var progress = query.queryStats.progressPercentage;\n\n // progress bars should appear 'full' when query progress is not meaningful\n if (!progress || query.state !== \"RUNNING\") {\n return 100;\n }\n\n return Math.round(progress);\n}\n\nfunction getProgressBarTitle(query) {\n if (query.queryStats.progressPercentage && query.state === \"RUNNING\") {\n return getHumanReadableState(query) + \" (\" + getProgressBarPercentage(query) + \"%)\";\n }\n\n return getHumanReadableState(query);\n}\n\nfunction isQueryEnded(query) {\n return [\"FINISHED\", \"FAILED\", \"CANCELED\"].indexOf(query.state) > -1;\n}\n\n// Sparkline-related functions\n// ===========================\n\n// display at most 5 minutes worth of data on the sparklines\nvar MAX_HISTORY = 60 * 5;\n// alpha param of exponentially weighted moving average. picked arbitrarily - lower values means more smoothness\nvar MOVING_AVERAGE_ALPHA = 0.2;\n\nfunction addToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n return valuesArray.concat([value]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\nfunction addExponentiallyWeightedToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n\n var movingAverage = value * MOVING_AVERAGE_ALPHA + valuesArray[valuesArray.length - 1] * (1 - MOVING_AVERAGE_ALPHA);\n if (value < 1) {\n movingAverage = 0;\n }\n\n return valuesArray.concat([movingAverage]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\n// DagreD3 Graph-related functions\n// ===============================\n\nfunction initializeGraph() {\n return new dagreD3.graphlib.Graph({ compound: true }).setGraph({ rankdir: 'BT' }).setDefaultEdgeLabel(function () {\n return {};\n });\n}\n\nfunction initializeSvg(selector) {\n var svg = d3.select(selector);\n svg.append(\"g\");\n\n return svg;\n}\n\n// Utility functions\n// =================\n\nfunction truncateString(inputString, length) {\n if (inputString && inputString.length > length) {\n return inputString.substring(0, length) + \"...\";\n }\n\n return inputString;\n}\n\nfunction getStageNumber(stageId) {\n return Number.parseInt(stageId.slice(stageId.indexOf('.') + 1, stageId.length));\n}\n\nfunction getTaskIdSuffix(taskId) {\n return taskId.slice(taskId.indexOf('.') + 1, taskId.length);\n}\n\nfunction getFullSplitIdSuffix(driverId) {\n return driverId.substring(driverId.indexOf('.') + 1);\n}\n\nfunction getTaskNumber(taskId) {\n return Number.parseInt(getTaskIdSuffix(getTaskIdSuffix(taskId)));\n}\n\nfunction getFirstParameter(searchString) {\n var searchText = searchString.substring(1);\n\n if (searchText.indexOf('&') !== -1) {\n return searchText.substring(0, searchText.indexOf('&'));\n }\n\n return searchText;\n}\n\nfunction getHostname(taskLocation) {\n var hostname = taskLocation.nodeServer.host;\n //\n // var hostname = new URL(url).hostname;\n if (hostname.charAt(0) == '[' && hostname.charAt(hostname.length - 1) == ']') {\n hostname = hostname.substr(1, 
hostname.length - 2);\n }\n return hostname;\n}\n\nfunction getPort(taskLocation) {\n return taskLocation.nodeServer.httpPort;\n // return new URL(url).port;\n}\n\nfunction getHostAndPort(taskLocation) {\n // var url = new URL(taskLocation.nodeServer.uri);\n return taskLocation.nodeServer.host + \":\" + taskLocation.nodeServer.httpPort;\n}\n\nfunction computeRate(count, ms) {\n if (ms === 0) {\n return 0;\n }\n return count / ms * 1000.0;\n}\n\nfunction precisionRound(n) {\n if (n < 10) {\n return n.toFixed(2);\n }\n if (n < 100) {\n return n.toFixed(1);\n }\n return Math.round(n).toString();\n}\n\nfunction formatDurationMs(duration) {\n var unit = \"ms\";\n if (duration > 1000) {\n duration /= 1000;\n unit = \"s\";\n }\n if (unit === \"s\" && duration > 60) {\n duration /= 60;\n unit = \"m\";\n }\n if (unit === \"m\" && duration > 60) {\n duration /= 60;\n unit = \"h\";\n }\n if (unit === \"h\" && duration > 24) {\n duration /= 24;\n unit = \"d\";\n }\n if (unit === \"d\" && duration > 7) {\n duration /= 7;\n unit = \"w\";\n }\n return precisionRound(duration) + unit;\n}\n\nfunction formatDurationNs(duration) {\n var unit = \"ns\";\n if (duration > 1000) {\n duration /= 1000;\n unit = \"us\";\n }\n if (duration > 1000) {\n duration /= 1000;\n unit = \"ms\";\n }\n if (duration > 1000) {\n duration /= 1000;\n unit = \"s\";\n }\n if (unit === \"s\" && duration > 60) {\n duration /= 60;\n unit = \"m\";\n }\n if (unit === \"m\" && duration > 60) {\n duration /= 60;\n unit = \"h\";\n }\n if (unit === \"h\" && duration > 24) {\n duration /= 24;\n unit = \"d\";\n }\n if (unit === \"d\" && duration > 7) {\n duration /= 7;\n unit = \"w\";\n }\n return precisionRound(duration) + unit;\n}\n\nfunction formatNumber(num) {\n return num.toLocaleString();\n}\n\nfunction formatRows(count) {\n if (count === 1) {\n return \"1 row\";\n }\n\n return formatCount(count) + \" rows\";\n}\n\nfunction formatCount(count) {\n var unit = \"\";\n if (count > 1000) {\n count /= 1000;\n unit = \"K\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"M\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"B\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"T\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"Q\";\n }\n return precisionRound(count) + unit;\n}\n\nfunction formatDataSizeBytes(size) {\n return formatDataSizeMinUnit(size, \"\");\n}\n\nfunction formatDataSize(size) {\n return formatDataSizeMinUnit(size, \"B\");\n}\n\nfunction formatDataSizeMinUnit(size, minUnit) {\n var unit = minUnit;\n if (size === 0) {\n return \"0\" + unit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"K\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"M\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"G\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"T\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"P\" + minUnit;\n }\n return precisionRound(size) + unit;\n}\n\nfunction parseDataSize(value) {\n var DATA_SIZE_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n var match = DATA_SIZE_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"B\":\n return number;\n case \"kB\":\n return number * Math.pow(2, 10);\n case \"MB\":\n return number * Math.pow(2, 20);\n case \"GB\":\n return number * Math.pow(2, 30);\n case \"TB\":\n return number * Math.pow(2, 40);\n case \"PB\":\n return number * Math.pow(2, 50);\n default:\n return null;\n }\n}\n\nfunction 
parseDuration(value) {\n var DURATION_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n\n var match = DURATION_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"ns\":\n return number / 1000000.0;\n case \"us\":\n return number / 1000.0;\n case \"ms\":\n return number;\n case \"s\":\n return number * 1000;\n case \"m\":\n return number * 1000 * 60;\n case \"h\":\n return number * 1000 * 60 * 60;\n case \"d\":\n return number * 1000 * 60 * 60 * 24;\n default:\n return null;\n }\n}\n\nfunction formatShortTime(date) {\n var hours = date.getHours() % 12 || 12;\n var minutes = (date.getMinutes() < 10 ? \"0\" : \"\") + date.getMinutes();\n return hours + \":\" + minutes + (date.getHours() >= 12 ? \"pm\" : \"am\");\n}\n\nfunction formatShortDateTime(date) {\n var year = date.getFullYear();\n var month = \"\" + (date.getMonth() + 1);\n var dayOfMonth = \"\" + date.getDate();\n return year + \"-\" + (month[1] ? month : \"0\" + month[0]) + \"-\" + (dayOfMonth[1] ? dayOfMonth : \"0\" + dayOfMonth[0]) + \" \" + formatShortTime(date);\n}\n\n//# sourceURL=webpack:///./utils.js?"); /***/ }) diff --git a/polardbx-executor/src/main/resources/webapp/dist/query.js b/polardbx-executor/src/main/resources/webapp/dist/query.js index 0a7afbc74..defbd44a1 100644 --- a/polardbx-executor/src/main/resources/webapp/dist/query.js +++ b/polardbx-executor/src/main/resources/webapp/dist/query.js @@ -94,7 +94,7 @@ /***/ (function(module, exports, __webpack_require__) { "use strict"; -eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.PageTitle = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
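
`parseDuration()`, defined at the top of this chunk, normalizes any duration string to milliseconds; the rate math in `query.js` below relies on that common unit. A compact restatement:

```js
// parseDuration from utils.js above, restated: normalize a duration string
// to milliseconds so deltas between refreshes can be divided directly.
function parseDuration(value) {
  const m = /^\s*(\d+(?:\.\d+)?)\s*([a-zA-Z]+)\s*$/.exec(value);
  if (m === null) return null;
  const toMillis = { ns: 1e-6, us: 1e-3, ms: 1, s: 1000,
                     m: 60000, h: 3600000, d: 86400000 }[m[2]];
  return toMillis === undefined ? null : parseFloat(m[1]) * toMillis;
}

console.log(parseDuration("250us")); // 0.25
console.log(parseDuration("1.5s"));  // 1500
console.log(parseDuration("2m"));    // 120000
```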
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\nvar PageTitle = exports.PageTitle = function (_React$Component) {\n _inherits(PageTitle, _React$Component);\n\n function PageTitle(props) {\n _classCallCheck(this, PageTitle);\n\n var _this = _possibleConstructorReturn(this, (PageTitle.__proto__ || Object.getPrototypeOf(PageTitle)).call(this, props));\n\n _this.state = {\n noConnection: false,\n lightShown: false,\n info: null,\n lastSuccess: Date.now(),\n modalShown: false,\n errorText: null\n };\n return _this;\n }\n\n _createClass(PageTitle, [{\n key: \"refreshLoop\",\n value: function refreshLoop() {\n var _this2 = this;\n\n clearTimeout(this.timeoutId);\n fetch(\"/v1/info\").then(function (response) {\n return response.json();\n }).then(function (info) {\n _this2.setState({\n info: info,\n noConnection: false,\n lastSuccess: Date.now(),\n modalShown: false\n });\n //$FlowFixMe$ Bootstrap 3 plugin\n $('#no-connection-modal').modal('hide');\n _this2.resetTimer();\n }).catch(function (error) {\n _this2.setState({\n noConnection: true,\n lightShown: !_this2.state.lightShown,\n errorText: error\n });\n _this2.resetTimer();\n\n if (!_this2.state.modalShown && (error || Date.now() - _this2.state.lastSuccess > 30 * 1000)) {\n //$FlowFixMe$ Bootstrap 3 plugin\n $('#no-connection-modal').modal();\n _this2.setState({ modalShown: true });\n }\n });\n }\n }, {\n key: \"resetTimer\",\n value: function resetTimer() {\n clearTimeout(this.timeoutId);\n this.timeoutId = setTimeout(this.refreshLoop.bind(this), 5000);\n }\n }, {\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.refreshLoop.bind(this)();\n }\n }, {\n key: \"renderStatusLight\",\n value: function renderStatusLight() {\n if (this.state.noConnection) {\n if (this.state.lightShown) {\n return _react2.default.createElement(\"span\", { className: \"status-light status-light-red\", id: \"status-indicator\" });\n } else {\n return _react2.default.createElement(\"span\", { className: \"status-light\", id: \"status-indicator\" });\n }\n }\n return _react2.default.createElement(\"span\", { className: \"status-light status-light-green\", id: \"status-indicator\" });\n }\n }, {\n key: \"render\",\n value: function render() {\n var info = this.state.info;\n if (!info) {\n return null;\n }\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"nav\",\n { className: \"navbar\" },\n _react2.default.createElement(\n \"div\",\n { className: \"container-fluid\" },\n _react2.default.createElement(\n \"div\",\n { className: \"navbar-header\" },\n _react2.default.createElement(\n \"table\",\n null,\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"/ui/\" },\n _react2.default.createElement(\"img\", { src: \"assets/logo.png\" })\n )\n ),\n 
_react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-brand\" },\n this.props.title\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { id: \"navbar\", className: \"navbar-collapse collapse\" },\n _react2.default.createElement(\n \"ul\",\n { className: \"nav navbar-nav navbar-right\" },\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Version\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\",\n id: \"version-number\" },\n info.nodeVersion.version\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Environment\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\", id: \"environment\" },\n info.environment\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"CoordinatorId\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\", id: \"workerId\" },\n info.workerId\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Uptime\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { \"data-toggle\": \"tooltip\", \"data-placement\": \"bottom\", title: \"Connection status\" },\n this.renderStatusLight()\n ),\n \"\\xA0\",\n _react2.default.createElement(\n \"span\",\n { className: \"text\", id: \"uptime\" },\n info.uptime\n )\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { id: \"no-connection-modal\", className: \"modal\", tabIndex: \"-1\", role: \"dialog\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-dialog modal-sm\", role: \"document\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-content\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"h4\",\n null,\n \"Unable to connect to server\"\n ),\n _react2.default.createElement(\n \"p\",\n null,\n this.state.errorText ? 
\"Error: \" + this.state.errorText : null\n )\n )\n )\n )\n )\n )\n );\n }\n }]);\n\n return PageTitle;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/PageTitle.jsx?"); +eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.PageTitle = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\nvar PageTitle = exports.PageTitle = function (_React$Component) {\n _inherits(PageTitle, _React$Component);\n\n function PageTitle(props) {\n _classCallCheck(this, PageTitle);\n\n var _this = _possibleConstructorReturn(this, (PageTitle.__proto__ || Object.getPrototypeOf(PageTitle)).call(this, props));\n\n _this.state = {\n noConnection: false,\n lightShown: false,\n info: null,\n lastSuccess: Date.now(),\n modalShown: false,\n errorText: null\n };\n return _this;\n }\n\n _createClass(PageTitle, [{\n key: \"refreshLoop\",\n value: function refreshLoop() {\n var _this2 = this;\n\n clearTimeout(this.timeoutId);\n fetch(\"/v1/info\").then(function (response) {\n return response.json();\n }).then(function (info) {\n _this2.setState({\n info: info,\n noConnection: false,\n lastSuccess: Date.now(),\n modalShown: false\n });\n //$FlowFixMe$ Bootstrap 3 plugin\n $('#no-connection-modal').modal('hide');\n _this2.resetTimer();\n }).catch(function (error) {\n _this2.setState({\n noConnection: true,\n lightShown: !_this2.state.lightShown,\n errorText: error\n });\n _this2.resetTimer();\n\n if (!_this2.state.modalShown && (error || Date.now() - _this2.state.lastSuccess > 30 * 1000)) {\n //$FlowFixMe$ Bootstrap 3 plugin\n $('#no-connection-modal').modal();\n _this2.setState({ modalShown: true });\n }\n });\n }\n }, {\n key: \"resetTimer\",\n value: function resetTimer() {\n clearTimeout(this.timeoutId);\n this.timeoutId = setTimeout(this.refreshLoop.bind(this), 5000);\n }\n }, {\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.refreshLoop.bind(this)();\n }\n }, {\n key: \"renderStatusLight\",\n value: function renderStatusLight() {\n if (this.state.noConnection) {\n if (this.state.lightShown) {\n return _react2.default.createElement(\"span\", { className: \"status-light status-light-red\", id: \"status-indicator\" });\n } else {\n return _react2.default.createElement(\"span\", { className: \"status-light\", id: \"status-indicator\" });\n }\n }\n return _react2.default.createElement(\"span\", { className: \"status-light status-light-green\", id: \"status-indicator\" });\n }\n }, {\n key: \"render\",\n value: function render() {\n var info = this.state.info;\n if (!info) {\n return null;\n }\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"nav\",\n { className: \"navbar\" },\n _react2.default.createElement(\n \"div\",\n { className: \"container-fluid\" },\n _react2.default.createElement(\n \"div\",\n { className: \"navbar-header\" },\n _react2.default.createElement(\n \"table\",\n null,\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"/ui/\" },\n _react2.default.createElement(\"img\", { src: \"assets/favicon.png\" })\n )\n 
),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-brand\" },\n this.props.title\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { id: \"navbar\", className: \"navbar-collapse collapse\" },\n _react2.default.createElement(\n \"ul\",\n { className: \"nav navbar-nav navbar-right\" },\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Version\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\",\n id: \"version-number\" },\n info.nodeVersion.version\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Environment\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\", id: \"environment\" },\n \"PolarDB-X\"\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Node\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\", id: \"workerId\" },\n info.workerId\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Uptime\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { \"data-toggle\": \"tooltip\", \"data-placement\": \"bottom\", title: \"Connection status\" },\n this.renderStatusLight()\n ),\n \"\\xA0\",\n _react2.default.createElement(\n \"span\",\n { className: \"text\", id: \"uptime\" },\n info.uptime\n )\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { id: \"no-connection-modal\", className: \"modal\", tabIndex: \"-1\", role: \"dialog\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-dialog modal-sm\", role: \"document\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-content\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"h4\",\n null,\n \"Unable to connect to server\"\n ),\n _react2.default.createElement(\n \"p\",\n null,\n this.state.errorText ? 
\"Error: \" + this.state.errorText : null\n )\n )\n )\n )\n )\n )\n );\n }\n }]);\n\n return PageTitle;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/PageTitle.jsx?"); /***/ }), @@ -106,7 +106,7 @@ eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n}); /***/ (function(module, exports, __webpack_require__) { "use strict"; -eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.QueryDetail = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nvar _reactable = __webpack_require__(/*! reactable */ \"./node_modules/reactable/lib/reactable.js\");\n\nvar _reactable2 = _interopRequireDefault(_reactable);\n\nvar _utils = __webpack_require__(/*! ../utils */ \"./utils.js\");\n\nvar _QueryHeader = __webpack_require__(/*! ./QueryHeader */ \"./components/QueryHeader.jsx\");\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nvar Table = _reactable2.default.Table,\n Thead = _reactable2.default.Thead,\n Th = _reactable2.default.Th,\n Tr = _reactable2.default.Tr,\n Td = _reactable2.default.Td;\n\nvar TaskList = function (_React$Component) {\n _inherits(TaskList, _React$Component);\n\n function TaskList() {\n _classCallCheck(this, TaskList);\n\n return _possibleConstructorReturn(this, (TaskList.__proto__ || Object.getPrototypeOf(TaskList)).apply(this, arguments));\n }\n\n _createClass(TaskList, [{\n key: \"render\",\n value: function render() {\n var tasks = this.props.tasks;\n\n if (tasks === undefined || tasks.length === 0) {\n return _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"No threads in the selected group\"\n )\n )\n );\n }\n\n var renderedTasks = tasks.map(function (task) {\n if (typeof task.stats === \"undefined\") {\n return _react2.default.createElement(\n Tr,\n { key: task.taskStatus.taskId },\n _react2.default.createElement(\n Td,\n { column: \"id\", value: task.taskStatus.taskId },\n (0, _utils.getTaskIdSuffix)(task.taskStatus.taskId)\n ),\n _react2.default.createElement(\n Td,\n { column: \"host\", value: (0, _utils.getHostname)(task.taskStatus.self) },\n _react2.default.createElement(\n \"a\",\n { href: \"worker.html?\" + task.taskStatus.nodeId, className: \"font-light\", target: \"_blank\" },\n (0, _utils.getHostAndPort)(task.taskStatus.self)\n )\n ),\n _react2.default.createElement(\n Td,\n { column: \"state\" },\n task.taskStatus.state\n ),\n _react2.default.createElement(\n Td,\n { column: \"outputRows\" },\n 0\n ),\n _react2.default.createElement(\n Td,\n { column: \"inputRows\" },\n 0\n ),\n _react2.default.createElement(\n Td,\n { column: \"inputRowsSec\" },\n 0\n ),\n _react2.default.createElement(\n Td,\n { column: \"inputBytes\" },\n 0\n ),\n _react2.default.createElement(\n Td,\n { column: \"intputBytesSec\" },\n 0\n ),\n _react2.default.createElement(\n Td,\n { column: \"splitsPending\" },\n 0\n ),\n _react2.default.createElement(\n Td,\n { column: \"splitsRunning\" },\n 0\n ),\n _react2.default.createElement(\n Td,\n { column: \"splitsDone\" },\n task.completedPipelineExecs\n ),\n _react2.default.createElement(\n Td,\n { column: \"elapsedTime\" },\n task.elapsedTime\n ),\n _react2.default.createElement(\n Td,\n { column: \"deliveryTime\" },\n task.deliveryTime\n ),\n _react2.default.createElement(\n Td,\n { column: \"processTime\" },\n (0, _utils.formatDuration)(task.processTime)\n ),\n _react2.default.createElement(\n Td,\n { column: \"processWall\" },\n (0, _utils.formatDuration)(task.processWall)\n ),\n _react2.default.createElement(\n Td,\n { column: \"dataFinishTime\" },\n (0, _utils.formatDuration)(task.pullDataTime)\n ),\n _react2.default.createElement(\n Td,\n { 
column: \"bufferedBytes\", value: task.outputBuffers.totalBufferedBytes },\n (0, _utils.formatDataSizeBytes)(task.outputBuffers.totalBufferedBytes)\n )\n );\n } else {\n return _react2.default.createElement(\n Tr,\n { key: task.taskStatus.taskId },\n _react2.default.createElement(\n Td,\n { column: \"id\", value: task.taskStatus.taskId },\n (0, _utils.getTaskIdSuffix)(task.taskStatus.taskId)\n ),\n _react2.default.createElement(\n Td,\n { column: \"host\", value: (0, _utils.getHostname)(task.taskStatus.self) },\n _react2.default.createElement(\n \"a\",\n { href: \"worker.html?\" + task.taskStatus.nodeId, className: \"font-light\", target: \"_blank\" },\n (0, _utils.getHostAndPort)(task.taskStatus.self)\n )\n ),\n _react2.default.createElement(\n Td,\n { column: \"state\" },\n task.taskStatus.state\n ),\n _react2.default.createElement(\n Td,\n { column: \"outputRows\" },\n (0, _utils.formatCount)(task.stats.outputPositions)\n ),\n _react2.default.createElement(\n Td,\n { column: \"inputRows\" },\n (0, _utils.formatCount)(task.stats.processedInputPositions)\n ),\n _react2.default.createElement(\n Td,\n { column: \"inputRowsSec\" },\n (0, _utils.formatCount)((0, _utils.computeRate)(task.stats.processedInputPositions, task.elapsedTime))\n ),\n _react2.default.createElement(\n Td,\n { column: \"inputBytes\" },\n (0, _utils.formatDataSizeBytes)(task.stats.processedInputDataSize)\n ),\n _react2.default.createElement(\n Td,\n { column: \"inputBytesSec\" },\n (0, _utils.formatDataSizeBytes)((0, _utils.computeRate)(task.stats.processedInputDataSize, task.elapsedTime))\n ),\n _react2.default.createElement(\n Td,\n { column: \"splitsPending\" },\n task.stats.queuedPipelineExecs\n ),\n _react2.default.createElement(\n Td,\n { column: \"splitsRunning\" },\n task.stats.runningPipelineExecs\n ),\n _react2.default.createElement(\n Td,\n { column: \"splitsDone\" },\n task.completedPipelineExecs\n ),\n _react2.default.createElement(\n Td,\n { column: \"elapsedTime\" },\n task.elapsedTime\n ),\n _react2.default.createElement(\n Td,\n { column: \"deliveryTime\" },\n (0, _utils.formatDuration)(task.deliveryTime)\n ),\n _react2.default.createElement(\n Td,\n { column: \"processTime\" },\n (0, _utils.formatDuration)(task.processTime)\n ),\n _react2.default.createElement(\n Td,\n { column: \"processWall\" },\n (0, _utils.formatDuration)(task.processWall)\n ),\n _react2.default.createElement(\n Td,\n { column: \"dataFinishTime\" },\n (0, _utils.formatDuration)(task.pullDataTime)\n ),\n _react2.default.createElement(\n Td,\n { column: \"bufferedBytes\", value: task.outputBuffers.totalBufferedBytes },\n (0, _utils.formatDataSizeBytes)(task.outputBuffers.totalBufferedBytes)\n )\n );\n }\n });\n\n return _react2.default.createElement(\n Table,\n { id: \"tasks\", className: \"table table-striped sortable\", sortable: [{\n column: 'id',\n sortFunction: TaskList.compareTaskId\n }, 'host', 'state', 'splitsPending', 'splitsRunning', 'splitsDone', 'outputRows', 'inputRows', 'inputRowsSec', 'inputBytes', 'inputBytesSec', 'elapsedTime', 'deliveryTime', 'processTime', 'processWall', 'dataFinishTime', 'tsds', 'tstc', 'bufferedBytes'],\n defaultSort: { column: 'id', direction: 'asc' } },\n _react2.default.createElement(\n Thead,\n null,\n _react2.default.createElement(\n Th,\n { column: \"id\" },\n \"ID\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"host\" },\n \"Host\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"state\" },\n \"State\"\n ),\n _react2.default.createElement(\n Th,\n { column: 
\"splitsPending\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-pause\", style: _utils.GLYPHICON_HIGHLIGHT,\n \"data-toggle\": \"tooltip\", \"data-placement\": \"top\",\n title: \"Pending splits\" })\n ),\n _react2.default.createElement(\n Th,\n { column: \"splitsRunning\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-play\", style: _utils.GLYPHICON_HIGHLIGHT,\n \"data-toggle\": \"tooltip\", \"data-placement\": \"top\",\n title: \"Running splits\" })\n ),\n _react2.default.createElement(\n Th,\n { column: \"splitsDone\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-ok\", style: _utils.GLYPHICON_HIGHLIGHT,\n \"data-toggle\": \"tooltip\", \"data-placement\": \"top\",\n title: \"Completed splits\" })\n ),\n _react2.default.createElement(\n Th,\n { column: \"outputRows\" },\n \"outputRows\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"inputRows\" },\n \"inputRows\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"inputRowsSec\" },\n \"inputRows/s\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"inputBytes\" },\n \"inputBytes\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"inputBytesSec\" },\n \"inputBytes/s\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"elapsedTime\" },\n \"Elapsed\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"deliveryTime\" },\n \"Delivery\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"processTime\" },\n \"Process\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"processWall\" },\n \"ProcessWall\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"dataFinishTime\" },\n \"DT\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"bufferedBytes\" },\n \"Buffered\"\n )\n ),\n renderedTasks\n );\n }\n }], [{\n key: \"removeQueryId\",\n value: function removeQueryId(id) {\n var pos = id.indexOf('.');\n if (pos !== -1) {\n return id.substring(pos + 1);\n }\n return id;\n }\n }, {\n key: \"compareTaskId\",\n value: function compareTaskId(taskA, taskB) {\n var taskIdArrA = TaskList.removeQueryId(taskA).split(\".\");\n var taskIdArrB = TaskList.removeQueryId(taskB).split(\".\");\n\n if (taskIdArrA.length > taskIdArrB.length) {\n return 1;\n }\n for (var i = 0; i < taskIdArrA.length; i++) {\n var anum = Number.parseInt(taskIdArrA[i]);\n var bnum = Number.parseInt(taskIdArrB[i]);\n if (anum !== bnum) {\n return anum > bnum ? 
1 : -1;\n }\n }\n\n return 0;\n }\n }, {\n key: \"formatState\",\n value: function formatState(state, fullyBlocked) {\n if (fullyBlocked && state === \"RUNNING\") {\n return \"BLOCKED\";\n } else {\n return state;\n }\n }\n }]);\n\n return TaskList;\n}(_react2.default.Component);\n\nvar BAR_CHART_WIDTH = 800;\n\nvar BAR_CHART_PROPERTIES = {\n type: 'bar',\n barSpacing: '0',\n height: '80px',\n barColor: '#747F96',\n zeroColor: '#8997B3',\n chartRangeMin: 0,\n tooltipClassname: 'sparkline-tooltip',\n tooltipFormat: 'Task {{offset:offset}} - {{value}}',\n disableHiddenCheck: true\n};\n\nvar HISTOGRAM_WIDTH = 175;\n\nvar HISTOGRAM_PROPERTIES = {\n type: 'bar',\n barSpacing: '0',\n height: '80px',\n barColor: '#747F96',\n zeroColor: '#747F96',\n zeroAxis: true,\n chartRangeMin: 0,\n tooltipClassname: 'sparkline-tooltip',\n tooltipFormat: '{{offset:offset}} -- {{value}} tasks',\n disableHiddenCheck: true\n};\n\nvar StageSummary = function (_React$Component2) {\n _inherits(StageSummary, _React$Component2);\n\n function StageSummary(props) {\n _classCallCheck(this, StageSummary);\n\n var _this2 = _possibleConstructorReturn(this, (StageSummary.__proto__ || Object.getPrototypeOf(StageSummary)).call(this, props));\n\n _this2.state = {\n expanded: false,\n lastRender: null\n };\n return _this2;\n }\n\n _createClass(StageSummary, [{\n key: \"getExpandedIcon\",\n value: function getExpandedIcon() {\n return this.state.expanded ? \"glyphicon-chevron-up\" : \"glyphicon-chevron-down\";\n }\n }, {\n key: \"getExpandedStyle\",\n value: function getExpandedStyle() {\n return this.state.expanded ? {} : { display: \"none\" };\n }\n }, {\n key: \"toggleExpanded\",\n value: function toggleExpanded() {\n this.setState({\n expanded: !this.state.expanded\n });\n }\n }, {\n key: \"componentDidUpdate\",\n value: function componentDidUpdate() {\n var stage = this.props.stage;\n var numTasks = stage.tasks.length;\n\n // sort the x-axis\n stage.tasks.sort(function (taskA, taskB) {\n return (0, _utils.getTaskNumber)(taskA.taskStatus.taskId) - (0, _utils.getTaskNumber)(taskB.taskStatus.taskId);\n });\n\n var scheduledTimes = stage.tasks.map(function (task) {\n if (typeof task.stats === \"undefined\") {\n (0, _utils.parseDuration)(0);\n } else {\n (0, _utils.parseDuration)(task.stats.totalScheduledTime);\n }\n });\n var cpuTimes = stage.tasks.map(function (task) {\n if (typeof task.stats === \"undefined\") {\n (0, _utils.parseDuration)(0);\n } else {\n (0, _utils.parseDuration)(task.stats.totalCpuTime);\n }\n });\n\n // prevent multiple calls to componentDidUpdate (resulting from calls to setState or otherwise) within the refresh interval from re-rendering sparklines/charts\n if (this.state.lastRender === null || Date.now() - this.state.lastRender >= 1000) {\n var renderTimestamp = Date.now();\n var stageId = (0, _utils.getStageNumber)(stage.stageId);\n\n StageSummary.renderHistogram('#scheduled-time-histogram-' + stageId, scheduledTimes, _utils.formatDuration);\n StageSummary.renderHistogram('#cpu-time-histogram-' + stageId, cpuTimes, _utils.formatDuration);\n\n if (this.state.expanded) {\n // this needs to be a string otherwise it will also be passed to numberFormatter\n var tooltipValueLookups = { 'offset': {} };\n for (var i = 0; i < numTasks; i++) {\n tooltipValueLookups['offset'][i] = (0, _utils.getStageNumber)(stage.stageId) + \".\" + i;\n }\n\n var stageBarChartProperties = $.extend({}, BAR_CHART_PROPERTIES, {\n barWidth: BAR_CHART_WIDTH / numTasks,\n tooltipValueLookups: tooltipValueLookups\n });\n\n 
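
In `StageSummary.componentDidUpdate` above, the callbacks that build `scheduledTimes` and `cpuTimes` call `parseDuration` in both branches but never `return`, so both arrays fill with `undefined` and the skew histograms and bar charts receive no usable data. The mapping presumably intended (note `parseDuration` expects a string, so the no-stats branch should yield `0` directly rather than `parseDuration(0)`):

```js
// Presumed intent of the scheduledTimes/cpuTimes mapping in the bundle
// above: per-task durations in millis, 0 for tasks with no stats yet.
// `stage` and `parseDuration` are the surrounding component's props/imports.
const scheduledTimes = stage.tasks.map(task =>
    typeof task.stats === "undefined" ? 0 : parseDuration(task.stats.totalScheduledTime));
const cpuTimes = stage.tasks.map(task =>
    typeof task.stats === "undefined" ? 0 : parseDuration(task.stats.totalCpuTime));
```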
$('#scheduled-time-bar-chart-' + stageId).sparkline(scheduledTimes, $.extend({}, stageBarChartProperties, { numberFormatter: _utils.formatDuration }));\n $('#cpu-time-bar-chart-' + stageId).sparkline(cpuTimes, $.extend({}, stageBarChartProperties, { numberFormatter: _utils.formatDuration }));\n }\n\n this.setState({\n lastRender: renderTimestamp\n });\n }\n }\n }, {\n key: \"render\",\n value: function render() {\n var stage = this.props.stage;\n if (stage === undefined || !stage.hasOwnProperty('plan')) {\n return _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"Information about this stage is unavailable.\"\n )\n );\n }\n\n var totalBufferedBytes = stage.tasks.map(function (task) {\n return task.outputBuffers.totalBufferedBytes;\n }).reduce(function (a, b) {\n return a + b;\n }, 0);\n\n var stageId = (0, _utils.getStageNumber)(stage.stageId);\n\n return _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-id\" },\n _react2.default.createElement(\n \"div\",\n { className: \"stage-state-color\",\n style: { borderLeftColor: (0, _utils.getStageStateColor)(stage) } },\n stageId\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"table\",\n { className: \"table single-stage-table\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"table\",\n { className: \"stage-table stage-table-time\" },\n _react2.default.createElement(\n \"thead\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"th\",\n { className: \"stage-table-stat-title stage-table-stat-header\" },\n \"Time\"\n ),\n _react2.default.createElement(\"th\", null)\n )\n ),\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"Scheduled\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n stage.stageStats.totalScheduledTime\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"Blocked\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n stage.stageStats.totalBlockedTime\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"Wall\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n stage.stageStats.totalUserTime\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"CPU\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n stage.stageStats.totalCpuTime\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"table\",\n { className: \"stage-table stage-table-memory\" },\n _react2.default.createElement(\n \"thead\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"th\",\n { className: \"stage-table-stat-title stage-table-stat-header\" },\n \"Memory\"\n ),\n _react2.default.createElement(\"th\", null)\n )\n 
),\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"Cumulative\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n (0, _utils.formatDataSizeBytes)(stage.stageStats.cumulativeMemory / 1000)\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"Current\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n stage.stageStats.totalMemoryReservation\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"Buffers\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n (0, _utils.formatDataSize)(totalBufferedBytes)\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"Peak\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n stage.stageStats.peakMemoryReservation\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"table\",\n { className: \"stage-table stage-table-tasks\" },\n _react2.default.createElement(\n \"thead\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"th\",\n { className: \"stage-table-stat-title stage-table-stat-header\" },\n \"Tasks\"\n ),\n _react2.default.createElement(\"th\", null)\n )\n ),\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"Pending\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n stage.tasks.filter(function (task) {\n return task.taskStatus.state === \"PLANNED\";\n }).length\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"Running\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n stage.tasks.filter(function (task) {\n return task.taskStatus.state === \"RUNNING\";\n }).length\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"Finished\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n stage.tasks.filter(function (task) {\n return task.taskStatus.state == \"FINISHED\" || task.taskStatus.state == \"CANCELED\" || task.taskStatus.state == \"ABORTED\" || task.taskStatus.state == \"FAILED\";\n }).length\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"Total\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n stage.tasks.length\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"table\",\n { className: \"stage-table histogram-table\" },\n _react2.default.createElement(\n \"thead\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"th\",\n { 
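
The stage's Tasks table above derives its Pending/Running/Finished counts by filtering task states on every render; "Finished" deliberately lumps all terminal states together, matching the `==` chain in the bundle. As a predicate sketch:

```js
// State counting used by the stage's Tasks table above.
const TERMINAL = ["FINISHED", "CANCELED", "ABORTED", "FAILED"];

function countTasks(tasks) {
  const state = t => t.taskStatus.state;
  return {
    pending:  tasks.filter(t => state(t) === "PLANNED").length,
    running:  tasks.filter(t => state(t) === "RUNNING").length,
    finished: tasks.filter(t => TERMINAL.includes(state(t))).length,
    total:    tasks.length,
  };
}

console.log(countTasks([{ taskStatus: { state: "RUNNING" } },
                        { taskStatus: { state: "FAILED" } }]));
// { pending: 0, running: 1, finished: 1, total: 2 }
```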
className: \"stage-table-stat-title stage-table-chart-header\" },\n \"Scheduled Time Skew\"\n )\n )\n ),\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"histogram-container\" },\n _react2.default.createElement(\n \"span\",\n { className: \"histogram\", id: \"scheduled-time-histogram-\" + stageId },\n _react2.default.createElement(\"div\", {\n className: \"loader\" })\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"table\",\n { className: \"stage-table histogram-table\" },\n _react2.default.createElement(\n \"thead\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"th\",\n { className: \"stage-table-stat-title stage-table-chart-header\" },\n \"CPU Time Skew\"\n )\n )\n ),\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"histogram-container\" },\n _react2.default.createElement(\n \"span\",\n { className: \"histogram\", id: \"cpu-time-histogram-\" + stageId },\n _react2.default.createElement(\"div\", {\n className: \"loader\" })\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"expand-charts-container\" },\n _react2.default.createElement(\n \"a\",\n { onClick: this.toggleExpanded.bind(this), className: \"expand-charts-button\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon \" + this.getExpandedIcon(), style: _utils.GLYPHICON_HIGHLIGHT,\n \"data-toggle\": \"tooltip\", \"data-placement\": \"top\", title: \"More\" })\n )\n )\n ),\n _react2.default.createElement(\n \"tr\",\n { style: this.getExpandedStyle() },\n _react2.default.createElement(\n \"td\",\n { colSpan: \"6\" },\n _react2.default.createElement(\n \"table\",\n { className: \"expanded-chart\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title expanded-chart-title\" },\n \"Task Scheduled Time\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"bar-chart-container\" },\n _react2.default.createElement(\n \"span\",\n { className: \"bar-chart\", id: \"scheduled-time-bar-chart-\" + stageId },\n _react2.default.createElement(\"div\", {\n className: \"loader\" })\n )\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"tr\",\n { style: this.getExpandedStyle() },\n _react2.default.createElement(\n \"td\",\n { colSpan: \"6\" },\n _react2.default.createElement(\n \"table\",\n { className: \"expanded-chart\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title expanded-chart-title\" },\n \"Task CPU Time\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"bar-chart-container\" },\n _react2.default.createElement(\n \"span\",\n { className: \"bar-chart\", id: \"cpu-time-bar-chart-\" + stageId },\n _react2.default.createElement(\"div\", {\n className: \"loader\" })\n )\n )\n )\n )\n )\n )\n )\n )\n )\n )\n );\n }\n }], [{\n key: \"renderHistogram\",\n value: function renderHistogram(histogramId, inputData, numberFormatter) {\n var numBuckets = Math.min(HISTOGRAM_WIDTH, Math.sqrt(inputData.length));\n var dataMin = Math.min.apply(null, 
inputData);\n var dataMax = Math.max.apply(null, inputData);\n var bucketSize = (dataMax - dataMin) / numBuckets;\n\n var histogramData = [];\n if (bucketSize === 0) {\n histogramData = [inputData.length];\n } else {\n for (var i = 0; i < numBuckets + 1; i++) {\n histogramData.push(0);\n }\n\n for (var _i in inputData) {\n var dataPoint = inputData[_i];\n var bucket = Math.floor((dataPoint - dataMin) / bucketSize);\n histogramData[bucket] = histogramData[bucket] + 1;\n }\n }\n\n var tooltipValueLookups = { 'offset': {} };\n for (var _i2 = 0; _i2 < histogramData.length; _i2++) {\n tooltipValueLookups['offset'][_i2] = numberFormatter(dataMin + _i2 * bucketSize) + \"-\" + numberFormatter(dataMin + (_i2 + 1) * bucketSize);\n }\n\n var stageHistogramProperties = $.extend({}, HISTOGRAM_PROPERTIES, {\n barWidth: HISTOGRAM_WIDTH / histogramData.length,\n tooltipValueLookups: tooltipValueLookups\n });\n $(histogramId).sparkline(histogramData, stageHistogramProperties);\n }\n }]);\n\n return StageSummary;\n}(_react2.default.Component);\n\nvar StageList = function (_React$Component3) {\n _inherits(StageList, _React$Component3);\n\n function StageList() {\n _classCallCheck(this, StageList);\n\n return _possibleConstructorReturn(this, (StageList.__proto__ || Object.getPrototypeOf(StageList)).apply(this, arguments));\n }\n\n _createClass(StageList, [{\n key: \"getStages\",\n value: function getStages(stage) {\n if (stage === undefined || !stage.hasOwnProperty('subStages')) {\n return [];\n }\n\n return [].concat.apply(stage, stage.subStages.map(this.getStages, this));\n }\n }, {\n key: \"render\",\n value: function render() {\n var stages = this.getStages(this.props.outputStage);\n\n if (stages === undefined || stages.length === 0) {\n return _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n \"No stage information available.\"\n )\n );\n }\n\n var renderedStages = stages.map(function (stage) {\n return _react2.default.createElement(StageSummary, { key: stage.stageId, stage: stage });\n });\n\n return _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"table\",\n { className: \"table\", id: \"stage-list\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n renderedStages\n )\n )\n )\n );\n }\n }]);\n\n return StageList;\n}(_react2.default.Component);\n\nvar SMALL_SPARKLINE_PROPERTIES = {\n width: '100%',\n height: '57px',\n fillColor: '#3F4552',\n lineColor: '#747F96',\n spotColor: '#1EDCFF',\n tooltipClassname: 'sparkline-tooltip',\n disableHiddenCheck: true\n};\n\nvar TASK_FILTER = {\n NONE: {\n text: \"None\",\n predicate: function predicate() {\n return false;\n }\n },\n ALL: {\n text: \"All\",\n predicate: function predicate() {\n return true;\n }\n },\n PLANNED: {\n text: \"Planned\",\n predicate: function predicate(state) {\n return state === 'PLANNED';\n }\n },\n RUNNING: {\n text: \"Running\",\n predicate: function predicate(state) {\n return state === 'RUNNING';\n }\n },\n FINISHED: {\n text: \"Finished\",\n predicate: function predicate(state) {\n return state === 'FINISHED';\n }\n },\n FAILED: {\n text: \"Aborted/Canceled/Failed\",\n predicate: function predicate(state) {\n return state === 'FAILED' || state === 'ABORTED' || state === 'CANCELED';\n }\n }\n};\n\nvar QueryDetail = exports.QueryDetail = function (_React$Component4) {\n _inherits(QueryDetail, 
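
`StageSummary.renderHistogram` above picks sqrt-of-n buckets (capped at the sparkline's 175px `HISTOGRAM_WIDTH`), sizes them over the observed min..max range, and counts tasks per bucket, with one extra slot so the maximum value stays in range. Restated:

```js
// Bucketing used by the scheduled-time / CPU-time skew histograms above.
function bucketize(data) {
  const numBuckets = Math.min(175, Math.sqrt(data.length)); // width cap
  const min = Math.min(...data);
  const max = Math.max(...data);
  const bucketSize = (max - min) / numBuckets;
  if (bucketSize === 0) {
    return [data.length]; // all tasks took the same time: a single bar
  }
  // one extra slot so a value equal to max still has a bucket to land in
  const counts = new Array(Math.ceil(numBuckets) + 1).fill(0);
  for (const v of data) {
    counts[Math.floor((v - min) / bucketSize)]++;
  }
  return counts;
}

console.log(bucketize([1, 2, 2, 3, 10])); // [ 4, 0, 1, 0 ] -- one straggler task
```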
_React$Component4);\n\n function QueryDetail(props) {\n _classCallCheck(this, QueryDetail);\n\n var _this4 = _possibleConstructorReturn(this, (QueryDetail.__proto__ || Object.getPrototypeOf(QueryDetail)).call(this, props));\n\n _this4.state = {\n query: null,\n lastSnapshotStages: null,\n lastSnapshotTasks: null,\n\n lastScheduledTime: 0,\n lastCpuTime: 0,\n lastRowInput: 0,\n lastByteInput: 0,\n\n scheduledTimeRate: [],\n cpuTimeRate: [],\n rowInputRate: [],\n byteInputRate: [],\n\n reservedMemory: [],\n\n initialized: false,\n ended: false,\n\n lastRefresh: null,\n lastRender: null,\n\n stageRefresh: true,\n taskRefresh: true,\n\n taskFilter: TASK_FILTER.NONE\n };\n\n _this4.refreshLoop = _this4.refreshLoop.bind(_this4);\n return _this4;\n }\n\n _createClass(QueryDetail, [{\n key: \"resetTimer\",\n value: function resetTimer() {\n clearTimeout(this.timeoutId);\n // stop refreshing when query finishes or fails\n if (this.state.query === null || !this.state.ended) {\n // task.info-update-interval is set to 3 seconds by default\n this.timeoutId = setTimeout(this.refreshLoop, 5000);\n }\n }\n }, {\n key: \"refreshLoop\",\n value: function refreshLoop() {\n var _this5 = this;\n\n clearTimeout(this.timeoutId); // to stop multiple series of refreshLoop from going on simultaneously\n var queryId = (0, _utils.getFirstParameter)(window.location.search);\n $.get('/v1/query/' + queryId, function (query) {\n var lastSnapshotStages = this.state.lastSnapshotStage;\n if (this.state.stageRefresh) {\n lastSnapshotStages = query.outputStage;\n }\n var lastSnapshotTasks = this.state.lastSnapshotTasks;\n if (this.state.taskRefresh) {\n lastSnapshotTasks = query.outputStage;\n }\n\n var lastRefresh = this.state.lastRefresh;\n var lastScheduledTime = this.state.lastScheduledTime;\n var lastCpuTime = this.state.lastCpuTime;\n var lastRowInput = this.state.lastRowInput;\n var lastByteInput = this.state.lastByteInput;\n var alreadyEnded = this.state.ended;\n var nowMillis = Date.now();\n\n this.setState({\n query: query,\n lastSnapshotStage: lastSnapshotStages,\n lastSnapshotTasks: lastSnapshotTasks,\n\n lastScheduledTime: (0, _utils.parseDuration)(query.queryStats.totalScheduledTime),\n lastCpuTime: (0, _utils.parseDuration)(query.queryStats.totalCpuTime),\n lastRowInput: query.queryStats.processedInputPositions,\n lastByteInput: (0, _utils.parseDataSize)(query.queryStats.processedInputDataSize),\n\n initialized: true,\n ended: query.finalQueryInfo,\n\n lastRefresh: nowMillis\n });\n\n // i.e. 
don't show sparklines if we've already decided not to update or if we don't have one previous measurement\n if (alreadyEnded || lastRefresh === null && query.state === \"RUNNING\") {\n this.resetTimer();\n return;\n }\n\n if (lastRefresh === null) {\n lastRefresh = nowMillis - (0, _utils.parseDuration)(query.queryStats.elapsedTime);\n }\n\n var elapsedSecsSinceLastRefresh = (nowMillis - lastRefresh) / 1000.0;\n if (elapsedSecsSinceLastRefresh >= 0) {\n var currentScheduledTimeRate = ((0, _utils.parseDuration)(query.queryStats.totalScheduledTime) - lastScheduledTime) / (elapsedSecsSinceLastRefresh * 1000);\n var currentCpuTimeRate = ((0, _utils.parseDuration)(query.queryStats.totalCpuTime) - lastCpuTime) / (elapsedSecsSinceLastRefresh * 1000);\n var currentRowInputRate = (query.queryStats.processedInputPositions - lastRowInput) / elapsedSecsSinceLastRefresh;\n var currentByteInputRate = ((0, _utils.parseDataSize)(query.queryStats.processedInputDataSize) - lastByteInput) / elapsedSecsSinceLastRefresh;\n this.setState({\n scheduledTimeRate: (0, _utils.addToHistory)(currentScheduledTimeRate, this.state.scheduledTimeRate),\n cpuTimeRate: (0, _utils.addToHistory)(currentCpuTimeRate, this.state.cpuTimeRate),\n rowInputRate: (0, _utils.addToHistory)(currentRowInputRate, this.state.rowInputRate),\n byteInputRate: (0, _utils.addToHistory)(currentByteInputRate, this.state.byteInputRate),\n reservedMemory: (0, _utils.addToHistory)((0, _utils.parseDataSize)(query.queryStats.totalMemoryReservation), this.state.reservedMemory)\n });\n }\n this.resetTimer();\n }.bind(this)).error(function () {\n _this5.setState({\n initialized: true\n });\n _this5.resetTimer();\n });\n }\n }, {\n key: \"handleTaskRefreshClick\",\n value: function handleTaskRefreshClick() {\n if (this.state.taskRefresh) {\n this.setState({\n taskRefresh: false,\n lastSnapshotTasks: this.state.query.outputStage\n });\n } else {\n this.setState({\n taskRefresh: true\n });\n }\n }\n }, {\n key: \"renderTaskRefreshButton\",\n value: function renderTaskRefreshButton() {\n if (this.state.taskRefresh) {\n return _react2.default.createElement(\n \"button\",\n { className: \"btn btn-info live-button\",\n onClick: this.handleTaskRefreshClick.bind(this) },\n \"Auto-Refresh: On\"\n );\n } else {\n return _react2.default.createElement(\n \"button\",\n { className: \"btn btn-info live-button\",\n onClick: this.handleTaskRefreshClick.bind(this) },\n \"Auto-Refresh: Off\"\n );\n }\n }\n }, {\n key: \"handleStageRefreshClick\",\n value: function handleStageRefreshClick() {\n if (this.state.stageRefresh) {\n this.setState({\n stageRefresh: false,\n lastSnapshotStages: this.state.query.outputStage\n });\n } else {\n this.setState({\n stageRefresh: true\n });\n }\n }\n }, {\n key: \"renderStageRefreshButton\",\n value: function renderStageRefreshButton() {\n if (this.state.stageRefresh) {\n return _react2.default.createElement(\n \"button\",\n { className: \"btn btn-info live-button\",\n onClick: this.handleStageRefreshClick.bind(this) },\n \"Auto-Refresh: On\"\n );\n } else {\n return _react2.default.createElement(\n \"button\",\n { className: \"btn btn-info live-button\",\n onClick: this.handleStageRefreshClick.bind(this) },\n \"Auto-Refresh: Off\"\n );\n }\n }\n }, {\n key: \"renderTaskFilterListItem\",\n value: function renderTaskFilterListItem(taskFilter) {\n return _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"#\", className: this.state.taskFilter === taskFilter ? 
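
The `refreshLoop` above converts the cumulative counters in `queryStats` into instantaneous rates: each poll subtracts the previous snapshot and divides by the wall-clock seconds since the last successful refresh, then appends the point to the sparkline history. (Note also the near-miss state keys: the constructor seeds `lastSnapshotStages`, plural, while the loop and `renderStages` read and write `lastSnapshotStage`, singular, so the initial `null` lives under a key nothing reads.) The rate step in isolation:

```js
// Rate of a cumulative counter between two refreshes, as computed in
// refreshLoop above (timestamps in millis, hence the /1000).
function ratePerSecond(current, previous, lastRefreshMillis, nowMillis) {
  const elapsedSecs = (nowMillis - lastRefreshMillis) / 1000.0;
  return elapsedSecs > 0 ? (current - previous) / elapsedSecs : 0;
}

// e.g. 600000 new rows over a 5s refresh interval:
console.log(ratePerSecond(5000000, 4400000, 0, 5000)); // 120000 rows/s
```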
\"selected\" : \"\",\n onClick: this.handleTaskFilterClick.bind(this, taskFilter) },\n taskFilter.text\n )\n );\n }\n }, {\n key: \"handleTaskFilterClick\",\n value: function handleTaskFilterClick(filter, event) {\n this.setState({\n taskFilter: filter\n });\n event.preventDefault();\n }\n }, {\n key: \"getTasksFromStage\",\n value: function getTasksFromStage(stage) {\n if (stage === undefined || !stage.hasOwnProperty('subStages') || !stage.hasOwnProperty('tasks')) {\n return [];\n }\n\n return [].concat.apply(stage.tasks, stage.subStages.map(this.getTasksFromStage, this));\n }\n }, {\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.refreshLoop();\n }\n }, {\n key: \"componentDidUpdate\",\n value: function componentDidUpdate() {\n // prevent multiple calls to componentDidUpdate (resulting from calls to setState or otherwise) within the refresh interval from re-rendering sparklines/charts\n if (this.state.lastRender === null || Date.now() - this.state.lastRender >= 1000) {\n var renderTimestamp = Date.now();\n $('#scheduled-time-rate-sparkline').sparkline(this.state.scheduledTimeRate, $.extend({}, SMALL_SPARKLINE_PROPERTIES, {\n chartRangeMin: 0,\n numberFormatter: _utils.precisionRound\n }));\n $('#cpu-time-rate-sparkline').sparkline(this.state.cpuTimeRate, $.extend({}, SMALL_SPARKLINE_PROPERTIES, {\n chartRangeMin: 0,\n numberFormatter: _utils.precisionRound\n }));\n $('#row-input-rate-sparkline').sparkline(this.state.rowInputRate, $.extend({}, SMALL_SPARKLINE_PROPERTIES, { numberFormatter: _utils.formatCount }));\n $('#byte-input-rate-sparkline').sparkline(this.state.byteInputRate, $.extend({}, SMALL_SPARKLINE_PROPERTIES, { numberFormatter: _utils.formatDataSize }));\n $('#reserved-memory-sparkline').sparkline(this.state.reservedMemory, $.extend({}, SMALL_SPARKLINE_PROPERTIES, { numberFormatter: _utils.formatDataSize }));\n\n if (this.state.lastRender === null) {\n $('#query').each(function (i, block) {\n hljs.highlightBlock(block);\n });\n }\n\n this.setState({\n lastRender: renderTimestamp\n });\n }\n\n $('[data-toggle=\"tooltip\"]').tooltip();\n new Clipboard('.copy-button');\n }\n }, {\n key: \"renderTasks\",\n value: function renderTasks() {\n var _this6 = this;\n\n if (this.state.lastSnapshotTasks === null) {\n return;\n }\n\n var tasks = [];\n if (this.state.taskFilter !== TASK_FILTER.NONE) {\n tasks = this.getTasksFromStage(this.state.lastSnapshotTasks).filter(function (task) {\n return _this6.state.taskFilter.predicate(task.taskStatus.state);\n }, this);\n }\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"h3\",\n null,\n \"Tasks\"\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"table\",\n { className: \"header-inline-links\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"div\",\n { className: \"input-group-btn text-right\" },\n _react2.default.createElement(\n \"button\",\n { type: \"button\",\n className: \"btn btn-default dropdown-toggle pull-right text-right\",\n \"data-toggle\": \"dropdown\", \"aria-haspopup\": \"true\",\n \"aria-expanded\": \"false\" },\n \"Show: \",\n this.state.taskFilter.text,\n \" \",\n 
_react2.default.createElement(\"span\", { className: \"caret\" })\n ),\n _react2.default.createElement(\n \"ul\",\n { className: \"dropdown-menu\" },\n this.renderTaskFilterListItem(TASK_FILTER.NONE),\n this.renderTaskFilterListItem(TASK_FILTER.ALL),\n this.renderTaskFilterListItem(TASK_FILTER.PLANNED),\n this.renderTaskFilterListItem(TASK_FILTER.RUNNING),\n this.renderTaskFilterListItem(TASK_FILTER.FINISHED),\n this.renderTaskFilterListItem(TASK_FILTER.FAILED)\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n \"\\xA0\\xA0\",\n this.renderTaskRefreshButton()\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(TaskList, { key: this.state.query.queryId, tasks: tasks })\n )\n )\n );\n }\n }, {\n key: \"renderStages\",\n value: function renderStages() {\n if (this.state.lastSnapshotStage === null) {\n return;\n }\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-9\" },\n _react2.default.createElement(\n \"h3\",\n null,\n \"Stages\"\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-3\" },\n _react2.default.createElement(\n \"table\",\n { className: \"header-inline-links\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n this.renderStageRefreshButton()\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(StageList, { key: this.state.query.queryId, outputStage: this.state.lastSnapshotStage })\n )\n )\n );\n }\n }, {\n key: \"renderWarningInfo\",\n value: function renderWarningInfo() {\n var query = this.state.query;\n if (query.warnings != null && query.warnings.length > 0) {\n return _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h3\",\n null,\n \"Warnings\"\n ),\n _react2.default.createElement(\"hr\", { className: \"h3-hr\" }),\n _react2.default.createElement(\n \"table\",\n { className: \"table\", id: \"warnings-table\" },\n query.warnings.map(function (warning) {\n return _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n warning.warningCode.name\n ),\n _react2.default.createElement(\n \"td\",\n null,\n warning.message\n )\n );\n })\n )\n )\n );\n } else {\n return null;\n }\n }\n }, {\n key: \"renderUserProperties\",\n value: function renderUserProperties() {\n var query = this.state.query;\n\n var properties = [];\n for (var property in query.session.userDefVariables) {\n if (query.session.userDefVariables.hasOwnProperty(property)) {\n properties.push(_react2.default.createElement(\n \"span\",\n null,\n \"- \",\n property + \"=\" + query.session.userDefVariables[property],\n \" \",\n _react2.default.createElement(\"br\", null)\n ));\n }\n }\n\n return properties;\n }\n }, {\n key: \"renderServerProperties\",\n value: function renderServerProperties() {\n var query = this.state.query;\n\n var properties = [];\n for (var property in query.session.serverVariables) {\n if 
(query.session.serverVariables.hasOwnProperty(property)) {\n properties.push(_react2.default.createElement(\n \"span\",\n null,\n \"- \",\n property + \"=\" + query.session.serverVariables[property],\n \" \",\n _react2.default.createElement(\"br\", null)\n ));\n }\n }\n\n return properties;\n }\n }, {\n key: \"renderFailureInfo\",\n value: function renderFailureInfo() {\n var query = this.state.query;\n if (query.failureInfo) {\n return _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h3\",\n null,\n \"Error Information\"\n ),\n _react2.default.createElement(\"hr\", { className: \"h3-hr\" }),\n _react2.default.createElement(\n \"table\",\n { className: \"table\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Error Type\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.errorType\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Error Code\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n QueryDetail.formatErrorCode(query.errorCode)\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Stack Trace\",\n _react2.default.createElement(\n \"a\",\n { className: \"btn copy-button\", \"data-clipboard-target\": \"#stack-trace\",\n \"data-toggle\": \"tooltip\", \"data-placement\": \"right\", title: \"Copy to clipboard\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-copy\", \"aria-hidden\": \"true\",\n alt: \"Copy to clipboard\" })\n )\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n _react2.default.createElement(\n \"pre\",\n { id: \"stack-trace\" },\n QueryDetail.formatStackTrace(query.failureInfo)\n )\n )\n )\n )\n )\n )\n );\n } else {\n return \"\";\n }\n }\n }, {\n key: \"render\",\n value: function render() {\n var query = this.state.query;\n\n if (query === null || this.state.initialized === false) {\n var label = _react2.default.createElement(\n \"div\",\n { className: \"loader\" },\n \"Loading...\"\n );\n if (this.state.initialized) {\n label = \"Query not found\";\n }\n return _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n label\n )\n )\n );\n }\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(_QueryHeader.QueryHeader, { query: query }),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"h3\",\n null,\n \"Session\"\n ),\n _react2.default.createElement(\"hr\", { className: \"h3-hr\" }),\n _react2.default.createElement(\n \"table\",\n { className: \"table\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"User\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text wrap-text\" },\n query.session.user\n )\n ),\n 
_react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Schema\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.session.schema\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Submission Time\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n (0, _utils.formatShortDateTime)(new Date(query.queryStats.createTime))\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Completion Time\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.queryStats.endTime ? (0, _utils.formatShortDateTime)(new Date(query.queryStats.endTime)) : \"\"\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Server Properties\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n this.renderServerProperties()\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"User Properties\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n this.renderUserProperties()\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"h3\",\n null,\n \"Execution\"\n ),\n _react2.default.createElement(\"hr\", { className: \"h3-hr\" }),\n _react2.default.createElement(\n \"table\",\n { className: \"table\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Elapsed Time\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.queryStats.elapsedTime\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Queued Time\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.queryStats.queuedTime\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Execution Time\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.queryStats.executionTime\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Total Plan Time\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.queryStats.totalPlanningTime\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Distributed Plan Time\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.queryStats.distributedPlanningTime\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"h3\",\n null,\n \"Resource Utilization Summary\"\n 
),\n _react2.default.createElement(\"hr\", { className: \"h3-hr\" }),\n _react2.default.createElement(\n \"table\",\n { className: \"table\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"CPU Time\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.queryStats.totalCpuTime\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Input Rows\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n (0, _utils.formatCount)(query.queryStats.processedInputPositions)\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Input Data\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.queryStats.processedInputDataSize\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Raw Input Rows\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n (0, _utils.formatCount)(query.queryStats.processedInputPositions)\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Raw Input Data\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.queryStats.processedInputDataSize\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Peak Memory\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.queryStats.peakMemoryReservation\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Cumulative Memory\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n (0, _utils.formatDataSizeBytes)(query.queryStats.cumulativeMemory / 1000.0, \"\") + \" seconds\"\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"h3\",\n null,\n \"Timeline\"\n ),\n _react2.default.createElement(\"hr\", { className: \"h3-hr\" }),\n _react2.default.createElement(\n \"table\",\n { className: \"table\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Parallelism\"\n ),\n _react2.default.createElement(\n \"td\",\n { rowSpan: \"2\" },\n _react2.default.createElement(\n \"div\",\n { className: \"query-stats-sparkline-container\" },\n _react2.default.createElement(\n \"span\",\n { className: \"sparkline\", id: \"cpu-time-rate-sparkline\" },\n _react2.default.createElement(\n \"div\",\n {\n className: \"loader\" },\n \"Loading ...\"\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"tr\",\n { className: \"tr-noborder\" },\n _react2.default.createElement(\n \"td\",\n { className: \"info-sparkline-text\" },\n (0, _utils.formatCount)(this.state.cpuTimeRate[this.state.cpuTimeRate.length - 1])\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Scheduled Time/s\"\n ),\n 
_react2.default.createElement(\n \"td\",\n { rowSpan: \"2\" },\n _react2.default.createElement(\n \"div\",\n { className: \"query-stats-sparkline-container\" },\n _react2.default.createElement(\n \"span\",\n { className: \"sparkline\", id: \"scheduled-time-rate-sparkline\" },\n _react2.default.createElement(\n \"div\",\n {\n className: \"loader\" },\n \"Loading ...\"\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"tr\",\n { className: \"tr-noborder\" },\n _react2.default.createElement(\n \"td\",\n { className: \"info-sparkline-text\" },\n (0, _utils.formatCount)(this.state.scheduledTimeRate[this.state.scheduledTimeRate.length - 1])\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Input Rows/s\"\n ),\n _react2.default.createElement(\n \"td\",\n { rowSpan: \"2\" },\n _react2.default.createElement(\n \"div\",\n { className: \"query-stats-sparkline-container\" },\n _react2.default.createElement(\n \"span\",\n { className: \"sparkline\", id: \"row-input-rate-sparkline\" },\n _react2.default.createElement(\n \"div\",\n {\n className: \"loader\" },\n \"Loading ...\"\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"tr\",\n { className: \"tr-noborder\" },\n _react2.default.createElement(\n \"td\",\n { className: \"info-sparkline-text\" },\n (0, _utils.formatCount)(this.state.rowInputRate[this.state.rowInputRate.length - 1])\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Input Bytes/s\"\n ),\n _react2.default.createElement(\n \"td\",\n { rowSpan: \"2\" },\n _react2.default.createElement(\n \"div\",\n { className: \"query-stats-sparkline-container\" },\n _react2.default.createElement(\n \"span\",\n { className: \"sparkline\", id: \"byte-input-rate-sparkline\" },\n _react2.default.createElement(\n \"div\",\n {\n className: \"loader\" },\n \"Loading ...\"\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"tr\",\n { className: \"tr-noborder\" },\n _react2.default.createElement(\n \"td\",\n { className: \"info-sparkline-text\" },\n (0, _utils.formatDataSize)(this.state.byteInputRate[this.state.byteInputRate.length - 1])\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Memory Utilization\"\n ),\n _react2.default.createElement(\n \"td\",\n { rowSpan: \"2\" },\n _react2.default.createElement(\n \"div\",\n { className: \"query-stats-sparkline-container\" },\n _react2.default.createElement(\n \"span\",\n { className: \"sparkline\", id: \"reserved-memory-sparkline\" },\n _react2.default.createElement(\n \"div\",\n {\n className: \"loader\" },\n \"Loading ...\"\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"tr\",\n { className: \"tr-noborder\" },\n _react2.default.createElement(\n \"td\",\n { className: \"info-sparkline-text\" },\n (0, _utils.formatDataSize)(this.state.reservedMemory[this.state.reservedMemory.length - 1])\n )\n )\n )\n )\n )\n )\n )\n ),\n this.renderFailureInfo(),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h3\",\n null,\n \"Query\",\n _react2.default.createElement(\n \"a\",\n { className: \"btn copy-button\", \"data-clipboard-target\": \"#query-text\", \"data-toggle\": \"tooltip\",\n \"data-placement\": \"right\", title: \"Copy to clipboard\" },\n 
_react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-copy\", \"aria-hidden\": \"true\", alt: \"Copy to clipboard\" })\n )\n ),\n _react2.default.createElement(\n \"pre\",\n { id: \"query\" },\n _react2.default.createElement(\n \"code\",\n { className: \"lang-sql\", id: \"query-text\" },\n query.query\n )\n )\n )\n ),\n this.renderStages(),\n this.renderTasks()\n );\n }\n }], [{\n key: \"formatStackTrace\",\n value: function formatStackTrace(info) {\n return QueryDetail.formatStackTraceHelper(info, [], \"\", \"\");\n }\n }, {\n key: \"formatErrorCode\",\n value: function formatErrorCode(errorCode) {\n if (typeof errorCode === \"undefined\") {\n return \"\";\n } else {\n return errorCode.name + \" (\" + errorCode.code + \")\";\n }\n }\n }, {\n key: \"formatStackTraceHelper\",\n value: function formatStackTraceHelper(info, parentStack, prefix, linePrefix) {\n var s = linePrefix + prefix + QueryDetail.failureInfoToString(info) + \"\\n\";\n\n if (info.stack) {\n var sharedStackFrames = 0;\n if (parentStack !== null) {\n sharedStackFrames = QueryDetail.countSharedStackFrames(info.stack, parentStack);\n }\n\n for (var i = 0; i < info.stack.length - sharedStackFrames; i++) {\n s += linePrefix + \"\\tat \" + info.stack[i] + \"\\n\";\n }\n if (sharedStackFrames !== 0) {\n s += linePrefix + \"\\t... \" + sharedStackFrames + \" more\" + \"\\n\";\n }\n }\n\n if (info.suppressed) {\n for (var _i3 = 0; _i3 < info.suppressed.length; _i3++) {\n s += QueryDetail.formatStackTraceHelper(info.suppressed[_i3], info.stack, \"Suppressed: \", linePrefix + \"\\t\");\n }\n }\n\n if (info.cause) {\n s += QueryDetail.formatStackTraceHelper(info.cause, info.stack, \"Caused by: \", linePrefix);\n }\n\n return s;\n }\n }, {\n key: \"countSharedStackFrames\",\n value: function countSharedStackFrames(stack, parentStack) {\n var n = 0;\n var minStackLength = Math.min(stack.length, parentStack.length);\n while (n < minStackLength && stack[stack.length - 1 - n] === parentStack[parentStack.length - 1 - n]) {\n n++;\n }\n return n;\n }\n }, {\n key: \"failureInfoToString\",\n value: function failureInfoToString(t) {\n return t.message !== null ? t.type + \": \" + t.message : t.type;\n }\n }]);\n\n return QueryDetail;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/QueryDetail.jsx?"); +eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.QueryDetail = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nvar _reactable = __webpack_require__(/*! reactable */ \"./node_modules/reactable/lib/reactable.js\");\n\nvar _reactable2 = _interopRequireDefault(_reactable);\n\nvar _utils = __webpack_require__(/*! ../utils */ \"./utils.js\");\n\nvar _QueryHeader = __webpack_require__(/*! ./QueryHeader */ \"./components/QueryHeader.jsx\");\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nvar Table = _reactable2.default.Table,\n Thead = _reactable2.default.Thead,\n Th = _reactable2.default.Th,\n Tr = _reactable2.default.Tr,\n Td = _reactable2.default.Td;\n\nvar TaskList = function (_React$Component) {\n _inherits(TaskList, _React$Component);\n\n function TaskList() {\n _classCallCheck(this, TaskList);\n\n return _possibleConstructorReturn(this, (TaskList.__proto__ || Object.getPrototypeOf(TaskList)).apply(this, arguments));\n }\n\n _createClass(TaskList, [{\n key: \"render\",\n value: function render() {\n var tasks = this.props.tasks;\n\n if (tasks === undefined || tasks.length === 0) {\n return _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"No threads in the selected group\"\n )\n )\n );\n }\n\n var renderedTasks = tasks.map(function (task) {\n if (typeof task.detailedStats === \"undefined\") {\n return _react2.default.createElement(\n Tr,\n { key: task.taskStatus.taskId },\n _react2.default.createElement(\n Td,\n { column: \"id\", value: task.taskStatus.taskId },\n (0, _utils.getTaskIdSuffix)(task.taskStatus.taskId)\n ),\n _react2.default.createElement(\n Td,\n { column: \"host\", value: (0, _utils.getHostname)(task.taskStatus.self) },\n _react2.default.createElement(\n \"a\",\n { href: \"worker.html?\" + task.taskStatus.nodeId, target: \"_blank\" },\n (0, _utils.getHostAndPort)(task.taskStatus.self)\n )\n ),\n _react2.default.createElement(\n Td,\n { column: \"state\" },\n task.taskStatus.state\n ),\n _react2.default.createElement(\n Td,\n { column: \"outputRows\" },\n 0\n ),\n _react2.default.createElement(\n Td,\n { column: \"inputRows\" },\n 0\n ),\n _react2.default.createElement(\n Td,\n { column: \"inputBytes\" },\n 0\n ),\n _react2.default.createElement(\n Td,\n { column: \"intputBytesSec\" },\n 0\n ),\n _react2.default.createElement(\n Td,\n { column: \"splitsPending\" },\n 0\n ),\n _react2.default.createElement(\n 
Td,\n { column: \"splitsRunning\" },\n 0\n ),\n _react2.default.createElement(\n Td,\n { column: \"splitsDone\" },\n task.completedPipelineExecs\n ),\n _react2.default.createElement(\n Td,\n { column: \"elapsedTime\" },\n (0, _utils.formatDurationMs)(task.elapsedTimeMillis)\n ),\n _react2.default.createElement(\n Td,\n { column: \"deliveryTime\" },\n (0, _utils.formatDurationMs)(task.deliveryTimeMillis)\n ),\n _react2.default.createElement(\n Td,\n { column: \"processTime\" },\n (0, _utils.formatDurationMs)(task.processTimeMillis)\n ),\n _react2.default.createElement(\n Td,\n { column: \"dataFinishTime\" },\n (0, _utils.formatDurationMs)(task.pullDataTimeMillis)\n )\n );\n } else {\n return _react2.default.createElement(\n Tr,\n { key: task.taskStatus.taskId },\n _react2.default.createElement(\n Td,\n { column: \"id\", value: task.taskStatus.taskId },\n (0, _utils.getTaskIdSuffix)(task.taskStatus.taskId)\n ),\n _react2.default.createElement(\n Td,\n { column: \"host\", value: (0, _utils.getHostname)(task.taskStatus.self) },\n _react2.default.createElement(\n \"a\",\n { href: \"worker.html?\" + task.taskStatus.nodeId, target: \"_blank\" },\n (0, _utils.getHostAndPort)(task.taskStatus.self)\n )\n ),\n _react2.default.createElement(\n Td,\n { column: \"state\" },\n task.taskStatus.state\n ),\n _react2.default.createElement(\n Td,\n { column: \"outputRows\" },\n (0, _utils.formatCount)(task.detailedStats.outputPositions)\n ),\n _react2.default.createElement(\n Td,\n { column: \"inputRows\" },\n (0, _utils.formatCount)(task.detailedStats.processedInputPositions)\n ),\n _react2.default.createElement(\n Td,\n { column: \"splitsPending\" },\n task.detailedStats.queuedPipelineExecs\n ),\n _react2.default.createElement(\n Td,\n { column: \"splitsRunning\" },\n task.detailedStats.runningPipelineExecs\n ),\n _react2.default.createElement(\n Td,\n { column: \"splitsDone\" },\n task.completedPipelineExecs\n ),\n _react2.default.createElement(\n Td,\n { column: \"elapsedTime\" },\n (0, _utils.formatDurationMs)(task.elapsedTimeMillis)\n ),\n _react2.default.createElement(\n Td,\n { column: \"deliveryTime\" },\n (0, _utils.formatDurationMs)(task.deliveryTimeMillis)\n ),\n _react2.default.createElement(\n Td,\n { column: \"processTime\" },\n (0, _utils.formatDurationMs)(task.processTimeMillis)\n ),\n _react2.default.createElement(\n Td,\n { column: \"dataFinishTime\" },\n (0, _utils.formatDurationMs)(task.pullDataTimeMillis)\n )\n );\n }\n });\n\n return _react2.default.createElement(\n Table,\n { id: \"tasks\", className: \"table table-striped sortable\", sortable: [{\n column: 'id',\n sortFunction: TaskList.compareTaskId\n }, 'host', 'state', 'splitsPending', 'splitsRunning', 'splitsDone', 'outputRows', 'inputRows',\n // 'inputBytes',\n 'elapsedTime', 'deliveryTime', 'processTime', 'dataFinishTime'],\n defaultSort: { column: 'id', direction: 'asc' } },\n _react2.default.createElement(\n Thead,\n null,\n _react2.default.createElement(\n Th,\n { column: \"id\" },\n \"ID\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"host\" },\n \"Host\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"state\" },\n \"State\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"splitsPending\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-pause\", style: _utils.GLYPHICON_HIGHLIGHT,\n \"data-toggle\": \"tooltip\", \"data-placement\": \"top\",\n title: \"Pending splits\" })\n ),\n _react2.default.createElement(\n Th,\n { column: \"splitsRunning\" },\n 
_react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-play\", style: _utils.GLYPHICON_HIGHLIGHT,\n \"data-toggle\": \"tooltip\", \"data-placement\": \"top\",\n title: \"Running splits\" })\n ),\n _react2.default.createElement(\n Th,\n { column: \"splitsDone\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-ok\", style: _utils.GLYPHICON_HIGHLIGHT,\n \"data-toggle\": \"tooltip\", \"data-placement\": \"top\",\n title: \"Completed splits\" })\n ),\n _react2.default.createElement(\n Th,\n { column: \"outputRows\" },\n \"OutputRows\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"inputRows\" },\n \"InputRows\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"elapsedTime\" },\n \"Elapsed\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"deliveryTime\" },\n \"Delivery\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"processTime\" },\n \"Process\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"dataFinishTime\" },\n \"DataFinish\"\n )\n ),\n renderedTasks\n );\n }\n }], [{\n key: \"removeQueryId\",\n value: function removeQueryId(id) {\n var pos = id.indexOf('.');\n if (pos !== -1) {\n return id.substring(pos + 1);\n }\n return id;\n }\n }, {\n key: \"compareTaskId\",\n value: function compareTaskId(taskA, taskB) {\n var taskIdArrA = TaskList.removeQueryId(taskA).split(\".\");\n var taskIdArrB = TaskList.removeQueryId(taskB).split(\".\");\n\n if (taskIdArrA.length > taskIdArrB.length) {\n return 1;\n }\n for (var i = 0; i < taskIdArrA.length; i++) {\n var anum = Number.parseInt(taskIdArrA[i]);\n var bnum = Number.parseInt(taskIdArrB[i]);\n if (anum !== bnum) {\n return anum > bnum ? 1 : -1;\n }\n }\n\n return 0;\n }\n }, {\n key: \"formatState\",\n value: function formatState(state, fullyBlocked) {\n if (fullyBlocked && state === \"RUNNING\") {\n return \"BLOCKED\";\n } else {\n return state;\n }\n }\n }]);\n\n return TaskList;\n}(_react2.default.Component);\n\nvar SplitList = function (_React$Component2) {\n _inherits(SplitList, _React$Component2);\n\n function SplitList() {\n _classCallCheck(this, SplitList);\n\n return _possibleConstructorReturn(this, (SplitList.__proto__ || Object.getPrototypeOf(SplitList)).apply(this, arguments));\n }\n\n _createClass(SplitList, [{\n key: \"render\",\n value: function render() {\n var splits = this.props.splits;\n\n if (splits === undefined || splits.length === 0) {\n return _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"No splits in the selected group\"\n )\n )\n );\n }\n\n var renderedSplits = splits.map(function (split) {\n return _react2.default.createElement(\n Tr,\n { key: split.driverId },\n _react2.default.createElement(\n Td,\n { column: \"id\", value: split.driverId },\n (0, _utils.getFullSplitIdSuffix)(split.driverId)\n ),\n _react2.default.createElement(\n Td,\n { column: \"outputRows\" },\n (0, _utils.formatCount)(split.outputPositions)\n ),\n _react2.default.createElement(\n Td,\n { column: \"inputRows\" },\n (0, _utils.formatCount)(split.inputPositions)\n ),\n _react2.default.createElement(\n Td,\n { column: \"elapsedTime\" },\n (0, _utils.formatDurationMs)(split.endMillis - split.startMillis)\n ),\n _react2.default.createElement(\n Td,\n { column: \"blockTime\" },\n (0, _utils.formatDurationNs)(split.blockedNanos)\n ),\n _react2.default.createElement(\n Td,\n { column: 
\"processTime\" },\n (0, _utils.formatDurationNs)(split.processNanos)\n )\n );\n });\n\n return _react2.default.createElement(\n Table,\n { id: \"splits\", className: \"table table-striped sortable\", sortable: [{\n column: 'id',\n sortFunction: TaskList.compareTaskId\n }, 'host', 'state', 'outputRows', 'inputRows', 'elapsedTime', 'blockTime', 'processTime'],\n defaultSort: { column: 'id', direction: 'asc' } },\n _react2.default.createElement(\n Thead,\n null,\n _react2.default.createElement(\n Th,\n { column: \"id\" },\n \"ID\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"outputRows\" },\n \"OutputRows\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"inputRows\" },\n \"InputRows\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"elapsedTime\" },\n \"Elapsed\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"blockTime\" },\n \"Blocked\"\n ),\n _react2.default.createElement(\n Th,\n { column: \"processTime\" },\n \"Process\"\n )\n ),\n renderedSplits\n );\n }\n }], [{\n key: \"removeQueryId\",\n value: function removeQueryId(id) {\n var pos = id.indexOf('.');\n if (pos !== -1) {\n return id.substring(pos + 1);\n }\n return id;\n }\n }, {\n key: \"compareTaskId\",\n value: function compareTaskId(taskA, taskB) {\n var taskIdArrA = TaskList.removeQueryId(taskA).split(\".\");\n var taskIdArrB = TaskList.removeQueryId(taskB).split(\".\");\n\n if (taskIdArrA.length > taskIdArrB.length) {\n return 1;\n }\n for (var i = 0; i < taskIdArrA.length; i++) {\n var anum = Number.parseInt(taskIdArrA[i]);\n var bnum = Number.parseInt(taskIdArrB[i]);\n if (anum !== bnum) {\n return anum > bnum ? 1 : -1;\n }\n }\n\n return 0;\n }\n }, {\n key: \"formatState\",\n value: function formatState(state, fullyBlocked) {\n if (fullyBlocked && state === \"RUNNING\") {\n return \"BLOCKED\";\n } else {\n return state;\n }\n }\n }]);\n\n return SplitList;\n}(_react2.default.Component);\n\nvar BAR_CHART_WIDTH = 800;\n\nvar BAR_CHART_PROPERTIES = {\n type: 'bar',\n barSpacing: '0',\n height: '80px',\n barColor: '#747F96',\n zeroColor: '#8997B3',\n chartRangeMin: 0,\n tooltipClassname: 'sparkline-tooltip',\n tooltipFormat: 'Task {{offset:offset}} - {{value}}',\n disableHiddenCheck: true\n};\n\nvar HISTOGRAM_WIDTH = 175;\n\nvar HISTOGRAM_PROPERTIES = {\n type: 'bar',\n barSpacing: '0',\n height: '80px',\n barColor: '#747F96',\n zeroColor: '#747F96',\n zeroAxis: true,\n chartRangeMin: 0,\n tooltipClassname: 'sparkline-tooltip',\n tooltipFormat: '{{offset:offset}} -- {{value}} tasks',\n disableHiddenCheck: true\n};\n\nvar StageSummary = function (_React$Component3) {\n _inherits(StageSummary, _React$Component3);\n\n function StageSummary(props) {\n _classCallCheck(this, StageSummary);\n\n var _this3 = _possibleConstructorReturn(this, (StageSummary.__proto__ || Object.getPrototypeOf(StageSummary)).call(this, props));\n\n _this3.state = {\n expanded: false,\n lastRender: null\n };\n return _this3;\n }\n\n _createClass(StageSummary, [{\n key: \"getExpandedIcon\",\n value: function getExpandedIcon() {\n return this.state.expanded ? \"glyphicon-chevron-up\" : \"glyphicon-chevron-down\";\n }\n }, {\n key: \"getExpandedStyle\",\n value: function getExpandedStyle() {\n return this.state.expanded ? 
{} : { display: \"none\" };\n        }\n    }, {\n        key: \"toggleExpanded\",\n        value: function toggleExpanded() {\n            this.setState({\n                expanded: !this.state.expanded\n            });\n        }\n    }, {\n        key: \"componentDidUpdate\",\n        value: function componentDidUpdate() {\n            var stage = this.props.stage;\n            var numTasks = stage.taskStats.length;\n\n            // sort the x-axis\n            stage.taskStats.sort(function (taskA, taskB) {\n                return (0, _utils.getTaskNumber)(taskA.taskStatus.taskId) - (0, _utils.getTaskNumber)(taskB.taskStatus.taskId);\n            });\n\n            // map each task to its parsed scheduled/CPU time (0 when stats are absent)\n            var scheduledTimes = stage.taskStats.map(function (task) {\n                if (typeof task.stats === \"undefined\") {\n                    return (0, _utils.parseDuration)(0);\n                } else {\n                    return (0, _utils.parseDuration)(task.stats.totalScheduledTime);\n                }\n            });\n            var cpuTimes = stage.taskStats.map(function (task) {\n                if (typeof task.stats === \"undefined\") {\n                    return (0, _utils.parseDuration)(0);\n                } else {\n                    return (0, _utils.parseDuration)(task.stats.totalCpuTime);\n                }\n            });\n\n            // prevent multiple calls to componentDidUpdate (resulting from calls to setState or otherwise) within the refresh interval from re-rendering sparklines/charts\n            if (this.state.lastRender === null || Date.now() - this.state.lastRender >= 1000) {\n                var renderTimestamp = Date.now();\n                var stageId = (0, _utils.getStageNumber)(stage.stageId);\n\n                StageSummary.renderHistogram('#scheduled-time-histogram-' + stageId, scheduledTimes, _utils.formatDurationMs);\n                StageSummary.renderHistogram('#cpu-time-histogram-' + stageId, cpuTimes, _utils.formatDurationMs);\n\n                if (this.state.expanded) {\n                    // this needs to be a string otherwise it will also be passed to numberFormatter\n                    var tooltipValueLookups = { 'offset': {} };\n                    for (var i = 0; i < numTasks; i++) {\n                        tooltipValueLookups['offset'][i] = (0, _utils.getStageNumber)(stage.stageId) + \".\" + i;\n                    }\n\n                    var stageBarChartProperties = $.extend({}, BAR_CHART_PROPERTIES, {\n                        barWidth: BAR_CHART_WIDTH / numTasks,\n                        tooltipValueLookups: tooltipValueLookups\n                    });\n\n                    $('#scheduled-time-bar-chart-' + stageId).sparkline(scheduledTimes, $.extend({}, stageBarChartProperties, { numberFormatter: _utils.formatDurationMs }));\n                    $('#cpu-time-bar-chart-' + stageId).sparkline(cpuTimes, $.extend({}, stageBarChartProperties, { numberFormatter: _utils.formatDurationMs }));\n                }\n\n                this.setState({\n                    lastRender: renderTimestamp\n                });\n            }\n        }\n    }, {\n        key: \"render\",\n        value: function render() {\n            var stage = this.props.stage;\n            if (stage === undefined) {\n                return _react2.default.createElement(\n                    \"tr\",\n                    null,\n                    _react2.default.createElement(\n                        \"td\",\n                        null,\n                        \"Information about this stage is unavailable.\"\n                    )\n                );\n            }\n            //\n            // const totalBufferedBytes = stage.taskStats\n            //     .map(task => task.outputBuffers.totalBufferedBytes)\n            //     .reduce((a, b) => a + b, 0);\n\n            var stageId = (0, _utils.getStageNumber)(stage.stageId);\n\n            return _react2.default.createElement(\n                \"tr\",\n                null,\n                _react2.default.createElement(\n                    \"td\",\n                    { className: \"stage-id\" },\n                    _react2.default.createElement(\n                        \"div\",\n                        { className: \"stage-state-color\",\n                            style: { borderLeftColor: (0, _utils.getStageStateColor)(stage) } },\n                        stageId\n                    )\n                ),\n                _react2.default.createElement(\n                    \"td\",\n                    null,\n                    _react2.default.createElement(\n                        \"table\",\n                        { className: \"table single-stage-table\" },\n                        _react2.default.createElement(\n                            \"tbody\",\n                            null,\n                            _react2.default.createElement(\n                                \"tr\",\n                                null,\n                                _react2.default.createElement(\n                                    \"td\",\n                                    null,\n                                    _react2.default.createElement(\n                                        \"table\",\n                                        { className: \"stage-table stage-table-time\" },\n                                        _react2.default.createElement(\n
\"thead\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"th\",\n { className: \"stage-table-stat-title stage-table-stat-header\" },\n \"Time\"\n ),\n _react2.default.createElement(\"th\", null)\n )\n ),\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"Scheduled\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n (0, _utils.formatDurationNs)(stage.stageStats.totalScheduledTimeNanos)\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"Blocked\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n (0, _utils.formatDurationNs)(stage.stageStats.totalBlockedTimeNanos)\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"Wall\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n (0, _utils.formatDurationNs)(stage.stageStats.totalUserTimeNanos)\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"CPU\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n (0, _utils.formatDurationNs)(stage.stageStats.totalCpuTimeNanos)\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"table\",\n { className: \"stage-table stage-table-memory\" },\n _react2.default.createElement(\n \"thead\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"th\",\n { className: \"stage-table-stat-title stage-table-stat-header\" },\n \"Memory\"\n ),\n _react2.default.createElement(\"th\", null)\n )\n ),\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"Cumulative\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n (0, _utils.formatDataSizeBytes)(stage.stageStats.cumulativeMemory / 1000)\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"Current\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n stage.stageStats.totalMemoryReservation\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"Peak\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n stage.stageStats.peakMemoryReservation\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"table\",\n { className: \"stage-table stage-table-tasks\" },\n _react2.default.createElement(\n \"thead\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"th\",\n { className: \"stage-table-stat-title stage-table-stat-header\" },\n \"Tasks\"\n ),\n _react2.default.createElement(\"th\", null)\n )\n ),\n _react2.default.createElement(\n \"tbody\",\n null,\n 
_react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"Pending\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n stage.taskStats.filter(function (task) {\n return task.taskStatus.state === \"PLANNED\";\n }).length\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"Running\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n stage.taskStats.filter(function (task) {\n return task.taskStatus.state === \"RUNNING\";\n }).length\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"Finished\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n stage.taskStats.filter(function (task) {\n return task.taskStatus.state == \"FINISHED\" || task.taskStatus.state == \"CANCELED\" || task.taskStatus.state == \"ABORTED\" || task.taskStatus.state == \"FAILED\";\n }).length\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title\" },\n \"Total\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-text\" },\n stage.taskStats.length\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"table\",\n { className: \"stage-table histogram-table\" },\n _react2.default.createElement(\n \"thead\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"th\",\n { className: \"stage-table-stat-title stage-table-chart-header\" },\n \"Scheduled Time Skew\"\n )\n )\n ),\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"histogram-container\" },\n _react2.default.createElement(\n \"span\",\n { className: \"histogram\", id: \"scheduled-time-histogram-\" + stageId },\n _react2.default.createElement(\"div\", {\n className: \"loader\" })\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"table\",\n { className: \"stage-table histogram-table\" },\n _react2.default.createElement(\n \"thead\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"th\",\n { className: \"stage-table-stat-title stage-table-chart-header\" },\n \"CPU Time Skew\"\n )\n )\n ),\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"histogram-container\" },\n _react2.default.createElement(\n \"span\",\n { className: \"histogram\", id: \"cpu-time-histogram-\" + stageId },\n _react2.default.createElement(\"div\", {\n className: \"loader\" })\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"expand-charts-container\" },\n _react2.default.createElement(\n \"a\",\n { onClick: this.toggleExpanded.bind(this), className: \"expand-charts-button\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon \" + this.getExpandedIcon(), style: _utils.GLYPHICON_HIGHLIGHT,\n \"data-toggle\": \"tooltip\", \"data-placement\": \"top\", title: \"More\" })\n )\n )\n ),\n 
_react2.default.createElement(\n \"tr\",\n { style: this.getExpandedStyle() },\n _react2.default.createElement(\n \"td\",\n { colSpan: \"6\" },\n _react2.default.createElement(\n \"table\",\n { className: \"expanded-chart\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title expanded-chart-title\" },\n \"Task Scheduled Time\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"bar-chart-container\" },\n _react2.default.createElement(\n \"span\",\n { className: \"bar-chart\", id: \"scheduled-time-bar-chart-\" + stageId },\n _react2.default.createElement(\"div\", {\n className: \"loader\" })\n )\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"tr\",\n { style: this.getExpandedStyle() },\n _react2.default.createElement(\n \"td\",\n { colSpan: \"6\" },\n _react2.default.createElement(\n \"table\",\n { className: \"expanded-chart\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"stage-table-stat-title expanded-chart-title\" },\n \"Task CPU Time\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"bar-chart-container\" },\n _react2.default.createElement(\n \"span\",\n { className: \"bar-chart\", id: \"cpu-time-bar-chart-\" + stageId },\n _react2.default.createElement(\"div\", {\n className: \"loader\" })\n )\n )\n )\n )\n )\n )\n )\n )\n )\n )\n );\n }\n }], [{\n key: \"renderHistogram\",\n value: function renderHistogram(histogramId, inputData, numberFormatter) {\n var numBuckets = Math.min(HISTOGRAM_WIDTH, Math.sqrt(inputData.length));\n var dataMin = Math.min.apply(null, inputData);\n var dataMax = Math.max.apply(null, inputData);\n var bucketSize = (dataMax - dataMin) / numBuckets;\n\n var histogramData = [];\n if (bucketSize === 0) {\n histogramData = [inputData.length];\n } else {\n for (var i = 0; i < numBuckets + 1; i++) {\n histogramData.push(0);\n }\n\n for (var _i in inputData) {\n var dataPoint = inputData[_i];\n var bucket = Math.floor((dataPoint - dataMin) / bucketSize);\n histogramData[bucket] = histogramData[bucket] + 1;\n }\n }\n\n var tooltipValueLookups = { 'offset': {} };\n for (var _i2 = 0; _i2 < histogramData.length; _i2++) {\n tooltipValueLookups['offset'][_i2] = numberFormatter(dataMin + _i2 * bucketSize) + \"-\" + numberFormatter(dataMin + (_i2 + 1) * bucketSize);\n }\n\n var stageHistogramProperties = $.extend({}, HISTOGRAM_PROPERTIES, {\n barWidth: HISTOGRAM_WIDTH / histogramData.length,\n tooltipValueLookups: tooltipValueLookups\n });\n $(histogramId).sparkline(histogramData, stageHistogramProperties);\n }\n }]);\n\n return StageSummary;\n}(_react2.default.Component);\n\nvar StageList = function (_React$Component4) {\n _inherits(StageList, _React$Component4);\n\n function StageList() {\n _classCallCheck(this, StageList);\n\n return _possibleConstructorReturn(this, (StageList.__proto__ || Object.getPrototypeOf(StageList)).apply(this, arguments));\n }\n\n _createClass(StageList, [{\n key: \"getStages\",\n value: function getStages(stage) {\n if (stage === undefined || !stage.hasOwnProperty('subStages')) {\n return [];\n }\n\n return [].concat.apply(stage, stage.subStages.map(this.getStages, this));\n }\n }, {\n key: \"render\",\n value: function render() {\n var stages = this.getStages(this.props.outputStage);\n\n if (stages === undefined || stages.length === 0) {\n return 
_react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n \"No stage information available.\"\n )\n );\n }\n\n var renderedStages = stages.map(function (stage) {\n return _react2.default.createElement(StageSummary, { key: stage.stageId, stage: stage });\n });\n\n return _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"table\",\n { className: \"table\", id: \"stage-list\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n renderedStages\n )\n )\n )\n );\n }\n }]);\n\n return StageList;\n}(_react2.default.Component);\n\nvar SMALL_SPARKLINE_PROPERTIES = {\n width: '100%',\n height: '57px',\n fillColor: '#3F4552',\n lineColor: '#747F96',\n spotColor: '#1EDCFF',\n tooltipClassname: 'sparkline-tooltip',\n disableHiddenCheck: true\n};\n\nvar TASK_FILTER = {\n NONE: {\n text: \"None\",\n predicate: function predicate() {\n return false;\n }\n },\n ALL: {\n text: \"All\",\n predicate: function predicate() {\n return true;\n }\n },\n PLANNED: {\n text: \"Planned\",\n predicate: function predicate(state) {\n return state === 'PLANNED';\n }\n },\n RUNNING: {\n text: \"Running\",\n predicate: function predicate(state) {\n return state === 'RUNNING';\n }\n },\n FINISHED: {\n text: \"Finished\",\n predicate: function predicate(state) {\n return state === 'FINISHED';\n }\n },\n FAILED: {\n text: \"Aborted/Canceled/Failed\",\n predicate: function predicate(state) {\n return state === 'FAILED' || state === 'ABORTED' || state === 'CANCELED';\n }\n }\n};\n\nvar QueryDetail = exports.QueryDetail = function (_React$Component5) {\n _inherits(QueryDetail, _React$Component5);\n\n function QueryDetail(props) {\n _classCallCheck(this, QueryDetail);\n\n var _this5 = _possibleConstructorReturn(this, (QueryDetail.__proto__ || Object.getPrototypeOf(QueryDetail)).call(this, props));\n\n _this5.state = {\n query: null,\n lastSnapshotStages: null,\n lastSnapshotTasks: null,\n\n lastScheduledTime: 0,\n lastCpuTime: 0,\n lastRowInput: 0,\n lastByteInput: 0,\n\n scheduledTimeRate: [],\n cpuTimeRate: [],\n rowInputRate: [],\n byteInputRate: [],\n\n reservedMemory: [],\n\n initialized: false,\n ended: false,\n\n lastRefresh: null,\n lastRender: null,\n\n stageRefresh: true,\n taskRefresh: true,\n splitRefresh: true,\n\n taskFilter: TASK_FILTER.NONE,\n splitFilter: TASK_FILTER.NONE\n };\n\n _this5.refreshLoop = _this5.refreshLoop.bind(_this5);\n return _this5;\n }\n\n _createClass(QueryDetail, [{\n key: \"resetTimer\",\n value: function resetTimer() {\n clearTimeout(this.timeoutId);\n // stop refreshing when query finishes or fails\n if (this.state.query === null || !this.state.ended) {\n // task.info-update-interval is set to 3 seconds by default\n this.timeoutId = setTimeout(this.refreshLoop, 5000);\n }\n }\n }, {\n key: \"refreshLoop\",\n value: function refreshLoop() {\n var _this6 = this;\n\n clearTimeout(this.timeoutId); // to stop multiple series of refreshLoop from going on simultaneously\n var queryId = (0, _utils.getFirstParameter)(window.location.search);\n $.get('/v1/query/stats/' + queryId, function (query) {\n var lastSnapshotStages = this.state.lastSnapshotStage;\n if (this.state.stageRefresh) {\n lastSnapshotStages = query.outputStage;\n }\n var lastSnapshotTasks = this.state.lastSnapshotTasks;\n if (this.state.taskRefresh) {\n lastSnapshotTasks = query.outputStage;\n }\n\n var lastRefresh 
= this.state.lastRefresh;\n var lastScheduledTime = this.state.lastScheduledTime;\n var lastCpuTime = this.state.lastCpuTime;\n var lastRowInput = this.state.lastRowInput;\n var lastByteInput = this.state.lastByteInput;\n var alreadyEnded = this.state.ended;\n var nowMillis = Date.now();\n\n this.setState({\n query: query,\n lastSnapshotStage: lastSnapshotStages,\n lastSnapshotTasks: lastSnapshotTasks,\n\n lastScheduledTime: (0, _utils.parseDuration)(query.queryStats.totalScheduledTime),\n lastCpuTime: (0, _utils.parseDuration)(query.queryStats.totalCpuTime),\n lastRowInput: query.queryStats.processedInputPositions,\n lastByteInput: (0, _utils.parseDataSize)(query.queryStats.processedInputDataSize),\n\n initialized: true,\n ended: query.finalQueryInfo,\n\n lastRefresh: nowMillis\n });\n\n // i.e. don't show sparklines if we've already decided not to update or if we don't have one previous measurement\n if (alreadyEnded || lastRefresh === null && query.state === \"RUNNING\") {\n this.resetTimer();\n return;\n }\n\n if (lastRefresh === null) {\n lastRefresh = nowMillis - (0, _utils.parseDuration)(query.queryStats.elapsedTime);\n }\n\n var elapsedSecsSinceLastRefresh = (nowMillis - lastRefresh) / 1000.0;\n if (elapsedSecsSinceLastRefresh >= 0) {\n var currentScheduledTimeRate = ((0, _utils.parseDuration)(query.queryStats.totalScheduledTime) - lastScheduledTime) / (elapsedSecsSinceLastRefresh * 1000);\n var currentCpuTimeRate = ((0, _utils.parseDuration)(query.queryStats.totalCpuTime) - lastCpuTime) / (elapsedSecsSinceLastRefresh * 1000);\n var currentRowInputRate = (query.queryStats.processedInputPositions - lastRowInput) / elapsedSecsSinceLastRefresh;\n var currentByteInputRate = ((0, _utils.parseDataSize)(query.queryStats.processedInputDataSize) - lastByteInput) / elapsedSecsSinceLastRefresh;\n this.setState({\n scheduledTimeRate: (0, _utils.addToHistory)(currentScheduledTimeRate, this.state.scheduledTimeRate),\n cpuTimeRate: (0, _utils.addToHistory)(currentCpuTimeRate, this.state.cpuTimeRate),\n rowInputRate: (0, _utils.addToHistory)(currentRowInputRate, this.state.rowInputRate),\n byteInputRate: (0, _utils.addToHistory)(currentByteInputRate, this.state.byteInputRate),\n reservedMemory: (0, _utils.addToHistory)((0, _utils.parseDataSize)(query.queryStats.totalMemoryReservation), this.state.reservedMemory)\n });\n }\n this.resetTimer();\n }.bind(this)).error(function () {\n _this6.setState({\n initialized: true\n });\n _this6.resetTimer();\n });\n }\n }, {\n key: \"handleTaskRefreshClick\",\n value: function handleTaskRefreshClick() {\n if (this.state.taskRefresh) {\n this.setState({\n taskRefresh: false,\n lastSnapshotTasks: this.state.query.outputStage\n });\n } else {\n this.setState({\n taskRefresh: true\n });\n }\n }\n }, {\n key: \"handleSplitRefreshClick\",\n value: function handleSplitRefreshClick() {\n if (this.state.splitRefresh) {\n this.setState({\n splitRefresh: false\n // lastSnapshotTasks: this.state.query.outputStage,\n });\n } else {\n this.setState({\n splitRefresh: true\n });\n }\n }\n }, {\n key: \"renderTaskRefreshButton\",\n value: function renderTaskRefreshButton() {\n if (this.state.taskRefresh) {\n return _react2.default.createElement(\n \"button\",\n { className: \"btn btn-info live-button\",\n onClick: this.handleTaskRefreshClick.bind(this) },\n \"Auto-Refresh: On\"\n );\n } else {\n return _react2.default.createElement(\n \"button\",\n { className: \"btn btn-info live-button\",\n onClick: this.handleTaskRefreshClick.bind(this) },\n \"Auto-Refresh: Off\"\n );\n }\n 
}\n }, {\n key: \"renderSplitRefreshButton\",\n value: function renderSplitRefreshButton() {\n if (this.state.splitRefresh) {\n return _react2.default.createElement(\n \"button\",\n { className: \"btn btn-info live-button\",\n onClick: this.handleSplitRefreshClick.bind(this) },\n \"Auto-Refresh: On\"\n );\n } else {\n return _react2.default.createElement(\n \"button\",\n { className: \"btn btn-info live-button\",\n onClick: this.handleSplitRefreshClick.bind(this) },\n \"Auto-Refresh: Off\"\n );\n }\n }\n }, {\n key: \"handleStageRefreshClick\",\n value: function handleStageRefreshClick() {\n if (this.state.stageRefresh) {\n this.setState({\n stageRefresh: false,\n lastSnapshotStages: this.state.query.outputStage\n });\n } else {\n this.setState({\n stageRefresh: true\n });\n }\n }\n }, {\n key: \"renderStageRefreshButton\",\n value: function renderStageRefreshButton() {\n if (this.state.stageRefresh) {\n return _react2.default.createElement(\n \"button\",\n { className: \"btn btn-info live-button\",\n onClick: this.handleStageRefreshClick.bind(this) },\n \"Auto-Refresh: On\"\n );\n } else {\n return _react2.default.createElement(\n \"button\",\n { className: \"btn btn-info live-button\",\n onClick: this.handleStageRefreshClick.bind(this) },\n \"Auto-Refresh: Off\"\n );\n }\n }\n }, {\n key: \"renderTaskFilterListItem\",\n value: function renderTaskFilterListItem(taskFilter) {\n return _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"#\", className: this.state.taskFilter === taskFilter ? \"selected\" : \"\",\n onClick: this.handleTaskFilterClick.bind(this, taskFilter) },\n taskFilter.text\n )\n );\n }\n }, {\n key: \"handleTaskFilterClick\",\n value: function handleTaskFilterClick(filter, event) {\n this.setState({\n taskFilter: filter\n });\n event.preventDefault();\n }\n }, {\n key: \"renderSplitFilterListItem\",\n value: function renderSplitFilterListItem(splitFilter) {\n return _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"#\", className: this.state.splitFilter === splitFilter ? 
\"selected\" : \"\",\n onClick: this.handleSplitFilterClick.bind(this, splitFilter) },\n splitFilter.text\n )\n );\n }\n }, {\n key: \"handleSplitFilterClick\",\n value: function handleSplitFilterClick(filter, event) {\n this.setState({\n splitFilter: filter\n });\n event.preventDefault();\n }\n }, {\n key: \"getTasksFromStage\",\n value: function getTasksFromStage(stage) {\n if (stage === undefined || !stage.hasOwnProperty('subStages') || !stage.hasOwnProperty('taskStats')) {\n return [];\n }\n\n return [].concat.apply(stage.taskStats, stage.subStages.map(this.getTasksFromStage, this));\n }\n }, {\n key: \"getSplitsFromStage\",\n value: function getSplitsFromStage(stage) {\n console.log(\"getting splits from stage\");\n var tasks = this.getTasksFromStage(stage);\n var splits = [];\n for (var i = 0; i < tasks.length; i++) {\n splits = splits.concat(tasks[i].detailedStats.driverStats);\n }\n\n return splits;\n }\n }, {\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.refreshLoop();\n }\n }, {\n key: \"componentDidUpdate\",\n value: function componentDidUpdate() {\n // prevent multiple calls to componentDidUpdate (resulting from calls to setState or otherwise) within the refresh interval from re-rendering sparklines/charts\n if (this.state.lastRender === null || Date.now() - this.state.lastRender >= 1000) {\n var renderTimestamp = Date.now();\n $('#scheduled-time-rate-sparkline').sparkline(this.state.scheduledTimeRate, $.extend({}, SMALL_SPARKLINE_PROPERTIES, {\n chartRangeMin: 0,\n numberFormatter: _utils.precisionRound\n }));\n $('#cpu-time-rate-sparkline').sparkline(this.state.cpuTimeRate, $.extend({}, SMALL_SPARKLINE_PROPERTIES, {\n chartRangeMin: 0,\n numberFormatter: _utils.precisionRound\n }));\n $('#row-input-rate-sparkline').sparkline(this.state.rowInputRate, $.extend({}, SMALL_SPARKLINE_PROPERTIES, { numberFormatter: _utils.formatCount }));\n $('#byte-input-rate-sparkline').sparkline(this.state.byteInputRate, $.extend({}, SMALL_SPARKLINE_PROPERTIES, { numberFormatter: _utils.formatDataSize }));\n $('#reserved-memory-sparkline').sparkline(this.state.reservedMemory, $.extend({}, SMALL_SPARKLINE_PROPERTIES, { numberFormatter: _utils.formatDataSize }));\n\n if (this.state.lastRender === null) {\n $('#query').each(function (i, block) {\n hljs.highlightBlock(block);\n });\n }\n\n this.setState({\n lastRender: renderTimestamp\n });\n }\n\n $('[data-toggle=\"tooltip\"]').tooltip();\n new Clipboard('.copy-button');\n }\n }, {\n key: \"renderSplits\",\n value: function renderSplits() {\n var _this7 = this;\n\n if (this.state.lastSnapshotTasks === null) {\n return;\n }\n\n var splits = [];\n if (this.state.splitFilter !== TASK_FILTER.NONE) {\n // TODO split state\n splits = this.getSplitsFromStage(this.state.lastSnapshotTasks).filter(function (split) {\n return _this7.state.splitFilter.predicate(\"\");\n }, this);\n }\n\n return _react2.default.createElement(\n \"div\",\n { className: \"info-container-next\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"h3\",\n { \"class\": \"container-title\" },\n \"Splits\"\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"table\",\n { className: \"header-inline-links\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n 
null,\n _react2.default.createElement(\n \"div\",\n { className: \"input-group-btn text-right\" },\n _react2.default.createElement(\n \"button\",\n { type: \"button\",\n className: \"btn btn-default dropdown-toggle pull-right text-right\",\n \"data-toggle\": \"dropdown\", \"aria-haspopup\": \"true\",\n \"aria-expanded\": \"false\" },\n \"Show: \",\n this.state.splitFilter.text,\n \" \",\n _react2.default.createElement(\"span\", { className: \"caret\" })\n ),\n _react2.default.createElement(\n \"ul\",\n { className: \"dropdown-menu\" },\n this.renderSplitFilterListItem(TASK_FILTER.NONE),\n this.renderSplitFilterListItem(TASK_FILTER.ALL),\n this.renderSplitFilterListItem(TASK_FILTER.FINISHED)\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n \"\\xA0\\xA0\",\n this.renderSplitRefreshButton()\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(SplitList, { key: this.state.query.queryId, splits: splits })\n )\n )\n );\n }\n }, {\n key: \"renderTasks\",\n value: function renderTasks() {\n var _this8 = this;\n\n if (this.state.lastSnapshotTasks === null) {\n return;\n }\n\n var tasks = [];\n if (this.state.taskFilter !== TASK_FILTER.NONE) {\n tasks = this.getTasksFromStage(this.state.lastSnapshotTasks).filter(function (task) {\n return _this8.state.taskFilter.predicate(task.taskStatus.state);\n }, this);\n }\n\n return _react2.default.createElement(\n \"div\",\n { className: \"info-container-next\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"h3\",\n { \"class\": \"container-title\" },\n \"Tasks\"\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"table\",\n { className: \"header-inline-links\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"div\",\n { className: \"input-group-btn text-right\" },\n _react2.default.createElement(\n \"button\",\n { type: \"button\",\n className: \"btn btn-default dropdown-toggle pull-right text-right\",\n \"data-toggle\": \"dropdown\", \"aria-haspopup\": \"true\",\n \"aria-expanded\": \"false\" },\n \"Show: \",\n this.state.taskFilter.text,\n \" \",\n _react2.default.createElement(\"span\", { className: \"caret\" })\n ),\n _react2.default.createElement(\n \"ul\",\n { className: \"dropdown-menu\" },\n this.renderTaskFilterListItem(TASK_FILTER.NONE),\n this.renderTaskFilterListItem(TASK_FILTER.ALL),\n this.renderTaskFilterListItem(TASK_FILTER.PLANNED),\n this.renderTaskFilterListItem(TASK_FILTER.RUNNING),\n this.renderTaskFilterListItem(TASK_FILTER.FINISHED),\n this.renderTaskFilterListItem(TASK_FILTER.FAILED)\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n \"\\xA0\\xA0\",\n this.renderTaskRefreshButton()\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(TaskList, { key: this.state.query.queryId, tasks: tasks })\n )\n )\n );\n }\n }, {\n key: \"renderStages\",\n value: function renderStages() {\n if (this.state.lastSnapshotStage === null) {\n return;\n }\n\n return 
_react2.default.createElement(\n \"div\",\n { className: \"info-container-next\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-9\" },\n _react2.default.createElement(\n \"h3\",\n { \"class\": \"container-title\" },\n \"Stages\"\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-3\" },\n _react2.default.createElement(\n \"table\",\n { className: \"header-inline-links\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n this.renderStageRefreshButton()\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(StageList, { key: this.state.query.queryId, outputStage: this.state.lastSnapshotStage })\n )\n )\n );\n }\n }, {\n key: \"renderWarningInfo\",\n value: function renderWarningInfo() {\n var query = this.state.query;\n if (query.warnings != null && query.warnings.length > 0) {\n return _react2.default.createElement(\n \"div\",\n { className: \"row info-container-next\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h3\",\n null,\n \"Warnings\"\n ),\n _react2.default.createElement(\"hr\", { className: \"h3-hr\" }),\n _react2.default.createElement(\n \"table\",\n { className: \"table\", id: \"warnings-table\" },\n query.warnings.map(function (warning) {\n return _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n warning.warningCode.name\n ),\n _react2.default.createElement(\n \"td\",\n null,\n warning.message\n )\n );\n })\n )\n )\n );\n } else {\n return null;\n }\n }\n }, {\n key: \"renderUserProperties\",\n value: function renderUserProperties() {\n var query = this.state.query;\n\n var properties = [];\n for (var property in query.session.userDefVariables) {\n if (query.session.userDefVariables.hasOwnProperty(property)) {\n properties.push(_react2.default.createElement(\n \"span\",\n null,\n \"- \",\n property + \"=\" + query.session.userDefVariables[property],\n \" \",\n _react2.default.createElement(\"br\", null)\n ));\n }\n }\n\n return properties;\n }\n }, {\n key: \"renderServerProperties\",\n value: function renderServerProperties() {\n var query = this.state.query;\n\n var properties = [];\n for (var property in query.session.serverVariables) {\n if (query.session.serverVariables.hasOwnProperty(property)) {\n properties.push(_react2.default.createElement(\n \"span\",\n null,\n \"- \",\n property + \"=\" + query.session.serverVariables[property],\n \" \",\n _react2.default.createElement(\"br\", null)\n ));\n }\n }\n\n return properties;\n }\n }, {\n key: \"renderFailureInfo\",\n value: function renderFailureInfo() {\n var query = this.state.query;\n if (query.failureInfo) {\n return _react2.default.createElement(\n \"div\",\n { className: \"row info-container-next\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h3\",\n null,\n \"Error Information\"\n ),\n _react2.default.createElement(\"hr\", { className: \"h3-hr\" }),\n _react2.default.createElement(\n \"table\",\n { className: \"table\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n 
_react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Error Type\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.errorType\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Error Code\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n QueryDetail.formatErrorCode(query.errorCode)\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Stack Trace\",\n _react2.default.createElement(\n \"a\",\n { className: \"btn copy-button\", \"data-clipboard-target\": \"#stack-trace\",\n \"data-toggle\": \"tooltip\", \"data-placement\": \"right\", title: \"Copy to clipboard\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-copy\", \"aria-hidden\": \"true\",\n alt: \"Copy to clipboard\" })\n )\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n _react2.default.createElement(\n \"pre\",\n { id: \"stack-trace\" },\n QueryDetail.formatStackTrace(query.failureInfo)\n )\n )\n )\n )\n )\n )\n );\n } else {\n return \"\";\n }\n }\n }, {\n key: \"render\",\n value: function render() {\n var query = this.state.query;\n\n if (query === null || this.state.initialized === false) {\n var label = _react2.default.createElement(\n \"div\",\n { className: \"loader\" },\n \"Loading...\"\n );\n if (this.state.initialized) {\n label = \"Query not found\";\n }\n return _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n label\n )\n )\n );\n }\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(_QueryHeader.QueryHeader, { query: query }),\n _react2.default.createElement(\n \"div\",\n { className: \"row info-container-next\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"h3\",\n { \"class\": \"container-title\" },\n \"Session\"\n ),\n _react2.default.createElement(\"hr\", { className: \"h3-hr\" }),\n _react2.default.createElement(\n \"table\",\n { className: \"table\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"User\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text wrap-text\" },\n query.session.user\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Schema\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.session.schema\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Submission Time\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n (0, _utils.formatShortDateTime)(new Date(query.queryStats.createTime))\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Completion Time\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.queryStats.endTime ? 
(0, _utils.formatShortDateTime)(new Date(query.queryStats.endTime)) : \"\"\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Server Properties\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n this.renderServerProperties()\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"User Properties\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n this.renderUserProperties()\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"h3\",\n { \"class\": \"container-title\" },\n \"Execution\"\n ),\n _react2.default.createElement(\"hr\", { className: \"h3-hr\" }),\n _react2.default.createElement(\n \"table\",\n { className: \"table\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Elapsed Time\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.queryStats.elapsedTime\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Queued Time\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.queryStats.queuedTime\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Execution Time\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.queryStats.executionTime\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Total Plan Time\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.queryStats.totalPlanningTime\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Distributed Plan Time\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.queryStats.distributedPlanningTime\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"row info-container-next\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"h3\",\n { \"class\": \"container-title\" },\n \"Resource Utilization Summary\"\n ),\n _react2.default.createElement(\"hr\", { className: \"h3-hr\" }),\n _react2.default.createElement(\n \"table\",\n { className: \"table\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"CPU Time\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.queryStats.totalCpuTime\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Input Rows\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n (0, 
_utils.formatCount)(query.queryStats.processedInputPositions)\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Input Data\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.queryStats.processedInputDataSize\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Raw Input Rows\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n (0, _utils.formatCount)(query.queryStats.processedInputPositions)\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Raw Input Data\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.queryStats.processedInputDataSize\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Peak Memory\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n query.queryStats.peakMemoryReservation\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { className: \"info-title\" },\n \"Cumulative Memory\"\n ),\n _react2.default.createElement(\n \"td\",\n { className: \"info-text\" },\n (0, _utils.formatDataSizeBytes)(query.queryStats.cumulativeMemory / 1000.0, \"\") + \" seconds\"\n )\n )\n )\n )\n )\n )\n )\n ),\n this.renderFailureInfo(),\n _react2.default.createElement(\n \"div\",\n { className: \"row info-container-next\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h3\",\n { \"class\": \"container-title\" },\n \"Query\",\n _react2.default.createElement(\n \"a\",\n { className: \"btn copy-button\", \"data-clipboard-target\": \"#query-text\", \"data-toggle\": \"tooltip\",\n \"data-placement\": \"right\", title: \"Copy to clipboard\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-copy\", \"aria-hidden\": \"true\", alt: \"Copy to clipboard\" })\n )\n ),\n _react2.default.createElement(\n \"pre\",\n { id: \"query\" },\n _react2.default.createElement(\n \"code\",\n { className: \"lang-sql\", id: \"query-text\" },\n query.query\n )\n )\n )\n ),\n this.renderStages(),\n this.renderTasks(),\n this.renderSplits()\n );\n }\n }], [{\n key: \"formatStackTrace\",\n value: function formatStackTrace(info) {\n return QueryDetail.formatStackTraceHelper(info, [], \"\", \"\");\n }\n }, {\n key: \"formatErrorCode\",\n value: function formatErrorCode(errorCode) {\n if (typeof errorCode === \"undefined\") {\n return \"\";\n } else {\n return errorCode.name + \" (\" + errorCode.code + \")\";\n }\n }\n }, {\n key: \"formatStackTraceHelper\",\n value: function formatStackTraceHelper(info, parentStack, prefix, linePrefix) {\n var s = linePrefix + prefix + QueryDetail.failureInfoToString(info) + \"\\n\";\n\n if (info.stack) {\n var sharedStackFrames = 0;\n if (parentStack !== null) {\n sharedStackFrames = QueryDetail.countSharedStackFrames(info.stack, parentStack);\n }\n\n for (var i = 0; i < info.stack.length - sharedStackFrames; i++) {\n s += linePrefix + \"\\tat \" + info.stack[i] + \"\\n\";\n }\n if (sharedStackFrames !== 0) {\n s += linePrefix + \"\\t... 
\" + sharedStackFrames + \" more\" + \"\\n\";\n }\n }\n\n if (info.suppressed) {\n for (var _i3 = 0; _i3 < info.suppressed.length; _i3++) {\n s += QueryDetail.formatStackTraceHelper(info.suppressed[_i3], info.stack, \"Suppressed: \", linePrefix + \"\\t\");\n }\n }\n\n if (info.cause) {\n s += QueryDetail.formatStackTraceHelper(info.cause, info.stack, \"Caused by: \", linePrefix);\n }\n\n return s;\n }\n }, {\n key: \"countSharedStackFrames\",\n value: function countSharedStackFrames(stack, parentStack) {\n var n = 0;\n var minStackLength = Math.min(stack.length, parentStack.length);\n while (n < minStackLength && stack[stack.length - 1 - n] === parentStack[parentStack.length - 1 - n]) {\n n++;\n }\n return n;\n }\n }, {\n key: \"failureInfoToString\",\n value: function failureInfoToString(t) {\n return t.message !== null ? t.type + \": \" + t.message : t.type;\n }\n }]);\n\n return QueryDetail;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/QueryDetail.jsx?"); /***/ }), @@ -118,7 +118,7 @@ eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n}); /***/ (function(module, exports, __webpack_require__) { "use strict"; -eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.QueryHeader = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nvar _utils = __webpack_require__(/*! ../utils */ \"./utils.js\");\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nvar QueryHeader = exports.QueryHeader = function (_React$Component) {\n _inherits(QueryHeader, _React$Component);\n\n function QueryHeader(props) {\n _classCallCheck(this, QueryHeader);\n\n return _possibleConstructorReturn(this, (QueryHeader.__proto__ || Object.getPrototypeOf(QueryHeader)).call(this, props));\n }\n\n _createClass(QueryHeader, [{\n key: \"renderProgressBar\",\n value: function renderProgressBar() {\n var query = this.props.query;\n var progressBarStyle = {\n width: (0, _utils.getProgressBarPercentage)(query) + \"%\",\n backgroundColor: (0, _utils.getQueryStateColor)(query)\n };\n\n if ((0, _utils.isQueryEnded)(query)) {\n return _react2.default.createElement(\n \"div\",\n { className: \"progress-large\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-bar progress-bar-info\", role: \"progressbar\",\n \"aria-valuenow\": (0, _utils.getProgressBarPercentage)(query), \"aria-valuemin\": \"0\", \"aria-valuemax\": \"100\",\n style: progressBarStyle },\n (0, _utils.getProgressBarTitle)(query)\n )\n );\n }\n\n return _react2.default.createElement(\n \"table\",\n null,\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { width: \"100%\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-large\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-bar progress-bar-info\", role: \"progressbar\",\n \"aria-valuenow\": (0, _utils.getProgressBarPercentage)(query), \"aria-valuemin\": \"0\", \"aria-valuemax\": \"100\",\n style: progressBarStyle },\n (0, _utils.getProgressBarTitle)(query)\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"a\",\n { onClick: function onClick() {\n return $.ajax({\n url: '/v1/query/' + query.queryId + '/killed',\n type: 'PUT',\n data: \"Killed via web UI\"\n });\n }, className: \"btn btn-warning\",\n target: \"_blank\" },\n \"Kill\"\n )\n )\n )\n )\n );\n }\n }, {\n key: \"renderTab\",\n value: function renderTab(path, name) {\n var queryId = this.props.query.queryId;\n if (window.location.pathname.includes(path)) {\n return _react2.default.createElement(\n \"a\",\n { href: path + '?' + queryId, className: \"btn btn-info navbar-btn nav-disabled\" },\n name\n );\n }\n\n return _react2.default.createElement(\n \"a\",\n { href: path + '?' 
+ queryId, className: \"btn btn-info navbar-btn\" },\n name\n );\n }\n }, {\n key: \"render\",\n value: function render() {\n var query = this.props.query;\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"h3\",\n { className: \"query-id\" },\n _react2.default.createElement(\n \"span\",\n { id: \"query-id\" },\n query.queryId\n ),\n _react2.default.createElement(\n \"a\",\n { className: \"btn copy-button\", \"data-clipboard-target\": \"#query-id\", \"data-toggle\": \"tooltip\",\n \"data-placement\": \"right\", title: \"Copy to clipboard\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-copy\", \"aria-hidden\": \"true\", alt: \"Copy to clipboard\" })\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"table\",\n { className: \"header-inline-links\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n this.renderTab(\"query.html\", \"Overview\"),\n \"\\xA0\",\n this.renderTab(\"plan.html\", \"Live Plan\"),\n \"\\xA0\",\n this.renderTab(\"stage.html\", \"Stage Performance\"),\n \"\\xA0\",\n this.renderTab(\"timeline.html\", \"Splits\"),\n \"\\xA0\",\n _react2.default.createElement(\n \"a\",\n { href: \"/v1/query/\" + query.queryId + \"?pretty\",\n className: \"btn btn-info navbar-btn\", target: \"_blank\" },\n \"JSON\"\n )\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\"hr\", { className: \"h2-hr\" }),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n this.renderProgressBar()\n )\n )\n );\n }\n }]);\n\n return QueryHeader;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/QueryHeader.jsx?"); +eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.QueryHeader = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nvar _utils = __webpack_require__(/*! ../utils */ \"./utils.js\");\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? 
call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nvar QueryHeader = exports.QueryHeader = function (_React$Component) {\n _inherits(QueryHeader, _React$Component);\n\n function QueryHeader(props) {\n _classCallCheck(this, QueryHeader);\n\n return _possibleConstructorReturn(this, (QueryHeader.__proto__ || Object.getPrototypeOf(QueryHeader)).call(this, props));\n }\n\n _createClass(QueryHeader, [{\n key: \"renderProgressBar\",\n value: function renderProgressBar() {\n var query = this.props.query;\n var progressBarStyle = {\n width: (0, _utils.getProgressBarPercentage)(query) + \"%\",\n backgroundColor: (0, _utils.getQueryStateColor)(query)\n };\n\n if ((0, _utils.isQueryEnded)(query)) {\n return _react2.default.createElement(\n \"div\",\n { className: \"progress-large\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-bar progress-bar-info\", role: \"progressbar\",\n \"aria-valuenow\": (0, _utils.getProgressBarPercentage)(query), \"aria-valuemin\": \"0\", \"aria-valuemax\": \"100\",\n style: progressBarStyle },\n (0, _utils.getProgressBarTitle)(query)\n )\n );\n }\n\n return _react2.default.createElement(\n \"table\",\n null,\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { width: \"100%\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-large\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-bar progress-bar-info\", role: \"progressbar\",\n \"aria-valuenow\": (0, _utils.getProgressBarPercentage)(query), \"aria-valuemin\": \"0\", \"aria-valuemax\": \"100\",\n style: progressBarStyle },\n (0, _utils.getProgressBarTitle)(query)\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"a\",\n { onClick: function onClick() {\n return $.ajax({\n url: '/v1/query/' + query.queryId + '/killed',\n type: 'PUT',\n data: \"Killed via web UI\"\n });\n }, className: \"btn btn-warning\",\n target: \"_blank\" },\n \"Kill\"\n )\n )\n )\n )\n );\n }\n }, {\n key: \"renderTab\",\n value: function renderTab(path, name) {\n var queryId = this.props.query.queryId;\n if (window.location.pathname.includes(path)) {\n return _react2.default.createElement(\n \"a\",\n { href: path + '?' + queryId, className: \"btn btn-info navbar-btn nav-disabled\" },\n name\n );\n }\n\n return _react2.default.createElement(\n \"a\",\n { href: path + '?' 
+ queryId, className: \"btn btn-info navbar-btn\" },\n name\n );\n }\n }, {\n key: \"render\",\n value: function render() {\n var query = this.props.query;\n return _react2.default.createElement(\n \"div\",\n { className: \"\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"h3\",\n { className: \"query-id\" },\n _react2.default.createElement(\n \"span\",\n { id: \"query-id\" },\n query.queryId\n ),\n _react2.default.createElement(\n \"a\",\n { className: \"btn copy-button\", \"data-clipboard-target\": \"#query-id\", \"data-toggle\": \"tooltip\",\n \"data-placement\": \"right\", title: \"Copy to clipboard\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-copy\", \"aria-hidden\": \"true\", alt: \"Copy to clipboard\" })\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"table\",\n { className: \"header-inline-links\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n this.renderTab(\"query.html\", \"Overview\"),\n \"\\xA0\",\n this.renderTab(\"plan.html\", \"Live Plan\"),\n \"\\xA0\",\n this.renderTab(\"stage.html\", \"Stage Performance\"),\n \"\\xA0\",\n this.renderTab(\"timeline.html\", \"Splits\"),\n \"\\xA0\",\n _react2.default.createElement(\n \"a\",\n { href: \"/v1/query/\" + query.queryId + \"?pretty\",\n className: \"btn btn-info navbar-btn\", target: \"_blank\" },\n \"JSON\"\n )\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\"hr\", { className: \"h2-hr\" }),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n this.renderProgressBar()\n )\n )\n );\n }\n }]);\n\n return QueryHeader;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/QueryHeader.jsx?"); /***/ }), @@ -20831,7 +20831,7 @@ eval("\n\nvar _react = __webpack_require__(/*! 
react */ \"./node_modules/react/i /***/ (function(module, exports, __webpack_require__) { "use strict"; -eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.GLYPHICON_HIGHLIGHT = exports.GLYPHICON_DEFAULT = undefined;\nexports.getQueryStateColor = getQueryStateColor;\nexports.getStageStateColor = getStageStateColor;\nexports.getHumanReadableState = getHumanReadableState;\nexports.getProgressBarPercentage = getProgressBarPercentage;\nexports.getProgressBarTitle = getProgressBarTitle;\nexports.isQueryEnded = isQueryEnded;\nexports.addToHistory = addToHistory;\nexports.addExponentiallyWeightedToHistory = addExponentiallyWeightedToHistory;\nexports.initializeGraph = initializeGraph;\nexports.initializeSvg = initializeSvg;\nexports.truncateString = truncateString;\nexports.getStageNumber = getStageNumber;\nexports.getTaskIdSuffix = getTaskIdSuffix;\nexports.getTaskNumber = getTaskNumber;\nexports.getFirstParameter = getFirstParameter;\nexports.getHostname = getHostname;\nexports.getPort = getPort;\nexports.getHostAndPort = getHostAndPort;\nexports.computeRate = computeRate;\nexports.precisionRound = precisionRound;\nexports.formatDuration = formatDuration;\nexports.formatRows = formatRows;\nexports.formatCount = formatCount;\nexports.formatDataSizeBytes = formatDataSizeBytes;\nexports.formatDataSize = formatDataSize;\nexports.parseDataSize = parseDataSize;\nexports.parseDuration = parseDuration;\nexports.formatShortTime = formatShortTime;\nexports.formatShortDateTime = formatShortDateTime;\n\nvar _dagreD = __webpack_require__(/*! dagre-d3 */ \"./node_modules/dagre-d3/index.js\");\n\nvar dagreD3 = _interopRequireWildcard(_dagreD);\n\nvar _d = __webpack_require__(/*! d3 */ \"./node_modules/d3/index.js\");\n\nvar d3 = _interopRequireWildcard(_d);\n\nfunction _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } }\n\n// Query display\n// =============\n\n/*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\nvar GLYPHICON_DEFAULT = exports.GLYPHICON_DEFAULT = { color: '#1edcff' };\nvar GLYPHICON_HIGHLIGHT = exports.GLYPHICON_HIGHLIGHT = { color: '#999999' };\n\nvar STATE_COLOR_MAP = {\n QUEUED: '#1b8f72',\n RUNNING: '#19874e',\n PLANNING: '#674f98',\n FINISHED: '#1a4629',\n BLOCKED: '#61003b',\n USER_ERROR: '#9a7d66',\n CANCELED: '#858959',\n INSUFFICIENT_RESOURCES: '#7f5b72',\n EXTERNAL_ERROR: '#ca7640',\n UNKNOWN_ERROR: '#943524'\n};\n\nfunction getQueryStateColor(query) {\n switch (query.state) {\n case \"QUEUED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"PLANNING\":\n return STATE_COLOR_MAP.PLANNING;\n case \"STARTING\":\n case \"FINISHING\":\n case \"RUNNING\":\n if (query.queryStats && query.queryStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FAILED\":\n switch (query.errorType) {\n case 
\"USER_ERROR\":\n if (query.errorCode.name === 'USER_CANCELED') {\n return STATE_COLOR_MAP.CANCELED;\n }\n return STATE_COLOR_MAP.USER_ERROR;\n case \"EXTERNAL\":\n return STATE_COLOR_MAP.EXTERNAL_ERROR;\n case \"INSUFFICIENT_RESOURCES\":\n return STATE_COLOR_MAP.INSUFFICIENT_RESOURCES;\n default:\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n }\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n default:\n return STATE_COLOR_MAP.QUEUED;\n }\n}\n\nfunction getStageStateColor(stage) {\n switch (stage.state) {\n case \"PLANNED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"SCHEDULING\":\n case \"SCHEDULING_SPLITS\":\n case \"SCHEDULED\":\n return STATE_COLOR_MAP.PLANNING;\n case \"RUNNING\":\n if (stage.stageStats && stage.stageStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n case \"CANCELED\":\n case \"ABORTED\":\n return STATE_COLOR_MAP.CANCELED;\n case \"FAILED\":\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n default:\n return \"#b5b5b5\";\n }\n}\n\n// This relies on the fact that BasicQueryInfo and QueryInfo have all the fields\n// necessary to compute this string, and that these fields are consistently named.\nfunction getHumanReadableState(query) {\n if (query.state === \"RUNNING\") {\n var title = \"RUNNING\";\n\n if (query.scheduled && query.queryStats.totalDrivers > 0 && query.queryStats.runningDrivers >= 0) {\n if (query.queryStats.fullyBlocked) {\n title = \"BLOCKED\";\n\n if (query.queryStats.blockedReasons && query.queryStats.blockedReasons.length > 0) {\n title += \" (\" + query.queryStats.blockedReasons.join(\", \") + \")\";\n }\n }\n\n if (query.memoryPool === \"reserved\") {\n title += \" (RESERVED)\";\n }\n\n return title;\n }\n }\n\n if (query.state === \"FAILED\") {\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === \"USER_CANCELED\") {\n return \"USER CANCELED\";\n }\n return \"USER ERROR\";\n case \"INTERNAL_ERROR\":\n return \"INTERNAL ERROR\";\n case \"INSUFFICIENT_RESOURCES\":\n return \"INSUFFICIENT RESOURCES\";\n case \"EXTERNAL\":\n return \"EXTERNAL ERROR\";\n }\n }\n\n return query.state;\n}\n\nfunction getProgressBarPercentage(query) {\n var progress = query.queryStats.progressPercentage;\n\n // progress bars should appear 'full' when query progress is not meaningful\n if (!progress || query.state !== \"RUNNING\") {\n return 100;\n }\n\n return Math.round(progress);\n}\n\nfunction getProgressBarTitle(query) {\n if (query.queryStats.progressPercentage && query.state === \"RUNNING\") {\n return getHumanReadableState(query) + \" (\" + getProgressBarPercentage(query) + \"%)\";\n }\n\n return getHumanReadableState(query);\n}\n\nfunction isQueryEnded(query) {\n return [\"FINISHED\", \"FAILED\", \"CANCELED\"].indexOf(query.state) > -1;\n}\n\n// Sparkline-related functions\n// ===========================\n\n// display at most 5 minutes worth of data on the sparklines\nvar MAX_HISTORY = 60 * 5;\n// alpha param of exponentially weighted moving average. 
picked arbitrarily - lower values means more smoothness\nvar MOVING_AVERAGE_ALPHA = 0.2;\n\nfunction addToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n return valuesArray.concat([value]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\nfunction addExponentiallyWeightedToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n\n var movingAverage = value * MOVING_AVERAGE_ALPHA + valuesArray[valuesArray.length - 1] * (1 - MOVING_AVERAGE_ALPHA);\n if (value < 1) {\n movingAverage = 0;\n }\n\n return valuesArray.concat([movingAverage]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\n// DagreD3 Graph-related functions\n// ===============================\n\nfunction initializeGraph() {\n return new dagreD3.graphlib.Graph({ compound: true }).setGraph({ rankdir: 'BT' }).setDefaultEdgeLabel(function () {\n return {};\n });\n}\n\nfunction initializeSvg(selector) {\n var svg = d3.select(selector);\n svg.append(\"g\");\n\n return svg;\n}\n\n// Utility functions\n// =================\n\nfunction truncateString(inputString, length) {\n if (inputString && inputString.length > length) {\n return inputString.substring(0, length) + \"...\";\n }\n\n return inputString;\n}\n\nfunction getStageNumber(stageId) {\n return Number.parseInt(stageId.slice(stageId.indexOf('.') + 1, stageId.length));\n}\n\nfunction getTaskIdSuffix(taskId) {\n return taskId.slice(taskId.indexOf('.') + 1, taskId.length);\n}\n\nfunction getTaskNumber(taskId) {\n return Number.parseInt(getTaskIdSuffix(getTaskIdSuffix(taskId)));\n}\n\nfunction getFirstParameter(searchString) {\n var searchText = searchString.substring(1);\n\n if (searchText.indexOf('&') !== -1) {\n return searchText.substring(0, searchText.indexOf('&'));\n }\n\n return searchText;\n}\n\nfunction getHostname(taskLocation) {\n var hostname = taskLocation.nodeServer.host;\n //\n // var hostname = new URL(url).hostname;\n if (hostname.charAt(0) == '[' && hostname.charAt(hostname.length - 1) == ']') {\n hostname = hostname.substr(1, hostname.length - 2);\n }\n return hostname;\n}\n\nfunction getPort(taskLocation) {\n return taskLocation.nodeServer.httpPort;\n // return new URL(url).port;\n}\n\nfunction getHostAndPort(taskLocation) {\n // var url = new URL(taskLocation.nodeServer.uri);\n return taskLocation.nodeServer.host + \":\" + taskLocation.nodeServer.httpPort;\n}\n\nfunction computeRate(count, ms) {\n if (ms === 0) {\n return 0;\n }\n return count / ms * 1000.0;\n}\n\nfunction precisionRound(n) {\n if (n < 10) {\n return n.toFixed(2);\n }\n if (n < 100) {\n return n.toFixed(1);\n }\n return Math.round(n).toString();\n}\n\nfunction formatDuration(duration) {\n var unit = \"ms\";\n if (duration > 1000) {\n duration /= 1000;\n unit = \"s\";\n }\n if (unit === \"s\" && duration > 60) {\n duration /= 60;\n unit = \"m\";\n }\n if (unit === \"m\" && duration > 60) {\n duration /= 60;\n unit = \"h\";\n }\n if (unit === \"h\" && duration > 24) {\n duration /= 24;\n unit = \"d\";\n }\n if (unit === \"d\" && duration > 7) {\n duration /= 7;\n unit = \"w\";\n }\n return precisionRound(duration) + unit;\n}\n\nfunction formatRows(count) {\n if (count === 1) {\n return \"1 row\";\n }\n\n return formatCount(count) + \" rows\";\n}\n\nfunction formatCount(count) {\n var unit = \"\";\n if (count > 1000) {\n count /= 1000;\n unit = \"K\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"M\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"B\";\n 
}\n if (count > 1000) {\n count /= 1000;\n unit = \"T\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"Q\";\n }\n return precisionRound(count) + unit;\n}\n\nfunction formatDataSizeBytes(size) {\n return formatDataSizeMinUnit(size, \"\");\n}\n\nfunction formatDataSize(size) {\n return formatDataSizeMinUnit(size, \"B\");\n}\n\nfunction formatDataSizeMinUnit(size, minUnit) {\n var unit = minUnit;\n if (size === 0) {\n return \"0\" + unit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"K\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"M\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"G\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"T\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"P\" + minUnit;\n }\n return precisionRound(size) + unit;\n}\n\nfunction parseDataSize(value) {\n var DATA_SIZE_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n var match = DATA_SIZE_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"B\":\n return number;\n case \"kB\":\n return number * Math.pow(2, 10);\n case \"MB\":\n return number * Math.pow(2, 20);\n case \"GB\":\n return number * Math.pow(2, 30);\n case \"TB\":\n return number * Math.pow(2, 40);\n case \"PB\":\n return number * Math.pow(2, 50);\n default:\n return null;\n }\n}\n\nfunction parseDuration(value) {\n var DURATION_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n\n var match = DURATION_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"ns\":\n return number / 1000000.0;\n case \"us\":\n return number / 1000.0;\n case \"ms\":\n return number;\n case \"s\":\n return number * 1000;\n case \"m\":\n return number * 1000 * 60;\n case \"h\":\n return number * 1000 * 60 * 60;\n case \"d\":\n return number * 1000 * 60 * 60 * 24;\n default:\n return null;\n }\n}\n\nfunction formatShortTime(date) {\n var hours = date.getHours() % 12 || 12;\n var minutes = (date.getMinutes() < 10 ? \"0\" : \"\") + date.getMinutes();\n return hours + \":\" + minutes + (date.getHours() >= 12 ? \"pm\" : \"am\");\n}\n\nfunction formatShortDateTime(date) {\n var year = date.getFullYear();\n var month = \"\" + (date.getMonth() + 1);\n var dayOfMonth = \"\" + date.getDate();\n return year + \"-\" + (month[1] ? month : \"0\" + month[0]) + \"-\" + (dayOfMonth[1] ? 
dayOfMonth : \"0\" + dayOfMonth[0]) + \" \" + formatShortTime(date);\n}\n\n//# sourceURL=webpack:///./utils.js?"); +eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.GLYPHICON_HIGHLIGHT = exports.GLYPHICON_DEFAULT = undefined;\nexports.getQueryStateColor = getQueryStateColor;\nexports.getStageStateColor = getStageStateColor;\nexports.getHumanReadableState = getHumanReadableState;\nexports.getProgressBarPercentage = getProgressBarPercentage;\nexports.getProgressBarTitle = getProgressBarTitle;\nexports.isQueryEnded = isQueryEnded;\nexports.addToHistory = addToHistory;\nexports.addExponentiallyWeightedToHistory = addExponentiallyWeightedToHistory;\nexports.initializeGraph = initializeGraph;\nexports.initializeSvg = initializeSvg;\nexports.truncateString = truncateString;\nexports.getStageNumber = getStageNumber;\nexports.getTaskIdSuffix = getTaskIdSuffix;\nexports.getFullSplitIdSuffix = getFullSplitIdSuffix;\nexports.getTaskNumber = getTaskNumber;\nexports.getFirstParameter = getFirstParameter;\nexports.getHostname = getHostname;\nexports.getPort = getPort;\nexports.getHostAndPort = getHostAndPort;\nexports.computeRate = computeRate;\nexports.precisionRound = precisionRound;\nexports.formatDurationMs = formatDurationMs;\nexports.formatDurationNs = formatDurationNs;\nexports.formatNumber = formatNumber;\nexports.formatRows = formatRows;\nexports.formatCount = formatCount;\nexports.formatDataSizeBytes = formatDataSizeBytes;\nexports.formatDataSize = formatDataSize;\nexports.parseDataSize = parseDataSize;\nexports.parseDuration = parseDuration;\nexports.formatShortTime = formatShortTime;\nexports.formatShortDateTime = formatShortDateTime;\n\nvar _dagreD = __webpack_require__(/*! dagre-d3 */ \"./node_modules/dagre-d3/index.js\");\n\nvar dagreD3 = _interopRequireWildcard(_dagreD);\n\nvar _d = __webpack_require__(/*! 
d3 */ \"./node_modules/d3/index.js\");\n\nvar d3 = _interopRequireWildcard(_d);\n\nfunction _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } }\n\n// Query display\n// =============\n\n/*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\nvar GLYPHICON_DEFAULT = exports.GLYPHICON_DEFAULT = { color: '#1edcff' };\nvar GLYPHICON_HIGHLIGHT = exports.GLYPHICON_HIGHLIGHT = { color: '#999999' };\n\nvar STATE_COLOR_MAP = {\n QUEUED: '#7bb3fb',\n RUNNING: '#265cdf',\n PLANNING: '#674f98',\n FINISHED: '#22b647',\n BLOCKED: '#61003b',\n USER_ERROR: '#9a7d66',\n CANCELED: '#858959',\n INSUFFICIENT_RESOURCES: '#7f5b72',\n EXTERNAL_ERROR: '#ca7640',\n UNKNOWN_ERROR: '#943524'\n};\n\nfunction getQueryStateColor(query) {\n switch (query.state) {\n case \"QUEUED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"PLANNING\":\n return STATE_COLOR_MAP.PLANNING;\n case \"STARTING\":\n case \"FINISHING\":\n case \"RUNNING\":\n if (query.queryStats && query.queryStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FAILED\":\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === 'USER_CANCELED') {\n return STATE_COLOR_MAP.CANCELED;\n }\n return STATE_COLOR_MAP.USER_ERROR;\n case \"EXTERNAL\":\n return STATE_COLOR_MAP.EXTERNAL_ERROR;\n case \"INSUFFICIENT_RESOURCES\":\n return STATE_COLOR_MAP.INSUFFICIENT_RESOURCES;\n default:\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n }\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n default:\n return STATE_COLOR_MAP.QUEUED;\n }\n}\n\nfunction getStageStateColor(stage) {\n switch (stage.state) {\n case \"PLANNED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"SCHEDULING\":\n case \"SCHEDULING_SPLITS\":\n case \"SCHEDULED\":\n return STATE_COLOR_MAP.PLANNING;\n case \"RUNNING\":\n if (stage.stageStats && stage.stageStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n case \"CANCELED\":\n case \"ABORTED\":\n return STATE_COLOR_MAP.CANCELED;\n case \"FAILED\":\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n default:\n return \"#b5b5b5\";\n }\n}\n\n// This relies on the fact that BasicQueryInfo and QueryInfo have all the fields\n// necessary to compute this string, and that these fields are consistently named.\nfunction getHumanReadableState(query) {\n if (query.state === \"RUNNING\") {\n var title = \"RUNNING\";\n\n if (query.scheduled && query.queryStats.totalDrivers > 0 && query.queryStats.runningDrivers >= 0) {\n if (query.queryStats.fullyBlocked) {\n title = \"BLOCKED\";\n\n if (query.queryStats.blockedReasons && query.queryStats.blockedReasons.length > 0) {\n title += \" (\" + query.queryStats.blockedReasons.join(\", \") + \")\";\n }\n }\n\n if (query.memoryPool === \"reserved\") {\n title += 
\" (RESERVED)\";\n }\n\n return title;\n }\n }\n\n if (query.state === \"FAILED\") {\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === \"USER_CANCELED\") {\n return \"USER CANCELED\";\n }\n return \"USER ERROR\";\n case \"INTERNAL_ERROR\":\n return \"INTERNAL ERROR\";\n case \"INSUFFICIENT_RESOURCES\":\n return \"INSUFFICIENT RESOURCES\";\n case \"EXTERNAL\":\n return \"EXTERNAL ERROR\";\n }\n }\n\n return query.state;\n}\n\nfunction getProgressBarPercentage(query) {\n var progress = query.queryStats.progressPercentage;\n\n // progress bars should appear 'full' when query progress is not meaningful\n if (!progress || query.state !== \"RUNNING\") {\n return 100;\n }\n\n return Math.round(progress);\n}\n\nfunction getProgressBarTitle(query) {\n if (query.queryStats.progressPercentage && query.state === \"RUNNING\") {\n return getHumanReadableState(query) + \" (\" + getProgressBarPercentage(query) + \"%)\";\n }\n\n return getHumanReadableState(query);\n}\n\nfunction isQueryEnded(query) {\n return [\"FINISHED\", \"FAILED\", \"CANCELED\"].indexOf(query.state) > -1;\n}\n\n// Sparkline-related functions\n// ===========================\n\n// display at most 5 minutes worth of data on the sparklines\nvar MAX_HISTORY = 60 * 5;\n// alpha param of exponentially weighted moving average. picked arbitrarily - lower values means more smoothness\nvar MOVING_AVERAGE_ALPHA = 0.2;\n\nfunction addToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n return valuesArray.concat([value]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\nfunction addExponentiallyWeightedToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n\n var movingAverage = value * MOVING_AVERAGE_ALPHA + valuesArray[valuesArray.length - 1] * (1 - MOVING_AVERAGE_ALPHA);\n if (value < 1) {\n movingAverage = 0;\n }\n\n return valuesArray.concat([movingAverage]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\n// DagreD3 Graph-related functions\n// ===============================\n\nfunction initializeGraph() {\n return new dagreD3.graphlib.Graph({ compound: true }).setGraph({ rankdir: 'BT' }).setDefaultEdgeLabel(function () {\n return {};\n });\n}\n\nfunction initializeSvg(selector) {\n var svg = d3.select(selector);\n svg.append(\"g\");\n\n return svg;\n}\n\n// Utility functions\n// =================\n\nfunction truncateString(inputString, length) {\n if (inputString && inputString.length > length) {\n return inputString.substring(0, length) + \"...\";\n }\n\n return inputString;\n}\n\nfunction getStageNumber(stageId) {\n return Number.parseInt(stageId.slice(stageId.indexOf('.') + 1, stageId.length));\n}\n\nfunction getTaskIdSuffix(taskId) {\n return taskId.slice(taskId.indexOf('.') + 1, taskId.length);\n}\n\nfunction getFullSplitIdSuffix(driverId) {\n return driverId.substring(driverId.indexOf('.') + 1);\n}\n\nfunction getTaskNumber(taskId) {\n return Number.parseInt(getTaskIdSuffix(getTaskIdSuffix(taskId)));\n}\n\nfunction getFirstParameter(searchString) {\n var searchText = searchString.substring(1);\n\n if (searchText.indexOf('&') !== -1) {\n return searchText.substring(0, searchText.indexOf('&'));\n }\n\n return searchText;\n}\n\nfunction getHostname(taskLocation) {\n var hostname = taskLocation.nodeServer.host;\n //\n // var hostname = new URL(url).hostname;\n if (hostname.charAt(0) == '[' && hostname.charAt(hostname.length - 1) == ']') {\n hostname = hostname.substr(1, 
hostname.length - 2);\n }\n return hostname;\n}\n\nfunction getPort(taskLocation) {\n return taskLocation.nodeServer.httpPort;\n // return new URL(url).port;\n}\n\nfunction getHostAndPort(taskLocation) {\n // var url = new URL(taskLocation.nodeServer.uri);\n return taskLocation.nodeServer.host + \":\" + taskLocation.nodeServer.httpPort;\n}\n\nfunction computeRate(count, ms) {\n if (ms === 0) {\n return 0;\n }\n return count / ms * 1000.0;\n}\n\nfunction precisionRound(n) {\n if (n < 10) {\n return n.toFixed(2);\n }\n if (n < 100) {\n return n.toFixed(1);\n }\n return Math.round(n).toString();\n}\n\nfunction formatDurationMs(duration) {\n var unit = \"ms\";\n if (duration > 1000) {\n duration /= 1000;\n unit = \"s\";\n }\n if (unit === \"s\" && duration > 60) {\n duration /= 60;\n unit = \"m\";\n }\n if (unit === \"m\" && duration > 60) {\n duration /= 60;\n unit = \"h\";\n }\n if (unit === \"h\" && duration > 24) {\n duration /= 24;\n unit = \"d\";\n }\n if (unit === \"d\" && duration > 7) {\n duration /= 7;\n unit = \"w\";\n }\n return precisionRound(duration) + unit;\n}\n\nfunction formatDurationNs(duration) {\n var unit = \"ns\";\n if (duration > 1000) {\n duration /= 1000;\n unit = \"us\";\n }\n if (duration > 1000) {\n duration /= 1000;\n unit = \"ms\";\n }\n if (duration > 1000) {\n duration /= 1000;\n unit = \"s\";\n }\n if (unit === \"s\" && duration > 60) {\n duration /= 60;\n unit = \"m\";\n }\n if (unit === \"m\" && duration > 60) {\n duration /= 60;\n unit = \"h\";\n }\n if (unit === \"h\" && duration > 24) {\n duration /= 24;\n unit = \"d\";\n }\n if (unit === \"d\" && duration > 7) {\n duration /= 7;\n unit = \"w\";\n }\n return precisionRound(duration) + unit;\n}\n\nfunction formatNumber(num) {\n return num.toLocaleString();\n}\n\nfunction formatRows(count) {\n if (count === 1) {\n return \"1 row\";\n }\n\n return formatCount(count) + \" rows\";\n}\n\nfunction formatCount(count) {\n var unit = \"\";\n if (count > 1000) {\n count /= 1000;\n unit = \"K\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"M\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"B\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"T\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"Q\";\n }\n return precisionRound(count) + unit;\n}\n\nfunction formatDataSizeBytes(size) {\n return formatDataSizeMinUnit(size, \"\");\n}\n\nfunction formatDataSize(size) {\n return formatDataSizeMinUnit(size, \"B\");\n}\n\nfunction formatDataSizeMinUnit(size, minUnit) {\n var unit = minUnit;\n if (size === 0) {\n return \"0\" + unit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"K\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"M\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"G\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"T\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"P\" + minUnit;\n }\n return precisionRound(size) + unit;\n}\n\nfunction parseDataSize(value) {\n var DATA_SIZE_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n var match = DATA_SIZE_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"B\":\n return number;\n case \"kB\":\n return number * Math.pow(2, 10);\n case \"MB\":\n return number * Math.pow(2, 20);\n case \"GB\":\n return number * Math.pow(2, 30);\n case \"TB\":\n return number * Math.pow(2, 40);\n case \"PB\":\n return number * Math.pow(2, 50);\n default:\n return null;\n }\n}\n\nfunction 
parseDuration(value) {\n var DURATION_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n\n var match = DURATION_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"ns\":\n return number / 1000000.0;\n case \"us\":\n return number / 1000.0;\n case \"ms\":\n return number;\n case \"s\":\n return number * 1000;\n case \"m\":\n return number * 1000 * 60;\n case \"h\":\n return number * 1000 * 60 * 60;\n case \"d\":\n return number * 1000 * 60 * 60 * 24;\n default:\n return null;\n }\n}\n\nfunction formatShortTime(date) {\n var hours = date.getHours() % 12 || 12;\n var minutes = (date.getMinutes() < 10 ? \"0\" : \"\") + date.getMinutes();\n return hours + \":\" + minutes + (date.getHours() >= 12 ? \"pm\" : \"am\");\n}\n\nfunction formatShortDateTime(date) {\n var year = date.getFullYear();\n var month = \"\" + (date.getMonth() + 1);\n var dayOfMonth = \"\" + date.getDate();\n return year + \"-\" + (month[1] ? month : \"0\" + month[0]) + \"-\" + (dayOfMonth[1] ? dayOfMonth : \"0\" + dayOfMonth[0]) + \" \" + formatShortTime(date);\n}\n\n//# sourceURL=webpack:///./utils.js?"); /***/ }) diff --git a/polardbx-executor/src/main/resources/webapp/dist/stage.js b/polardbx-executor/src/main/resources/webapp/dist/stage.js index 3fdc18730..8c3648d16 100644 --- a/polardbx-executor/src/main/resources/webapp/dist/stage.js +++ b/polardbx-executor/src/main/resources/webapp/dist/stage.js @@ -94,7 +94,7 @@ /***/ (function(module, exports, __webpack_require__) { "use strict"; -eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.PageTitle = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\nvar PageTitle = exports.PageTitle = function (_React$Component) {\n _inherits(PageTitle, _React$Component);\n\n function PageTitle(props) {\n _classCallCheck(this, PageTitle);\n\n var _this = _possibleConstructorReturn(this, (PageTitle.__proto__ || Object.getPrototypeOf(PageTitle)).call(this, props));\n\n _this.state = {\n noConnection: false,\n lightShown: false,\n info: null,\n lastSuccess: Date.now(),\n modalShown: false,\n errorText: null\n };\n return _this;\n }\n\n _createClass(PageTitle, [{\n key: \"refreshLoop\",\n value: function refreshLoop() {\n var _this2 = this;\n\n clearTimeout(this.timeoutId);\n fetch(\"/v1/info\").then(function (response) {\n return response.json();\n }).then(function (info) {\n _this2.setState({\n info: info,\n noConnection: false,\n lastSuccess: Date.now(),\n modalShown: false\n });\n //$FlowFixMe$ Bootstrap 3 plugin\n $('#no-connection-modal').modal('hide');\n _this2.resetTimer();\n }).catch(function (error) {\n _this2.setState({\n noConnection: true,\n lightShown: !_this2.state.lightShown,\n errorText: error\n });\n _this2.resetTimer();\n\n if (!_this2.state.modalShown && (error || Date.now() - _this2.state.lastSuccess > 30 * 1000)) {\n //$FlowFixMe$ Bootstrap 3 plugin\n $('#no-connection-modal').modal();\n _this2.setState({ modalShown: true });\n }\n });\n }\n }, {\n key: \"resetTimer\",\n value: function resetTimer() {\n clearTimeout(this.timeoutId);\n this.timeoutId = setTimeout(this.refreshLoop.bind(this), 5000);\n }\n }, {\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.refreshLoop.bind(this)();\n }\n }, {\n key: \"renderStatusLight\",\n value: function renderStatusLight() {\n if (this.state.noConnection) {\n if (this.state.lightShown) {\n return _react2.default.createElement(\"span\", { className: \"status-light status-light-red\", id: \"status-indicator\" });\n } else {\n return _react2.default.createElement(\"span\", { className: \"status-light\", id: \"status-indicator\" });\n }\n }\n return _react2.default.createElement(\"span\", { className: \"status-light status-light-green\", id: \"status-indicator\" });\n }\n }, {\n key: \"render\",\n value: function render() {\n var info = this.state.info;\n if (!info) {\n return null;\n }\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"nav\",\n { className: \"navbar\" },\n _react2.default.createElement(\n \"div\",\n { className: \"container-fluid\" },\n _react2.default.createElement(\n \"div\",\n { className: \"navbar-header\" },\n _react2.default.createElement(\n \"table\",\n null,\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"/ui/\" },\n _react2.default.createElement(\"img\", { src: \"assets/logo.png\" })\n )\n ),\n 
_react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-brand\" },\n this.props.title\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { id: \"navbar\", className: \"navbar-collapse collapse\" },\n _react2.default.createElement(\n \"ul\",\n { className: \"nav navbar-nav navbar-right\" },\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Version\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\",\n id: \"version-number\" },\n info.nodeVersion.version\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Environment\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\", id: \"environment\" },\n info.environment\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"CoordinatorId\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\", id: \"workerId\" },\n info.workerId\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Uptime\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { \"data-toggle\": \"tooltip\", \"data-placement\": \"bottom\", title: \"Connection status\" },\n this.renderStatusLight()\n ),\n \"\\xA0\",\n _react2.default.createElement(\n \"span\",\n { className: \"text\", id: \"uptime\" },\n info.uptime\n )\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { id: \"no-connection-modal\", className: \"modal\", tabIndex: \"-1\", role: \"dialog\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-dialog modal-sm\", role: \"document\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-content\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"h4\",\n null,\n \"Unable to connect to server\"\n ),\n _react2.default.createElement(\n \"p\",\n null,\n this.state.errorText ? 
\"Error: \" + this.state.errorText : null\n )\n )\n )\n )\n )\n )\n );\n }\n }]);\n\n return PageTitle;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/PageTitle.jsx?"); +eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.PageTitle = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\nvar PageTitle = exports.PageTitle = function (_React$Component) {\n _inherits(PageTitle, _React$Component);\n\n function PageTitle(props) {\n _classCallCheck(this, PageTitle);\n\n var _this = _possibleConstructorReturn(this, (PageTitle.__proto__ || Object.getPrototypeOf(PageTitle)).call(this, props));\n\n _this.state = {\n noConnection: false,\n lightShown: false,\n info: null,\n lastSuccess: Date.now(),\n modalShown: false,\n errorText: null\n };\n return _this;\n }\n\n _createClass(PageTitle, [{\n key: \"refreshLoop\",\n value: function refreshLoop() {\n var _this2 = this;\n\n clearTimeout(this.timeoutId);\n fetch(\"/v1/info\").then(function (response) {\n return response.json();\n }).then(function (info) {\n _this2.setState({\n info: info,\n noConnection: false,\n lastSuccess: Date.now(),\n modalShown: false\n });\n //$FlowFixMe$ Bootstrap 3 plugin\n $('#no-connection-modal').modal('hide');\n _this2.resetTimer();\n }).catch(function (error) {\n _this2.setState({\n noConnection: true,\n lightShown: !_this2.state.lightShown,\n errorText: error\n });\n _this2.resetTimer();\n\n if (!_this2.state.modalShown && (error || Date.now() - _this2.state.lastSuccess > 30 * 1000)) {\n //$FlowFixMe$ Bootstrap 3 plugin\n $('#no-connection-modal').modal();\n _this2.setState({ modalShown: true });\n }\n });\n }\n }, {\n key: \"resetTimer\",\n value: function resetTimer() {\n clearTimeout(this.timeoutId);\n this.timeoutId = setTimeout(this.refreshLoop.bind(this), 5000);\n }\n }, {\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.refreshLoop.bind(this)();\n }\n }, {\n key: \"renderStatusLight\",\n value: function renderStatusLight() {\n if (this.state.noConnection) {\n if (this.state.lightShown) {\n return _react2.default.createElement(\"span\", { className: \"status-light status-light-red\", id: \"status-indicator\" });\n } else {\n return _react2.default.createElement(\"span\", { className: \"status-light\", id: \"status-indicator\" });\n }\n }\n return _react2.default.createElement(\"span\", { className: \"status-light status-light-green\", id: \"status-indicator\" });\n }\n }, {\n key: \"render\",\n value: function render() {\n var info = this.state.info;\n if (!info) {\n return null;\n }\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"nav\",\n { className: \"navbar\" },\n _react2.default.createElement(\n \"div\",\n { className: \"container-fluid\" },\n _react2.default.createElement(\n \"div\",\n { className: \"navbar-header\" },\n _react2.default.createElement(\n \"table\",\n null,\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"/ui/\" },\n _react2.default.createElement(\"img\", { src: \"assets/favicon.png\" })\n )\n 
),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-brand\" },\n this.props.title\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { id: \"navbar\", className: \"navbar-collapse collapse\" },\n _react2.default.createElement(\n \"ul\",\n { className: \"nav navbar-nav navbar-right\" },\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Version\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\",\n id: \"version-number\" },\n info.nodeVersion.version\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Environment\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\", id: \"environment\" },\n \"PolarDB-X\"\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Node\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\", id: \"workerId\" },\n info.workerId\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Uptime\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { \"data-toggle\": \"tooltip\", \"data-placement\": \"bottom\", title: \"Connection status\" },\n this.renderStatusLight()\n ),\n \"\\xA0\",\n _react2.default.createElement(\n \"span\",\n { className: \"text\", id: \"uptime\" },\n info.uptime\n )\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { id: \"no-connection-modal\", className: \"modal\", tabIndex: \"-1\", role: \"dialog\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-dialog modal-sm\", role: \"document\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-content\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"h4\",\n null,\n \"Unable to connect to server\"\n ),\n _react2.default.createElement(\n \"p\",\n null,\n this.state.errorText ? 
\"Error: \" + this.state.errorText : null\n )\n )\n )\n )\n )\n )\n );\n }\n }]);\n\n return PageTitle;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/PageTitle.jsx?"); /***/ }), @@ -106,7 +106,7 @@ eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n}); /***/ (function(module, exports, __webpack_require__) { "use strict"; -eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.QueryHeader = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nvar _utils = __webpack_require__(/*! ../utils */ \"./utils.js\");\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nvar QueryHeader = exports.QueryHeader = function (_React$Component) {\n _inherits(QueryHeader, _React$Component);\n\n function QueryHeader(props) {\n _classCallCheck(this, QueryHeader);\n\n return _possibleConstructorReturn(this, (QueryHeader.__proto__ || Object.getPrototypeOf(QueryHeader)).call(this, props));\n }\n\n _createClass(QueryHeader, [{\n key: \"renderProgressBar\",\n value: function renderProgressBar() {\n var query = this.props.query;\n var progressBarStyle = {\n width: (0, _utils.getProgressBarPercentage)(query) + \"%\",\n backgroundColor: (0, _utils.getQueryStateColor)(query)\n };\n\n if ((0, _utils.isQueryEnded)(query)) {\n return _react2.default.createElement(\n \"div\",\n { className: \"progress-large\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-bar progress-bar-info\", role: \"progressbar\",\n \"aria-valuenow\": (0, _utils.getProgressBarPercentage)(query), \"aria-valuemin\": \"0\", \"aria-valuemax\": \"100\",\n style: progressBarStyle },\n (0, _utils.getProgressBarTitle)(query)\n )\n );\n }\n\n return _react2.default.createElement(\n \"table\",\n null,\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { width: \"100%\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-large\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-bar progress-bar-info\", role: \"progressbar\",\n \"aria-valuenow\": (0, _utils.getProgressBarPercentage)(query), \"aria-valuemin\": \"0\", \"aria-valuemax\": \"100\",\n style: progressBarStyle },\n (0, _utils.getProgressBarTitle)(query)\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"a\",\n { onClick: function onClick() {\n return $.ajax({\n url: '/v1/query/' + query.queryId + '/killed',\n type: 'PUT',\n data: \"Killed via web UI\"\n });\n }, className: \"btn btn-warning\",\n target: \"_blank\" },\n \"Kill\"\n )\n )\n )\n )\n );\n }\n }, {\n key: \"renderTab\",\n value: function renderTab(path, name) {\n var queryId = this.props.query.queryId;\n if (window.location.pathname.includes(path)) {\n return _react2.default.createElement(\n \"a\",\n { href: path + '?' + queryId, className: \"btn btn-info navbar-btn nav-disabled\" },\n name\n );\n }\n\n return _react2.default.createElement(\n \"a\",\n { href: path + '?' 
+ queryId, className: \"btn btn-info navbar-btn\" },\n name\n );\n }\n }, {\n key: \"render\",\n value: function render() {\n var query = this.props.query;\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"h3\",\n { className: \"query-id\" },\n _react2.default.createElement(\n \"span\",\n { id: \"query-id\" },\n query.queryId\n ),\n _react2.default.createElement(\n \"a\",\n { className: \"btn copy-button\", \"data-clipboard-target\": \"#query-id\", \"data-toggle\": \"tooltip\",\n \"data-placement\": \"right\", title: \"Copy to clipboard\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-copy\", \"aria-hidden\": \"true\", alt: \"Copy to clipboard\" })\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"table\",\n { className: \"header-inline-links\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n this.renderTab(\"query.html\", \"Overview\"),\n \"\\xA0\",\n this.renderTab(\"plan.html\", \"Live Plan\"),\n \"\\xA0\",\n this.renderTab(\"stage.html\", \"Stage Performance\"),\n \"\\xA0\",\n this.renderTab(\"timeline.html\", \"Splits\"),\n \"\\xA0\",\n _react2.default.createElement(\n \"a\",\n { href: \"/v1/query/\" + query.queryId + \"?pretty\",\n className: \"btn btn-info navbar-btn\", target: \"_blank\" },\n \"JSON\"\n )\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\"hr\", { className: \"h2-hr\" }),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n this.renderProgressBar()\n )\n )\n );\n }\n }]);\n\n return QueryHeader;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/QueryHeader.jsx?"); +eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.QueryHeader = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nvar _utils = __webpack_require__(/*! ../utils */ \"./utils.js\");\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? 
call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nvar QueryHeader = exports.QueryHeader = function (_React$Component) {\n _inherits(QueryHeader, _React$Component);\n\n function QueryHeader(props) {\n _classCallCheck(this, QueryHeader);\n\n return _possibleConstructorReturn(this, (QueryHeader.__proto__ || Object.getPrototypeOf(QueryHeader)).call(this, props));\n }\n\n _createClass(QueryHeader, [{\n key: \"renderProgressBar\",\n value: function renderProgressBar() {\n var query = this.props.query;\n var progressBarStyle = {\n width: (0, _utils.getProgressBarPercentage)(query) + \"%\",\n backgroundColor: (0, _utils.getQueryStateColor)(query)\n };\n\n if ((0, _utils.isQueryEnded)(query)) {\n return _react2.default.createElement(\n \"div\",\n { className: \"progress-large\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-bar progress-bar-info\", role: \"progressbar\",\n \"aria-valuenow\": (0, _utils.getProgressBarPercentage)(query), \"aria-valuemin\": \"0\", \"aria-valuemax\": \"100\",\n style: progressBarStyle },\n (0, _utils.getProgressBarTitle)(query)\n )\n );\n }\n\n return _react2.default.createElement(\n \"table\",\n null,\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n { width: \"100%\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-large\" },\n _react2.default.createElement(\n \"div\",\n { className: \"progress-bar progress-bar-info\", role: \"progressbar\",\n \"aria-valuenow\": (0, _utils.getProgressBarPercentage)(query), \"aria-valuemin\": \"0\", \"aria-valuemax\": \"100\",\n style: progressBarStyle },\n (0, _utils.getProgressBarTitle)(query)\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"a\",\n { onClick: function onClick() {\n return $.ajax({\n url: '/v1/query/' + query.queryId + '/killed',\n type: 'PUT',\n data: \"Killed via web UI\"\n });\n }, className: \"btn btn-warning\",\n target: \"_blank\" },\n \"Kill\"\n )\n )\n )\n )\n );\n }\n }, {\n key: \"renderTab\",\n value: function renderTab(path, name) {\n var queryId = this.props.query.queryId;\n if (window.location.pathname.includes(path)) {\n return _react2.default.createElement(\n \"a\",\n { href: path + '?' + queryId, className: \"btn btn-info navbar-btn nav-disabled\" },\n name\n );\n }\n\n return _react2.default.createElement(\n \"a\",\n { href: path + '?' 
+ queryId, className: \"btn btn-info navbar-btn\" },\n name\n );\n }\n }, {\n key: \"render\",\n value: function render() {\n var query = this.props.query;\n return _react2.default.createElement(\n \"div\",\n { className: \"\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"h3\",\n { className: \"query-id\" },\n _react2.default.createElement(\n \"span\",\n { id: \"query-id\" },\n query.queryId\n ),\n _react2.default.createElement(\n \"a\",\n { className: \"btn copy-button\", \"data-clipboard-target\": \"#query-id\", \"data-toggle\": \"tooltip\",\n \"data-placement\": \"right\", title: \"Copy to clipboard\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-copy\", \"aria-hidden\": \"true\", alt: \"Copy to clipboard\" })\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"table\",\n { className: \"header-inline-links\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n this.renderTab(\"query.html\", \"Overview\"),\n \"\\xA0\",\n this.renderTab(\"plan.html\", \"Live Plan\"),\n \"\\xA0\",\n this.renderTab(\"stage.html\", \"Stage Performance\"),\n \"\\xA0\",\n this.renderTab(\"timeline.html\", \"Splits\"),\n \"\\xA0\",\n _react2.default.createElement(\n \"a\",\n { href: \"/v1/query/\" + query.queryId + \"?pretty\",\n className: \"btn btn-info navbar-btn\", target: \"_blank\" },\n \"JSON\"\n )\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\"hr\", { className: \"h2-hr\" }),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n this.renderProgressBar()\n )\n )\n );\n }\n }]);\n\n return QueryHeader;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/QueryHeader.jsx?"); /***/ }), @@ -118,7 +118,7 @@ eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n}); /***/ (function(module, exports, __webpack_require__) { "use strict"; -eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.StageDetail = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nvar _server = __webpack_require__(/*! react-dom/server */ \"./node_modules/react-dom/server.browser.js\");\n\nvar _server2 = _interopRequireDefault(_server);\n\nvar _dagreD = __webpack_require__(/*! dagre-d3 */ \"./node_modules/dagre-d3/index.js\");\n\nvar dagreD3 = _interopRequireWildcard(_dagreD);\n\nvar _d = __webpack_require__(/*! d3 */ \"./node_modules/d3/index.js\");\n\nvar d3 = _interopRequireWildcard(_d);\n\nvar _utils = __webpack_require__(/*! 
../utils */ \"./utils.js\");\n\nvar _QueryHeader = __webpack_require__(/*! ./QueryHeader */ \"./components/QueryHeader.jsx\");\n\nfunction _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } }\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nfunction getTotalWallTime(operator) {\n return (0, _utils.parseDuration)(operator.addInputWall) + (0, _utils.parseDuration)(operator.getOutputWall) + (0, _utils.parseDuration)(operator.finishWall) + (0, _utils.parseDuration)(operator.blockedWall);\n}\n\nvar OperatorSummary = function (_React$Component) {\n _inherits(OperatorSummary, _React$Component);\n\n function OperatorSummary() {\n _classCallCheck(this, OperatorSummary);\n\n return _possibleConstructorReturn(this, (OperatorSummary.__proto__ || Object.getPrototypeOf(OperatorSummary)).apply(this, arguments));\n }\n\n _createClass(OperatorSummary, [{\n key: \"render\",\n value: function render() {\n var operator = this.props.operator;\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"div\",\n { className: \"highlight-row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"header-row\" },\n operator.operatorType\n )\n ),\n _react2.default.createElement(\n \"table\",\n { className: \"table\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"Output\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n (0, _utils.formatCount)(operator.outputRowCount) + \" rows (\" + (0, _utils.formatDataSize)(operator.outputBytes) + \")\"\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"Startup Time\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n (0, 
_utils.formatDuration)(operator.startupDuration * 1000)\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"Run Time\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n (0, _utils.formatDuration)(operator.duration * 1000)\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"Memory\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n (0, _utils.formatDataSize)(operator.memory)\n )\n )\n )\n )\n );\n }\n }]);\n\n return OperatorSummary;\n}(_react2.default.Component);\n\nvar BAR_CHART_PROPERTIES = {\n type: 'bar',\n barSpacing: '0',\n height: '80px',\n barColor: '#747F96',\n zeroColor: '#8997B3',\n tooltipClassname: 'sparkline-tooltip',\n tooltipFormat: 'Task {{offset:offset}} - {{value}}',\n disableHiddenCheck: true\n};\n\nvar OperatorStatistic = function (_React$Component2) {\n _inherits(OperatorStatistic, _React$Component2);\n\n function OperatorStatistic() {\n _classCallCheck(this, OperatorStatistic);\n\n return _possibleConstructorReturn(this, (OperatorStatistic.__proto__ || Object.getPrototypeOf(OperatorStatistic)).apply(this, arguments));\n }\n\n _createClass(OperatorStatistic, [{\n key: \"componentDidMount\",\n value: function componentDidMount() {\n var operators = this.props.operators;\n var statistic = operators.map(this.props.supplier);\n var numTasks = operators.length;\n\n var tooltipValueLookups = { 'offset': {} };\n for (var i = 0; i < numTasks; i++) {\n tooltipValueLookups['offset'][i] = \"\" + i;\n }\n\n var stageBarChartProperties = $.extend({}, BAR_CHART_PROPERTIES, {\n barWidth: 800 / numTasks,\n tooltipValueLookups: tooltipValueLookups\n });\n $('#' + this.props.id).sparkline(statistic, $.extend({}, stageBarChartProperties, { numberFormatter: this.props.renderer }));\n }\n }, {\n key: \"render\",\n value: function render() {\n return _react2.default.createElement(\n \"div\",\n { className: \"row operator-statistic\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-2 italic-uppercase operator-statistic-title\" },\n this.props.name\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-10\" },\n _react2.default.createElement(\"span\", { className: \"bar-chart\", id: this.props.id })\n )\n );\n }\n }]);\n\n return OperatorStatistic;\n}(_react2.default.Component);\n\nvar OperatorDetail = function (_React$Component3) {\n _inherits(OperatorDetail, _React$Component3);\n\n function OperatorDetail(props) {\n _classCallCheck(this, OperatorDetail);\n\n var _this3 = _possibleConstructorReturn(this, (OperatorDetail.__proto__ || Object.getPrototypeOf(OperatorDetail)).call(this, props));\n\n _this3.state = {\n selectedStatistics: _this3.getInitialStatistics()\n };\n return _this3;\n }\n\n _createClass(OperatorDetail, [{\n key: \"getInitialStatistics\",\n value: function getInitialStatistics() {\n return [{\n name: \"Total Wall Time\",\n id: \"totalWallTime\",\n supplier: getTotalWallTime,\n renderer: _utils.formatDuration\n }, {\n name: \"Input Rows\",\n id: \"inputPositions\",\n supplier: function supplier(operator) {\n return operator.inputPositions;\n },\n renderer: _utils.formatCount\n }, {\n name: \"Input Data Size\",\n id: \"inputDataSize\",\n supplier: function supplier(operator) {\n return (0, _utils.parseDataSize)(operator.inputDataSize);\n },\n renderer: _utils.formatDataSize\n }, {\n name: \"Output Rows\",\n id: \"outputPositions\",\n supplier: function supplier(operator) {\n return 
operator.outputPositions;\n },\n renderer: _utils.formatCount\n }, {\n name: \"Output Data Size\",\n id: \"outputDataSize\",\n supplier: function supplier(operator) {\n return (0, _utils.parseDataSize)(operator.outputDataSize);\n },\n renderer: _utils.formatDataSize\n }];\n }\n }, {\n key: \"getOperatorTasks\",\n value: function getOperatorTasks() {\n // sort the x-axis\n var tasks = this.props.tasks.sort(function (taskA, taskB) {\n return (0, _utils.getTaskNumber)(taskA.taskStatus.taskId) - (0, _utils.getTaskNumber)(taskB.taskStatus.taskId);\n });\n\n var operatorSummary = this.props.operator;\n\n var operatorTasks = [];\n tasks.forEach(function (task) {\n task.stats.pipelines.forEach(function (pipeline) {\n if (pipeline.pipelineId === operatorSummary.pipelineId) {\n pipeline.operatorSummaries.forEach(function (operator) {\n if (operatorSummary.operatorId === operator.operatorId) {\n operatorTasks.push(operator);\n }\n });\n }\n });\n });\n\n return operatorTasks;\n }\n }, {\n key: \"render\",\n value: function render() {\n var operator = this.props.operator;\n var operatorTasks = this.getOperatorTasks();\n var totalWallTime = getTotalWallTime(operator);\n\n var rowInputRate = totalWallTime === 0 ? 0 : 1.0 * operator.inputPositions / totalWallTime;\n var byteInputRate = totalWallTime === 0 ? 0 : 1.0 * (0, _utils.parseDataSize)(operator.inputDataSize) / (totalWallTime / 1000.0);\n\n var rowOutputRate = totalWallTime === 0 ? 0 : 1.0 * operator.outputPositions / totalWallTime;\n var byteOutputRate = totalWallTime === 0 ? 0 : 1.0 * (0, _utils.parseDataSize)(operator.outputDataSize) / (totalWallTime / 1000.0);\n\n return _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-header\" },\n _react2.default.createElement(\n \"button\",\n { type: \"button\", className: \"close\", \"data-dismiss\": \"modal\", \"aria-label\": \"Close\" },\n _react2.default.createElement(\n \"span\",\n {\n \"aria-hidden\": \"true\" },\n \"\\xD7\"\n )\n ),\n _react2.default.createElement(\n \"h3\",\n null,\n _react2.default.createElement(\n \"small\",\n null,\n \"Pipeline \",\n operator.pipelineId\n ),\n _react2.default.createElement(\"br\", null),\n operator.operatorType\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"table\",\n { className: \"table\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"Input\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n (0, _utils.formatCount)(operator.inputPositions) + \" rows (\" + operator.inputDataSize + \")\"\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"Input Rate\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n (0, _utils.formatCount)(rowInputRate) + \" rows/s (\" + (0, _utils.formatDataSize)(byteInputRate) + \"/s)\"\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"Output\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n (0, _utils.formatCount)(operator.outputPositions) + \" rows (\" + operator.outputDataSize + \")\"\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n 
_react2.default.createElement(\n \"td\",\n null,\n \"Output Rate\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n (0, _utils.formatCount)(rowOutputRate) + \" rows/s (\" + (0, _utils.formatDataSize)(byteOutputRate) + \"/s)\"\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"table\",\n { className: \"table\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"Wall Time\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n (0, _utils.formatDuration)(totalWallTime)\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"Blocked\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n (0, _utils.formatDuration)((0, _utils.parseDuration)(operator.blockedWall))\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"Drivers\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n operator.totalDrivers\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"Tasks\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n operatorTasks.length\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"row font-white\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-2 italic-uppercase\" },\n _react2.default.createElement(\n \"strong\",\n null,\n \"Statistic\"\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-10 italic-uppercase\" },\n _react2.default.createElement(\n \"strong\",\n null,\n \"Tasks\"\n )\n )\n ),\n this.state.selectedStatistics.map(function (statistic) {\n return _react2.default.createElement(OperatorStatistic, {\n key: statistic.id,\n id: statistic.id,\n name: statistic.name,\n supplier: statistic.supplier,\n renderer: statistic.renderer,\n operators: operatorTasks });\n }.bind(this)),\n _react2.default.createElement(\"p\", null),\n _react2.default.createElement(\"p\", null)\n )\n );\n }\n }]);\n\n return OperatorDetail;\n}(_react2.default.Component);\n\nvar StageOperatorGraph = function (_React$Component4) {\n _inherits(StageOperatorGraph, _React$Component4);\n\n function StageOperatorGraph() {\n _classCallCheck(this, StageOperatorGraph);\n\n return _possibleConstructorReturn(this, (StageOperatorGraph.__proto__ || Object.getPrototypeOf(StageOperatorGraph)).apply(this, arguments));\n }\n\n _createClass(StageOperatorGraph, [{\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.updateD3Graph();\n }\n }, {\n key: \"componentDidUpdate\",\n value: function componentDidUpdate() {\n this.updateD3Graph();\n }\n }, {\n key: \"handleOperatorClick\",\n value: function handleOperatorClick(operatorCssId) {\n $('#operator-detail-modal').modal();\n\n var pipelineId = parseInt(operatorCssId.split('-')[1]);\n var operatorId = parseInt(operatorCssId.split('-')[2]);\n var stage = this.props.stage;\n\n var operatorStageSummary = null;\n var operatorSummaries = stage.stageStats.operatorSummaries;\n for (var i = 0; i < operatorSummaries.length; i++) {\n if (operatorSummaries[i].pipelineId === pipelineId && operatorSummaries[i].operatorId === operatorId) {\n operatorStageSummary = operatorSummaries[i];\n }\n }\n\n // ReactDOM.render(,\n // document.getElementById('operator-detail'));\n }\n }, {\n key: 
\"computeOperatorGraphs\",\n value: function computeOperatorGraphs(mapInfo, element, operatorMap) {\n var _this5 = this;\n\n var sources = element.sources;\n\n var sourceResults = new Map();\n sources.forEach(function (source) {\n var sourceResult = _this5.computeOperatorGraphs(mapInfo, mapInfo.get(source), operatorMap);\n sourceResult.forEach(function (operator, pipelineId) {\n if (sourceResults.has(pipelineId)) {\n console.error(\"Multiple sources for \", element['@type'], \" had the same pipeline ID\");\n return sourceResults;\n }\n sourceResults.set(pipelineId, operator);\n });\n });\n\n var nodeOperators = operatorMap.get(element.relatedId);\n if (!nodeOperators || nodeOperators.length === 0) {\n return sourceResults;\n }\n\n var pipelineOperators = new Map();\n nodeOperators.forEach(function (operator) {\n if (!pipelineOperators.has(operator.pipelineId)) {\n pipelineOperators.set(operator.pipelineId, []);\n }\n pipelineOperators.get(operator.pipelineId).push(operator);\n });\n\n var result = new Map();\n pipelineOperators.forEach(function (pipelineOperators, pipelineId) {\n // sort deep-copied operators in this pipeline from source to sink\n var linkedOperators = pipelineOperators.map(function (a) {\n return Object.assign({}, a);\n }).sort(function (a, b) {\n return a.operatorId - b.operatorId;\n });\n var sinkOperator = linkedOperators[linkedOperators.length - 1];\n var sourceOperator = linkedOperators[0];\n\n if (sourceResults.has(pipelineId)) {\n var pipelineChildResult = sourceResults.get(pipelineId);\n if (pipelineChildResult) {\n sourceOperator.child = pipelineChildResult;\n }\n }\n\n // chain operators at this level\n var currentOperator = sourceOperator;\n linkedOperators.slice(1).forEach(function (source) {\n source.child = currentOperator;\n currentOperator = source;\n });\n\n result.set(pipelineId, sinkOperator);\n });\n\n sourceResults.forEach(function (operator, pipelineId) {\n if (!result.has(pipelineId)) {\n result.set(pipelineId, operator);\n }\n });\n\n return result;\n }\n }, {\n key: \"computeOperatorMap\",\n value: function computeOperatorMap() {\n var operatorMap = new Map();\n this.props.stage.stageStats.operatorSummaries.forEach(function (operator) {\n if (!operatorMap.has(operator.operatorId)) {\n operatorMap.set(operator.operatorId, []);\n }\n\n operatorMap.get(operator.operatorId).push(operator);\n });\n\n return operatorMap;\n }\n }, {\n key: \"computeD3StageOperatorGraph\",\n value: function computeD3StageOperatorGraph(graph, operator, sink, pipelineNode) {\n var operatorNodeId = \"operator-\" + operator.pipelineId + \"-\" + operator.operatorId;\n\n // this is a non-standard use of ReactDOMServer, but it's the cleanest way to unify DagreD3 with React\n var html = _server2.default.renderToString(_react2.default.createElement(OperatorSummary, {\n key: operator.pipelineId + \"-\" + operator.operatorId, operator: operator }));\n graph.setNode(operatorNodeId, { class: \"operator-stats\", label: html, labelType: \"html\" });\n\n if (operator.hasOwnProperty(\"child\")) {\n this.computeD3StageOperatorGraph(graph, operator.child, operatorNodeId, pipelineNode);\n }\n\n if (sink !== null) {\n graph.setEdge(operatorNodeId, sink, { class: \"plan-edge\", arrowheadClass: \"plan-arrowhead\" });\n }\n\n graph.setParent(operatorNodeId, pipelineNode);\n }\n }, {\n key: \"updateD3Graph\",\n value: function updateD3Graph() {\n var _this6 = this;\n\n if (!this.props.stage) {\n return;\n }\n\n var stage = this.props.stage;\n var operatorMap = this.computeOperatorMap();\n var 
rootId = stage.plan.rootId;\n\n var rels = JSON.parse(stage.plan.relNodeJson).rels;\n var mapInfo = new Map();\n\n rels.forEach(function (element) {\n mapInfo.set(element.relatedId, element);\n });\n\n var operatorGraphs = this.computeOperatorGraphs(mapInfo, mapInfo.get(rootId), operatorMap);\n\n var graph = (0, _utils.initializeGraph)();\n operatorGraphs.forEach(function (operator, pipelineId) {\n var pipelineNodeId = \"pipeline-\" + pipelineId;\n graph.setNode(pipelineNodeId, {\n label: \"Pipeline \" + pipelineId + \" \",\n clusterLabelPos: 'top',\n style: 'fill: #2b2b2b',\n labelStyle: 'fill: #fff'\n });\n _this6.computeD3StageOperatorGraph(graph, operator, null, pipelineNodeId);\n });\n\n $(\"#operator-canvas\").html(\"\");\n\n if (operatorGraphs.size > 0) {\n $(\".graph-container\").css(\"display\", \"block\");\n var svg = (0, _utils.initializeSvg)(\"#operator-canvas\");\n var render = new dagreD3.render();\n render(d3.select(\"#operator-canvas g\"), graph);\n\n svg.selectAll(\"g.operator-stats\").on(\"click\", this.handleOperatorClick.bind(this));\n svg.attr(\"height\", graph.graph().height);\n svg.attr(\"width\", graph.graph().width);\n } else {\n $(\".graph-container\").css(\"display\", \"none\");\n }\n }\n }, {\n key: \"render\",\n value: function render() {\n var stage = this.props.stage;\n\n if (!stage.hasOwnProperty('plan')) {\n return _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"Stage does not have a plan\"\n )\n )\n );\n }\n\n if (!stage.hasOwnProperty('stageStats') || !stage.stageStats.hasOwnProperty(\"operatorSummaries\") || stage.stageStats.operatorSummaries.length === 0) {\n return _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"Operator data not available for \",\n stage.stageId\n )\n )\n );\n }\n\n return null;\n }\n }]);\n\n return StageOperatorGraph;\n}(_react2.default.Component);\n\nvar StageDetail = exports.StageDetail = function (_React$Component5) {\n _inherits(StageDetail, _React$Component5);\n\n function StageDetail(props) {\n _classCallCheck(this, StageDetail);\n\n var _this7 = _possibleConstructorReturn(this, (StageDetail.__proto__ || Object.getPrototypeOf(StageDetail)).call(this, props));\n\n _this7.state = {\n initialized: false,\n ended: false,\n\n selectedStageId: null,\n query: null,\n\n lastRefresh: null,\n lastRender: null\n };\n\n _this7.refreshLoop = _this7.refreshLoop.bind(_this7);\n return _this7;\n }\n\n _createClass(StageDetail, [{\n key: \"resetTimer\",\n value: function resetTimer() {\n clearTimeout(this.timeoutId);\n // stop refreshing when query finishes or fails\n if (this.state.query === null || !this.state.ended) {\n this.timeoutId = setTimeout(this.refreshLoop, 5000);\n }\n }\n }, {\n key: \"refreshLoop\",\n value: function refreshLoop() {\n var _this8 = this;\n\n clearTimeout(this.timeoutId); // to stop multiple series of refreshLoop from going on simultaneously\n var queryString = (0, _utils.getFirstParameter)(window.location.search).split('.');\n var queryId = queryString[0];\n\n var selectedStageId = this.state.selectedStageId;\n if (selectedStageId === null) {\n selectedStageId = 0;\n if (queryString.length > 1) {\n selectedStageId = parseInt(queryString[1]);\n }\n }\n\n $.get('/v1/query/' + queryId, 
function (query) {\n _this8.setState({\n initialized: true,\n ended: query.finalQueryInfo,\n\n selectedStageId: selectedStageId,\n query: query\n });\n _this8.resetTimer();\n }).error(function () {\n _this8.setState({\n initialized: true\n });\n _this8.resetTimer();\n });\n }\n }, {\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.refreshLoop();\n }\n }, {\n key: \"findStage\",\n value: function findStage(stageId, currentStage) {\n if (stageId === null) {\n return null;\n }\n\n if (currentStage.stageId === stageId) {\n return currentStage;\n }\n\n for (var i = 0; i < currentStage.subStages.length; i++) {\n var stage = this.findStage(stageId, currentStage.subStages[i]);\n if (stage !== null) {\n return stage;\n }\n }\n\n return null;\n }\n }, {\n key: \"getAllStageIds\",\n value: function getAllStageIds(result, currentStage) {\n var _this9 = this;\n\n result.push(currentStage.plan.id);\n currentStage.subStages.forEach(function (stage) {\n _this9.getAllStageIds(result, stage);\n });\n }\n }, {\n key: \"render\",\n value: function render() {\n var _this10 = this;\n\n if (!this.state.query) {\n var label = _react2.default.createElement(\n \"div\",\n { className: \"loader\" },\n \"Loading...\"\n );\n if (this.state.initialized) {\n label = \"Query not found\";\n }\n return _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n label\n )\n )\n );\n }\n\n if (!this.state.query.outputStage) {\n return _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"Query does not have an output stage\"\n )\n )\n );\n }\n\n var query = this.state.query;\n var allStages = [];\n this.getAllStageIds(allStages, query.outputStage);\n\n var stage = this.findStage(query.queryId + \".\" + this.state.selectedStageId, query.outputStage);\n if (stage === null) {\n return _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"Stage not found\"\n )\n )\n );\n }\n\n var stageOperatorGraph = null;\n if (!(0, _utils.isQueryEnded)(query)) {\n stageOperatorGraph = _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"Operator graph will appear automatically when query completes.\"\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"loader\" },\n \"Loading...\"\n )\n )\n );\n } else {\n stageOperatorGraph = _react2.default.createElement(StageOperatorGraph, { id: stage.stageId, stage: stage });\n }\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(_QueryHeader.QueryHeader, { query: query }),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-2\" },\n _react2.default.createElement(\n \"h3\",\n null,\n \"Stage \",\n stage.plan.id\n )\n ),\n _react2.default.createElement(\"div\", { className: 
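
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the shipped bundle): the polling pattern
// StageDetail uses above. A single timeout id is kept so overlapping refresh
// chains cannot start, and polling stops once the query reaches a terminal
// state. The endpoint and the 5s period mirror the component; the factory
// shape and the use of .fail() (rather than the deprecated .error() seen in
// the bundle) are illustrative.
function makePoller(queryId, onUpdate) {
    var timeoutId = null;
    var ended = false;

    function schedule() {
        clearTimeout(timeoutId);
        if (!ended) {
            timeoutId = setTimeout(poll, 5000);
        }
    }

    function poll() {
        clearTimeout(timeoutId);          // defend against concurrent chains
        $.get('/v1/query/' + queryId, function (query) {
            ended = !!query.finalQueryInfo;
            onUpdate(query);
            schedule();
        }).fail(schedule);                // keep retrying on transport errors
    }

    return { start: poll, stop: function () { ended = true; clearTimeout(timeoutId); } };
}
// ---------------------------------------------------------------------------
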
\"col-xs-8\" }),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-2 stage-dropdown\" },\n _react2.default.createElement(\n \"div\",\n { className: \"input-group-btn\" },\n _react2.default.createElement(\n \"button\",\n { type: \"button\", className: \"btn btn-default dropdown-toggle\",\n \"data-toggle\": \"dropdown\", \"aria-haspopup\": \"true\", \"aria-expanded\": \"false\" },\n \"Select Stage \",\n _react2.default.createElement(\"span\", { className: \"caret\" })\n ),\n _react2.default.createElement(\n \"ul\",\n { className: \"dropdown-menu\" },\n allStages.map(function (stageId) {\n return _react2.default.createElement(\n \"li\",\n { key: stageId },\n _react2.default.createElement(\n \"a\",\n {\n onClick: function onClick() {\n return _this10.setState({ selectedStageId: stageId });\n } },\n stageId\n )\n );\n })\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\"hr\", { className: \"h3-hr\" }),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n stageOperatorGraph\n )\n )\n );\n }\n }]);\n\n return StageDetail;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/StageDetail.jsx?"); +eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.StageDetail = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nvar _server = __webpack_require__(/*! react-dom/server */ \"./node_modules/react-dom/server.browser.js\");\n\nvar _server2 = _interopRequireDefault(_server);\n\nvar _dagreD = __webpack_require__(/*! dagre-d3 */ \"./node_modules/dagre-d3/index.js\");\n\nvar dagreD3 = _interopRequireWildcard(_dagreD);\n\nvar _d = __webpack_require__(/*! d3 */ \"./node_modules/d3/index.js\");\n\nvar d3 = _interopRequireWildcard(_d);\n\nvar _utils = __webpack_require__(/*! ../utils */ \"./utils.js\");\n\nvar _QueryHeader = __webpack_require__(/*! ./QueryHeader */ \"./components/QueryHeader.jsx\");\n\nfunction _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } }\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? 
call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nfunction getTotalWallTime(operator) {\n return (0, _utils.parseDuration)(operator.addInputWall) + (0, _utils.parseDuration)(operator.getOutputWall) + (0, _utils.parseDuration)(operator.finishWall) + (0, _utils.parseDuration)(operator.blockedWall);\n}\n\nvar OperatorSummary = function (_React$Component) {\n _inherits(OperatorSummary, _React$Component);\n\n function OperatorSummary() {\n _classCallCheck(this, OperatorSummary);\n\n return _possibleConstructorReturn(this, (OperatorSummary.__proto__ || Object.getPrototypeOf(OperatorSummary)).apply(this, arguments));\n }\n\n _createClass(OperatorSummary, [{\n key: \"render\",\n value: function render() {\n var operator = this.props.operator;\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"div\",\n { className: \"highlight-row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"header-row\" },\n operator.operatorType\n )\n ),\n _react2.default.createElement(\n \"table\",\n { className: \"table\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"OutputRows\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n (0, _utils.formatCount)(operator.outputRowCount)\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"StartupTime\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n (0, _utils.formatDurationMs)(operator.startupDuration * 1000)\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"RunTime\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n (0, _utils.formatDurationMs)(operator.duration * 1000)\n )\n )\n )\n )\n );\n }\n }]);\n\n return OperatorSummary;\n}(_react2.default.Component);\n\nvar LocalBufferOperator = function (_React$Component2) {\n _inherits(LocalBufferOperator, _React$Component2);\n\n function LocalBufferOperator() {\n _classCallCheck(this, LocalBufferOperator);\n\n return _possibleConstructorReturn(this, (LocalBufferOperator.__proto__ || Object.getPrototypeOf(LocalBufferOperator)).apply(this, arguments));\n }\n\n _createClass(LocalBufferOperator, [{\n key: \"render\",\n value: function render() {\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"div\",\n { className: \"highlight-row\" },\n 
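
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the shipped bundle): getTotalWallTime above
// adds the four wall-clock phases of an operator, each reported as a duration
// string that parseDuration (from ../utils, reproduced later in this bundle)
// converts to milliseconds. A hypothetical operator summary makes the
// arithmetic concrete:
var exampleOperator = {
    addInputWall: "1.5s",   //   1500 ms
    getOutputWall: "250ms", //    250 ms
    finishWall: "20ms",     //     20 ms
    blockedWall: "2m"       // 120000 ms
};
// getTotalWallTime(exampleOperator) === 1500 + 250 + 20 + 120000 === 121770 ms
// ---------------------------------------------------------------------------
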
_react2.default.createElement(\n \"div\",\n { className: \"header-row\" },\n \"LocalBuffer\"\n )\n )\n );\n }\n }]);\n\n return LocalBufferOperator;\n}(_react2.default.Component);\n\nvar BAR_CHART_PROPERTIES = {\n type: 'bar',\n barSpacing: '0',\n height: '80px',\n barColor: '#747F96',\n zeroColor: '#8997B3',\n tooltipClassname: 'sparkline-tooltip',\n tooltipFormat: 'Task {{offset:offset}} - {{value}}',\n disableHiddenCheck: true\n};\n\nvar OperatorStatistic = function (_React$Component3) {\n _inherits(OperatorStatistic, _React$Component3);\n\n function OperatorStatistic() {\n _classCallCheck(this, OperatorStatistic);\n\n return _possibleConstructorReturn(this, (OperatorStatistic.__proto__ || Object.getPrototypeOf(OperatorStatistic)).apply(this, arguments));\n }\n\n _createClass(OperatorStatistic, [{\n key: \"componentDidMount\",\n value: function componentDidMount() {\n var operators = this.props.operators;\n var statistic = operators.map(this.props.supplier);\n var numTasks = operators.length;\n\n var tooltipValueLookups = { 'offset': {} };\n for (var i = 0; i < numTasks; i++) {\n tooltipValueLookups['offset'][i] = \"\" + i;\n }\n\n var stageBarChartProperties = $.extend({}, BAR_CHART_PROPERTIES, {\n barWidth: 800 / numTasks,\n tooltipValueLookups: tooltipValueLookups\n });\n $('#' + this.props.id).sparkline(statistic, $.extend({}, stageBarChartProperties, { numberFormatter: this.props.renderer }));\n }\n }, {\n key: \"render\",\n value: function render() {\n return _react2.default.createElement(\n \"div\",\n { className: \"row operator-statistic\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-2 italic-uppercase operator-statistic-title\" },\n this.props.name\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-10\" },\n _react2.default.createElement(\"span\", { className: \"bar-chart\", id: this.props.id })\n )\n );\n }\n }]);\n\n return OperatorStatistic;\n}(_react2.default.Component);\n\nvar OperatorDetail = function (_React$Component4) {\n _inherits(OperatorDetail, _React$Component4);\n\n function OperatorDetail(props) {\n _classCallCheck(this, OperatorDetail);\n\n var _this4 = _possibleConstructorReturn(this, (OperatorDetail.__proto__ || Object.getPrototypeOf(OperatorDetail)).call(this, props));\n\n _this4.state = {\n selectedStatistics: _this4.getInitialStatistics()\n };\n return _this4;\n }\n\n _createClass(OperatorDetail, [{\n key: \"getInitialStatistics\",\n value: function getInitialStatistics() {\n return [{\n name: \"Total Wall Time\",\n id: \"totalWallTime\",\n supplier: getTotalWallTime,\n renderer: _utils.formatDurationMs\n }, {\n name: \"Input Rows\",\n id: \"inputPositions\",\n supplier: function supplier(operator) {\n return operator.inputPositions;\n },\n renderer: _utils.formatCount\n }, {\n name: \"Input Data Size\",\n id: \"inputDataSize\",\n supplier: function supplier(operator) {\n return (0, _utils.parseDataSize)(operator.inputDataSize);\n },\n renderer: _utils.formatDataSize\n }, {\n name: \"Output Rows\",\n id: \"outputPositions\",\n supplier: function supplier(operator) {\n return operator.outputPositions;\n },\n renderer: _utils.formatCount\n }, {\n name: \"Output Data Size\",\n id: \"outputDataSize\",\n supplier: function supplier(operator) {\n return (0, _utils.parseDataSize)(operator.outputDataSize);\n },\n renderer: _utils.formatDataSize\n }];\n }\n }, {\n key: \"getOperatorTasks\",\n value: function getOperatorTasks() {\n // sort the x-axis\n var tasks = this.props.tasks.sort(function (taskA, taskB) {\n return 
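
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the shipped bundle): OperatorStatistic
// above draws one bar per task with the jquery.sparkline plugin (assumed to
// be loaded globally, as elsewhere in this UI). The per-task values come from
// mapping a supplier over the operator summaries; the tooltip lookup table
// maps bar offsets back to task numbers. BAR_CHART_PROPERTIES is the constant
// defined just above.
function drawTaskBarChart(elementId, values) {
    var offsets = {};
    values.forEach(function (value, i) { offsets[i] = "" + i; });

    $('#' + elementId).sparkline(values, $.extend({}, BAR_CHART_PROPERTIES, {
        barWidth: 800 / values.length,              // fill a fixed 800px strip
        tooltipValueLookups: { offset: offsets }
    }));
}
// usage: drawTaskBarChart("totalWallTime", [120, 95, 143, 101]);
// ---------------------------------------------------------------------------
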
(0, _utils.getTaskNumber)(taskA.taskStatus.taskId) - (0, _utils.getTaskNumber)(taskB.taskStatus.taskId);\n });\n\n var operatorSummary = this.props.operator;\n\n var operatorTasks = [];\n tasks.forEach(function (task) {\n task.stats.pipelines.forEach(function (pipeline) {\n if (pipeline.pipelineId === operatorSummary.pipelineId) {\n pipeline.operatorSummaries.forEach(function (operator) {\n if (operatorSummary.operatorId === operator.operatorId) {\n operatorTasks.push(operator);\n }\n });\n }\n });\n });\n\n return operatorTasks;\n }\n }, {\n key: \"render\",\n value: function render() {\n var operator = this.props.operator;\n var operatorTasks = this.getOperatorTasks();\n var totalWallTime = getTotalWallTime(operator);\n\n var rowInputRate = totalWallTime === 0 ? 0 : 1.0 * operator.inputPositions / totalWallTime;\n var byteInputRate = totalWallTime === 0 ? 0 : 1.0 * (0, _utils.parseDataSize)(operator.inputDataSize) / (totalWallTime / 1000.0);\n\n var rowOutputRate = totalWallTime === 0 ? 0 : 1.0 * operator.outputPositions / totalWallTime;\n var byteOutputRate = totalWallTime === 0 ? 0 : 1.0 * (0, _utils.parseDataSize)(operator.outputDataSize) / (totalWallTime / 1000.0);\n\n return _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-header\" },\n _react2.default.createElement(\n \"button\",\n { type: \"button\", className: \"close\", \"data-dismiss\": \"modal\", \"aria-label\": \"Close\" },\n _react2.default.createElement(\n \"span\",\n {\n \"aria-hidden\": \"true\" },\n \"\\xD7\"\n )\n ),\n _react2.default.createElement(\n \"h3\",\n null,\n _react2.default.createElement(\n \"small\",\n null,\n \"Pipeline \",\n operator.pipelineId\n ),\n _react2.default.createElement(\"br\", null),\n operator.operatorType\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"table\",\n { className: \"table\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"Input\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n (0, _utils.formatCount)(operator.inputPositions) + \" rows (\" + operator.inputDataSize + \")\"\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"Input Rate\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n (0, _utils.formatCount)(rowInputRate) + \" rows/s (\" + (0, _utils.formatDataSize)(byteInputRate) + \"/s)\"\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"Output\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n (0, _utils.formatCount)(operator.outputPositions) + \" rows (\" + operator.outputDataSize + \")\"\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"Output Rate\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n (0, _utils.formatCount)(rowOutputRate) + \" rows/s (\" + (0, _utils.formatDataSize)(byteOutputRate) + \"/s)\"\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-6\" },\n _react2.default.createElement(\n \"table\",\n { className: \"table\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n 
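
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the shipped bundle): the rate math in
// OperatorDetail.render above, with the zero-wall-time guard made explicit.
// parseDataSize is assumed in scope (it is exported by ../utils). Note that
// the component divides the byte count by seconds (totalWallTime / 1000) but
// the row count by raw milliseconds while labelling both per-second; the
// sketch reproduces the expressions as written.
function computeInputRates(operator, totalWallTimeMs) {
    if (totalWallTimeMs === 0) {
        return { rowInputRate: 0, byteInputRate: 0 };   // avoid division by zero
    }
    return {
        rowInputRate: operator.inputPositions / totalWallTimeMs,
        byteInputRate: parseDataSize(operator.inputDataSize) / (totalWallTimeMs / 1000)
    };
}
// ---------------------------------------------------------------------------
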
_react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"Wall Time\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n (0, _utils.formatDurationMs)(totalWallTime)\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"Blocked\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n (0, _utils.formatDurationMs)((0, _utils.parseDuration)(operator.blockedWall))\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"Drivers\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n operator.totalDrivers\n )\n ),\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n \"Tasks\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n operatorTasks.length\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"row font-black\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-2 italic-uppercase\" },\n _react2.default.createElement(\n \"strong\",\n null,\n \"Statistic\"\n )\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-10 italic-uppercase\" },\n _react2.default.createElement(\n \"strong\",\n null,\n \"Tasks\"\n )\n )\n ),\n this.state.selectedStatistics.map(function (statistic) {\n return _react2.default.createElement(OperatorStatistic, {\n key: statistic.id,\n id: statistic.id,\n name: statistic.name,\n supplier: statistic.supplier,\n renderer: statistic.renderer,\n operators: operatorTasks });\n }.bind(this)),\n _react2.default.createElement(\"p\", null),\n _react2.default.createElement(\"p\", null)\n )\n );\n }\n }]);\n\n return OperatorDetail;\n}(_react2.default.Component);\n\nvar StageOperatorGraph = function (_React$Component5) {\n _inherits(StageOperatorGraph, _React$Component5);\n\n function StageOperatorGraph() {\n _classCallCheck(this, StageOperatorGraph);\n\n return _possibleConstructorReturn(this, (StageOperatorGraph.__proto__ || Object.getPrototypeOf(StageOperatorGraph)).apply(this, arguments));\n }\n\n _createClass(StageOperatorGraph, [{\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.updateD3Graph();\n }\n }, {\n key: \"componentDidUpdate\",\n value: function componentDidUpdate() {\n this.updateD3Graph();\n }\n }, {\n key: \"handleOperatorClick\",\n value: function handleOperatorClick(operatorCssId) {\n $('#operator-detail-modal').modal();\n\n var pipelineId = parseInt(operatorCssId.split('-')[1]);\n var operatorId = parseInt(operatorCssId.split('-')[2]);\n var stage = this.props.stage;\n\n var operatorStageSummary = null;\n var operatorSummaries = stage.stageStats.operatorSummaries;\n for (var i = 0; i < operatorSummaries.length; i++) {\n if (operatorSummaries[i].pipelineId === pipelineId && operatorSummaries[i].operatorId === operatorId) {\n operatorStageSummary = operatorSummaries[i];\n }\n }\n\n // ReactDOM.render(,\n // document.getElementById('operator-detail'));\n }\n }, {\n key: \"computeOperatorGraphs\",\n value: function computeOperatorGraphs(mapInfo, element, operatorMap) {\n var _this6 = this;\n\n var sources = element.sources;\n\n var sourceResults = new Map();\n sources.forEach(function (source) {\n var sourceResult = _this6.computeOperatorGraphs(mapInfo, mapInfo.get(source), operatorMap);\n sourceResult.forEach(function (operator, pipelineId) {\n if (sourceResults.has(pipelineId)) {\n console.error(\"Multiple sources for \" + 
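
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the shipped bundle): handleOperatorClick
// above recovers the pipeline and operator ids from the clicked DOM node id
// ("operator-<pipelineId>-<operatorId>") and scans the stage's
// operatorSummaries for the matching entry.
function findOperatorSummary(operatorCssId, operatorSummaries) {
    var parts = operatorCssId.split('-');
    var pipelineId = parseInt(parts[1], 10);
    var operatorId = parseInt(parts[2], 10);

    for (var i = 0; i < operatorSummaries.length; i++) {
        var summary = operatorSummaries[i];
        if (summary.pipelineId === pipelineId && summary.operatorId === operatorId) {
            return summary;
        }
    }
    return null;    // no summary reported for this node
}
// ---------------------------------------------------------------------------
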
element.relOp + \" had the same pipeline ID\");\n return sourceResults;\n }\n sourceResults.set(pipelineId, operator);\n });\n });\n\n var nodeOperators = operatorMap.get(element.relatedId);\n if (!nodeOperators || nodeOperators.length === 0) {\n return sourceResults;\n }\n\n var pipelineOperators = new Map();\n nodeOperators.forEach(function (operator) {\n if (!pipelineOperators.has(operator.pipelineId)) {\n pipelineOperators.set(operator.pipelineId, []);\n }\n pipelineOperators.get(operator.pipelineId).push(operator);\n });\n\n var result = new Map();\n pipelineOperators.forEach(function (pipelineOperators, pipelineId) {\n // sort deep-copied operators in this pipeline from source to sink\n var linkedOperators = pipelineOperators.map(function (a) {\n return Object.assign({}, a);\n }).sort(function (a, b) {\n return a.operatorId - b.operatorId;\n });\n var sinkOperator = linkedOperators[linkedOperators.length - 1];\n var sourceOperator = linkedOperators[0];\n\n if (sourceResults.has(pipelineId)) {\n var pipelineChildResult = sourceResults.get(pipelineId);\n if (pipelineChildResult) {\n sourceOperator.child = pipelineChildResult;\n }\n }\n\n // chain operators at this level\n var currentOperator = sourceOperator;\n linkedOperators.slice(1).forEach(function (source) {\n source.child = currentOperator;\n currentOperator = source;\n });\n\n result.set(pipelineId, sinkOperator);\n });\n\n sourceResults.forEach(function (operator, pipelineId) {\n if (!result.has(pipelineId)) {\n result.set(pipelineId, operator);\n }\n });\n\n return result;\n }\n }, {\n key: \"computeOperatorMap\",\n value: function computeOperatorMap() {\n var operatorMap = new Map();\n this.props.stage.stageStats.operatorSummaries.forEach(function (operator) {\n // if (!operatorMap.has(operator.operatorId)) {\n // operatorMap.set(operator.operatorId, [])\n // }\n var operators = [];\n operators.push(operator);\n operatorMap.set(operator.operatorId, operators);\n });\n\n return operatorMap;\n }\n }, {\n key: \"computeD3StageOperatorGraph\",\n value: function computeD3StageOperatorGraph(graph, operator, sink, pipelineNode, pipelineRootNodeMap) {\n var operatorNodeId = \"operator-\" + operator.pipelineId + \"-\" + operator.operatorId;\n\n // this is a non-standard use of ReactDOMServer, but it's the cleanest way to unify DagreD3 with React\n var html = _server2.default.renderToString(_react2.default.createElement(OperatorSummary, {\n key: operator.pipelineId + \"-\" + operator.operatorId, operator: operator }));\n graph.setNode(operatorNodeId, { class: \"operator-stats\", label: html, labelType: \"html\" });\n\n if (operator.hasOwnProperty(\"child\")) {\n this.computeD3StageOperatorGraph(graph, operator.child, operatorNodeId, pipelineNode, pipelineRootNodeMap);\n } else {\n pipelineRootNodeMap.set(pipelineNode, operatorNodeId);\n }\n\n if (sink !== null) {\n graph.setEdge(operatorNodeId, sink, { class: \"plan-edge\", arrowheadClass: \"plan-arrowhead\" });\n }\n\n graph.setParent(operatorNodeId, pipelineNode);\n }\n }, {\n key: \"updateD3Graph\",\n value: function updateD3Graph() {\n var _this7 = this;\n\n if (!this.props.stage) {\n return;\n }\n\n var stage = this.props.stage;\n var operatorMap = this.computeOperatorMap();\n var pipelineDepMap = new Map(Object.entries(stage.tasks[0].taskStats.pipelineDeps));\n var rootId = stage.plan.rootId;\n\n var rels = JSON.parse(stage.plan.relNodeJson).rels;\n var mapInfo = new Map();\n\n rels.forEach(function (element) {\n mapInfo.set(element.relatedId, element);\n });\n\n var 
operatorGraphs = this.computeOperatorGraphs(mapInfo, mapInfo.get(rootId), operatorMap);\n\n var graph = (0, _utils.initializeGraph)();\n var pipelineNodeMap = new Map();\n var pipelineRootNodeMap = new Map();\n var pipelineTopNodeMap = new Map();\n operatorGraphs.forEach(function (operator, pipelineId) {\n var pipelineNodeId = \"pipeline-\" + pipelineId;\n pipelineNodeMap.set(pipelineId, pipelineNodeId);\n graph.setNode(pipelineNodeId, {\n label: \"Pipeline \" + pipelineId + \" \",\n clusterLabelPos: 'top',\n style: 'fill: #2b2b2b',\n labelStyle: 'fill: #fff'\n });\n var operatorNodeId = \"operator-\" + operator.pipelineId + \"-\" + operator.operatorId;\n pipelineTopNodeMap.set(pipelineNodeId, operatorNodeId);\n _this7.computeD3StageOperatorGraph(graph, operator, null, pipelineNodeId, pipelineRootNodeMap);\n });\n pipelineDepMap.forEach(function (childIds, parentId) {\n for (var i = 0; i < childIds.length; i++) {\n var childNode = pipelineNodeMap.get(childIds[i]);\n if (childNode === undefined) {\n var childNodeId = \"pipeline-\" + childIds[i];\n var childNodeHtml = {\n label: \"Pipeline \" + childIds[i],\n clusterLabelPos: 'top',\n style: 'fill: #2b2b2b',\n labelStyle: 'fill: #fff'\n };\n graph.setNode(childNodeId, childNodeHtml);\n var localBufferNodeId = \"localBuffer-\" + parentId;\n var html = _server2.default.renderToString(_react2.default.createElement(LocalBufferOperator, {\n key: localBufferNodeId }));\n graph.setNode(localBufferNodeId, { class: \"operator-stats\", label: html, labelType: \"html\" });\n graph.setParent(localBufferNodeId, childNodeId);\n pipelineNodeMap.set(childIds[i], childNodeId);\n pipelineRootNodeMap.set(childNodeId, localBufferNodeId);\n pipelineTopNodeMap.set(childNodeId, localBufferNodeId);\n childNode = childNodeId;\n }\n var parentNode = pipelineNodeMap.get(parseInt(parentId));\n if (parentNode === undefined) {\n var parentNodeId = \"pipeline-\" + parentId;\n var parentNodeHtml = {\n label: \"Pipeline \" + parentId,\n clusterLabelPos: 'top',\n style: 'fill: #2b2b2b',\n labelStyle: 'fill: #fff'\n };\n graph.setNode(parentNodeId, parentNodeHtml);\n var _localBufferNodeId = \"localBuffer-\" + parentId;\n var _html = _server2.default.renderToString(_react2.default.createElement(LocalBufferOperator, {\n key: _localBufferNodeId }));\n graph.setNode(_localBufferNodeId, { class: \"operator-stats\", label: _html, labelType: \"html\" });\n graph.setParent(_localBufferNodeId, parentNodeId);\n pipelineNodeMap.set(parseInt(parentId), parentNodeId);\n pipelineRootNodeMap.set(parentNodeId, _localBufferNodeId);\n pipelineTopNodeMap.set(parentNodeId, _localBufferNodeId);\n parentNode = parentNodeId;\n }\n var vParentInput = \"v-\" + parentNode + \"-input\";\n var vChildOutput = \"v-\" + childNode + \"-output\";\n graph.setNode(vParentInput, {\n label: \"\",\n shape: \"circle\"\n });\n graph.setNode(vChildOutput, {\n label: \"\",\n shape: \"circle\"\n });\n graph.setParent(vParentInput, parentNode);\n graph.setParent(vChildOutput, childNode);\n graph.setEdge(vChildOutput, vParentInput, {\n class: \"pipeline-edge\",\n arrowhead: \"vee\",\n arrowheadClass: \"pipeline-arrowhead\",\n style: \"stroke-width: 2px\"\n });\n graph.setEdge(vParentInput, pipelineRootNodeMap.get(parentNode), {\n class: \"v-pipeline-edge\",\n arrowhead: \"undirected\",\n style: \"stroke-width: 0\"\n });\n graph.setEdge(pipelineTopNodeMap.get(childNode), vChildOutput, {\n class: \"v-pipeline-edge\",\n arrowhead: \"undirected\",\n style: \"stroke-width: 0\"\n });\n }\n });\n\n 
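
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the shipped bundle): dagre's layout does
// not route edges that terminate on a cluster (compound) node, so the
// pipelineDeps block above introduces invisible proxy nodes — a
// "v-<cluster>-output" inside the child pipeline and a "v-<cluster>-input"
// inside the parent — and draws the visible dependency arrow between the
// proxies. Zero-width edges then pin each proxy to the pipeline's real
// top/root operator so the layout keeps them in place.
function linkPipelineClusters(graph, childCluster, parentCluster, childTopNode, parentRootNode) {
    var childOutput = "v-" + childCluster + "-output";
    var parentInput = "v-" + parentCluster + "-input";

    graph.setNode(childOutput, { label: "", shape: "circle" });
    graph.setNode(parentInput, { label: "", shape: "circle" });
    graph.setParent(childOutput, childCluster);
    graph.setParent(parentInput, parentCluster);

    // the visible pipeline-dependency arrow
    graph.setEdge(childOutput, parentInput, { arrowhead: "vee", style: "stroke-width: 2px" });

    // invisible anchors that keep the proxies next to the real operators
    graph.setEdge(childTopNode, childOutput, { arrowhead: "undirected", style: "stroke-width: 0" });
    graph.setEdge(parentInput, parentRootNode, { arrowhead: "undirected", style: "stroke-width: 0" });
}
// ---------------------------------------------------------------------------
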
$(\"#operator-canvas\").html(\"\");\n\n if (operatorGraphs.size > 0) {\n $(\".graph-container\").css(\"display\", \"block\");\n var svg = (0, _utils.initializeSvg)(\"#operator-canvas\");\n var render = new dagreD3.render();\n render(d3.select(\"#operator-canvas g\"), graph);\n\n svg.selectAll(\"g.operator-stats\").on(\"click\", this.handleOperatorClick.bind(this));\n svg.attr(\"height\", graph.graph().height);\n svg.attr(\"width\", graph.graph().width);\n } else {\n $(\".graph-container\").css(\"display\", \"none\");\n }\n }\n }, {\n key: \"render\",\n value: function render() {\n var stage = this.props.stage;\n\n if (!stage.hasOwnProperty('plan')) {\n return _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"Stage does not have a plan\"\n )\n )\n );\n }\n\n if (!stage.hasOwnProperty('stageStats') || !stage.stageStats.hasOwnProperty(\"operatorSummaries\") || stage.stageStats.operatorSummaries.length === 0) {\n return _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"Operator data not available for \",\n stage.stageId\n )\n )\n );\n }\n\n return null;\n }\n }]);\n\n return StageOperatorGraph;\n}(_react2.default.Component);\n\nvar StageDetail = exports.StageDetail = function (_React$Component6) {\n _inherits(StageDetail, _React$Component6);\n\n function StageDetail(props) {\n _classCallCheck(this, StageDetail);\n\n var _this8 = _possibleConstructorReturn(this, (StageDetail.__proto__ || Object.getPrototypeOf(StageDetail)).call(this, props));\n\n _this8.state = {\n initialized: false,\n ended: false,\n\n selectedStageId: null,\n query: null,\n\n lastRefresh: null,\n lastRender: null\n };\n\n _this8.refreshLoop = _this8.refreshLoop.bind(_this8);\n return _this8;\n }\n\n _createClass(StageDetail, [{\n key: \"resetTimer\",\n value: function resetTimer() {\n clearTimeout(this.timeoutId);\n // stop refreshing when query finishes or fails\n if (this.state.query === null || !this.state.ended) {\n this.timeoutId = setTimeout(this.refreshLoop, 5000);\n }\n }\n }, {\n key: \"refreshLoop\",\n value: function refreshLoop() {\n var _this9 = this;\n\n clearTimeout(this.timeoutId); // to stop multiple series of refreshLoop from going on simultaneously\n var queryString = (0, _utils.getFirstParameter)(window.location.search).split('.');\n var queryId = queryString[0];\n\n var selectedStageId = this.state.selectedStageId;\n if (selectedStageId === null) {\n selectedStageId = 0;\n if (queryString.length > 1) {\n selectedStageId = parseInt(queryString[1]);\n }\n }\n\n $.get('/v1/query/' + queryId, function (query) {\n _this9.setState({\n initialized: true,\n ended: query.finalQueryInfo,\n\n selectedStageId: selectedStageId,\n query: query\n });\n _this9.resetTimer();\n }).error(function () {\n _this9.setState({\n initialized: true\n });\n _this9.resetTimer();\n });\n }\n }, {\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.refreshLoop();\n }\n }, {\n key: \"findStage\",\n value: function findStage(stageId, currentStage) {\n if (stageId === null) {\n return null;\n }\n\n if (currentStage.stageId === stageId) {\n return currentStage;\n }\n\n for (var i = 0; i < currentStage.subStages.length; i++) {\n var stage = this.findStage(stageId, currentStage.subStages[i]);\n 
if (stage !== null) {\n return stage;\n }\n }\n\n return null;\n }\n }, {\n key: \"getAllStageIds\",\n value: function getAllStageIds(result, currentStage) {\n var _this10 = this;\n\n result.push(currentStage.plan.id);\n currentStage.subStages.forEach(function (stage) {\n _this10.getAllStageIds(result, stage);\n });\n }\n }, {\n key: \"render\",\n value: function render() {\n var _this11 = this;\n\n if (!this.state.query) {\n var label = _react2.default.createElement(\n \"div\",\n { className: \"loader\" },\n \"Loading...\"\n );\n if (this.state.initialized) {\n label = \"Query not found\";\n }\n return _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n label\n )\n )\n );\n }\n\n if (!this.state.query.outputStage) {\n return _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"Query does not have an output stage\"\n )\n )\n );\n }\n\n var query = this.state.query;\n var allStages = [];\n this.getAllStageIds(allStages, query.outputStage);\n\n var stage = this.findStage(query.queryId + \".\" + this.state.selectedStageId, query.outputStage);\n if (stage === null) {\n return _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"Stage not found\"\n )\n )\n );\n }\n\n var stageOperatorGraph = null;\n if (!(0, _utils.isQueryEnded)(query)) {\n stageOperatorGraph = _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"Operator graph will appear automatically when query completes.\"\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"loader\" },\n \"Loading...\"\n )\n )\n );\n } else {\n stageOperatorGraph = _react2.default.createElement(StageOperatorGraph, { id: stage.stageId, stage: stage });\n }\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(_QueryHeader.QueryHeader, { query: query }),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-2\" },\n _react2.default.createElement(\n \"h3\",\n null,\n \"Stage \",\n stage.plan.id\n )\n ),\n _react2.default.createElement(\"div\", { className: \"col-xs-8\" }),\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-2 stage-dropdown\" },\n _react2.default.createElement(\n \"div\",\n { className: \"input-group-btn\" },\n _react2.default.createElement(\n \"button\",\n { type: \"button\", className: \"btn btn-default dropdown-toggle\",\n \"data-toggle\": \"dropdown\", \"aria-haspopup\": \"true\", \"aria-expanded\": \"false\" },\n \"Select Stage \",\n _react2.default.createElement(\"span\", { className: \"caret\" })\n ),\n _react2.default.createElement(\n \"ul\",\n { className: \"dropdown-menu\" },\n allStages.map(function (stageId) {\n return _react2.default.createElement(\n \"li\",\n { key: stageId },\n 
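
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the shipped bundle): the stage tree above
// is navigated with two small recursions — findStage locates a stage by its
// fully-qualified id ("<queryId>.<stageNumber>") and getAllStageIds flattens
// the tree for the stage-selector dropdown. A plain-function version of the
// same traversal:
function collectStages(stage, out) {
    out.push(stage);
    stage.subStages.forEach(function (subStage) {
        collectStages(subStage, out);
    });
    return out;
}
// usage: collectStages(query.outputStage, []).map(function (s) { return s.stageId; })
// ---------------------------------------------------------------------------
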
_react2.default.createElement(\n \"a\",\n {\n onClick: function onClick() {\n return _this11.setState({ selectedStageId: stageId });\n } },\n stageId\n )\n );\n })\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\"hr\", { className: \"h3-hr\" }),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n stageOperatorGraph\n )\n )\n );\n }\n }]);\n\n return StageDetail;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/StageDetail.jsx?"); /***/ }), @@ -20675,7 +20675,7 @@ eval("\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/i /***/ (function(module, exports, __webpack_require__) { "use strict"; -eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.GLYPHICON_HIGHLIGHT = exports.GLYPHICON_DEFAULT = undefined;\nexports.getQueryStateColor = getQueryStateColor;\nexports.getStageStateColor = getStageStateColor;\nexports.getHumanReadableState = getHumanReadableState;\nexports.getProgressBarPercentage = getProgressBarPercentage;\nexports.getProgressBarTitle = getProgressBarTitle;\nexports.isQueryEnded = isQueryEnded;\nexports.addToHistory = addToHistory;\nexports.addExponentiallyWeightedToHistory = addExponentiallyWeightedToHistory;\nexports.initializeGraph = initializeGraph;\nexports.initializeSvg = initializeSvg;\nexports.truncateString = truncateString;\nexports.getStageNumber = getStageNumber;\nexports.getTaskIdSuffix = getTaskIdSuffix;\nexports.getTaskNumber = getTaskNumber;\nexports.getFirstParameter = getFirstParameter;\nexports.getHostname = getHostname;\nexports.getPort = getPort;\nexports.getHostAndPort = getHostAndPort;\nexports.computeRate = computeRate;\nexports.precisionRound = precisionRound;\nexports.formatDuration = formatDuration;\nexports.formatRows = formatRows;\nexports.formatCount = formatCount;\nexports.formatDataSizeBytes = formatDataSizeBytes;\nexports.formatDataSize = formatDataSize;\nexports.parseDataSize = parseDataSize;\nexports.parseDuration = parseDuration;\nexports.formatShortTime = formatShortTime;\nexports.formatShortDateTime = formatShortDateTime;\n\nvar _dagreD = __webpack_require__(/*! dagre-d3 */ \"./node_modules/dagre-d3/index.js\");\n\nvar dagreD3 = _interopRequireWildcard(_dagreD);\n\nvar _d = __webpack_require__(/*! 
d3 */ \"./node_modules/d3/index.js\");\n\nvar d3 = _interopRequireWildcard(_d);\n\nfunction _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } }\n\n// Query display\n// =============\n\n/*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\nvar GLYPHICON_DEFAULT = exports.GLYPHICON_DEFAULT = { color: '#1edcff' };\nvar GLYPHICON_HIGHLIGHT = exports.GLYPHICON_HIGHLIGHT = { color: '#999999' };\n\nvar STATE_COLOR_MAP = {\n QUEUED: '#1b8f72',\n RUNNING: '#19874e',\n PLANNING: '#674f98',\n FINISHED: '#1a4629',\n BLOCKED: '#61003b',\n USER_ERROR: '#9a7d66',\n CANCELED: '#858959',\n INSUFFICIENT_RESOURCES: '#7f5b72',\n EXTERNAL_ERROR: '#ca7640',\n UNKNOWN_ERROR: '#943524'\n};\n\nfunction getQueryStateColor(query) {\n switch (query.state) {\n case \"QUEUED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"PLANNING\":\n return STATE_COLOR_MAP.PLANNING;\n case \"STARTING\":\n case \"FINISHING\":\n case \"RUNNING\":\n if (query.queryStats && query.queryStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FAILED\":\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === 'USER_CANCELED') {\n return STATE_COLOR_MAP.CANCELED;\n }\n return STATE_COLOR_MAP.USER_ERROR;\n case \"EXTERNAL\":\n return STATE_COLOR_MAP.EXTERNAL_ERROR;\n case \"INSUFFICIENT_RESOURCES\":\n return STATE_COLOR_MAP.INSUFFICIENT_RESOURCES;\n default:\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n }\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n default:\n return STATE_COLOR_MAP.QUEUED;\n }\n}\n\nfunction getStageStateColor(stage) {\n switch (stage.state) {\n case \"PLANNED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"SCHEDULING\":\n case \"SCHEDULING_SPLITS\":\n case \"SCHEDULED\":\n return STATE_COLOR_MAP.PLANNING;\n case \"RUNNING\":\n if (stage.stageStats && stage.stageStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n case \"CANCELED\":\n case \"ABORTED\":\n return STATE_COLOR_MAP.CANCELED;\n case \"FAILED\":\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n default:\n return \"#b5b5b5\";\n }\n}\n\n// This relies on the fact that BasicQueryInfo and QueryInfo have all the fields\n// necessary to compute this string, and that these fields are consistently named.\nfunction getHumanReadableState(query) {\n if (query.state === \"RUNNING\") {\n var title = \"RUNNING\";\n\n if (query.scheduled && query.queryStats.totalDrivers > 0 && query.queryStats.runningDrivers >= 0) {\n if (query.queryStats.fullyBlocked) {\n title = \"BLOCKED\";\n\n if (query.queryStats.blockedReasons && query.queryStats.blockedReasons.length > 0) {\n title += \" (\" + query.queryStats.blockedReasons.join(\", \") + \")\";\n }\n }\n\n if (query.memoryPool === \"reserved\") {\n title += 
\" (RESERVED)\";\n }\n\n return title;\n }\n }\n\n if (query.state === \"FAILED\") {\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === \"USER_CANCELED\") {\n return \"USER CANCELED\";\n }\n return \"USER ERROR\";\n case \"INTERNAL_ERROR\":\n return \"INTERNAL ERROR\";\n case \"INSUFFICIENT_RESOURCES\":\n return \"INSUFFICIENT RESOURCES\";\n case \"EXTERNAL\":\n return \"EXTERNAL ERROR\";\n }\n }\n\n return query.state;\n}\n\nfunction getProgressBarPercentage(query) {\n var progress = query.queryStats.progressPercentage;\n\n // progress bars should appear 'full' when query progress is not meaningful\n if (!progress || query.state !== \"RUNNING\") {\n return 100;\n }\n\n return Math.round(progress);\n}\n\nfunction getProgressBarTitle(query) {\n if (query.queryStats.progressPercentage && query.state === \"RUNNING\") {\n return getHumanReadableState(query) + \" (\" + getProgressBarPercentage(query) + \"%)\";\n }\n\n return getHumanReadableState(query);\n}\n\nfunction isQueryEnded(query) {\n return [\"FINISHED\", \"FAILED\", \"CANCELED\"].indexOf(query.state) > -1;\n}\n\n// Sparkline-related functions\n// ===========================\n\n// display at most 5 minutes worth of data on the sparklines\nvar MAX_HISTORY = 60 * 5;\n// alpha param of exponentially weighted moving average. picked arbitrarily - lower values means more smoothness\nvar MOVING_AVERAGE_ALPHA = 0.2;\n\nfunction addToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n return valuesArray.concat([value]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\nfunction addExponentiallyWeightedToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n\n var movingAverage = value * MOVING_AVERAGE_ALPHA + valuesArray[valuesArray.length - 1] * (1 - MOVING_AVERAGE_ALPHA);\n if (value < 1) {\n movingAverage = 0;\n }\n\n return valuesArray.concat([movingAverage]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\n// DagreD3 Graph-related functions\n// ===============================\n\nfunction initializeGraph() {\n return new dagreD3.graphlib.Graph({ compound: true }).setGraph({ rankdir: 'BT' }).setDefaultEdgeLabel(function () {\n return {};\n });\n}\n\nfunction initializeSvg(selector) {\n var svg = d3.select(selector);\n svg.append(\"g\");\n\n return svg;\n}\n\n// Utility functions\n// =================\n\nfunction truncateString(inputString, length) {\n if (inputString && inputString.length > length) {\n return inputString.substring(0, length) + \"...\";\n }\n\n return inputString;\n}\n\nfunction getStageNumber(stageId) {\n return Number.parseInt(stageId.slice(stageId.indexOf('.') + 1, stageId.length));\n}\n\nfunction getTaskIdSuffix(taskId) {\n return taskId.slice(taskId.indexOf('.') + 1, taskId.length);\n}\n\nfunction getTaskNumber(taskId) {\n return Number.parseInt(getTaskIdSuffix(getTaskIdSuffix(taskId)));\n}\n\nfunction getFirstParameter(searchString) {\n var searchText = searchString.substring(1);\n\n if (searchText.indexOf('&') !== -1) {\n return searchText.substring(0, searchText.indexOf('&'));\n }\n\n return searchText;\n}\n\nfunction getHostname(taskLocation) {\n var hostname = taskLocation.nodeServer.host;\n //\n // var hostname = new URL(url).hostname;\n if (hostname.charAt(0) == '[' && hostname.charAt(hostname.length - 1) == ']') {\n hostname = hostname.substr(1, hostname.length - 2);\n }\n return hostname;\n}\n\nfunction getPort(taskLocation) {\n return 
taskLocation.nodeServer.httpPort;\n // return new URL(url).port;\n}\n\nfunction getHostAndPort(taskLocation) {\n // var url = new URL(taskLocation.nodeServer.uri);\n return taskLocation.nodeServer.host + \":\" + taskLocation.nodeServer.httpPort;\n}\n\nfunction computeRate(count, ms) {\n if (ms === 0) {\n return 0;\n }\n return count / ms * 1000.0;\n}\n\nfunction precisionRound(n) {\n if (n < 10) {\n return n.toFixed(2);\n }\n if (n < 100) {\n return n.toFixed(1);\n }\n return Math.round(n).toString();\n}\n\nfunction formatDuration(duration) {\n var unit = \"ms\";\n if (duration > 1000) {\n duration /= 1000;\n unit = \"s\";\n }\n if (unit === \"s\" && duration > 60) {\n duration /= 60;\n unit = \"m\";\n }\n if (unit === \"m\" && duration > 60) {\n duration /= 60;\n unit = \"h\";\n }\n if (unit === \"h\" && duration > 24) {\n duration /= 24;\n unit = \"d\";\n }\n if (unit === \"d\" && duration > 7) {\n duration /= 7;\n unit = \"w\";\n }\n return precisionRound(duration) + unit;\n}\n\nfunction formatRows(count) {\n if (count === 1) {\n return \"1 row\";\n }\n\n return formatCount(count) + \" rows\";\n}\n\nfunction formatCount(count) {\n var unit = \"\";\n if (count > 1000) {\n count /= 1000;\n unit = \"K\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"M\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"B\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"T\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"Q\";\n }\n return precisionRound(count) + unit;\n}\n\nfunction formatDataSizeBytes(size) {\n return formatDataSizeMinUnit(size, \"\");\n}\n\nfunction formatDataSize(size) {\n return formatDataSizeMinUnit(size, \"B\");\n}\n\nfunction formatDataSizeMinUnit(size, minUnit) {\n var unit = minUnit;\n if (size === 0) {\n return \"0\" + unit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"K\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"M\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"G\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"T\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"P\" + minUnit;\n }\n return precisionRound(size) + unit;\n}\n\nfunction parseDataSize(value) {\n var DATA_SIZE_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n var match = DATA_SIZE_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"B\":\n return number;\n case \"kB\":\n return number * Math.pow(2, 10);\n case \"MB\":\n return number * Math.pow(2, 20);\n case \"GB\":\n return number * Math.pow(2, 30);\n case \"TB\":\n return number * Math.pow(2, 40);\n case \"PB\":\n return number * Math.pow(2, 50);\n default:\n return null;\n }\n}\n\nfunction parseDuration(value) {\n var DURATION_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n\n var match = DURATION_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"ns\":\n return number / 1000000.0;\n case \"us\":\n return number / 1000.0;\n case \"ms\":\n return number;\n case \"s\":\n return number * 1000;\n case \"m\":\n return number * 1000 * 60;\n case \"h\":\n return number * 1000 * 60 * 60;\n case \"d\":\n return number * 1000 * 60 * 60 * 24;\n default:\n return null;\n }\n}\n\nfunction formatShortTime(date) {\n var hours = date.getHours() % 12 || 12;\n var minutes = (date.getMinutes() < 10 ? \"0\" : \"\") + date.getMinutes();\n return hours + \":\" + minutes + (date.getHours() >= 12 ? 
\"pm\" : \"am\");\n}\n\nfunction formatShortDateTime(date) {\n var year = date.getFullYear();\n var month = \"\" + (date.getMonth() + 1);\n var dayOfMonth = \"\" + date.getDate();\n return year + \"-\" + (month[1] ? month : \"0\" + month[0]) + \"-\" + (dayOfMonth[1] ? dayOfMonth : \"0\" + dayOfMonth[0]) + \" \" + formatShortTime(date);\n}\n\n//# sourceURL=webpack:///./utils.js?"); +eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.GLYPHICON_HIGHLIGHT = exports.GLYPHICON_DEFAULT = undefined;\nexports.getQueryStateColor = getQueryStateColor;\nexports.getStageStateColor = getStageStateColor;\nexports.getHumanReadableState = getHumanReadableState;\nexports.getProgressBarPercentage = getProgressBarPercentage;\nexports.getProgressBarTitle = getProgressBarTitle;\nexports.isQueryEnded = isQueryEnded;\nexports.addToHistory = addToHistory;\nexports.addExponentiallyWeightedToHistory = addExponentiallyWeightedToHistory;\nexports.initializeGraph = initializeGraph;\nexports.initializeSvg = initializeSvg;\nexports.truncateString = truncateString;\nexports.getStageNumber = getStageNumber;\nexports.getTaskIdSuffix = getTaskIdSuffix;\nexports.getFullSplitIdSuffix = getFullSplitIdSuffix;\nexports.getTaskNumber = getTaskNumber;\nexports.getFirstParameter = getFirstParameter;\nexports.getHostname = getHostname;\nexports.getPort = getPort;\nexports.getHostAndPort = getHostAndPort;\nexports.computeRate = computeRate;\nexports.precisionRound = precisionRound;\nexports.formatDurationMs = formatDurationMs;\nexports.formatDurationNs = formatDurationNs;\nexports.formatNumber = formatNumber;\nexports.formatRows = formatRows;\nexports.formatCount = formatCount;\nexports.formatDataSizeBytes = formatDataSizeBytes;\nexports.formatDataSize = formatDataSize;\nexports.parseDataSize = parseDataSize;\nexports.parseDuration = parseDuration;\nexports.formatShortTime = formatShortTime;\nexports.formatShortDateTime = formatShortDateTime;\n\nvar _dagreD = __webpack_require__(/*! dagre-d3 */ \"./node_modules/dagre-d3/index.js\");\n\nvar dagreD3 = _interopRequireWildcard(_dagreD);\n\nvar _d = __webpack_require__(/*! 
d3 */ \"./node_modules/d3/index.js\");\n\nvar d3 = _interopRequireWildcard(_d);\n\nfunction _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } }\n\n// Query display\n// =============\n\n/*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\nvar GLYPHICON_DEFAULT = exports.GLYPHICON_DEFAULT = { color: '#1edcff' };\nvar GLYPHICON_HIGHLIGHT = exports.GLYPHICON_HIGHLIGHT = { color: '#999999' };\n\nvar STATE_COLOR_MAP = {\n QUEUED: '#7bb3fb',\n RUNNING: '#265cdf',\n PLANNING: '#674f98',\n FINISHED: '#22b647',\n BLOCKED: '#61003b',\n USER_ERROR: '#9a7d66',\n CANCELED: '#858959',\n INSUFFICIENT_RESOURCES: '#7f5b72',\n EXTERNAL_ERROR: '#ca7640',\n UNKNOWN_ERROR: '#943524'\n};\n\nfunction getQueryStateColor(query) {\n switch (query.state) {\n case \"QUEUED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"PLANNING\":\n return STATE_COLOR_MAP.PLANNING;\n case \"STARTING\":\n case \"FINISHING\":\n case \"RUNNING\":\n if (query.queryStats && query.queryStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FAILED\":\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === 'USER_CANCELED') {\n return STATE_COLOR_MAP.CANCELED;\n }\n return STATE_COLOR_MAP.USER_ERROR;\n case \"EXTERNAL\":\n return STATE_COLOR_MAP.EXTERNAL_ERROR;\n case \"INSUFFICIENT_RESOURCES\":\n return STATE_COLOR_MAP.INSUFFICIENT_RESOURCES;\n default:\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n }\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n default:\n return STATE_COLOR_MAP.QUEUED;\n }\n}\n\nfunction getStageStateColor(stage) {\n switch (stage.state) {\n case \"PLANNED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"SCHEDULING\":\n case \"SCHEDULING_SPLITS\":\n case \"SCHEDULED\":\n return STATE_COLOR_MAP.PLANNING;\n case \"RUNNING\":\n if (stage.stageStats && stage.stageStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n case \"CANCELED\":\n case \"ABORTED\":\n return STATE_COLOR_MAP.CANCELED;\n case \"FAILED\":\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n default:\n return \"#b5b5b5\";\n }\n}\n\n// This relies on the fact that BasicQueryInfo and QueryInfo have all the fields\n// necessary to compute this string, and that these fields are consistently named.\nfunction getHumanReadableState(query) {\n if (query.state === \"RUNNING\") {\n var title = \"RUNNING\";\n\n if (query.scheduled && query.queryStats.totalDrivers > 0 && query.queryStats.runningDrivers >= 0) {\n if (query.queryStats.fullyBlocked) {\n title = \"BLOCKED\";\n\n if (query.queryStats.blockedReasons && query.queryStats.blockedReasons.length > 0) {\n title += \" (\" + query.queryStats.blockedReasons.join(\", \") + \")\";\n }\n }\n\n if (query.memoryPool === \"reserved\") {\n title += 
\" (RESERVED)\";\n }\n\n return title;\n }\n }\n\n if (query.state === \"FAILED\") {\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === \"USER_CANCELED\") {\n return \"USER CANCELED\";\n }\n return \"USER ERROR\";\n case \"INTERNAL_ERROR\":\n return \"INTERNAL ERROR\";\n case \"INSUFFICIENT_RESOURCES\":\n return \"INSUFFICIENT RESOURCES\";\n case \"EXTERNAL\":\n return \"EXTERNAL ERROR\";\n }\n }\n\n return query.state;\n}\n\nfunction getProgressBarPercentage(query) {\n var progress = query.queryStats.progressPercentage;\n\n // progress bars should appear 'full' when query progress is not meaningful\n if (!progress || query.state !== \"RUNNING\") {\n return 100;\n }\n\n return Math.round(progress);\n}\n\nfunction getProgressBarTitle(query) {\n if (query.queryStats.progressPercentage && query.state === \"RUNNING\") {\n return getHumanReadableState(query) + \" (\" + getProgressBarPercentage(query) + \"%)\";\n }\n\n return getHumanReadableState(query);\n}\n\nfunction isQueryEnded(query) {\n return [\"FINISHED\", \"FAILED\", \"CANCELED\"].indexOf(query.state) > -1;\n}\n\n// Sparkline-related functions\n// ===========================\n\n// display at most 5 minutes worth of data on the sparklines\nvar MAX_HISTORY = 60 * 5;\n// alpha param of exponentially weighted moving average. picked arbitrarily - lower values means more smoothness\nvar MOVING_AVERAGE_ALPHA = 0.2;\n\nfunction addToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n return valuesArray.concat([value]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\nfunction addExponentiallyWeightedToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n\n var movingAverage = value * MOVING_AVERAGE_ALPHA + valuesArray[valuesArray.length - 1] * (1 - MOVING_AVERAGE_ALPHA);\n if (value < 1) {\n movingAverage = 0;\n }\n\n return valuesArray.concat([movingAverage]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\n// DagreD3 Graph-related functions\n// ===============================\n\nfunction initializeGraph() {\n return new dagreD3.graphlib.Graph({ compound: true }).setGraph({ rankdir: 'BT' }).setDefaultEdgeLabel(function () {\n return {};\n });\n}\n\nfunction initializeSvg(selector) {\n var svg = d3.select(selector);\n svg.append(\"g\");\n\n return svg;\n}\n\n// Utility functions\n// =================\n\nfunction truncateString(inputString, length) {\n if (inputString && inputString.length > length) {\n return inputString.substring(0, length) + \"...\";\n }\n\n return inputString;\n}\n\nfunction getStageNumber(stageId) {\n return Number.parseInt(stageId.slice(stageId.indexOf('.') + 1, stageId.length));\n}\n\nfunction getTaskIdSuffix(taskId) {\n return taskId.slice(taskId.indexOf('.') + 1, taskId.length);\n}\n\nfunction getFullSplitIdSuffix(driverId) {\n return driverId.substring(driverId.indexOf('.') + 1);\n}\n\nfunction getTaskNumber(taskId) {\n return Number.parseInt(getTaskIdSuffix(getTaskIdSuffix(taskId)));\n}\n\nfunction getFirstParameter(searchString) {\n var searchText = searchString.substring(1);\n\n if (searchText.indexOf('&') !== -1) {\n return searchText.substring(0, searchText.indexOf('&'));\n }\n\n return searchText;\n}\n\nfunction getHostname(taskLocation) {\n var hostname = taskLocation.nodeServer.host;\n //\n // var hostname = new URL(url).hostname;\n if (hostname.charAt(0) == '[' && hostname.charAt(hostname.length - 1) == ']') {\n hostname = hostname.substr(1, 
hostname.length - 2);\n }\n return hostname;\n}\n\nfunction getPort(taskLocation) {\n return taskLocation.nodeServer.httpPort;\n // return new URL(url).port;\n}\n\nfunction getHostAndPort(taskLocation) {\n // var url = new URL(taskLocation.nodeServer.uri);\n return taskLocation.nodeServer.host + \":\" + taskLocation.nodeServer.httpPort;\n}\n\nfunction computeRate(count, ms) {\n if (ms === 0) {\n return 0;\n }\n return count / ms * 1000.0;\n}\n\nfunction precisionRound(n) {\n if (n < 10) {\n return n.toFixed(2);\n }\n if (n < 100) {\n return n.toFixed(1);\n }\n return Math.round(n).toString();\n}\n\nfunction formatDurationMs(duration) {\n var unit = \"ms\";\n if (duration > 1000) {\n duration /= 1000;\n unit = \"s\";\n }\n if (unit === \"s\" && duration > 60) {\n duration /= 60;\n unit = \"m\";\n }\n if (unit === \"m\" && duration > 60) {\n duration /= 60;\n unit = \"h\";\n }\n if (unit === \"h\" && duration > 24) {\n duration /= 24;\n unit = \"d\";\n }\n if (unit === \"d\" && duration > 7) {\n duration /= 7;\n unit = \"w\";\n }\n return precisionRound(duration) + unit;\n}\n\nfunction formatDurationNs(duration) {\n var unit = \"ns\";\n if (duration > 1000) {\n duration /= 1000;\n unit = \"us\";\n }\n if (duration > 1000) {\n duration /= 1000;\n unit = \"ms\";\n }\n if (duration > 1000) {\n duration /= 1000;\n unit = \"s\";\n }\n if (unit === \"s\" && duration > 60) {\n duration /= 60;\n unit = \"m\";\n }\n if (unit === \"m\" && duration > 60) {\n duration /= 60;\n unit = \"h\";\n }\n if (unit === \"h\" && duration > 24) {\n duration /= 24;\n unit = \"d\";\n }\n if (unit === \"d\" && duration > 7) {\n duration /= 7;\n unit = \"w\";\n }\n return precisionRound(duration) + unit;\n}\n\nfunction formatNumber(num) {\n return num.toLocaleString();\n}\n\nfunction formatRows(count) {\n if (count === 1) {\n return \"1 row\";\n }\n\n return formatCount(count) + \" rows\";\n}\n\nfunction formatCount(count) {\n var unit = \"\";\n if (count > 1000) {\n count /= 1000;\n unit = \"K\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"M\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"B\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"T\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"Q\";\n }\n return precisionRound(count) + unit;\n}\n\nfunction formatDataSizeBytes(size) {\n return formatDataSizeMinUnit(size, \"\");\n}\n\nfunction formatDataSize(size) {\n return formatDataSizeMinUnit(size, \"B\");\n}\n\nfunction formatDataSizeMinUnit(size, minUnit) {\n var unit = minUnit;\n if (size === 0) {\n return \"0\" + unit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"K\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"M\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"G\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"T\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"P\" + minUnit;\n }\n return precisionRound(size) + unit;\n}\n\nfunction parseDataSize(value) {\n var DATA_SIZE_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n var match = DATA_SIZE_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"B\":\n return number;\n case \"kB\":\n return number * Math.pow(2, 10);\n case \"MB\":\n return number * Math.pow(2, 20);\n case \"GB\":\n return number * Math.pow(2, 30);\n case \"TB\":\n return number * Math.pow(2, 40);\n case \"PB\":\n return number * Math.pow(2, 50);\n default:\n return null;\n }\n}\n\nfunction 
parseDuration(value) {\n var DURATION_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n\n var match = DURATION_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"ns\":\n return number / 1000000.0;\n case \"us\":\n return number / 1000.0;\n case \"ms\":\n return number;\n case \"s\":\n return number * 1000;\n case \"m\":\n return number * 1000 * 60;\n case \"h\":\n return number * 1000 * 60 * 60;\n case \"d\":\n return number * 1000 * 60 * 60 * 24;\n default:\n return null;\n }\n}\n\nfunction formatShortTime(date) {\n var hours = date.getHours() % 12 || 12;\n var minutes = (date.getMinutes() < 10 ? \"0\" : \"\") + date.getMinutes();\n return hours + \":\" + minutes + (date.getHours() >= 12 ? \"pm\" : \"am\");\n}\n\nfunction formatShortDateTime(date) {\n var year = date.getFullYear();\n var month = \"\" + (date.getMonth() + 1);\n var dayOfMonth = \"\" + date.getDate();\n return year + \"-\" + (month[1] ? month : \"0\" + month[0]) + \"-\" + (dayOfMonth[1] ? dayOfMonth : \"0\" + dayOfMonth[0]) + \" \" + formatShortTime(date);\n}\n\n//# sourceURL=webpack:///./utils.js?"); /***/ }) diff --git a/polardbx-executor/src/main/resources/webapp/dist/worker.js b/polardbx-executor/src/main/resources/webapp/dist/worker.js index 7b05fbc5e..2f77c0d2a 100644 --- a/polardbx-executor/src/main/resources/webapp/dist/worker.js +++ b/polardbx-executor/src/main/resources/webapp/dist/worker.js @@ -94,7 +94,7 @@ /***/ (function(module, exports, __webpack_require__) { "use strict"; -eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.PageTitle = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\nvar PageTitle = exports.PageTitle = function (_React$Component) {\n _inherits(PageTitle, _React$Component);\n\n function PageTitle(props) {\n _classCallCheck(this, PageTitle);\n\n var _this = _possibleConstructorReturn(this, (PageTitle.__proto__ || Object.getPrototypeOf(PageTitle)).call(this, props));\n\n _this.state = {\n noConnection: false,\n lightShown: false,\n info: null,\n lastSuccess: Date.now(),\n modalShown: false,\n errorText: null\n };\n return _this;\n }\n\n _createClass(PageTitle, [{\n key: \"refreshLoop\",\n value: function refreshLoop() {\n var _this2 = this;\n\n clearTimeout(this.timeoutId);\n fetch(\"/v1/info\").then(function (response) {\n return response.json();\n }).then(function (info) {\n _this2.setState({\n info: info,\n noConnection: false,\n lastSuccess: Date.now(),\n modalShown: false\n });\n //$FlowFixMe$ Bootstrap 3 plugin\n $('#no-connection-modal').modal('hide');\n _this2.resetTimer();\n }).catch(function (error) {\n _this2.setState({\n noConnection: true,\n lightShown: !_this2.state.lightShown,\n errorText: error\n });\n _this2.resetTimer();\n\n if (!_this2.state.modalShown && (error || Date.now() - _this2.state.lastSuccess > 30 * 1000)) {\n //$FlowFixMe$ Bootstrap 3 plugin\n $('#no-connection-modal').modal();\n _this2.setState({ modalShown: true });\n }\n });\n }\n }, {\n key: \"resetTimer\",\n value: function resetTimer() {\n clearTimeout(this.timeoutId);\n this.timeoutId = setTimeout(this.refreshLoop.bind(this), 5000);\n }\n }, {\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.refreshLoop.bind(this)();\n }\n }, {\n key: \"renderStatusLight\",\n value: function renderStatusLight() {\n if (this.state.noConnection) {\n if (this.state.lightShown) {\n return _react2.default.createElement(\"span\", { className: \"status-light status-light-red\", id: \"status-indicator\" });\n } else {\n return _react2.default.createElement(\"span\", { className: \"status-light\", id: \"status-indicator\" });\n }\n }\n return _react2.default.createElement(\"span\", { className: \"status-light status-light-green\", id: \"status-indicator\" });\n }\n }, {\n key: \"render\",\n value: function render() {\n var info = this.state.info;\n if (!info) {\n return null;\n }\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"nav\",\n { className: \"navbar\" },\n _react2.default.createElement(\n \"div\",\n { className: \"container-fluid\" },\n _react2.default.createElement(\n \"div\",\n { className: \"navbar-header\" },\n _react2.default.createElement(\n \"table\",\n null,\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"/ui/\" },\n _react2.default.createElement(\"img\", { src: \"assets/logo.png\" })\n )\n ),\n 
_react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-brand\" },\n this.props.title\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { id: \"navbar\", className: \"navbar-collapse collapse\" },\n _react2.default.createElement(\n \"ul\",\n { className: \"nav navbar-nav navbar-right\" },\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Version\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\",\n id: \"version-number\" },\n info.nodeVersion.version\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Environment\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\", id: \"environment\" },\n info.environment\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"CoordinatorId\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\", id: \"workerId\" },\n info.workerId\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Uptime\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { \"data-toggle\": \"tooltip\", \"data-placement\": \"bottom\", title: \"Connection status\" },\n this.renderStatusLight()\n ),\n \"\\xA0\",\n _react2.default.createElement(\n \"span\",\n { className: \"text\", id: \"uptime\" },\n info.uptime\n )\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { id: \"no-connection-modal\", className: \"modal\", tabIndex: \"-1\", role: \"dialog\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-dialog modal-sm\", role: \"document\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-content\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"h4\",\n null,\n \"Unable to connect to server\"\n ),\n _react2.default.createElement(\n \"p\",\n null,\n this.state.errorText ? 
\"Error: \" + this.state.errorText : null\n )\n )\n )\n )\n )\n )\n );\n }\n }]);\n\n return PageTitle;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/PageTitle.jsx?"); +eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.PageTitle = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n\nvar PageTitle = exports.PageTitle = function (_React$Component) {\n _inherits(PageTitle, _React$Component);\n\n function PageTitle(props) {\n _classCallCheck(this, PageTitle);\n\n var _this = _possibleConstructorReturn(this, (PageTitle.__proto__ || Object.getPrototypeOf(PageTitle)).call(this, props));\n\n _this.state = {\n noConnection: false,\n lightShown: false,\n info: null,\n lastSuccess: Date.now(),\n modalShown: false,\n errorText: null\n };\n return _this;\n }\n\n _createClass(PageTitle, [{\n key: \"refreshLoop\",\n value: function refreshLoop() {\n var _this2 = this;\n\n clearTimeout(this.timeoutId);\n fetch(\"/v1/info\").then(function (response) {\n return response.json();\n }).then(function (info) {\n _this2.setState({\n info: info,\n noConnection: false,\n lastSuccess: Date.now(),\n modalShown: false\n });\n //$FlowFixMe$ Bootstrap 3 plugin\n $('#no-connection-modal').modal('hide');\n _this2.resetTimer();\n }).catch(function (error) {\n _this2.setState({\n noConnection: true,\n lightShown: !_this2.state.lightShown,\n errorText: error\n });\n _this2.resetTimer();\n\n if (!_this2.state.modalShown && (error || Date.now() - _this2.state.lastSuccess > 30 * 1000)) {\n //$FlowFixMe$ Bootstrap 3 plugin\n $('#no-connection-modal').modal();\n _this2.setState({ modalShown: true });\n }\n });\n }\n }, {\n key: \"resetTimer\",\n value: function resetTimer() {\n clearTimeout(this.timeoutId);\n this.timeoutId = setTimeout(this.refreshLoop.bind(this), 5000);\n }\n }, {\n key: \"componentDidMount\",\n value: function componentDidMount() {\n this.refreshLoop.bind(this)();\n }\n }, {\n key: \"renderStatusLight\",\n value: function renderStatusLight() {\n if (this.state.noConnection) {\n if (this.state.lightShown) {\n return _react2.default.createElement(\"span\", { className: \"status-light status-light-red\", id: \"status-indicator\" });\n } else {\n return _react2.default.createElement(\"span\", { className: \"status-light\", id: \"status-indicator\" });\n }\n }\n return _react2.default.createElement(\"span\", { className: \"status-light status-light-green\", id: \"status-indicator\" });\n }\n }, {\n key: \"render\",\n value: function render() {\n var info = this.state.info;\n if (!info) {\n return null;\n }\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"nav\",\n { className: \"navbar\" },\n _react2.default.createElement(\n \"div\",\n { className: \"container-fluid\" },\n _react2.default.createElement(\n \"div\",\n { className: \"navbar-header\" },\n _react2.default.createElement(\n \"table\",\n null,\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"a\",\n { href: \"/ui/\" },\n _react2.default.createElement(\"img\", { src: \"assets/favicon.png\" })\n )\n 
),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-brand\" },\n this.props.title\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { id: \"navbar\", className: \"navbar-collapse collapse\" },\n _react2.default.createElement(\n \"ul\",\n { className: \"nav navbar-nav navbar-right\" },\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Version\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\",\n id: \"version-number\" },\n info.nodeVersion.version\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Environment\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\", id: \"environment\" },\n \"PolarDB-X\"\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Node\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"text uppercase\", id: \"workerId\" },\n info.workerId\n )\n )\n ),\n _react2.default.createElement(\n \"li\",\n null,\n _react2.default.createElement(\n \"span\",\n { className: \"navbar-cluster-info\" },\n _react2.default.createElement(\n \"span\",\n { className: \"uppercase\" },\n \"Uptime\"\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { \"data-toggle\": \"tooltip\", \"data-placement\": \"bottom\", title: \"Connection status\" },\n this.renderStatusLight()\n ),\n \"\\xA0\",\n _react2.default.createElement(\n \"span\",\n { className: \"text\", id: \"uptime\" },\n info.uptime\n )\n )\n )\n )\n )\n )\n ),\n _react2.default.createElement(\n \"div\",\n { id: \"no-connection-modal\", className: \"modal\", tabIndex: \"-1\", role: \"dialog\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-dialog modal-sm\", role: \"document\" },\n _react2.default.createElement(\n \"div\",\n { className: \"modal-content\" },\n _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"h4\",\n null,\n \"Unable to connect to server\"\n ),\n _react2.default.createElement(\n \"p\",\n null,\n this.state.errorText ? 
\"Error: \" + this.state.errorText : null\n )\n )\n )\n )\n )\n )\n );\n }\n }]);\n\n return PageTitle;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/PageTitle.jsx?"); /***/ }), @@ -118,7 +118,7 @@ eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n}); /***/ (function(module, exports, __webpack_require__) { "use strict"; -eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.WorkerThreadList = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nvar _utils = __webpack_require__(/*! ../utils */ \"./utils.js\");\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nvar ALL_THREADS = \"All Threads\";\nvar QUERY_THREADS = \"Running Queries\";\n\nvar ALL_THREAD_STATE = \"ALL\";\nvar THREAD_STATES = [ALL_THREAD_STATE, \"RUNNABLE\", \"BLOCKED\", \"WAITING\", \"TIMED_WAITING\", \"NEW\", \"TERMINATED\"];\nvar QUERY_THREAD_REGEX = new RegExp(/([0-9])*_([0-9])*_([0-9])*_.*?\\.([0-9])*\\.([0-9])*-([0-9])*-([0-9])*/);\nvar THREAD_GROUP_REGEXP = new RegExp(/(.*?)-[0-9]+/);\n\nvar WorkerThreadList = exports.WorkerThreadList = function (_React$Component) {\n _inherits(WorkerThreadList, _React$Component);\n\n function WorkerThreadList(props) {\n _classCallCheck(this, WorkerThreadList);\n\n var _this = _possibleConstructorReturn(this, (WorkerThreadList.__proto__ || Object.getPrototypeOf(WorkerThreadList)).call(this, props));\n\n _this.state = {\n serverInfo: null,\n initialized: false,\n ended: false,\n\n threads: null,\n\n snapshotTime: null,\n\n selectedGroup: ALL_THREADS,\n selectedThreadState: ALL_THREAD_STATE\n };\n return _this;\n }\n\n _createClass(WorkerThreadList, [{\n key: \"captureSnapshot\",\n value: function captureSnapshot() {\n var nodeId = (0, _utils.getFirstParameter)(window.location.search);\n $.get('/v1/worker/' + nodeId + '/thread', function (threads) {\n this.setState({\n threads: WorkerThreadList.processThreads(threads),\n snapshotTime: new Date(),\n initialized: true\n });\n }.bind(this)).error(function () {\n this.setState({\n initialized: true\n });\n }.bind(this));\n }\n }, {\n key: \"componentDidUpdate\",\n value: function componentDidUpdate() {\n new Clipboard('.copy-button');\n }\n }, {\n key: \"handleGroupClick\",\n value: function handleGroupClick(selectedGroup, event) {\n this.setState({\n selectedGroup: selectedGroup\n });\n event.preventDefault();\n }\n }, {\n key: \"handleThreadStateClick\",\n value: function handleThreadStateClick(selectedThreadState, event) {\n this.setState({\n selectedThreadState: selectedThreadState\n });\n event.preventDefault();\n }\n }, {\n key: \"handleNewSnapshotClick\",\n value: function handleNewSnapshotClick(event) {\n this.setState({\n initialized: false\n });\n this.captureSnapshot();\n event.preventDefault();\n }\n }, {\n key: \"filterThreads\",\n value: function filterThreads(group, state) {\n return this.state.threads[group].filter(function (t) {\n return t.state === state || state === ALL_THREAD_STATE;\n });\n }\n }, {\n key: \"renderGroupListItem\",\n value: function renderGroupListItem(group) {\n return _react2.default.createElement(\n \"li\",\n { key: group },\n _react2.default.createElement(\n \"a\",\n { href: \"#\", className: this.state.selectedGroup === group ? 
\"selected\" : \"\",\n onClick: this.handleGroupClick.bind(this, group) },\n group,\n \" (\",\n this.filterThreads(group, this.state.selectedThreadState).length,\n \")\"\n )\n );\n }\n }, {\n key: \"renderThreadStateListItem\",\n value: function renderThreadStateListItem(threadState) {\n return _react2.default.createElement(\n \"li\",\n { key: threadState },\n _react2.default.createElement(\n \"a\",\n { href: \"#\", className: this.state.selectedThreadState === threadState ? \"selected\" : \"\",\n onClick: this.handleThreadStateClick.bind(this, threadState) },\n threadState,\n \" (\",\n this.filterThreads(this.state.selectedGroup, threadState).length,\n \")\"\n )\n );\n }\n }, {\n key: \"renderStackLine\",\n value: function renderStackLine(threadId) {\n return function (stackLine, index) {\n return _react2.default.createElement(\n \"div\",\n { key: threadId + index },\n \"\\xA0\\xA0at \",\n stackLine.className,\n \".\",\n stackLine.method,\n \"(\",\n _react2.default.createElement(\n \"span\",\n { className: \"font-light\" },\n stackLine.file,\n \":\",\n stackLine.line\n ),\n \")\"\n );\n };\n }\n }, {\n key: \"renderThread\",\n value: function renderThread(threadInfo) {\n return _react2.default.createElement(\n \"div\",\n { key: threadInfo.id },\n _react2.default.createElement(\n \"span\",\n {\n className: \"font-white\" },\n threadInfo.name,\n \" \",\n threadInfo.state,\n \" #\",\n threadInfo.id,\n \" \",\n threadInfo.lockOwnerId\n ),\n _react2.default.createElement(\n \"a\",\n { className: \"copy-button\", \"data-clipboard-target\": \"#stack-trace-\" + threadInfo.id, \"data-toggle\": \"tooltip\",\n \"data-placement\": \"right\",\n title: \"Copy to clipboard\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-copy\", alt: \"Copy to clipboard\" })\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"stack-traces\", id: \"stack-trace-\" + threadInfo.id },\n threadInfo.stackTrace.map(this.renderStackLine(threadInfo.id))\n ),\n _react2.default.createElement(\n \"div\",\n null,\n \"\\xA0\"\n )\n );\n }\n }, {\n key: \"render\",\n value: function render() {\n var _this2 = this;\n\n var threads = this.state.threads;\n\n var display = null;\n var toolbar = null;\n if (threads === null) {\n if (this.state.initialized === false) {\n display = _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"button\",\n { className: \"btn btn-info live-button\",\n onClick: this.handleNewSnapshotClick.bind(this) },\n \"Capture Snapshot\"\n )\n )\n );\n } else {\n display = _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"Thread snapshot could not be loaded\"\n )\n )\n );\n }\n } else {\n toolbar = _react2.default.createElement(\n \"div\",\n { className: \"col-xs-9\" },\n _react2.default.createElement(\n \"table\",\n { className: \"header-inline-links\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"small\",\n null,\n \"Snapshot at \",\n this.state.snapshotTime.toTimeString()\n ),\n \"\\xA0\\xA0\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n 
_react2.default.createElement(\n \"button\",\n { className: \"btn btn-info live-button\",\n onClick: this.handleNewSnapshotClick.bind(this) },\n \"New Snapshot\"\n ),\n \"\\xA0\\xA0 \\xA0\\xA0\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"div\",\n { className: \"input-group-btn text-right\" },\n _react2.default.createElement(\n \"button\",\n { type: \"button\",\n className: \"btn btn-default dropdown-toggle pull-right text-right\",\n \"data-toggle\": \"dropdown\", \"aria-haspopup\": \"true\",\n \"aria-expanded\": \"false\" },\n _react2.default.createElement(\n \"strong\",\n null,\n \"Group:\"\n ),\n \" \",\n this.state.selectedGroup,\n \" \",\n _react2.default.createElement(\"span\", { className: \"caret\" })\n ),\n _react2.default.createElement(\n \"ul\",\n { className: \"dropdown-menu\" },\n Object.keys(threads).map(function (group) {\n return _this2.renderGroupListItem(group);\n })\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"div\",\n { className: \"input-group-btn text-right\" },\n _react2.default.createElement(\n \"button\",\n { type: \"button\",\n className: \"btn btn-default dropdown-toggle pull-right text-right\",\n \"data-toggle\": \"dropdown\", \"aria-haspopup\": \"true\",\n \"aria-expanded\": \"false\" },\n _react2.default.createElement(\n \"strong\",\n null,\n \"State:\"\n ),\n \" \",\n this.state.selectedThreadState,\n \" \",\n _react2.default.createElement(\"span\", {\n className: \"caret\" })\n ),\n _react2.default.createElement(\n \"ul\",\n { className: \"dropdown-menu\" },\n THREAD_STATES.map(function (state) {\n return _this2.renderThreadStateListItem(state);\n })\n )\n )\n )\n )\n )\n )\n );\n\n var filteredThreads = this.filterThreads(this.state.selectedGroup, this.state.selectedThreadState);\n var displayedThreads = void 0;\n if (filteredThreads.length === 0 && this.state.selectedThreadState === ALL_THREAD_STATE) {\n displayedThreads = _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"No threads in group '\",\n this.state.selectedGroup,\n \"'\"\n )\n )\n );\n } else if (filteredThreads.length === 0 && this.state.selectedGroup === ALL_THREADS) {\n displayedThreads = _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"No threads with state \",\n this.state.selectedThreadState\n )\n )\n );\n } else if (filteredThreads.length === 0) {\n displayedThreads = _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"No threads in group '\",\n this.state.selectedGroup,\n \"' with state \",\n this.state.selectedThreadState\n )\n )\n );\n } else {\n displayedThreads = _react2.default.createElement(\n \"pre\",\n null,\n filteredThreads.map(function (t) {\n return _this2.renderThread(t);\n })\n );\n }\n\n display = _react2.default.createElement(\n \"div\",\n { id: \"stack-traces\" },\n displayedThreads\n );\n }\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: 
\"col-xs-3\" },\n _react2.default.createElement(\n \"h3\",\n null,\n \"Thread Snapshot\",\n _react2.default.createElement(\n \"a\",\n { className: \"btn copy-button\", \"data-clipboard-target\": \"#stack-traces\", \"data-toggle\": \"tooltip\",\n \"data-placement\": \"right\", title: \"Copy to clipboard\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-copy\", alt: \"Copy to clipboard\" })\n ),\n \"\\xA0\"\n )\n ),\n toolbar\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\"hr\", { className: \"h3-hr\" }),\n display\n )\n )\n );\n }\n }], [{\n key: \"processThreads\",\n value: function processThreads(threads) {\n var result = {};\n\n result[ALL_THREADS] = threads;\n\n for (var i = 0; i < threads.length; i++) {\n var thread = threads[i];\n if (thread.name.match(QUERY_THREAD_REGEX)) {\n result[QUERY_THREADS].push(thread);\n }\n\n var match = THREAD_GROUP_REGEXP.exec(thread.name);\n var threadGroup = match ? match[1] : thread.name;\n if (!result[threadGroup]) {\n result[threadGroup] = [];\n }\n result[threadGroup].push(thread);\n }\n\n return result;\n }\n }]);\n\n return WorkerThreadList;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/WorkerThreadList.jsx?"); +eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.WorkerThreadList = undefined;\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nvar _react = __webpack_require__(/*! react */ \"./node_modules/react/index.js\");\n\nvar _react2 = _interopRequireDefault(_react);\n\nvar _utils = __webpack_require__(/*! ../utils */ \"./utils.js\");\n\nfunction _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; } /*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nvar ALL_THREADS = \"All Threads\";\nvar QUERY_THREADS = \"Running Queries\";\n\nvar ALL_THREAD_STATE = \"ALL\";\nvar THREAD_STATES = [ALL_THREAD_STATE, \"RUNNABLE\", \"BLOCKED\", \"WAITING\", \"TIMED_WAITING\", \"NEW\", \"TERMINATED\"];\nvar QUERY_THREAD_REGEX = new RegExp(/([0-9])*_([0-9])*_([0-9])*_.*?\\.([0-9])*\\.([0-9])*-([0-9])*-([0-9])*/);\nvar THREAD_GROUP_REGEXP = new RegExp(/(.*?)-[0-9]+/);\n\nvar WorkerThreadList = exports.WorkerThreadList = function (_React$Component) {\n _inherits(WorkerThreadList, _React$Component);\n\n function WorkerThreadList(props) {\n _classCallCheck(this, WorkerThreadList);\n\n var _this = _possibleConstructorReturn(this, (WorkerThreadList.__proto__ || Object.getPrototypeOf(WorkerThreadList)).call(this, props));\n\n _this.state = {\n serverInfo: null,\n initialized: false,\n ended: false,\n\n threads: null,\n\n snapshotTime: null,\n\n selectedGroup: ALL_THREADS,\n selectedThreadState: ALL_THREAD_STATE\n };\n return _this;\n }\n\n _createClass(WorkerThreadList, [{\n key: \"captureSnapshot\",\n value: function captureSnapshot() {\n var nodeId = (0, _utils.getFirstParameter)(window.location.search);\n $.get('/v1/worker/' + nodeId + '/thread', function (threads) {\n this.setState({\n threads: WorkerThreadList.processThreads(threads),\n snapshotTime: new Date(),\n initialized: true\n });\n }.bind(this)).error(function () {\n this.setState({\n initialized: true\n });\n }.bind(this));\n }\n }, {\n key: \"componentDidUpdate\",\n value: function componentDidUpdate() {\n new Clipboard('.copy-button');\n }\n }, {\n key: \"handleGroupClick\",\n value: function handleGroupClick(selectedGroup, event) {\n this.setState({\n selectedGroup: selectedGroup\n });\n event.preventDefault();\n }\n }, {\n key: \"handleThreadStateClick\",\n value: function handleThreadStateClick(selectedThreadState, event) {\n this.setState({\n selectedThreadState: selectedThreadState\n });\n event.preventDefault();\n }\n }, {\n key: \"handleNewSnapshotClick\",\n value: function handleNewSnapshotClick(event) {\n this.setState({\n initialized: false\n });\n this.captureSnapshot();\n event.preventDefault();\n }\n }, {\n key: \"filterThreads\",\n value: function filterThreads(group, state) {\n return this.state.threads[group].filter(function (t) {\n return t.state === state || state === ALL_THREAD_STATE;\n });\n }\n }, {\n key: \"renderGroupListItem\",\n value: function renderGroupListItem(group) {\n return _react2.default.createElement(\n \"li\",\n { key: group },\n _react2.default.createElement(\n \"a\",\n { href: \"#\", className: this.state.selectedGroup === group ? 
\"selected\" : \"\",\n onClick: this.handleGroupClick.bind(this, group) },\n group,\n \" (\",\n this.filterThreads(group, this.state.selectedThreadState).length,\n \")\"\n )\n );\n }\n }, {\n key: \"renderThreadStateListItem\",\n value: function renderThreadStateListItem(threadState) {\n return _react2.default.createElement(\n \"li\",\n { key: threadState },\n _react2.default.createElement(\n \"a\",\n { href: \"#\", className: this.state.selectedThreadState === threadState ? \"selected\" : \"\",\n onClick: this.handleThreadStateClick.bind(this, threadState) },\n threadState,\n \" (\",\n this.filterThreads(this.state.selectedGroup, threadState).length,\n \")\"\n )\n );\n }\n }, {\n key: \"renderStackLine\",\n value: function renderStackLine(threadId) {\n return function (stackLine, index) {\n return _react2.default.createElement(\n \"div\",\n { key: threadId + index },\n \"\\xA0\\xA0at \",\n stackLine.className,\n \".\",\n stackLine.method,\n \"(\",\n _react2.default.createElement(\n \"span\",\n { className: \"font-light\" },\n stackLine.file,\n \":\",\n stackLine.line\n ),\n \")\"\n );\n };\n }\n }, {\n key: \"renderThread\",\n value: function renderThread(threadInfo) {\n return _react2.default.createElement(\n \"div\",\n { key: threadInfo.id },\n _react2.default.createElement(\n \"span\",\n {\n className: \"font-black\" },\n threadInfo.name,\n \" \",\n threadInfo.state,\n \" #\",\n threadInfo.id,\n \" \",\n threadInfo.lockOwnerId\n ),\n _react2.default.createElement(\n \"a\",\n { className: \"copy-button\", \"data-clipboard-target\": \"#stack-trace-\" + threadInfo.id, \"data-toggle\": \"tooltip\",\n \"data-placement\": \"right\",\n title: \"Copy to clipboard\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-copy\", alt: \"Copy to clipboard\" })\n ),\n _react2.default.createElement(\"br\", null),\n _react2.default.createElement(\n \"span\",\n { className: \"stack-traces\", id: \"stack-trace-\" + threadInfo.id },\n threadInfo.stackTrace.map(this.renderStackLine(threadInfo.id))\n ),\n _react2.default.createElement(\n \"div\",\n null,\n \"\\xA0\"\n )\n );\n }\n }, {\n key: \"render\",\n value: function render() {\n var _this2 = this;\n\n var threads = this.state.threads;\n\n var display = null;\n var toolbar = null;\n if (threads === null) {\n if (this.state.initialized === false) {\n display = _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"button\",\n { className: \"btn btn-info live-button\",\n onClick: this.handleNewSnapshotClick.bind(this) },\n \"Capture Snapshot\"\n )\n )\n );\n } else {\n display = _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"Thread snapshot could not be loaded\"\n )\n )\n );\n }\n } else {\n toolbar = _react2.default.createElement(\n \"div\",\n { className: \"col-xs-9\" },\n _react2.default.createElement(\n \"table\",\n { className: \"header-inline-links\" },\n _react2.default.createElement(\n \"tbody\",\n null,\n _react2.default.createElement(\n \"tr\",\n null,\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"small\",\n null,\n \"Snapshot at \",\n this.state.snapshotTime.toTimeString()\n ),\n \"\\xA0\\xA0\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n 
_react2.default.createElement(\n \"button\",\n { className: \"btn btn-info live-button\",\n onClick: this.handleNewSnapshotClick.bind(this) },\n \"New Snapshot\"\n ),\n \"\\xA0\\xA0 \\xA0\\xA0\"\n ),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"div\",\n { className: \"input-group-btn text-right\" },\n _react2.default.createElement(\n \"button\",\n { type: \"button\",\n className: \"btn btn-default dropdown-toggle pull-right text-right\",\n \"data-toggle\": \"dropdown\", \"aria-haspopup\": \"true\",\n \"aria-expanded\": \"false\" },\n _react2.default.createElement(\n \"strong\",\n null,\n \"Group:\"\n ),\n \" \",\n this.state.selectedGroup,\n \" \",\n _react2.default.createElement(\"span\", { className: \"caret\" })\n ),\n _react2.default.createElement(\n \"ul\",\n { className: \"dropdown-menu\" },\n Object.keys(threads).map(function (group) {\n return _this2.renderGroupListItem(group);\n })\n )\n )\n ),\n _react2.default.createElement(\n \"td\",\n null,\n _react2.default.createElement(\n \"div\",\n { className: \"input-group-btn text-right\" },\n _react2.default.createElement(\n \"button\",\n { type: \"button\",\n className: \"btn btn-default dropdown-toggle pull-right text-right\",\n \"data-toggle\": \"dropdown\", \"aria-haspopup\": \"true\",\n \"aria-expanded\": \"false\" },\n _react2.default.createElement(\n \"strong\",\n null,\n \"State:\"\n ),\n \" \",\n this.state.selectedThreadState,\n \" \",\n _react2.default.createElement(\"span\", {\n className: \"caret\" })\n ),\n _react2.default.createElement(\n \"ul\",\n { className: \"dropdown-menu\" },\n THREAD_STATES.map(function (state) {\n return _this2.renderThreadStateListItem(state);\n })\n )\n )\n )\n )\n )\n )\n );\n\n var filteredThreads = this.filterThreads(this.state.selectedGroup, this.state.selectedThreadState);\n var displayedThreads = void 0;\n if (filteredThreads.length === 0 && this.state.selectedThreadState === ALL_THREAD_STATE) {\n displayedThreads = _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"No threads in group '\",\n this.state.selectedGroup,\n \"'\"\n )\n )\n );\n } else if (filteredThreads.length === 0 && this.state.selectedGroup === ALL_THREADS) {\n displayedThreads = _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"No threads with state \",\n this.state.selectedThreadState\n )\n )\n );\n } else if (filteredThreads.length === 0) {\n displayedThreads = _react2.default.createElement(\n \"div\",\n { className: \"row error-message\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\n \"h4\",\n null,\n \"No threads in group '\",\n this.state.selectedGroup,\n \"' with state \",\n this.state.selectedThreadState\n )\n )\n );\n } else {\n displayedThreads = _react2.default.createElement(\n \"pre\",\n null,\n filteredThreads.map(function (t) {\n return _this2.renderThread(t);\n })\n );\n }\n\n display = _react2.default.createElement(\n \"div\",\n { id: \"stack-traces\" },\n displayedThreads\n );\n }\n\n return _react2.default.createElement(\n \"div\",\n null,\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: 
\"col-xs-3\" },\n _react2.default.createElement(\n \"h3\",\n null,\n \"Thread Snapshot\",\n _react2.default.createElement(\n \"a\",\n { className: \"btn copy-button\", \"data-clipboard-target\": \"#stack-traces\", \"data-toggle\": \"tooltip\",\n \"data-placement\": \"right\", title: \"Copy to clipboard\" },\n _react2.default.createElement(\"span\", { className: \"glyphicon glyphicon-copy\", alt: \"Copy to clipboard\" })\n ),\n \"\\xA0\"\n )\n ),\n toolbar\n ),\n _react2.default.createElement(\n \"div\",\n { className: \"row\" },\n _react2.default.createElement(\n \"div\",\n { className: \"col-xs-12\" },\n _react2.default.createElement(\"hr\", { className: \"h3-hr\" }),\n display\n )\n )\n );\n }\n }], [{\n key: \"processThreads\",\n value: function processThreads(threads) {\n var result = {};\n\n result[ALL_THREADS] = threads;\n\n for (var i = 0; i < threads.length; i++) {\n var thread = threads[i];\n if (thread.name.match(QUERY_THREAD_REGEX)) {\n result[QUERY_THREADS].push(thread);\n }\n\n var match = THREAD_GROUP_REGEXP.exec(thread.name);\n var threadGroup = match ? match[1] : thread.name;\n if (!result[threadGroup]) {\n result[threadGroup] = [];\n }\n result[threadGroup].push(thread);\n }\n\n return result;\n }\n }]);\n\n return WorkerThreadList;\n}(_react2.default.Component);\n\n//# sourceURL=webpack:///./components/WorkerThreadList.jsx?"); /***/ }), @@ -20627,7 +20627,7 @@ eval("module.exports = function(module) {\n\tif (!module.webpackPolyfill) {\n\t\ /***/ (function(module, exports, __webpack_require__) { "use strict"; -eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.GLYPHICON_HIGHLIGHT = exports.GLYPHICON_DEFAULT = undefined;\nexports.getQueryStateColor = getQueryStateColor;\nexports.getStageStateColor = getStageStateColor;\nexports.getHumanReadableState = getHumanReadableState;\nexports.getProgressBarPercentage = getProgressBarPercentage;\nexports.getProgressBarTitle = getProgressBarTitle;\nexports.isQueryEnded = isQueryEnded;\nexports.addToHistory = addToHistory;\nexports.addExponentiallyWeightedToHistory = addExponentiallyWeightedToHistory;\nexports.initializeGraph = initializeGraph;\nexports.initializeSvg = initializeSvg;\nexports.truncateString = truncateString;\nexports.getStageNumber = getStageNumber;\nexports.getTaskIdSuffix = getTaskIdSuffix;\nexports.getTaskNumber = getTaskNumber;\nexports.getFirstParameter = getFirstParameter;\nexports.getHostname = getHostname;\nexports.getPort = getPort;\nexports.getHostAndPort = getHostAndPort;\nexports.computeRate = computeRate;\nexports.precisionRound = precisionRound;\nexports.formatDuration = formatDuration;\nexports.formatRows = formatRows;\nexports.formatCount = formatCount;\nexports.formatDataSizeBytes = formatDataSizeBytes;\nexports.formatDataSize = formatDataSize;\nexports.parseDataSize = parseDataSize;\nexports.parseDuration = parseDuration;\nexports.formatShortTime = formatShortTime;\nexports.formatShortDateTime = formatShortDateTime;\n\nvar _dagreD = __webpack_require__(/*! dagre-d3 */ \"./node_modules/dagre-d3/index.js\");\n\nvar dagreD3 = _interopRequireWildcard(_dagreD);\n\nvar _d = __webpack_require__(/*! 
d3 */ \"./node_modules/d3/index.js\");\n\nvar d3 = _interopRequireWildcard(_d);\n\nfunction _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } }\n\n// Query display\n// =============\n\n/*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\nvar GLYPHICON_DEFAULT = exports.GLYPHICON_DEFAULT = { color: '#1edcff' };\nvar GLYPHICON_HIGHLIGHT = exports.GLYPHICON_HIGHLIGHT = { color: '#999999' };\n\nvar STATE_COLOR_MAP = {\n QUEUED: '#1b8f72',\n RUNNING: '#19874e',\n PLANNING: '#674f98',\n FINISHED: '#1a4629',\n BLOCKED: '#61003b',\n USER_ERROR: '#9a7d66',\n CANCELED: '#858959',\n INSUFFICIENT_RESOURCES: '#7f5b72',\n EXTERNAL_ERROR: '#ca7640',\n UNKNOWN_ERROR: '#943524'\n};\n\nfunction getQueryStateColor(query) {\n switch (query.state) {\n case \"QUEUED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"PLANNING\":\n return STATE_COLOR_MAP.PLANNING;\n case \"STARTING\":\n case \"FINISHING\":\n case \"RUNNING\":\n if (query.queryStats && query.queryStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FAILED\":\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === 'USER_CANCELED') {\n return STATE_COLOR_MAP.CANCELED;\n }\n return STATE_COLOR_MAP.USER_ERROR;\n case \"EXTERNAL\":\n return STATE_COLOR_MAP.EXTERNAL_ERROR;\n case \"INSUFFICIENT_RESOURCES\":\n return STATE_COLOR_MAP.INSUFFICIENT_RESOURCES;\n default:\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n }\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n default:\n return STATE_COLOR_MAP.QUEUED;\n }\n}\n\nfunction getStageStateColor(stage) {\n switch (stage.state) {\n case \"PLANNED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"SCHEDULING\":\n case \"SCHEDULING_SPLITS\":\n case \"SCHEDULED\":\n return STATE_COLOR_MAP.PLANNING;\n case \"RUNNING\":\n if (stage.stageStats && stage.stageStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n case \"CANCELED\":\n case \"ABORTED\":\n return STATE_COLOR_MAP.CANCELED;\n case \"FAILED\":\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n default:\n return \"#b5b5b5\";\n }\n}\n\n// This relies on the fact that BasicQueryInfo and QueryInfo have all the fields\n// necessary to compute this string, and that these fields are consistently named.\nfunction getHumanReadableState(query) {\n if (query.state === \"RUNNING\") {\n var title = \"RUNNING\";\n\n if (query.scheduled && query.queryStats.totalDrivers > 0 && query.queryStats.runningDrivers >= 0) {\n if (query.queryStats.fullyBlocked) {\n title = \"BLOCKED\";\n\n if (query.queryStats.blockedReasons && query.queryStats.blockedReasons.length > 0) {\n title += \" (\" + query.queryStats.blockedReasons.join(\", \") + \")\";\n }\n }\n\n if (query.memoryPool === \"reserved\") {\n title += 
\" (RESERVED)\";\n }\n\n return title;\n }\n }\n\n if (query.state === \"FAILED\") {\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === \"USER_CANCELED\") {\n return \"USER CANCELED\";\n }\n return \"USER ERROR\";\n case \"INTERNAL_ERROR\":\n return \"INTERNAL ERROR\";\n case \"INSUFFICIENT_RESOURCES\":\n return \"INSUFFICIENT RESOURCES\";\n case \"EXTERNAL\":\n return \"EXTERNAL ERROR\";\n }\n }\n\n return query.state;\n}\n\nfunction getProgressBarPercentage(query) {\n var progress = query.queryStats.progressPercentage;\n\n // progress bars should appear 'full' when query progress is not meaningful\n if (!progress || query.state !== \"RUNNING\") {\n return 100;\n }\n\n return Math.round(progress);\n}\n\nfunction getProgressBarTitle(query) {\n if (query.queryStats.progressPercentage && query.state === \"RUNNING\") {\n return getHumanReadableState(query) + \" (\" + getProgressBarPercentage(query) + \"%)\";\n }\n\n return getHumanReadableState(query);\n}\n\nfunction isQueryEnded(query) {\n return [\"FINISHED\", \"FAILED\", \"CANCELED\"].indexOf(query.state) > -1;\n}\n\n// Sparkline-related functions\n// ===========================\n\n// display at most 5 minutes worth of data on the sparklines\nvar MAX_HISTORY = 60 * 5;\n// alpha param of exponentially weighted moving average. picked arbitrarily - lower values means more smoothness\nvar MOVING_AVERAGE_ALPHA = 0.2;\n\nfunction addToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n return valuesArray.concat([value]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\nfunction addExponentiallyWeightedToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n\n var movingAverage = value * MOVING_AVERAGE_ALPHA + valuesArray[valuesArray.length - 1] * (1 - MOVING_AVERAGE_ALPHA);\n if (value < 1) {\n movingAverage = 0;\n }\n\n return valuesArray.concat([movingAverage]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\n// DagreD3 Graph-related functions\n// ===============================\n\nfunction initializeGraph() {\n return new dagreD3.graphlib.Graph({ compound: true }).setGraph({ rankdir: 'BT' }).setDefaultEdgeLabel(function () {\n return {};\n });\n}\n\nfunction initializeSvg(selector) {\n var svg = d3.select(selector);\n svg.append(\"g\");\n\n return svg;\n}\n\n// Utility functions\n// =================\n\nfunction truncateString(inputString, length) {\n if (inputString && inputString.length > length) {\n return inputString.substring(0, length) + \"...\";\n }\n\n return inputString;\n}\n\nfunction getStageNumber(stageId) {\n return Number.parseInt(stageId.slice(stageId.indexOf('.') + 1, stageId.length));\n}\n\nfunction getTaskIdSuffix(taskId) {\n return taskId.slice(taskId.indexOf('.') + 1, taskId.length);\n}\n\nfunction getTaskNumber(taskId) {\n return Number.parseInt(getTaskIdSuffix(getTaskIdSuffix(taskId)));\n}\n\nfunction getFirstParameter(searchString) {\n var searchText = searchString.substring(1);\n\n if (searchText.indexOf('&') !== -1) {\n return searchText.substring(0, searchText.indexOf('&'));\n }\n\n return searchText;\n}\n\nfunction getHostname(taskLocation) {\n var hostname = taskLocation.nodeServer.host;\n //\n // var hostname = new URL(url).hostname;\n if (hostname.charAt(0) == '[' && hostname.charAt(hostname.length - 1) == ']') {\n hostname = hostname.substr(1, hostname.length - 2);\n }\n return hostname;\n}\n\nfunction getPort(taskLocation) {\n return 
taskLocation.nodeServer.httpPort;\n // return new URL(url).port;\n}\n\nfunction getHostAndPort(taskLocation) {\n // var url = new URL(taskLocation.nodeServer.uri);\n return taskLocation.nodeServer.host + \":\" + taskLocation.nodeServer.httpPort;\n}\n\nfunction computeRate(count, ms) {\n if (ms === 0) {\n return 0;\n }\n return count / ms * 1000.0;\n}\n\nfunction precisionRound(n) {\n if (n < 10) {\n return n.toFixed(2);\n }\n if (n < 100) {\n return n.toFixed(1);\n }\n return Math.round(n).toString();\n}\n\nfunction formatDuration(duration) {\n var unit = \"ms\";\n if (duration > 1000) {\n duration /= 1000;\n unit = \"s\";\n }\n if (unit === \"s\" && duration > 60) {\n duration /= 60;\n unit = \"m\";\n }\n if (unit === \"m\" && duration > 60) {\n duration /= 60;\n unit = \"h\";\n }\n if (unit === \"h\" && duration > 24) {\n duration /= 24;\n unit = \"d\";\n }\n if (unit === \"d\" && duration > 7) {\n duration /= 7;\n unit = \"w\";\n }\n return precisionRound(duration) + unit;\n}\n\nfunction formatRows(count) {\n if (count === 1) {\n return \"1 row\";\n }\n\n return formatCount(count) + \" rows\";\n}\n\nfunction formatCount(count) {\n var unit = \"\";\n if (count > 1000) {\n count /= 1000;\n unit = \"K\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"M\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"B\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"T\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"Q\";\n }\n return precisionRound(count) + unit;\n}\n\nfunction formatDataSizeBytes(size) {\n return formatDataSizeMinUnit(size, \"\");\n}\n\nfunction formatDataSize(size) {\n return formatDataSizeMinUnit(size, \"B\");\n}\n\nfunction formatDataSizeMinUnit(size, minUnit) {\n var unit = minUnit;\n if (size === 0) {\n return \"0\" + unit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"K\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"M\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"G\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"T\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"P\" + minUnit;\n }\n return precisionRound(size) + unit;\n}\n\nfunction parseDataSize(value) {\n var DATA_SIZE_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n var match = DATA_SIZE_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"B\":\n return number;\n case \"kB\":\n return number * Math.pow(2, 10);\n case \"MB\":\n return number * Math.pow(2, 20);\n case \"GB\":\n return number * Math.pow(2, 30);\n case \"TB\":\n return number * Math.pow(2, 40);\n case \"PB\":\n return number * Math.pow(2, 50);\n default:\n return null;\n }\n}\n\nfunction parseDuration(value) {\n var DURATION_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n\n var match = DURATION_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"ns\":\n return number / 1000000.0;\n case \"us\":\n return number / 1000.0;\n case \"ms\":\n return number;\n case \"s\":\n return number * 1000;\n case \"m\":\n return number * 1000 * 60;\n case \"h\":\n return number * 1000 * 60 * 60;\n case \"d\":\n return number * 1000 * 60 * 60 * 24;\n default:\n return null;\n }\n}\n\nfunction formatShortTime(date) {\n var hours = date.getHours() % 12 || 12;\n var minutes = (date.getMinutes() < 10 ? \"0\" : \"\") + date.getMinutes();\n return hours + \":\" + minutes + (date.getHours() >= 12 ? 
\"pm\" : \"am\");\n}\n\nfunction formatShortDateTime(date) {\n var year = date.getFullYear();\n var month = \"\" + (date.getMonth() + 1);\n var dayOfMonth = \"\" + date.getDate();\n return year + \"-\" + (month[1] ? month : \"0\" + month[0]) + \"-\" + (dayOfMonth[1] ? dayOfMonth : \"0\" + dayOfMonth[0]) + \" \" + formatShortTime(date);\n}\n\n//# sourceURL=webpack:///./utils.js?"); +eval("\n\nObject.defineProperty(exports, \"__esModule\", {\n value: true\n});\nexports.GLYPHICON_HIGHLIGHT = exports.GLYPHICON_DEFAULT = undefined;\nexports.getQueryStateColor = getQueryStateColor;\nexports.getStageStateColor = getStageStateColor;\nexports.getHumanReadableState = getHumanReadableState;\nexports.getProgressBarPercentage = getProgressBarPercentage;\nexports.getProgressBarTitle = getProgressBarTitle;\nexports.isQueryEnded = isQueryEnded;\nexports.addToHistory = addToHistory;\nexports.addExponentiallyWeightedToHistory = addExponentiallyWeightedToHistory;\nexports.initializeGraph = initializeGraph;\nexports.initializeSvg = initializeSvg;\nexports.truncateString = truncateString;\nexports.getStageNumber = getStageNumber;\nexports.getTaskIdSuffix = getTaskIdSuffix;\nexports.getFullSplitIdSuffix = getFullSplitIdSuffix;\nexports.getTaskNumber = getTaskNumber;\nexports.getFirstParameter = getFirstParameter;\nexports.getHostname = getHostname;\nexports.getPort = getPort;\nexports.getHostAndPort = getHostAndPort;\nexports.computeRate = computeRate;\nexports.precisionRound = precisionRound;\nexports.formatDurationMs = formatDurationMs;\nexports.formatDurationNs = formatDurationNs;\nexports.formatNumber = formatNumber;\nexports.formatRows = formatRows;\nexports.formatCount = formatCount;\nexports.formatDataSizeBytes = formatDataSizeBytes;\nexports.formatDataSize = formatDataSize;\nexports.parseDataSize = parseDataSize;\nexports.parseDuration = parseDuration;\nexports.formatShortTime = formatShortTime;\nexports.formatShortDateTime = formatShortDateTime;\n\nvar _dagreD = __webpack_require__(/*! dagre-d3 */ \"./node_modules/dagre-d3/index.js\");\n\nvar dagreD3 = _interopRequireWildcard(_dagreD);\n\nvar _d = __webpack_require__(/*! 
d3 */ \"./node_modules/d3/index.js\");\n\nvar d3 = _interopRequireWildcard(_d);\n\nfunction _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) newObj[key] = obj[key]; } } newObj.default = obj; return newObj; } }\n\n// Query display\n// =============\n\n/*\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\nvar GLYPHICON_DEFAULT = exports.GLYPHICON_DEFAULT = { color: '#1edcff' };\nvar GLYPHICON_HIGHLIGHT = exports.GLYPHICON_HIGHLIGHT = { color: '#999999' };\n\nvar STATE_COLOR_MAP = {\n QUEUED: '#7bb3fb',\n RUNNING: '#265cdf',\n PLANNING: '#674f98',\n FINISHED: '#22b647',\n BLOCKED: '#61003b',\n USER_ERROR: '#9a7d66',\n CANCELED: '#858959',\n INSUFFICIENT_RESOURCES: '#7f5b72',\n EXTERNAL_ERROR: '#ca7640',\n UNKNOWN_ERROR: '#943524'\n};\n\nfunction getQueryStateColor(query) {\n switch (query.state) {\n case \"QUEUED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"PLANNING\":\n return STATE_COLOR_MAP.PLANNING;\n case \"STARTING\":\n case \"FINISHING\":\n case \"RUNNING\":\n if (query.queryStats && query.queryStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FAILED\":\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === 'USER_CANCELED') {\n return STATE_COLOR_MAP.CANCELED;\n }\n return STATE_COLOR_MAP.USER_ERROR;\n case \"EXTERNAL\":\n return STATE_COLOR_MAP.EXTERNAL_ERROR;\n case \"INSUFFICIENT_RESOURCES\":\n return STATE_COLOR_MAP.INSUFFICIENT_RESOURCES;\n default:\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n }\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n default:\n return STATE_COLOR_MAP.QUEUED;\n }\n}\n\nfunction getStageStateColor(stage) {\n switch (stage.state) {\n case \"PLANNED\":\n return STATE_COLOR_MAP.QUEUED;\n case \"SCHEDULING\":\n case \"SCHEDULING_SPLITS\":\n case \"SCHEDULED\":\n return STATE_COLOR_MAP.PLANNING;\n case \"RUNNING\":\n if (stage.stageStats && stage.stageStats.fullyBlocked) {\n return STATE_COLOR_MAP.BLOCKED;\n }\n return STATE_COLOR_MAP.RUNNING;\n case \"FINISHED\":\n return STATE_COLOR_MAP.FINISHED;\n case \"CANCELED\":\n case \"ABORTED\":\n return STATE_COLOR_MAP.CANCELED;\n case \"FAILED\":\n return STATE_COLOR_MAP.UNKNOWN_ERROR;\n default:\n return \"#b5b5b5\";\n }\n}\n\n// This relies on the fact that BasicQueryInfo and QueryInfo have all the fields\n// necessary to compute this string, and that these fields are consistently named.\nfunction getHumanReadableState(query) {\n if (query.state === \"RUNNING\") {\n var title = \"RUNNING\";\n\n if (query.scheduled && query.queryStats.totalDrivers > 0 && query.queryStats.runningDrivers >= 0) {\n if (query.queryStats.fullyBlocked) {\n title = \"BLOCKED\";\n\n if (query.queryStats.blockedReasons && query.queryStats.blockedReasons.length > 0) {\n title += \" (\" + query.queryStats.blockedReasons.join(\", \") + \")\";\n }\n }\n\n if (query.memoryPool === \"reserved\") {\n title += 
\" (RESERVED)\";\n }\n\n return title;\n }\n }\n\n if (query.state === \"FAILED\") {\n switch (query.errorType) {\n case \"USER_ERROR\":\n if (query.errorCode.name === \"USER_CANCELED\") {\n return \"USER CANCELED\";\n }\n return \"USER ERROR\";\n case \"INTERNAL_ERROR\":\n return \"INTERNAL ERROR\";\n case \"INSUFFICIENT_RESOURCES\":\n return \"INSUFFICIENT RESOURCES\";\n case \"EXTERNAL\":\n return \"EXTERNAL ERROR\";\n }\n }\n\n return query.state;\n}\n\nfunction getProgressBarPercentage(query) {\n var progress = query.queryStats.progressPercentage;\n\n // progress bars should appear 'full' when query progress is not meaningful\n if (!progress || query.state !== \"RUNNING\") {\n return 100;\n }\n\n return Math.round(progress);\n}\n\nfunction getProgressBarTitle(query) {\n if (query.queryStats.progressPercentage && query.state === \"RUNNING\") {\n return getHumanReadableState(query) + \" (\" + getProgressBarPercentage(query) + \"%)\";\n }\n\n return getHumanReadableState(query);\n}\n\nfunction isQueryEnded(query) {\n return [\"FINISHED\", \"FAILED\", \"CANCELED\"].indexOf(query.state) > -1;\n}\n\n// Sparkline-related functions\n// ===========================\n\n// display at most 5 minutes worth of data on the sparklines\nvar MAX_HISTORY = 60 * 5;\n// alpha param of exponentially weighted moving average. picked arbitrarily - lower values means more smoothness\nvar MOVING_AVERAGE_ALPHA = 0.2;\n\nfunction addToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n return valuesArray.concat([value]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\nfunction addExponentiallyWeightedToHistory(value, valuesArray) {\n if (valuesArray.length === 0) {\n return valuesArray.concat([value]);\n }\n\n var movingAverage = value * MOVING_AVERAGE_ALPHA + valuesArray[valuesArray.length - 1] * (1 - MOVING_AVERAGE_ALPHA);\n if (value < 1) {\n movingAverage = 0;\n }\n\n return valuesArray.concat([movingAverage]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));\n}\n\n// DagreD3 Graph-related functions\n// ===============================\n\nfunction initializeGraph() {\n return new dagreD3.graphlib.Graph({ compound: true }).setGraph({ rankdir: 'BT' }).setDefaultEdgeLabel(function () {\n return {};\n });\n}\n\nfunction initializeSvg(selector) {\n var svg = d3.select(selector);\n svg.append(\"g\");\n\n return svg;\n}\n\n// Utility functions\n// =================\n\nfunction truncateString(inputString, length) {\n if (inputString && inputString.length > length) {\n return inputString.substring(0, length) + \"...\";\n }\n\n return inputString;\n}\n\nfunction getStageNumber(stageId) {\n return Number.parseInt(stageId.slice(stageId.indexOf('.') + 1, stageId.length));\n}\n\nfunction getTaskIdSuffix(taskId) {\n return taskId.slice(taskId.indexOf('.') + 1, taskId.length);\n}\n\nfunction getFullSplitIdSuffix(driverId) {\n return driverId.substring(driverId.indexOf('.') + 1);\n}\n\nfunction getTaskNumber(taskId) {\n return Number.parseInt(getTaskIdSuffix(getTaskIdSuffix(taskId)));\n}\n\nfunction getFirstParameter(searchString) {\n var searchText = searchString.substring(1);\n\n if (searchText.indexOf('&') !== -1) {\n return searchText.substring(0, searchText.indexOf('&'));\n }\n\n return searchText;\n}\n\nfunction getHostname(taskLocation) {\n var hostname = taskLocation.nodeServer.host;\n //\n // var hostname = new URL(url).hostname;\n if (hostname.charAt(0) == '[' && hostname.charAt(hostname.length - 1) == ']') {\n hostname = hostname.substr(1, 
hostname.length - 2);\n }\n return hostname;\n}\n\nfunction getPort(taskLocation) {\n return taskLocation.nodeServer.httpPort;\n // return new URL(url).port;\n}\n\nfunction getHostAndPort(taskLocation) {\n // var url = new URL(taskLocation.nodeServer.uri);\n return taskLocation.nodeServer.host + \":\" + taskLocation.nodeServer.httpPort;\n}\n\nfunction computeRate(count, ms) {\n if (ms === 0) {\n return 0;\n }\n return count / ms * 1000.0;\n}\n\nfunction precisionRound(n) {\n if (n < 10) {\n return n.toFixed(2);\n }\n if (n < 100) {\n return n.toFixed(1);\n }\n return Math.round(n).toString();\n}\n\nfunction formatDurationMs(duration) {\n var unit = \"ms\";\n if (duration > 1000) {\n duration /= 1000;\n unit = \"s\";\n }\n if (unit === \"s\" && duration > 60) {\n duration /= 60;\n unit = \"m\";\n }\n if (unit === \"m\" && duration > 60) {\n duration /= 60;\n unit = \"h\";\n }\n if (unit === \"h\" && duration > 24) {\n duration /= 24;\n unit = \"d\";\n }\n if (unit === \"d\" && duration > 7) {\n duration /= 7;\n unit = \"w\";\n }\n return precisionRound(duration) + unit;\n}\n\nfunction formatDurationNs(duration) {\n var unit = \"ns\";\n if (duration > 1000) {\n duration /= 1000;\n unit = \"us\";\n }\n if (duration > 1000) {\n duration /= 1000;\n unit = \"ms\";\n }\n if (duration > 1000) {\n duration /= 1000;\n unit = \"s\";\n }\n if (unit === \"s\" && duration > 60) {\n duration /= 60;\n unit = \"m\";\n }\n if (unit === \"m\" && duration > 60) {\n duration /= 60;\n unit = \"h\";\n }\n if (unit === \"h\" && duration > 24) {\n duration /= 24;\n unit = \"d\";\n }\n if (unit === \"d\" && duration > 7) {\n duration /= 7;\n unit = \"w\";\n }\n return precisionRound(duration) + unit;\n}\n\nfunction formatNumber(num) {\n return num.toLocaleString();\n}\n\nfunction formatRows(count) {\n if (count === 1) {\n return \"1 row\";\n }\n\n return formatCount(count) + \" rows\";\n}\n\nfunction formatCount(count) {\n var unit = \"\";\n if (count > 1000) {\n count /= 1000;\n unit = \"K\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"M\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"B\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"T\";\n }\n if (count > 1000) {\n count /= 1000;\n unit = \"Q\";\n }\n return precisionRound(count) + unit;\n}\n\nfunction formatDataSizeBytes(size) {\n return formatDataSizeMinUnit(size, \"\");\n}\n\nfunction formatDataSize(size) {\n return formatDataSizeMinUnit(size, \"B\");\n}\n\nfunction formatDataSizeMinUnit(size, minUnit) {\n var unit = minUnit;\n if (size === 0) {\n return \"0\" + unit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"K\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"M\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"G\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"T\" + minUnit;\n }\n if (size >= 1024) {\n size /= 1024;\n unit = \"P\" + minUnit;\n }\n return precisionRound(size) + unit;\n}\n\nfunction parseDataSize(value) {\n var DATA_SIZE_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n var match = DATA_SIZE_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"B\":\n return number;\n case \"kB\":\n return number * Math.pow(2, 10);\n case \"MB\":\n return number * Math.pow(2, 20);\n case \"GB\":\n return number * Math.pow(2, 30);\n case \"TB\":\n return number * Math.pow(2, 40);\n case \"PB\":\n return number * Math.pow(2, 50);\n default:\n return null;\n }\n}\n\nfunction 
parseDuration(value) {\n var DURATION_PATTERN = /^\\s*(\\d+(?:\\.\\d+)?)\\s*([a-zA-Z]+)\\s*$/;\n\n var match = DURATION_PATTERN.exec(value);\n if (match === null) {\n return null;\n }\n var number = parseFloat(match[1]);\n switch (match[2]) {\n case \"ns\":\n return number / 1000000.0;\n case \"us\":\n return number / 1000.0;\n case \"ms\":\n return number;\n case \"s\":\n return number * 1000;\n case \"m\":\n return number * 1000 * 60;\n case \"h\":\n return number * 1000 * 60 * 60;\n case \"d\":\n return number * 1000 * 60 * 60 * 24;\n default:\n return null;\n }\n}\n\nfunction formatShortTime(date) {\n var hours = date.getHours() % 12 || 12;\n var minutes = (date.getMinutes() < 10 ? \"0\" : \"\") + date.getMinutes();\n return hours + \":\" + minutes + (date.getHours() >= 12 ? \"pm\" : \"am\");\n}\n\nfunction formatShortDateTime(date) {\n var year = date.getFullYear();\n var month = \"\" + (date.getMonth() + 1);\n var dayOfMonth = \"\" + date.getDate();\n return year + \"-\" + (month[1] ? month : \"0\" + month[0]) + \"-\" + (dayOfMonth[1] ? dayOfMonth : \"0\" + dayOfMonth[0]) + \" \" + formatShortTime(date);\n}\n\n//# sourceURL=webpack:///./utils.js?"); /***/ }), diff --git a/polardbx-executor/src/main/resources/webapp/embedded_plan.html b/polardbx-executor/src/main/resources/webapp/embedded_plan.html index 805e29f32..68b129a8c 100644 --- a/polardbx-executor/src/main/resources/webapp/embedded_plan.html +++ b/polardbx-executor/src/main/resources/webapp/embedded_plan.html @@ -4,10 +4,10 @@ - - Live Plan - PolarDBX + + Live Plan - PolarDB-X - + diff --git a/polardbx-executor/src/main/resources/webapp/index.html b/polardbx-executor/src/main/resources/webapp/index.html index 6acdeeee4..1a416e892 100644 --- a/polardbx-executor/src/main/resources/webapp/index.html +++ b/polardbx-executor/src/main/resources/webapp/index.html @@ -4,10 +4,10 @@ - - Cluster Overview - PolarDBX + + Cluster Overview - PolarDB-X - + @@ -39,21 +39,25 @@
-
-
-
Loading...
+
+
+
+ Cluster HUD +
+
+
Loading...
+
-
-
-
- Query Details -
-
-
Loading...
+
+
+ Query List +
+
+
Loading...
+
-
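Before the new pipeline page below, one note on the sparkline smoothing carried over by the rebuilt utils.js bundle above: rate histories are capped at five minutes of samples and smoothed with an exponentially weighted moving average. A minimal sketch of that update, copied from the bundle, with an illustrative driver at the end (the sample values 100 and 50 are made up):

// EWMA history update as bundled in utils.js (alpha = 0.2; lower alpha = smoother sparklines).
var MAX_HISTORY = 60 * 5;        // keep at most 5 minutes worth of data
var MOVING_AVERAGE_ALPHA = 0.2;

function addExponentiallyWeightedToHistory(value, valuesArray) {
    if (valuesArray.length === 0) {
        return valuesArray.concat([value]);
    }
    // smoothed = alpha * sample + (1 - alpha) * previous smoothed value
    var movingAverage = value * MOVING_AVERAGE_ALPHA + valuesArray[valuesArray.length - 1] * (1 - MOVING_AVERAGE_ALPHA);
    if (value < 1) {
        movingAverage = 0;       // snap to zero once the raw rate dies off
    }
    return valuesArray.concat([movingAverage]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0));
}

// Illustrative only: a rate that drops from 100 to a steady 50 converges geometrically toward 50.
var history = [100];
history = addExponentiallyWeightedToHistory(50, history); // appends 0.2*50 + 0.8*100 = 90
history = addExponentiallyWeightedToHistory(50, history); // appends 0.2*50 + 0.8*90  = 82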
diff --git a/polardbx-executor/src/main/resources/webapp/pipeline.html b/polardbx-executor/src/main/resources/webapp/pipeline.html new file mode 100644 index 000000000..6a44f31a9 --- /dev/null +++ b/polardbx-executor/src/main/resources/webapp/pipeline.html @@ -0,0 +1,54 @@ + + + + + + + + Pipeline - PolarDB-X + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ +
+
Loading...
+
+
+ + + + + diff --git a/polardbx-executor/src/main/resources/webapp/plan.html b/polardbx-executor/src/main/resources/webapp/plan.html index 5c5c1c95b..60c282883 100644 --- a/polardbx-executor/src/main/resources/webapp/plan.html +++ b/polardbx-executor/src/main/resources/webapp/plan.html @@ -4,10 +4,10 @@ - - Live Plan - PolarDBX + + Live Plan - PolarDB-X - + @@ -43,9 +43,12 @@
-
-
Loading...
+
+
+
Loading...
+
+
diff --git a/polardbx-executor/src/main/resources/webapp/query.html b/polardbx-executor/src/main/resources/webapp/query.html index b7745be44..6cec578b1 100644 --- a/polardbx-executor/src/main/resources/webapp/query.html +++ b/polardbx-executor/src/main/resources/webapp/query.html @@ -4,10 +4,10 @@ - - Query Overview - PolarDBX + + Query Overview - PolarDB-X - + @@ -46,10 +46,13 @@
-
-
Loading...
+
+
+
Loading...
+
+
diff --git a/polardbx-executor/src/main/resources/webapp/src/components/LivePlan.jsx b/polardbx-executor/src/main/resources/webapp/src/components/LivePlan.jsx index 32b1784ee..11dec713f 100644 --- a/polardbx-executor/src/main/resources/webapp/src/components/LivePlan.jsx +++ b/polardbx-executor/src/main/resources/webapp/src/components/LivePlan.jsx @@ -18,7 +18,15 @@ import ReactDOMServer from "react-dom/server"; import * as dagreD3 from "dagre-d3"; import * as d3 from "d3"; -import {formatRows, getStageStateColor, initializeGraph, initializeSvg, truncateString} from "../utils"; +import { + formatDurationNs, + formatNumber, + formatRows, + getStageStateColor, + initializeGraph, + initializeSvg, + truncateString +} from "../utils"; import {QueryHeader} from "./QueryHeader"; type @@ -77,7 +85,7 @@ class StageStatistics extends React.ComponentStage {stage.id} {stage.state}
- CPU: {stats.totalCpuTime}
+ CpuTime: {formatDurationNs(stats.totalCpuTimeNanos)}
{stats.fullyBlocked ? -
Blocked: {stats.totalBlockedTime}
: -
Blocked: {stats.totalBlockedTime}
+
BlockedTime: {formatDurationNs(stats.totalBlockedTimeNanos)}
: +
BlockedTime: {formatDurationNs(stats.totalBlockedTimeNanos)}
} - Memory: {stats.totalMemoryReservation} + {/*Memory: {stats.totalMemoryReservation}*/}
PipelineExecs: {"Q:" + stats.queuedPipelineExecs + ", R:" + stats.runningPipelineExecs + ", F:" + stats.completedPipelineExecs}
- Input: {stats.processedInputDataSize + " / " + formatRows(stats.processedInputPositions)} + {/*Input: {stats.processedInputDataSize + " / " + formatRows(stats.processedInputPositions)}*/} + Input: {formatRows(stats.processedInputPositions)}
); @@ -192,7 +201,7 @@ export class LivePlan extends React.Component { refreshLoop() { clearTimeout(this.timeoutId); // to stop multiple series of refreshLoop from going on simultaneously - fetch('/v1/query/' + this.props.queryId) + fetch('/v1/query/stats/' + this.props.queryId) .then(response => response.json()) .then(query => { this.setState({ @@ -233,6 +242,13 @@ export class LivePlan extends React.Component { graph.setParent(stageRootNodeId, clusterId); graph.setEdge("node-" + stage.root, stageRootNodeId, {style: "visibility: hidden"}); + const stageOperatorsMap: Map = new Map(); + if (stage.stageStats.operatorSummaries) { + stage.stageStats.operatorSummaries.forEach(opSummary => { + stageOperatorsMap.set(opSummary.operatorId, opSummary); + }); + } + stage.nodes.forEach(node => { const nodeId = "node-" + node.id; const nodeHtml = ReactDOMServer.renderToString(); @@ -241,10 +257,20 @@ export class LivePlan extends React.Component { graph.setParent(nodeId, clusterId); node.sources.forEach(source => { - graph.setEdge("node-" + source, nodeId, {class: "plan-edge", arrowheadClass: "plan-arrowhead"}); + if (stageOperatorsMap.has(source)) { + graph.setEdge("node-" + source, nodeId, { + class: "plan-edge", + arrowheadClass: "plan-arrowhead", + label: formatRows(stageOperatorsMap.get(source).outputRowCount), + labelStyle: "color: #fff; font-weight: bold; font-size: 16px;", + labelType: "html", + }); + } else { + graph.setEdge("node-" + source, nodeId, {class: "plan-edge", arrowheadClass: "plan-arrowhead"}); + } }); - if (node.remoteSources != undefined && node.remoteSources.length > 0) { + if (node.remoteSources !== undefined && node.remoteSources.length > 0) { graph.setNode(nodeId, {label: '', shape: "circle"}); node.remoteSources.forEach(sourceId => { @@ -255,7 +281,7 @@ export class LivePlan extends React.Component { class: "plan-edge", style: "stroke-width: 4px", arrowheadClass: "plan-arrowhead", - label: sourceStats.outputDataSize + " / " + formatRows(sourceStats.outputPositions), + label: formatRows(sourceStats.outputPositions), labelStyle: "color: #fff; font-weight: bold; font-size: 24px;", labelType: "html", }); @@ -350,15 +376,17 @@ export class LivePlan extends React.Component { return (
{queryHeader} -
-
- {loadingMessage} -
-
- {this.state.ended ? "Scroll to zoom." : "Zoom disabled while query is running."} Click - stage to view additional statistics +
+
+
+ {loadingMessage} +
+
+ {this.state.ended ? "Scroll to zoom." : "Zoom disabled while query is running."} Click a + stage to view additional statistics
+
-
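The row-count labels that LivePlan.jsx now draws on plan edges come from formatRows applied to the operator summaries collected in stageOperatorsMap. As a worked example of the thousands cascade behind those labels, lifted from utils.js (the sample inputs are illustrative):

// precisionRound keeps two decimals below 10, one below 100, none above;
// formatCount divides by 1000 per step to pick the K/M/B/T/Q unit.
function precisionRound(n) {
    if (n < 10) { return n.toFixed(2); }
    if (n < 100) { return n.toFixed(1); }
    return Math.round(n).toString();
}

function formatCount(count) {
    var unit = "";
    if (count > 1000) { count /= 1000; unit = "K"; }
    if (count > 1000) { count /= 1000; unit = "M"; }
    if (count > 1000) { count /= 1000; unit = "B"; }
    if (count > 1000) { count /= 1000; unit = "T"; }
    if (count > 1000) { count /= 1000; unit = "Q"; }
    return precisionRound(count) + unit;
}

function formatRows(count) {
    return count === 1 ? "1 row" : formatCount(count) + " rows";
}

formatRows(1);       // "1 row"
formatRows(950);     // "950 rows"
formatRows(1234567); // "1.23M rows" -- the shape of the new plan-edge labels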
diff --git a/polardbx-executor/src/main/resources/webapp/src/components/PageTitle.jsx b/polardbx-executor/src/main/resources/webapp/src/components/PageTitle.jsx index 9018f7f14..3dc7f7c1d 100644 --- a/polardbx-executor/src/main/resources/webapp/src/components/PageTitle.jsx +++ b/polardbx-executor/src/main/resources/webapp/src/components/PageTitle.jsx @@ -111,7 +111,7 @@ export class PageTitle extends React.Component { - + {this.props.title} @@ -132,12 +132,12 @@ export class PageTitle extends React.Component {
  • Environment
    - {info.environment} + PolarDB-X
  • - CoordinatorId
    + Node
    {info.workerId}
  • diff --git a/polardbx-executor/src/main/resources/webapp/src/components/Pipeline.jsx b/polardbx-executor/src/main/resources/webapp/src/components/Pipeline.jsx new file mode 100644 index 000000000..9b9119ad0 --- /dev/null +++ b/polardbx-executor/src/main/resources/webapp/src/components/Pipeline.jsx @@ -0,0 +1,348 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +//@flow + +import React from "react"; +import ReactDOMServer from "react-dom/server"; +import * as dagreD3 from "dagre-d3"; +import * as d3 from "d3"; + +import { + formatDurationNs, + formatNumber, + formatRows, + getStageStateColor, + initializeGraph, + initializeSvg, + truncateString +} from "../utils"; +import {QueryHeader} from "./QueryHeader"; + +type +StageStatisticsProps = { + stage: any, +} +type +StageStatisticsState = {} +type +StageNodeInfo = { + stageId: string, + id: string, + root: string, + stageStats: any, + state: string, + nodes: Map < string, any >, +} + +class PipelineStatistics extends React.Component { + static getStages(queryInfo): Map { + const stages: Map = new Map(); + PipelineStatistics.flattenStage(queryInfo.outputStage, stages); + return stages; + } + + static flattenStage(stageInfo, result) { + stageInfo.subStages.forEach(function (stage) { + PipelineStatistics.flattenStage(stage, result); + }); + + const nodes = new Map(); + PipelineStatistics.flattenNode(result, JSON.parse(stageInfo.plan.relNodeJson).rels, nodes); + + result.set(stageInfo.plan.id, { + stageId: stageInfo.stageId, + id: stageInfo.plan.id, + root: stageInfo.plan.rootId, + stageStats: stageInfo.stageStats, + state: stageInfo.state, + nodes: nodes + }); + } + + static flattenNode(stages, node: any, result: Map) { + + node.forEach(function (element) { + let loadingMessage = (element.relOp + element.id); + if (element.tableNames !== undefined) { + loadingMessage = loadingMessage + ":" + element.tableNames; + } + result.set(element.relatedId, { + id: element.relatedId, + name: element.relOp, + identifier: loadingMessage, + sources: element.sources, + remoteSources: element.fragmentIds, + }); + }) + } + + render() { + const stage = this.props.stage; + const stats = this.props.stage.stageStats; + return ( +
    +
    +

    Stage {stage.id}

    + {stage.state} +
    +
    +
    + ); + } +} + +type +PlanNodeProps = { + id: string, + name: string, + identifier: string, + sources: string[], + remoteSources: string[], +} +type +PlanNodeState = {} + +class PlanNode extends React.Component { + constructor(props: PlanNodeProps) { + super(props); + } + + render() { + return ( +
    " + this.props.name + "" + this.props.identifier}> + {this.props.name} +
    + {truncateString(this.props.identifier, 35)} +
    +
    + ); + } +} + +type +LivePlanProps = { + queryId: string, + isEmbedded: boolean, +} + +type +LivePlanState = { + initialized: boolean, + ended: boolean, + + query: ? any, + + graph: any, + svg: any, + render: any, +} + +export class PipelinePlan extends React.Component { + timeoutId: TimeoutID; + + constructor(props: LivePlanProps) { + super(props); + this.state = { + initialized: false, + ended: false, + + query: null, + + graph: initializeGraph(), + svg: null, + render: new dagreD3.render(), + }; + } + + resetTimer() { + clearTimeout(this.timeoutId); + // stop refreshing when query finishes or fails + if (this.state.query === null || !this.state.ended) { + this.timeoutId = setTimeout(this.refreshLoop.bind(this), 5000); + } + } + + refreshLoop() { + clearTimeout(this.timeoutId); // to stop multiple series of refreshLoop from going on simultaneously + fetch('/v1/query/stats/' + this.props.queryId) + .then(response => response.json()) + .then(query => { + this.setState({ + query: query, + + initialized: true, + ended: query.finalQueryInfo, + }); + this.resetTimer(); + }) + .catch(() => { + this.setState({ + initialized: true, + }); + this.resetTimer(); + }); + } + + static handleStageClick(stageCssId: string) { + window.open("stage.html?" + stageCssId, '_blank'); + } + + componentDidMount() { + this.refreshLoop.bind(this)(); + } + + updateD3Stage(stage: StageNodeInfo, graph: any, allStages: Map) { + const clusterId = stage.stageId; + const stageRootNodeId = "stage-" + stage.id + "-root"; + const color = getStageStateColor(stage); + + graph.setNode(clusterId, {style: 'fill: ' + color, labelStyle: 'fill: #fff'}); + + // this is a non-standard use of ReactDOMServer, but it's the cleanest way to unify DagreD3 with React + const html = ReactDOMServer.renderToString(); + + graph.setNode(stageRootNodeId, {class: "stage-stats", label: html, labelType: "html"}); + graph.setParent(stageRootNodeId, clusterId); + graph.setEdge("node-" + stage.root, stageRootNodeId, {style: "visibility: hidden"}); + + stage.nodes.forEach(node => { + const nodeId = "node-" + node.id; + const nodeHtml = ReactDOMServer.renderToString(); + + graph.setNode(nodeId, {label: nodeHtml, style: 'fill: #fff', labelType: "html"}); + graph.setParent(nodeId, clusterId); + + node.sources.forEach(source => { + graph.setEdge("node-" + source, nodeId, {class: "plan-edge", arrowheadClass: "plan-arrowhead"}); + }); + + if (node.remoteSources !== undefined && node.remoteSources.length > 0) { + graph.setNode(nodeId, {label: '', shape: "circle"}); + + node.remoteSources.forEach(sourceId => { + const source = allStages.get(sourceId); + if (source) { + graph.setEdge("stage-" + sourceId + "-root", nodeId, { + class: "plan-edge", + style: "stroke-width: 4px", + }); + } + }); + } + }); + } + + updateD3Graph() { + if (!this.state.svg) { + this.setState({ + svg: initializeSvg("#plan-canvas"), + }); + return; + } + + if (!this.state.query) { + return; + } + + const graph = this.state.graph; + const stages = PipelineStatistics.getStages(this.state.query); + stages.forEach(stage => { + this.updateD3Stage(stage, graph, stages); + }); + + const inner = d3.select("#plan-canvas g"); + this.state.render(inner, graph); + + const svg = this.state.svg; + svg.selectAll("g.cluster").on("click", PipelinePlan.handleStageClick); + + const width = parseInt(window.getComputedStyle(document.getElementById("live-plan"), null).getPropertyValue("width").replace(/px/, "")) - 50; + const height = parseInt(window.getComputedStyle(document.getElementById("live-plan"), 
null).getPropertyValue("height").replace(/px/, "")) - 50; + + const graphHeight = graph.graph().height + 100; + const graphWidth = graph.graph().width + 100; + if (this.state.ended) { + // Zoom doesn't deal well with DOM changes + const initialScale = Math.min(width / graphWidth, height / graphHeight); + const zoom = d3.zoom().scaleExtent([initialScale, 1]).on("zoom", function () { + inner.attr("transform", d3.event.transform); + }); + + svg.call(zoom); + svg.call(zoom.transform, d3.zoomIdentity.translate((width - graph.graph().width * initialScale) / 2, 20).scale(initialScale)); + svg.attr('height', height); + svg.attr('width', width); + } + else { + svg.attr('height', graphHeight); + svg.attr('width', graphWidth); + } + } + + componentDidUpdate() { + this.updateD3Graph(); + //$FlowFixMe + $('[data-toggle="tooltip"]').tooltip() + } + + render() { + const query = this.state.query; + + if (query === null || this.state.initialized === false) { + let label = (
    Loading...
    ); + if (this.state.initialized) { + label = "Query not found"; + } + return ( +
    +

    {label}

    +
    + ); + } + + let loadingMessage = null; + if (query && !query.outputStage) { + loadingMessage = ( +
    +
    +

    Live plan graph will appear automatically when the query starts running.

    +
    Loading...
    +
    +
    + ) + } + + // TODO: Refactor components to move refreshLoop to parent rather than using this property + const queryHeader = this.props.isEmbedded ? null : ; + return ( +
    + {queryHeader} +
    +
    + {loadingMessage} +
    +
    + {this.state.ended ? "Scroll to zoom." : "Zoom disabled while query is running."} Click a + stage to view additional statistics
    + +
    +
    +
    +
    + ); + } +} diff --git a/polardbx-executor/src/main/resources/webapp/src/components/QueryDetail.jsx b/polardbx-executor/src/main/resources/webapp/src/components/QueryDetail.jsx index ba774a78d..083df340d 100644 --- a/polardbx-executor/src/main/resources/webapp/src/components/QueryDetail.jsx +++ b/polardbx-executor/src/main/resources/webapp/src/components/QueryDetail.jsx @@ -21,9 +21,9 @@ import { formatCount, formatDataSize, formatDataSizeBytes, - formatDuration, + formatDurationMs, formatDurationNs, formatShortDateTime, - getFirstParameter, + getFirstParameter, getFullSplitIdSuffix, getHostAndPort, getHostname, getStageNumber, @@ -90,14 +90,14 @@ class TaskList extends React.Component { } const renderedTasks = tasks.map(task => { - if (typeof(task.stats) === "undefined") { + if (typeof(task.detailedStats) === "undefined") { return ( {getTaskIdSuffix(task.taskStatus.taskId)} - + {getHostAndPort(task.taskStatus.self)} @@ -110,9 +110,6 @@ class TaskList extends React.Component { {0} - - {0} - {0} @@ -129,22 +126,16 @@ class TaskList extends React.Component { {task.completedPipelineExecs} - {task.elapsedTime} + {formatDurationMs(task.elapsedTimeMillis)} - {task.deliveryTime} + {formatDurationMs(task.deliveryTimeMillis)} - {formatDuration(task.processTime)} - - - {formatDuration(task.processWall)} + {formatDurationMs(task.processTimeMillis)} - {formatDuration(task.pullDataTime)} - - - {formatDataSizeBytes(task.outputBuffers.totalBufferedBytes)} + {formatDurationMs(task.pullDataTimeMillis)} ); @@ -155,7 +146,7 @@ class TaskList extends React.Component { {getTaskIdSuffix(task.taskStatus.taskId)} - + {getHostAndPort(task.taskStatus.self)} @@ -163,46 +154,34 @@ class TaskList extends React.Component { {task.taskStatus.state} - {formatCount(task.stats.outputPositions)} + {formatCount(task.detailedStats.outputPositions)} - {formatCount(task.stats.processedInputPositions)} - - - {formatCount(computeRate(task.stats.processedInputPositions, task.elapsedTime))} - - - {formatDataSizeBytes(task.stats.processedInputDataSize)} - - - {formatDataSizeBytes(computeRate(task.stats.processedInputDataSize, task.elapsedTime))} + {formatCount(task.detailedStats.processedInputPositions)} + {/**/} + {/* {formatDataSizeBytes(task.stats.processedInputDataSize)}*/} + {/**/} - {task.stats.queuedPipelineExecs} + {task.detailedStats.queuedPipelineExecs} - {task.stats.runningPipelineExecs} + {task.detailedStats.runningPipelineExecs} {task.completedPipelineExecs} - {task.elapsedTime} + {formatDurationMs(task.elapsedTimeMillis)} - {formatDuration(task.deliveryTime)} + {formatDurationMs(task.deliveryTimeMillis)} - {formatDuration(task.processTime)} - - - {formatDuration(task.processWall)} + {formatDurationMs(task.processTimeMillis)} - {formatDuration(task.pullDataTime)} - - - {formatDataSizeBytes(task.outputBuffers.totalBufferedBytes)} + {formatDurationMs(task.pullDataTimeMillis)} ); @@ -223,17 +202,11 @@ class TaskList extends React.Component { 'splitsDone', 'outputRows', 'inputRows', - 'inputRowsSec', - 'inputBytes', - 'inputBytesSec', + // 'inputBytes', 'elapsedTime', 'deliveryTime', 'processTime', - 'processWall', 'dataFinishTime', - 'tsds', - 'tstc', - 'bufferedBytes' ]} defaultSort={{column: 'id', direction: 'asc'}}> @@ -249,17 +222,13 @@ class TaskList extends React.Component { - outputRows - inputRows - inputRows/s - inputBytes - inputBytes/s + OutputRows + InputRows + {/*inputBytes*/} Elapsed Delivery Process - ProcessWall - DT - Buffered + DataFinish {renderedTasks} @@ -267,6 +236,112 @@ class TaskList extends 
React.Component { } } +class SplitList extends React.Component { + static removeQueryId(id) { + const pos = id.indexOf('.'); + if (pos !== -1) { + return id.substring(pos + 1); + } + return id; + } + + static compareTaskId(taskA, taskB) { + const taskIdArrA = TaskList.removeQueryId(taskA).split("."); + const taskIdArrB = TaskList.removeQueryId(taskB).split("."); + + if (taskIdArrA.length > taskIdArrB.length) { + return 1; + } + for (let i = 0; i < taskIdArrA.length; i++) { + const anum = Number.parseInt(taskIdArrA[i]); + const bnum = Number.parseInt(taskIdArrB[i]); + if (anum !== bnum) { + return anum > bnum ? 1 : -1; + } + } + + return 0; + } + + static formatState(state, fullyBlocked) { + if (fullyBlocked && state === "RUNNING") { + return "BLOCKED"; + } + else { + return state; + } + } + + render() { + const splits = this.props.splits; + + if (splits === undefined || splits.length === 0) { + return ( +
    +

    No splits in the selected group

    +
    ); + } + + const renderedSplits = splits.map(split => { + return ( + + + {getFullSplitIdSuffix(split.driverId)} + + {/**/} + {/* {split.state}*/} + {/**/} + + {formatCount(split.outputPositions)} + + + {formatCount(split.inputPositions)} + + + {formatDurationMs(split.endMillis - split.startMillis)} + + + {formatDurationNs(split.blockedNanos)} + + + {formatDurationNs(split.processNanos)} + + + ); + }); + + return ( + + + + {/**/} + + + {/**/} + + + + + {renderedSplits} +
    ID State OutputRows InputRows inputBytes Elapsed Blocked Process
    + ); + } +} + const BAR_CHART_WIDTH = 800; const BAR_CHART_PROPERTIES = { @@ -355,19 +430,19 @@ class StageSummary extends React.Component { componentDidUpdate() { const stage = this.props.stage; - const numTasks = stage.tasks.length; + const numTasks = stage.taskStats.length; // sort the x-axis - stage.tasks.sort((taskA, taskB) => getTaskNumber(taskA.taskStatus.taskId) - getTaskNumber(taskB.taskStatus.taskId)); + stage.taskStats.sort((taskA, taskB) => getTaskNumber(taskA.taskStatus.taskId) - getTaskNumber(taskB.taskStatus.taskId)); - const scheduledTimes = stage.tasks.map(task => { + const scheduledTimes = stage.taskStats.map(task => { if (typeof(task.stats) === "undefined") { parseDuration(0); } else { parseDuration(task.stats.totalScheduledTime); } }); - const cpuTimes = stage.tasks.map(task => { + const cpuTimes = stage.taskStats.map(task => { if (typeof(task.stats) === "undefined") { parseDuration(0); } else { @@ -381,8 +456,8 @@ class StageSummary extends React.Component { const renderTimestamp = Date.now(); const stageId = getStageNumber(stage.stageId); - StageSummary.renderHistogram('#scheduled-time-histogram-' + stageId, scheduledTimes, formatDuration); - StageSummary.renderHistogram('#cpu-time-histogram-' + stageId, cpuTimes, formatDuration); + StageSummary.renderHistogram('#scheduled-time-histogram-' + stageId, scheduledTimes, formatDurationMs); + StageSummary.renderHistogram('#cpu-time-histogram-' + stageId, cpuTimes, formatDurationMs); if (this.state.expanded) { // this needs to be a string otherwise it will also be passed to numberFormatter @@ -396,8 +471,8 @@ class StageSummary extends React.Component { tooltipValueLookups: tooltipValueLookups }); - $('#scheduled-time-bar-chart-' + stageId).sparkline(scheduledTimes, $.extend({}, stageBarChartProperties, {numberFormatter: formatDuration})); - $('#cpu-time-bar-chart-' + stageId).sparkline(cpuTimes, $.extend({}, stageBarChartProperties, {numberFormatter: formatDuration})); + $('#scheduled-time-bar-chart-' + stageId).sparkline(scheduledTimes, $.extend({}, stageBarChartProperties, {numberFormatter: formatDurationMs})); + $('#cpu-time-bar-chart-' + stageId).sparkline(cpuTimes, $.extend({}, stageBarChartProperties, {numberFormatter: formatDurationMs})); } this.setState({ @@ -408,16 +483,16 @@ class StageSummary extends React.Component { render() { const stage = this.props.stage; - if (stage === undefined || !stage.hasOwnProperty('plan')) { + if (stage === undefined) { return ( Information about this stage is unavailable. 
); } - - const totalBufferedBytes = stage.tasks - .map(task => task.outputBuffers.totalBufferedBytes) - .reduce((a, b) => a + b, 0); + // + // const totalBufferedBytes = stage.taskStats + // .map(task => task.outputBuffers.totalBufferedBytes) + // .reduce((a, b) => a + b, 0); const stageId = getStageNumber(stage.stageId); @@ -447,7 +522,7 @@ class StageSummary extends React.Component { Scheduled - {stage.stageStats.totalScheduledTime} + {formatDurationNs(stage.stageStats.totalScheduledTimeNanos)} @@ -455,7 +530,7 @@ class StageSummary extends React.Component { Blocked - {stage.stageStats.totalBlockedTime} + {formatDurationNs(stage.stageStats.totalBlockedTimeNanos)} @@ -463,7 +538,7 @@ class StageSummary extends React.Component { Wall - {stage.stageStats.totalUserTime} + {formatDurationNs(stage.stageStats.totalUserTimeNanos)} @@ -471,7 +546,7 @@ class StageSummary extends React.Component { CPU - {stage.stageStats.totalCpuTime} + {formatDurationNs(stage.stageStats.totalCpuTimeNanos)} @@ -504,14 +579,14 @@ class StageSummary extends React.Component { {stage.stageStats.totalMemoryReservation} - - - Buffers - - - {formatDataSize(totalBufferedBytes)} - - + {/**/} + {/* */} + {/* Buffers*/} + {/* */} + {/* */} + {/* {formatDataSize(totalBufferedBytes)}*/} + {/* */} + {/**/} Peak @@ -539,7 +614,7 @@ class StageSummary extends React.Component { Pending - {stage.tasks.filter(task => task.taskStatus.state === "PLANNED").length} + {stage.taskStats.filter(task => task.taskStatus.state === "PLANNED").length} @@ -547,7 +622,7 @@ class StageSummary extends React.Component { Running - {stage.tasks.filter(task => task.taskStatus.state === "RUNNING").length} + {stage.taskStats.filter(task => task.taskStatus.state === "RUNNING").length} @@ -555,7 +630,7 @@ class StageSummary extends React.Component { Finished - {stage.tasks.filter(function (task) { + {stage.taskStats.filter(function (task) { return task.taskStatus.state == "FINISHED" || task.taskStatus.state == "CANCELED" || task.taskStatus.state == "ABORTED" || @@ -568,7 +643,7 @@ class StageSummary extends React.Component { Total - {stage.tasks.length} + {stage.taskStats.length} @@ -776,8 +851,10 @@ export class QueryDetail extends React.Component { stageRefresh: true, taskRefresh: true, + splitRefresh: true, taskFilter: TASK_FILTER.NONE, + splitFilter: TASK_FILTER.NONE, }; this.refreshLoop = this.refreshLoop.bind(this); @@ -850,7 +927,7 @@ export class QueryDetail extends React.Component { refreshLoop() { clearTimeout(this.timeoutId); // to stop multiple series of refreshLoop from going on simultaneously const queryId = getFirstParameter(window.location.search); - $.get('/v1/query/' + queryId, function (query) { + $.get('/v1/query/stats/' + queryId, function (query) { let lastSnapshotStages = this.state.lastSnapshotStage; if (this.state.stageRefresh) { lastSnapshotStages = query.outputStage; @@ -932,6 +1009,20 @@ export class QueryDetail extends React.Component { } } + handleSplitRefreshClick() { + if (this.state.splitRefresh) { + this.setState({ + splitRefresh: false, + // lastSnapshotTasks: this.state.query.outputStage, + }); + } + else { + this.setState({ + splitRefresh: true, + }); + } + } + renderTaskRefreshButton() { if (this.state.taskRefresh) { return + } + else { + return + } + } + handleStageRefreshClick() { if (this.state.stageRefresh) { this.setState({ @@ -982,12 +1084,37 @@ export class QueryDetail extends React.Component { event.preventDefault(); } + renderSplitFilterListItem(splitFilter) { + return ( +
  • {splitFilter.text}
  • + ); + } + + handleSplitFilterClick(filter, event) { + this.setState({ + splitFilter: filter + }); + event.preventDefault(); + } + getTasksFromStage(stage) { - if (stage === undefined || !stage.hasOwnProperty('subStages') || !stage.hasOwnProperty('tasks')) { + if (stage === undefined || !stage.hasOwnProperty('subStages') || !stage.hasOwnProperty('taskStats')) { return [] } - return [].concat.apply(stage.tasks, stage.subStages.map(this.getTasksFromStage, this)); + return [].concat.apply(stage.taskStats, stage.subStages.map(this.getTasksFromStage, this)); + } + + getSplitsFromStage(stage) { + console.log("getting splits from stage"); + let tasks = this.getTasksFromStage(stage); + let splits = [] + for (let i = 0; i < tasks.length; i++) { + splits = splits.concat(tasks[i].detailedStats.driverStats) + } + + return splits; } componentDidMount() { @@ -1025,6 +1152,57 @@ export class QueryDetail extends React.Component { new Clipboard('.copy-button'); } + renderSplits() { + if (this.state.lastSnapshotTasks === null) { + return; + } + + let splits = []; + if (this.state.splitFilter !== TASK_FILTER.NONE) { + // TODO split state + splits = this.getSplitsFromStage(this.state.lastSnapshotTasks).filter(split => this.state.splitFilter.predicate(""), this); + } + + return ( +
    +
    +
    +

    Splits

    +
    +
    + + + + + + + +
    +
    + +
      + {this.renderSplitFilterListItem(TASK_FILTER.NONE)} + {this.renderSplitFilterListItem(TASK_FILTER.ALL)} + {this.renderSplitFilterListItem(TASK_FILTER.FINISHED)} +
    +
    +
      {this.renderSplitRefreshButton()}
    +
    +
    +
    +
    + +
    +
    +
    + ); + } + renderTasks() { if (this.state.lastSnapshotTasks === null) { return; @@ -1036,10 +1214,10 @@ export class QueryDetail extends React.Component { } return ( -
    +
    -

    Tasks

    +

    Tasks

    @@ -1084,10 +1262,10 @@ export class QueryDetail extends React.Component { } return ( -
    +
    -

    Stages

    +

    Stages

    @@ -1114,7 +1292,7 @@ export class QueryDetail extends React.Component { const query = this.state.query; if (query.warnings != null && query.warnings.length > 0) { return ( -
    +

    Warnings


    @@ -1173,7 +1351,7 @@ export class QueryDetail extends React.Component { const query = this.state.query; if (query.failureInfo) { return ( -
    +

    Error Information


    @@ -1239,9 +1417,9 @@ export class QueryDetail extends React.Component { return (
    -
    +
    -

    Session

    +

    Session


    @@ -1297,7 +1475,7 @@ export class QueryDetail extends React.Component {
    -

    Execution

    +

    Execution


    @@ -1345,11 +1523,11 @@ export class QueryDetail extends React.Component {
    -
    +
    -

    Resource Utilization Summary

    +

    Resource Utilization Summary


    @@ -1412,102 +1590,102 @@ export class QueryDetail extends React.Component {
    -
    -

    Timeline

    -
    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - Parallelism - -
    -
    Loading ...
    -
    -
    - {formatCount(this.state.cpuTimeRate[this.state.cpuTimeRate.length - 1])} -
    - Scheduled Time/s - -
    -
    Loading ...
    -
    -
    - {formatCount(this.state.scheduledTimeRate[this.state.scheduledTimeRate.length - 1])} -
    - Input Rows/s - -
    -
    Loading ...
    -
    -
    - {formatCount(this.state.rowInputRate[this.state.rowInputRate.length - 1])} -
    - Input Bytes/s - -
    -
    Loading ...
    -
    -
    - {formatDataSize(this.state.byteInputRate[this.state.byteInputRate.length - 1])} -
    - Memory Utilization - -
    -
    Loading ...
    -
    -
    - {formatDataSize(this.state.reservedMemory[this.state.reservedMemory.length - 1])} -
    -
    + {/*
    */} + {/*

    Timeline

    */} + {/*
    */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/* */} + {/*
    */} + {/* Parallelism*/} + {/* */} + {/*
    */} + {/* Loading ...
    */} + {/* */} + {/*
    */} + {/* {formatCount(this.state.cpuTimeRate[this.state.cpuTimeRate.length - 1])}*/} + {/*
    */} + {/* Scheduled Time/s*/} + {/* */} + {/*
    */} + {/* Loading ...
    */} + {/* */} + {/*
    */} + {/* {formatCount(this.state.scheduledTimeRate[this.state.scheduledTimeRate.length - 1])}*/} + {/*
    */} + {/* Input Rows/s*/} + {/* */} + {/*
    */} + {/* Loading ...
    */} + {/* */} + {/*
    */} + {/* {formatCount(this.state.rowInputRate[this.state.rowInputRate.length - 1])}*/} + {/*
    */} + {/* Input Bytes/s*/} + {/* */} + {/*
    */} + {/* Loading ...
    */} + {/* */} + {/*
    */} + {/* {formatDataSize(this.state.byteInputRate[this.state.byteInputRate.length - 1])}*/} + {/*
    */} + {/* Memory Utilization*/} + {/* */} + {/*
    */} + {/* Loading ...
    */} + {/* */} + {/*
    */} + {/* {formatDataSize(this.state.reservedMemory[this.state.reservedMemory.length - 1])}*/} + {/*
    */} + {/*
    */}
    {/*{this.renderWarningInfo()}*/} {this.renderFailureInfo()} -
    + ); } diff --git a/polardbx-executor/src/main/resources/webapp/src/components/QueryHeader.jsx b/polardbx-executor/src/main/resources/webapp/src/components/QueryHeader.jsx index 842baf975..d48cd0be7 100644 --- a/polardbx-executor/src/main/resources/webapp/src/components/QueryHeader.jsx +++ b/polardbx-executor/src/main/resources/webapp/src/components/QueryHeader.jsx @@ -81,7 +81,7 @@ export class QueryHeader extends React.Component { render() { const query = this.props.query; return ( -
    +

    diff --git a/polardbx-executor/src/main/resources/webapp/src/components/QueryList.jsx b/polardbx-executor/src/main/resources/webapp/src/components/QueryList.jsx index f3092bb60..f62fe0c95 100644 --- a/polardbx-executor/src/main/resources/webapp/src/components/QueryList.jsx +++ b/polardbx-executor/src/main/resources/webapp/src/components/QueryList.jsx @@ -571,7 +571,7 @@ export class QueryList extends React.Component {
    State:
    diff --git a/polardbx-executor/src/main/resources/webapp/src/components/StageDetail.jsx b/polardbx-executor/src/main/resources/webapp/src/components/StageDetail.jsx index ed17d80b4..dca1d78ad 100644 --- a/polardbx-executor/src/main/resources/webapp/src/components/StageDetail.jsx +++ b/polardbx-executor/src/main/resources/webapp/src/components/StageDetail.jsx @@ -20,7 +20,7 @@ import * as d3 from "d3"; import { formatCount, formatDataSize, - formatDuration, + formatDurationMs, getFirstParameter, getTaskNumber, initializeGraph, @@ -50,36 +50,37 @@ class OperatorSummary extends React.Component { - Output + OutputRows - {formatCount(operator.outputRowCount) + " rows (" + formatDataSize(operator.outputBytes) + ")"} + {formatCount(operator.outputRowCount)} + {/*{formatCount(operator.outputRowCount) + " rows (" + formatDataSize(operator.outputBytes) + ")"}*/} - Startup Time + StartupTime - {formatDuration(operator.startupDuration * 1000)} + {formatDurationMs(operator.startupDuration * 1000)} - Run Time + RunTime - {formatDuration(operator.duration * 1000)} - - - - - Memory - - - {formatDataSize(operator.memory)} + {formatDurationMs(operator.duration * 1000)} + {/**/} + {/* */} + {/* Memory*/} + {/* */} + {/* */} + {/* {formatDataSize(operator.memory)}*/} + {/* */} + {/**/}
    @@ -87,6 +88,20 @@ class OperatorSummary extends React.Component { } } +class LocalBufferOperator extends React.Component { + render() { + return ( +
    +
    +
    + LocalBuffer +
    +
    +
    + ); + } +} + const BAR_CHART_PROPERTIES = { type: 'bar', barSpacing: '0', @@ -144,7 +159,7 @@ class OperatorDetail extends React.Component { name: "Total Wall Time", id: "totalWallTime", supplier: getTotalWallTime, - renderer: formatDuration + renderer: formatDurationMs }, { name: "Input Rows", @@ -267,7 +282,7 @@ class OperatorDetail extends React.Component { Wall Time - {formatDuration(totalWallTime)} + {formatDurationMs(totalWallTime)} @@ -275,7 +290,7 @@ class OperatorDetail extends React.Component { Blocked - {formatDuration(parseDuration(operator.blockedWall))} + {formatDurationMs(parseDuration(operator.blockedWall))} @@ -298,7 +313,7 @@ class OperatorDetail extends React.Component {
    -
    +
    Statistic @@ -367,7 +382,7 @@ class StageOperatorGraph extends React.Component { const sourceResult = this.computeOperatorGraphs(mapInfo, mapInfo.get(source), operatorMap); sourceResult.forEach((operator, pipelineId) => { if (sourceResults.has(pipelineId)) { - console.error("Multiple sources for ", element['@type'], " had the same pipeline ID"); + console.error("Multiple sources for " + element.relOp + " had the same pipeline ID"); return sourceResults; } sourceResults.set(pipelineId, operator); @@ -423,17 +438,18 @@ class StageOperatorGraph extends React.Component { computeOperatorMap() { const operatorMap = new Map(); this.props.stage.stageStats.operatorSummaries.forEach(operator => { - if (!operatorMap.has(operator.operatorId)) { - operatorMap.set(operator.operatorId, []) - } - - operatorMap.get(operator.operatorId).push(operator); + // if (!operatorMap.has(operator.operatorId)) { + // operatorMap.set(operator.operatorId, []) + // } + let operators = []; + operators.push(operator) + operatorMap.set(operator.operatorId, operators); }); return operatorMap; } - computeD3StageOperatorGraph(graph, operator, sink, pipelineNode) { + computeD3StageOperatorGraph(graph, operator, sink, pipelineNode, pipelineRootNodeMap) { const operatorNodeId = "operator-" + operator.pipelineId + "-" + operator.operatorId; // this is a non-standard use of ReactDOMServer, but it's the cleanest way to unify DagreD3 with React @@ -442,7 +458,9 @@ class StageOperatorGraph extends React.Component { graph.setNode(operatorNodeId, {class: "operator-stats", label: html, labelType: "html"}); if (operator.hasOwnProperty("child")) { - this.computeD3StageOperatorGraph(graph, operator.child, operatorNodeId, pipelineNode); + this.computeD3StageOperatorGraph(graph, operator.child, operatorNodeId, pipelineNode, pipelineRootNodeMap); + } else { + pipelineRootNodeMap.set(pipelineNode, operatorNodeId); } if (sink !== null) { @@ -459,6 +477,7 @@ class StageOperatorGraph extends React.Component { const stage = this.props.stage; const operatorMap = this.computeOperatorMap(); + const pipelineDepMap : Map = new Map(Object.entries(stage.tasks[0].taskStats.pipelineDeps)) const rootId = stage.plan.rootId const rels = JSON.parse(stage.plan.relNodeJson).rels @@ -471,15 +490,93 @@ class StageOperatorGraph extends React.Component { const operatorGraphs = this.computeOperatorGraphs(mapInfo, mapInfo.get(rootId), operatorMap); const graph = initializeGraph(); + const pipelineNodeMap: Map = new Map(); + const pipelineRootNodeMap: Map = new Map(); + const pipelineTopNodeMap: Map = new Map(); operatorGraphs.forEach((operator, pipelineId) => { const pipelineNodeId = "pipeline-" + pipelineId; + pipelineNodeMap.set(pipelineId, pipelineNodeId); graph.setNode(pipelineNodeId, { label: "Pipeline " + pipelineId + " ", clusterLabelPos: 'top', style: 'fill: #2b2b2b', labelStyle: 'fill: #fff' }); - this.computeD3StageOperatorGraph(graph, operator, null, pipelineNodeId) + const operatorNodeId = "operator-" + operator.pipelineId + "-" + operator.operatorId; + pipelineTopNodeMap.set(pipelineNodeId, operatorNodeId); + this.computeD3StageOperatorGraph(graph, operator, null, pipelineNodeId, pipelineRootNodeMap); + }); + pipelineDepMap.forEach((childIds, parentId) => { + for (let i = 0; i < childIds.length; i++) { + let childNode = pipelineNodeMap.get(childIds[i]); + if (childNode === undefined) { + const childNodeId = "pipeline-" + childIds[i]; + let childNodeHtml = { + label: "Pipeline " + childIds[i], + clusterLabelPos: 'top', + style: 'fill: #2b2b2b', + 
labelStyle: 'fill: #fff' + }; + graph.setNode(childNodeId, childNodeHtml); + const localBufferNodeId = "localBuffer-" + parentId; + const html = ReactDOMServer.renderToString(); + graph.setNode(localBufferNodeId, {class: "operator-stats", label: html, labelType: "html"}); + graph.setParent(localBufferNodeId, childNodeId); + pipelineNodeMap.set(childIds[i], childNodeId); + pipelineRootNodeMap.set(childNodeId, localBufferNodeId); + pipelineTopNodeMap.set(childNodeId, localBufferNodeId); + childNode = childNodeId; + } + let parentNode = pipelineNodeMap.get(parseInt(parentId)); + if (parentNode === undefined) { + const parentNodeId = "pipeline-" + parentId; + let parentNodeHtml = { + label: "Pipeline " + parentId, + clusterLabelPos: 'top', + style: 'fill: #2b2b2b', + labelStyle: 'fill: #fff' + }; + graph.setNode(parentNodeId, parentNodeHtml); + const localBufferNodeId = "localBuffer-" + parentId; + const html = ReactDOMServer.renderToString(); + graph.setNode(localBufferNodeId, {class: "operator-stats", label: html, labelType: "html"}); + graph.setParent(localBufferNodeId, parentNodeId); + pipelineNodeMap.set(parseInt(parentId), parentNodeId); + pipelineRootNodeMap.set(parentNodeId, localBufferNodeId); + pipelineTopNodeMap.set(parentNodeId, localBufferNodeId); + parentNode = parentNodeId; + } + const vParentInput = "v-" + parentNode + "-input"; + const vChildOutput = "v-" + childNode + "-output"; + graph.setNode(vParentInput, { + label: "", + shape: "circle", + }); + graph.setNode(vChildOutput, { + label: "", + shape: "circle", + }); + graph.setParent(vParentInput, parentNode); + graph.setParent(vChildOutput, childNode); + graph.setEdge(vChildOutput, vParentInput, { + class: "pipeline-edge", + arrowhead: "vee", + arrowheadClass: "pipeline-arrowhead", + style: "stroke-width: 2px", + }); + graph.setEdge(vParentInput, pipelineRootNodeMap.get(parentNode), { + class: "v-pipeline-edge", + arrowhead: "undirected", + style: "stroke-width: 0", + }); + graph.setEdge(pipelineTopNodeMap.get(childNode), vChildOutput, { + class: "v-pipeline-edge", + arrowhead: "undirected", + style: "stroke-width: 0", + }); + } }); $("#operator-canvas").html(""); diff --git a/polardbx-executor/src/main/resources/webapp/src/components/WorkerThreadList.jsx b/polardbx-executor/src/main/resources/webapp/src/components/WorkerThreadList.jsx index 262aa125d..9e80f1ab3 100644 --- a/polardbx-executor/src/main/resources/webapp/src/components/WorkerThreadList.jsx +++ b/polardbx-executor/src/main/resources/webapp/src/components/WorkerThreadList.jsx @@ -145,7 +145,7 @@ export class WorkerThreadList extends React.Component { return (
diff --git a/polardbx-executor/src/main/resources/webapp/src/pipeline.jsx b/polardbx-executor/src/main/resources/webapp/src/pipeline.jsx
new file mode 100644
index 000000000..0179120a3
--- /dev/null
+++ b/polardbx-executor/src/main/resources/webapp/src/pipeline.jsx
@@ -0,0 +1,15 @@
+import React from "react";
+import ReactDOM from "react-dom";
+import {PipelinePlan} from "./components/Pipeline";
+import {PageTitle} from "./components/PageTitle";
+import {getFirstParameter} from "./utils";
+
+ReactDOM.render(
+    ,
+    document.getElementById('title')
+);
+
+ReactDOM.render(
+    ,
+    document.getElementById('live-plan-container')
+);
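The JSX arguments to the two ReactDOM.render calls in the new pipeline.jsx were lost when this patch was extracted (the elements were swallowed as markup). Judging from the imports and the two mount points, the file presumably renders PageTitle into #title and PipelinePlan into #live-plan-container, along the lines of the sketch below; the props are assumptions, not the committed code.

```jsx
// Presumed shape of the stripped render calls; the titles text and the
// queryId prop are guesses based on the webapp's sibling entry points.
ReactDOM.render(
    <PageTitle titles={["Pipeline Performance"]}/>,
    document.getElementById('title')
);

ReactDOM.render(
    <PipelinePlan queryId={getFirstParameter(window.location.search)}/>,
    document.getElementById('live-plan-container')
);
```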
diff --git a/polardbx-executor/src/main/resources/webapp/src/utils.js b/polardbx-executor/src/main/resources/webapp/src/utils.js
index 056186b14..73317b065 100644
--- a/polardbx-executor/src/main/resources/webapp/src/utils.js
+++ b/polardbx-executor/src/main/resources/webapp/src/utils.js
@@ -23,10 +23,10 @@ export const GLYPHICON_DEFAULT = {color: '#1edcff'};
 export const GLYPHICON_HIGHLIGHT = {color: '#999999'};

 const STATE_COLOR_MAP = {
-    QUEUED: '#1b8f72',
-    RUNNING: '#19874e',
+    QUEUED: '#7bb3fb',
+    RUNNING: '#265cdf',
     PLANNING: '#674f98',
-    FINISHED: '#1a4629',
+    FINISHED: '#22b647',
     BLOCKED: '#61003b',
     USER_ERROR: '#9a7d66',
     CANCELED: '#858959',
@@ -291,16 +291,16 @@ number
     return Number.parseInt(stageId.slice(stageId.indexOf('.') + 1, stageId.length))
 }

-export function getTaskIdSuffix(taskId
-
-:
-string
-):
-string
+export function getTaskIdSuffix(taskId: string): string
 {
     return taskId.slice(taskId.indexOf('.') + 1, taskId.length)
 }

+export function getFullSplitIdSuffix(driverId: string): string
+{
+    return driverId.substring(driverId.indexOf('.') + 1)
+}
+
 export function getTaskNumber(taskId

 :
@@ -348,14 +348,7 @@ export function getHostAndPort(taskLocation) {
     return taskLocation.nodeServer.host + ":" + taskLocation.nodeServer.httpPort;
 }

-export function computeRate(count
-
-:
-number, ms
-:
-number
-):
-number
+export function computeRate(count: number, ms: number): number
 {
     if (ms === 0) {
         return 0;
@@ -363,12 +356,7 @@ number
     return (count / ms) * 1000.0;
 }

-export function precisionRound(n
-
-:
-number
-):
-string
+export function precisionRound(n: number): string
 {
     if (n < 10) {
         return n.toFixed(2);
@@ -379,12 +367,7 @@ string
     return Math.round(n).toString();
 }

-export function formatDuration(duration
-
-:
-number
-):
-string
+export function formatDurationMs(duration: number): string
 {
     let unit = "ms";
     if (duration > 1000) {
@@ -410,12 +393,46 @@ string
     return precisionRound(duration) + unit;
 }

-export function formatRows(count
+export function formatDurationNs(duration: number): string
+{
+    let unit = "ns";
+    if (duration > 1000) {
+        duration /= 1000;
+        unit = "us";
+    }
+    if (duration > 1000) {
+        duration /= 1000;
+        unit = "ms";
+    }
+    if (duration > 1000) {
+        duration /= 1000;
+        unit = "s";
+    }
+    if (unit === "s" && duration > 60) {
+        duration /= 60;
+        unit = "m";
+    }
+    if (unit === "m" && duration > 60) {
+        duration /= 60;
+        unit = "h";
+    }
+    if (unit === "h" && duration > 24) {
+        duration /= 24;
+        unit = "d";
+    }
+    if (unit === "d" && duration > 7) {
+        duration /= 7;
+        unit = "w";
+    }
+    return precisionRound(duration) + unit;
+}
+
+export function formatNumber(num: number): string
+{
+    return num.toLocaleString();
+}

-:
-number
-):
-string
+export function formatRows(count: number): string
 {
     if (count === 1) {
         return "1 row";
@@ -424,12 +441,7 @@ string
     return formatCount(count) + " rows";
 }

-export function formatCount(count
-
-:
-number
-):
-string
+export function formatCount(count: number): string
 {
     let unit = "";
     if (count > 1000) {
@@ -455,22 +467,12 @@ string
     return precisionRound(count) + unit;
 }

-export function formatDataSizeBytes(size
-
-:
-number
-):
-string
+export function formatDataSizeBytes(size: number): string
 {
     return formatDataSizeMinUnit(size, "");
 }

-export function formatDataSize(size
-
-:
-number
-):
-string
+export function formatDataSize(size: number): string
 {
     return formatDataSizeMinUnit(size, "B");
 }
@@ -541,13 +543,8 @@ switch (match[2]) {
     }
 }

-export function parseDuration(value
-
-:
-string
-):
-    ? number {
-    const DURATION_PATTERN = /^\s*(\d+(?:\.\d+)?)\s*([a-zA-Z]+)\s*$/;
+export function parseDuration(value: string): ? number {
+const DURATION_PATTERN = /^\s*(\d+(?:\.\d+)?)\s*([a-zA-Z]+)\s*$/;

     const match = DURATION_PATTERN.exec(value);
     if (match === null) {
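Besides collapsing the line-wrapped Flow annotations back into single-line signatures, the utils.js hunks rename formatDuration to formatDurationMs and add a nanosecond-based variant that cascades through ns → us → ms → s → m → h → d → w, dividing by the next unit's size whenever the value exceeds it. A few spot checks of the cascade, assuming the precisionRound defined in the same file:

```js
// Spot checks of formatDurationNs against the thresholds in the hunk above.
console.log(formatDurationNs(950));                   // "950ns"  (below the 1000ns threshold)
console.log(formatDurationNs(1500));                  // "1.50us" (1500ns -> 1.5us)
console.log(formatDurationNs(90 * 1000 ** 3));        // "1.50m"  (90s -> 1.5 minutes)
console.log(formatDurationNs(36 * 3600 * 1000 ** 3)); // "1.50d"  (36h -> 1.5 days)
```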
diff --git a/polardbx-executor/src/main/resources/webapp/src/webpack.config.js b/polardbx-executor/src/main/resources/webapp/src/webpack.config.js
index 0fea9cfb0..4186bc0a3 100644
--- a/polardbx-executor/src/main/resources/webapp/src/webpack.config.js
+++ b/polardbx-executor/src/main/resources/webapp/src/webpack.config.js
@@ -3,6 +3,7 @@ module.exports = {
         'index': __dirname + '/index.jsx',
         'query': __dirname + '/query.jsx',
         'plan': __dirname + '/plan.jsx',
+        'pipeline': __dirname + '/pipeline.jsx',
         'embedded_plan': __dirname + '/embedded_plan.jsx',
         'stage': __dirname + '/stage.jsx',
         'worker': __dirname + '/worker.jsx',
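The webpack change registers the new page with the existing one-bundle-per-page setup: each entry key becomes its own output bundle that the matching HTML page loads. A minimal sketch of that relationship; the output block here is an assumption for illustration, since the project's real output settings live elsewhere in webpack.config.js:

```js
// Each entry key yields one bundle; '[name]' expands to the key, so the new
// entry emits pipeline.js for a pipeline page to load. The output paths
// below are illustrative, not the project's actual configuration.
module.exports = {
    entry: {
        'pipeline': __dirname + '/pipeline.jsx',
    },
    output: {
        path: __dirname + '/../dist',
        filename: '[name].js',
    },
};
```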
diff --git a/polardbx-executor/src/main/resources/webapp/stage.html b/polardbx-executor/src/main/resources/webapp/stage.html
index d830397c1..4f91224fb 100644
--- a/polardbx-executor/src/main/resources/webapp/stage.html
+++ b/polardbx-executor/src/main/resources/webapp/stage.html
@@ -4,10 +4,10 @@
-    <title>Stage Performance - PolarDBX</title>
+    <title>Stage Performance - PolarDB-X</title>
 [two further <head> lines are changed in this hunk; their markup was lost in extraction]
@@ -43,20 +43,23 @@
 [this hunk reworks the page-body scaffolding around the "Loading..." placeholder; the markup was lost in extraction and only the "Loading..." text survives]